Dataset schema (per-row field summary):

  field               type            min   max
  query_id            string length   32    32
  query               string length   9     4.01k
  positive_passages   list length     1     1
  negative_passages   list length     88    101
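A minimal sketch of the record structure implied by the schema above, assuming the split is available locally as a JSON Lines file; the file name "corpus.jsonl" is hypothetical, while the field names and length bounds come from the table.

import json

# Hypothetical local JSON Lines dump of this split; each line is one record
# with the four fields summarized in the schema table.
with open("corpus.jsonl") as f:
    for line in f:
        row = json.loads(line)
        assert len(row["query_id"]) == 32                    # 32-char hex id
        assert len(row["positive_passages"]) == 1            # exactly one positive passage per query
        assert 88 <= len(row["negative_passages"]) <= 101    # 88 to 101 negative passages per query
        print(row["query"][:80])                             # free-text query, 9 to ~4k chars
        break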
54edd522ff7874290002d20c11258464
Displays information about a given dataloader. Prints the size of the dataset, the dimensions of the images and target images, and displays a batch of images and targets with `imshow_batch()`.
[ { "docid": "229d1344396f33cd0894a070ef6f73a1", "score": "0.8973931", "text": "def dataloader_info(dataloader):\n images, targets = iter(dataloader).next()\n print(\"Number of images:\", len(dataloader.dataset))\n print(\"Image size:\", images.size())\n print(\"Targets size:\", targets.size())\n imshow_batch(images, targets)", "title": "" } ]
[ { "docid": "c860b5e3ffea39704ce31445f96037ca", "score": "0.73829144", "text": "def print_dataset(dataloader, num_elements=3):\n for element, i in zip(dataloader, range(num_elements)):\n print('+++ Image {} +++'.format(i))\n for key in element.keys():\n print(key, element[key].shape)\n plt.imshow(np.array(element[('color', 0, 0)])[0, :, :, :].transpose(1, 2, 0))", "title": "" }, { "docid": "8cca901fb2b8578ae53da27f7f36e4d0", "score": "0.73245406", "text": "def show_batch(self):\n\n data_iter = iter(self.train_loader)\n images, labels = data_iter.next()\n\n print('Labels: ', labels)\n print('Batch shape: ', images.size())\n\n im = utils.make_grid(images)\n plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))\n plt.show()", "title": "" }, { "docid": "58dc4cc41149f465a82165160a60f3a6", "score": "0.7292736", "text": "def show_batch(self):\n\n data_iter = iter(self.train_loader)\n images, labels = data_iter.next()\n\n print('Labels: ', labels)\n print('Batch shape: ', images.size())\n\n img = utils.make_grid(images)\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()", "title": "" }, { "docid": "3388154d24f5d9e828811ea081910c5e", "score": "0.71086234", "text": "def display_dataset(self, train_utils): \n \n print(train_utils.mode)\n dataiter = iter(train_utils.loader)\n images, labels = dataiter.next()\n print(images.shape)\n fig = plt.figure(figsize=(25, 4))\n\n for idx in np.arange(min(train_utils.batch_size, 20)):\n ax = fig.add_subplot(2, 10, idx+1, xticks=[], yticks=[])\n plt.imshow(self._im_convert(images[idx], mean=train_utils.mean, std=train_utils.std))\n ax.set_title(train_utils.classes[labels[idx].numpy()])", "title": "" }, { "docid": "6cedaada5ec8bbf87743c2522c06975c", "score": "0.6886764", "text": "def visualize_dataset(images, labels):\n num_samples = len(images)\n for i in range(num_samples):\n plt.subplot(1, num_samples, i + 1)\n plt.imshow(images[i])\n plt.title(labels[i])\n plt.show()", "title": "" }, { "docid": "8163b74f8703c9645a9975510dee22c7", "score": "0.68611073", "text": "def visualize_dataset(train_loader):\r\n idx = random.randint(0, len(train_loader.dataset))\r\n sample = train_loader.dataset[idx]\r\n print(idx, sample['image'].shape, CLASSES[sample['classes']], SPECIES[sample['species']])\r\n img = sample['image']\r\n plt.imshow(transforms.ToPILImage()(img))\r\n plt.show()", "title": "" }, { "docid": "88e006c7ec41bdaa15b0dcef96886935", "score": "0.6490155", "text": "def visualize_model(model, device, class_names, dataloaders,num_images=6):\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (samples) in enumerate(dataloaders['Val']):\n inputs = samples['image']\n labels = samples['label']\n inputs = inputs.to(device= device, dtype=torch.float)\n labels = labels.to(device= device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: %d, real %d'%(class_names[preds[j]], labels.cpu().data[j]))\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)", "title": "" }, { "docid": "53c1596e6d3a9d89b3cf264ba92ecb5b", "score": "0.64771116", "text": "def display_image_samples(dataset):\r\n\tsamples = sample_images(dataset.feature, dataset.label)\r\n\tsubtitles = 
load_names()\r\n\tdisplay_image_grid_alt(samples, title=\"Image samples\", subtitle=subtitles)", "title": "" }, { "docid": "b2d9a1f1295240d2e6641be90985c27d", "score": "0.64277613", "text": "def plot_data(dataloader, idx2word, idx2label, num_plots=4):\n for i, data in enumerate(dataloader):\n\n # Read dataset, select one random sample from the mini-batch\n batch_size = len(data['label'])\n idx = np.random.choice(batch_size)\n ques = data['question'][idx]\n label = data['label'][idx]\n img = data['image'][idx]\n\n # Convert question tokens to words & answer class index to label\n ques_str = ' '.join([idx2word[word_idx] for word_idx in ques.tolist()])\n ans_str = ' '.join(idx2label[label.tolist()])\n\n # Plot Data\n plt.imshow(img.permute(1, 2, 0))\n plt.text(0, 0, ques_str, bbox=dict(fill=True, facecolor='white', edgecolor='red', linewidth=2))\n plt.text(220, 220, ans_str, bbox=dict(fill=True, facecolor='white', edgecolor='blue', linewidth=2))\n plt.show()\n\n i += 1\n\n if i >= num_plots:\n break", "title": "" }, { "docid": "c3cec1538f52aad5392bdca36ecf8603", "score": "0.64085317", "text": "def display_batches(data_loader: torch.utils.data.DataLoader, *, n_batches: int=1, width:int=800, show_y: bool=False):\n to_pil_img = ToPILImage()\n for i, (x, y) in enumerate(data_loader):\n if i >= n_batches:\n return\n if isinstance(x[0], torch.Tensor):\n x = [to_pil_img(t) for t in x]\n display(stack_imgs(x, width=width))\n if show_y:\n display(y)", "title": "" }, { "docid": "98e7d013be1945d982564cc982023cf2", "score": "0.6267293", "text": "def info(self, *args):\n if not args:\n args = self.images.keys()\n for i in args:\n print(f\"Image: {i}\\t Shape: {self.images[i].shape}\")", "title": "" }, { "docid": "1c758ac7e9154e5a910981d6b557bbd0", "score": "0.62287796", "text": "def inspect(dataset, labels, i):\n if dataset.shape[3] == 1:\n shape = dataset.shape\n dataset = dataset.reshape(shape[0], shape[1], shape[2])\n print(labels)\n plt.imshow(dataset[i])\n plt.show()", "title": "" }, { "docid": "624759c4bb8342882f714a99a73bceaa", "score": "0.61783916", "text": "def imshow_batch(\n images, targets=None, nrow=8, padding=2, scale_each=False, pad_value=0\n):\n if not isinstance(images, torch.Tensor) or images.dim() != 4:\n raise ValueError(\n \"expected '{0}', got '{1}'\".format(type(torch.Tensor), type(images))\n )\n\n # Make a grid with the images\n images = (\n torchvision.utils.make_grid(\n images,\n nrow=nrow,\n padding=padding,\n scale_each=scale_each,\n pad_value=pad_value,\n )\n .cpu()\n .numpy()\n )\n\n # Check if targets is a Tensor. 
If it is, display it; otherwise, show the images\n if isinstance(targets, torch.Tensor) and targets.dim() == 4:\n targets = (\n torchvision.utils.make_grid(\n targets,\n nrow=nrow,\n padding=padding,\n scale_each=scale_each,\n pad_value=pad_value,\n )\n .cpu()\n .numpy()\n )\n\n fig, axarr = plt.subplots(3, 1)\n axarr[0].set_title(\"Batch of images\")\n axarr[0].axis(\"off\")\n axarr[0].imshow(np.transpose(images, (1, 2, 0)))\n\n axarr[1].set_title(\"Batch of targets\")\n axarr[1].axis(\"off\")\n axarr[1].imshow(np.transpose(targets, (1, 2, 0)))\n\n axarr[2].set_title(\"Targets overlayed with images\")\n axarr[2].axis(\"off\")\n axarr[2].imshow(np.transpose(images, (1, 2, 0)))\n axarr[2].imshow(np.transpose(targets, (1, 2, 0)), alpha=0.5)\n else:\n plt.imshow(np.transpose(images, (1, 2, 0)))\n plt.axis(\"off\")\n plt.gca().set_title(\"Batch of samples\")\n\n plt.show()", "title": "" }, { "docid": "afa09731f57eb32bb1c463cbf430a037", "score": "0.616995", "text": "def show_batch(sample_batched,figsize =(15,15)):\n images_batch,labels_batch = sample_batched['image'],sample_batched['label']\n batch_size = len(images_batch)\n im_size = images_batch.shape[2]\n \n grid_img = utils.make_grid(images_batch)\n grid_label = utils.make_grid(labels_batch.reshape(-1,1,labels_batch.shape[1],labels_batch.shape[2]))\n \n \n show_sample(grid_img.numpy().transpose(1,2,0),figsize=figsize)\n show_sample(grid_label.numpy().transpose(1,2,0)[:,:,0],figsize=figsize)", "title": "" }, { "docid": "fbac70043d53b79bf8f33e6bd99a84da", "score": "0.61138695", "text": "def show_img(img_arr, label_arr, meta, index, label_fn=default_label_fn):\n one_img = img_arr[index, :]\n # Assume image size is 32 x 32. First 1024 px is r, next 1024 px is g, last 1024 px is b from the (r,g b) channel\n r = one_img[:1024].reshape(32, 32)\n g = one_img[1024:2048].reshape(32, 32)\n b = one_img[2048:].reshape(32, 32)\n rgb = np.dstack([r, g, b])\n img = Image.fromarray(np.array(rgb), 'RGB')\n #display(img) # doesn't work...\n print(label_fn(index, meta[label_arr[index][0]].decode('utf-8')))", "title": "" }, { "docid": "46a936c0add5a6b612e5f04489914953", "score": "0.6110557", "text": "def show_sample(dataset, idx=None, figsize=(20,20), seed=None):\n np.random.seed(seed=seed)\n if idx==None: \n idx = np.random.randint(low=0, high=len(dataset)-1)\n x, y = dataset[idx]\n f, axarr = plt.subplots(1,2, figsize=figsize) # create visualizations\n \n axarr[0].imshow(x.permute(1,2,0)) # visualize image tensor\n axarr[0].set_title('Input')\n axarr[1].imshow(y.permute(1,2,0).squeeze(), cmap=plt.cm.gray) # visualize image tensor\n axarr[1].set_title('Target')", "title": "" }, { "docid": "80eba7e2fbe03383c7b7c612ea202b5c", "score": "0.60951453", "text": "def print_dataset_info(dtype, trainset_, query_label_, gall_label_, start_time_):\n n_class_ = len(np.unique(trainset_.train_color_label))\n nquery_ = len(query_label_)\n ngall_ = len(gall_label_)\n print(f'Dataset {dtype} statistics:')\n print(' ------------------------------')\n print(' subset | # ids | # images')\n print(' ------------------------------')\n print(f' visible | {n_class_:5d} | {len(trainset_.train_color_label):8d}')\n print(f' thermal | {n_class_:5d} | {len(trainset_.train_thermal_label):8d}')\n print(' ------------------------------')\n print(f' query | {len(np.unique(query_label_)):5d} | {nquery_:8d}')\n print(f' gallery | {len(np.unique(gall_label_)):5d} | {ngall_:8d}')\n print(' ------------------------------')\n print(f'Data Loading Time:\\t {time.time() - start_time_:.3f}')", "title": 
"" }, { "docid": "410c5120e3198160696c29810f1f7572", "score": "0.60731965", "text": "def example_of_usage(pytorch_dataloader_args):\n\n data_loader = AugmentedOnlineMixingDataset(**vars(pytorch_dataloader_args))\n data_gen = get_data_gen_from_loader(data_loader)\n\n batch_cnt = 0\n print(\"Loading {} Batches of size: {} for mixtures with {} active \"\n \"sources...\".format(\n data_loader.get_n_batches(),\n data_loader.batch_size,\n data_loader.n_sources) + \"\\n\" + \"=\" * 20 + \"\\n\")\n\n from tqdm import tqdm\n for batch_data_list in tqdm(data_gen):\n # the returned elements are tensors\n # Always the first dimension is the selected batch size\n mixture_wav, sources_wavs = batch_data_list\n if not batch_cnt:\n print(\"Returned mixture_wav of type: {} and size: \"\n \"{}\".format(type(mixture_wav),\n mixture_wav.size()))\n print(\"Returned sources_wavs of type: {} and size: \"\n \"{}\".format(type(sources_wavs),\n sources_wavs.size()))\n batch_cnt += 1", "title": "" }, { "docid": "2d439dfcfb45bf7a8e3635fa03b8f875", "score": "0.6047583", "text": "def show_batch(dl,rows=4,figsize=(4*3,4*3),padding=2,normalize=True):\n plt.figure(figsize=figsize)\n data = next(iter(dl))\n x,_ = data[:rows**2]\n imgs = vutils.make_grid(x,padding=padding,normalize=normalize).numpy().transpose(1,2,0)\n plt.imshow(imgs)\n plt.axis('off')", "title": "" }, { "docid": "e75826d0f6764f8e495b60fa92296200", "score": "0.6045826", "text": "def show(thing, domain=(0, 1), **kwargs):\n def collapse_if_needed(arr):\n channels = arr.shape[-1]\n if channels not in [1, 3, 4]:\n # log.debug(\"Collapsing %s channels into 3 RGB channels.\" % K)\n return collapse_channels(arr)\n return arr\n\n if isinstance(thing, np.ndarray):\n rank = len(thing.shape)\n\n if rank in [3, 4]:\n thing = collapse_if_needed(thing)\n\n if rank == 4:\n # log.debug(\"Show is assuming rank 4 tensor to be a list of images.\")\n images(thing, domain=domain, **kwargs)\n elif rank in (2, 3):\n # log.debug(\"Show is assuming rank 2 or 3 tensor to be an image.\")\n image(thing, domain=domain, **kwargs)\n else:\n # log.warning(\"Show only supports numpy arrays of rank 2-4. Using repr().\")\n print(repr(thing))\n elif isinstance(thing, (list, tuple)):\n # log.debug(\"Show is assuming list or tuple to be a collection of images.\")\n\n if isinstance(thing[0], np.ndarray) and len(thing[0].shape) == 3:\n thing = [collapse_if_needed(t) for t in thing]\n\n images(thing, domain=domain, **kwargs)\n else:\n # log.warning(\"Show only supports numpy arrays so far. 
Using repr().\")\n print(repr(thing))", "title": "" }, { "docid": "fcf9cd6ba1201ad7e6109683f4853a00", "score": "0.6020732", "text": "def __show_image_data(self, generator: tf.keras.utils.Sequence) -> None:\n j = random.randint(0, len(generator))\n images, labels = generator[j]\n labels = np.argmax(labels, axis=-1)\n if images.shape[0] > 5:\n images = images[:5, :, :, :]\n labels = labels[:5]\n predict_labels = np.argmax(self.nn.predict(images), axis=-1)\n predictions = np.max(self.nn.predict(images), axis=-1)\n\n fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(12, 4))\n fig.suptitle('Network prediction results:', fontsize=14, fontweight=\"bold\")\n for i in range(images.shape[0]):\n text = 'True label:\\n {},\\n Predicted label:\\n {},\\n Confidence of prediction:\\n {:.02f}%.'.format(\n self.class_names[labels[i]],\n self.class_names[predict_labels[i]],\n predictions[i] * 100\n )\n axes[i].imshow(images[i, :, :, :])\n axes[i].set_title(text, size=9)\n axes[i].axis('off')\n plt.show()", "title": "" }, { "docid": "8e7cb9bd212eddd9405971e91410a1b8", "score": "0.6013365", "text": "def show_landmarks_batch(sample_batched):\n images_batch, labels_batch = \\\n sample_batched['image'], sample_batched['label']\n grid = utils.make_grid(images_batch)\n plt.imshow(grid.numpy().transpose((1, 2, 0)))\n plt.title('Batch from dataloader')\n print(labels_batch)", "title": "" }, { "docid": "cf3293aa2329bc2fa493f2c5439f5ff0", "score": "0.6003867", "text": "def plot_dataset(self):\n # load random 30 images from dataset\n rows = 10\n cols = 5\n\n p = (Path(self.opt.dataroot) / self.opt.phase).glob('**/*')\n files = [x for x in p if x.is_file()]\n\n image_grid = torch.tensor([])\n for i in range(rows):\n image_row = torch.tensor([])\n for j in range(cols):\n idx = random.randint(0, len(files) - 1)\n image = ToTensor()(Image.open(files[idx]).resize((200, 100)))\n image_row = torch.cat((image_row, image), 1)\n\n image_grid = torch.cat((image_grid, image_row), 2)\n\n self.writer.add_image(f'Dataset/{self.opt.phase}', image_grid, 0)", "title": "" }, { "docid": "b052a1405a7dd2df102dd97880d280c8", "score": "0.59269667", "text": "def load_and_display(num_epochs=10, learning_rate=0.001, dropout_p=0.0):\n\n model_filename = f'ne{num_epochs}lr{learning_rate}dp{dropout_p}'\n model = CNNModel().cuda()\n model.load_state_dict(torch.load(os.path.join(SAVED_MODEL_DIR, f'{model_filename}.pth')))\n model.eval()\n\n test_set = datasets.FashionMNIST(root=DATASET_ROOT, train=False, \n transform=transforms.ToTensor())\n\n sampler = RandomSampler(data_source=test_set)\n test_loader = DataLoader(dataset=test_set, batch_size=5, sampler=sampler)\n\n\n images, labels = next(test_loader.__iter__())\n images = images.cuda()\n labels = labels.cuda()\n\n logits = model(images)\n probs = F.softmax(logits.data, dim=1)\n _, predicted = torch.max(probs, 1)\n\n # Invert the dictionary class_to_idx to idx_to_class\n idx_to_class = {v: k for k, v in test_set.class_to_idx.items()}\n\n fig, axex = plt.subplots(1, 5, figsize=(25,25))\n\n zip_gen = axex.ravel(), predicted, labels, images.cpu().numpy().squeeze()\n for ax, predicted_class, label_class, img in zip(*zip_gen):\n ax.imshow(img, cmap='gray' if predicted_class == label_class else 'autumn')\n ax.axis('off')\n ax.set_title('Predicted: {} | True: {}'.format(idx_to_class[predicted_class.item()], \n idx_to_class[label_class.item()]))", "title": "" }, { "docid": "ef9d2cdab39397c4b07e94de9f53e11c", "score": "0.5911148", "text": "def show_aerofoil_batch(batch_num, **sample_batched):\n # TODO: 
tidy up matplotlib plotting of batches\n # TODO: add TensorBoard functionality. Put all plots in a grid using: https://www.tensorflow.org/tensorboard/image_summaries\n # images = np.reshape(train_images[0:25], (-1, 28, 28, 1)) # Don't forget to reshape.\n # tf.summary.image(\"25 training data examples\", images, max_outputs=25, step=0)\n\n aerofoils_batch, coordinates_batch, y_batch = sample_batched['aerofoil'], sample_batched['coordinates'],\\\n sample_batched[\"y\"]\n ClCd_batch, angle_batch = y_batch[:, 0], y_batch[:, 1]\n batch_size = len(aerofoils_batch)\n\n fig = plt.figure()\n for i, (aerofoil, coords, ClCd, angle) in enumerate(zip(aerofoils_batch, coordinates_batch, ClCd_batch,\n angle_batch)):\n ax = fig.add_subplot(1, batch_size, i+1)\n ax.plot(coords, 'r-')\n plt.title(f\"{aerofoil}\\n\"\n f\"Max ClCd = {ClCd:.2f} at {angle:.2f} degrees\")\n\n plt.suptitle(f'Batch #{batch_num} from dataloader')\n plt.show()", "title": "" }, { "docid": "058583018ecbfea537a720ab1d32cd0a", "score": "0.590609", "text": "def display_three_train_images(train_dataset):\r\n plt.figure(figsize=(10, 14))\r\n for input_images, _ in train_dataset.take(1):\r\n for i in range(3):\r\n plt.subplot(3, 1, i+1)\r\n plt.imshow(np.squeeze(input_images[i]), cmap='gray')\r\n plt.show()", "title": "" }, { "docid": "ba4176101ee25be0d088bb8823c76233", "score": "0.5896774", "text": "def show_images(self, split):\n\n # get split\n data_split = getattr(self, split, None)\n if data_split is None:\n raise ValueError('Invalid data split')\n\n # display images\n util.disp_imdata(data_split.x, self.image_size, [6, 10])\n\n plt.show()", "title": "" }, { "docid": "68cfcc9abc45deffd4ab8adf37b87370", "score": "0.5858434", "text": "def display_tf_dataset(dataset_data, data_format, data_dims, weighted=False):\n\n # make figure\n fig = plt.figure(figsize=(10, 4))\n\n # define close event and create timer\n def close_event():\n plt.close()\n timer = fig.canvas.new_timer(interval=4000)\n timer.add_callback(close_event)\n\n # handle 2d case\n if len(data_dims) == 2:\n # image data\n image_data = dataset_data[0] # dataset_data[0]\n if len(image_data.shape) > 3:\n image_data = np.squeeze(image_data[0, :, :, :]) # handle batch data\n nplots = image_data.shape[0] + 1 if data_format == 'channels_first' else image_data.shape[2] + 1\n channels = image_data.shape[0] if data_format == 'channels_first' else image_data.shape[2]\n for z in range(channels):\n ax = fig.add_subplot(1, nplots, z + 1)\n data_img = np.swapaxes(np.squeeze(image_data[z, :, :]), 0,\n 1) if data_format == 'channels_first' else np.squeeze(\n image_data[:, :, z])\n ax.imshow(data_img, cmap='gray')\n ax.set_title('Data Image ' + str(z + 1))\n\n # label data\n label_data = dataset_data[1] # dataset_data[1]\n if len(label_data.shape) > 3:\n label_data = np.squeeze(label_data[0, :, :, :]) # handle batch data\n ax = fig.add_subplot(1, nplots, nplots)\n label_img = np.swapaxes(np.squeeze(label_data), 0, 1) if data_format == 'channels_first' else np.squeeze(\n label_data)\n ax.imshow(label_img, cmap='gray')\n ax.set_title('Labels')\n\n # handle 3d case\n if len(data_dims) == 3:\n\n # load image data\n image_data = dataset_data[0] # dataset_data[0]\n\n # handle channels first and batch data\n if len(image_data.shape) > 4:\n if data_format == 'channels_first':\n image_data = np.transpose(image_data, [0, 2, 3, 4, 1])\n image_data = np.squeeze(image_data[0, :, :, :, :]) # handle batch data\n else:\n if data_format == 'channels_first':\n image_data = np.transpose(image_data, [1, 2, 3, 
0])\n\n # determine n plots and channels\n nplots = image_data.shape[-1] + 1\n channels = image_data.shape[-1]\n if weighted:\n nplots += 1\n\n # loop through channels\n for z in range(channels):\n ax = fig.add_subplot(1, nplots, z + 1)\n data_img = np.squeeze(image_data[:, :, :, z])\n # concatenate along z to make 1 2d image per slab\n data_img = np.reshape(np.transpose(data_img), [data_img.shape[0] * data_img.shape[2], data_img.shape[1]])\n ax.imshow(data_img, cmap='gray')\n ax.set_title('Data Image ' + str(z + 1))\n\n # load label data\n label_data = dataset_data[1] # dataset_data[1]\n\n # handle channels first and batch data\n if len(label_data.shape) > 4:\n if data_format == 'channels_first':\n label_data = np.transpose(label_data, [0, 2, 3, 4, 1])\n label_data = label_data[0, :, :, :, :] # handle batch data by taking only first element of batch\n else:\n if data_format == 'channels_first':\n label_data = np.transpose(label_data, [1, 2, 3, 0])\n\n # handle weights\n weights = None\n if weighted:\n weights = label_data[..., [-1]] # last channel is weights\n label_data = label_data[..., [0]] # use first channel for labes\n\n # add to fig\n if weighted:\n # handle labels first\n ax = fig.add_subplot(1, nplots, nplots-1)\n label_img = np.squeeze(label_data)\n inds = [label_img.shape[0] * label_img.shape[2], label_img.shape[1]]\n label_img = np.reshape(np.transpose(label_img), inds)\n ax.imshow(label_img, cmap='gray')\n ax.set_title('Labels')\n # finally handle weights\n ax = fig.add_subplot(1, nplots, nplots)\n weight_img = np.reshape(np.transpose(weights), inds)\n ax.imshow(weight_img, cmap='gray')\n ax.set_title('Weights')\n else:\n # handle labels only\n ax = fig.add_subplot(1, nplots, nplots)\n label_img = np.squeeze(label_data)\n inds = [label_img.shape[0] * label_img.shape[2], label_img.shape[1]]\n label_img = np.reshape(np.transpose(label_img), inds)\n ax.imshow(label_img, cmap='gray')\n ax.set_title('Labels')\n\n # start timer and show plot\n timer.start()\n plt.show()\n\n return", "title": "" }, { "docid": "35973ade115be6085a1ff150b559c4cc", "score": "0.5854436", "text": "def displayTensorData(x, y):\r\n train_images = []\r\n for i in range(x_train.shape[0]):\r\n train_images.append(piltoarray(x[i]))\r\n train_images = np.array(train_images)\r\n display_images(train_images, y.detach().numpy())\r\n return None", "title": "" }, { "docid": "b2e95193e2fcc60d30fa8f1358e64e69", "score": "0.5835482", "text": "def show_images(self):\n\t\tself.im.show()\n\t\tself.kmeans_colorset_im.show()", "title": "" }, { "docid": "54fed2802864758702a5a9a9615166da", "score": "0.58316654", "text": "def display_images(imbatch):\n n_im = imbatch.shape[0]\n n_rows = n_im // 4 + 1\n _, axes = plt.subplots(n_rows, 4, squeeze=False, figsize=(4 * 6.4, n_rows * 4.8))\n for r in range(n_rows):\n for c in range(4):\n axes[r][c].axis(\"off\")\n for i in range(imbatch.shape[0]):\n display_image(imbatch[i], axes[i // 4][i % 4])", "title": "" }, { "docid": "3709e2e66bc683bd218989c4011c40b2", "score": "0.58222634", "text": "def show_labelled_images(img, labels, classes, nrows=8, ncols=8, savepath=None):\n if isinstance(img, torch.Tensor):\n npimg = img.numpy()\n elif isinstance(img, np.ndarray):\n npimg = img\n else:\n raise TypeError(\"Image type {} not recognized\".format(type(img)))\n\n grid_sz = ncols * nrows\n fig = plt.figure(figsize=(ncols*1.5, nrows*1.5))\n for idx in range(0, npimg.shape[0]):\n ax = fig.add_subplot(nrows, ncols, idx % grid_sz + 1, xticks=[], yticks=[])\n # add image\n show_images(npimg[idx])\n 
# add label\n try:\n ax.set_title(classes[int(labels[idx].item())])\n except IndexError:\n raise \"label index {} out of range for {} number of \\\n classes\".format(labels[idx], len(classes))\n # save figure when current grid is full or when end of loop reached\n # create a new fig object once current grid is full\n if (idx + 1) % grid_sz == 0 or idx == npimg.shape[0]-1:\n fig.subplots_adjust(hspace=0.5)\n plt.savefig(savepath + \"_{}.png\".format(idx // grid_sz))\n plt.show()\n if (idx + 1) % grid_sz == 0:\n fig = plt.figure(figsize=(ncols*1.5, nrows*1.5))", "title": "" }, { "docid": "a2747b50b983d2691f3f30710126aa6f", "score": "0.58134466", "text": "def show_images(self):\n\t\tself.im.show()\n\t\tself.color_balanced_im.show()\n\t\tself.scaled_color_balanced_im.show()", "title": "" }, { "docid": "b39c2f806f4d65b735325d5e3bc5fd7e", "score": "0.58132046", "text": "def show_images(images, labels, unique_labels, n_of_img: int) -> None:\n\n # Setup the figure\n plt.figure(figsize=(10, 10))\n # Loop trough images\n for i in range(n_of_img):\n # Create subplots (5 rows, 5 cols)\n # noinspection PyUnusedLocal\n ax = plt.subplot(int(math.sqrt(n_of_img)), int(math.sqrt(n_of_img)), i + 1)\n # Display an image\n plt.imshow(images[i])\n # Add the image label as the title\n plt.title(unique_labels[labels[i].argmax()])\n # turn the grid lines\n plt.axis(\"off\")", "title": "" }, { "docid": "331cdd87f34089afa09fb542e3e4d20c", "score": "0.58061", "text": "def draw_dataset(dataset, num_images, num_col, figsize=(15, 40)):\n fig = plt.figure(figsize=figsize)\n for idx, image in enumerate(dataset):\n plt.subplot(math.ceil(num_images / num_col), num_col, idx + 1)\n plt.imshow(image)\n plt.title(idx)\n if idx + 1 == num_images:\n break\n plt.show()\n return fig", "title": "" }, { "docid": "8987c0ed4bc92b3dc1a9f93cedb31c1e", "score": "0.57983536", "text": "def visualize_images (images, name = 'images', num_images = 6):\n tf.summary.image(name, images, max_outputs = num_images)", "title": "" }, { "docid": "5957411745a4a99e0a4f7866efb881de", "score": "0.5725313", "text": "def show(self, batch_idx: int) -> None:\n batch = self.data[batch_idx * self.batch_size:(batch_idx + 1) * self.batch_size]\n for i in range(self.batch_size):\n img_original = batch[i][0].copy()\n img_augmented = self.aug.augment(img_original.copy())\n fig, axes = plt.subplots(1, 2, figsize=(10, 4), dpi=100)\n plt.subplot(axes[0])\n plt.imshow(img_original[:, :, 0])\n plt.title('Original, class = \"{}\"'.format(self.classes[int(batch[i][1][1])]))\n plt.subplot(axes[1])\n plt.imshow(img_augmented[:, :, 0])\n plt.title('Augmented, class = \"{}\"'.format(self.classes[int(batch[i][1][1])]))\n if plt.waitforbuttonpress(0):\n plt.close('all')\n raise SystemExit\n plt.close(fig)", "title": "" }, { "docid": "5986358291e05cf71ca3ccee4833f0e3", "score": "0.57140803", "text": "def show(self, *args):\n if not args:\n args = self.images.keys()\n [cv2.imshow(str(i), self.images[i]) for i in args]\n cv2.waitKey(0)", "title": "" }, { "docid": "89f7c0a00870d618e21d2cb910bd9836", "score": "0.5713093", "text": "def select_some_specific_images_from_dataloader(dataloader,called_dataset):\r\n if called_dataset==\"PascalVOC\":\r\n image_size=Resize_PascalVoc_Image\r\n images_to_show_n_save = torch.zeros((0,3,image_size,image_size), device=Device) \r\n for _, sample_data in enumerate(dataloader): \r\n Data,Label=MyHelpFunc.get_images_and_labels_from_voc_detection_loader(sample_data)\r\n \r\n Data = Data.to(Device) \r\n images_to_show_n_save = 
torch.cat((images_to_show_n_save, Data[0].unsqueeze(0))) # len(Data[0])=3, \r\n if images_to_show_n_save.shape[0] == Number_of_Images_To_Save:\r\n break\r\n\r\n elif called_dataset==\"STL10\" or called_dataset==\"CIFAR10\" :\r\n image_size=Resize_STL10_Image if called_dataset==\"STL10\" else 32\r\n images_to_show_n_save = torch.zeros(0,3,image_size,image_size, device=Device) \r\n for _, (Data, Label) in enumerate(dataloader):\r\n Data = Data.to(Device) \r\n images_to_show_n_save = torch.cat((images_to_show_n_save, Data[0].unsqueeze(0))) # len(Data[0])=3, \r\n if images_to_show_n_save.shape[0] == Number_of_Images_To_Save:\r\n break\r\n \r\n del Data\r\n return images_to_show_n_save", "title": "" }, { "docid": "6287a11e58303bab55392bc1a07cf58d", "score": "0.5690774", "text": "def display_images_and_labels(images, labels):\n unique_labels = set(labels)\n plt.figure(figsize=(15, 15))\n i = 1\n sign_classes, class_indices, class_counts = np.unique(labels, return_index = True, return_counts = True)\n n_classes = class_counts.shape[0]\n for label in class_indices:\n image = images[label]\n plt.subplot(8,8,i)\n plt.axis('off')\n plt.title(\"Class {0} ({1})\".format(i, class_counts[i-1]))\n i += 1\n plt.imshow(image)\n plt.show()\n \n #Use with list\n '''\n for label in unique_labels:\n # Pick the first image for each label.\n # list.index(obj) returns the lowest index in list that obj appears\n image = images[labels.index(label)]\n plt.subplot(8, 8, i) # A grid of 8 rows x 8 columns\n plt.axis('off')\n plt.title(\"Label {0} ({1})\".format(label, labels.count(label)))\n i += 1\n plt.imshow(image)\n plt.show()\n '''", "title": "" }, { "docid": "5d2fa9d6681a66cec33ea9451f5cca98", "score": "0.56513274", "text": "def train(self, dataloader, epochs):\n losses = []\n\n pbar = tqdm()\n\n for epoch in range(1, epochs + 1):\n print(\"*\" * 10 + f\" Epoch {epoch}/{epochs} \" + \"*\" * 10)\n total_loss = 0\n\n pbar.reset(total=len(dataloader))\n for i, (img_feats, captions) in enumerate(dataloader):\n img_feats = img_feats.to(self.device)\n captions = captions.to(self.device)\n\n batch_loss, t_loss = self.train_step(img_feats, captions)\n total_loss += t_loss\n\n if self.wandb:\n average_batch_loss = batch_loss / captions.size(1)\n self.wandb.log({\"average_batch_loss\": average_batch_loss})\n\n pbar.update()\n\n epoch_loss = total_loss / len(dataloader)\n if self.wandb:\n result, attention_plot = self.eval_step()\n pred_sent = \" \".join(result)\n fig_ = plot_attention(\n self.sample_image, result, attention_plot, wandb=True\n )\n self.wandb.log(\n {\n \"epoch_loss\": epoch_loss,\n \"attention\": wandb.Image(fig_, caption=pred_sent),\n \"epoch\": epoch,\n }\n )\n\n losses.append(epoch_loss)\n print(f\"Total Loss: {epoch_loss:.6f}\")\n\n pbar.refresh()", "title": "" }, { "docid": "17a39973178f10595e7458034fd5db11", "score": "0.56410056", "text": "def show_result(x, y, pred, figsize=(20,20)):\n f, axarr = plt.subplots(1,3, figsize=figsize) # create visualizations\n axarr[0].imshow(x.permute(1,2,0)) # visualize image tensor\n axarr[0].set_title('Input')\n axarr[1].imshow(y.permute(1,2,0).squeeze(), cmap=plt.cm.gray) # visualize image tensor\n axarr[1].set_title('Target')\n axarr[2].imshow(pred[0].permute(1,2,0).squeeze(), cmap=plt.cm.gray) # visualize image tensor\n axarr[2].set_title('Prediction')", "title": "" }, { "docid": "2974960304fa5fd47f785e6d9d193949", "score": "0.5636933", "text": "def show_images(self, idxs, title):\n fig = plt.figure(figsize=(15, 3))\n \n fig_i = 1\n for i in idxs:\n img, label = 
self.image_dataset[i]\n img = img.numpy()\n ax = fig.add_subplot(1, len(idxs), fig_i)\n ax.axis(\"off\")\n fig.suptitle(title)\n imshow(img)\n fig_i += 1\n \n return fig", "title": "" }, { "docid": "878de244f5e7925cea3c3c619995e635", "score": "0.5628978", "text": "def show_batch(self, max_plot: int = 5):\n size = self.batch_size if self.batch_size <= max_plot else max_plot\n fig, ax = plt.subplots(1, size, figsize=(10, 10), sharey='all', sharex='all')\n ax[0].set(xticks=[], yticks=[])\n fig.tight_layout()\n for batch in self:\n\n for i, im in enumerate(batch['image'][:max_plot]):\n im = np.array(im.permute((1, 2, 0)).int())\n id = int(batch['label'][i])\n label = next(key for key, value in self.dataset.label_ids.items() if value == id)\n max_val = im.max(axis=(0,1)).reshape((1,1,3))\n min_val = im.min(axis=(0,1)).reshape((1,1,3))\n\n im = (im - min_val)/(max_val-min_val)\n ax[i].imshow(im)\n ax[i].set(title=label)\n break", "title": "" }, { "docid": "e8f5dfae0e1791d9c4bfedbd3f9699fe", "score": "0.56266075", "text": "def plot_examples(dataloaer, model, save_dir, save_file_name, class_colormap, device, mode=\"train\", batch_id=0, num_examples=4):\n \n # variable for legend\n category_and_rgb = [[category, (r,g,b)] for idx, (category, r, g, b) in enumerate(class_colormap.values)]\n legend_elements = [Patch(facecolor=webcolors.rgb_to_hex(rgb), \n edgecolor=webcolors.rgb_to_hex(rgb), \n label=category) for category, rgb in category_and_rgb]\n \n # test / validation set์— ๋Œ€ํ•œ ์‹œ๊ฐํ™”\n if (mode in ('train', 'val')):\n with torch.no_grad():\n for index, (imgs, masks, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n temp_masks = masks\n\n model.eval()\n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n oms = torch.argmax(outs, dim=1).detach().cpu().numpy()\n\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=3, figsize=(12, 4*num_examples), constrained_layout=True)\n fig.tight_layout()\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Groud Truth\n ax[row_num][1].imshow(label_to_color_image(masks[row_num].detach().cpu().numpy(), class_colormap))\n ax[row_num][1].set_title(f\"Groud Truth : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][2].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][2].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][2].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n \n # test set์— ๋Œ€ํ•œ ์‹œ๊ฐํ™”\n elif (mode in ('test')):\n with torch.no_grad():\n for index, (imgs, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n\n model.eval()\n \n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n oms = torch.argmax(outs, dim=1).detach().cpu().numpy()\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=2, figsize=(10, 4*num_examples), constrained_layout=True)\n\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal 
Image : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][1].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][1].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][1].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n \n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n elif (mode in ('test_crf')):\n dense_crf = DenseCRF()\n with torch.no_grad():\n for index, (imgs, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n\n model.eval()\n \n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n\n # crf ์ถ”๊ฐ€\n crf_outs = list()\n for img, out in zip(imgs, outs):\n crf_prob = dense_crf(img,out)\n crf_outs.append(crf_prob)\n\n oms = torch.argmax(torch.cat(crf_outs, 0), dim=1).detach().cpu().numpy()\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=2, figsize=(10, 4*num_examples), constrained_layout=True)\n\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][1].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][1].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][1].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n \n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n elif (mode in ('train_crf', 'val_crf')):\n dense_crf = DenseCRF()\n with torch.no_grad():\n for index, (imgs, masks, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n temp_masks = masks\n\n model.eval()\n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n \n # crf ์ถ”๊ฐ€\n crf_outs = list()\n for img, out in zip(imgs, outs):\n crf_prob = dense_crf(img,out)\n crf_outs.append(crf_prob)\n\n oms = torch.argmax(torch.cat(crf_outs, 0), dim=1).detach().cpu().numpy()\n\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=3, figsize=(12, 4*num_examples), constrained_layout=True)\n fig.tight_layout()\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Groud Truth\n ax[row_num][1].imshow(label_to_color_image(masks[row_num].detach().cpu().numpy(), class_colormap))\n ax[row_num][1].set_title(f\"Groud Truth : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][2].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][2].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][2].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n \n # test set์— ๋Œ€ํ•œ ์‹œ๊ฐํ™”\n else:\n print('unknown mode')", "title": "" }, { "docid": "f743b4659fafb5b6ddcb8ea8884b0c1a", "score": "0.5609397", "text": "def show_images(images):\n img = images\n grid = utils.make_grid(img)\n plt.imshow(img_as_float(grid.numpy().transpose((1, 2, 0)))) \n plt.axis('off')\n plt.ioff()\n plt.show()", "title": "" }, { "docid": 
"9d9b6400402f31223038d763fb5c726f", "score": "0.56066567", "text": "def display_dataset_summary(dataset_labels: Dict[str, str]) -> None:\n dataset_rows = []\n for dataset_name, dataset_label in dataset_labels.items():\n dataset = DATASETS[dataset_name]()\n dataset_rows.append({\n 'name': dataset.name,\n 'label': dataset_label,\n 'Features': len(dataset.all_features),\n 'Classes': len(dataset.classes),\n 'Concepts': len(dataset.concepts),\n 'Train+Calibration instances per experiment': dataset.train_n,\n 'Test instances per experiment': dataset.test_n,\n 'GSLS Histogram Bins': GslsQuantifier.get_auto_hist_bins(\n calib_count=dataset.calib_n,\n target_count=dataset.test_n,\n ),\n })\n display(pd.DataFrame(dataset_rows).set_index('name'))", "title": "" }, { "docid": "91387009b69d58181444d270708637e6", "score": "0.5566741", "text": "def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):\n batch_ids = list(range(1, 6))\n\n if batch_id not in batch_ids:\n print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))\n return None\n\n features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)\n\n if not (0 <= sample_id < len(features)):\n print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))\n return None\n\n print('\\nStats of batch {}:'.format(batch_id))\n print('Samples: {}'.format(len(features)))\n print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))\n print('First 20 Labels: {}'.format(labels[:20]))\n\n sample_image = features[sample_id]\n sample_label = labels[sample_id]\n label_names = _load_label_names()\n\n print('\\nExample of Image {}:'.format(sample_id))\n print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))\n print('Image - Shape: {}'.format(sample_image.shape))\n print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))\n plt.axis('off')\n plt.imshow(sample_image)", "title": "" }, { "docid": "4ff9a114e30d0ffe6cd995b9525cc963", "score": "0.55520904", "text": "def show_real_images():\n X, y = load_mnist(PATH, is_train=False, return_as_iterator=False)\n y = np.argmax(y, axis=1) # turn one hot to single label\n final_indices = np.array([])\n for cls in range(10):\n indices = np.argwhere(y == cls).reshape(-1) # get all indices that fit the class\n selected_indices = np.random.choice(indices, 1, replace=False) # select 1 indice of class\n final_indices = np.concatenate([final_indices, selected_indices]) # append to final indices\n final_indices = final_indices.astype(int)\n X_, y_ = X[final_indices, :, :, :], y[final_indices] # select x,y with final indices for train\n\n for i in range(10):\n plt.subplot(2, 5, i+1)\n plt.imshow(X_[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n plt.title('real image class = {}'.format(i))\n\n plt.show()\n plt.savefig('./images/dcgan_generated_images_inference_mode/real_images.png')", "title": "" }, { "docid": "e77c5313048ba33a96791ab159530c88", "score": "0.5549886", "text": "def display_label_images(images, labels, label):\n limit = 24 # show a max of 24 images\n plt.figure(figsize=(15, 5))\n i = 1\n\n sign_classes, class_indices, class_counts = np.unique(labels, return_index = True, return_counts = True)\n start = class_indices[label-1]\n end = start + class_counts[label-1]\n random_indices = random.sample(range(start, end), limit)\n for index in random_indices[:][:limit]:\n image = images[index]\n plt.subplot(3, 8, i) # 3 rows, 8 per row\n plt.axis('off')\n i 
+= 1\n plt.imshow(image)\n plt.show()", "title": "" }, { "docid": "d6501e902cf42bb3c6bcf3009e2ec1e1", "score": "0.5548181", "text": "def Image_features(trained_model, dataloader):\n\n combined_output = np.zeros((1, 1024))\n id_list = []\n\n for num, batch in enumerate(dataloader):\n if num % 25 == 0:\n print(\"Processing batch {} of {}\".format(num, len(dataloader)))\n batch_output = Model(trained_model.input, trained_model.layers[-2].output).predict(batch[0]) #32 x 1024\n combined_output = np.concatenate((combined_output, batch_output), axis=0)\n id_list.append(batch[1])\n\n\n combined_output = combined_output[1:, :]\n id_list = np.array(functools.reduce(operator.iconcat, id_list, []))\n\n out_matrix = np.concatenate((np.expand_dims(id_list, axis=1), combined_output), axis=1)\n\n return out_matrix", "title": "" }, { "docid": "b5999f071c00c51f46495596bb6eba10", "score": "0.5546855", "text": "def display(img1, img2, lbl1, lbl2, x, y, img3=[], lbl3=[], cmap=None, n = 2):\n plt.figure(figsize=(x, y))\n plt.subplot(1, n, 1)\n plt.imshow(img1, cmap = cmap)\n plt.xlabel(lbl1, fontsize=15)\n plt.xticks([])\n plt.yticks([])\n plt.subplot(1, n, 2)\n plt.imshow(img2, cmap = cmap)\n plt.xlabel(lbl2, fontsize=15)\n plt.xticks([])\n plt.yticks([])\n if n == 3:\n plt.subplot(1, n, 3)\n plt.imshow(img3, cmap = cmap)\n plt.xlabel(lbl3, fontsize=15)\n plt.xticks([])\n plt.yticks([])\n plt.show()", "title": "" }, { "docid": "371bc7c77daa0807fe019e6d49758cfe", "score": "0.55465615", "text": "def generate_images(self, task: pl.LightningModule, trainer: pl.Trainer):\n if self.data.empty:\n n_imgs = 10\n else:\n n_imgs = len(self.data)\n figs = []\n captions = []\n for s in range(n_imgs):\n r = self.generate_image_set(s, task, trainer)\n figs.append(r[0])\n captions.append(r[1])\n if trainer.logger:\n trainer.logger.log_image(\n key=\"test/examples\",\n images=figs,\n caption=captions,\n step=task.images_shown,\n )", "title": "" }, { "docid": "ddc1bb6bf6f6f1a5880e33fe41b57a4d", "score": "0.55445576", "text": "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0],icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n img = scipy.misc.toimage( big_picture )\n plt.imshow(img,cmap = cm.Greys_r)", "title": "" }, { "docid": "a1c79a03341871cab91cf14066281dde", "score": "0.5543162", "text": "def plot_dataset_samples_imgs(\n dataset, n_plots=4, figsize=DFLT_FIGSIZE, ax=None, pad_value=1, seed=123, title=None\n):\n set_seed(seed)\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n img_tensor = torch.stack(\n [dataset[random.randint(0, len(dataset) - 1)][0] for i in range(n_plots)], dim=0\n )\n grid = make_grid(img_tensor, nrow=2, pad_value=pad_value)\n\n ax.imshow(to_numpy(grid.permute(1, 2, 0)))\n ax.axis(\"off\")\n\n if title is not None:\n ax.set_title(title)", "title": "" }, { "docid": "5ca03542a77047a1616255dd9573bc28", "score": "0.55350226", "text": "def visualize_data(number, img_array, label_array):\n fig, ax = plt.subplots(nrows=10, ncols=10, sharex=True, sharey=True)\n ax = ax.flatten()\n for i in range(100):\n img = img_array[label_array==number][i].reshape(28,28)\n ax[i].imshow(img, 
cmap='Greys', interpolation='nearest')\n plt.show()", "title": "" }, { "docid": "c21e2f2ccdbdbc26ea752283a4be71e4", "score": "0.5528805", "text": "def visualise(self, batch, output, mode):", "title": "" }, { "docid": "2494d4dca40be0a1970fe38616508151", "score": "0.5528288", "text": "def display_sample(observations):\n rgb_obs = observations[\"color_sensor\"]\n semantic_obs = observations[\"semantic_sensor\"]\n depth_obs = observations[\"depth_sensor\"]\n\n rgb_img = Image.fromarray(rgb_obs, mode=\"RGBA\")\n \n semantic_img = Image.new(\"P\", (semantic_obs.shape[1], semantic_obs.shape[0]))\n semantic_img.putpalette(d3_40_colors_rgb.flatten())\n semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))\n semantic_img = semantic_img.convert(\"RGBA\")\n \n depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8), mode=\"L\")\n\n arr = [rgb_img, semantic_img, depth_img]\n titles = ['rgb', 'semantic', 'depth']\n plt.figure(figsize=(12 ,8))\n for i, data in enumerate(arr):\n ax = plt.subplot(1, 3, i+1)\n ax.axis('off')\n ax.set_title(titles[i])\n plt.imshow(data)\n plt.show()", "title": "" }, { "docid": "12e5c87086d73ccb5d3eec310b102046", "score": "0.55271804", "text": "def list_images(dataset, dataset_y, ylabel=\"\", cmap=None):\n #print('Listing called')\n plt.figure(figsize=(15, 16))\n for i in range(6):\n plt.subplot(1, 6, i+1)\n indx = random.randint(0, len(dataset))\n #Use gray scale color map if there is only one channel\n cmap = 'gray' if len(dataset[indx].shape) == 2 else cmap\n plt.imshow(dataset[indx], cmap = cmap)\n plt.xlabel(signs[dataset_y[indx]])\n plt.ylabel(ylabel)\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout(pad=0, h_pad=0, w_pad=0)\n plt.show()", "title": "" }, { "docid": "859a23a25fc99f3e590f49d2ec7c70b2", "score": "0.5525721", "text": "def plot_dataset_samples_imgs(\n dataset, n_plots=4, figsize=DFLT_FIGSIZE, ax=None, pad_value=1, seed=123, title=None\n):\n set_seed(seed)\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n img_tensor = torch.stack(\n [dataset[random.randint(0, len(dataset) - 1)][0] for i in range(n_plots)], dim=0\n )\n grid = make_grid(img_tensor, nrow=2, pad_value=pad_value)\n\n ax.imshow(grid.permute(1, 2, 0).numpy())\n ax.axis(\"off\")\n\n if title is not None:\n ax.set_title(title)", "title": "" }, { "docid": "bc0d146916f7a2c308bde08590ce3a1b", "score": "0.5513494", "text": "def visualize_dataset(tfrecord_dir, data_type):\n dataset = dg.dataset_from_tfrecord(tfrecord_dir, data_type, 10)\n iterator = dataset.make_one_shot_iterator()\n data = iterator.get_next()\n\n with tf.Session() as sess:\n start_time = time.time()\n data_value = sess.run(data)\n print(time.time() - start_time)\n\n image = data_value['image'][0]\n plt.figure()\n plt.imshow(image.astype(np.uint8))\n plt.grid(False)\n\n for i in range(const.QUESTIONS_PER_IMAGE):\n question = data_value['question'][0, i]\n seq_len = data_value['seq_len'][0, i]\n question = ' '.join(\n [const.INPUTVOCABULARY[question[j]] for j in range(seq_len)])\n print(question)\n print(const.OUTPUTVOCABULARY[data_value['answer'][0, i]])\n\n plt.show()", "title": "" }, { "docid": "bfa68c4921f383f7bbd20a378892fed1", "score": "0.5498958", "text": "def plot_examples_plus(dataloaer, model, save_dir, save_file_name, class_colormap, device, mode=\"train\", batch_id=0, num_examples=4):\n \n # variable for legend\n category_and_rgb = [[category, (r,g,b)] for idx, (category, r, g, b) in enumerate(class_colormap.values)]\n legend_elements = [Patch(facecolor=webcolors.rgb_to_hex(rgb), \n 
edgecolor=webcolors.rgb_to_hex(rgb), \n label=category) for category, rgb in category_and_rgb]\n \n # test / validation set์— ๋Œ€ํ•œ ์‹œ๊ฐํ™”\n if (mode in ('train', 'val')):\n with torch.no_grad():\n for index, (imgs, masks, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n temp_masks = masks\n\n model.eval()\n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n oms = torch.argmax(outs, dim=1).detach().cpu().numpy()\n\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=3, figsize=(12, 4*num_examples), constrained_layout=True)\n fig.tight_layout()\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Groud Truth\n ax[row_num][1].imshow(label_to_color_image(masks[row_num].detach().cpu().numpy(), class_colormap))\n ax[row_num][1].set_title(f\"Groud Truth : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][2].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][2].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][2].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n \n # test set์— ๋Œ€ํ•œ ์‹œ๊ฐํ™”\n elif (mode in ('test')):\n with torch.no_grad():\n for index, (imgs, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n\n model.eval()\n \n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n oms = torch.argmax(outs, dim=1).detach().cpu().numpy()\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=2, figsize=(10, 4*num_examples), constrained_layout=True)\n\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][1].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][1].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][1].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n \n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n elif (mode in ('test_crf')):\n dense_crf = DenseCRF()\n with torch.no_grad():\n for index, (imgs, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n\n model.eval()\n \n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n\n # crf ์ถ”๊ฐ€\n crf_outs = list()\n probs = F.softmax(outs, dim=1)\n\n for img, out in zip(imgs, probs):\n crf_prob = dense_crf(img,out)\n crf_outs.append(crf_prob)\n\n oms = torch.argmax(torch.cat(crf_outs, 0), dim=1).detach().cpu().numpy()\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=3, figsize=(15, 4*num_examples), constrained_layout=True)\n\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n 
ax[row_num][1].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][1].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][1].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n # ๊ณ‚์น˜๊ธฐ\n original_img = temp_images[row_num].permute([1,2,0])\n seg_map = label_to_color_image(oms[row_num], class_colormap)\n ax[row_num][2].imshow(0.7*original_img+0.3*seg_map/255)\n ax[row_num][2].set_title(f\"img : {image_infos[row_num]['file_name']}\")\n ax[row_num][2].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n \n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n elif (mode in ('train_crf', 'val_crf')):\n dense_crf = DenseCRF()\n with torch.no_grad():\n for index, (imgs, masks, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n temp_masks = masks\n\n model.eval()\n # inference\n # outs = model(torch.stack(temp_images).to(device))['out']\n outs = model(torch.stack(temp_images).to(device))\n \n # crf ์ถ”๊ฐ€\n crf_outs = list()\n for img, out in zip(imgs, outs):\n crf_prob = dense_crf(img,out)\n crf_outs.append(crf_prob)\n\n oms = torch.argmax(torch.cat(crf_outs, 0), dim=1).detach().cpu().numpy()\n\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=4, figsize=(19, 4*num_examples), constrained_layout=True)\n fig.tight_layout()\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Groud Truth\n ax[row_num][1].imshow(label_to_color_image(masks[row_num].detach().cpu().numpy(), class_colormap))\n ax[row_num][1].set_title(f\"Groud Truth : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][2].imshow(label_to_color_image(oms[row_num], class_colormap))\n ax[row_num][2].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][2].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n # ๊ณ‚์น˜๊ธฐ\n original_img = temp_images[row_num].permute([1,2,0])\n seg_map = label_to_color_image(oms[row_num], class_colormap)\n ax[row_num][3].imshow(0.7*original_img+0.3*seg_map/255)\n ax[row_num][3].set_title(f\"img : {image_infos[row_num]['file_name']}\")\n ax[row_num][3].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n # plt.show()\n plt.savefig(os.path.join(save_dir, save_file_name))\n \n # test set์— ๋Œ€ํ•œ ์‹œ๊ฐํ™”\n else:\n print('unknown mode')", "title": "" }, { "docid": "1d38545934ac0bde788b968f722bc3b7", "score": "0.54931414", "text": "def display_images(images, titles=None, cols=4, cmap=None, norm=None,\n interpolation=None):\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap,\n norm=norm, interpolation=interpolation)\n i += 1\n plt.show()", "title": "" }, { "docid": "4ada7cbc08954e4c7211477bca32d890", "score": "0.54920197", "text": "def visualize_data_augmentations(cfg, show_images):\n dirname = os.path.join(cfg.OUTPUT_DIR, \"augmentations\")\n os.makedirs(dirname, exist_ok=True)\\\n\n train_data_loader = build_detection_train_loader(cfg, mapper=custom_mapper)\n metadata = 
MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n scale = 1.0\n\n for batch in train_data_loader:\n for per_image in batch:\n # Pytorch tensor is in (C, H, W) format\n img = per_image[\"image\"].permute(1, 2, 0).cpu().detach().numpy()\n img = detection_utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)\n v = Visualizer(img, metadata=metadata, scale=scale)\n target_fields = per_image[\"instances\"].get_fields()\n labels = [metadata.thing_classes[i] for i in target_fields[\"gt_classes\"]]\n vis = v.overlay_instances(\n labels=labels,\n boxes=target_fields.get(\"gt_boxes\", None),\n masks=target_fields.get(\"gt_masks\", None),\n keypoints=target_fields.get(\"gt_keypoints\", None),\n )\n output(vis, str(per_image[\"image_id\"]) + \".jpg\", dirname, show_images)", "title": "" }, { "docid": "e7112cfb8ccfa562c63ab0a01ca581f4", "score": "0.5460943", "text": "def dataloader(data_dir,image_size = (224, 224), batch_size = 32, ):\n\n# TODO: Define your transforms for the training, validation, and testing sets\n \n data_train_transforms = transforms.Compose([transforms.Resize(image_size),\n transforms.RandomRotation(20),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))\n ])\n\n\n data_valid_transforms = transforms.Compose([transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))\n ])\n\n# TODO: Load the datasets with ImageFolder\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n image_datasets = {}\n image_datasets['train'] = datasets.ImageFolder(train_dir, data_train_transforms)\n image_datasets['valid'] = datasets.ImageFolder(valid_dir, data_valid_transforms) \n image_datasets['test'] = datasets.ImageFolder(test_dir, data_valid_transforms)\n \n\n # TODO: Using the image datasets and the trainforms, define the dataloaders\n\n dataloaders = {}\n dataloaders['train'] = torch.utils.data.DataLoader(image_datasets['train'], batch_size=batch_size, shuffle=True, num_workers = 0)\n dataloaders['valid'] = torch.utils.data.DataLoader(image_datasets['valid'], batch_size=batch_size, shuffle=True,num_workers = 0)\n dataloaders['test'] = torch.utils.data.DataLoader(image_datasets['test'], batch_size=batch_size, shuffle=False,num_workers = 0)\n \n class_names = image_datasets['train'].classes\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid', 'test']}\n class_names = image_datasets['train'].classes\n return dataloaders,class_names,dataset_sizes, image_datasets,batch_size,image_size", "title": "" }, { "docid": "d9cc2b1c79bd93a5f1bee3e93e3e1fb9", "score": "0.54482955", "text": "def ShowImageList(imglist, rows=1):\n column = math.ceil(len(imglist) / rows)\n fig = plt.figure()\n for i in range(rows):\n for j in range(column):\n index = column*i + (j+1)\n plt.subplot(rows, column, index)\n if index <= len(imglist):\n plt.title(\"Image:\" + str(index))\n plt.imshow(imglist[index-1])\n plt.show()", "title": "" }, { "docid": "04f7a0047363018a5551c59f04ee2c7b", "score": "0.5441438", "text": "def show_generated_samples(self, label, save: bool):\n\n input_noise = torch.randn(100, self.latent_vector_size, 1, 1, device=self.device)\n\n with torch.no_grad():\n # visualize the generated images\n generated = self.model_g(input_noise, label).cpu()\n\n generated = make_grid(self.denorm(generated)[:100], nrow=10, padding=2, normalize=False,\n range=None, 
scale_each=False, pad_value=0)\n\n plt.figure(figsize=(15, 15))\n\n if save:\n save_image(generated, 'img/final_generated_samples.png')\n\n show_img(generated, \"generated samples\")\n\n # visualize the original images of the last batch of the test set for comparison\n it = iter(self.loader_test)\n sample_inputs, _ = next(it)\n fixed_input = sample_inputs[0:64, :, :, :]\n\n img = make_grid(self.denorm(fixed_input), nrow=8, padding=2, normalize=False,\n range=None, scale_each=False, pad_value=0)\n plt.figure(figsize=(15, 15))\n\n show_img(img, \"original samples\")", "title": "" }, { "docid": "dad3166dbead9f83b9323c63f90a5a5c", "score": "0.5421222", "text": "def get_target_dataloader(dataset, batch_size, n_threads, data_path='', image_size=224, data_aug='default', logger=None):\n\n logger.info(\"|===>Get datalaoder for \" + dataset)\n\n # setting\n crop_size = {299: 320, 224: 256}\n resize = crop_size[image_size]\n logger.info(\"image_size={}, resize={}\".format(image_size, resize))\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n if data_aug == 'default':\n # torchvision.set_image_backend('accimage')\n # logger.info('torchvision.set_image_backend(\\'accimage\\')')\n logger.info('data_aug = {} !!!'.format(data_aug))\n train_transform = transforms.Compose([\n transforms.Resize((resize, resize)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(image_size),\n transforms.ToTensor(),\n normalize])\n val_transform = transforms.Compose([\n transforms.Resize((resize, resize)),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize])\n\n elif data_aug == 'improved':\n logger.info('data_aug = {} !!!'.format(data_aug))\n train_transform = transforms.Compose([\n transforms.Resize(resize),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(resize), # important\n transforms.RandomCrop(image_size),\n transforms.ToTensor(),\n normalize])\n\n val_transform = transforms.Compose([\n transforms.Resize(resize),\n transforms.TenCrop(image_size),\n transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops]))\n ])\n\n else:\n assert False, logger.info(\"invalid data_aug={}\".format(data_aug))\n\n # data root\n if dataset in ['MIT_Indoors_67', 'Stanford_Dogs', 'Caltech_256-30', 'Caltech_256-10', 'Caltech_256-20',\n 'Caltech_256-60', 'Caltech_256-40', 'CUB-200-2011', 'Food-101']:\n data_root = os.path.join(data_path, dataset)\n else:\n assert False, logger.info(\"invalid dataset={}\".format(dataset))\n logger.info('{} path = {}'.format(dataset, data_root))\n\n # datset\n train_dataset = datasets.ImageFolder(root=os.path.join(data_root, 'train'), transform=train_transform)\n val_dataset = datasets.ImageFolder(root=os.path.join(data_root, 'test'), transform=val_transform)\n class_num = len(train_dataset.classes)\n train_dataset_sizes = len(train_dataset)\n val_dataset_sizes = len(val_dataset)\n\n # dataloader\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=n_threads)\n\n if data_aug == 'improved':\n batch_size = int(batch_size / 4)\n logger.info('{}: batch_size = batch_size / 4 = {}'.format(data_aug, batch_size))\n\n val_loader = DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False,\n pin_memory=True,\n num_workers=n_threads)\n\n logger.info(\"train and val loader are ready! 
class_num={}\".format(class_num))\n logger.info(\"train_dataset_sizes={}, val_dataset_sizes={}\".format(train_dataset_sizes, val_dataset_sizes))\n return train_loader, val_loader, class_num, train_dataset_sizes", "title": "" }, { "docid": "787c4e42251dfdcd46b1fd2e11b27561", "score": "0.5419149", "text": "def show_img(img: np.ndarray):\n if len(img.shape) == 2:\n plt.figure()\n plt.imshow(img, cmap=plt.cm.gray)\n plt.show()\n elif len(img.shape) == 3:\n plt.figure()\n plt.imshow(img)\n plt.show()\n else:\n print(\"Type not supported\")", "title": "" }, { "docid": "1f99d859e7e11b31a3e45f57dc712c2a", "score": "0.5417402", "text": "def _info(self) -> tfds.core.DatasetInfo:\n\n h = self.builder_config.height\n w = self.builder_config.width\n s = self.builder_config.num_frames\n\n def get_movi_e_instance_features(seq_length: int):\n features = get_instance_features(seq_length)\n features.update({\n \"asset_id\": tfds.features.Text(),\n \"category\": tfds.features.ClassLabel(\n names=[\"Action Figures\", \"Bag\", \"Board Games\",\n \"Bottles and Cans and Cups\", \"Camera\",\n \"Car Seat\", \"Consumer Goods\", \"Hat\",\n \"Headphones\", \"Keyboard\", \"Legos\",\n \"Media Cases\", \"Mouse\", \"None\", \"Shoe\",\n \"Stuffed Toys\", \"Toys\"]),\n \"scale\": tf.float32,\n \"is_dynamic\": tf.bool,\n })\n return features\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"metadata\": {\n \"video_name\": tfds.features.Text(),\n \"width\": tf.int32,\n \"height\": tf.int32,\n \"num_frames\": tf.int32,\n \"num_instances\": tf.uint16,\n\n \"depth_range\": tfds.features.Tensor(shape=(2,),\n dtype=tf.float32),\n \"forward_flow_range\": tfds.features.Tensor(shape=(2,),\n dtype=tf.float32),\n \"backward_flow_range\": tfds.features.Tensor(shape=(2,),\n dtype=tf.float32),\n },\n \"background\": tfds.features.Text(),\n \"instances\": tfds.features.Sequence(\n feature=get_movi_e_instance_features(seq_length=s)),\n \"camera\": get_camera_features(s),\n \"events\": get_events_features(),\n # -----\n \"video\": tfds.features.Video(shape=(s, h, w, 3)),\n \"segmentations\": tfds.features.Sequence(\n tfds.features.Image(shape=(h, w, 1), dtype=tf.uint8),\n length=s),\n \"forward_flow\": tfds.features.Sequence(\n tfds.features.Tensor(shape=(h, w, 2), dtype=tf.uint16),\n length=s),\n \"backward_flow\": tfds.features.Sequence(\n tfds.features.Tensor(shape=(h, w, 2), dtype=tf.uint16),\n length=s),\n \"depth\": tfds.features.Sequence(\n tfds.features.Image(shape=(h, w, 1), dtype=tf.uint16),\n length=s),\n \"normal\": tfds.features.Video(shape=(s, h, w, 3), dtype=tf.uint16),\n \"object_coordinates\": tfds.features.Video(shape=(s, h, w, 3),\n dtype=tf.uint16),\n }),\n supervised_keys=None,\n homepage=\"https://github.com/google-research/kubric\",\n citation=_CITATION)", "title": "" }, { "docid": "b06024ac00e6029f2f3dbe84734731d6", "score": "0.54102695", "text": "def show_images(images, rows=1, titles=None):\n assert ((titles is None) or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]\n figure = plt.figure()\n cols = np.ceil(n_images / float(rows))\n ax0 = None\n for indx, (image, title) in enumerate(zip(images, titles)):\n axes = figure.add_subplot(rows, cols, indx+1, sharey=ax0)\n if indx == 0:\n ax0 = axes\n else:\n plt.setp(axes.get_yticklabels(), visible=False)\n plt.imshow(image)\n axes.set_title(title)\n # figure.set_size_inches(np.array(figure.get_size_inches()) * 
n_images)\n plt.show()", "title": "" }, { "docid": "8951065d58c2fdc17601ab05bfc6cd79", "score": "0.5410163", "text": "def __init__(self, outputs, targets, images, sq_num):\n\n self.outputs = [np.argmax(output) for output in outputs]\n self.targets = targets\n self.images = images\n self.sq_num = sq_num\n\n self.pic_size = sqrt(self.sq_num)\n\n plt.figure(figsize=(10, 10))\n\n for i in range(sq_num):\n ax = plt.subplot(self.pic_size, self.pic_size, i + 1)\n plt.imshow(images[i])\n plt.title(\"T: \" + str(self.targets[i]) + \" O: \" + str(self.outputs[i]))\n plt.axis(\"off\")\n\n plt.show()", "title": "" }, { "docid": "7405c52eebcf4dd53557a087fa25635c", "score": "0.53944665", "text": "def showImages(images, cols=4, rows=5, figsize=(15, 10), cmap=None):\n imgLength = len(images)\n fig, axes = plt.subplots(rows, cols, figsize=figsize)\n indexes = range(cols * rows)\n for ax, index in zip(axes.flat, indexes):\n if index < imgLength:\n imagePathName, image = images[index]\n if cmap == None:\n ax.imshow(image)\n else:\n ax.imshow(image, cmap=cmap)\n ax.set_title(imagePathName)\n ax.axis('off')\n fig.show()", "title": "" }, { "docid": "714bbfc44c2ec4bd415817021964fc83", "score": "0.5390346", "text": "def show_images(images, cols=1, titles=None, axis=False, interpolation='gaussian'):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n\n if titles is None:\n titles = ['Image (%d)' % i for i in range(1, n_images + 1)]\n fig = plt.figure()\n\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if not axis:\n plt.axis(\"off\")\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image, interpolation=interpolation)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "title": "" }, { "docid": "46967d33ba62f5448c9d1bb5a8ee0ac9", "score": "0.538756", "text": "def show_images(list_of_images):", "title": "" }, { "docid": "7afdf0b8f631822757970f99bb0a4f06", "score": "0.5386653", "text": "def display(self, path):\r\n labels = np.fromfile(f'{path}/label.bin', dtype='uint8')\r\n data = load_data(f'{path}/data.bin')\r\n for img, label in zip(data, labels):\r\n title = 'yes TFL' if label % 2 else 'no TFL'\r\n self.show_image(img, title)", "title": "" }, { "docid": "e3f29c45e8db66b5059b0c896124cb41", "score": "0.5379086", "text": "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n if np.shape(image)[0] == 2:\n plt.imshow(image[1, :, :])\n else:\n plt.imshow(image)\n plt.show()", "title": "" }, { "docid": "a6a16aa083a48ec53c68c648d13a7f35", "score": "0.53769624", "text": "def list_images(dataset, dataset_y, ylabel=\"\", cmap=None):\n plt.figure(figsize=(15, 16))\n for i in range(6):\n plt.subplot(1, 6, i+1)\n indx = random.randint(0, len(dataset))\n #Use gray scale color map if there is only one channel\n cmap = 'gray' if len(dataset[indx].shape) == 2 else cmap\n plt.imshow(dataset[indx], cmap = cmap)\n plt.xlabel(signs[dataset_y[indx]])\n plt.ylabel(ylabel)\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout(pad=0, h_pad=0, w_pad=0)\n plt.show()", "title": "" }, { "docid": "9e247e5f63396435859a1c7c286fba37", "score": "0.5356864", "text": "def plot_images_and_pred(self, n_fig_width = None,\n n_fig_height = None):\n if n_fig_width != None:\n self.n_fig_width = n_fig_width\n if 
n_fig_height != None:\n self.n_fig_height = n_fig_height\n self.n_fig = self.n_fig_height * self.n_fig_width\n\n plt.figure(figsize=(2*n_fig_height, 2*n_fig_width))\n k = 0\n for (image_batch, label_batch) in self.target_dataset:\n for (image, label) in zip(image_batch, label_batch):\n ax = plt.subplot(self.n_fig_width, self.n_fig_height, k+1)\n plt.imshow(image)\n label_class = int(label)\n if label_class == self.preds_argmax[k]:\n plt.setp(ax.spines.values(), linewidth = 2, color=\"green\")\n else:\n plt.setp(ax.spines.values(), linewidth = 2, color=\"red\")\n plt.title(\"True: \" + str(label_class) +\n \"\\n Pred: \" + str(self.preds_argmax[k]) +\n \" [\" + str(round(float(self.preds[k][label_class]),2)) + \"]\")\n #plt.axis(\"off\")\n ax.axes.xaxis.set_visible(False)\n ax.axes.yaxis.set_visible(False)\n k += 1\n if (k >= self.n_fig):\n break\n if (k >= self.n_fig):\n break\n plt.tight_layout(pad=1.5)\n plt.show()", "title": "" }, { "docid": "71ed0f92e05bdae33e59e8e632aa498e", "score": "0.5350566", "text": "def ShowImage(rows=1, *args):\n column = math.ceil(len(args) / rows)\n fig = plt.figure()\n for i in range(rows):\n for j in range(column):\n index = column*i + (j+1)\n plt.subplot(rows, column, index)\n if index <= len(args):\n plt.title(\"Image:\" + str(index))\n plt.imshow(args[index-1])\n plt.show()", "title": "" }, { "docid": "0c0ff5b9a00eeff68150a2282f3f1e6f", "score": "0.5347136", "text": "def display_image():\n from dataset_prepare import CocoMetadata, CocoPose\n from pycocotools.coco import COCO\n from os.path import join\n from dataset import _parse_function\n\n BASE_PATH = \"~/data/ai_challenger\"\n\n import os\n # os.chdir(\"..\")\n\n ANNO = COCO(\n join(BASE_PATH, \"ai_challenger_valid.json\")\n )\n train_imgIds = ANNO.getImgIds()\n\n img, heat = _parse_function(train_imgIds[100], ANNO)\n\n CocoPose.display_image(img, heat, pred_heat=heat, as_numpy=False)\n\n from PIL import Image\n for _ in range(heat.shape[2]):\n data = CocoPose.display_image(img, heat, pred_heat=heat[:, :, _:(_ + 1)], as_numpy=True)\n im = Image.fromarray(data)\n im.save(\"test_heatmap/heat_%d.jpg\" % _)", "title": "" }, { "docid": "b6f638120d0fc3069893fc9c333e7b26", "score": "0.5337284", "text": "def _info(self) -> tfds.core.DatasetInfo:\n # TODO(galsim_hsc): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=['no', 'yes']),\n }),\n # If there's a common (input, target) tuple from the\n # features, specify them here. 
They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=('image', 'label'), # Set to `None` to disable\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )", "title": "" }, { "docid": "dddb333b648b514295dc31f08ba507c9", "score": "0.53247696", "text": "def show_image_set(images, title=\"\"):\n n_images = images.shape[0]\n ncols = int(np.ceil(np.sqrt(1.0 * n_images)))\n nrows = int(np.ceil(n_images/(1.0*ncols)))\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True)\n for nrow in range(0, nrows):\n for ncol in range(0, ncols):\n n = ncol + nrow*ncols\n if n <= n_images - 1:\n axes[nrow, ncol].imshow(images[n, :, :], cmap=cm.gray)\n axes[nrow, ncol].set_title('{:s} {:n}'.format(title, n+1))\n plt.show()", "title": "" }, { "docid": "127feb5b9a9e9c5b1e7876fc7297cda5", "score": "0.53195566", "text": "def demo(args):\n\n args.load_model = 'squeezedet_kitti_epoch280.pth'\n args.gpus = [-1]\n args.debug = 2 # visualize detection boxes\n # vs = VideoStream(src=0).start()\n # frame = vs.read()\n dataset = KITTI('val', args)\n args = Args().update_dataset_info(args, dataset)\n\n preprocess_func = dataset.preprocess\n# del frame\n\n # prepare the model and detector\n model = SqueezeDet(args)\n model = load_model(model, args.load_model)\n detector = Detector(model.to(args.device), args)\n\n # prepare images\n sample_images_dir = '../data/kitti/samples'\n sample_image_paths = glob.glob(os.path.join(sample_images_dir, '*.png'))\n\n # detection\n for path in tqdm.tqdm(sample_image_paths):\n image = skimage.io.imread(path).astype(np.float32)\n image_meta = {'image_id': os.path.basename(path)[:-4],\n 'orig_size': np.array(image.shape, dtype=np.int32)}\n\n image, image_meta, _ = preprocess_func(image, image_meta)\n image = torch.from_numpy(image.transpose(2,0,1)).unsqueeze(0).to(args.device)\n image_meta = {k: torch.from_numpy(v).unsqueeze(0).to(args.device) if isinstance(v, np.ndarray)\n else [v] for k, v in image_meta.items()}\n\n inp = {'image': image,\n 'image_meta': image_meta}\n\n _ = detector.detect(inp)", "title": "" }, { "docid": "5dbfda187f365a0239afb7190823473d", "score": "0.531796", "text": "def process_dataset(self):\n\n print('\\n')\n print('='*40)\n print('=\\t DeepRank Data Set')\n print('=')\n print('=\\t Training data' )\n for f in self.train_database:\n print('=\\t ->',f)\n print('=')\n if self.test_database is not None:\n print('=\\t Test data' )\n for f in self.test_database:\n print('=\\t ->',f)\n print('=')\n print('='*40,'\\n')\n sys.stdout.flush()\n\n\n # check if the files are ok\n self.train_database = self.check_hdf5_files(self.train_database)\n\n if self.valid_database is not None:\n self.valid_database = self.check_hdf5_files(self.valid_database)\n\n if self.test_database is not None:\n self.test_database = self.check_hdf5_files(self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n self.get_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get the target ordering\n #self._get_target_ordering()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets:\n self.get_norm()\n\n\n print('\\n')\n print(\" Data Set Info\")\n print(' Training set : %d conformations' %self.ntrain)\n print(' Validation set : %d 
conformations' %self.nvalid)\n print(' Test set : %d conformations' %(self.ntest))\n print(' Number of channels : %d' %self.input_shape[0])\n print(' Grid Size : %d x %d x %d' %(self.data_shape[1],self.data_shape[2],self.data_shape[3]))\n sys.stdout.flush()", "title": "" }, { "docid": "dec91ebf9456504c3366c7eeb8da00df", "score": "0.5314662", "text": "def show_sample(self):\n if len(self.images) != 2:\n logger.debug(\"Ping Pong training - Only one side trained. Aborting preview\")\n return None\n logger.debug(\"Showing sample\")\n feeds = dict()\n figures = dict()\n headers = dict()\n for side, samples in self.images.items():\n faces = samples[1]\n if self._model.input_shape[0] / faces.shape[1] != 1.0:\n feeds[side] = self._resize_sample(side, faces, self._model.input_shape[0])\n feeds[side] = feeds[side].reshape((-1, ) + self._model.input_shape)\n else:\n feeds[side] = faces\n if self._use_mask:\n mask = samples[-1]\n feeds[side] = [feeds[side], mask]\n\n preds = self._get_predictions(feeds[\"a\"], feeds[\"b\"])\n\n for side, samples in self.images.items():\n other_side = \"a\" if side == \"b\" else \"b\"\n predictions = [preds[\"{0}_{0}\".format(side)],\n preds[\"{}_{}\".format(other_side, side)]]\n display = self._to_full_frame(side, samples, predictions)\n headers[side] = self._get_headers(side, display[0].shape[1])\n figures[side] = np.stack([display[0], display[1], display[2], ], axis=1)\n if self.images[side][0].shape[0] % 2 == 1:\n figures[side] = np.concatenate([figures[side],\n np.expand_dims(figures[side][0], 0)])\n\n width = 4\n side_cols = width // 2\n if side_cols != 1:\n headers = self._duplicate_headers(headers, side_cols)\n\n header = np.concatenate([headers[\"a\"], headers[\"b\"]], axis=1)\n figure = np.concatenate([figures[\"a\"], figures[\"b\"]], axis=0)\n height = int(figure.shape[0] / width)\n figure = figure.reshape((width, height) + figure.shape[1:])\n figure = _stack_images(figure)\n figure = np.concatenate((header, figure), axis=0)\n\n logger.debug(\"Compiled sample\")\n return np.clip(figure * 255, 0, 255).astype('uint8')", "title": "" }, { "docid": "1254497a84f935048e1cc6a94a303162", "score": "0.53055227", "text": "def show_images(images, cols = 1, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "title": "" }, { "docid": "1254497a84f935048e1cc6a94a303162", "score": "0.53055227", "text": "def show_images(images, cols = 1, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "title": "" }, { "docid": "a3e6b641de1b9c824cad265456bd7763", "score": "0.53045225", "text": "def __init__(self, data_loader, args):\n\n self.logger = logging.getLogger(__name__)\n 
logging.getLogger(\"PIL.PngImagePlugin\").setLevel(logging.WARNING)\n\n self.args = args\n\n # glimpse network params\n self.patch_size = args.MODEL.GLIMPSE.PATCH_SIZE\n self.glimpse_scale = args.MODEL.GLIMPSE.SCALE\n self.num_patches = args.MODEL.GLIMPSE.NUM\n self.loc_hidden = args.MODEL.GLIMPSE.LOC\n self.glimpse_hidden = args.MODEL.GLIMPSE.GLIMPSE\n\n # core network params\n self.num_glimpses = args.MODEL.CORE.NUM\n self.hidden_size = args.MODEL.CORE.HIDDEN\n\n # latent network params\n self.latent_size = args.MODEL.LATENT.HIDDEN\n\n # decoder network params\n self.dec_size = args.MODEL.DECODER.HIDDEN\n\n # data params\n if args.TRAIN.IS_TRAIN:\n self.train_loader = data_loader[0]\n self.valid_loader = data_loader[1]\n self.num_train = args.TRAIN.NUM\n self.num_valid = len(self.valid_loader.dataset)\n else:\n self.test_loader = data_loader\n self.num_test = len(self.test_loader.dataset)\n\n self.gray = args.PRE_PROCESSING.GRAY\n num_channel = 2 * (not self.gray) + 1 # if gray ==1 else ==3\n self.im_size = (\n num_channel,\n args.PRE_PROCESSING.RESIZE,\n args.PRE_PROCESSING.RESIZE,\n )\n\n if args.CLASSIFY:\n self.classify = True\n self.num_classes = 10 if args.TARGET < 0 else 1\n self.target_class = args.TARGET\n else:\n self.classify = False\n self.num_classes = 0\n self.num_digits = 10\n\n # training params\n self.epochs = args.TRAIN.EPOCHS\n self.start_epoch = 0\n self.momentum = args.TRAIN.MOMENTUM\n self.lr = args.TRAIN.INIT_LR\n self.is_test = not args.TRAIN.IS_TRAIN\n self.decay = args.TRAIN.WEIGHT_DECAY\n\n # misc params\n self.use_gpu = args.GPU\n self.ckpt_dir = Path(args.CKPT_DIR)\n self.logs_dir = Path(args.TENSORBOARD_DIR)\n self.best_valid_loss = np.inf\n self.counter = 0\n self.lr_patience = args.TRAIN.LR_PATIENCE\n self.train_patience = args.TRAIN.TRAIN_PATIENCE\n self.use_tensorboard = args.USE_TENSORBOARD\n self.resume = args.RESUME\n self.name = args.NAME\n self.model_name = f\"model_{self.num_glimpses}_{self.patch_size}x{self.patch_size}_{self.glimpse_scale}_{self.name}\"\n self.use_amax = args.AMAX\n self.samples = args.FORWARD\n self.klf = args.TRAIN.KL_FACTOR\n self.clf = args.TRAIN.CL_FACTOR\n self.frac = args.TRAIN.FRAC_LABELS\n self.batch_size = None\n\n self.plot_dir = Path(\"./plots/\") / self.name\n self.file_dir = Path(\"./files/\") / self.name\n if not self.plot_dir.exists():\n self.plot_dir.mkdir(parents=True)\n self.file_dir.mkdir(parents=True)\n\n if not args.TRAIN.IS_TRAIN:\n self.num_glimpses = args.TEST.NUM\n\n # tensorboard logging\n if self.use_tensorboard:\n self.tensorboard_dir = self.logs_dir / self.model_name\n self.logger.info(f\"[*] Saving tensorboard logs to {self.tensorboard_dir}\")\n if not self.tensorboard_dir.exists():\n self.tensorboard_dir.mkdir(parents=True)\n else:\n if self.resume or self.is_test:\n pass\n else:\n for x in self.tensorboard_dir.iterdir():\n if not x.is_dir():\n x.unlink()\n self.writer = SummaryWriter(self.tensorboard_dir)\n\n # How many images to save in tensorboard\n self.num_save = 4\n\n self.logger.debug(\"Create model\")\n # build model\n self.model = VariationalPredictiveAttention(\n self.patch_size,\n self.num_patches,\n self.glimpse_scale,\n self.im_size,\n self.glimpse_hidden,\n self.loc_hidden,\n self.hidden_size,\n self.latent_size,\n self.dec_size,\n self.num_classes,\n args.BIAS,\n args.MODEL.GLIMPSE.SAMPLE,\n args.MODEL.ADD_LOC,\n )\n self.logger.debug(\"Model created\")\n\n if self.use_gpu:\n self.model.cuda()\n\n self.logger.info(\n f\"[*] Number of model parameters: {sum([p.data.nelement() for p in 
self.model.parameters()]):,}\"\n )\n\n self.logger.info(\"Model:\")\n self.logger.info(self.model)\n\n # initialize optimizer and scheduler\n if args.TRAIN.OPTIM == \"sgd\":\n self.optimizer = optim.SGD(\n filter(lambda p: p.requires_grad, self.model.parameters()),\n lr=self.lr,\n momentum=self.momentum,\n nesterov=True,\n weight_decay=self.decay,\n )\n self.schedule = True\n elif args.TRAIN.OPTIM == \"adam\":\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.lr,\n weight_decay=self.decay,\n amsgrad=True,\n )\n self.schedule = False\n elif args.TRAIN.OPTIM == \"rmsprop\":\n self.optimizer = optim.RMSprop(\n filter(lambda p: p.requires_grad, self.model.parameters()),\n lr=self.lr,\n weight_decay=self.decay,\n )\n self.schedule = True\n\n steps = args.TRAIN.SCHEDULE\n self.scheduler = MultiStepLR(self.optimizer, steps, gamma=0.7)\n\n if args.TRAIN.LOSS == \"mse\":\n self.rec_crit = nn.MSELoss()\n elif args.TRAIN.LOSS == \"bce\":\n self.rec_crit = nn.BCELoss()\n self.criterion = vae_loss", "title": "" }, { "docid": "ef1bee9b47fefdbfa83dc85fbb529f32", "score": "0.530224", "text": "def display_performance(model, device, data_loader):\n model.eval()\n fig = plt.figure(figsize=(20,40))\n cols = 4\n rows = 6\n correct = 0\n\n covid_correct_true = 0\n covid_true = 0\n covid_predicted_true = 0\n\n idx = 1\n classes = {0: 'normal', 1:'non-covid', 2:'covid'}\n with torch.no_grad():\n for data, target in data_loader:\n data, target = data.to(device), target.to(device)\n target = torch.argmax(target, dim=1).long()\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n # recall for covid\n is_covid = (target == 2).item()\n pred_covid = (pred == 2).item()\n if is_covid:\n covid_true += 1\n if pred_covid:\n covid_correct_true += 1\n if pred_covid:\n covid_predicted_true += 1\n\n\n fig.add_subplot(rows, cols, idx)\n idx+=1\n target = target.item()\n pred = pred.item()\n plt.title(\"Ground truth label: {}\\nPredicted label: {}\".format(classes[target], classes[pred]))\n img = data[0].cpu().squeeze(0)\n plt.imshow(img)\n\n acc = 100. * correct / len(data_loader.dataset)\n\n if covid_true == 0:\n covid_recall = 0\n else:\n covid_recall = 100. * covid_correct_true / covid_true\n if covid_predicted_true == 0:\n covid_precision = 0\n else:\n covid_precision = 100. 
*covid_correct_true / covid_predicted_true\n if covid_recall ==0 and covid_recall==0:\n covid_f1 = 0\n else:\n covid_f1 = 2 * covid_precision * covid_recall / (covid_precision + covid_recall)\n plt.suptitle(\"Validation set pictures with predicted and ground truth labels\\nAverage accuracy {}/{} = {:.1f}%\\nF1 score for COVID class = {:.1f}%\".format(\n correct,\n len(data_loader.dataset),\n acc,\n covid_f1),\n fontsize=30\n )\n plt.show()", "title": "" }, { "docid": "a96ab3c821fd27d52da9f111dd6e4c26", "score": "0.53002423", "text": "def get_n_data(dataloader):\n\n if isinstance(dataloader.dataset, datasets.ImageFolder):\n return len(dataloader.dataset.imgs)\n else:\n return dataloader.dataset.data.shape[0]", "title": "" }, { "docid": "1ec1fefb06aaa6b68ebe34a223290e94", "score": "0.5291997", "text": "def show_images(images, cols = 1, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(3*fig.get_size_inches()) * n_images)\n plt.show()", "title": "" }, { "docid": "087aba8f9cd1938facd0169fd6d0594f", "score": "0.5287667", "text": "def show(self, title=None, size=(5,5), fignum=21, show_map=True):\n image = self.render()\n \n if show_map:\n shared_map = self.render(self.agents_map)\n image = np.concatenate((image, shared_map), axis=1)\n \n plt.figure(num=fignum,figsize=size);\n plt.imshow(image);\n\n if title is not None:\n plt.title(title);", "title": "" }, { "docid": "61df867e5c2569494bd8fd0196c8ab01", "score": "0.5283936", "text": "def visualize_imaging(self, dataset: aa.Imaging):\r\n\r\n def should_plot(name):\r\n return plot_setting(section=[\"dataset\", \"imaging\"], name=name)\r\n\r\n mat_plot_2d = self.mat_plot_2d_from(subfolders=\"dataset\")\r\n\r\n dataset_plotter = aplt.ImagingPlotter(\r\n dataset=dataset, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d\r\n )\r\n\r\n dataset_plotter.figures_2d(\r\n data=should_plot(\"data\"),\r\n noise_map=should_plot(\"noise_map\"),\r\n psf=should_plot(\"psf\"),\r\n signal_to_noise_map=should_plot(\"signal_to_noise_map\"),\r\n )\r\n\r\n if should_plot(\"subplot_dataset\"):\r\n dataset_plotter.subplot_dataset()", "title": "" }, { "docid": "df0688e747c09762ecf86fcd542720cb", "score": "0.5277251", "text": "def show_images(self, images, titles=None):\n\n import matplotlib.pyplot as plt\n\n # Display a list of images\n n_ims = len(images)\n if titles is None: titles = ['(%d)' % i for i in range(1, n_ims + 1)]\n fig = plt.figure()\n n = 1\n for image, title in zip(images, titles):\n a = fig.add_subplot(1, n_ims, n) # Make subplot\n # if image.ndim == 2: # Is image grayscale?\n # plt.gray() # Only place in this blog you can't replace 'gray' with 'grey'\n plt.imshow(image)\n a.set_title(title)\n n += 1\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_ims)\n plt.show()", "title": "" }, { "docid": "f7035ba2ce45a9b19e68d42999a57b12", "score": "0.52764463", "text": "def plot_images(img, labels, nrows, ncols):\n fig, axes = plt.subplots(nrows, ncols)\n for i, ax in enumerate(axes.flat):\n if img[i].shape == (32, 32, 3):\n ax.imshow(img[i])\n else:\n ax.imshow(img[i, :, :, 0], cmap=plt.get_cmap('gray'))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title(labels[i])", "title": 
"" }, { "docid": "b91012c6285005500adc56769a39f446", "score": "0.52754986", "text": "def show_pic(self, team: str, num_pics: int) -> None:\n _, data_loader = self.load_data(team, 1, True)\n model = Net()\n path = '../data/autoencoder/net_snapshots/' + \\\n team + '/epoch_' + str(9)\n model.load_state_dict(torch.load(path))\n\n for data, _ in itertools.islice(data_loader, num_pics):\n plt.imshow(data.numpy()[0][0], cmap='gray')\n plt.show()\n output = model(torch.flatten(data, start_dim=1, end_dim=1))\n output.detach_()\n plt.imshow(output.reshape(108, 60).numpy(), cmap='gray')\n plt.show()", "title": "" }, { "docid": "2b3486b774a8625e3e81b0e3f109b282", "score": "0.52727634", "text": "def display_image(\n self,\n idx: Optional[int] = None,\n image: Optional[np.array] = None,\n food: Optional[str] = None,\n show: bool = False,\n ):\n\n if idx is not None:\n image, food_idx = self[idx]\n food=self.category_names[food_idx]\n elif image is None:\n idx = np.random.randint(len(self))\n image, food_idx = self[idx]\n food=self.category_names[food_idx]\n\n plt.figure(figsize=[4, 4])\n plt.imshow(image)\n\n try:\n plt.title(food.astype(str))\n except Exception:\n pass\n if show:\n plt.show()\n else:\n return plt.gcf(), plt.gca()", "title": "" }, { "docid": "198dd5a23e19096d9d24fcae0acb66ce", "score": "0.5271269", "text": "def sample_images(batches_done):\n imgs = next(iter(val_dataloader))\n real_A = imgs[0].to(device)\n real_B = imgs[1].to(device)\n fake_B = generator(real_A)\n\n img_dir = \"sample_images/%s/%s.png\" % (args.dataset_name, batches_done)\n set_fig_settings((FIG_REG_WIDTH * 2, FIG_REG_WIDTH * 1.25))\n fig = plt.figure()\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.title('Real') if i == 0 else plt.title('Fake')\n plt.imshow([real_B[0, 0].cpu().detach().numpy(), fake_B[0, 0].cpu().detach().numpy()][i])\n plt.axis('off')\n plt.tight_layout()\n plt.savefig(img_dir)", "title": "" }, { "docid": "4ed61595589623d30b5a0858c341ac88", "score": "0.52669626", "text": "def show(data, filename=None):\n import Image as pil\n img = pil.fromarray(np.array((data - data.min()) * 255.0 /\n (data.max() - data.min()), np.uint8))\n if filename:\n img.save(filename)\n else:\n img.show()", "title": "" } ]
9ef2258445ccb3bdee3a53fecf5db85f
write content to a file with newline character
[ { "docid": "2e3afb6c2d524efa8778274d7d898516", "score": "0.67021", "text": "def put(file, line=''):\n\tfile.write(line + '\\n')", "title": "" } ]
[ { "docid": "4973ee05daef1745514ee87dca7aaae3", "score": "0.779091", "text": "def write_to_file(fname,content):\r\n f = open(fname, \"a\")\r\n f.write(content)\r\n f.write(\"\\n\")\r\n f.close()", "title": "" }, { "docid": "50275ecbc66e5545bbd5b6931007e09d", "score": "0.70706123", "text": "def write_file(contents: List[str], outfile: TextIO, newlines='\\n'):\n\n for line in contents:\n outfile.write(\"{}{}\".format(line, newlines))", "title": "" }, { "docid": "91e3560baf13b3aa21d5f657ac70af12", "score": "0.7038141", "text": "def _write(self, lines):\r\n self.file.write('\\n'.join(lines))\r\n self.file.write('\\n')", "title": "" }, { "docid": "1339df26a128c9e3d01f83eae5df79ed", "score": "0.6938798", "text": "def write(self, data):\n if not self._bin_mode:\n data = _python_to_crlf_linesep(data)\n self._fo.write(data)", "title": "" }, { "docid": "3bd616c3d784759017f39d14e1ed9887", "score": "0.6899771", "text": "def write_file(fpath, content, mode='w'):\n with open(fpath, mode) as to_write:\n to_write.writelines(content)", "title": "" }, { "docid": "6e5761e42d0feaec1a961907942146e9", "score": "0.68539566", "text": "def write_to_file(file_name, data):\n with open(file_name, 'w+') as f:\n f.write('\\n'.join(data))", "title": "" }, { "docid": "ad53dbe47ea8311ba728647c39a3a75a", "score": "0.6822617", "text": "def output(self, text, nl=0, nt=1):\n if nl: # leading newlines\n self.file.write(_NL_ * nl)\n self.file.write(text)\n if nt: # trailing newlines\n self.file.write(_NL_ * nt)", "title": "" }, { "docid": "c4d0d65981fc79387a85b07f8df52e4d", "score": "0.68135864", "text": "def create_file(filename, text):\n with open(filename, 'w') as f:\n f.write(text + os.linesep)", "title": "" }, { "docid": "a6493973e3b99fd9d8941fd74721feb2", "score": "0.67884254", "text": "def add(content):\n if readfile() == \"\":\n write_to_file(content, \"a\")\n else:\n write_to_file(\"\\n\" + content, \"a\")\n return content", "title": "" }, { "docid": "36512ce9fd02e48f417a730eba521a51", "score": "0.6779833", "text": "def write (self, fpath, enc=\"UTF-8\"):\n\n ofl = codecs.open(fpath, \"w\", enc)\n ofl.writelines(self.lines)\n ofl.close()", "title": "" }, { "docid": "1092b4a0b8e1385f12166dbf39b7b3fb", "score": "0.6773003", "text": "def write_file(self, filename, contents):\n logger.info(\"Writing file: %s\", format_path(filename))\n contents = contents.rstrip() + b\"\\n\"\n self.context.write_file(filename, contents)\n logger.debug(\"Wrote %s to %s.\",\n pluralize(len(contents.splitlines()), \"line\"),\n format_path(filename))", "title": "" }, { "docid": "db353276b405307204ab5085a068719b", "score": "0.6754568", "text": "def write_file(file_, content):\n f = open(file_, 'w')\n f.write(content)\n f.close()", "title": "" }, { "docid": "3a1fa3bf79dbb9e40634a87aa0e4c54e", "score": "0.6744146", "text": "def writeln(self, content):\n self._write('%s%s\\n' % (self.style.spaces(), content))", "title": "" }, { "docid": "1437afced9381806d0dc1160894e583c", "score": "0.6696026", "text": "def writeOutput (data, path=\"output/output.txt\", newline = True):\n\twith open(path, \"w\") as file:\n\t\tfor line in data:\n\t\t\tif newline == True:\n\t\t\t\tfile.write(str(line) + \"\\n\")\n\t\t\telse:\n\t\t\t\tfile.write(str(line))\n\t\tfile.close()\n\treturn path", "title": "" }, { "docid": "7e61da298dc85eafc051efe88229080d", "score": "0.6684603", "text": "def write_file(file_name, contents):\n f = open(file_name, 'w')\n if contents.__class__.__name__ == 'list':\n f.write(\"\\n\".join(contents))\n else:\n f.write(contents)\n f.close()", 
"title": "" }, { "docid": "7563509f3023eb773bc03c295350f4cf", "score": "0.6683922", "text": "def write(self):\n self.open_file('w')\n self.file_handler.writelines(self.lines)\n self.close_file()", "title": "" }, { "docid": "0c12e6372a449120ad5fb5b60f065b6d", "score": "0.6675094", "text": "def write_to_file(self, file_name, content):\n f = open(file_name, 'w')\n f.write(content)\n f.close()", "title": "" }, { "docid": "4206bfdb8cb1f96b29eb338323961607", "score": "0.6653976", "text": "def write_file(filename, content):\n with open(filename, 'w') as f:\n f.write(content)", "title": "" }, { "docid": "e311f50ef7815611665630bd917c3c7b", "score": "0.6609157", "text": "def write_to_output_file(line):\n prepend_char = \"\\n\"\n if not os.path.isfile(output_file):\n output_writer = open(output_file, \"w\")\n prepend_char = \"\"\n else:\n output_writer = open(output_file, \"a\")\n output_writer.write(prepend_char + line)", "title": "" }, { "docid": "55301aa564844e4714baba9323760882", "score": "0.65969795", "text": "def write_file(path, contents):\n\n if sys.version_info[0] < 3 and isinstance(contents, str):\n contents = unicode(contents, \"utf-8\")\n\n with io.open(path, mode=\"wt\", encoding=\"utf-8\", newline=\"\\n\") as the_file:\n the_file.write(contents)", "title": "" }, { "docid": "b1d5e239e208fb1b9e6969bf8b7def43", "score": "0.6593768", "text": "def write_file(writer, filename):\n for line in txt_line_iterator(filename):\n writer.write(line)\n writer.write(\"\\n\")", "title": "" }, { "docid": "4a3350844c056c3a2274c2c6bae9995b", "score": "0.6589762", "text": "def save(path, data, encoding, newline=None):\n with open_output(path, encoding, newline) as output:\n output.write(data)", "title": "" }, { "docid": "6d0a0072f9017a0a6a532623c775470c", "score": "0.65776455", "text": "def write(self, payload):\n self._file.write(payload.encode('utf-8'))\n self._file.write('\\n')", "title": "" }, { "docid": "f4fbf3a9f8a37736a8a56d0109adf14b", "score": "0.6571975", "text": "def write_file(path, lines):\n\n with open(path, 'w+') as out_file:\n out_file.writelines(lines)", "title": "" }, { "docid": "ee38b64f28e2f9945ba3e83e47881371", "score": "0.65487134", "text": "def write_to_file(self, content, filename=\"default.txt\", filepath = \"default_path\"):\n\n file = self.open_file(filename, filepath)\n\n try:\n for line in content:\n file.write(str(line))\n except IOError:\n print(\"Writing to file \" + filename + \" was not possible\")\n except:\n print(\"Unknown error occured, while writing to file \" + str(filename) + \"Error: \", sys.exc_info()[0])\n\n self.close_file(file)", "title": "" }, { "docid": "1c5814a3653710868cce5f2e3a463ec3", "score": "0.65293473", "text": "def write_lines_to_file(filename: str, file_data: list[str]) -> None:\n with open(filename, 'w') as file:\n file.writelines(file_data)", "title": "" }, { "docid": "ee33241ab45fc0c54f99f00b50ad94ba", "score": "0.6506888", "text": "def write_txt(data, path):\n with open(path, 'w') as handle:\n handle.write(data)", "title": "" }, { "docid": "5e9f5009d8f5004986e3a2c72420845a", "score": "0.6502171", "text": "def write(self, file_content):\n self._prewrite_check()\n self._writable_file.append(\n compat.as_bytes(file_content, encoding=self.__encoding))", "title": "" }, { "docid": "c3c35ef6a39f6ae0b2bb0e696d5c2220", "score": "0.6501802", "text": "def append_to_file(file_name, data):\n with open(file_name, 'a+') as f:\n f.write('\\n'.join(data))", "title": "" }, { "docid": "86bd5d5ae1324dd962c7d2f2fefab6cb", "score": "0.64983726", "text": "def 
writeFile(data):\n f = open(outputFile, \"w\")\n for value in data:\n f.write(\"{}\\n\".format(value))\n f.close()", "title": "" }, { "docid": "a20b3096b704702491e594583b18a2d2", "score": "0.6488415", "text": "def newline(self):\n self._out.write(u'\\n')", "title": "" }, { "docid": "b91c1e231abef10f49495759abddd0a0", "score": "0.6487996", "text": "def safe_write(\n path: Path, fixed_lines: List[str], encoding: str, newlines: str\n) -> None:\n if not os.access(path, os.W_OK):\n raise WritePermissionError(13, \"Permission denied [WRITE]\", path)\n with open(path, mode=\"w\", encoding=encoding) as destination:\n for line in fixed_lines:\n destination.write(line.replace(os.linesep, newlines))", "title": "" }, { "docid": "423b4b102b0f4385bdb45de39f748289", "score": "0.6462884", "text": "def write_file(file_path: str, data: List[str]):\n with open(file_path, 'w') as fp:\n for line in data:\n fp.write(line + '\\n')", "title": "" }, { "docid": "da48d6b9e96454b3e07eb8661388e430", "score": "0.64515597", "text": "def append_write(filename=\"\", text=\"\"):\n\n with open(filename, mode=\"a\", encoding=\"UTF8\") as x:\n return x.write(text)", "title": "" }, { "docid": "06b27a2173b8d2f2055cc2f467d9b820", "score": "0.6447214", "text": "def write_file_lines(file_name, lines, mode='w'):\n output = io.open(file_name, mode, encoding='utf8')\n output.write(lines)\n output.close()", "title": "" }, { "docid": "435339a001c534b06e4c573efac02bba", "score": "0.64293754", "text": "def write(self, line):\n if self.prev_line_empty:\n self.target.write(self.newlines)\n self.target.write(line.rstrip())\n self.target.write(self.newlines)\n self.prev_line_empty = False", "title": "" }, { "docid": "fc85cf450c926471d88467dec9df56cc", "score": "0.64253545", "text": "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf-8\") as holder:\n return holder.write(text)", "title": "" }, { "docid": "8793eb32b3b9e87c3dc7dc3412d563f0", "score": "0.64245975", "text": "def _write_file(file_path, contents):\n if platform.system() == 'Windows':\n file_path = str(file_path).replace(':', '')\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(contents)", "title": "" }, { "docid": "9784d16ef163511db157fded958402c0", "score": "0.6421728", "text": "def save_content(content, outfile):\n with open(outfile, 'a') as out:\n out.writelines(content)\n print('The content is added to', outfile)", "title": "" }, { "docid": "7426dd8c3cd355f612db2100043e9099", "score": "0.64178395", "text": "def write(self, filename, contents, encoding='utf8'):\n raise NotImplementedError()", "title": "" }, { "docid": "76236f5782ed980847c05501242503ab", "score": "0.6396182", "text": "def writeout(filename, content, append=False):\n\n mode = \"w\"\n\n # append to the file instead of overwriting\n if append:\n mode = \"a\"\n\n # write content\n with open(filename, mode) as out:\n out.write(content)", "title": "" }, { "docid": "7f86d13c440da6b355f9e2ea7edb8001", "score": "0.6395639", "text": "def write_contents(filename, contents, encoding='UTF-8', mode=None):\n if isinstance(contents, text_type):\n contents = codecs.encode(contents, encoding)\n with write_file(filename, mode=mode) as handle:\n handle.write(contents)", "title": "" }, { "docid": "c523b5ef568c95b0ac6e1371f2d5902a", "score": "0.63940865", "text": "def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a+\", encoding='utf-8') as f:\n return (f.write(text))", "title": "" }, { "docid": "3cfd7ff9e587928e42620f16363f6dcf", "score": "0.6371656", "text": "def 
write_file(filename, content):\r\n with open(filename, 'wb') as fd:\r\n fd.write(content)", "title": "" }, { "docid": "0375366bb4a32dc706f4411e92997f3d", "score": "0.6367395", "text": "def write(self, f):\n pass", "title": "" }, { "docid": "4707d1e67b2711355fc6516a6e34cdec", "score": "0.63517886", "text": "def txt_write(lines, path: str, model: str = \"w\", encoding: str = \"utf-8\"):\n\n try:\n file = open(path, model, encoding=encoding)\n file.writelines(lines)\n file.close()\n except Exception as e:\n logging.info(str(e))", "title": "" }, { "docid": "b385003d653e37dc61eba9f3e013622b", "score": "0.6336052", "text": "def write_to_file(filename, content, mode):\n write_file = open(filename, mode)\n write_file.write(content)\n write_file.close()", "title": "" }, { "docid": "d1e084951f8180f793cd1803f3da6335", "score": "0.632989", "text": "def _write_line(self, line):\n try:\n if line[-1] == '\\n':\n self._f.write(line)\n else:\n self._f.write(line + '\\n')\n except (IOError, OSError):\n self._logger.error(\"Could not write to log file\")", "title": "" }, { "docid": "d12a89cbce98e13123aff2c2495d4a9b", "score": "0.63289225", "text": "def __appendToFile(self, n):\n fh = open(self.__fileName, \"a\")\n line = n.get_id_student() + \" \" + n.get_id_disciplina() + \" \" + n.get_n()\n #fh.write(\"\\n\")\n fh.write('%s\\n' %line)\n fh.close()", "title": "" }, { "docid": "779ed1796ae1e4432ccf5ae8470b35e2", "score": "0.6325413", "text": "def write_text(\n self, path, value, encoding=None, errors=None, newline=None, **kwargs\n ):\n with self.open(\n path,\n mode=\"w\",\n encoding=encoding,\n errors=errors,\n newline=newline,\n **kwargs,\n ) as f:\n return f.write(value)", "title": "" }, { "docid": "abee5f80da08c3367aec412c57d460ff", "score": "0.6314976", "text": "def write(self, content):\n self._write(content)", "title": "" }, { "docid": "56c1a866d48eb2117c87d8df9fc66e4a", "score": "0.6312773", "text": "def write_file(filename, contents):\n\n with open(filename, 'w') as outfile:\n outfile.write(contents)", "title": "" }, { "docid": "ade6b08f6d8e9e510ad39857b471658c", "score": "0.63077134", "text": "def set_file_content(file, content):\n nchar_written = 0\n with open(file, 'w') as f:\n nchar_written = f.write(content)\n return nchar_written", "title": "" }, { "docid": "d2bb71b7ad6b03f4f682ec08f8f1ed60", "score": "0.62958676", "text": "def _write(line):\n model_script.write(line + '\\n')", "title": "" }, { "docid": "155f62b26bdb79a2770122545f26e622", "score": "0.6287114", "text": "def append_write(filename=\"\", text=\"\"):\n with open(filename, mode=\"a\", encoding=\"UTF-8\") as my_file:\n return my_file.write(text)", "title": "" }, { "docid": "23047537df81f0dd7bebd2cdfeaa3d38", "score": "0.6257939", "text": "def write_file(self, filepath, contents):\n with open(filepath, 'w') as f:\n f.write(contents.getvalue())", "title": "" }, { "docid": "eba8fb964dfae15804a1c2c9e7e0108c", "score": "0.6256824", "text": "def write_file(file_path,content=None,mode='w'):\n with open(file_path,mode) as f:\n f.write(content)", "title": "" }, { "docid": "63b856716f862b7103faf14c555b4b99", "score": "0.6253423", "text": "def writelines(self, lines):\n self.fp.writelines(lines)", "title": "" }, { "docid": "b5ee62df29e4930ab44246a28a570045", "score": "0.6249647", "text": "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a+', encoding='utf-8') as file_name:\n return file_name.write(text)", "title": "" }, { "docid": "26f5dc88cad202229459766b11a93126", "score": "0.62468755", "text": "def write_to_file():\n f = 
open('sample.txt','w+')\n try:\n line_list = ['Hello World\\n','This is threading tutorial\\n','current Thread\\n']\n f.writelines(line_list)\n f.write(\"Hello I am Vimal\")\n time.sleep(5)\n f.write(\"Hello I\")\n f.close()\n except IOError as e:\n print(f\"Exception occured while writing - {str(e)}\")\n finally:\n f.close()", "title": "" }, { "docid": "ab4794d233cffd24465c94f0dab7a722", "score": "0.62440044", "text": "def append_write(filename=\"\", text=\"\"):\n\n with open(filename, mode='a', encoding=\"UTF8\") as myfile:\n return myfile.write(text)", "title": "" }, { "docid": "fbd46c8e2f1c18a617879b54ce40953a", "score": "0.62436444", "text": "def _write_data_to_file(data: list, file: _io.TextIOWrapper)->None:\n file.writelines(data)\n file.flush()", "title": "" }, { "docid": "eccad28ad8343d712875f6e9495db63b", "score": "0.6242549", "text": "def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a', encoding=\"UTF-8\") as myfile:\n return myfile.write(text)", "title": "" }, { "docid": "c9dffe285efb67edf622b03ad80eb1cc", "score": "0.62416273", "text": "def write(text):", "title": "" }, { "docid": "cb27c9b6acde1d98cdf75b0c4433fb5b", "score": "0.6234777", "text": "def write(cls, s, file=None, end='\\n', nolock=False):\n fp = file if file is not None else sys.stdout\n fp.write(s)\n fp.write(end)", "title": "" }, { "docid": "6d4c8d994ea57ff966eacff569b4f5e9", "score": "0.6234299", "text": "def write_lines(filename, lines):\n with open(filename, 'w', encoding=\"utf-8\") as fp:\n for line in lines:\n print(line, file=fp)\n print(\"Done writing to file %s.\" % filename)", "title": "" }, { "docid": "5c40bc87ec74cffa56fe0034a8ff6213", "score": "0.6231462", "text": "def write_to_file(self, file_path, text):\n with open(file_path, mode='w+') as f:\n f.write(text)", "title": "" }, { "docid": "a4dc9abb79eb9da910ece4d140c2eae6", "score": "0.62296903", "text": "def write( self ):\n try:\n self.file = open( self.name, 'w' )\n except IOError:\n raise mi_File_Error( \"Cannot open for write.\", self.name )\n\n for l in self.lines:\n self.file.write( l )", "title": "" }, { "docid": "970604a6b4c732e81b5c05b3fd01e860", "score": "0.6226763", "text": "def write(text):\n text_file = open(__version__+\"Output.txt\", \"a\")\n text_file.write(str(text) + \"\\n\\n\\n\")\n text_file.close()", "title": "" }, { "docid": "c23f419b707d5bcecb4b6c3b299dac32", "score": "0.62262094", "text": "def write_text(file,s):\n with open(file,\"w\") as stream:\n stream.write(s)", "title": "" }, { "docid": "405e74c1c960102307c01f526281cb2b", "score": "0.62144965", "text": "def write_line(self, line):\n return self.write(line).line_feed()", "title": "" }, { "docid": "a15e73d314b9f33c8cea040ebb0649cb", "score": "0.6207148", "text": "def write_file(filename=\"\", text=\"\"):\n with open(filename, mode='w', encoding='utf-8') as fn:\n return (fn.write(text))", "title": "" }, { "docid": "6e20908710dadcb998d3ab6b48a87312", "score": "0.6200589", "text": "def write_file(filename, content):\n return filesystem_repository.write_file(filename=filename, content=content)", "title": "" }, { "docid": "76382dac6d14e23aa2807558b4e826c8", "score": "0.6200381", "text": "def write(self, f):\n self._write_header(f)\n for token in self._tokens:\n f.write(token + '\\n')\n f.write('\\n')\n self._mark_clean()", "title": "" }, { "docid": "b38a8a6b0d51bd80081ce6e116c74ad0", "score": "0.6188969", "text": "def writelines(self, lines):\n self.write(b\"\".join(lines))", "title": "" }, { "docid": "5c6c074d716f702bb5977df5d5290fef", "score": "0.6188665", 
"text": "def write_file(self, fp, data):\n with open(fp, 'w') as f:\n f.write(data)", "title": "" }, { "docid": "fcf0db1b4f1e73c842d3da8aaca413cf", "score": "0.6187794", "text": "def write_line(file_name, line, mode):\r\n output_file = open(file_name, mode)\r\n output_file.write(line)\r\n output_file.close()", "title": "" }, { "docid": "2108eb2eae4f9e0d8f4ce39e0cfbf2ce", "score": "0.6185632", "text": "def write_text(self):\n\n with open(\"new_text.txt\", 'w') as file:\n for line in self.file_list:\n file.write(f\"{line}\\n\")", "title": "" }, { "docid": "181ca6b7b96bfc4fbfeda3d82ecfd423", "score": "0.6184688", "text": "def writeToFile(self, block):\n with open(self.filename, 'a') as f:\n f.write(\"\\n\" + block + \"\\n\")", "title": "" }, { "docid": "09babd89afb740cdb23a798fa7224e16", "score": "0.61755526", "text": "def new_text_file(txt_path, lines):\n\n f = open(txt_path, 'w', encoding='latin1')\n f.writelines(lines)", "title": "" }, { "docid": "98fde169b8ef127ef74ce992be656fba", "score": "0.61620194", "text": "def write(self, text):\n self._file.write(text)", "title": "" }, { "docid": "f4eea354c4e4b974b54cfc1f7573fd0c", "score": "0.61599845", "text": "def write(self, s):\n self.fp.write(s)", "title": "" }, { "docid": "e9795a40f2b5d68da01f77bcd740a8ab", "score": "0.6158586", "text": "def write_file(filename='', text=''):\n with open(filename, 'w+', encoding='utf-8') as file:\n return file.write(text)", "title": "" }, { "docid": "73dab4d54fd119435cd6598292268828", "score": "0.6154237", "text": "def write(fileobject, content, mode=\"a\"):\n with open(fileobject._path, mode=mode) as f:\n f.write(content)", "title": "" }, { "docid": "31ac97025a86ea21c3a5f6e89a972ead", "score": "0.6149577", "text": "def write(path, content, encoding=\"UTF-8\", append=False, raw=False):\n mode = 'wb' if not append else 'ab'\n with OPEN_FUNC(path, mode) as _file:\n if raw:\n import shutil\n shutil.copyfileobj(content, _file)\n else:\n _file.write(content.encode(encoding))", "title": "" }, { "docid": "bc8bf7d1748ac57d212359fe334b9a9b", "score": "0.61484057", "text": "def remove_content(mode=\"w\"):\n item = \"\"\n result = \"\"\n result += item + \"\\n\"\n write_to_file(result.strip(), mode)", "title": "" }, { "docid": "7da90dfab10b955acdcfa6cdfeccef29", "score": "0.6144934", "text": "def write_file(path_file, filename, data, write_type):\n #first tells if it writtes or appends\n #write_type 'w' or 'a'\n #print(data)\n path_to_2 = os.path.join(path_file, filename)\n with open(path_to_2, write_type) as file:\n file.writelines('\\t'.join(i) + '\\n' for i in data)\n file.close()", "title": "" }, { "docid": "7bcb521852fbe256146fce8958d17c6a", "score": "0.6143291", "text": "def append(path, content):\n\n with open(path, 'a') as f:\n f.write(content)", "title": "" }, { "docid": "839696c69aa5d0663637819523f3da86", "score": "0.61411095", "text": "def write_file(filename, contents, charset='utf-8'):\n with open(filename, 'w') as f:\n f.write(contents.encode(charset))", "title": "" }, { "docid": "9b8f1da51bf98ba901a3fe88a51c3872", "score": "0.6134232", "text": "def write_file(filename=\"\", text=\"\"):\n with open(filename, mode='w', encoding='utf-8') as file_open:\n return file_open.write(text)", "title": "" }, { "docid": "850342249b8e6c6fe98a52321df8baa4", "score": "0.6130477", "text": "def save_to_file(file_path: str, content: str):\n # Code for printing to a file\n if file_path:\n file_name = open(file_path, 'a')\n print(str(content), file=file_name)\n file_name.close()", "title": "" }, { "docid": 
"756f5299afc9f83d8708c35b19aa6b4d", "score": "0.6121106", "text": "def write_to_txt(path_result, entry):\n file = open(path_result, \"w\")\n file.write(entry + \"\\n\")\n file.close()", "title": "" }, { "docid": "b37e478ae4a4c44b43b45d8586f1b31d", "score": "0.6111526", "text": "def write_file(filename=\"\", text=\"\"):\n with open(filename, encoding='utf-8', mode='w') as f:\n return(f.write(text))", "title": "" }, { "docid": "c7bfa09fea0fb041ba13ebc5b1b7a070", "score": "0.6104158", "text": "def write_string_to_file(filename, file_content):\n with FileIO(filename, mode=\"w\") as f:\n f.write(file_content)", "title": "" }, { "docid": "ba975efde9b4fdeaae16fbc5156cb735", "score": "0.6102885", "text": "def write_file(self):\n f = open(self.filename, 'w')\n content = self.create_file_content()\n f.write(content)\n f.close()", "title": "" }, { "docid": "85320ad5054fe2419c213eff8a04e5c3", "score": "0.61006176", "text": "def write(string):\n write.content += string", "title": "" }, { "docid": "101786b00d20d98b6c71117873a04084", "score": "0.60999733", "text": "def append_content(txt):\r\n doc.content += txt + '\\n'", "title": "" }, { "docid": "d32dca46fb4ba1c479d104ab61e33b98", "score": "0.60962605", "text": "def append_line(file,line):\n file.seek(0,2)\n file.write(line +'\\n')", "title": "" }, { "docid": "02a79b54b0136f77cb013d1cc63331c1", "score": "0.6093955", "text": "def finish(self):\n self.file.write('\\n')", "title": "" }, { "docid": "21512cade1602da132bae87d16891ea8", "score": "0.6087086", "text": "def append_write(filename=\"\", text=\"\"):\n with open(filename, mode='a', encoding=\"utf-8\") as my_file:\n x = my_file.write(text)\n return x", "title": "" } ]
a33b8f4a6d21b020da34d58c400fba42
Adds an entry to the routing table of the Node.
[ { "docid": "0fedbf05d03351e1e6e7dc8da7a7a5c0", "score": "0.6699021", "text": "def routing_table(self, entry):\n #check if entry is a dict, or if empty list reset routing_table\n if not isinstance(entry, dict):\n if entry == []:\n self._routing_table = []\n return\n else:\n raise TypeError(f\"Entries in routing table must be type dict.\")\n #check for a valid dict\n for key in entry:\n if key == \"mac\" or key == \"TQ\":\n continue\n else:\n raise KeyError(\n \"Routing entry dicts must have Keys: 'mac' and 'TQ'\")\n self._routing_table.append(entry)", "title": "" } ]
[ { "docid": "48c2432e16918993b0fb714584798688", "score": "0.6901373", "text": "def update_routing_table(self, node_id: NodeID) -> None:\n self.logger.debug(\"Updating %s in routing table\", encode_hex(node_id))\n self.routing_table.add_or_update(node_id)", "title": "" }, { "docid": "9cc86271b25d4fcb156b0056f5fc1119", "score": "0.6754479", "text": "def add_static_route(self, host, port):\n # `port` should have been added to `peer_tables` by `handle_link_up`\n # when the link came up.\n assert port in self.ports.get_all_ports(), \"Link should be up, but is not.\"\n\n # TODO: fill this in!\n self.table[host] = TableEntry(dst=host, port=port,latency=self.ports.get_latency(port),expire_time=FOREVER)", "title": "" }, { "docid": "2efe356a44b118485cbab448860d6560", "score": "0.66889864", "text": "def add_routing_table(self, routing_table):\n if (routing_table.x, routing_table.y) in self._routing_tables_by_chip:\n raise PacmanAlreadyExistsException(\n \"The Routing table for chip \"\n f\"{routing_table.x}:{routing_table.y} already exists in this \"\n \"collection and therefore is deemed an error to re-add it\",\n str(routing_table))\n self._routing_tables_by_chip[(routing_table.x, routing_table.y)] = \\\n routing_table\n self._max_number_of_entries = max(\n self._max_number_of_entries, routing_table.number_of_entries)", "title": "" }, { "docid": "88e5db351fc0f9e44f4f9cf1ebce268c", "score": "0.65870523", "text": "def add_static_route(self, host, port):\n # `port` should have been added to `peer_tables` by `handle_link_up`\n # when the link came up.\n assert port in self.ports.get_all_ports(), \"Link should be up, but is not.\"\n\n # TODO: fill this in!\n self.table[host] = TableEntry(host, port, self.ports.get_latency(port), FOREVER)", "title": "" }, { "docid": "3b44fd3ed8bf697a9958df6b5bae87b2", "score": "0.64911914", "text": "def insert_route(self, match_vRouter_number,\n match_ipv4address,\n action_dest_mac,\n action_egress_port):\n\n entry = shell.TableEntry(\"MyIngress.ipv4NextHopLPM\")(\n action=\"MyIngress.ipv4Forward\")\n entry.match[\"vRouterNumber\"] = str(match_vRouter_number)\n entry.match[\"hdr.ipv4.dstAddr\"] = str(match_ipv4address)\n entry.action[\"port\"] = str(action_egress_port)\n entry.action[\"dstAddr\"] = str(action_dest_mac)\n entry.insert()", "title": "" }, { "docid": "1e323555a97e7524e6760d6577efa2c5", "score": "0.64598674", "text": "def addRoute(self, *args):\n return _coin.SoInput_addRoute(self, *args)", "title": "" }, { "docid": "6866bfc667f1143f1b4a087afaf9a2d6", "score": "0.6442855", "text": "def addRoute(self, *args):\n return _coin.SoProto_addRoute(self, *args)", "title": "" }, { "docid": "373e84527bcd314ec0f1aacf12e951d9", "score": "0.64134705", "text": "def add_table_entry(self, table_id, table_entry):", "title": "" }, { "docid": "7f49a6e1cdc87ce207b6668b7d9651b4", "score": "0.63112104", "text": "def add_node(self, node):\n self.list_node[node] = True", "title": "" }, { "docid": "cb6fac1356307abaf19d14201fec0d4c", "score": "0.62822765", "text": "def add_node(self, node):", "title": "" }, { "docid": "2f67be12f53a15fe9f6f405b6edc335f", "score": "0.62800795", "text": "def add_node(self, node):\n self.nodes.append(node)\n for x in xrange(self.replicas):\n ringkey = self.hash_method(b(\"%s:%d\" % (node, x)))\n self.ring[ringkey] = node\n self.sorted_keys.append(ringkey)\n\n self.sorted_keys.sort()", "title": "" }, { "docid": "b45abeb4b846f9c32c603071008f2ec6", "score": "0.6277852", "text": "def add(self, node: str):\n if self._load_factor() >= 0.75:\n 
self._resize_map()\n\n potential_index = self._get_new_index(node)\n value_at_index = self[potential_index]\n\n if value_at_index != node:\n self[potential_index] = node\n self.nb_of_nodes += 1", "title": "" }, { "docid": "f49d006a66db03ceb3f5b0eb90c1f685", "score": "0.6221739", "text": "def addnode(self, nodename, node):\n hash_ = self._hash(nodename)\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)\n self._numnodes += self._numnodes\n if(self._debug):\n print 'in addnode nodename:', nodename, \" node:\", node, \" hash_:\", hash_, \" self_keys: \", self._keys, \" self_nodes: \", self._nodes", "title": "" }, { "docid": "69bd9b5813aa13a8f3fdcd2f1f3d8723", "score": "0.61834675", "text": "def add_node(self, node, key):\n self.get_state()\n if int(key) not in self._sorted_keys:\n self.ring[str(key)] = node\n self._sorted_keys.append(int(key))\n self._sorted_keys.sort()\n self.save_state()", "title": "" }, { "docid": "18417cfb9c294a4e80a813445f2a10ce", "score": "0.61600363", "text": "def add(self, sched_entry):\n name = sched_entry['name']\n logger.debug(\"Add Entry[%s] = %s\", name, sched_entry)\n self.sched_dict[name] = sched_entry\n heappush(self.heap, [sched_entry['t_next'], name])", "title": "" }, { "docid": "fc78536d5ef00c4fb74c1b9fc99c0c8c", "score": "0.61538565", "text": "def add_route(self, address1, address2):\n\n self.routes[address1] = address2\n self.routes[address2] = address1", "title": "" }, { "docid": "8dee1658da3972ddb35f9129fe8b3de0", "score": "0.6132496", "text": "def add_route(self, uri_template, resource):\r\n\r\n uri_fields, path_template = helpers.compile_uri_template(uri_template)\r\n method_map = helpers.create_http_method_map(\r\n resource, uri_fields, self._before, self._after)\r\n\r\n # Insert at the head of the list in case we get duplicate\r\n # adds (will cause the last one to win).\r\n self._routes.insert(0, (path_template, method_map))", "title": "" }, { "docid": "377b6e38f552b8872091ab3d1e6fb2b5", "score": "0.611205", "text": "def add_entry(self, entry):\n logging.debug(\"Adding entry to %s\" % self.name)\n # @FIXME: Check entry is proper type for this table\n # @FIXME: Support entry priorities for ternary matching\n\n if isinstance(entry, TableEntryDefault):\n return self.set_default_entry(entry)\n\n with self.cond_var:\n self.entries.append(entry)", "title": "" }, { "docid": "4dc0c18866a6756b868718af94b8ffe6", "score": "0.6106541", "text": "def add_route(self, uri, action, method):\n\n self.__routes.add(uri, action, method)", "title": "" }, { "docid": "8bc058c92beb32a5d1a64e9762ecf971", "score": "0.61036944", "text": "def addNode(self, node):\n \n pass", "title": "" }, { "docid": "72f2ff19e2d6bb3b0671ff52ca55de17", "score": "0.61000526", "text": "def add_route(self, to_town, distance, word):\n\n if to_town.alpha in self.to_map:\n msg = '\"{0}\": route definition already known'\n raise LoadError(msg.format(word))\n self.to_map[to_town.alpha] = Route(self, to_town, distance)", "title": "" }, { "docid": "2dcc8c0641f2c5335e88e7164ccd6136", "score": "0.6097982", "text": "def add_node(self, key1, key2, u):\n if u in CL.entry_finder:\n self.remove_node(u)\n CL.entry_finder[u] = [key1, key2, u]\n heapq.heappush(CL.U, [key1, key2, u])", "title": "" }, { "docid": "8ab43ee39a3c3492dd206d8781911e02", "score": "0.60935736", "text": "def add_static_route(self, host, port):\n # `port` should have been added to `peer_tables` by `handle_link_up`\n # when the 
link came up.\n assert port in self.peer_tables, \"Link is not up?\"\n\n staticRoute = PeerTableEntry(host, 0, PeerTableEntry.FOREVER)\n self.peer_tables.get(port).update({host: staticRoute})\n self.update_forwarding_table()\n self.send_routes(force=False)", "title": "" }, { "docid": "2c35e1b5cf7c89715664b4a32d157f96", "score": "0.60804987", "text": "def add(self, route_regexp, action, name=None, **urlvars):\n self.routes.append(Route(route_regexp, self.get_action(action), urlvars, name))", "title": "" }, { "docid": "eceb07c071078c19493d2ff95ec21de0", "score": "0.6057271", "text": "def addRoute(self, *args):\n return _coin.SoOutput_addRoute(self, *args)", "title": "" }, { "docid": "84a0c1a25985f893aaa265d8329704d5", "score": "0.6049281", "text": "def add(self,key,value):\n\n index = self.hash(key)\n\n if self._bucket[index]== None:\n self._bucket[index]=LinkedList()\n self._bucket[index].insert([key,value])", "title": "" }, { "docid": "8710dc243f405c980491ff34cf1d153d", "score": "0.60452765", "text": "def add_node(self, key, value):\n new_node = _node_(key, value)\n self.count += 1\n if self.head == None:\n self.head = new_node\n else:\n prev = self.head\n self.head = new_node\n self.head.next = prev", "title": "" }, { "docid": "d3d32ff4ccd66e271cfdc3eb63416fef", "score": "0.6044796", "text": "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "title": "" }, { "docid": "6beb74939b312b5db1566b070c5fc18b", "score": "0.60385865", "text": "def addNode(node): #@NoSelf", "title": "" }, { "docid": "eccaf68b4162ee543715d39b81e5acd2", "score": "0.60224867", "text": "def add_route(duthost, prefix, nexthop):\n duthost.shell(\"vtysh -c 'configure terminal' -c 'ip route {} {}'\".format(prefix, nexthop))", "title": "" }, { "docid": "b8827637ef89b17883cbe03e9434fbbc", "score": "0.60080224", "text": "def handle_route_advertisement(self, route_dst, route_latency, port):\n # TODO: fill this in!\n # if its not in the table entry then I need to add it since it is a new destination\n if route_dst not in self.table.keys():\n self.table[route_dst] = TableEntry(route_dst, port, self.ports.get_latency(port) + route_latency, api.current_time() + self.ROUTE_TTL)\n else:\n for host, entry in self.table.items():\n if route_dst == host: # if my destination is in my table entry then maybe I have found a better path and must update my existing path\n if port == entry.port and route_latency >= INFINITY:\n self.table[host] = TableEntry(route_dst, port, INFINITY, api.current_time())\n self.send_routes(False)\n elif port == entry.port or entry.latency > route_latency + self.ports.get_latency(port):\n self.table[host] = TableEntry(route_dst, port, route_latency + self.ports.get_latency(port), api.current_time() + self.ROUTE_TTL)\n self.send_routes(False)", "title": "" }, { "docid": "c060d1145e1f7712183b76bbdf972e42", "score": "0.5982631", "text": "def add(self, node):\r\n self.queue.append(node)", "title": "" }, { "docid": "2a94cf27bbec61ac4e2850e060c279fe", "score": "0.59806114", "text": "def add(self, entry):\n split = entry.split('/')\n if split[-1] == '':\n split = split[:-1]\n self._root.add(split)", "title": "" }, { "docid": "ab25309369988ddde99e88d53751baa0", "score": "0.59476894", "text": "def add_host_route(self, ip):\n if ip not in self.host_routes:\n logger.info(\"Add Host Route {0} @ {1}\".format(ip, self.iface))\n try:\n ipcmd.add_route(self.iface, ip)\n except ipcmd.IpCmdError:\n # Failure is normal if the proxy already existed\n if ip in self.host_routes:\n 
return\n # Reload tables\n self.reload()\n if ip in self.host_routes:\n return\n # Let's try again, and failure goes up this time\n ipcmd.add_route(self.iface, ip)\n self.host_routes.add(ip)", "title": "" }, { "docid": "4f96235f054b306ff685b4acbe384f37", "score": "0.59240955", "text": "def add_route_item(self, name, pattern, view_data):\n self.sitemap.add_item(RouteItem(name))", "title": "" }, { "docid": "a7a19bd54b9bc98077046d37c2b80712", "score": "0.5916051", "text": "def _add_entry(self, key, value):\n # TODO: assign a priority based on plugins.yml\n priority = 0\n\n self._entries.append(Entry(priority, key, value))\n self._cache = None", "title": "" }, { "docid": "cf3c324bcfcd4eeed5efdbda75db5434", "score": "0.59138024", "text": "def add_node(self, node):\n self.nodes.append(node)", "title": "" }, { "docid": "9e7441e0e14c4a6b767bbf1b40dbec19", "score": "0.5913795", "text": "def add_router(self):\r\n self.router = {\r\n 'ip': self.router_ip,\r\n 'mac': good_mac(self.router_mac),\r\n 'vendor': get_vendor(self.router_mac),\r\n 'type': 'Router',\r\n 'admin': True\r\n }\r\n\r\n self.devices.insert(0, self.router)", "title": "" }, { "docid": "93402f9ce457f5d479bbc6a094a45884", "score": "0.5897472", "text": "def add(self, entry):\n # Remove the first element if we're not the root node.\n if not self.is_root:\n if entry[0] != self._name:\n raise ValueError('Cannot add a non-matching entry to a Node!')\n entry = entry[1:]\n\n # If the entry is now empty, this node is a leaf.\n if not entry:\n self._is_leaf = True\n return\n\n # Add a child node.\n if not self._is_leaf:\n child = self._children.get(entry[0])\n if not child:\n child = Tree.Node(entry[0])\n self._children[entry[0]] = child\n child.add(entry)\n\n # If we have more than COMBINE_PATHS_THRESHOLD immediate children,\n # combine them into this node.\n immediate_children = 0\n for child in self._children.itervalues():\n if child._is_leaf:\n immediate_children += 1\n if not self.is_root and immediate_children >= COMBINE_PATHS_THRESHOLD:\n self._is_leaf = True\n self._children = {}", "title": "" }, { "docid": "14826279554efb5c85c5c734f239913f", "score": "0.5861758", "text": "def addNode(self, node):\n self.nodes.append(node)", "title": "" }, { "docid": "1513e69815592cfdd9ff288adf258a40", "score": "0.5855208", "text": "def put(self, key, value):\n # Over the resize threshold? If so, resize the hash table\n if self.get_load_factor() > MAX_LOAD:\n # load factor currently exceeds the maximum threshold. 
Resize the hash table\n self.resize(2*self.capacity)\n\n # Generate the hashed index of the inbound key\n idx = self.hash_index(key)\n # Generate a new value node to be placed into the hash table\n new_node = HashTableEntry(key, value)\n\n # Is the current table entry empty?\n if self.table[idx] == None:\n # Empty table entry, store the passed value as a new node\n self.table[idx] = new_node\n self.num_entries = self.num_entries + 1\n return\n\n # Have at least one node at this table index\n cur_node = self.table[idx]\n while True:\n # Are we replacing an existing key/value pair (key == cur_node.key)\n if key == cur_node.key:\n # Yes, replace current node value with passed value\n cur_node.value = value\n return\n\n # Is this the last node?\n if cur_node.next == None:\n # at the last node in the linked list\n # append a new node referenced by a new key\n break\n\n # More nodes to traverse, advance to the next node\n cur_node = cur_node.next\n\n # Place new node at the end of the linked list\n self.num_entries = self.num_entries + 1\n cur_node.next = new_node", "title": "" }, { "docid": "d90595d2942f9ba8981a419f0be5667c", "score": "0.5854223", "text": "def insert(self, key, node):\n if self.next_nodes.get(key, None) is None:\n self.next_nodes[key] = node\n else:\n raise", "title": "" }, { "docid": "92ca632df185da01b31176d98a69f8f5", "score": "0.5834742", "text": "def append(self, entry):\n self.entries.append(entry)", "title": "" }, { "docid": "8a24ebaaaeae82b8ebbbfbf8db2fe83f", "score": "0.58326674", "text": "def associate_route_table(DryRun=None, SubnetId=None, RouteTableId=None):\n pass", "title": "" }, { "docid": "f7c261341163c882179dcdcc204e8145", "score": "0.5828796", "text": "def update_routing_table(self, rip_entries, next_hop_router, output_port):\n route_dead = self.routing_table.update(rip_entries, next_hop_router, self.output_ports[output_port][1])\n if route_dead:\n self.start_garbage_timer()\n self.update_triggered()", "title": "" }, { "docid": "223d464d18e1f22eb0c5a6f27376b123", "score": "0.58255386", "text": "def add_node(self, node):\n self.nodes.add(node)", "title": "" }, { "docid": "223d464d18e1f22eb0c5a6f27376b123", "score": "0.58255386", "text": "def add_node(self, node):\n self.nodes.add(node)", "title": "" }, { "docid": "7639af8dca287cef1ab5ed20faf53338", "score": "0.581804", "text": "def AddNode(self, node):\n if not self.HasNode(node):\n self.connections[node] = []", "title": "" }, { "docid": "b899e8198c09af002a8bf9af39880c39", "score": "0.58124363", "text": "def add_node(self, node):\n\n # check if input node is already in graph\n if node in self.nodeList:\n print(\"Node already in graph, unable to add\")\n return\n\n # add node to list, add node to dictionaries\n self.nodeList.add(node)\n self.parents[node] = dict()\n self.children[node] = dict()\n\n return 1", "title": "" }, { "docid": "0da5dd1620eb89c2ec7518a0d76f800a", "score": "0.5808406", "text": "def addRouteOnStep(self, timeStep, routeId, routeList):\n self._route[timeStep] = (routeId, routeList)", "title": "" }, { "docid": "df08f1129a6418571f9ed3c458ddb785", "score": "0.5808357", "text": "def _insert(self, key, value):\n self.hash_table[key].add(value)", "title": "" }, { "docid": "7964f577156645fd4dc947eb38b2f997", "score": "0.579783", "text": "def add_route(self, path_re, controller):\n\t\tself._controller_map.append((path_re, controller))", "title": "" }, { "docid": "0a71af57c3f2b7b05448fec9c8498a58", "score": "0.57925445", "text": "def put(self, key: int, value: int) -> None:\n aNode=Node(key,value)\n 
slot=self.hashfunc(key)\n if self.alist[slot]==None:\n self.alist[slot]=aNode\n else:\n now=self.alist[slot]\n while now.next!=None and now.key!=key:\n now=now.next\n\n if now.key==key:\n now.value=value\n else:\n aNode.next=now.next\n now.next=aNode", "title": "" }, { "docid": "9a646dd2608a9c90ef655a8e2bc2f4b2", "score": "0.579208", "text": "def add_entry(\n self,\n entry,\n properties=None,\n weights=None,\n supercell_matrix=None,\n site_mapping=None,\n verbose=True,\n raise_failed=False,\n ):\n processed_entry = self.process_entry(\n entry,\n properties,\n weights,\n supercell_matrix,\n site_mapping,\n verbose,\n raise_failed,\n )\n if processed_entry is not None:\n self._entries.append(processed_entry)\n if verbose:\n self._corr_duplicate_warning(self.num_structures - 1)", "title": "" }, { "docid": "0060ae1cfa886bea6761a1b67df4f1cd", "score": "0.57812643", "text": "def add(self, node):\n self.nodes.append(node)\n self.count += 1", "title": "" }, { "docid": "7a69f16ff694c58b6a34fafab3349ed9", "score": "0.57624173", "text": "def put(self, key: int, value: int) -> None:\n key_i = key // 100\n head = self.hashtable[key_i]\n while head.next:\n if head.next.key == key:\n head.next.val = value\n return\n \n head = head.next\n new_node = Node(key, value)\n head.next = new_node", "title": "" }, { "docid": "d4a1039eae383514379c04922b2180be", "score": "0.5755415", "text": "def put(self, key, data):\r\n if self.load_factor() >= 0.75:\r\n self.resize(self.table)\r\n hash_idx = hash_string(key, self.slots)\r\n num = 1\r\n while self.table[hash_idx] and self.table[hash_idx] != self.deleted\\\r\n and self.table[hash_idx].key != key:\r\n hash_idx = (hash_idx + num*num) % self.slots\r\n self.num_collisions += 1\r\n num += 1\r\n self.table[hash_idx] = Node(key, data)\r\n self.num_items += 1", "title": "" }, { "docid": "272d1e968c1f740b28c49141074328fa", "score": "0.5748685", "text": "def insert(self, path, handler):\n node = self.root\n\n # if path doesn't start with '/' or handler is empty, we return None\n if path[0] != '' or handler == '' or len(path) < 2:\n return None\n\n # if path contains 2 elements and they are '' this means path is '/'\n # so we return the root.handler because this has been already initialised\n if len(path) == 2 and path[0] == '' and path[1] == '':\n node.handler = handler\n return\n\n # if path is valid, we iterate it and add it to the RouteTrie\n for single_path in path[1:]:\n # we skip extra '/' in the path if they exist\n if single_path == '':\n break\n # we keep traversing the RouteTrie if we are finding part of the path\n if single_path in node.children:\n node = node.children[single_path]\n # if it doesn't exist we create a new Node and add the path\n else:\n new_node = RouteTrieNode()\n node.children[single_path] = new_node\n node = new_node\n # We finally add the handler to the path\n node.handler = handler", "title": "" }, { "docid": "7b18b9d1f49b3931ab289b5995321a8b", "score": "0.5739841", "text": "def add_entry(self, relative, event):\r\n self.entries[relative] = self.__child__(os.path.join(self.data,\r\n relative),\r\n self.fam)\r\n self.entries[relative].HandleEvent(event)", "title": "" }, { "docid": "55fa480b4a77b7f2e4e744c817c3d5db", "score": "0.57391155", "text": "def addToHash(self, entry, table):\n\t\tif table.has_key(entry):\n\t\t\ttable[entry] += 1\n\t\telse:\n\t\t\ttable[entry] = 1", "title": "" }, { "docid": "ced66b2284df42c62cc3e11b1aa8c275", "score": "0.5733701", "text": "def AddNode(self, *args):\n return _snap.TUNGraph_AddNode(self, *args)", "title": 
"" }, { "docid": "92941de0fc7bec71d7c7f92c56615f0d", "score": "0.57326967", "text": "def put(self, key, value):\n # If empty spot in hashtable array\n if (self.hashtable[self.djb2(key)] == None):\n self.hashtable[self.djb2(key)] = HashTableEntry(key, value)\n self.items+=1\n else:\n current = self.hashtable[self.djb2(key)] # Initialize current\n\n # Loop until you find the node with .next value equal to None (the end of the linked list)\n while (current.next != None):\n # If at any point current.key == key you are searching for, then replace value accordingly\n if (current.key == key):\n current.value = value\n return\n current = current.next\n\n # If current.key == key you are searching for, then replace value accordingly\n # Need this additional check here for replacing values, as the previous loop doesn't check at the tail node that could be the node needed to be replaced\n if (current.key == key):\n current.value = value\n return\n\n # Set next of the end node to the value you want to add to the next in line in the linked list\n current.next = HashTableEntry(key, value)\n self.items+=1", "title": "" }, { "docid": "f4b3980fe1ba9e500ac122d7e8a9b8e0", "score": "0.5728148", "text": "def add(self,node):\n node_hash = hash(node)\n \n # check if this is a successful node.\n if self.success(node_hash):\n print(\"Success found!\")\n self.successes.append(node)\n if node_hash not in self.nodes.keys():\n self.nodes[node_hash] = node\n self.check_optimal()\n return False\n \n # check if node already exists; if so ignore\n if node_hash in self.nodes.keys():\n return False\n\n # add to dict\n self.nodes[node_hash] = node\n return True", "title": "" }, { "docid": "7cdad3c22c3929b84c3d6f6907098e57", "score": "0.57185906", "text": "def add_node(self, node):\n self.sender.node_added(self.source_id_buff, self.time_id, node)\n self.time_id += 1", "title": "" }, { "docid": "31c2c2a9d618ce4422ec2ce812bafb86", "score": "0.57097024", "text": "def put(self, key, data):\r\n if self.load_factor() > 0.75:\r\n self.resize(self.table)\r\n hash_idx = hash_string(key, self.slots)\r\n while self.table[hash_idx] is not None and self.table[hash_idx].key != key:\r\n hash_idx = (hash_idx + 1) % self.slots\r\n self.num_collisions += 1\r\n if self.table[hash_idx] is None:\r\n self.table[hash_idx] = Node(key, data)\r\n else:\r\n self.table[hash_idx].val = data\r\n self.num_items += 1", "title": "" }, { "docid": "7dd8abf40eca8a4dc2b593ca9fd434cf", "score": "0.5703388", "text": "def put(self, key, value):\n if key is None:\n raise KeyError(\"None is not a valid key\")\n bucket_index = self._hash_key(key)\n hash_table_item = HashTableItem(key, value)\n if not self._table[bucket_index]:\n self._table[bucket_index] = SinglyLinkedList()\n linked_list = self._table[bucket_index]\n returned_item = linked_list.find_value(hash_table_item)\n if not returned_item:\n linked_list.push_front(hash_table_item)\n self._num_items += 1\n if self._should_double():\n self._resize_table(2)\n else:\n returned_item.value = value", "title": "" }, { "docid": "bfc83fe12aa886f0e2fc42f0c4541fee", "score": "0.570266", "text": "def add_link_to(self, node):\r\n Link(self, node)", "title": "" }, { "docid": "621c005251aea44db220e1a332cd86f1", "score": "0.5702566", "text": "def addEntry(self, table, state, symbol, result):\n table[state][symbol] = result", "title": "" }, { "docid": "717f6d604bdb3416e1e0897ecf918f18", "score": "0.56958693", "text": "def addToNode(self,name,dic):\n\t\tn = listToPath(name)\n\t\tif not n in self.stats:\n\t\t\tself.stats[n] = 
dic\n\t\telse:\n\t\t\tself.stats[n].update(dic)\n\t\treturn name", "title": "" }, { "docid": "43eae1b5f79f056a2c038c7dd093de26", "score": "0.569148", "text": "def addEntry(self, key, val):\n hashBucket = self.buckets[key % self.numBuckets]\n\n for i in range(len(hashBucket)):\n if hashBucket[i][0] == key:\n hashBucket[i] = (key, val)\n return\n\n hashBucket.append((key, val))", "title": "" }, { "docid": "08ac8e813d7c4f34817297a26a768ab9", "score": "0.5688548", "text": "def add(self, key, value):\r\n # Make sure the item isn't already in the table.\r\n cell, num_probes = self.find(key)\r\n if cell != None:\r\n raise ValueError(f\"The key {key} is already in the hash table.\")\r\n\r\n # Find the key's bucket.\r\n bucket_num = key % self.num_buckets\r\n sentinel = self.buckets[bucket_num]\r\n\r\n # Add the item at the beginning of the bucket.\r\n new_cell = Cell(key, value, sentinel.next)\r\n sentinel.next = new_cell\r\n\r\n # Update num_used.\r\n self.num_used += 1\r\n return num_probes", "title": "" }, { "docid": "8e12e01e5a4976edbd5f9c67dbe41b37", "score": "0.5662321", "text": "def add_entry(self, entry_object):\n self.entries.append(entry_object)", "title": "" }, { "docid": "efc9f3c9cf72eea9599ffc4a95b5f53a", "score": "0.56595033", "text": "def insert_vRouter_port_mapping(self, match_ingress_port, action_vRouter_number):\n\n entry = shell.TableEntry(\"MyIngress.vRouterNumberMatching\")(\n action=\"MyIngress.setVSwitchNumber\")\n entry.match[\"standard_metadata.ingress_port\"] = str(match_ingress_port)\n entry.action[\"vRouterNumberFromTable\"] = str(action_vRouter_number)\n entry.insert()", "title": "" }, { "docid": "526f020ab58619a36b05692584651395", "score": "0.56576306", "text": "def add_node(self, key):\n self.size += 1\n new_node = Node(key)\n self.node_list[key] = new_node\n return new_node", "title": "" }, { "docid": "e5da60e0def8ecb6bbe09f4707ebc1da", "score": "0.5656732", "text": "def _insert(self, key, value):\n location = self.hash(key, True)\n\n if location is not None: # there is an open spot in table\n # location is None or previously deleted\n if self.table[location] is None or self.table[location].deleted:\n new_node = HashNode(key, value)\n self.table[location] = new_node\n self.size += 1\n\n load_factor = self.size/self.capacity\n \n if load_factor >= 0.5:\n self._grow()\n else: # key already exists\n self.table[location].value = value", "title": "" }, { "docid": "a938955678576e7121c331ef8f69d27b", "score": "0.5656664", "text": "def add_neighbour(self, neighbour):\n\t\tself.adj.append(neighbour)", "title": "" }, { "docid": "6834ab80a0c9b2489fff87146be02657", "score": "0.5656228", "text": "def addRoutingStep(self):\r\n self.routing_step_counter = 0 #reset the routing step numbers so it autopopulates correctly\r\n # create label\r\n self.label = QtGui.QLabel(self.centralWidget)\r\n self.label.setText(self.btnAddRoutingStep.text())\r\n self.label.setObjectName(_fromUtf8(\"label_\" + str(self.naming_counter)))\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\r\n self.label.setSizePolicy(sizePolicy)\r\n self.Addinfogrid.addWidget(self.label, self.current_row, 0, 1, 1)\r\n # create Routing Step Box\r\n self.routing_step_line_edit = QtGui.QLineEdit(self.centralWidget)\r\n self.routing_step_line_edit.setObjectName(_fromUtf8(\"RoutingStep_\" + str(self.naming_counter)))\r\n self.Addinfogrid.addWidget(self.routing_step_line_edit, self.current_row, 1, 1, 1)\r\n self.routing_step_line_edit.setFixedWidth(30)\r\n # create Descr Box\r\n 
self.routing_description_line_edit = QtGui.QLineEdit(self.centralWidget)\r\n self.routing_description_line_edit.setObjectName(\r\n _fromUtf8(\"RoutingDescription_\" + str(self.naming_counter)))\r\n self.Addinfogrid.addWidget(self.routing_description_line_edit, self.current_row, 2, 1, 1)\r\n # create entry combo\r\n self.setEntryBoxSelections(self.current_row)\r\n # update variables\r\n self.row_contents[self.current_row].append(self.label)\r\n self.row_contents[self.current_row].append(self.routing_step_line_edit)\r\n self.row_contents[self.current_row].append(self.routing_description_line_edit)\r\n self.row_contents[self.current_row].append(self.keyRoutingStep)\r\n self.current_row += 1\r\n self.naming_counter += 1", "title": "" }, { "docid": "c9746afc618e4e77c7ae4acab7bfae1d", "score": "0.5651949", "text": "def insert(self, key, value): \n index = self._hash(key) #use Hash method to save index of key\n node = self.buckets[index] #establish pointer to node of index\n if node is None: #if node is empty, then insert\n self.buckets[index] = Node(key, value) #instatiate node \n return\n #if node is not none, then set it to prev as we will insert new node\n prev = node \n while node is not None: #iterate through each node and setting the next node to prev, if it is not empty\n prev = node\n node = node.next\n #after an 'empty(none)' node is found, the prev or last node will point to a new node being established\n prev.next = Node(key, value) \n self.size +=1 #increase size of node count", "title": "" }, { "docid": "7aed141c87b723f5b91986f57157ddce", "score": "0.56315476", "text": "def add_node(self, v):\n if (not v in self.graph.keys()):\n self.graph[v] = []", "title": "" }, { "docid": "2d059ef954bebb1608d4f2b9738dc4a2", "score": "0.56290185", "text": "def add_node (self, node):\n raise NotImplementedError(\"Not implemented yet!\")", "title": "" }, { "docid": "53636ce75eea234917e7fd7839a0bd30", "score": "0.56250453", "text": "def add(self, key: CT) -> None:\n self.root, _ = self._insert(self.root, key)", "title": "" }, { "docid": "3b6a5282ab65a1c2b76b5e7272075010", "score": "0.56242746", "text": "def add(self,node):\r\n self.child = node", "title": "" }, { "docid": "aad8ec75fd14ffef94709bdf61828985", "score": "0.56217265", "text": "def addNode(self, u):\r\n if u in self.vertices:\r\n return\r\n self.vertices[u] = {}", "title": "" }, { "docid": "63c468ba6ae345968882b904f2100212", "score": "0.5614434", "text": "def AddNode(self, *args):\n return _snap.TNGraph_AddNode(self, *args)", "title": "" }, { "docid": "f9e3fe9fe7a5bf0ba6e59e2e2eb48570", "score": "0.5611464", "text": "def _add_node(self, node):\n self.nodes[node.name] = node\n\n self.on_node_created(node)\n\n for pin in node.inputs.values():\n assert not pin.is_folded, (pin.name, pin.node)\n\n # Ensure node restored to original place\n self.on_node_moved(node, node.position)\n\n self.history.record_command(lambda: self._add_node(node), lambda: self.delete_node(node))", "title": "" }, { "docid": "1c48ff9110ebfda745e213b2f5478941", "score": "0.56111705", "text": "def add(self, key):\n current = self.map[key] if key in self.map else None\n node = None\n \n if current == None:\n node = Node(key)\n self.map[key] = node\n else:\n node = self.map[key]\n\n # Check for eviction\n evictionKey = None\n if(len(self.map) > self.capacity):\n temp = self.head.next\n self.head.next = temp.next\n temp.next.prev = self.head\n temp.prev = None\n temp.next = None\n evictionKey = temp.key\n\n # Update status\n if node.next and node.prev:\n node.next.prev = 
node.prev\n node.prev.next = node.next\n\n temp = self.head.next\n self.head.next = node\n node.prev = self.head\n node.next = temp\n temp.prev = node\n\n if evictionKey:\n del self.map[evictionKey]\n\n return evictionKey", "title": "" }, { "docid": "ad3156422b282aff49a34a83ce7ff77d", "score": "0.56110096", "text": "def add_node(self, parent, children):\n if parent not in self.treemap:\n self.treemap[parent] = TreeNode()\n self.treemap[parent].children += children", "title": "" }, { "docid": "d13a01c61722e38cfcbf3cbf1ba5d558", "score": "0.56096584", "text": "def add_node(self, ip, port):\n self.directory.append({\"ip\": ip, \"port\": port})", "title": "" }, { "docid": "ac43bcee1f0fa8045d9a81e5f6e5a64f", "score": "0.5604597", "text": "def handleNewLink(self, port, endpoint, cost):\n self.neighbours[endpoint] = {'cost' : cost, 'port' : port}\n self.router_packets[self.addr]['neighbours'] = self.neighbours\n self.generateLSP() # because new link has been added, generate a new LSP ", "title": "" }, { "docid": "60201c4a2cd373487bf6f0b76f335eb4", "score": "0.56037825", "text": "def put(self, node, priority=0):\n\n if node in self.entry_finder:\n self.delete(node)\n entry = [priority, node]\n self.entry_finder[node] = entry\n heapq.heappush(self.heap, entry)\n self.size += 1", "title": "" }, { "docid": "8a15d4bf5b11169a27c424a86d3f7f14", "score": "0.55991447", "text": "def put(self, key: int, value: int) -> None:\n index = hash(key)\n if self.store[index] is not None:\n cur = self.store[index]\n while cur is not None:\n if cur.key == key:\n cur.key=key\n cur.value=value\n return\n elif cur.next == None:\n temp = Node(key,value)\n cur.next = temp\n else:\n cur = cur.next\n else:\n self.store[index] = Node(key,value)", "title": "" }, { "docid": "d5d53600d2d6074d15ee0fe7ff11644a", "score": "0.5598489", "text": "def add_node(self, node):\n if node not in self.nodes:\n self.nodeList.add(node)\n self.parents[node] = dict()\n self.children[node] = dict()\n else:\n return \"Node in graph already\"", "title": "" }, { "docid": "b154a5408ab63a410fbc251d3471bead", "score": "0.55952513", "text": "def AddNode(self, *args):\n return _snap.TBPGraph_AddNode(self, *args)", "title": "" }, { "docid": "9ca5b2f9a8f10e82c2ed3b460a45bb57", "score": "0.55910707", "text": "def addEntry(self, entry: SearchItem):\n directoryLock.acquire_write()\n try:\n self.queue.append(entry)\n finally:\n directoryLock.release()\n self.update()", "title": "" }, { "docid": "a82ad721d18695b6cc6b36c49fe1073f", "score": "0.55896866", "text": "def add_path(self, path):\n path = list(path)\n x,y,*z = path[0]\n key1 = self._key(x,y)\n for (x,y,*z) in path[1:-1]:\n key = self._new_key(x, y)\n self._edges.append((key1, key))\n key1 = key\n x,y,*z = path[-1]\n key2 = self._key(x, y)\n self._edges.append((key1, key2))", "title": "" }, { "docid": "7f1c04e597f0989c6cc13e80d5b21ecf", "score": "0.5586828", "text": "def addNode(self, node):\n if node not in self.degree:\n self.degree[node] = 0\n self.destinations[node] = []\n self.sources[node] = []", "title": "" }, { "docid": "d683c97404e99e8dfe4d515bf58e5313", "score": "0.5585715", "text": "def add_node(self, node: Node):\n self.nodes.add(node)", "title": "" }, { "docid": "175e9ec928f87a69502d1af21a52d412", "score": "0.55737895", "text": "def create_route(self):\n response = self.RouteTable.create_route(DestinationCidrBlock=\"0.0.0.0/0\",GatewayId=self.gateway.id)\n return response", "title": "" }, { "docid": "a7eedb13a17a40ef5ac48ae92d70c34e", "score": "0.5573125", "text": "def AddNode(self, *args):\n 
return _snap.TNEGraph_AddNode(self, *args)", "title": "" }, { "docid": "674d7d75d26d2eca12f439637b994e3c", "score": "0.5572885", "text": "def add(self, node):\r\n if self.cnt < self.capacity:\r\n self.cnt += 1\r\n self.forward(node)\r\n else:\r\n f = self.removenode()\r\n self.forward(node)\r\n if f != -1:\r\n self.adjustIndexNode(f)", "title": "" } ]
cd323382656cf93ddc488225cd8382bb
Percent point function at q, or inverse CDF. Approximated by looking up the index in the cdf table that is closest to q.
[ { "docid": "bf20399d7950fabe0303fc32e8edfabe", "score": "0.6197692", "text": "def ppf(self, q):\n q = np.atleast_1d(q)\n\n # look up the index of the quantile in the 2D CDF grid\n values = []\n for qi in q:\n # find index in grid for every dimension\n idx1, idx2 = np.where(self.joint_cdf >= qi)\n values.append([self.ks[idx1[0]], self.thetas[idx2[0]]])\n\n return np.array(values)", "title": "" } ]
[ { "docid": "f12bea694f0f6a1129ecd0f73232bc83", "score": "0.71535176", "text": "def cdf_percentile(x, p, q=50.0):\n\n # Determine index where cumulative percentage is achieved\n i = np.where(p>q/100.0)[0][0]\n\n # If at extremes of the distribution, return the limiting value\n if i==0 or i==len(x):\n return x[i]\n\n # or interpolate between the two bracketing values in the CDF\n else:\n m = (p[i]-p[i-1])/(x[i]-x[i-1])\n c = p[i] - m*x[i]\n return (q/100.0-c)/m", "title": "" }, { "docid": "1932254d91cc620b51f495c6c77a97ae", "score": "0.68623704", "text": "def ppf(q, df, loc=0.0, scale=1.0, gamma = 1.0):\n result = np.zeros(q.shape[0])\n probzero = Skewt.cdf(x=np.zeros(1),loc=np.zeros(1),df=df,gamma=gamma)\n result[q<probzero] = 1.0/gamma*ss.t.ppf(((np.power(gamma,2) + 1.0) * q[q<probzero])/2.0,df)\n result[q>=probzero] = gamma*ss.t.ppf((1.0 + 1.0/np.power(gamma,2))/2.0*(q[q >= probzero] - probzero) + 0.5, df)\n return result", "title": "" }, { "docid": "c6064e6d608c526651c9e60cba99b305", "score": "0.6497167", "text": "def inv_t_cdf(q, df):\r\n\r\n if q < 0.5 or q > 1:\r\n raise ValueError(\"q value must be [0.5, 1]\")\r\n\r\n # The function below calculates using a two tail value. Hence we need to\r\n # convert.\r\n p = (1 - q) * 2\r\n\r\n # There is a paper describing the operation of this algorithm\r\n # and it is behind a paywall:\r\n # http://dl.acm.org/citation.cfm?id=362776\r\n\r\n if df < 1:\r\n raise ValueError(\"df value must be >= 1\")\r\n\r\n if df == 1:\r\n p *= pi / 2\r\n return cos(p) / sin(p)\r\n\r\n a = 1.0 / (df - 0.5)\r\n b = 48 / (a * a)\r\n c = ((20700 * a / b - 98) * a - 16) * a + 96.36\r\n d = ((94.5 / (b + c) - 3.0) / b + 1.0) * sqrt(a * pi / 2) * df\r\n\r\n x = d * p\r\n y = x ** (2.0 / df)\r\n if y > a + 0.05:\r\n # asymptotic inverse expansion about the normal\r\n x = inv_norm_cdf(p * 0.5)\r\n y = x * x\r\n if df < 5:\r\n c += 0.3 * (df - 4.5) * (x + 0.6)\r\n c = (((0.5 * d * x - 0.5) * x - 7.0) * x - 2.0) * x + b + c\r\n y = (((((0.4 * y + 6.3) * y + 36) * y + 94.5) / c - y - 3) / b + 1) * x\r\n y *= a * y\r\n if y > 0.002:\r\n y = exp(y) - 1\r\n else:\r\n y += 0.5 * y * y\r\n else:\r\n y = (((1 / (((df + 6) / (df * y) - 0.089 * d - 0.822) *\r\n (df + 2.0) * 3.0) + 0.5 / (df + 4.0)) * y - 1.0) *\r\n (df + 1.0) / (df + 2.0) + 1.0 / y)\r\n\r\n return sqrt(df * y)", "title": "" }, { "docid": "c6d047dc337378930a60124b8aa70a8e", "score": "0.6226168", "text": "def qerfi(q):\n\n c0 = 2.515516698\n c1 = 0.802853\n c2 = 0.010328\n d1 = 1.432788\n d2 = 0.189269\n d3 = 0.001308\n\n x = 0.5 - q\n t = max(0.5 - abs(x), 0.000001)\n t = (-2.0 * math.log(t))**0.5\n v = t - ((c2 * t + c1) * t + c0) / (((d3 * t + d2) * t + d1) * t + 1.0)\n if (x < 0.0):\n v = -v\n\n return v", "title": "" }, { "docid": "1650c655f1ba2cce77429470711e1a73", "score": "0.6019953", "text": "def _get_fdr_from_pvalue_interp(self, pvalue):\n pvalue += 1e-15\n qvals = self.df[self._colname_qvalue]\n pvals = self.df[self._colname_pvalue]\n ya = qvals[pvals < pvalue].max()\n yb = qvals[pvals > pvalue].min()\n xa = pvals[pvals < pvalue].max()\n xb = pvals[pvals > pvalue].min()\n dx = xb - xa\n dy = yb - ya\n yc = ya + dy * (pvalue - xa) / dx\n return yc", "title": "" }, { "docid": "adc1908df66f91108bde43e2ee8a4c07", "score": "0.5962773", "text": "def q(theta_prop,theta_current):\n\tn = ss.norm(loc=theta_current, scale=0.1) \t# scale = 0.1 as in q_sample\n\tnumerator = n.pdf(theta_prop)\n\tdenominator = n.cdf(1) - n.cdf(0)\n\treturn numerator/denominator", "title": "" }, { "docid": 
"37cca0f0f7717cf577fc13c5757b0a43", "score": "0.59572744", "text": "def fidelity(p, q) -> float:\n if len(p) != len(q):\n raise ValueError(\n f'Distributions must have the same length, not {len(p)}, {len(q)}')\n prod = np.array(p) * np.array(q)\n return abs(np.sum(sm.sqrt(prod))) ** 2", "title": "" }, { "docid": "85c6d3bcaf0c1d5793b8145ac51b8393", "score": "0.59410477", "text": "def get_percentage(cd45_pos, dapi_pos, cutoff):\n results = process_percentages(dapi_pos, cd45_pos, cutoff)\n return results[0]", "title": "" }, { "docid": "b554dc6bb420a53830c6242de342614c", "score": "0.5890418", "text": "def pdfq(self, Q):\n return self.norm*exp(-Q/2.)", "title": "" }, { "docid": "9a0ab6d377bb19d96ba88342bf1f4273", "score": "0.5866694", "text": "def chisqprob(x, df):\r\n if x <= 0:\r\n return 1.0\r\n if x == 0:\r\n return 0.0\r\n if df <= 0:\r\n raise ValueError(\"Domain error.\")\r\n if x < 1.0 or x < df:\r\n return 1.0 - _igam(0.5*df, 0.5*x)\r\n return _igamc(0.5*df, 0.5*x)", "title": "" }, { "docid": "8de3dcab56ed848d32695492cfdeaf43", "score": "0.58331394", "text": "def dist(p, q):\n return ((p[0] - q[0])**2 + (p[1] - q[1])**2 + (p[2] - q[2])**2)**0.5", "title": "" }, { "docid": "f4e31d8dc31c4a90d4a1b67f6c536ca0", "score": "0.58135825", "text": "def _cdf(self, p, a, c, m, z):\n return integrate.quad(lambda p1: self._pdf(p1, a, c, m, z),\n -np.inf, p)[0]", "title": "" }, { "docid": "0b974a19c4efea09d7e430aa13f99c74", "score": "0.56883323", "text": "def dist(p, q):\n return (p[0] - q[0])**2 + (p[1] - q[1])**2 + (p[2] - q[2])**2", "title": "" }, { "docid": "72e02bd932b1b68c11aed3bf61bfce6b", "score": "0.5685906", "text": "def chi2P(chi, df):\n assert df & 1 == 0\n m = chi / 2.0\n sum = term = math.exp(-m)\n for i in range(1, df/2):\n term *= m/i\n sum += term\n return min(sum, 1.0)", "title": "" }, { "docid": "e712fe3d1c59d190f0199f01774ad7f3", "score": "0.56824905", "text": "def cdf(x, p, b, loc=0, scale=1):\n with mp.extradps(5):\n p, b, loc, scale = _validate_params(p, b, loc, scale)\n x = mp.mpf(x)\n\n if x <= loc:\n return mp.zero\n m = mode(p, b, loc, scale)\n # If the mode is in the integration interval, use it to do the integral\n # in two parts. 
Otherwise do just one integral.\n if x <= m:\n c = mp.quad(lambda t: pdf(t, p, b, loc, scale), [loc, x])\n else:\n c = (mp.quad(lambda t: pdf(t, p, b, loc, scale), [loc, m]) +\n mp.quad(lambda t: pdf(t, p, b, loc, scale), [m, x]))\n c = min(c, mp.one)\n return c", "title": "" }, { "docid": "0e2d71b76011e393d7a5fff91c236ffc", "score": "0.5681697", "text": "def cdf(x, df, loc=0.0, scale=1.0, gamma = 1.0):\n result = np.zeros(x.shape[0])\n result[x<0] = 2.0/(np.power(gamma,2) + 1.0)*ss.t.cdf(gamma*(x[x-loc < 0]-loc[x-loc < 0])/scale, df=df)\n result[x>=0] = 1.0/(np.power(gamma,2) + 1.0) + 2.0/((1.0/np.power(gamma,2)) + 1.0)*(ss.t.cdf((x[x-loc >= 0]-loc[x-loc >= 0])/(gamma*scale), df=df)-0.5)\n return result", "title": "" }, { "docid": "8daf6b0192802d5113fb32ba8c631544", "score": "0.5636917", "text": "def qfn(self, df):\n return self._qfn(df)", "title": "" }, { "docid": "1525a9e1bb69d4b110d60dda9f043bbc", "score": "0.5619846", "text": "def ppf(self, cd):\n if self._ppf_func is None:\n x0, y0 = self.cdf_grid\n self._ppf_func = sp.interpolate.interp1d(\n y0, x0, kind='cubic', fill_value='extrapolate') # **self._INTERP_KWARGS)\n\n # Symmetry can be utilized to get better accuracy of results, see 'note' above\n if self.SYMMETRIC:\n cd = np.atleast_1d(cd)\n idx = (cd > 0.5)\n cd = np.copy(cd)\n cd[idx] = 1 - cd[idx]\n\n try:\n xx = self._ppf_func(cd)\n except ValueError:\n logging.error(\"`_ppf_func` failed!\")\n logging.error(\"input `cd` = {} <=== {}\".format(\n utils.stats_str(cd), utils.array_str(cd)))\n for vv in self.cdf_grid:\n logging.error(\"\\tcdf_grid: {} <== {}\".format(\n utils.stats_str(vv), utils.array_str(vv)))\n raise\n\n if self.SYMMETRIC:\n xx[idx] = -xx[idx]\n\n return xx", "title": "" }, { "docid": "006b6135fe7b70d5c2b8e4bb6d3234b0", "score": "0.56155556", "text": "def pdf_internal(x, df, loc=0.0, scale=1.0, gamma = 1.0):\n result = np.zeros(x.shape[0])\n result[x<0] = 2.0/(gamma + 1.0/gamma)*stats.t.pdf(x=gamma*x[(x-loc) < 0], loc=loc[(x-loc) < 0]*gamma,df=df, scale=scale)\n result[x>=0] = 2.0/(gamma + 1.0/gamma)*stats.t.pdf(x=x[(x-loc) >= 0]/gamma, loc=loc[(x-loc) >= 0]/gamma,df=df, scale=scale)\n return result", "title": "" }, { "docid": "b6222cf992104502b747fea037735723", "score": "0.5610944", "text": "def calc_relative_density_salgado_et_al_1997_cpt_values(q_c1n, c_dq=0.9):\n return 0.465 * np.sqrt(q_c1n / c_dq) - 1.063", "title": "" }, { "docid": "d365375aba345025b76866eab078eee5", "score": "0.56013954", "text": "def ppf(self, qs):\n q = np.atleast_1d(qs)\n\n # look up the index of the quantile in the 2D CDF grid\n values = []\n for q in qs:\n # find index in grid for every dimension\n idx1 = np.where(self.cdf_array >= q)[0][0]\n values.append(self.support[idx1])\n\n return np.array(values)", "title": "" }, { "docid": "55dea005cee509a06dd3d5ac4a3d8026", "score": "0.55930823", "text": "def pval(self) -> float:\n return 1 - self.dist.cdf(self.stat)", "title": "" }, { "docid": "1f70f8cd01de890a97e414e5e0d9b753", "score": "0.5547797", "text": "def pdf(self, r1, rq, cq, r3):\n components = self.pdf_fixed_sign(r1, rq, cq, r3, SIGNS[None, :])\n return tf.abs(tf.reduce_sum(components, axis=-1))", "title": "" }, { "docid": "48efac06844808b633df07824b9f965d", "score": "0.5495738", "text": "def percentile(t: torch.tensor, q: float) -> Union[int, float]:\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! 
Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "title": "" }, { "docid": "48efac06844808b633df07824b9f965d", "score": "0.5495738", "text": "def percentile(t: torch.tensor, q: float) -> Union[int, float]:\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "title": "" }, { "docid": "03545873183179d7d2537e1c50c0a0df", "score": "0.54663", "text": "def hits_theory(d_prime, criterion):\n return 1-stats.norm.cdf(criterion-d_prime)", "title": "" }, { "docid": "663116ac906b8494286df33d113e50fc", "score": "0.54496807", "text": "def _lookup_p_value(t, df):\n assert df >= 1, 'Degrees of freedom must be positive'\n\n # We ignore the negative sign on the t-value because our null hypothesis\n # is that the two samples are the same; our alternative hypothesis is that\n # the second sample is lesser OR greater than the first.\n t = abs(t)\n\n def greatest_smaller(nums, target):\n \"\"\"Returns the largest number that is <= the target number.\"\"\"\n lesser_equal = [n for n in nums if n <= target]\n assert lesser_equal, 'No number in number list <= target.'\n return max(lesser_equal)\n\n df_key = greatest_smaller(MathUtilsApi.TABLE.keys(), df)\n t_table_row = MathUtilsApi.TABLE[df_key]\n approximate_t_value = greatest_smaller(t_table_row, t)\n t_value_index = t_table_row.index(approximate_t_value)\n\n return MathUtilsApi.TWO_TAIL[t_value_index]", "title": "" }, { "docid": "6f38bae904f65b7e483a6f211287d1a9", "score": "0.5443089", "text": "def calculate_psi(self, dist_qtd: list, dist_perc: list) -> np.array:\n return (dist_perc[1] - dist_perc[0]) * np.log(\n dist_perc[1] / dist_perc[0])", "title": "" }, { "docid": "96b6b2d3515933d37b5a30f0193a0b7b", "score": "0.5426643", "text": "def pyqdm(vobs, vref, vfut, dist=stats.lognorm):\n\n if not isinstance(vobs, (list, np.ndarray,)):\n raise TypeError(\"Incorrect input type for observed values\") \n if not isinstance(vref, (list, np.ndarray,)):\n raise TypeError(\"Incorrect input type for reference period values\")\n if not isinstance(vfut, (list, np.ndarray,)):\n raise TypeError(\"Incorrect input type for future period values\")\n\n if any(np.isnan(vobs)):\n raise ValueError(\"Input observation array contains NaN values\")\n if any(np.isnan(vref)):\n raise ValueError(\"Input reference array contains NaN values\")\n if any(np.isnan(vfut)):\n raise ValueError(\"Input future array contains NaN values\") \n\n pobs = dist.fit(vobs, loc=0, scale=1)\n pref = dist.fit(vref, loc=0, scale=1)\n pfut = dist.fit(vfut, loc=0, scale=1)\n \n # CDF of future, at the value of the future data points\n Fsf = dist.cdf(vfut, *pfut)\n\n # Inverse cdf of reference period distribution, evaluated at future CDF\n # values\n invFsr = dist.ppf(Fsf, *pref)\n \n # Relative change in values\n delta = vfut / invFsr \n vfutb = dist.ppf(Fsf, *pobs) * delta \n \n return vfutb", "title": "" }, { "docid": "b5e73baae75c0ac3ce7784d2d4fa3a23", "score": "0.54080266", "text": "def q_value(self, x): \n return Stats.qvalue(x)", "title": "" }, { "docid": "c83d52baccafa4aadf55e517974d933e", "score": "0.54014796", "text": "def _q(p, t):\n\t\treturn 
((1.0-t)**3)*p[0] + (3*(1.0-t)**2*t)*p[1] + (3*(1.0-t)*t**2)*p[2] + (t**3)*p[3]", "title": "" }, { "docid": "98650651525ad07b7185ff924b404002", "score": "0.5394221", "text": "def pctc(a, b):\r\n\r\n return (b - a) / np.abs(a)", "title": "" }, { "docid": "857e749331a39cbc7b9d640a6db02c97", "score": "0.5391754", "text": "def pdfq(self, Q):\n return _mvnt.mvtdenq(self.ndim, self.nu, self.norm, Q)", "title": "" }, { "docid": "9f870ff01dfcc4c66b6f89aba9d2efa4", "score": "0.5384126", "text": "def get_q95(self):\n from scipy import interpolate\n psi = self.data['psi'][0]\n q = self.data['qpsi'][0]\n f = interpolate.interp1d(psi,q,kind='cubic')\n psi95 = np.min(psi)+(np.max(psi)-np.min(psi))*0.95\n return f(psi95)", "title": "" }, { "docid": "90b77f0ce0beb2486d4d9ce2d3ae0dd5", "score": "0.53806305", "text": "def query_frac(self, cl, q=Q_DEFAULT):\n df = self.data_fitted\n n = self.query_count(cl, q=q)\n n_cl_occ = (df['cl'] == cl).sum()\n return n/n_cl_occ", "title": "" }, { "docid": "fe1abef1016a8753e76fdd4aa00f9a76", "score": "0.53487784", "text": "def __call__(self, q):\n \n if isinstance(q, Quaternion): q = q.dofs\n \n return super(NearestQuaternion, self).__call__(q) / np.dot(q, q)", "title": "" }, { "docid": "7752b39f5d4d62690ab33b150d27b2cc", "score": "0.53293294", "text": "def _euclidean_dist(p, q):\n return np.linalg.norm(p - q)", "title": "" }, { "docid": "d08ddf5466545e8d7f95c256cfabbf31", "score": "0.5326587", "text": "def f_ratio(chi_square_1,p1,chi_square_2,p2,n):\n ###############################################\n F=( (chi_square_1-chi_square_2)/(p2-p1) ) / (chi_square_2 / (n-p2) )\n \n from scipy.stats import f\n return(f.cdf(F,p2-p1,n-p2))", "title": "" }, { "docid": "8ca20184f047bc1c615f6dfb8d20ce22", "score": "0.5325731", "text": "def q(b):\n return np.sqrt(b)/(1.+np.sqrt(b))", "title": "" }, { "docid": "79ee83f6b9544ffdb35e1baeae414c41", "score": "0.5323326", "text": "def percentile(weight:torch.Tensor, q:float):\n k = 1 + round(float(q) * (weight.numel() - 1))\n result = weight.view(-1).abs().kthvalue(k).values.item()\n return result", "title": "" }, { "docid": "fab551e658341ede6928e60210d4d584", "score": "0.53144443", "text": "def dcpf(B, Pbus, Va0, ref, pv, pq):\r\n pvpq = r_[pv, pq]\r\n\r\n ## initialize result vector\r\n Va = copy(Va0)\r\n\r\n ## update angles for non-reference buses\r\n if pvpq.shape == (1, 1): #workaround for bug in scipy <0.19\r\n pvpq = array(pvpq).flatten()\r\n pvpq_matrix = B[pvpq.T,:].tocsc()[:,pvpq]\r\n ref_matrix = transpose(Pbus[pvpq] - B[pvpq.T,:].tocsc()[:,ref] * Va0[ref])\r\n Va[pvpq] = real(spsolve(pvpq_matrix, ref_matrix))\r\n\r\n return Va", "title": "" }, { "docid": "bb4e173a24fee49e787d6fd22aa290b4", "score": "0.5312867", "text": "def qhi(x):\n return np.percentile(x, 100)", "title": "" }, { "docid": "5d8661b145bfd3e21e0d8512c40b47c0", "score": "0.53099346", "text": "def score_df_result(\n self, query: Text, df: pd.DataFrame, field: Text, index: int) -> float:\n result = df.iloc[index][field]\n if result:\n return self.score(query, result)\n return None", "title": "" }, { "docid": "cac45cda3e29fca5c200162475279b70", "score": "0.5278208", "text": "def c_pdf(self, x):\n\n # only support for positive x\n assert x >= 0\n\n # shortcuts\n m_dof = self.m\n n_dof = self.n\n m2 = m_dof / 2\n n2 = n_dof / 2\n mn2 = m2 + n2\n dmn = m_dof / n_dof\n\n # compute all parts\n t = m.gamma(mn2) * dmn ** m2 * np.power(x, m2 - 1)\n b = m.gamma(m2) * m.gamma(n2) * (1 + np.multiply(dmn, x)) ** mn2\n return t / b", "title": "" }, { "docid": 
"e291af94c866d2445bda74e80c947507", "score": "0.52772623", "text": "def fdr_correction(pvals):\n if pvals is None:\n qvals = None\n else:\n pvals = np.array(pvals)\n # get descending indices\n by_descend = pvals.argsort()[::-1]\n # Get mapping from sorted to original indices\n by_orig = by_descend.argsort()\n # Compute the ratio of n_pval by ranks, which can then be\n # multiplied by p-values to get the critical value\n steps = float(len(pvals)) / np.arange(len(pvals), 0, -1)\n # Compute critical values and find last p-value below above it\n # Set all other to the same value and back to 1 if higher\n qvals = np.minimum(1, np.minimum.accumulate(steps * pvals[by_descend]))\n # Unsort q-values to get original order\n qvals = qvals[by_orig]\n\n return qvals", "title": "" }, { "docid": "743ca908f4bfc8703cf0f79cfebe28da", "score": "0.5274953", "text": "def test_qc (self): \r\n \r\n c, _ = self.pobj.qc ( tol = .6 )\r\n print(f\"QC= {c * 100}%\" )", "title": "" }, { "docid": "2bc0723ea6107186a23cb2c41c3c5588", "score": "0.5259269", "text": "def cmp_func(p, q):\n return cmp(p.logprob / (len(p) ** self.length_norm_weight),\n q.logprob / (len(q) ** self.length_norm_weight))", "title": "" }, { "docid": "b6ac73845e60278a04254843ff2b9db2", "score": "0.5247404", "text": "def __calculate_pval(self):\n self.__z = self.__calculate_zscore()\n \n return 2 * (1 - ABTest.SpecialFunctions(self.__z).cdf(\"abram\"))", "title": "" }, { "docid": "53523564df0aed778719163c15ea63b3", "score": "0.5239782", "text": "def probability_to_distance(p):\n if p >= (3.0 / 4.0):\n return float('inf')\n return -(3.0 / 4.0) * math.log(1.0 - (4.0 / 3.0) * p)", "title": "" }, { "docid": "23f121c019443f990f79437ff6ef10e5", "score": "0.5233118", "text": "def quantile(self, q):\n if q < 0 or q > 1 or self._count == 0:\n return np.nan\n\n if len(self.incoming) > 0:\n self.merge_compress()\n\n rank = int(q*(self._count - 1) + 1)\n spread = int(self.eps*(self._count - 1))\n g_sum = 0.0\n i = 0\n while i < len(self.entries):\n g_sum += self.entries[i].g\n if g_sum + self.entries[i].delta > rank + spread:\n break\n i += 1\n if i == 0:\n return self._min\n\n return self.entries[i-1].val", "title": "" }, { "docid": "6463d9592feb3582805d7135f8678c65", "score": "0.5216591", "text": "def q_value(self):\n visited = self.n if self.n>0 else 1\n return self.t/visited", "title": "" }, { "docid": "70bdf853cd352f07cab0db14dac94cc4", "score": "0.521505", "text": "def _eq_coverage_function(self, theta, T, G0, b, p):\n kBT = kB * T\n ## start by calculating the equilibirum constant \n K = np.exp( -1 * ( G0 + b * ( theta - 1./2. 
) ) / kBT ) \n return theta - ( K / ( 1 + K ) )", "title": "" }, { "docid": "45044d003bbe70077e9efa9d388b6b89", "score": "0.5214675", "text": "def euclidean_distance(self, p, q):\n return sqrt(pow((p['x']-q['x'] ),2) + pow((p['y']-q['y'] ),2) )", "title": "" }, { "docid": "7910fb46c5feebc9b51903c4313d7fbb", "score": "0.5211056", "text": "def query_coverage(self) -> float:\n try:\n return self.query_aligned_length / self.qlen\n except ZeroDivisionError:\n return 0.0", "title": "" }, { "docid": "8246e21804dbd7d769da07c925468453", "score": "0.52065825", "text": "def f_p(self,f_c):\n return f_c*self.Zr/self.Zp", "title": "" }, { "docid": "e7bb85ce52cc006acf052e91da4658e8", "score": "0.52059335", "text": "def prior(position, gf):\n prior_prob = 0\n for i, prior in enumerate(gf.priors):\n prior_prob += prior.pdf(position[i])\n return -prior_prob", "title": "" }, { "docid": "eb5366115c5e2c13ac27f248f339aaa4", "score": "0.5200528", "text": "def _pdf(self, p, a, c, m, z):\n return (1.0 / z) * np.exp(-self._K_indiv(p, a, c, m))", "title": "" }, { "docid": "5c092b82865f4de6de03d02db36a31fc", "score": "0.5196148", "text": "def refr_idx(q, sld):\n return cmath.sqrt(1 - 16 * pi * sld / (q ** 2))", "title": "" }, { "docid": "52934e3f72cabca7245c97806a2f5c1c", "score": "0.5192766", "text": "def _fast_pdf(self, x):\n return self._fast_pdf_probit(x) * np.exp(-probit_logJ(x, self.bounds, self.probit))", "title": "" }, { "docid": "fafcb5ac8e4e114019838eb2ff436cb0", "score": "0.51858723", "text": "def cdf(self):\n return binom.cdf(self.__r, self.__n, self.__p)", "title": "" }, { "docid": "00462728c9f850150e526a687388deb9", "score": "0.5176853", "text": "def update_approx(arm, m, s, X, f, F):\n f[arm] = norm.pdf(X, m, s)\n F[arm] = norm.cdf(X, m, s)\n return f, F", "title": "" }, { "docid": "f90532d82ae174dde831431757d90627", "score": "0.51754725", "text": "def inv_norm_cdf(p):\r\n if 0 < p < p_low:\r\n # rational approximation for the lower region\r\n q = sqrt(-2 * log(p))\r\n x = ((((((c1 * q + c2) * q + c3) * q + c4) * q + c5) * q + c6) /\r\n ((((d1 * q + d2) * q + d3) * q + d4) * q + 1))\r\n elif p_low <= p <= p_high:\r\n # rational approximation for the central region\r\n q = p - 0.5\r\n r = q * q\r\n x = ((((((a1 * r + a2) * r + a3) * r + a4) * r + a5) * r + a6) * q /\r\n (((((b1 * r + b2) * r + b3) * r + b4) * r + b5) * r + 1.0))\r\n else:\r\n # rational approximation for the upper region\r\n q = sqrt(-2.0 * log(1.0 - p))\r\n x = (-(((((c1 * q + c2) * q + c3) * q + c4) * q + c5) * q + c6) /\r\n ((((d1 * q + d2) * q + d3) * q + d4) * q + 1.0))\r\n\r\n # Can't refine to maximum precision due to the lack of the erfc function.\r\n # See http://home.online.no/~pjacklam/notes/invnorm/ for details.\r\n return x", "title": "" }, { "docid": "076cc575831cc2e8a5accc2dceeda819", "score": "0.5175472", "text": "def qlo(x):\n return np.percentile(x, 25)", "title": "" }, { "docid": "db758c0c5b360b5cb9560b8158e7faad", "score": "0.5170501", "text": "def calc_q(histogram, z, i):\r\n left_limit = z[i]\r\n right_limit = z[i + 1]\r\n range_arr = GRAY_RANGE[left_limit:right_limit]\r\n local_sum = sum(range_arr * histogram[left_limit:right_limit])\r\n divider = sum(histogram[left_limit:right_limit])\r\n if divider == 0:\r\n return divider\r\n return local_sum / divider", "title": "" }, { "docid": "29a123e307eee6be768c7510acfb98fa", "score": "0.51680017", "text": "def get_prob(self, qid=None):\n amp = qstate_get_camp(self, qid)\n if qid is None:\n digits = self.qubit_num\n else:\n digits = len(qid)\n prob = 
{\"{:0{digits}b}\".format(i, digits=digits): abs(c) * abs(c)\n for i, c in enumerate(amp) if abs(c) > cfg.EPS}\n return prob", "title": "" }, { "docid": "c8efd7be57e2693e151dbe6a19fb0c8e", "score": "0.515931", "text": "def get_value(self, c_puct):\n self._u = (c_puct * self._P *\n np.sqrt(self._parent._n_visits) / (1 + self._n_visits))\n return self._Q + self._u", "title": "" }, { "docid": "22a4d33663fd50982eea879efc1431da", "score": "0.5155029", "text": "def gradient(self, q):\n if isinstance(q, Quaternion): q = q.dofs\n\n grad = super(NearestQuaternion, self).gradient(q)\n return (grad - 2*self(q)*q) / np.dot(q, q)", "title": "" }, { "docid": "7f79ee94f13bdb1fd513ef1bb3669101", "score": "0.5151069", "text": "def form_factor(self,q):\r\n def interpolate():\r\n \"\"\" FIXME \"\"\"\r\n return 1\r\n form_factor = _weighted(interpolate)(q)\r\n return form_factor", "title": "" }, { "docid": "7380dfd178547abaaa84bb58c2e940e0", "score": "0.51430446", "text": "def correct_percentage_for_formation(self, formation):\n total_qs = self.total_questions_for_formation(formation)\n if total_qs == 0: return 0\n return round(\n 100.0 * self.total_correct_for_formation(formation) / total_qs\n )", "title": "" }, { "docid": "519d7f4637c2e1926f02396ef7e20f55", "score": "0.51421916", "text": "def get_fq_centroid(self, **kwargs):\n f, psd = self.get_psd(**kwargs)\n\n return np.sum(f*psd)/np.sum(psd)", "title": "" }, { "docid": "94cfe6e95b659ff5e909c3be7bb33b96", "score": "0.51382506", "text": "def joint_prob(target: str, base: str, sum_c: int, df: pd.DataFrame) -> float:\n prob_t_b = df[base][target] / sum_c\n prob_t = df['count_w'][target] / sum_c\n prob_c = df[base]['count_c'] / sum_c\n result = ma.log2((prob_t_b/(prob_t * prob_c)))\n if result > 0:\n return ma.round(result, decimals=4)\n else:\n return 0", "title": "" }, { "docid": "02dd5ace6c64b3ee11c7f926a232b763", "score": "0.51345015", "text": "def gradient(self, q):\n dV = self.dV(q)\n return dV.view(np.float64)", "title": "" }, { "docid": "30c12a26da6b10fd8805fbcb6e92d50d", "score": "0.5129247", "text": "def _percent_diff(expected: float, predicted: float) -> float:\n return (predicted - expected) / expected * 100.", "title": "" }, { "docid": "bcc9bcc1d8957b09890733c91c8183e2", "score": "0.5127389", "text": "def calculate_ppf_from_samples(qs, samples):\n\n qs = np.atleast_1d(qs)\n values = np.zeros_like(qs)\n\n # use bins from min to max\n bins = np.linspace(samples.min(), samples.max(), 1000)\n # asign samples to bins\n bin_idx = np.digitize(samples, bins)\n # count samples per bin --> histogram\n n = np.bincount(bin_idx.squeeze())\n # take the normalized cum sum as the cdf\n cdf = np.cumsum(n) / np.sum(n)\n\n # for every quantile, get the corresponding value on the cdf\n for i, qi in enumerate(qs):\n quantile_idx = np.where(cdf >= qi)[0][0]\n values[i] = bins[quantile_idx]\n\n return values", "title": "" }, { "docid": "e70beef75d9ff75213c5b115f3c2b1af", "score": "0.512264", "text": "def KL_divergence(self, p, q):\n return sum(p[x] * log((p[x]) / (q[x])) for x in range(len(p)) if p[x]!= 0)", "title": "" }, { "docid": "098c04db97e9db5163756fe462fe86f8", "score": "0.510809", "text": "def qFloatDistance(p_float, p_float_1): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "2160e03b2e84bcadfc16511dabee1b52", "score": "0.5102192", "text": "def cdf(self, k):\n\n if type(k) is not int:\n k = int(k)\n if k < 0:\n return 0\n if self.n is not None and self.p is not None and k is not None:\n n = self.n\n p = self.p\n k = 
abs(k)\n cdf = 0\n\n for i in range(0, k + 1):\n cdf = cdf + self.pmf(i)\n return cdf", "title": "" }, { "docid": "ed99d71adb69715df1dbe221a9b60b4c", "score": "0.5089725", "text": "def cdf(self):\n return geom.cdf(self.__r, self.__p)", "title": "" }, { "docid": "c6d9d1e5330a6c792e49e34e7e3bddae", "score": "0.50872064", "text": "def fcdfuniform(x0,xf): \n def fun(x):\n if (x<x0 or x>xf): return 0.\n x = float(x)\n val = float(x)/(float(xf)-float(x0))\n return val\n return fun", "title": "" }, { "docid": "4920ec476991ac89bcae00c95546a68e", "score": "0.5078218", "text": "def quantize_float(self, f, q):\n return int(round(f / q) * q)", "title": "" }, { "docid": "f373fc312a574ae1f1ac0016a21052b1", "score": "0.5068691", "text": "def cdf_qz(self, x):\n xn = (x - limit_a) / (limit_b - limit_a)\n logits = math.log(xn) - math.log(1 - xn)\n return torch.sigmoid(logits * self.temperature - self.qz_loga).clamp(min=epsilon, max=1 - epsilon)", "title": "" }, { "docid": "92413bab15b488324970e415e8a276a6", "score": "0.5065336", "text": "def euclidean_similarity(p, q):\n return 1 / (1 + _euclidean_dist(p, q))", "title": "" }, { "docid": "96e7a68a8d1bfc31299dd53915c7414c", "score": "0.5063473", "text": "def testCalcPdf():\n nn = np.arange(1,101,dtype=np.double)\n tau = np.log(nn)\n (bins,hist) = stat.flux_pdf(tau, 20)\n print(bins)\n assert bins[0] == 0.+1/40.\n assert bins[-1] == 1.-1./40.\n assert np.min(hist) == 0.\n assert np.max(hist) > 1.\n print(hist)\n expected = np.array([ 16. , 2.2, 0.6, 0.2, 0.2, 0.2, 0.2, 0. , 0. , 0. , 0.2, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.2])\n assert np.abs(np.sum(expected) - np.sum(hist)) < 1e-3\n #One of the points moves around from bin 1 to bin 2, depending on roundoff.\n assert np.all(np.abs(hist[3:] - expected[3:]) < 1e-3)\n assert np.abs(hist[0] - expected[0]) < 1e-2", "title": "" }, { "docid": "a03ae314cf4a627c321b93cc72d2f22b", "score": "0.5044261", "text": "def pq(i, s, c, pn, pa, d, n ):\n result = (8 * pa * ( s + c) )/ (3 * pn * ( d + n + i ) )\n return result", "title": "" }, { "docid": "1f1cc4cb043ac7a5d7e14df1e1bfac22", "score": "0.50432694", "text": "def calc_CE(p,q):\n if len(p) != len(q):\n print \"p and q not equal length!\"\n else:\n CE_res = 0\n for vector_idx in range(0,len(p)):\n CE_res +=p[vector_idx]*math.log(q[vector_idx])\n \n return CE_res", "title": "" }, { "docid": "c01ad8904d64d8a7a834be925f316582", "score": "0.5034261", "text": "def _get_value(self, p):\n return numpy.linalg.norm(safe_dot(self._fdmat, p), 1)", "title": "" }, { "docid": "df449ad380f2eb5d1aea92309f6a44b3", "score": "0.50315076", "text": "def _compute_pvalues(self):\n tstat = self.coef / self.se\n self.pvalue = 2 * t.cdf(-abs(tstat), self._dgf)", "title": "" }, { "docid": "c0c71ccc0f51a41ad649fed6cbf03973", "score": "0.5030248", "text": "def percentage(self):\r\n return self.currval * 100.0 / self.maxval", "title": "" }, { "docid": "7a8d0b56cb7f3905483dd3d316321e33", "score": "0.5029943", "text": "def chi_square(x, p, df=1):\n N = len(x)\n end = N\n sim = numpy.logical_not(numpy.logical_xor(x[0:end-p], x[p:end]))*1\n s = ((numpy.ones((N-p,), float)-sim)**2).sum()\n D = s/(N-p)\n p_val = 1 - igam(df/2.0, D/2)\n return D, p_val", "title": "" }, { "docid": "e623f399cd6f718e2e3ba36140737fa0", "score": "0.5024768", "text": "def getProgress(self):\r\n npts = self.getNumPoints()\r\n if npts==0:\r\n return -1.0\r\n return float(self.currentPoint)/float(npts)", "title": "" }, { "docid": "ab3621965875fb38b371f73e63a645f7", "score": "0.50207853", "text": "def percent_change(self, 
finial, initial):\n return (float(finial) - float(initial) / float(initial))", "title": "" }, { "docid": "b85c97e386b4381226949014f8792cdb", "score": "0.5019965", "text": "def dQ_dt(self, Qstr, ix, points):\n if ix == 0:\n ix = 1\n pt1 = points[ix]\n pt0 = points[ix-1]\n t1 = points.indepvararray[ix]\n t0 = points.indepvararray[ix-1]\n return (pt1[Qstr]-pt0[Qstr])/(t1-t0)", "title": "" }, { "docid": "e071a955a7619f08033044b7dbca9e01", "score": "0.500945", "text": "def result_percentage_by_table_index(x, y):\r\n #TODO: check what 4 values are returned\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.shading.percentage.index\", x, y)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_list(val, float, float, float, float)", "title": "" }, { "docid": "cdf70407c11e2ebe26e8220370547567", "score": "0.50088155", "text": "def distance(p, q):\n dist = ((p[0] - q[0])**2 + (p[1] - q[1])**2)\n return math.sqrt(dist)", "title": "" }, { "docid": "b6302560bc2c2df748ec9162ad397cac", "score": "0.50037766", "text": "def _calc_npq(fmp, fm):\n\n out_flt = np.ones(shape=fm.shape) * np.nan\n fmp = np.squeeze(fmp)\n div = np.divide(fm, fmp, out=out_flt,\n where=np.logical_and(fm > 0, np.logical_and(fmp > 0, fm > fmp)))\n sub = np.subtract(div, 1, out=out_flt.copy(),\n where=div >= 1)\n return sub", "title": "" }, { "docid": "160ea080f6eece8d122232dcfbf819aa", "score": "0.49987778", "text": "def pdf(self):\r\n\r\n a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))\r\n b = (self.p ** k )\r\n \r\n return a * b", "title": "" }, { "docid": "b2b055a345e0fee5e4643cd147cd4808", "score": "0.49956444", "text": "def _fast_logpdf(self, x):\n return self._fast_logpdf_probit(x) - probit_logJ(x, self.bounds, self.probit)", "title": "" }, { "docid": "18132b1304a000c69cf69deb44d3258d", "score": "0.4995324", "text": "def CcFunc( df, gm ):\r\n return df['pci'] - df['a']/gm", "title": "" }, { "docid": "9837fbdeb4c1047441a8cbde2599d331", "score": "0.49932036", "text": "def percentile(li, pc):\n if not li:\n return None\n k = (len(li)-1) * pc\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return li[int(k)]\n d0 = li[int(f)] * (c-k)\n d1 = li[int(c)] * (k-f)\n return d0 + d1", "title": "" }, { "docid": "110de13f413a988cc23d75fe03036976", "score": "0.49894333", "text": "def cdf(self, X):\n return stats.norm._cdf(X)", "title": "" } ]
15e5fdf7b8cc1f7fa79217b67c1d0b34
Convert, as necessary, l,b,d into floats
[ { "docid": "74bacff892397f91578a1959e95a602d", "score": "0.0", "text": "def parse_lbd(gal_l, gal_b, distance):\n l = parse_units(gal_l, 'deg', 'Galactic longitude')\n b = parse_units(gal_b, 'deg', 'Galactic latitude')\n d = parse_units(distance, 'kpc', 'distance')\n return l, b, d", "title": "" } ]
[ { "docid": "8e5c2f05726d138a1343dda176d0b088", "score": "0.64943665", "text": "def str2flt(vec):\n newvec=[]\n for v in vec:\n try:\n newv=float(v)\n except:\n newv=None\n finally:\n newvec.append(newv)\n return newvec", "title": "" }, { "docid": "0595ceec48509bfd969a8d3e0d12a768", "score": "0.6491393", "text": "def asFloat(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "0595ceec48509bfd969a8d3e0d12a768", "score": "0.6491393", "text": "def asFloat(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "01696d528815f236f04947ea1e17ec0e", "score": "0.64913887", "text": "def get_float(bindata):\n v = struct.unpack('BBBBB', bindata)\n e = v[0]\n r = v[1] << 24 | v[2] << 16 | v[3] << 8 | v[4]\n s = -1 if (r & 0x80000000) != 0 else 1\n if e == 0:\n return 0.0\n e -= 128\n d = (r | 0x80000000) * pow(2, e - 32)\n return s * d", "title": "" }, { "docid": "822287db0d4287797aa296daf571f54b", "score": "0.64598525", "text": "def double_to_float(d):\n return javabridge.make_instance(\"java/lang/Float\", \"(D)V\", d)", "title": "" }, { "docid": "a2054e69c22b18953755ef21aac7d065", "score": "0.6333949", "text": "def _readFloat(data):\n\n\tif(len(data)<4):\n\t\tprint \"Error: too few bytes for float\", data, len(data)\n\t\trest = data\n\t\tfloat = 0\n\telse:\n\t\tfloat = struct.unpack(\">f\", data[0:4])[0]\n\t\trest = data[4:]\n\n\treturn (float, rest)", "title": "" }, { "docid": "6a8149baa455a99809a4c637d2720c99", "score": "0.6321158", "text": "def list_as_float(l):\n return [float(i) for i in l]", "title": "" }, { "docid": "61370dd6632ab85f0df1d0ba1f2debb5", "score": "0.63206446", "text": "def read_float(self):\n return \"unpack('f', read_fun(4))[0]\"", "title": "" }, { "docid": "eb3b96ef22dc9ccd2307c212f8ae5e09", "score": "0.62402076", "text": "def str2float(l):\n cleanstr = l.strip()\n splitstr = cleanstr.split()\n intlist = [float(u) for u in splitstr]\n return intlist", "title": "" }, { "docid": "f433806ac7c5427288069720817cc784", "score": "0.621876", "text": "def readLEFloat(f):\n read_bytes = f.read(4)\n return struct.unpack(\"<f\", read_bytes)[0]", "title": "" }, { "docid": "29857da1bc7a939696115f9a8f7c9b97", "score": "0.62182057", "text": "def convert_float(self, x):\n if x[0] == '(' and x[-1] == ')':\n x_new = re.findall(\"\\d*\\.?\\d+\", x)\n return -float(x_new[0])\n else:\n # we can ignore there type, just treate them as str and extract them\n x_new = re.findall(\"\\d*\\.?\\d+\", x)\n # transform to float\n return float(x_new[0])", "title": "" }, { "docid": "13fd6631e34a1b3405cf4ad1411099f8", "score": "0.6135809", "text": "def convertToFloat(ra, dec):\n\n fra = ra[0] * (ra[1] * 10000.0 + ra[2] * 100.0 + ra[3])\n fdec = dec[0] * (dec[1] * 10000.0 + dec[2] * 100.0 + dec[3])\n\n return (fra, fdec)", "title": "" }, { "docid": "5f16bd7aab0b32ac84723ce49eb9914d", "score": "0.6131905", "text": "def convert_nums_to_floats(li):\r\n floatified = []\r\n for number in li:\r\n floatified.append(float(number))\r\n return floatified", "title": "" }, { "docid": "87f98f5b871c52d8fc7e31dd88992545", "score": "0.61277336", "text": "def s2f(s):\n try:\n return float(s)\n except:\n return ''", "title": "" }, { "docid": "defffea0df33d98383d19b3bc6a4b477", "score": "0.6094277", "text": "def _reg2float(self, reg):\n s = struct.pack('>l', reg)\n return struct.unpack('>f', s)[0]", "title": "" }, { "docid": "74ae04cc976a03b89c3e9a0817a7ee06", "score": "0.6081809", "text": "def get_float(start: int, num_bytes: int, ens: list):\n try:\n return struct.unpack(\"f\", ens[start:start + num_bytes])[0]\n except 
Exception as e:\n logging.debug(\"Error creating a float from bytes. \" + str(e))\n return 0.0", "title": "" }, { "docid": "a7b7281a07380ccaec0f8fdab6110f21", "score": "0.60814905", "text": "def _vax_to_ieee_single_float(data):\n f = []\n nfloat = int(len(data) / 4)\n for i in range(nfloat):\n\n byte2 = data[0 + i*4]\n byte1 = data[1 + i*4]\n byte4 = data[2 + i*4]\n byte3 = data[3 + i*4]\n \n # hex 0x80 = binary mask 10000000\n # hex 0x7f = binary mask 01111111\n \n sign = (ord(byte1) & 0x80) >> 7\n expon = ((ord(byte1) & 0x7f) << 1 ) + ((ord(byte2) & 0x80 ) >> 7 )\n fract = ((ord(byte2) & 0x7f) << 16 ) + (ord(byte3) << 8 ) + ord(byte4)\n \n if sign == 0:\n sign_mult = 1.0\n else:\n sign_mult = -1.0;\n \n if 0 < expon:\n # note 16777216.0 == 2^24 \n val = sign_mult * (0.5 + (fract/16777216.0)) * pow(2.0, expon - 128.0) \n f.append(val)\n elif expon == 0 and sign == 0:\n f.append(0)\n else: \n f.append(0)\n # may want to raise an exception here ...\n \n return f", "title": "" }, { "docid": "820664c8bbc5cc9433e0129814d67120", "score": "0.6066476", "text": "def Filletrad(self) -> float:", "title": "" }, { "docid": "2cc33fe89574619bc9120775bf609119", "score": "0.6053241", "text": "def decode_float(buf, pos):\r\n return decode_struct(_float_fmt, buf, pos)", "title": "" }, { "docid": "cad83b4063137457197fe05bb23b0adb", "score": "0.60521317", "text": "def c_to_f(t_c):\r\n if t_c is None:\r\n return None\r\n return 1.8 * t_c + 32.0", "title": "" }, { "docid": "474646c2a6667491792ff8fde4f37d3a", "score": "0.6049819", "text": "def fixed2Float(value):\n return float(value) / 64", "title": "" }, { "docid": "96c0caf85f3c977fbab39a36501ff811", "score": "0.6010712", "text": "def to_F(self, t = None):\n if not t:\n t = self.t\n f = float(((t * 9.0) / 5.0) + 32)\n return(f)", "title": "" }, { "docid": "e32c7c834bd9440c570587bdf8281fff", "score": "0.600335", "text": "def test_float_to_float(self) -> None:\n self._assert_parses_to_expected_float(0.1, 0.1)", "title": "" }, { "docid": "03b4a077f7f739818efbfe6ea77f0445", "score": "0.5974856", "text": "def parse_float(bytes):\n longValue = parse_long(bytes)\n exponent = ((longValue >> 23) & 0xff) # float\n exponent -= 127.0\n exponent = pow(2,exponent)\n mantissa = (longValue & 0x7fffff)\n mantissa = 1.0 + (mantissa/8388607.0)\n floatValue = mantissa * exponent\n if ((longValue & 0x80000000) == 0x80000000):\n floatValue = -floatValue\n return floatValue", "title": "" }, { "docid": "805c3e9632cec08feb33ff36f0bffa8b", "score": "0.59746814", "text": "def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (long(5.7), 5L)\n ]:\n assert floatify(input) == expect", "title": "" }, { "docid": "e42571a00cabc734106381d22891b8e8", "score": "0.5956831", "text": "def __Bytes2Float(self, data: bytes):\n s = data.decode('UTF-8')\n s = s[:-2]\n f = float(s)\n return f", "title": "" }, { "docid": "cb5fb1a2d7c98752272e508731797fa7", "score": "0.59551024", "text": "def tofloat(x):\n from numpy import nan\n try: return float(x)\n except: return nan", "title": "" }, { "docid": "07eec67c3640bed54a83b33ac316199a", "score": "0.5908836", "text": "def convertListElementsToFloat(stringList):\n try:\n return [float(x) for x in stringList]\n except Exception:\n print Exception.message\n return None", "title": "" }, { "docid": "c3ef1dcd0c3803036965b654b96735a2", "score": "0.58745927", "text": "def to_float(*args: List[Any]) -> List[int]:\n result = []\n for n in args:\n try:\n n = float(n)\n except (ValueError, TypeError):\n n = 0\n 
result.append(n)\n return result if len(result) > 1 else result[0]", "title": "" }, { "docid": "1230ecf2f2b72c0efa9843538fc04a44", "score": "0.5858018", "text": "def get_float_data(d, name):\n data = list(d[name])\n while '' in data:\n data.remove('')\n\n return np.array(list(map(float, data)))", "title": "" }, { "docid": "16a5fc15f30647bde462635c017e25f2", "score": "0.5850872", "text": "def fortran_float(s, blank_value = 0.0):\n try: return float(s)\n except ValueError:\n s = s.strip()\n if not s: return blank_value\n else:\n try:\n s = s.lower().replace('d', 'e').replace(' ', '')\n return float(s)\n except:\n try:\n return float(''.join([s[0], s[1:].replace('-', 'e-')]))\n except ValueError:\n try:\n return float(''.join([s[0], s[1:].replace('+', 'e')]))\n except ValueError: return nan\n except: return nan", "title": "" }, { "docid": "0622fc3e7f2ae779dc64468e5e6b754a", "score": "0.58380204", "text": "def to_floatSA(parser, node, children):\n if parser.debug:\n print(\"Converting {}.\".format(node.value))\n return float(node.value)", "title": "" }, { "docid": "99bfddcd658607495eaecabdc822e31f", "score": "0.58245426", "text": "def to_float(data):\n\tnewdata = [[] for _ in range(len(data) - 1)]\n\tfor i in range(1, len(data)):\n\t\tnewdata[i - 1] = list(map(float, data[i]))\n\treturn [data[0]] + newdata", "title": "" }, { "docid": "455ad0568d1bfd856a2dcc7de23c37c7", "score": "0.5821017", "text": "def _float(s):\r\n if s:\r\n return float(s)\r\n else:\r\n return None", "title": "" }, { "docid": "449948e66aa9659aee1a9deb41c4e201", "score": "0.5797708", "text": "def float(self, *args):\n self.tuple(args)\n self.not_empty(args)\n for i in args:\n assert isinstance(i, float), exception(self.__needed(float(), i))", "title": "" }, { "docid": "262807f29203fe5fdd85b98ee49633d8", "score": "0.5787658", "text": "def _convert_to_floats(self, data):\n for key, value in data.items():\n data[key] = float(value)\n\n return data", "title": "" }, { "docid": "688369778bdf504553f10c94e286c550", "score": "0.57765657", "text": "def to_F(self, t = None):\n if not t:\n t = self.t\n f = float(((t * 9.0) / 5.0) - 459.67)\n return(f)", "title": "" }, { "docid": "14c44b0efc8d6a111bd38f039398dcef", "score": "0.5756928", "text": "def _FLOAT_to_python(self, value, desc=None): # pylint: disable=C0103\n return float(value)", "title": "" }, { "docid": "0ed414c6c1146cc71c99eb8e8d6a482a", "score": "0.57565445", "text": "def give_me_a_float():\n a = 2.1\n b = 3.2\n sum = a+b\n return sum", "title": "" }, { "docid": "a1cc1e2ba187377a649e818b51fb2a5a", "score": "0.5743722", "text": "def _float():\n # How many bits per float to use?\n return np.float64 if conf.double_precision else np.float32", "title": "" }, { "docid": "3239e0409dd792bc8b43aa8acb04cc6b", "score": "0.5740691", "text": "def map_float(cls, english_units, value):\n try:\n return float(value)\n except Exception:\n return None", "title": "" }, { "docid": "d9ce8d3e5b09ec0b6cb0101b5e4595e4", "score": "0.57168925", "text": "def recv_float(self):\n return unpack('f', self.recv(4))[0]", "title": "" }, { "docid": "2d2d366194de437f318b5f096efdd562", "score": "0.5694284", "text": "def convert_c_to_f(temperature_c):\n pass\n return temperature_f", "title": "" }, { "docid": "d6ba1aa8fa465419c8388dbb4812ffad", "score": "0.56836176", "text": "def c_to_f(celsius):\n\treturn round(((celsius * 1.8) + 32), 2)", "title": "" }, { "docid": "0c57441a335b1478f1da43ee57dff1c7", "score": "0.56831235", "text": "def conver_to_float(value):\n try:\n return float(value)\n except:\n try:\n value 
= re.sub('[^0-9], .', '', value)\n return float(value)\n except:\n return 0.0", "title": "" }, { "docid": "9eee7137914337259d078e29292f77d3", "score": "0.56769115", "text": "def x_to_float(x):\n return date2num(x) if isinstance(x, datetime) else x", "title": "" }, { "docid": "378f117ceba1b264de7d741517d98816", "score": "0.56633073", "text": "def to_float(_str,var_name):\n try:\n _int=float(_str)\n except:\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_FLOAT_VALUE\")%var_name)\n return _int", "title": "" }, { "docid": "00fc4ad02b77743bdc2efc5bd509125c", "score": "0.56573075", "text": "def myfloat(x):\r\n try:\r\n return float(x)\r\n except ValueError:\r\n return float('nan')", "title": "" }, { "docid": "13c48dba7449d0789f7d4915a1f3e310", "score": "0.56465715", "text": "def fraction_to_float(a, b):\n # TODO(pts): Would this make it work faster?\n # g = gcd(a, b)\n # if g > 1:\n # a /= g\n # b /= g\n #\n # Smart implementation details in long_true_divide in Objects/longobject.c.\n return a.__truediv__(b)", "title": "" }, { "docid": "c9f94a2a1b5df568d0685690703f5a63", "score": "0.56412506", "text": "def decode_floats(data):\n shape = data.shape\n dtype = data.dtype\n if len(shape) < 3:\n raise ValueError('invalid data shape')\n if dtype.char not in 'dfe':\n raise ValueError('not a floating point image')\n littleendian = data.dtype.byteorder == '<' or (\n sys.byteorder == 'little' and data.dtype.byteorder == '=')\n # undo horizontal byte differencing\n data = data.view('uint8')\n data.shape = shape[:-2] + (-1,) + shape[-1:]\n numpy.cumsum(data, axis=-2, dtype='uint8', out=data)\n # reorder bytes\n if littleendian:\n data.shape = shape[:-2] + (-1,) + shape[-2:]\n data = numpy.swapaxes(data, -3, -2)\n data = numpy.swapaxes(data, -2, -1)\n data = data[..., ::-1]\n # back to float\n data = numpy.ascontiguousarray(data)\n data = data.view(dtype)\n data.shape = shape\n return data", "title": "" }, { "docid": "d749d61579b46234f18219960c254a08", "score": "0.56344044", "text": "def myfloat(float_string):\n float_string = str(float_string)\n errormsg = \"ValueError: Input must be decimal or integer string\"\n try:\n if float_string.count(\".\") == 1 and float_string.count(\",\") == 0:\n return float(float_string)\n else:\n midle_string = list(float_string)\n while midle_string.count(\".\") != 0:\n midle_string.remove(\".\")\n out_string = str.replace(\"\".join(midle_string), \",\", \".\")\n return float(out_string)\n except ValueError as error:\n print(\"%s\\n%s\" %(errormsg, error))\n return None", "title": "" }, { "docid": "b098e88184e82c3341d7f7d283b3679c", "score": "0.5626596", "text": "def test_4byte_float_values_from_power_meter(self):\n assert DPT4ByteFloat.from_knx(DPTArray((0x43, 0xC6, 0x80, 00))) == 397\n assert DPT4ByteFloat.to_knx(397) == DPTArray((0x43, 0xC6, 0x80, 00))\n assert DPT4ByteFloat.from_knx(DPTArray((0x42, 0x38, 0x00, 00))) == 46\n assert DPT4ByteFloat.to_knx(46) == DPTArray((0x42, 0x38, 0x00, 00))", "title": "" }, { "docid": "82723307a101b7715aa2d2eed365c241", "score": "0.5624525", "text": "def lbd2xyz(l,b,d,R0=8.5):\n\n brad = np.deg2rad(np.atleast_1d(b).copy().astype(np.float64))\n lrad = np.deg2rad(np.atleast_1d(l).copy().astype(np.float64))\n dd = np.atleast_1d(d).copy().astype(np.float64)\n\n x = dd*np.sin(0.5*np.pi-brad)*np.cos(lrad)-R0\n y = dd*np.sin(0.5*np.pi-brad)*np.sin(lrad)\n z = dd*np.cos(0.5*np.pi-brad)\n\n return x,y,z", "title": "" }, { "docid": "5f7d7a07c6766681a094ee8478bc6f58", "score": "0.5609496", "text": "def convert_c_to_f(temp_c):\n temp_f = 
(9/5) * temp_c + 32\n return temp_f", "title": "" }, { "docid": "728d349eff2b1d345e1481f0df619b3b", "score": "0.5594384", "text": "def ts_float32(val):\n return np.float64(val)", "title": "" }, { "docid": "728d349eff2b1d345e1481f0df619b3b", "score": "0.5594384", "text": "def ts_float32(val):\n return np.float64(val)", "title": "" }, { "docid": "f2837d6f1b068564b258e9bbd73ba260", "score": "0.5571009", "text": "def __float__(self):\n return float(self.toval())", "title": "" }, { "docid": "2b4269b155cad2a5ba96e095cac70e84", "score": "0.55693114", "text": "def ReadFloat( BinStream ):\n return struct.unpack( 'f', BinStream.read( 4 ) )[0]", "title": "" }, { "docid": "2af34e6baf13e2c8c28210948072e872", "score": "0.5566809", "text": "def convert(r, g, b):\n\n return (0.21*r) + (0.71*g) + (0.07*b)", "title": "" }, { "docid": "4c26454ce85fe816bea3f5bcb017e5f8", "score": "0.55645865", "text": "def __convert_structure_to_float(sign, degs, mins=0, secs=0.0) :\n v = float(degs) \n if mins is not None:\n v += float(mins) / 60.\n if secs is not None:\n v += secs / 3600.\n return -v if sign == \"-\" else v", "title": "" }, { "docid": "c687689be84a833cac11e73b2f7b2630", "score": "0.5541043", "text": "def _parse_float(s):\n return float(s[0] + '.' + s[1:6] + 'e' + s[6:8])", "title": "" }, { "docid": "c3613678495696da8f8ef008caae80c8", "score": "0.5540676", "text": "def _get_f(self, lbda, vf):\n return vf/lbda", "title": "" }, { "docid": "702a2fdea63d06742e71a2b4d8af997f", "score": "0.55336756", "text": "def getFloatBlindData(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "be782aa381317a6504170787f54a1e76", "score": "0.5516447", "text": "def as_float(my_val):\n return float(as_string(my_val))", "title": "" }, { "docid": "8f1d5246aab155eb90624da8cf324ca4", "score": "0.551131", "text": "def read_float(stream: BinaryIO) -> float:\n return cast(float, struct.unpack('>f', stream.read(4))[0])", "title": "" }, { "docid": "5cd83288b21576842d602d4cebc661c5", "score": "0.55039555", "text": "def sat_to_float(data):\n\tsat_lister = np.array([float(''.join(str(i).split(','))) for i in data['mn_sat']])\n\tsat_lister = pd.DataFrame(data = sat_lister, columns = ['mn_sat'])\n\treturn data.update(sat_lister)", "title": "" }, { "docid": "aa92ae3ac7c08299c8574abe0a9be711", "score": "0.550349", "text": "def read_float(s):\n\n try:\n return float(s)\n except:\n return code.giss_data.MISSING", "title": "" }, { "docid": "1608a9dfe95116eb6bbdae2d58efbd44", "score": "0.5502195", "text": "def simplify_float(data: float) -> Union[int, float]:\n\n return int(data) if data.is_integer() else data", "title": "" }, { "docid": "77c9c44c18e1553b97e5ff236cd4d4c8", "score": "0.5496041", "text": "def sfields_to_xyz_vector(flds):\n v = (float(flds[0].strip('x')), float(flds[1].strip('y')))\n z = float(flds[2].strip('z')) if len(flds)==3 else 0\n v += (z,)\n return v", "title": "" }, { "docid": "0df3c8f08fcb29a461c12b76e89bbf16", "score": "0.5494627", "text": "def __convert_using_float_repr(stringValue):\n try : \n v = float(stringValue) \n return v\n except ValueError :\n return None;", "title": "" }, { "docid": "ed5b5e94f70033b8caf955380679f453", "score": "0.5480241", "text": "def _audio_to_float(data):\n if data.dtype == np.float32:\n return data\n return np.true_divide(data, np.iinfo(data.dtype).max, dtype=np.float32)", "title": "" }, { "docid": "10b7c341c5ace75f113004497cc1a902", "score": "0.5475817", "text": "def rasterToFloat(self, inRaster, outFloat):\r\n self.gp.RasterToFloat_conversion(inRaster, outFloat)", "title": "" }, { 
"docid": "3a43220c309f97bf13d0a42aef383795", "score": "0.5475184", "text": "def UFLOAT(value): # noqa: N802\n if value is None:\n raise ValueError(\"None is not a valid float\")\n value = float(value)\n if value < 0:\n raise ValueError(\"Only positive numbers are allowed\")\n return value", "title": "" }, { "docid": "6c11881cd64ba8d2f81fbf7d8bcd1d56", "score": "0.5464999", "text": "def to_float(x):\n x = np.asarray(x)\n if x.dtype == object:\n x = x.item()\n cls = type(x)\n return _registry.get_to_float(cls)(x)\n else:\n return np.float64(x)", "title": "" }, { "docid": "0ec2384550a8c9341a8a4d9dafb71ef9", "score": "0.54649407", "text": "def ieee_754_conversion(self, hex_string):\n\n return struct.unpack(\"<f\", binascii.unhexlify(hex_string.replace(\" \", \"\")))[0]", "title": "" }, { "docid": "8b4c22713f4036ca07538bd3c8c42cf0", "score": "0.5464629", "text": "def convert_c_to_f(temperature_c):\n temperature_f = temperature_c * (9/5) + 32\n return temperature_f", "title": "" }, { "docid": "d5110c9b1df3268310d4f3b097b5c04a", "score": "0.54644907", "text": "def float_(self, str):\n try:\n ans = float(str)\n except:\n ans = None\n return ans", "title": "" }, { "docid": "a6139738b64d69e9f39f7e345ea45362", "score": "0.54618716", "text": "def floatx():\n return _SESSION.floatx", "title": "" }, { "docid": "0a54e2ce5499b8ade136af36ea8c68d7", "score": "0.545802", "text": "def str2float(str):\n return float(str.replace(\",\", \".\"))", "title": "" }, { "docid": "70bb2b65cd09244fc9e7a08173afdacd", "score": "0.54419595", "text": "def change_to_float(array):\n float_array = []\n for i in array:\n float_array.append(float(i))\n return float_array", "title": "" }, { "docid": "715d472841280da87f22b045a00e7d01", "score": "0.5439318", "text": "def readLEDouble(f):\n read_bytes = f.read(8)\n return struct.unpack(\"<d\", read_bytes)[0]", "title": "" }, { "docid": "ce91d06b25dc3f9132d933377fe22f68", "score": "0.54321456", "text": "def test_4byte_float_values_from_voltage_meter(self):\n assert DPT4ByteFloat.from_knx(DPTArray((0x43, 0x65, 0xE3, 0xD7))) == 229.89\n assert DPT4ByteFloat.to_knx(229.89) == DPTArray((0x43, 0x65, 0xE3, 0xD7))", "title": "" }, { "docid": "4d19170fbe5c6d98e6b4f44450f4cdaa", "score": "0.5426749", "text": "def uint_to_float(val, bits, scale):\r\n assert val >= 0, 'uint_to_float(val={}, bits={}, scale={}) expects unsigned integer for parameter <val> !'.format(val,bits,scale)\r\n assert scale != 0, 'uint_to_float(val={}, bits={}, scale={}) expects non-zero value for parameter <scale> !'.format(val,bits,scale)\r\n assert bits > 0, 'uint_to_float(val={}, bits={}, scale={}) expects positiv value for parameter <bits> !'.format(val,bits,scale)\r\n mask= (2<<bits-1)-1\r\n val= val & mask\r\n neg_flag= (val > mask>>1)\r\n return (neg_flag*-(mask+1) +val)/scale #apply mask & 2th complement & scale\r", "title": "" }, { "docid": "bbdad10a9e2e1dad46d8d79ba886fc4a", "score": "0.5425022", "text": "def Translate(self, float_tuple, float_tuple_1):\n ...", "title": "" }, { "docid": "baa2f7e30bd247fa640e23b05c942af4", "score": "0.5418508", "text": "def __float__(self):\r\n return float(self[0])", "title": "" }, { "docid": "eaf8a97bdc1ef2e48f4bc197901a4aa4", "score": "0.541591", "text": "def to_dlibFloat(self):\n return dlib.drectangle(*self)", "title": "" }, { "docid": "8118dba4340e518e07ec953e12af1837", "score": "0.54109144", "text": "def getSerialFloat():\n result = getSerialBytes()\n\n if str(result) == \"ovf\" or len(result) == 0: # check for overflow\n result = -1.0 # -1.0 is not necessarily a great 
\"error response\", except that values from the Sparki should be positive\n else:\n result = float(result)\n\n printDebug(\"In getSerialFloat, returning \" + str(result), DEBUG_DEBUG)\n return result", "title": "" }, { "docid": "a9a4710b2dfd7c27c88e3977bdf3a7a5", "score": "0.5403759", "text": "def to_float(value_str):\n try:\n return float(value_str) # first best\n except ValueError:\n return float(value_str.replace(u\",\", u\".\")) # try to replace colon with point", "title": "" }, { "docid": "06a33adac9aeb8ab15f551c0b64dc494", "score": "0.54023767", "text": "def _ConvertFloat(value):\n if value == 'nan':\n raise ParseError('Couldn\\'t parse float \"nan\", use \"NaN\" instead')\n try:\n # Assume Python compatible syntax.\n return float(value)\n except ValueError:\n # Check alternative spellings.\n if value == '-Infinity':\n return float('-inf')\n elif value == 'Infinity':\n return float('inf')\n elif value == 'NaN':\n return float('nan')\n else:\n raise ParseError('Couldn\\'t parse float: {0}'.format(value))", "title": "" }, { "docid": "16da2d9f089d4f562fd167ab3d7fb1a5", "score": "0.5388532", "text": "def test_Data__float__(self):\n for x in (-1.9, -1.5, -1.4, -1, 0, 1, 1.0, 1.4, 1.9):\n self.assertEqual(float(cf.Data(x)), float(x))\n self.assertEqual(float(cf.Data(x)), float(x))\n\n with self.assertRaises(TypeError):\n float(cf.Data([1, 2]))", "title": "" }, { "docid": "2b392d08eecebe3ff4beeb694f0f2fd4", "score": "0.538823", "text": "def convert(x,a,b,c=0,d=1):\n return c + float(x-a)*float(d-c)/float(b-a)", "title": "" }, { "docid": "2d4c4e00befaa2c2507f1f9137b834dd", "score": "0.5387942", "text": "def mutate_float(self, d: float, factor: float) -> float:\n raise NotImplementedError()", "title": "" }, { "docid": "cea969d8e9838145f0db558173f46492", "score": "0.53812855", "text": "def float(values):\n if not isinstance(values, (tuple, list)):\n values = [values]\n return tf.train.Feature(float_list=tf.train.FloatList(value=values))", "title": "" }, { "docid": "6d257cc4be77d4aa78cb371218de822a", "score": "0.53668463", "text": "def parse_float(data, none_is_zero=False):\n if isinstance(data, float):\n return data\n\n if data is None:\n if none_is_zero:\n return 0.0\n else:\n return None\n\n try:\n return float(str(data))\n\n # except ValueError: - leave this one as is\n\n except TypeError:\n # might be None, or other complex type\n pass\n\n # if still here, we can't make into integer\n raise ValueError(\"parse_float(%s) is not a valid float\", data)", "title": "" }, { "docid": "bdb896ab70390415e70dd295c3fc9657", "score": "0.5366246", "text": "def convert_to_numeric(self):\n\n for i in range(len(self.data)):\n for j in range(len(self.data[i])):\n try: # Try to convert to a float and save in dataset if possible\n float_value = float(self.data[i][j])\n self.data[i][j] = float_value\n except ValueError: # Otherwise, skip it\n pass", "title": "" }, { "docid": "c6367a931a39a60ea885417a24190585", "score": "0.53607374", "text": "def float(self, float=True):\n pass # implemented in Ada", "title": "" }, { "docid": "852879e9560548ed597a8fa0e1b001c5", "score": "0.5350287", "text": "def test_int_to_float(self) -> None:\n self._assert_parses_to_expected_float(1, 1.0)", "title": "" }, { "docid": "584cab4ae9d60636ca6935515f82e28b", "score": "0.5348468", "text": "def zpfloat(val):\n try:\n return float(val)\n except:\n #if isinstance(default, float) and default is not None: return default\n return None", "title": "" }, { "docid": "4e3388f60cd6f8368dd50cf53b2d0b7c", "score": "0.5341325", "text": "def 
convert_float(newfloat):\n\n if newfloat != 0.0:\n LOGGER.debug('float->{0}'.format(newfloat))\n\n # Extract sign and absolute value\n b_sign = '0'\n normalized = False\n if newfloat < 0.0:\n b_sign = '1'\n newfloat = abs(newfloat)\n\n # Process integer part\n intpart = int(newfloat)\n mantissa = '{0:b}'.format(intpart)\n if intpart == 0:\n mantissa = ''\n else:\n normalized = True\n\n # Base exponent, possibly not normalized yet\n newexp = len(mantissa)\n\n # Process fractional part\n fractpart = newfloat - intpart\n i = 0 # Bit counter\n fractbin = ''\n # Bit by bit, one extra bit for rounding reasons\n while i < 33:\n fractpart *= 2\n if int(fractpart) > 0:\n if not normalized:\n normalized = True\n fractbin = fractbin[i:]\n i = 0 # Normalizing, so more bits are needed\n fractpart -= int(fractpart)\n fractbin += '1'\n else:\n if not normalized:\n newexp -= 1 # Normalizing\n fractbin += '0'\n i += 1\n fractint = int(fractbin, 2) # Convert binary string to int\n\n if newexp < 0: # Negative exponent, adjust fractional part\n fractint -= 1\n\n fractint = '{0:033b}'.format(fractint) # To string again\n\n # Compose mantisa\n mantissa += fractint\n mantissa = b_sign + mantissa[1:]\n\n # Format exponent\n b = '{0:08b}'.format(128 + newexp) # To string\n\n b += mantissa # Final bits\n\n # Rounding using bit #41\n if b[40] == '1':\n b = b[:39] + '1'\n b = int(b[:40], 2)\n\n # To bytes\n b = b.to_bytes(5, byteorder='big', signed=False)\n\n return b\n else:\n # 0 is always treated as int\n return convert_int(0)", "title": "" }, { "docid": "24442253c7f2a5ad1d85c64f40fc8861", "score": "0.53371423", "text": "def convert_to_float(fn):\n def wrapped(self, *args, **kwargs):\n filter_args = None\n\n for item in args:\n # If a tuple or a list\n if isinstance(item, tuple) or isinstance(item, list):\n items = []\n\n for item_args in item:\n if isFloat(item_args):\n items.append(Float(item_args))\n else:\n items.append(None)\n\n if isinstance(item, tuple):\n filter_args = (tuple(items),)\n else:\n filter_args = (items,)\n\n else:\n # Single value\n if isFloat(item):\n filter_args = (Float(item),)\n else:\n filter_args = (None,)\n\n return fn(self, *filter_args, **kwargs)\n\n return wrapped", "title": "" }, { "docid": "e80295e4f6c0a9559ef46e95ee5e42b2", "score": "0.5336328", "text": "def timetup2flt(time_tup):\n import calendar\n\n time_tup = time_tup[:5] # ignore later entries in tuple\n time_tup += (0,0,0,) # seconds, milliseconds, zre zero\n time_flt = calendar.timegm(time_tup) # convert to float\n\n return time_flt", "title": "" } ]
29ac3efcaa28934ad9bb8706440c339e
bool, whether supports role mailing lists
[ { "docid": "289d3ce6018d57d5f4249092058e5749", "score": "0.5978602", "text": "def supports_mailing(self):\n return NETWORK_NAME == 'Eionet'", "title": "" } ]
[ { "docid": "293a10f48e27313337970f0560635621", "score": "0.6431647", "text": "def canAssignRole(role):", "title": "" }, { "docid": "64886deff0eadde94b2434cbb708cf61", "score": "0.6292626", "text": "def check_role(rolelist, message):\n for role in rolelist:\n # noinspection PyBroadException\n try:\n if discord.utils.get(message.server.roles, name=role) in message.author.roles:\n return True\n except:\n pass\n else:\n return False", "title": "" }, { "docid": "247f16ccff7496bc94e236308bf6d26d", "score": "0.60965586", "text": "def user_can_invite(email):\n return is_in_mpg_domain_list(email)", "title": "" }, { "docid": "8cfa18e924227aa064b41bc83d07abd3", "score": "0.5919574", "text": "def is_admin(cls, sender):\n name, mail = email.Utils.parseaddr(sender)\n return mail.lower() in (email.lower() for email in cls.admins)", "title": "" }, { "docid": "fa98f89b90e8690570e3cdb08227495b", "score": "0.58292544", "text": "def manage_hasAquirableMailHost(self):\n return len(self.superValues(['Mail Host', 'Secure Mail Host'])) > 0", "title": "" }, { "docid": "35aaac82593ce671b74eef4d67a5056f", "score": "0.57850784", "text": "def mailchimp_member_in_list(self, listname):\n email = self.user.email\n client = self.mailchimp_get\n results = client.lists.members.all(\n settings.MAILCHIMP_LISTS[listname], get_all=True, fields=\"members.email_address\")\n members = results['members']\n addresses = []\n for m in members:\n addresses.append(m['email_address'])\n\n return email in addresses", "title": "" }, { "docid": "076486c339917ecbafe0d1d495f98e1e", "score": "0.57167846", "text": "def hasMembership(self, roles, django_args):\n\n try:\n # we need to check manually, as we must return True!\n self.checkIsDeveloper(django_args)\n return True\n except out_of_band.Error:\n pass\n\n for role in roles:\n try:\n checker_name, args = self.normalizeChecker(self.MEMBERSHIP[role])\n self.doCheck(checker_name, django_args, args)\n # the check passed, we can stop now\n return True\n except out_of_band.Error:\n continue\n\n return False", "title": "" }, { "docid": "d05b8b633031e1b54e285db6b648cd5e", "score": "0.5704019", "text": "def hasManagerRole(self):\n #user_roles = self.REQUEST.AUTHENTICATED_USER.getRoles()\n #user_roles = self.REQUEST.AUTHENTICATED_USER.getRolesInContext(self)\n user_roles = getSecurityManager().getUser().getRolesInContext(self)\n\n for role in self.getManagerRoles():\n if role in user_roles:\n return True\n # still here!\n return False", "title": "" }, { "docid": "1d9ecf822ef8af61330ffec09376ec0f", "score": "0.5671331", "text": "def is_moderator(self):\n return self.name in ('admin',)", "title": "" }, { "docid": "0fe5614a2684618fd0ab309cc9cbbdb6", "score": "0.56385416", "text": "def isMentorRoleEligibleForOrg(profile, org_key):\n if org_key in profile.admin_for:\n return canResignAsOrgAdminForOrg(profile, org_key)\n else:\n return rich_bool.TRUE", "title": "" }, { "docid": "63f47c55650ac9f1291ca15c8d2a5dc2", "score": "0.56364423", "text": "def cfg_roles(self):\n role = self.role\n\n if role == \"agent\" or role == \"monitor\":\n allowed_roles = [\"manager\"]\n elif role == \"manager\":\n allowed_roles = [\"player\", \"agent\", \"monitor\"]\n elif role == \"player\":\n allowed_roles = [\"manager\"]\n else:\n logger.info(f\"Status allowed roles empty, \"\n f\"unknown identity role {role}\")\n\n allowed_roles = []\n\n self.allowed_roles = allowed_roles\n self.peers.allowed_roles = allowed_roles\n logger.info(f\"Status configured: allowed contact roles: {allowed_roles}\")", "title": "" }, { "docid": 
"43e5a1313629521e2f4b37ae84e23775", "score": "0.5636333", "text": "def is_allowed(roles: List[int], check_adm: Optional[bool] = True, throw_exc: Optional[bool] = False) -> bool:\n\n async def real_check(ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, \n member: Optional[discord.Member] = None) -> bool:\n\n member = member if not ctx else ctx.author\n channel = channel if not ctx else ctx.channel\n\n if check_adm:\n perms = channel.permissions_for(member)\n if perms.administrator:\n return True\n \n for rid in roles:\n if rid in [role.id for role in member.roles]:\n return True\n\n if throw_exc:\n raise commands.MissingAnyRole(roles)\n \n print('true')\n\n return commands.check(real_check)", "title": "" }, { "docid": "a24d78c7bad41bed03ba2edcbc8468c1", "score": "0.5632521", "text": "def hasLocalRolesBlocking(self):\n return 1", "title": "" }, { "docid": "36c2004dccfd8f795e8e2186502484c6", "score": "0.56224865", "text": "def required_role():", "title": "" }, { "docid": "a4f0f6c7e11d69df6a278b6f6ee17fe1", "score": "0.5572422", "text": "def has_roles(jwt: JwtManager, roles: List[str]) -> bool:\n if jwt.validate_roles(roles):\n return True\n return False", "title": "" }, { "docid": "bfda1e52a7027816466a38c8ca49ec79", "score": "0.55520386", "text": "def haveProxyRole(self):\n return bool(self._proxy_roles)", "title": "" }, { "docid": "ef62d521ad8f2569363b767a21858e9a", "score": "0.5529143", "text": "def isRoleMention(mention: str) -> bool:\n return mention.endswith(\">\") and mention.startswith(\"<@&\") and isInt(mention[3:-1])", "title": "" }, { "docid": "d05f519ea086a7c86d366fbcfd1995a4", "score": "0.5528395", "text": "def _should_send_email(self):\n return self.get_profile().should_send_email", "title": "" }, { "docid": "279bceb88965ad3946ec607dfd6b9c69", "score": "0.5524898", "text": "def has_role(*roles):\n return current_user.is_authenticated and current_user.role in roles", "title": "" }, { "docid": "325a4050a720719019d302eab52aa860", "score": "0.54974484", "text": "def can_see(role, channel):\n\t\t#This code *might* fail based on implied perms (admin), not sure.\n\t\toverwrite = channel.overwrites_for(role).read_messages\n\t\tif overwrite is not None:\n\t\t\treturn overwrite\n\t\treturn role.permissions.read_messages", "title": "" }, { "docid": "f0a220309bab7bbadeefe541b851cd85", "score": "0.54676914", "text": "def check_role(self, is_admin):\n if is_admin not in [\"True\", \"False\"]:\n return \"is_admin can only take in 'True' of 'False'\"", "title": "" }, { "docid": "d8bc76623a023cd38051f63d1ce64f2d", "score": "0.54596525", "text": "def has_role(self, role):\n if isinstance(role, str):\n return role in (role.name for role in self.flexmeasures_roles)\n else:\n return role in self.flexmeasures_roles", "title": "" }, { "docid": "399ed6bf87bf6d9cfa80d477ff53602e", "score": "0.54537994", "text": "def test_list_roles(self):\n pass", "title": "" }, { "docid": "10c3f36e71e827c6dbdca6bed4daa404", "score": "0.5445875", "text": "def supports_relationship_admin(self):\n return # boolean", "title": "" }, { "docid": "adab3b22454b0d08bddaaee79545de26", "score": "0.54437053", "text": "def supports_family_admin(self):\n return # boolean", "title": "" }, { "docid": "e7f1a680875d9dfeccbf5910c1d718dc", "score": "0.5431512", "text": "def user_has_required_role(request):", "title": "" }, { "docid": "453860827ba4ea4b298bb9046fc98185", "score": "0.54131174", "text": "def can_users_receive_thread_email(\n recipient_ids: List[str],\n exploration_id: str,\n has_suggestion: 
bool\n) -> List[bool]:\n users_global_prefs = (\n user_services.get_users_email_preferences(recipient_ids))\n users_exploration_prefs = (\n user_services.get_users_email_preferences_for_exploration(\n recipient_ids, exploration_id))\n zipped_preferences = list(\n zip(users_global_prefs, users_exploration_prefs))\n\n result = []\n if has_suggestion:\n for user_global_prefs, user_exploration_prefs in zipped_preferences:\n result.append(\n user_global_prefs.can_receive_feedback_message_email\n and not user_exploration_prefs.mute_suggestion_notifications)\n else:\n for user_global_prefs, user_exploration_prefs in zipped_preferences:\n result.append(\n user_global_prefs.can_receive_feedback_message_email\n and not user_exploration_prefs.mute_feedback_notifications)\n\n return result", "title": "" }, { "docid": "a8f9ff13a15e97c4315c165df9730741", "score": "0.5412857", "text": "def allow_lists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allow_lists\")", "title": "" }, { "docid": "9f4f5af0e7f1278445543f7fa0c35db9", "score": "0.5400665", "text": "def check_group(self, request, group_list):\n user_groups = [i.name for i in request.user.groups.all()]\n result = [i for i in user_groups if i in group_list]\n if result or request.user.is_superuser:\n return True\n else:\n return False", "title": "" }, { "docid": "0a704d53049ae3343b45675ec726da68", "score": "0.5400501", "text": "def is_minister(self):\r\n\r\n # TODO Once we have roles table change this\r\n minister = ugettext('Minister')\r\n return any(x.startswith(minister) for x in self.roles)", "title": "" }, { "docid": "145c35bed193ab646be36a3dc71bdee1", "score": "0.5398078", "text": "def has_role(self, *roles):\n\n assert roles and all(roles)\n return self.current_role in roles", "title": "" }, { "docid": "cb95c2c5237a206df30aaf18b85eef93", "score": "0.5384794", "text": "def is_admin(self):\n return self.role in const_core.ADMIN_GROUPS", "title": "" }, { "docid": "d4ebc6188049051c27c37c4ce5b07216", "score": "0.5378878", "text": "def mailinglist_users():\n if 'user_name' in session:\n if session['user_role'] == 'administrator':\n return render_template('viewusers.html',\n users=mongo.db.users.find({'mailing_list': 'true'}).sort('user_name'))\n else:\n return render_template('permission.html',\n message='You are not allowed to use this function')\n else:\n return render_template('login.html',\n message='Please login first to use this function',\n users=mongo.db.users.find().sort('user_name'))", "title": "" }, { "docid": "7d477ac622ec86598e7b8d5b64c7e068", "score": "0.53754777", "text": "def has_role(self, role: typing.Union[str, Role]) -> bool:\n if super().has_role('admin'):\n return True\n else:\n return super().has_role(role)", "title": "" }, { "docid": "86551dad4e2802f61f49767f0332e6dd", "score": "0.53683925", "text": "def allow_lists(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"allow_lists\")", "title": "" }, { "docid": "99612d6e32f293e11e4f97bc4b280eda", "score": "0.53585225", "text": "def is_minister(self):\n\n # TODO Once we have roles table change this\n minister = ugettext('Minister')\n return any(x.startswith(minister) for x in self.roles)", "title": "" }, { "docid": "9e4cb4c220aaadf8950caeee4fc8cb81", "score": "0.53458595", "text": "def _is_relayable_message(message):\n #only matching remove exactly\n if (message.body == 'remove') or ('mod removal' in message.body):\n # Don't announce 'remove' or 'mod removal' replies\n message.mark_as_read()\n 
LOG.info('[Inbox] Not announcing message type: \"remove\"/\"mod removal\"')\n return False\n\n elif 'blacklist me' in message.subject:\n # Don't announce blacklist requests\n message.mark_as_read()\n LOG.info('[Inbox] Not announcing message type: \"blacklist request\"')\n return False\n\n elif (message.author.name == 'AutoModerator') or (message.author.name == 'reddit'):\n # Don't announce AutoModerator or reddit messages\n message.mark_as_read()\n LOG.info('[Inbox] Not announcing message type: \"AutoMod Response\"')\n return False\n\n else:\n return True", "title": "" }, { "docid": "194a6eb4c297dcc421c9e78de6f0971c", "score": "0.5332814", "text": "def allow_lists(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"allow_lists\")", "title": "" }, { "docid": "c1f33c8ed242f08bbac7674040db48b0", "score": "0.53308994", "text": "def auth_allowed(self, response, details):\n emails = [email.lower() for email in self.setting(\"WHITELISTED_EMAILS\", [])]\n domains = [domain.lower() for domain in self.setting(\"WHITELISTED_DOMAINS\", [])]\n email = details.get(\"email\")\n allowed = True\n if email and (emails or domains):\n email = email.lower()\n domain = email.split(\"@\", 1)[1]\n allowed = email in emails or domain in domains\n return allowed", "title": "" }, { "docid": "9990033f8796543ef6a37cbd2a81f709", "score": "0.53307754", "text": "def isAdminMsgType(*args) -> \"bool\" :\n return _quickfix.Message_isAdminMsgType(*args)", "title": "" }, { "docid": "62b2a6f030cbf50a1b39ef3ed17eed07", "score": "0.53250825", "text": "def allows(self, contacts):\n logger.info(\n f\"Filtering contacts allowed by roles for {self.get('role')}\"\n )\n allowed = []\n\n if contacts:\n for contact in contacts:\n role, _ = contact.split(\"/\")\n if role in self.allowed_roles:\n allowed.append(contact)\n else:\n logger.info(\n f\"Contact role {role} not allowed for {self.get('role')}\"\n )\n\n return allowed", "title": "" }, { "docid": "60513e99513455e7e44d7dbfef93d8cc", "score": "0.531337", "text": "def has_role(self, role):\n if isinstance(role, string_types):\n return role in (role.name.name for role in self.roles)\n else:\n return role in self.roles", "title": "" }, { "docid": "c40ebe158efbb852c35d7d0e1cc2dbfa", "score": "0.5308643", "text": "def validate(self, cfg):\n custom_roles_map = self.validate_custom_roles_list(cfg.custom_roles)\n self.validate_realms_list(cfg.realms, custom_roles_map)", "title": "" }, { "docid": "28b711848844f3da4fa63f8c9a2fcbff", "score": "0.53034943", "text": "def is_authorized(self, role_names):\n #\"\"\"Checks if user is related to given role_names.\n #\"\"\"\n #allowed_roles = set([r.id for r in self.roles])\\\n # .intersection(set(role_names))\n #return len(allowed_roles) > 0\n return True", "title": "" }, { "docid": "11fba0ef20e9ac3ab63eb8cb4ac1c752", "score": "0.5300505", "text": "def get_joined(self):\n return (not has_element(self.driver, '//a[text()=\"Join this group\"]')) and \\\n has_element(self.driver, '//button/span[text()=\"You\\'re a member\"]')", "title": "" }, { "docid": "20c2a8386247864ec8901121e45277b8", "score": "0.52974236", "text": "def can_do(self, interpreted, message):\n if len(interpreted) < 2:\n return False\n word = interpreted[1]\n for word in interpreted[1:]:\n if word[1] == \":mails:\":\n return True\n return False", "title": "" }, { "docid": "a8abf4d32d1b5cde3d3973d16a64824d", "score": "0.52947414", "text": "def can_manage_gradebook_aliases(self):\n return # boolean", "title": "" }, { "docid": "715c1c8957fe122b8bd0c47f865c3b1e", "score": 
"0.5292408", "text": "def is_manager(self):\n return 'restaurant_manager' in self.user_groups()", "title": "" }, { "docid": "7ed80a1196af3e3b23b34e700b803d39", "score": "0.52861357", "text": "def has_supports_smtp(self):\n return len(filter(lambda x:self.starttls_results[x][\"supports_smtp\"],\n self.starttls_results.keys())) > 0", "title": "" }, { "docid": "7eb4deafbf5a7f0d4427faa5f80f319f", "score": "0.52844954", "text": "def wants_msg(self, dummy, items):\n # Skip replies, accept everything else.\n if len(items[5]) != 0:\n return False\n\n if self.trust_map is None:\n return True\n\n return items[2] in self.trust_map", "title": "" }, { "docid": "fd89bfe0d327bc1bc690e27231da8b8a", "score": "0.52811205", "text": "async def support(self, ctx):\n\n category_id = self.bot.config.get('main_category_id')\n\n if category_id is None:\n return await ctx.send('I couldn\\'t find the modmail category.')\n\n categories = ctx.guild.categories\n\n for c in categories:\n if c.id != int(category_id):\n continue\n else:\n category = c\n\n member_list = []\n\n for member in ctx.guild.members:\n if member.permissions_in(category).read_messages:\n if not member.bot:\n member_list.append(member.mention)\n else:\n continue\n else:\n continue\n\n embed = discord.Embed(\n title='Support Members',\n colour=discord.Colour.blue(),\n description=', '.join(member_list)\n )\n\n await ctx.send(embed=embed)", "title": "" }, { "docid": "202c544b76d83e1216d1dda635c15bc4", "score": "0.5280409", "text": "def can_invite(self):\n return self.can_use_invitations and (self.unlimited or self.supply > 0)", "title": "" }, { "docid": "3b519b74812d5379e96bcfcb789cb9c8", "score": "0.5278339", "text": "def is_manager(user_mail):\n return persistence_db.db.get_entity_by_id(\"UserAccount\", user_mail)['is_manager']", "title": "" }, { "docid": "8a9b7ed5bd8bbb6fa52d50f08f375f40", "score": "0.52782327", "text": "def is_staff(jwt: JwtManager) -> bool: # pylint: disable=too-many-return-statements\n if not jwt:\n return False\n if jwt.validate_roles([STAFF_ROLE]):\n return True\n\n return False", "title": "" }, { "docid": "85617e946aab6b5e13a514cf02d40459", "score": "0.52531564", "text": "async def is_regular(self, ctx: commands.Context):\n regular_roles = {\n 'Founder': 469158572417089546,\n 'Moderator': 490250496028704768,\n 'UT Discord Admin': 667104998714245122\n }\n\n for role_id in regular_roles:\n test_role = discord_get(ctx.guild.roles, id=regular_roles[role_id])\n if test_role in ctx.author.roles:\n return True\n\n await ctx.send(\"You do not have permission to do that\")\n return False", "title": "" }, { "docid": "6bc0a2304a584449d06308a368101ab1", "score": "0.5250823", "text": "async def addRole(self, ctx, role : discord.Role, *userList):\n empty = True\n added = 0\n had = 0\n notFound = 0\n message = \"\"\n for user in userList:\n try:\n member = await commands.MemberConverter().convert(ctx, user)\n if member in ctx.guild.members:\n if role not in member.roles:\n await member.add_roles(role)\n added += 1\n else:\n had += 1\n empty = False\n except:\n if notFound == 0:\n message += \"Couldn't find:\\n\"\n message += \"{0}\\n\".format(user)\n notFound += 1\n if empty:\n message += \":x: Nobody was given the role {0}\".format(role.name)\n else:\n message += \":white_check_mark: {0} role given to everyone that was found from list\".format(role.name)\n if notFound > 0:\n message += \". {0} user(s) were not found\".format(notFound)\n if had > 0:\n message += \". 
{0} user(s) already had the role\".format(had)\n if added > 0:\n message += \". {0} user(s) had the role added to them\".format(added)\n await ctx.send(message)", "title": "" }, { "docid": "0981427dbe8fc6a3a897893ffa5a7323", "score": "0.5242939", "text": "def checkRoles(has_roles:List[discord.Role], need_roles:Optional[List[str]] = None) -> bool:\n\t\tif need_roles is None: need_roles = [\"admin\", \"mod\", \"moderator\", \"bot commander\"]\n\n\t\tfor Role in has_roles:\n\t\t\trole_name:str = Role.name.lower()\n\n\t\t\tif role_name in need_roles: return True\n\n\t\treturn False", "title": "" }, { "docid": "5994c0a9d0cb705ee23c4c767f597133", "score": "0.5230619", "text": "def role_allow(self):\n if not hasattr(self, '_role_allow'):\n self._initialise_role_allow()\n return self._role_allow", "title": "" }, { "docid": "597a47067ec54f0068142612bbfafe71", "score": "0.5230487", "text": "def _password_security(self):\n is_allow = False\n for rec in self:\n if self.env.user.partner_id.id in rec.message_follower_ids.ids:\n is_allow = True\n else:\n is_allow = False\n break\n return is_allow", "title": "" }, { "docid": "8f493fd586ed4f5e00c62a01fb3c3651", "score": "0.5213316", "text": "def can_use_invitations(self):\n return app_settings.USERS_MAY_INVITE and self.enabled", "title": "" }, { "docid": "62e2ad149a782ca23e92f2afab550293", "score": "0.52131635", "text": "def Message_isAdminMsgType(*args) -> \"bool\" :\n return _quickfix.Message_isAdminMsgType(*args)", "title": "" }, { "docid": "5707d29a6714754ccb615027421df118", "score": "0.5210603", "text": "def _handleMentorSelection(self, data):\n is_eligible = profile_logic.isMentorRoleEligibleForOrg(\n data.url_ndb_profile, data.url_connection.organization)\n if is_eligible:\n handleMentorRoleSelection(\n data.url_connection, data.ndb_profile, None, data.program,\n data.program.getProgramMessages(), data.site)\n return is_eligible", "title": "" }, { "docid": "d4779d245a6e207b57b38d602fb59c57", "score": "0.52059686", "text": "async def check_dynamic_role(db_session, ax_form, current_user):\n del db_session\n for field in ax_form.fields:\n if field.field_type_tag == \"AxAuthor\":\n if field.value == current_user['email']:\n return True\n return False", "title": "" }, { "docid": "db7dbbd7646fa07f84b5ed3e796cf72f", "score": "0.5190216", "text": "def is_admin():\n if users.is_current_user_admin():\n return True\n\n if current_email() in settings.admin_emails:\n return True\n\n return False", "title": "" }, { "docid": "e0f7c0b517c07719fe2c21b5b6a666d5", "score": "0.51896644", "text": "def has_role(self, role=None, group=None, context=None):\n \n return self.role_set.match(role, group, context).exists()", "title": "" }, { "docid": "ede8d602b0890c5162be4ebab03a37b5", "score": "0.51880234", "text": "def is_staff(self):\n return self.staff", "title": "" }, { "docid": "ede8d602b0890c5162be4ebab03a37b5", "score": "0.51880234", "text": "def is_staff(self):\n return self.staff", "title": "" }, { "docid": "ede8d602b0890c5162be4ebab03a37b5", "score": "0.51880234", "text": "def is_staff(self):\n return self.staff", "title": "" }, { "docid": "68d3da2cd7aee1775338c586bb8610e2", "score": "0.51823664", "text": "def is_authorized():\n def predicate(ctx):\n if isinstance(ctx.message.author, discord.Member):\n for role in ctx.message.author.roles:\n if role.name in config[\"admin_role_names\"]:\n return True\n\n return ctx.message.author.id in config[\"admin_user_ids\"]\n\n return commands.check(predicate)", "title": "" }, { "docid": "3861c19af6b22dee67e4cdfc117fe866", 
"score": "0.51739126", "text": "def isAdmin(self) -> \"bool\" :\n return _quickfix.Message_isAdmin(self)", "title": "" }, { "docid": "e46075fc1f6b8b00cb9c6a750631ca7b", "score": "0.51650983", "text": "def supports_book_admin(self):\n return # boolean", "title": "" }, { "docid": "64c9f07ea5cc26e8395c3cb135cea6a7", "score": "0.51505196", "text": "def allow_email_addresses(self):\n return self.properties.get(\"AllowEmailAddresses\", None)", "title": "" }, { "docid": "f7d05f6ff8ae7bd026edf76a82d2e766", "score": "0.51486963", "text": "def user_has_roles(user: User, *requirements: RoleEnum) -> bool:\n if not user.is_authenticated:\n return False\n user_roles = set(role.name for role in user.roles)\n return set(requirements).issubset(user_roles)", "title": "" }, { "docid": "07fa29c225d62b15e31d406eadb0fef4", "score": "0.5145496", "text": "def can_add(self):\n\t\tif self.is_admin:\n\t\t\treturn True\n\t\telse:\n\t\t\troom = self.get_room()\n\t\t\treturn room.users_can_add", "title": "" }, { "docid": "c6568b8455cc0d482db713788f32eb3b", "score": "0.5137094", "text": "def allowed_to(self, name):\n if self.status != 'active':\n return False\n permission = Permission.query.filter_by(name=name).first()\n return permission in self.group.permissions.all()", "title": "" }, { "docid": "a980f5fdbad7dea5df9f3e1abd7707ef", "score": "0.5127057", "text": "def can_send(self, user, nf_type_label):\n can_send = super(EmailBackend, self).can_send(user, nf_type_label)\n # check that user has a verified email\n if can_send and user.email:\n return True\n return False", "title": "" }, { "docid": "2037924edff7b309c02eee7f3cabda51", "score": "0.51248616", "text": "def test_get_role_admins(self):\n pass", "title": "" }, { "docid": "610b9571ffc25e0b2f9b80aacc05ffd4", "score": "0.51190776", "text": "def can_view(self, user):\n if not user.id in [self.recipient_id, self.sender_id]:\n return False\n return True", "title": "" }, { "docid": "9132ba5d536ac0ab96f8aaba40ffbec7", "score": "0.51103127", "text": "def is_admin(self):\n return self.username in ['admin', 'abc']", "title": "" }, { "docid": "71650bff41b1610c4bc6421dc7316a7f", "score": "0.51095366", "text": "def should_invite(user):\n return ((not user['deleted'])\n and (not user['is_restricted'])\n and (not user['is_ultra_restricted'])\n and (args.bots or not user['is_bot'])\n and (args.apps or not user['is_app_user'])\n and (user['id'] != \"USLACKBOT\")\n )", "title": "" }, { "docid": "f43cdd47abaa938f1e2acc0a66545409", "score": "0.51071805", "text": "def Grabbable_In_Room(room, grabbable_actor_list):\n \n grabbable_in_room = False\n \n for actor in room.priority_queue:\n \n if type(actor) == Actor and actor.Id in grabbable_actor_list:\n \n grabbable_in_room = True\n \n return grabbable_in_room", "title": "" }, { "docid": "2c8ded785fe132fd4b8701972ad20f48", "score": "0.51064223", "text": "def test_get_user_roles_bpm_tasks(self):\n pass", "title": "" }, { "docid": "6bf9343dd6d95727429bd3b0b2855206", "score": "0.5105386", "text": "def role(self):", "title": "" }, { "docid": "c646f5df3246b875c5f18e2c99211b27", "score": "0.50952435", "text": "def test_endpoint_role_list(self):\n\n # get endpoint role list\n list_doc = self.tc.endpoint_role_list(self.test_ep_id)\n\n # validate data type\n self.assertEqual(list_doc[\"DATA_TYPE\"], \"role_list\")\n self.assertIn(\"DATA\", list_doc)\n # confirm that each role has the expected values\n for role in list_doc[\"DATA\"]:\n self.assertEqual(role[\"DATA_TYPE\"], \"role\")\n self.assertIn(\"id\", role)\n 
self.assertIn(\"principal_type\", role)\n self.assertIn(\"principal\", role)\n self.assertIn(\"role\", role)", "title": "" }, { "docid": "97b07c47ccf28d672d72a0adf7be289b", "score": "0.5094999", "text": "def isRole(self, role_name):\n role_names = self.getRoleNames()\n return role_name in role_names if role_names else False", "title": "" }, { "docid": "0ed72d60bb2c0e749edd78f3c397be70", "score": "0.50929695", "text": "def mailchimp_add_member_to_list(self, listname):\n client = self.mailchimp_get\n listid = settings.MAILCHIMP_LISTS[listname]\n if not self.mailchimp_member_in_list(listname):\n data = {\n 'email_address': self.user.email,\n 'status': 'subscribed',\n 'merge_fields': {\n 'FNAME': self.user.first_name\n }\n }\n client.lists.members.create(listid, data)\n\n return True\n return False", "title": "" }, { "docid": "eecabc179ea3e089f093ac561c288eb3", "score": "0.5087725", "text": "def AllowMarketing(self):\n return self.marketing != AccountSettings.MARKETING_NONE", "title": "" }, { "docid": "bff495455cf5a7947bfcc62dc825705b", "score": "0.50858134", "text": "def is_admin(self):\n return self.role == \"admin\"", "title": "" }, { "docid": "52e0d886c8b4d990bacb13b48fe99221", "score": "0.5076825", "text": "def list_roles(self):\n return self._requirements.show()", "title": "" }, { "docid": "8da8c7ed8cb10ed673460f3d558a9983", "score": "0.507401", "text": "def __call__(self, target, creds):\r\n\r\n return self.match.lower() in [x.lower() for x in creds['roles']]", "title": "" }, { "docid": "5f89576f8f15fd29a2eb0cb7bb1496f1", "score": "0.50700957", "text": "def verify_email_sub_list(email_html_body, email_sub_list_id):\n soup = BeautifulSoup(email_html_body)\n for anchor in soup.findAll('a'):\n if '/opt-out-list/' in str(anchor):\n web_url = anchor['href']\n break\n payload = web_url.split('/')[-2]\n payload_dict = PAYLOAD_SIGNING.parse_payload(payload)\n email_subscription_list_id = payload_dict.get('subscription_list')[0]\n LOG.debug('Found list id: %s' % email_subscription_list_id)\n return email_subscription_list_id == email_sub_list_id", "title": "" }, { "docid": "042afa2a5f25cb2ebfb49c0968b27991", "score": "0.50690645", "text": "def is_staff(self):\n return self.is_superuser", "title": "" }, { "docid": "924287d050a77da4c735d739fc348670", "score": "0.50679076", "text": "def has_permission(self, request):\r\n return request.user.is_active and request.user.is_staff", "title": "" }, { "docid": "90a4152aa5a3f0d20852f4e67d54442d", "score": "0.5065041", "text": "async def on_member_join(self, member: discord.Member):\n g = member.guild\n if not g.me.guild_permissions.manage_roles: # if not allowed to manage roles\n self.bot.log.info(\n f'Module - Welcome: Missing \"manage_roles\" permission on guild \"{g.name}\"'\n )\n return\n if \"MEMBER_VERIFICATION_GATE_ENABLED\" not in g.features:\n # we give new members roles if the verification gate is disabled\n await self.give_welcome_roles(member)", "title": "" }, { "docid": "fbaef22036e926b2329f941bea148e5e", "score": "0.50649637", "text": "def is_reseller(user):\n grp = Group.objects.get(name=\"Resellers\")\n return grp in user.groups.all()", "title": "" }, { "docid": "4e0ef7ced8b9d45407b69ae7430ebc97", "score": "0.5064417", "text": "def __bool__(self):\n return bool(self.text_list)", "title": "" }, { "docid": "323f5787aa6629d77f4edfb6a558aef5", "score": "0.5063684", "text": "def checkIsAllowedToManageRole(self, django_args, logic_for_role, \n manage_role_logic):\n\n try:\n # check if it is the user's own role\n 
self.checkHasRoleForScope(django_args, logic_for_role)\n self.checkIsMyEntity(django_args, logic_for_role, 'user', True)\n return\n except out_of_band.Error:\n pass\n\n # apparently it's not the user's role so check \n # if managing this role is allowed\n fields = {\n 'link_id': django_args['link_id'],\n 'scope_path': django_args['scope_path'],\n }\n\n role_entity = logic_for_role.getFromKeyFieldsOr404(fields)\n\n if role_entity.status != 'active':\n raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_ROLE_MSG)\n\n fields = {\n 'link_id': self.user.link_id,\n 'scope_path': django_args['scope_path'],\n 'status': 'active'\n }\n\n manage_entity = manage_role_logic.getForFields(fields, unique=True)\n\n if not manage_entity:\n raise out_of_band.AccessViolation(message_fmt=DEF_NOT_YOUR_ENTITY_MSG)\n\n return", "title": "" }, { "docid": "076653f573213ac9731a3f52c95901d0", "score": "0.5063358", "text": "def can_create(self):\n return self._has_role(self.edit_role)", "title": "" }, { "docid": "d4f1ee0610a712f0489a26573af5afa7", "score": "0.50626475", "text": "def check_message_by_list(mesage, lis):\n for part in lis:\n if part not in mesage:\n return False\n return True", "title": "" }, { "docid": "1b7ffa8ca623edb0ef63d8b86df94425", "score": "0.5061207", "text": "def has_permission(self, request, view):\n print(request.user.role)\n print(view)\n permitted = request.user.role == 'INVESTOR_USER' or request.user.role == 'BACKOFFICE_USER'\n if not permitted:\n print(self.message)\n\n return permitted", "title": "" } ]
cd70aef18393b85c960a4f28b6c4af6e
Initialize remote value of KNX DPT 1.001.
[ { "docid": "60bdcba32d8ddcf96df7845be210e2df", "score": "0.0", "text": "def __init__(\n self,\n xknx: XKNX,\n group_address: GroupAddressesType | None = None,\n group_address_state: GroupAddressesType | None = None,\n sync_state: bool | int | float | str = True,\n device_name: str | None = None,\n feature_name: str = \"State\",\n after_update_cb: AsyncCallbackType | None = None,\n invert: bool = False,\n ):\n super().__init__(\n xknx,\n group_address,\n group_address_state,\n sync_state=sync_state,\n device_name=device_name,\n feature_name=feature_name,\n after_update_cb=after_update_cb,\n )\n self.invert = bool(invert)", "title": "" } ]
[ { "docid": "996d1505c26ab59baae9bb93bb56606b", "score": "0.5731152", "text": "def test_from_knx(self):\n xknx = XKNX()\n remote_value = RemoteValueStep(xknx)\n assert remote_value.from_knx(DPTBinary(1)) == RemoteValueStep.Direction.INCREASE\n assert remote_value.from_knx(DPTBinary(0)) == RemoteValueStep.Direction.DECREASE", "title": "" }, { "docid": "3336497342ac24643acf17641d17842a", "score": "0.5675726", "text": "def initial_value(self, device):\n return self.get_var_on_device(device).initial_value", "title": "" }, { "docid": "048df6219ef7c0cef37f1d980c43d1a8", "score": "0.56079113", "text": "def upnp_init(self):\r\n pass", "title": "" }, { "docid": "3d15a4e9d3ff09ebca122cdfc43a9b70", "score": "0.55952203", "text": "def __init__(self):\n\n\t\tself.controller = PIDRegulator(25, 0.024, 3.5, 5.0)\t# Args: p, i, d, sat", "title": "" }, { "docid": "7f7415577ac202ff1fe8ee6bfe635360", "score": "0.55438495", "text": "def initialize_device(self):\n\n self.send(self.cmd.RESET)\n # This is legacy initialization from PL1\n # According to Sebastian, without it RV didn't enter remote control mode\n # TODO Check if it's actually needed\n self.start_temperature_regulation()\n self.stop_temperature_regulation()\n self.start_rotation()\n self.stop_rotation()\n self.start_task(interval=10, method=self.get_temperature)", "title": "" }, { "docid": "f630e6df8f9ee411a01c3c690d43cde0", "score": "0.543196", "text": "def setInit(args):\n global TCP_IP, TCP_PORT, FILE_PATH, LATEST_UPDATE\n TCP_IP = args.ip\n TCP_PORT = args.port\n FILE_PATH = args.file", "title": "" }, { "docid": "50295dc60fdd0c1c8cc76d6e08f23687", "score": "0.54199797", "text": "def __init__(self):\n self.pd_client = pokitdok.api.connect(**client_settings)", "title": "" }, { "docid": "e71b68335dff6f4f02edbc0993a398c5", "score": "0.5409202", "text": "def initialise_target(c, key):\n if key == 'p':\n return 2.5e6\n elif key == 'h':\n return 2e6", "title": "" }, { "docid": "5c13ede7125b3d13ad0ecbb9f909b45c", "score": "0.5395307", "text": "def __init__(self):\n self.temperature_degree = 0\n self.humidity_value = 0\n self.air_pollution_level = 50\n self.day_number = 1", "title": "" }, { "docid": "511575fd6c59b5a58439c282a5e3a8bf", "score": "0.5386068", "text": "def testSisoControllerFirstStep(self):\n Kp = 1.0\n Ki = 1.0\n Kd = 1.1\n controller = PIDController.PIDcontroller(Kp, Ki, Kd)\n output = controller.update(10);\n self.assertEquals(31, output)", "title": "" }, { "docid": "88a44e55eb7a84821cd0a7ce213e49a1", "score": "0.5384406", "text": "def init(self,*args, **kw):\n self._initialized = True\n self._info = 'Firmware ver.: Test 1.00'", "title": "" }, { "docid": "88a44e55eb7a84821cd0a7ce213e49a1", "score": "0.5384406", "text": "def init(self,*args, **kw):\n self._initialized = True\n self._info = 'Firmware ver.: Test 1.00'", "title": "" }, { "docid": "d910342fbf25a9d34eb6aed05822d2e7", "score": "0.535282", "text": "def init_default():\n\tpass", "title": "" }, { "docid": "c4befdc22fecb667b3bff4da052f0e49", "score": "0.53197527", "text": "def test_temperature_is_200_at_init():\n with DeviceTestContext(AndrewDev, process=True) as proxy:\n proxy.Init()\n scale = float(proxy.attribute_query('temperature').display_unit)\n # we scale and round here because the object simulates noise\n assert round(proxy.temperature * scale) == 20", "title": "" }, { "docid": "2d4f55afdb8e61b7fb9be89b166c8712", "score": "0.5312856", "text": "def test_current_is_zero_at_init():\n with DeviceTestContext(AndrewDev, process=True) as proxy:\n proxy.Init()\n proxy.turn_on()\n 
proxy.current = 5\n assert proxy.current != 0\n proxy.Init()\n assert proxy.current == 0", "title": "" }, { "docid": "c2c8a418414e59fc5d65280dda87131f", "score": "0.5298004", "text": "def __init__(self):\n cfg = self.get_config(self.PTH_RHN_DEFAULTS) # Get defaults (if any)\n cfg.update(self.get_config(self.PTH_RHN_CONF)) # Apply custom conf on top\n\n self.http_port = cfg.get(self.CFG_HTTP_PORT_KEY, self.CFG_DEFAULT_HTTP_PORT)\n self.https_port = cfg.get(self.CFG_HTTPS_PORT_KEY, self.CFG_DEFAULT_HTTPS_PORT)", "title": "" }, { "docid": "82d47de13c4f59ac5f5a3c3d6954b97e", "score": "0.5296141", "text": "def __init__(self, isProd=True):\n self.urlRoot = 'http://api.ezdevice.net' if isProd else 'http://localhost:3030'", "title": "" }, { "docid": "9f19702f9ab6612e1f458899ab0928ad", "score": "0.5283475", "text": "def testConstructor(self):\n Kp = 1.0\n Ki = 1.0\n Kd = 1.1\n controller = PIDController.PIDcontroller(Kp, Ki, Kd)\n self.assertEquals(Kp, controller.Kp);\n self.assertEquals(Ki, controller.Ki);\n self.assertEquals(Kd, controller.Kd);", "title": "" }, { "docid": "f5a49066b72230661bf8ed59ee2f855a", "score": "0.52741593", "text": "def test_init(self):\r\n mem = engine.FelixRemoteMemory()\r\n\r\n assert mem.connection == u\"\", mem.connection\r\n assert mem.data == {}, mem.data\r\n assert mem.token is None, mem.token", "title": "" }, { "docid": "e1797b4c4cc5c9cd532b2f66397e8aec", "score": "0.5262089", "text": "def adv_init():\n endpoint = req_adv(\"init/\")\n r = requests.get(endpoint, headers=headers)\n data = check_json(r)\n\n return data", "title": "" }, { "docid": "2a7b8cf2b379de9234c10988bf79a84d", "score": "0.52522165", "text": "def __init__(self):\n self.temperature = 70.0\n self.status = \"sunny\"", "title": "" }, { "docid": "2a7b8cf2b379de9234c10988bf79a84d", "score": "0.52522165", "text": "def __init__(self):\n self.temperature = 70.0\n self.status = \"sunny\"", "title": "" }, { "docid": "73382095f368a6df3d96cf044aa82037", "score": "0.5247835", "text": "def test_to_knx(self):\n xknx = XKNX()\n remote_value = RemoteValueStep(xknx)\n assert remote_value.to_knx(RemoteValueStep.Direction.INCREASE) == DPTBinary(1)\n assert remote_value.to_knx(RemoteValueStep.Direction.DECREASE) == DPTBinary(0)", "title": "" }, { "docid": "cf86d107df877f9965a078f891e1091e", "score": "0.52427715", "text": "def __init__(self):\n self._value = 1", "title": "" }, { "docid": "05ad55f590c7e09ba55ae153606bd8da", "score": "0.5233684", "text": "def __init__(self, pin_name, mqtt_cmd, mqtt_get=None, init_value=0, inverse_logic=True,\n freq=500, no_auto=False):\n self.inverse_logic = 1 if inverse_logic else 0\n self.pin_name = pin_name\n self.init_value = init_value\n self.freq = freq\n self.no_auto = no_auto\n self.mqtt_cmd_topic = api.app.conf.get(mqtt_cmd, '/DigitalOutput/pin_%s/cmd_topic/not/found' % mqtt_cmd)\n self.mqtt_get_topic = api.app.conf.get(mqtt_get, None)\n self.pin = None\n self.curr_value = None\n self.setup()\n super(self.__class__, self).__init__()", "title": "" }, { "docid": "52ef0a336ea62dff7f2d332aee95b3db", "score": "0.5179809", "text": "def init(self):\n self._pr.init()", "title": "" }, { "docid": "990987ed2b51b896a9aaa63ffa7cf248", "score": "0.5154971", "text": "def init(self, now, env):\n pass", "title": "" }, { "docid": "aff3bfbf623e4bb22c50649cc79d8d8a", "score": "0.5149914", "text": "def initialise_Source(c, key):\n if key == 'p':\n return 0.5e5\n elif key == 'h':\n return 1.5e6", "title": "" }, { "docid": "e1cde5e3d6365fb49f9598968a865fe0", "score": "0.5141875", "text": "def 
setUp(self):\n self.clnt = CvpClient()\n nodes = ['1.1.1.1']\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)", "title": "" }, { "docid": "110d451c74b91deef36cde1ecfcd1993", "score": "0.5125922", "text": "def init_opt(self):", "title": "" }, { "docid": "46b71744abf49b8d7b06b0f6eeb5a365", "score": "0.51230544", "text": "def init_pv_model_config_props(self):\n self.pv_name = \"Unknown\"\n self.estimate_irrad = ESTIMATE_IRRAD_DEFAULT\n self.estimate_temp = ESTIMATE_TEMP_DEFAULT\n self.use_avg_sensor_temp = USE_AVG_SENSOR_TEMP_DEFAULT\n self.cell_temp_adjust = CELL_TEMP_ADJUST_DEFAULT", "title": "" }, { "docid": "58fcb75e89023328a0fb4bcc2ebc785a", "score": "0.51224816", "text": "def __init__(self, name, init_value=None, min_value=0, max_value=255, size=4, step=1,op=\"inc\"):\n super(STLVmFlowVar, self).__init__()\n self.name = name;\n validate_type('name', name, str)\n self.size =size\n valid_fv_size(size)\n self.op =op\n valid_fv_ops (op)\n\n # choose default value for init val\n if init_value == None:\n init_value = max_value if op == \"dec\" else min_value\n\n self.init_value = convert_val (init_value)\n self.min_value = convert_val (min_value);\n self.max_value = convert_val (max_value)\n self.step = convert_val (step)\n\n if self.min_value > self.max_value :\n raise CTRexPacketBuildException(-11,(\"max %d is lower than min %d \") % (self.max_value,self.min_value) );", "title": "" }, { "docid": "50228383385c5472b624f39500874c31", "score": "0.5119438", "text": "def __init__(self):\n\n self.value = 0", "title": "" }, { "docid": "d3bf0b2367bab2ed97ceacbf894b8439", "score": "0.5087003", "text": "def __init__(self,**inp_kargs):\n # update the default config variables with input arguments\n self.DP = DP\n self.update_HXRGNoise(**inp_kargs)", "title": "" }, { "docid": "993786877af11ef7947e2a1713abe736", "score": "0.5060921", "text": "def __init__(self):\n self.phedexBase = \"https://cmsweb.cern.ch/phedex/datasvc/\"", "title": "" }, { "docid": "07d62bda4d650b69fed57444f63f190b", "score": "0.50417626", "text": "def __init__(self, Kp, Ki, Kd, setpoint=0):\n self.setpoint = setpoint\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self._integral = 0\n self._previous_error = 0\n self._change_limit = 0", "title": "" }, { "docid": "c92637668a066945e6c1624fbc4d1976", "score": "0.503835", "text": "def initialize():", "title": "" }, { "docid": "c92637668a066945e6c1624fbc4d1976", "score": "0.503835", "text": "def initialize():", "title": "" }, { "docid": "d0fc4ce7d86efbdebc3f5b604b910e32", "score": "0.5037292", "text": "def _init_env_variables(self):\n self.reward = 0\n self.n_step = 0\n self.consecutive_errors = 0", "title": "" }, { "docid": "b503f6fb416a1b2c91c1daaa14126d28", "score": "0.50365084", "text": "def __init__(self, manual=False):\n print(\"Launching torcs...\")\n if manual:\n self.__reset_torcs_manual()\n else:\n self.__reset_torcs()\n print(\"Connecting to torcs...\")\n self.client = snakeoil.Client(p=3001)\n self.client.maxSteps = np.inf\n self.client.get_servers_input()\n self.obs = observation.Observation()\n self.obs.update_obs(self.client.S.d)\n self.act = action.Action()\n self.PI = 3.14159265359", "title": "" }, { "docid": "cd3a09d6d62af204706b72382dfb7bea", "score": "0.5023596", "text": "def __init__(self, ip):\n self.ip = ip\n self.port_dac_osc = RestApi.port_dac_osc\n self.port_wss = RestApi.port_wss", "title": "" }, { "docid": "837ba764a5db8d41040fdf93ef22f755", "score": "0.50147074", "text": "def init_value(mdp):\n return 
np.zeros(mdp.num_states)", "title": "" }, { "docid": "8cb46fe0069e58cb9571fdeaf5fecd91", "score": "0.50138366", "text": "def init_environment():\r\n begin_id=654000\r\n static_blks=10\r\n end_id=begin_id+400\r\n nodes_n=10\r\n communication_distribution_type='normal'#'1'\r\n\r\n blk_size,all_storage=dsinit.load_blocksizes(begin_id,end_id)\r\n # communication_cst=dsinit.generate_communication_cost(nodes_n,communication_distribution_type)\r\n \r\n dsrpal.init_all_settings(blk_size,begin_id,end_id,static_blks,nodes_n)\r\n \r\n # all params needed can be accessed from dsrpal.param\r\n\r\n # return max_level\r\n return all_storage", "title": "" }, { "docid": "34ece1f8df3dccfa96502e545c84222c", "score": "0.50079536", "text": "def init_host(self):\n self.driver.init_host(host=self.host)", "title": "" }, { "docid": "22f18ae8ba23c028ee455d9882737795", "score": "0.500553", "text": "def init_env_variables(self):\n self.total_distance_moved = 0.0\n self.current_distance = self.get_distance_from_start_point(self.start_point)\n self.roll_turn_speed = rospy.get_param('/cat/init_vel')", "title": "" }, { "docid": "8d1c8baeacf8fb98dc7bb26f801d595f", "score": "0.50041676", "text": "def initialize(self):\n self.state = {'version': VERSION}", "title": "" }, { "docid": "da398332a32ce3c2cdf12876d502505d", "score": "0.49997044", "text": "def __init__(__self__, *,\n dataplex_spec: 'outputs.GoogleCloudDatacatalogV1DataplexSpecResponse'):\n pulumi.set(__self__, \"dataplex_spec\", dataplex_spec)", "title": "" }, { "docid": "95a079bdf24d72882d9b21b96639f0f0", "score": "0.49996632", "text": "def __init__(self, dev_id=0, sensor_number=1):\n self.dev_id = dev_id\n self.sensor_number = sensor_number\n self.ip_addr = get_ip_addr()", "title": "" }, { "docid": "a7b4e74a3fe946099a643906791e0899", "score": "0.49914792", "text": "def setInitialValue(self):\n\n self.initialValue = 0", "title": "" }, { "docid": "14c6879b8c5ee7237ed4a8a6f9a6e430", "score": "0.49861395", "text": "def __init__(self, tor_value=\"_None\"):\n #: str: The value as a string\n self.value = None\n\n #: str: The unit of the value as a string\n self.unit = None\n\n if tor_value != \"_None\":\n self.fill(tor_value)", "title": "" }, { "docid": "39917dd178106d17ecdfdd800817bce1", "score": "0.49708036", "text": "def __init__(self, value=0):\n self.value = value", "title": "" }, { "docid": "5b273c4fd1e3b528e6e4e57228b49e15", "score": "0.4960451", "text": "def init():\n pass", "title": "" }, { "docid": "c88efe4169786493a1fe7e2da2d1ac5f", "score": "0.49579027", "text": "def __init__ (self) :\n self.value = None", "title": "" }, { "docid": "13dec27f81107d2910f005337b0bb0e1", "score": "0.49540985", "text": "def test_default_setting(self):\n self._run_and_test(self._int_hparams)\n self._run_and_test(self._float_hparams)", "title": "" }, { "docid": "63752c1c88d20defa510218c5bd67130", "score": "0.4946902", "text": "def __init__(self, **kwargs):\n super(Interface, self).__init__(**kwargs)\n self.dnp3_agent_id = DEFAULT_DNP3_AGENT_ID # This gets overridden by the config file if it's defined there.\n self.cache_expiration_secs = DEFAULT_CACHE_EXPIRATION_SECS\n self.points_configured = False", "title": "" }, { "docid": "2f508762852c4d17f9de719b324b62c4", "score": "0.4936807", "text": "def __init__(self, key):\n self.datacenter = None\n NodeDriver.__init__(self, key)", "title": "" }, { "docid": "1c4a472a4118b7cc07bf1515eafa2972", "score": "0.49334666", "text": "def init_ivadomed():\n # Display ivadomed version\n logger.info('\\nivadomed ({})\\n'.format(__version__))", 
"title": "" }, { "docid": "b4cdf32d0d86b6dd15e384f47205e0c0", "score": "0.49306846", "text": "def __init__(self, step=(-9, 9)):\n self._min_step = None\n self._max_step = None\n self._prng = None\n self.set_params(step=step)\n\n self._is_init = True", "title": "" }, { "docid": "d576a9fda360a7b099212c40f3a8c1cb", "score": "0.4924728", "text": "def __init__(self, ip, port, dummy=False):\n self._dc_ip = None\n self._dc_port = None\n self._delay_client = None\n self._delay_client = None\n self._delays = None\n self._dummy = dummy\n self._dada_input_key = 0xdada\n self._dada_coh_output_key = 0xcaca\n self._dada_incoh_output_key = 0xbaba\n super(FbfWorkerServer, self).__init__(ip, port)", "title": "" }, { "docid": "e30d5ec42841b5153186e132e3b9378b", "score": "0.49240455", "text": "def _get_init_url(self):\n # cn seems to just be a random 10 digit code\n\n\n xpc = {\"cn\": random_string(10),\n \"tp\": 'null',\n \"ppu\": \"http://hangoverapi.appspot.com/_ah/channel/xpc_blank\",\n \"lpu\": \"http://talkgadget.google.com/talkgadget/xpc_blank\"}\n params = {'token': self.token,\n 'xpc': json.dumps(xpc)}\n url = self.base_url + '/d?' + urllib.urlencode(params)\n return url", "title": "" }, { "docid": "65558536f2bd3ef8596045cb1b82ed87", "score": "0.49233174", "text": "def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"HOST\", \"127.0.0.1\"],\n [\"SERVER_PORT\", \"1234\"]])", "title": "" }, { "docid": "0d6b3507121c389debe56b26c2bc264a", "score": "0.49152556", "text": "def __init__(self, agent_num=0):\n self.agent_num = agent_num\n #Move to JSON \n self.PID_acc = PIDController(1.0, 0, 0)\n self.PID_steer = PIDController(2.0, 0, 0)\n self.not_initiliazed = True", "title": "" }, { "docid": "01047459017cdd16c1b3a09bd316f532", "score": "0.4915136", "text": "def test_remote_value(self):\n xknx = XKNX(loop=self.loop)\n remote_value = RemoteValue(\n xknx,\n group_address='1/2/3',\n group_address_state='1/2/4')\n self.assertEqual(\n str(remote_value),\n '<RemoteValue <Address str=\"1/2/3\" />/<Address str=\"1/2/4\" />/None/None/>')\n remote_value.payload = DPTArray([0x01, 0x02])\n self.assertEqual(\n str(remote_value),\n '<RemoteValue <Address str=\"1/2/3\" />/<Address str=\"1/2/4\" />/<DPTArray value=\"[0x1,0x2]\" />/None/>')", "title": "" }, { "docid": "9706e21291b3b3f512401799ca2ab984", "score": "0.49106517", "text": "def __init__(self, _host, sensor):\r\n self.neptun_instance = _host\r\n self.sensor_name = sensor\r\n self._state = None\r\n self.data = None", "title": "" }, { "docid": "c9d098496851500b3ce88c370660c2de", "score": "0.4905063", "text": "def __init__(self):\n self._robot = None\n self._svc_controller = False\n\n self._server = None\n self._port = None\n\n self._speed_left = 0\n self._speed_right = 0", "title": "" }, { "docid": "fc540ac46c6999be148dba12e9e8bcb8", "score": "0.49030095", "text": "def init_host(self, host):\n raise NotImplementedError()", "title": "" }, { "docid": "6bb0d094dc17a12b35d123620289a8dd", "score": "0.49021816", "text": "def Initialize(self, front_end_hostname, front_end_port, simulator_port):", "title": "" }, { "docid": "df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": "df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": 
"df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": "df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": "df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": "df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": "df1009e9175e737d376d6a5ba8cdfa56", "score": "0.4898045", "text": "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "title": "" }, { "docid": "38382cda42c78ddc241e69a881ba1f2a", "score": "0.4897996", "text": "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n #self.env.seed(12345)", "title": "" }, { "docid": "0f01d4d0d0fbb1b0fd5048428308bad9", "score": "0.48932728", "text": "def __init__(self):\n self._voltage = 12", "title": "" }, { "docid": "3653857613eddc657ec20b97df4b08f1", "score": "0.4890127", "text": "def __init__(self, *args):\n _snap.TFltIntPrKdV_swiginit(self,_snap.new_TFltIntPrKdV(*args))", "title": "" }, { "docid": "a2343f03c0fa0f72eaaf94195c441e0a", "score": "0.48884434", "text": "def vdk_initialize(self, context: CoreContext) -> None:\n op_id = context.configuration.get_value(CommonStoreKeys.OP_ID.key)\n execution_id = context.configuration.get_value(CommonStoreKeys.EXECUTION_ID.key)\n attempt_id = context.configuration.get_value(CommonStoreKeys.ATTEMPT_ID.key)\n\n if not op_id:\n op_id = str(int(time.time()))\n\n if not execution_id:\n execution_id = f\"{op_id}-{str(uuid.uuid4())[:6]}\"\n\n if not attempt_id:\n attempt_id = f\"{execution_id}-{str(uuid.uuid4())[:6]}\"\n\n context.state.set(CommonStoreKeys.OP_ID, op_id)\n context.state.set(CommonStoreKeys.EXECUTION_ID, execution_id)\n context.state.set(CommonStoreKeys.ATTEMPT_ID, attempt_id)\n\n context.state.set(CommonStoreKeys.VDK_VERSION, vdk_build_info.RELEASE_VERSION)\n context.state.set(CommonStoreKeys.START_TIME, datetime.utcnow())", "title": "" }, { "docid": "295443666a5348c960179eb543306cbb", "score": "0.48836175", "text": "def autonomousInit(self):", "title": "" }, { "docid": "6ad74663a6ccbf94dcc61d45ce28cfeb", "score": "0.48836118", "text": "def _initialize_data(self):\n self.idn = \"LSCI,MODEL460,0,22323\"\n self.source = 1\n self.channels = {\"X\": Channel(), \"Y\": Channel(), \"Z\": Channel(), \"V\": Channel()}\n self.channel = \"X\"\n self.unit = \"T\"", "title": "" }, { "docid": "9d6dccdd2bbb9567d3bbd5dc5719e5ed", "score": "0.48766783", "text": "def set_default_params():\n\n global PARAMS\n PARAMS['chuck_norris_birthday'] = (\"Chuck Norris is immortal and doesn't \"\n \"have a birthday. 
But if he did, it \"\n \"would be today.\")\n PARAMS['age_of_chuck_norris'] = 21 # because is always 21\n PARAMS['pi'] = np.pi", "title": "" }, { "docid": "2c3d6c3492dddd586ac5ca597ce81be6", "score": "0.4872112", "text": "def __init__ (self, value) :\n self.value = value", "title": "" }, { "docid": "2c3d6c3492dddd586ac5ca597ce81be6", "score": "0.4872112", "text": "def __init__ (self, value) :\n self.value = value", "title": "" }, { "docid": "349380ed7e76fbb4144f2333d2c95e50", "score": "0.48559535", "text": "def test_set_initial_point_fails_if_scalar():\n x = ContinuousDesign()\n pt = 42\n x.initial_point = pt", "title": "" }, { "docid": "dd3f7d341d5bbff8c3af99044ac056e1", "score": "0.48555553", "text": "def testSetGetInitValues(self):\n t = dict(r=2.0, alpha=-0.12, delta=2*3.14)\n v3sp = tpm.V3SP(**t)\n self.assertAlmostEqual(v3sp.r, t['r'])\n self.assertAlmostEqual(v3sp.alpha, t['alpha'])\n self.assertAlmostEqual(v3sp.delta, t['delta'])", "title": "" }, { "docid": "494f3137dd68bc25c64770a2eba4be7b", "score": "0.48487726", "text": "def u_initial(nt,nx,dx):\n\n\tu = np.zeros(nx)\n\tu[0 : 2/(dx+1)] = 1\n\tprint u\n\n\treturn u", "title": "" }, { "docid": "46573d489ca392e423b578c5ad88ecf6", "score": "0.48408392", "text": "def init_rndckey():\n return DesignateBindCharm.singleton.init_rndckey()", "title": "" }, { "docid": "46573d489ca392e423b578c5ad88ecf6", "score": "0.48408392", "text": "def init_rndckey():\n return DesignateBindCharm.singleton.init_rndckey()", "title": "" }, { "docid": "46de8e3f885084ad0ef28b2e2214e116", "score": "0.483609", "text": "def _init_(self, n):\n\t \tself.n=n", "title": "" }, { "docid": "6ad5f2a5454383960818ff72bb73163a", "score": "0.48324746", "text": "def __init__(self, il_value):\n self.il_value = il_value", "title": "" }, { "docid": "18ca1af183885327c565f17f38690c07", "score": "0.48295134", "text": "def version(self,host,port,version):\r\n self.remoteVersion[host,port] = version", "title": "" }, { "docid": "af58734b6619988363cbe9717c431465", "score": "0.48290357", "text": "def init():\n\n # Set up here\n\n return", "title": "" }, { "docid": "6d9e543ff894faceb1119a5a016d54c2", "score": "0.48262185", "text": "def teleopInit(self):\n pass", "title": "" }, { "docid": "57ab17ea7afe85c2cf3a402296368794", "score": "0.48250172", "text": "def __init__(self, value):\n from openzwave.network import ZWaveNetwork\n from pydispatch import dispatcher\n zwave.ZWaveDeviceEntity.__init__(self, value, DOMAIN)\n self._node = value.node\n self._index = value.index\n self._current_temperature = None\n self._unit = None\n self._current_operation_state = STATE_IDLE\n self._target_temperature = None\n self._current_fan_state = STATE_IDLE\n self.update_properties()\n # register listener\n dispatcher.connect(\n self.value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)", "title": "" }, { "docid": "3977991c2a84ed4a2e2c96b4d6a497f0", "score": "0.4818155", "text": "def initRunningVals(self):\n self.r_P = [0.0]*self.mirror.dataPoints\n self.r_Q = [0.0]*self.mirror.dataPoints\n self.r_St = [0.0]*self.mirror.dataPoints", "title": "" }, { "docid": "85ab0b5176cb4df2e9c4eda91a07dfb1", "score": "0.4817505", "text": "def initialize_host(self):\n\n host_input = self.inputs[\"host\"]\n\n host = os.environ.get(\"CGWIRE_HOST\", None)\n if host is None:\n gazu_host = gazu.client.get_host()\n if gazu_host != \"http://gazu.change.serverhost/api\":\n # Assume the host in gazu.client is already set correctly\n # and copy it into these settings.\n log.debug(\"Setting CG-Wire host \"\n \"from gazu.client: %s\" % 
gazu_host)\n host = gazu_host\n else:\n log.debug(\"Setting CG-Wire host from environment \"\n \"variable CGWIRE_HOST: %s\" % host)\n\n if host:\n # Force the host by environment variable\n host_input.setText(host)\n host_input.setEnabled(False)\n else:\n host_input.setEnabled(True)", "title": "" }, { "docid": "198d329b5c0f2a1a2606cdc286705199", "score": "0.481547", "text": "def __init__(self, desired: int, actual: int) -> None:\n\t\t\tself.desired = desired\n\t\t\tself.actual = actual", "title": "" }, { "docid": "528a6492d653385e6249e6b78c6c9017", "score": "0.48154163", "text": "def test_voltage_is_240():\n with DeviceTestContext(AndrewDev, process=True) as proxy:\n proxy.Init()\n assert proxy.voltage == 240", "title": "" }, { "docid": "d456610653ca8cee2880f0f14d703751", "score": "0.48151034", "text": "def __init__(self, cb):\n\t\tself.kp = float(c.get('PID', 'KP'))\n\t\tself.kd = float(c.get('PID', 'KD'))\n\t\tself.ki = float(c.get('PID', 'KI'))\n\t\tself.cb = cb\n\t\tself.target = float(c.get('PID', 'TARGET_LUMINOSITY'))\n\t\tself.prevVal = self.cb()\n\t\tself.prevTime = time.time()\n\t\tself.maxOutput = float(c.get('PID', 'MAX_OUTPUT'))\n\t\tself.minOutput = float(c.get('PID', 'MIN_OUTPUT'))\n\t\tself.output = self.minOutput + (self.maxOutput - self.minOutput) / 2", "title": "" }, { "docid": "000b66e7d1d159d8dd45d6789ef2c267", "score": "0.48134166", "text": "def __init__(self, cnv):\n \n self.cnv = cnv", "title": "" }, { "docid": "ff3e0f0c3ebd8b470b106ea2dea3e7d2", "score": "0.48099333", "text": "def __init__(self, hass, remote, config):\n self.available = None\n self._quiet_time_error = False\n self._reboot_check = True\n self._hass = hass\n self._remote = remote\n self._config = config\n self._error_time = None\n self._startup_setup = False\n self._utc_now = None\n self._power_state = None", "title": "" } ]
356a2e713ff7fa829665edd4d2a035ec
Returns the Jaccard similarity
[ { "docid": "eb5f9d72fcbfef9a06e4b3bb2e174a36", "score": "0.53147584", "text": "def jaccard_similarity(list1, list2):\n intersection = len(list(set(list1).intersection(list2)))\n union = (len(list1) + len(list2)) - intersection\n return float(intersection) / union", "title": "" } ]
[ { "docid": "b28690c78a69fc481a989368dc9c914e", "score": "0.6333269", "text": "def jaccard(left, right):\n\n true_false = 0\n false_true = 0\n false_false = 0\n true_true = 0\n\n if len(left) == len(right):\n for index, val in enumerate(left):\n if left[index] == 1 and right[index] == 0: # true false\n true_false += 1\n elif left[index] == 0 and right[index] == 1: # false true\n false_true += 1\n elif left[index] == 0 and right[index] == 0: # false false\n false_false += 1\n elif left[index] == 1 and right[index] == 1: # true true\n true_true += 1\n\n elif left[index] == -1 and right[index] == 0: # true false\n true_false += 1\n elif left[index] == 0 and right[index] == -1: # false true\n false_true += 1\n elif left[index] == -1 and right[index] == -1: # true true\n true_true += 1\n\n # print(\"p:\", true_true)\n # print(\"q:\", true_false)\n # print(\"r:\", false_true)\n # print(\"s:\", false_false)\n\n return true_true / (true_false + false_true + true_true)", "title": "" }, { "docid": "46af5b1caae920dd4bbd0c80cdd7f065", "score": "0.58410513", "text": "def get_DxxxGID2jaccard(self):\n DxxxGID2jaccard = dict()\n dxxx = species2dxxx[self.species]\n jfile = \"../data/jaccard/\" + dxxx + \".jaccard\"\n try:\n os.stat(jfile)\n except:\n count = 0\n for DxxxGID in self.DxxxGID2FBgnID.keys():\n count += 1\n if count % 1000 == 0:\n print(count)\n FBgnID = self.DxxxGID2FBgnID[DxxxGID]\n v3exonmap = self.v3exonmap[DxxxGID]\n fbexonmap = self.fbexonmap[FBgnID]\n j = jaccard(v3exonmap, fbexonmap)\n # print(DxxxGID, FBgnID, j)\n DxxxGID2jaccard[DxxxGID] = j\n else:\n with open(jfile, \"r\") as f:\n for line in f.readlines():\n DxxxGID, FBgnID, j = line.rstrip().split(\"\\t\")\n DxxxGID2jaccard[DxxxGID] = j \n return(DxxxGID2jaccard)", "title": "" }, { "docid": "c97d7b64fd3e6ade99b1d2da850ec7e2", "score": "0.58125526", "text": "def jaccard(passed, failed, totalpassed, totalfailed):\n if totalfailed + passed == 0:\n return failed\n return failed / (totalfailed + passed)", "title": "" }, { "docid": "a365d7e689283411da9a8a98e3c8ea9c", "score": "0.57432854", "text": "def jaccard(set1,set2):\n if(len(set1) == 0 or len(set2) == 0):\n return 0\n return float(len(set1 & set2)) / len(set1 | set2)", "title": "" }, { "docid": "2ae660825c0ce87eac4799829ea6d9e6", "score": "0.57281584", "text": "def jaccard(a, b):\n a = set(a)\n b = set(b)\n c = a.intersection(b)\n return len(c) / (len(a) + len(b) - len(c))", "title": "" }, { "docid": "a7d6b114891cc4beb39b7f5327a3d4ff", "score": "0.5686854", "text": "def test_card_display_big_joker(self):\n assert str(card.Card(\"Z\", 1)) == 'JOKER'", "title": "" }, { "docid": "ab54b5f976f19fd845bff34123bab57e", "score": "0.5672307", "text": "def jaccard(list1, list2):\n intersection = list(set(list1) & set(list2))\n union = list(set(list1) | set(list2))\n return len(intersection) / len(union)", "title": "" }, { "docid": "480c1b52169c412cbfcb0052e0f1d191", "score": "0.56358117", "text": "def jaccard_comp(val1, val2):\n\n # If at least one of the values is empty return 0\n #\n if (len(val1) == 0) or (len(val2) == 0):\n return 0.0\n\n # If both attribute values exactly match return 1\n #\n elif (val1 == val2):\n return 1.0\n\n # ********* Implement Jaccard similarity function here *********\n\n jacc_sim = 0.0 # Replace with your code\n\n # Add your code here\n\n # ************ End of your Jaccard code *************************************\n\n assert jacc_sim >= 0.0 and jacc_sim <= 1.0\n\n return jacc_sim", "title": "" }, { "docid": "24e55c8635fa40742b9cdcb6cfe2ccd1", "score": 
"0.5624401", "text": "def test_card_display_small_joker(self):\n assert str(card.Card(\"Z\", 0)) == 'joker'", "title": "" }, { "docid": "30d4603d934feab19598a7e2719b0f1d", "score": "0.56108916", "text": "def jaccard(set1, set2):\n\n intersection = set1.intersection(set2)\n union = set1.union(set2)\n\n # if both ests are empty return 1\n if len(intersection) == 0 and len(union) == 0:\n return 1/1\n else:\n jaccard_index = len(intersection)/len(union)\n return jaccard_index", "title": "" }, { "docid": "871340ffda1ff02f121566470a781cbb", "score": "0.5552048", "text": "def compute_jaccard_similarity(self, fname=JACCARD_SIM):\n if self.jaccard_sim is not None:\n return None\n else:\n if not fname: # time-consuming\n self.meta_df[\"items_for_jaccard\"] = (\n self.meta_df[\"cast\"]\n + self.meta_df[\"keywords\"]\n + self.meta_df[\"genres\"]\n + self.meta_df[\"director\"]\n )\n jaccard_sim = np.zeros(\n (len(self.meta_df.index), len(self.meta_df.index))\n )\n for i1 in tqdm(range(len(self.meta_df.index))):\n for i2 in range(i1 + 1, len(self.meta_df.index)):\n s1 = set(self.meta_dfa_new.items_for_jaccard[i1])\n s2 = set(self.meta_df.items_for_jaccard[i2])\n try:\n sim = Jaccard(s1, s2)\n except:\n print(f\"Jaccard has trouble: (s1, s2) = ({s1}, {s2})\")\n sim = 0\n jaccard_sim[i1, i2] = sim\n jaccard_sim[i2, i1] = sim\n self.__jaccard_sim = jaccard_sim\n else:\n assert isinstance(fname, str)\n assert fname.endswith(\".npz\")\n # require large memory\n self.__jaccard_sim = np.load(fname)[\"arr_0\"] # shape: (45116, 45116)", "title": "" }, { "docid": "cbaba1fbc66a390d85cd89efb6e75494", "score": "0.55482626", "text": "def jaccard(vec1, vec2) :\n vec1, vec2 = NP.squeeze(vec1), NP.squeeze(vec2)\n fnx = lambda v : NP.array(v, dtype=bool)\n vec1, vec2 = fnx(vec1), fnx(vec2)\n try :\n numer = NP.sum(vec1 == vec2)\n except ValueError :\n print(\"check that the 2 vectors are of equal length\")\n denom = float(vec1.size)\n return numer / denom", "title": "" }, { "docid": "c6437296418d7ee6c7b52af3edd84239", "score": "0.55299497", "text": "def jcb(value):\n pattern = re.compile(r'^35')\n return card_number(value) and len(value) == 16 and pattern.match(value)", "title": "" }, { "docid": "c708d30e73aaad3994a2df537c13123f", "score": "0.5426238", "text": "def __len__(self):\r\n return self.server.zcard(self.key)", "title": "" }, { "docid": "da2dabe7bd381ed95e961f23086f5489", "score": "0.5423741", "text": "def get_jaccards(community, all_data):\n\n community_size, n_hashes = community.shape\n universe_size, _ = all_data.shape\n\n print 'calculating jaccard coefficients for', community_size, 'community members against universe of ', universe_size, ' members'\n\n jaccards = np.zeros((community_size, universe_size))\n community_sigs = community.values\n all_sigs = all_data.values\n for idx in range(community_size):\n comparison_signature = community_sigs[idx, :]\n\n # tile for broadcasting\n tiled_community = np.tile(comparison_signature, (universe_size, 1))\n # do a vectorize element-wise account1 == account_j for all j\n collisions = all_sigs == tiled_community\n jacc = np.sum(collisions, axis=1) / float(n_hashes)\n jaccards[idx, :] = jacc\n if idx % 100 == 0:\n print 'community member ', idx, ' complete'\n\n return jaccards", "title": "" }, { "docid": "66b4970a1f03bc22ff8bdf85ab5194ce", "score": "0.54089934", "text": "def ACE_CARD_VALUE():\n return 11", "title": "" }, { "docid": "8e56f7fe4f0d75f39448dfbe106ca526", "score": "0.53677046", "text": "def jaccard(self, X, Y=None):\n # Get both inputs as numpy array\n X = 
np.asarray(X)\n Y = np.asarray(Y) if Y is not None else X\n\n # Get unique inputs\n X_unique = np.asarray(list(sorted(set(X))))\n Y_unique = np.asarray(list(sorted(set(Y))))\n\n # Get mapping of destination -> fingerprints\n mapping = dict()\n # Loop over all training fingerprints\n for j, fingerprint in enumerate(Y_unique):\n # Loop over all destinations in fingerprint\n for destination in fingerprint:\n # Get fingerprint set\n fps = mapping.get(destination, set()) | set([(j, fingerprint)])\n # Add fingerprint to destination\n mapping[destination] = fps\n\n # Initialise result\n result = np.zeros((X_unique.shape[0], Y_unique.shape[0]), dtype=float)\n\n # Loop over all testing fingerprints\n for i, fingerprint in enumerate(X_unique):\n # Initialise partial matches\n matches = set()\n # Find partial matches\n for destination in fingerprint:\n matches |= mapping.get(destination, set())\n\n # Compute score per partial match\n for j, match in matches:\n # Compute fingerprint matching score\n score = len(fingerprint & match) / max(len(fingerprint | match), 1)\n # Assign score to result\n result[i, j] = score\n\n # Return result\n return result, X_unique, Y_unique", "title": "" }, { "docid": "26594f3768ffff00a8a5fc82e652fefe", "score": "0.5332601", "text": "def jaccard_list(options):\n print(\"Running Jaccard test\")\n if options.jaccardReference is None or options.jaccardQuery is None:\n raise IOError(\"Jaccard needs ref and query files!\")\n\n ref_by_chr = {}\n query_by_chr = {}\n\n # read in reference and store\n with open(options.jaccardReference) as ref_file:\n lines = ref_file.readlines()\n for line in lines:\n line = line.split('\\t')\n if line[0] not in ref_by_chr:\n ref_by_chr[line[0]] = []\n ref_by_chr[line[0]].append((int(line[1]), int(line[2])))\n print(\"Finished reading in reference\")\n\n # read query and store\n with open(options.jaccardQuery) as query_file:\n lines = query_file.readlines()\n for line in lines:\n line = line.split('\\t')\n if line[0] not in query_by_chr:\n query_by_chr[line[0]] = []\n query_by_chr[line[0]].append((int(line[1]), int(line[2])))\n print(\"Finished reading in query\")\n\n # sort input\n t = time.time()\n ref_by_chr = {k: sorted(v) for k, v in ref_by_chr.items()}\n query_by_chr = {k: sorted(v) for k, v in query_by_chr.items()}\n print (\"sort took: \" + str(time.time() - t))\n\n # calculate initial jaccard score\n init_score = jaccard_score(query_by_chr, ref_by_chr)\n print(\"Jaccard initial score: \" + str(round(init_score, 4)))\n\n # dictionary with maximum length for each chromosome\n max_chr_len = {k: max([interval[1] for interval in v]) for k, v in query_by_chr.items()}\n\n # do n random permutations\n n = 10\n scores = []\n for i in range(n):\n r = random.SystemRandom()\n random_query_by_chr = {\n k: [(random_offset - (interval[1] - interval[0]), random_offset) for (random_offset, interval)\n in ((r.randint(0, max_chr_len[k]), interval) for interval in v)]\n for k, v in query_by_chr.items()\n }\n score = round(jaccard_score(random_query_by_chr, ref_by_chr), 4)\n print(\"Jaccard random score #\" + str(n) + \": \" + str(score))\n scores.append(score)\n\n p = float(len([x for x in scores if x > init_score])) / n\n print (\"Joseph's p ([] > real / nr_tests): \" + str(p))", "title": "" }, { "docid": "09fa2ef969b4df727cb84715112b416d", "score": "0.5329333", "text": "def jaccard(expected, found):\n return _intersection_area_(expected, found) / _union_area_(expected, found)", "title": "" }, { "docid": "7b4b60781b0a4ebd93930eb632ff3aa6", "score": 
"0.52852726", "text": "def jac(self, time, sc, sm):\n raise NotImplementedError", "title": "" }, { "docid": "24e53312d1b58bb6c6bef336621aefd2", "score": "0.52400637", "text": "def jaccard(a,b,weights):\n result = weights*(a != b)*~numpy.isnan(a)*~numpy.isnan(b)\n N = numpy.nansum(~numpy.isnan(a)*~numpy.isnan(b)*weights)-numpy.nansum((a == 0)*(b == 0)*weights)\n return numpy.nansum(result)/N", "title": "" }, { "docid": "226f5775a9d5c5575afd49c07f5fc29d", "score": "0.5236876", "text": "def get_parity_oracle():\n public, private = ck.gen_RSA_keys()\n previous = []\n def oracle(cipher):\n plain = ck.cipher_RSA(cipher, private)\n return not (plain % 2)\n return public, oracle", "title": "" }, { "docid": "d8bee8894e760c0e46cf466612416565", "score": "0.523249", "text": "def jaccardSimilarity(vide01, video2):\n # List of shingles of the first video\n listSinglesV1 = vide01.split()\n del listSinglesV1[0]\n listSinglesV1 = [float(shingle) for shingle in listSinglesV1]\n \n # List of shingles of the first video\n listSinglesV2 = video2.split()\n del listSinglesV2[0]\n listSinglesV2 = [float(shingle) for shingle in listSinglesV2]\n \n # Set containing only unique shingle values:\n uniqueV1 = set(listSinglesV1)\n uniqueV2 = set(listSinglesV2)\n \n # Intersection (elements in common) between the shingles of the first and \n # the second videos:\n intersection = uniqueV1 & uniqueV2\n\n # Union (total number of shingle) between the shingles of the first and \n # the second videos: \n union = uniqueV1 | uniqueV2\n \n # Jaccard similarity formula:\n jaccard = float(len(intersection))/float(len(union)) \n \n return jaccard", "title": "" }, { "docid": "48db46f48fd1a64c9a95243e520ecb72", "score": "0.5222894", "text": "def test_card_display_invalid_joker(self):\n assert str(card.Card(\"Z\", card.SUITS[3])) == 'INVALID'", "title": "" }, { "docid": "eb8b5bec0bd38dc23fe4e4ca44590c85", "score": "0.5189091", "text": "def perform_jaccard_coefficient(truth, cluster):\r\n same, diff, both_zero = 0, 0, 0\r\n for idx_x, row in enumerate(truth):\r\n for idx_y, value in enumerate(row):\r\n if truth[idx_x][idx_y] == 1 and truth[idx_x][idx_y] == cluster[idx_x][idx_y]:\r\n same += 1\r\n elif truth[idx_x][idx_y] != cluster[idx_x][idx_y]:\r\n diff += 1\r\n else:\r\n both_zero += 1\r\n return (same + both_zero) * 1.0 / (same + both_zero + diff), (same) * 1.0 / (same + diff)", "title": "" }, { "docid": "eb8b5bec0bd38dc23fe4e4ca44590c85", "score": "0.5189091", "text": "def perform_jaccard_coefficient(truth, cluster):\r\n same, diff, both_zero = 0, 0, 0\r\n for idx_x, row in enumerate(truth):\r\n for idx_y, value in enumerate(row):\r\n if truth[idx_x][idx_y] == 1 and truth[idx_x][idx_y] == cluster[idx_x][idx_y]:\r\n same += 1\r\n elif truth[idx_x][idx_y] != cluster[idx_x][idx_y]:\r\n diff += 1\r\n else:\r\n both_zero += 1\r\n return (same + both_zero) * 1.0 / (same + both_zero + diff), (same) * 1.0 / (same + diff)", "title": "" }, { "docid": "dfb42506eb410f2cd21bb7f451318c6f", "score": "0.5172457", "text": "def jaccard(list1: List, list2: List) -> float:\n if not list1 and not list2:\n return 1\n return len(set(list1).intersection(set(list2))) / len(set(list1).union(set(list2)))", "title": "" }, { "docid": "ece2cc005c112dda06d7f66519ee7343", "score": "0.5158478", "text": "def mastercard(value):\n pattern = re.compile(r'^(51|52|53|54|55|22|23|24|25|26|27)')\n return card_number(value) and len(value) == 16 and pattern.match(value)", "title": "" }, { "docid": "708438334a9b0697aa6c0fb0b95f4edb", "score": "0.51531667", "text": "def 
jaccard_list(expected, found):\n f = jaccard\n return _sym_scores_(expected, found, f)", "title": "" }, { "docid": "0e2555893a4ce6446b121593c939fe2c", "score": "0.51442343", "text": "def card_without_subsets(self):", "title": "" }, { "docid": "d3c07a5aee2609ec57e19e3d1e70dc90", "score": "0.5141607", "text": "def set_joker(self):\n self.joker = random.choice(self.cards)\n\n # remove the Joker from Deck and display on Table for Players to see\n self.cards.remove(self.joker)\n\n for card in self.cards:\n if self.joker.rank == card.rank:\n card.isjoker = True", "title": "" }, { "docid": "c52d1ce0c2d785c7872e206289b898bf", "score": "0.5127895", "text": "def jaccard_guess(word, first_label, second_label):\n word1 = set([x for x in word] )\n label1 = set([x for x in first_label] )\n label2 = set([x for x in second_label])\n\n union_w1_l1 = word1 | label1\n int_w1_l1 = word1 & label1\n union_w1_l2 = word1 | label2\n int_w1_l2 = word1 & label2\n jaccard_dist_label_1 = 1.0 - 1.0*len(int_w1_l1)/len(union_w1_l1)\n jaccard_dist_label_2 = 1.0 - 1.0*len(int_w1_l2)/len(union_w1_l2)\n \n if min(jaccard_dist_label_1,jaccard_dist_label_2) > 0.6:\n return np.nan\n \n if jaccard_dist_label_1<jaccard_dist_label_2:\n return first_label\n else:\n return second_label", "title": "" }, { "docid": "e10bc498a017bc3d5b82119766e76e1d", "score": "0.51197934", "text": "def is_valid_run_joker(sequence):\n\n RANK_VALUE[\"A\"] = 1 # resetting value of A (may have been set to 14 in previous run)\n\n # Order the Cards in the sequence\n sort_sequence(sequence)\n\n # Push all Jokers to the end and count the number of Jokers\n push_joker_toend(sequence)\n joker_count = 0\n for card in sequence:\n if card.is_joker() == True:\n joker_count += 1\n\n # Make sure the Suit Match except for Jokers.\n for card in sequence:\n if card.is_joker() == True:\n continue\n if card.suit != sequence[0].suit:\n return False\n\n # This is to cover for K, Q and A run with Jokers\n if sequence[0].rank == \"A\":\n if sequence[1].rank == \"Q\" or sequence[1].rank == \"J\" or sequence[1].rank == \"K\":\n RANK_VALUE[sequence[0].rank] = 14\n sort_sequence(sequence)\n push_joker_toend(sequence)\n\n rank_inc = 1\n for i in range(1, len(sequence)):\n if sequence[i].is_joker() == True:\n continue\n # Compare RANK values with accomodating for Jokers.\n while (RANK_VALUE[sequence[i].rank] != RANK_VALUE[(sequence[i - 1].rank)] + rank_inc):\n # Use Joker Count for missing Cards in the run\n if joker_count > 0:\n rank_inc += 1\n joker_count -= 1\n continue\n else:\n # if No more Jokers left, then revert to regular comparison\n if RANK_VALUE[sequence[i].rank] != RANK_VALUE[(sequence[i - 1].rank)] + 1:\n return False\n else:\n break\n return True", "title": "" }, { "docid": "1f971fbc77f21394b54395ec27703b0f", "score": "0.5117552", "text": "def jaccard_index(nodes_a, nodes_b):\n return len(nodes_a & nodes_b) / len(nodes_a | nodes_b)", "title": "" }, { "docid": "88c668aabd408834c5d88af2f55696bd", "score": "0.51133937", "text": "def jaccard(set_a, set_b):\n intersection = set_a & set_b\n union = set_a | set_b\n return len(intersection) / len(union)", "title": "" }, { "docid": "0ce38233bf9e64fe8da84a4bd342e4df", "score": "0.5080538", "text": "def jaccard_coefficient(G, ebunch=None):\n def predict(u, v):\n union_size = len(set(G[u]) | set(G[v]))\n if union_size == 0:\n return 0\n return len(list(nx.common_neighbors(G, u, v))) / union_size\n return _apply_prediction(G, predict, ebunch)", "title": "" }, { "docid": "5a90b7662c307194bffd04638b171f5c", "score": "0.5067596", 
"text": "def BLACKJACK_NUMBER():\n return 21", "title": "" }, { "docid": "5620fef55cc7687de0e7bacefec7158f", "score": "0.5062469", "text": "def encrypt_card(deck):\n\n alphabet = get_alpha()\n\n deck = deck[:]\n\n card = pick_card(deck, position=0)\n if get_suit(card) == 5:\n deck = insert_card(card, deck, 0)\n return ''\n\n val = get_value(card)\n deck = insert_card(card, deck, 0)\n\n card = pick_card(deck, position=val)\n if get_suit(card) == 5:\n deck = insert_card(card, deck, val)\n return ''\n\n encrypt_val = get_encrypt_val(card)\n deck = insert_card(card, deck, val)\n\n return alphabet[encrypt_val]", "title": "" }, { "docid": "d6fd48bd5a8b36079a0e8f2f35024bc1", "score": "0.5052926", "text": "def jaccard_sim(\n *, comparison_vector: np.array, comparison_matrix: np.array\n ) -> np.array:\n vec_sum = np.sum(comparison_vector, axis=1)\n mat_sum = np.sum(comparison_matrix, axis=1).T\n\n overlap = comparison_vector.dot(comparison_matrix.T)\n return overlap / (vec_sum + mat_sum - overlap)", "title": "" }, { "docid": "d97a6216ac84e3e428905c0e93db1ce9", "score": "0.50505114", "text": "def jaccard_distance(ns_1, ns_2):\n #print('Computing distance between graphs by jaccard')\n u_size = len(ns_1.union(ns_2))\n if u_size == 0:\n return 1.0\n jres = 1-len(ns_1.intersection(ns_2))/u_size\n print(jres)\n return jres", "title": "" }, { "docid": "f33a39b9e4cdedbbe3b0398ea1005d24", "score": "0.50404567", "text": "def computeJaccardSim(self, crushlistA, crushlistB):\n \n if len(crushlistA) == 0:\n return len(crushlistB)\n \n intersection = crushlistA.intersection(crushlistB)\n intersectionSize = len(intersection)\n \n union = crushlistA.union(crushlistB)\n unionSize = len(union)\n \n similarity = float(intersectionSize) / unionSize;\n \n return similarity", "title": "" }, { "docid": "a0daaa8777adf7616e4f9e217b56eb79", "score": "0.5035781", "text": "def __le__(self, other_card):\n ## YOUR CODE IS HERE ##", "title": "" }, { "docid": "a0133619eb942601a1301b85d04f3aee", "score": "0.4983885", "text": "def jaccard_binary(x,y):\n intersection = np.logical_and(x, y)\n union = np.logical_or(x, y)\n similarity = intersection.sum() / float(union.sum())\n return similarity", "title": "" }, { "docid": "89869fb30f2532359d25d3ca9c5e59a2", "score": "0.49805015", "text": "def is_equal(self, card):\n if self.suit == card.suit and self.number == card.number:\n return True\n else: \n return False", "title": "" }, { "docid": "f50d38dfe69cf5996e58a256dd8dfc6f", "score": "0.49794796", "text": "def jaccard_similarity(self, set1, set2):\n #union = set1.union(set2)\n intersect = set1.intersection(set2)\n #similarity = float(len(intersect) / len(union))\n similarity = float(len(intersect) / (len(set1) + len(set2) - len(intersect)))\n return similarity", "title": "" }, { "docid": "237b2e524f492a0974353bea4342df8b", "score": "0.4961775", "text": "def identity_card_number(self):\n identity = []\n\n for _ in range(3):\n identity.append(self.random_letter().upper())\n\n # it will be overwritten by a checksum\n identity.append(0)\n\n for _ in range(5):\n identity.append(self.random_digit())\n\n identity[3] = checksum_identity_card_number(identity)\n\n return ''.join(str(character) for character in identity)", "title": "" }, { "docid": "f7b336ac555789672483526e38a16669", "score": "0.49616417", "text": "def jaccard_similarity(self,x,y):\r\n intersection_cardinality = len(set.intersection(*[set(x), set(y)]))\r\n union_cardinality = len(set.union(*[set(x), set(y)]))\r\n return intersection_cardinality/float(union_cardinality)", "title": 
"" }, { "docid": "d429d66747ad9f5b5ddbdb48fa1b6635", "score": "0.49548998", "text": "def jaccard ( set1, set2 ):\n count_union = len( set1.__or__( set2 ) )\n count_intersection = len( set1.__and__( set2 ) )\n return count_intersection / float( count_union )", "title": "" }, { "docid": "d51ebfa6c6707371cfa67bb4051b6bda", "score": "0.49530596", "text": "def jaccardSimilarity(self,sentences,stem=True):\n tokens = [self.nlpWrapper.tokenize(sentence) for sentence in sentences]\n temp =[]\n if stem==True:\n for t in tokens:\n temp.append(self.nlpWrapper.stemmer(tokens=t))\n elif stem==False:\n temp = tokens\n\n tempMatrix = numpy.zeros(shape=(len(sentences),len(sentences)))\n\n for combo in itertools.product(range(len(sentences)),repeat=2):\n num = len(set(temp[combo[0]]).intersection(set(temp[combo[1]])))\n denom = len(set(temp[combo[0]]) | set(temp[combo[1]]))\n if denom<=0 or num<=0:\n tempMatrix[combo[0],combo[1]] = 0.0\n else:\n if float(num/denom) >= self.jaccardThreshold:\n tempMatrix[combo[0],combo[1]] = num/denom\n\n return csr_matrix(tempMatrix)", "title": "" }, { "docid": "9617c52bfed4e5b5a5fe3d21cfe62535", "score": "0.49422717", "text": "def get_credit_card(self, login, card_nick):\n return self._call_api('GET', ('u', login.email, 'ccs', normalize(card_nick, 'nick')), login=login)", "title": "" }, { "docid": "2a086c7f6f2ddb81907c480ff40ff72d", "score": "0.4914514", "text": "def same_rank(self, card):\n if self.number == card.number:\n return True\n else:\n return False", "title": "" }, { "docid": "da60d4c2db316089e551f0012fbbf83a", "score": "0.4912038", "text": "def same_suit(self, card):\n if self.suit == card.suit:\n return True\n else:\n return False", "title": "" }, { "docid": "bc905662e5fbe4806872ec87d58a6f00", "score": "0.49070728", "text": "def test_jaccard_similarity_populated_1():\n assert metrics.jaccard_similarity(LEFT_POPULATED_1, RIGHT_POPULATED_1) == 1.0", "title": "" }, { "docid": "af94542117722cac918acaecf954de8c", "score": "0.49047828", "text": "def matches(self, card):\n return True", "title": "" }, { "docid": "84e570dc0b568051ecccb758b485f826", "score": "0.49038815", "text": "def test_play_rotate(self):\n self.plr.test_input = [\"Rotate\"]\n self.plr.play_card(self.card)\n card = self.g[\"Forts\"].remove()\n self.assertEqual(card.name, \"Garrison\")", "title": "" }, { "docid": "ee2fb8992e97903abfa7ac9d43ccb088", "score": "0.49004102", "text": "def is_joker(self):\n return self.isjoker", "title": "" }, { "docid": "71eee290a59c699858e42f2481cfc416", "score": "0.488928", "text": "def card_to_id(card):\n return faces[card[0]] + card[1]", "title": "" }, { "docid": "be4ad69024cd01a4ae71a3cf3bf035d2", "score": "0.48872548", "text": "def login(cards):\n card_number = str(input(LOGIN_CARD_MSG))\n card_pin = str(input(LOGIN_PIN_MSG))\n\n for index, card in enumerate(cards):\n if card_number == card.number:\n if card_pin == card.pin:\n return index\n return -1", "title": "" }, { "docid": "6f4c87437d3d1f53796b4853dc373427", "score": "0.486613", "text": "def __check_black_jack(self):\n if len(self.hand) == 2 and self.hand_value[0] == 21:\n self.black_jack = True", "title": "" }, { "docid": "7b5fbcb33c822a77d71b8c6578e4bdf8", "score": "0.4865372", "text": "def get_jaccard_sim(str1, str2):\n a = set(str1.split())\n b = set(str2.split())\n c = a.intersection(b)\n return float(len(c)) / (len(a) + len(b) - len(c))", "title": "" }, { "docid": "3218a836c15d3de90ec6515fd0dbd9f6", "score": "0.48578888", "text": "def card_to_binary(card):\n # This is Cactus Kev's algorithm, reimplemented 
in Python since we can't\n # use C libraries\n\n # First we need to generate the following representation\n # Bits marked x are not used.\n # xxxbbbbb bbbbbbbb cdhsrrrr xxpppppp\n\n # b is one bit flipped for A-2\n # c, d, h, s are flipped if you have a club, diamond, heart, spade\n # r is just the numerical rank in binary, with deuce = 0\n # p is the prime from LookupTable.primes corresponding to the rank,\n # in binary\n # Then shift appropriately to fit the template above\n\n b_mask = 1 << (14 + card.rank)\n cdhs_mask = 1 << (card.suit + 11)\n r_mask = (card.rank - 2) << 8\n p_mask = LookupTables.primes[card.rank - 2]\n # OR them together to get the final result\n return b_mask | r_mask | p_mask | cdhs_mask", "title": "" }, { "docid": "3218a836c15d3de90ec6515fd0dbd9f6", "score": "0.48578888", "text": "def card_to_binary(card):\n # This is Cactus Kev's algorithm, reimplemented in Python since we can't\n # use C libraries\n\n # First we need to generate the following representation\n # Bits marked x are not used.\n # xxxbbbbb bbbbbbbb cdhsrrrr xxpppppp\n\n # b is one bit flipped for A-2\n # c, d, h, s are flipped if you have a club, diamond, heart, spade\n # r is just the numerical rank in binary, with deuce = 0\n # p is the prime from LookupTable.primes corresponding to the rank,\n # in binary\n # Then shift appropriately to fit the template above\n\n b_mask = 1 << (14 + card.rank)\n cdhs_mask = 1 << (card.suit + 11)\n r_mask = (card.rank - 2) << 8\n p_mask = LookupTables.primes[card.rank - 2]\n # OR them together to get the final result\n return b_mask | r_mask | p_mask | cdhs_mask", "title": "" }, { "docid": "7da0ed6a07397e3803202a8533ae1bfe", "score": "0.48539346", "text": "def test_card(color, rank, resulting_card):\n uno_card = Card(color, rank)\n assert str(uno_card) == resulting_card", "title": "" }, { "docid": "464362399154b17a9da8c949e057c4d8", "score": "0.48496106", "text": "def next_card(self, card_type=None):", "title": "" }, { "docid": "d86822638b3c10b040306f3fc469b0ab", "score": "0.48312718", "text": "def test_generic_card_position(self):\n car = self.dek[0]\n self.assertEqual(str(car), \"2c\")", "title": "" }, { "docid": "09d5b568d126a398987784438575dc01", "score": "0.48237142", "text": "def __init__(self):\n self.card_deck = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] * 4", "title": "" }, { "docid": "1d006852e278bd577e8f3d614d53fd6c", "score": "0.48208869", "text": "def test_jaccard_similarity_populated_2():\n assert metrics.jaccard_similarity(LEFT_POPULATED_2, RIGHT_POPULATED_2) == 0.2", "title": "" }, { "docid": "7c618fa25bcf5fc1a9f35f9c132aad16", "score": "0.4818386", "text": "def captura5():", "title": "" }, { "docid": "da04464de40363c5f59b2ca07c11a6bf", "score": "0.48143682", "text": "def name_card(card, name):\n db.name_card_db(card, name)", "title": "" }, { "docid": "c9e8a89de637d8d66f8c216edebf91d1", "score": "0.48125067", "text": "def test_generic_card(self):\n car = self.dek.deal_card()\n self.assertEqual(str(car), \"As\")", "title": "" }, { "docid": "1a50766448a0fc2a0bdf63c1c3c63834", "score": "0.4807209", "text": "def attack(self) -> bool:\n challenge = self.oracle.challenge()\n self.cookie = bytearray([0] * len(challenge))\n\n # Sorry, this code is really really slow.\n # I've put it here more as a PoC than as a real attack.\n # A faster language is definitely advised.\n\n prefix = b'A' * (len(challenge) // 16)\n for i in range(0, (len(challenge) // 2) + 1):\n print(\"Progress: {}/{}.\".format(i, (len(challenge) // 2) + 1))\n prefix += 
b'A'\n\n bias_15 = collections.defaultdict(lambda: 0)\n bias_31 = collections.defaultdict(lambda: 0)\n\n for _ in range(2 ** 24): # should be something around 2 ** 30\n cipher = self.oracle.experiment(prefix)\n assert len(cipher) >= 32\n\n bias_15[cipher[15] ^ 240] += 2\n bias_15[cipher[15] ^ 0] += 1\n bias_15[cipher[15] ^ 16] += 1\n\n bias_31[cipher[31] ^ 224] += 2\n bias_31[cipher[31] ^ 0] += 1\n bias_31[cipher[31] ^ 32] += 1\n\n k = 15 - i - (len(challenge) // 16) - 1\n if k >= 0:\n most_biased = sorted(bias_15, key=lambda k: bias_15[k], reverse=True)\n for b in most_biased:\n if chr(b).isalpha():\n self.cookie[k] = b\n else:\n self.cookie[k] = most_biased[0]\n\n k = 31 - i - (len(challenge) // 16) - 1\n most_biased = sorted(bias_31, key=lambda k: bias_31[k], reverse=True)\n for b in most_biased:\n if chr(b).isalpha():\n self.cookie[k] = b\n else:\n self.cookie[k] = most_biased[0]\n\n self.cookie = bytes(self.cookie)\n return self.oracle.guess(self.cookie)", "title": "" }, { "docid": "edef4e1815b9d54ab2476ab54ce6c39a", "score": "0.48061603", "text": "def average_jaccard_sim(s):\n\treturn reduce(lambda x,y : x+y , map(lambda x : jaccard_sim(x[0], x[1]), pairwise_set(s)), 0) / len([x for x in pairwise_set(s)])", "title": "" }, { "docid": "4a2ba483cdd04d0a0cad206cd23077c7", "score": "0.48055252", "text": "def get_assassin_card(self):\n return self.assassin_card", "title": "" }, { "docid": "97283da82783e9634068ef8bf7560b08", "score": "0.47923812", "text": "def test_user_card(self):\n\n fname = 'simplesim_user'\n inp = inputfile.MCNPInput(self.sim, float_format=\"%.5e\", title=\"1\")\n inp.add_user_card('cell', '11 0 -5 IMP:N=0', comment='Graveyard.')\n inp.add_user_literal('cell', 'M1 1001 1\\n 8016 2')\n inp.add_user_card('surface', '11 0 -5 IMP:N=0', comment='Graveyard.')\n inp.add_user_literal('surface', 'M1 1001 1\\n 8016 2')\n inp.add_user_card('data', '11 0 -5 IMP:N=0', comment='Graveyard.')\n inp.add_user_literal('data', 'M1 1001 1\\n 8016 2')\n inp.write(fname + '_first')\n self.assertEquals(\n open(fname + '_first').readlines(),\n open(fname + '_first_compare').readlines())\n os.unlink(fname + '_first')", "title": "" }, { "docid": "ac53b40a5239f8a277106f61309eed84", "score": "0.47893333", "text": "def getCardIndex(self) -> int:\n ...", "title": "" }, { "docid": "37e0c837689ad960589af8a68f79f30c", "score": "0.47890607", "text": "def short_card_details(self):\n if self._face_up:\n return f\"{self._rank_short}-{self._suit[0]}\"\n else:\n return \"*-*\"", "title": "" }, { "docid": "6f42adb24dc7be0e0cdf5c6a951bdec4", "score": "0.47832516", "text": "def move_joker(card, deck):\n\n card = ugly_card(card)\n x = find_card(card, deck)\n y = pick_card(deck, card)\n #import pdb; pdb.set_trace()\n if get_value(y) == 'a':\n if x == len(deck):\n deck = insert_card(y, deck, 1)\n else:\n deck = insert_card(y, deck, x+1)\n elif get_value(y) == 'b':\n if x == len(deck):\n deck = insert_card(y, deck, 2)\n elif x == len(deck)-1:\n deck = insert_card(y, deck, 1)\n else:\n deck = insert_card(y, deck, x+2)\n return deck", "title": "" }, { "docid": "d44e8d481697902d2c9eb55085bc5674", "score": "0.47812685", "text": "def __init__(self, password):\n import crypt\n \n self.n1 = 0\n self.n2 = 0\n\n self.t1 = list(range(self.ROTORSZ))\n self.t2 = [0] * self.ROTORSZ\n self.t3 = [0] * self.ROTORSZ\n\n buffer = crypt.crypt(password, password)\n\n buffer_length = len(buffer)\n if buffer_length <= 0:\n print ('no password supplied?')\n\n seed = 123\n\n for i in range(buffer_length):\n seed = (seed * ord(buffer[i]) + 
i) % self.b32\n\n for i in range(self.ROTORSZ):\n seed = (5 * seed + ord(buffer[i % buffer_length])) % self.b32\n if seed >= self.b31:\n sign = -1\n signed_seed = seed - self.b32\n else:\n sign = 1\n signed_seed = seed\n random = sign * int(abs(signed_seed) % 65521)\n k = self.ROTORSZ - 1 - i\n ic = (random & self.MASK) % (k + 1)\n random = (random >> 8) & self.MASK\n temp = self.t1[k]\n self.t1[k] = self.t1[ic]\n self.t1[ic] = temp\n if self.t3[k] != 0:\n continue\n if k == 0:\n print(('[0] can\\t %% by zero. k=%i' % (k)))\n ic = (random & self.MASK) % k\n while self.t3[ic] != 0:\n if k == 0:\n print(('[1] can\\'t %% by zero. k=%i' % (k)))\n ic = (ic + 1) % k\n self.t3[k] = ic\n self.t3[ic] = k\n for i in range(self.ROTORSZ):\n self.t2[self.t1[i] & self.MASK] = i", "title": "" }, { "docid": "02611eee4c9665803ded81606c58668e", "score": "0.47748756", "text": "def test4(self):\n self.spawn(\"./credit\").stdin(\"5105105105105100\").stdout(\"^MASTERCARD\\n\", \"MASTERCARD\\n\").exit(0)", "title": "" }, { "docid": "e7b75a7c633932354c90535c628c6a87", "score": "0.4770799", "text": "def cardinality(self):\n card = 0\n i = len(self.__bits) - 1\n while i >= 0:\n i -= 1\n bg_a = self.__bits[i]\n # Take care of common cases.\n if bg_a == 0:\n continue\n\n if bg_a == -1:\n card += 64\n continue\n\n # Successively collapse alternating bit groups into a sum.\n bg_a = ((bg_a >> 1) & 0x5555555555555555) + (bg_a & 0x5555555555555555)\n bg_a = ((bg_a >> 2) & 0x3333333333333333) + (bg_a & 0x3333333333333333)\n bg_b = (bg_a >> 32) + bg_a\n bg_b = ((bg_b >> 4) & 0x0f0f0f0f) + (bg_b & 0x0f0f0f0f)\n bg_b = ((bg_b >> 8) & 0x00ff00ff) + (bg_b & 0x00ff00ff)\n card += ((bg_b >> 16) & 0x0000ffff) + (bg_b & 0x0000ffff)\n return card", "title": "" }, { "docid": "63eef5e6758cfb5dfb43cf9616bc7ef4", "score": "0.47697482", "text": "def jaccard_similarity(x, y):\n intersection = len(set.intersection(*[set(x), set(y)]))\n union = len(set.union(*[set(x), set(y)]))\n return intersection / float(union)", "title": "" }, { "docid": "59fe3b5ddfc38e8dcc72bc8924eca3cb", "score": "0.47681937", "text": "def slice_jaccard(probe_feat, topk_index_feats):\n\n query_num = 1\n gallery_num = len(topk_index_feats)\n all_num = query_num + gallery_num\n concat_feat = torch.cat([probe_feat, topk_index_feats])\n cos_sim = torch.matmul(concat_feat, concat_feat.T) # (101, 101)\n original_dist = 1.0 - (cos_sim + 1.0)/2\n initial_rank = torch.argsort(original_dist, dim=1)\n initial_rank = initial_rank.cpu().numpy()\n original_dist = original_dist.cpu().numpy()\n # print(f'Memory usage: {psutil.virtual_memory().percent}')\n V = np.zeros((all_num, all_num))\n gallery_num = original_dist.shape[0]\n\n k1 = setting['K1']\n k2 = setting['K2']\n for i in range(all_num):\n # k-reciprocal neighbors\n forward_k_neigh_index = initial_rank[i,:k1+1]\n backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1]\n fi = np.where(backward_k_neigh_index==i)[0]\n k_reciprocal_index = forward_k_neigh_index[fi]\n k_reciprocal_expansion_index = k_reciprocal_index\n for j in range(len(k_reciprocal_index)):\n candidate = k_reciprocal_index[j]\n candidate_forward_k_neigh_index = initial_rank[candidate,:int(np.around(k1/2.))+1]\n candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,:int(np.around(k1/2.))+1]\n fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n if len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 
2./3*len(candidate_k_reciprocal_index):\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)\n\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n weight = np.exp(-original_dist[i,k_reciprocal_expansion_index])\n V[i,k_reciprocal_expansion_index] = weight/np.sum(weight)\n original_dist = original_dist[:query_num,]\n if k2 != 1:\n V_qe = np.zeros_like(V,dtype=np.float32)\n for i in range(all_num):\n V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:],axis=0)\n V = V_qe\n del V_qe\n del initial_rank\n invIndex = []\n for i in range(gallery_num):\n invIndex.append(np.where(V[:,i] != 0)[0])\n\n jaccard_dist = np.zeros_like(original_dist,dtype = np.float32)\n\n for i in range(query_num):\n temp_min = np.zeros(shape=[1,gallery_num],dtype=np.float32)\n indNonZero = np.where(V[i,:] != 0)[0]\n indImages = []\n indImages = [invIndex[ind] for ind in indNonZero]\n for j in range(len(indNonZero)):\n temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])\n jaccard_dist[i] = 1-temp_min/(2-temp_min)\n\n # final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value\n del original_dist\n del V\n slice_jaccard = jaccard_dist[:query_num,query_num:].flatten()\n return torch.tensor(slice_jaccard).cuda().half()", "title": "" }, { "docid": "617839ea9ce585ed788ad3c58ec64351", "score": "0.47637737", "text": "def play(self, player: int, move: Tuple[int, str]) -> bool:\n if self.is_finished:\n return False\n\n if self.turn_index != player:\n return False\n\n if self.options[\"์ˆœ์„œ๋Œ€๋กœ\"] or self.options[\"๋žœ๋ค์ˆœ์„œ\"]:\n move = (\n 6 - len([hand for hand in self.hands[player] if hand is None]),\n move[1],\n )\n\n card_name = self.hands[player][move[0] - 1]\n if card_name is None:\n return False\n\n board_index = ord(move[1]) - 97\n if self.board[board_index] is not None:\n return False\n\n self.hands[player][move[0] - 1] = None\n\n changed_card_positions = [board_index]\n\n self.board[board_index] = (card_name, player)\n\n # check for \"SAME\"\n if self.options[\"๋™์ˆ˜\"]:\n affected_cards = self.check_same(board_index)\n if len(affected_cards) >= 2:\n changed_card_positions.extend(\n [pos for pos in affected_cards if self.board[pos][1] != player]\n )\n\n # check for \"PLUS\"\n if self.options[\"ํ•ฉ์‚ฐ\"]:\n affected_cards = self.check_plus(board_index)\n changed_card_positions.extend(\n [pos for pos in affected_cards if self.board[pos][1] != player]\n )\n\n if len(changed_card_positions) < 2:\n # normal\n self.board[board_index] = (card_name, player)\n self.flip(board_index)\n\n else:\n # combo\n for pos in changed_card_positions:\n if pos != board_index:\n self.board[pos] = (self.board[pos][0], player)\n\n self.flip_combo(changed_card_positions)\n\n self.turn_index = 1 - self.turn_index\n\n if all(tile is not None for tile in self.board):\n self.terminate()\n\n return True", "title": "" }, { "docid": "942f1b14d2e644bacb37c9f89afb2c9d", "score": "0.4752759", "text": "def jaccard_distances_array(in_array, num_users):\n\n distance_sets = np.full((num_users, num_users),1.0)\n\n for src_index, src_row in enumerate(distance_sets):\n\n for dst_index, dst_row in enumerate(in_array):\n # Do a triangle and mirror\n if src_index > dst_index:\n continue\n src_row = in_array[src_index]\n dst_row = in_array[dst_index]\n if src_row and dst_row:\n # If either set is empty no point continuing. 
Otherwise get cardinalities\n intersection_cardinality = len(set.intersection(*[set(src_row), set(dst_row)]))\n union_cardinality = len(set.union(*[set(src_row), set(dst_row)]))\n distance_sets[src_index][dst_index] = 1.0 - (\n float(intersection_cardinality)/float(union_cardinality))\n \n for src_index, src_row in enumerate(distance_sets):\n for dst_index, dst_row in enumerate(distance_sets):\n # Mirror half of the triangle\n if src_index > dst_index:\n distance_sets[src_index][dst_index] = distance_sets[dst_index][src_index]\n print(distance_sets[src_index])\n return distance_sets", "title": "" }, { "docid": "139a25f290b040ee565d280046d02edc", "score": "0.47515318", "text": "def check_for_Blackjack(self):\n assert len(self.__hand) == 2, 'Blakjack hand must contain exactly two cards.'\n if self.__val == 21:\n return True\n else:\n return False", "title": "" }, { "docid": "0359d4528e9dd1dcd0a8ad2c8f1e0600", "score": "0.47345018", "text": "def extract_and_cipher_id(soup):\r\n license_num = soup.find_all(class_='t_G_11_BF')[0].text[11:]\r\n license_num = license_num[2:] if '1-' in license_num else license_num\r\n try: ## for if the doctor if rofe toran and has no id number\r\n license_num = int(license_num)\r\n except:\r\n license_num = 0\r\n chiphered_license_num = license_num * KEY ##TODO problem\r\n return chiphered_license_num", "title": "" }, { "docid": "16f8c6395afe377621f31376d759db2b", "score": "0.4732471", "text": "def saltar_turno(self, jugador):\n if jugador == 0:\n proximo_en_jugar = 1\n else:\n proximo_en_jugar = 0\n return proximo_en_jugar", "title": "" }, { "docid": "efcbff5c3a05d95eba20ae6528f77320", "score": "0.4721919", "text": "def cifradoCesarAlfabetoIngles(cadena):\r\n # Definir la nueva cadena resultado\r\n resultado = ''\r\n # Realizar el \"cifrado\", sabiendo que A = 65, Z = 90, a = 97, z = 122\r\n i = 0\r\n while i < len(cadena):\r\n # Recoge el caracter a cifrar\r\n ordenClaro = ord(cadena[i])\r\n ordenCifrado = 0\r\n # Cambia el caracter a cifrar\r\n if (ordenClaro >= 65 and ordenClaro <= 90):\r\n #Cifra los caracteres en mayusculas\r\n ordenCifrado = (((ordenClaro - 65) + 3) % 26) + 65\r\n elif (ordenClaro >= 97 and ordenClaro <= 122):\r\n #Cifra los caracteres en minusculas\r\n ordenCifrado = (((ordenClaro - 97) + 3) % 26) + 97\r\n # Anade el caracter cifrado al resultado\r\n resultado = resultado + chr(ordenCifrado)\r\n i = i + 1\r\n # devuelve el resultado\r\n return resultado", "title": "" }, { "docid": "5d9623f8cfb51945072ac09707750199", "score": "0.47189477", "text": "def jaccard_wt(graph, node):\n neighbors = set(graph.neighbors(node))\n scores = []\n for n in graph.nodes():\n \tif ((n not in neighbors) and (n !=node)):\n \t\tneighbors2 = set(graph.neighbors(n))\n\n \t\tnumerator = 0\n \t\tSum_A_degrees = 0\n \t\tSum_B_degrees = 0\n\n \t\tfor i in neighbors & neighbors2:\n \t\t\t\tnumerator += 1 / (graph.degree(i))\n \t\tfor i in graph.neighbors(node):\n \t\t\tSum_A_degrees += (graph.degree(i))\n \t\tfor j in graph.neighbors(n):\n \t\t\tSum_B_degrees += (graph.degree(j))\n\n \t\tdenominator = (1/Sum_A_degrees) + (1/Sum_B_degrees)\n\n \t\tscore = numerator/denominator\n\n \t\tscores.append(((node, n), score))\n return sorted(scores, key=lambda x:(-x[1]))\n pass", "title": "" }, { "docid": "d77947ad6f535afd68223c08f956f088", "score": "0.47145268", "text": "def test_jaccard_similarity_left_empty():\n assert metrics.jaccard_similarity(LEFT_EMPTY, RIGHT_POPULATED_1) == 0.0", "title": "" }, { "docid": "30e4cdb4eca3753ab5aba5ebcacbf52c", "score": "0.47035876", 
"text": "def operate_cipher(self):", "title": "" }, { "docid": "f4aadf1fc5d9c5a1eedbdf4e36e6f33c", "score": "0.46993625", "text": "def test_register_card_helper_success(self):\n card = register_card(self.card_number, self.user_2)\n\n self.assertIsInstance(card, Card)", "title": "" }, { "docid": "4b2051eda0480eb43875eb8a3b62049b", "score": "0.46989545", "text": "def test3(self):\n self.spawn(\"./credit\").stdin(\"5555555555554444\").stdout(\"^MASTERCARD\\n\", \"MASTERCARD\\n\").exit(0)", "title": "" }, { "docid": "32fc1820cfd4159f89e8dd71e0eba78b", "score": "0.4696062", "text": "def select_card(self):\n\t\treturn self.traverse((-float(\"inf\"), self.hand[0], 0), self.private_hand, self.hand, 2, self.game)", "title": "" }, { "docid": "df066826f496b1e3094605efaf8bf728", "score": "0.4692337", "text": "def trasforma(k, c):\n\n alfabeto = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',\n 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n i = 0\n while alfabeto[i] != c:\n i = i + 1\n\n j = (i + k) % 26\n t = alfabeto[j]\n\n return t", "title": "" }, { "docid": "20bf21fd77f5e8f01cd6e5df1086ceed", "score": "0.46906933", "text": "def set_AL_MB_connectome(self):\n\t\t\n\t\tself.Jj_mask = sp.zeros((self.Mm, self.Zz))\n\t\tsp.random.seed(self.Jj_mask_seed)\n\t\tfor iZ in range(self.Zz):\n\t\t\tidxs = sp.random.choice(range(self.Mm), self.Zz_sparse, replace=0)\n\t\t\tself.Jj_mask[idxs, iZ] = 1", "title": "" }, { "docid": "3f762f18dfd46c96dd04134cd7daff63", "score": "0.4683049", "text": "def blackjack(self):\n if not self.splithand and self.value == 21:\n if all(c.value == 7 for c in self.cards) and BLACKJACK_RULES['triple7']:\n return True\n elif self.length() == 2:\n return True\n else:\n return False\n else:\n return False", "title": "" }, { "docid": "6a52591bb5fe8eafd66a51a50ee565e3", "score": "0.4682985", "text": "def attack(self) -> bool:\n cipher, public = self.oracle.challenge()\n assert self.oracle.experiment(cipher)\n\n e, n = public\n immutable_slice = collections.namedtuple(\n \"immutable_slice\", [\"start\", \"stop\"]\n )\n\n k = n.bit_length()\n B = pow(2, k - 16)\n B2 = 2 * B\n B3 = 3 * B\n\n s_i = -1\n c0 = cipher\n M = {immutable_slice(B2, B3 - 1), }\n\n def _is_done() -> bool:\n \"\"\"\n Check whether the attack should stop.\n :return: True or false.\n \"\"\"\n if len(M) != 1:\n return False\n\n _M = M.copy()\n inner_slice = _M.pop()\n return inner_slice.start == inner_slice.stop\n\n def _update_m() -> set:\n \"\"\"\n After finding a new value for s_i,\n update the M set.\n \"\"\"\n new_m = set()\n\n for a, b in M:\n for r in range(\n matasano.math.int_division_ceil(a * s_i - B3 + 1, n),\n matasano.math.int_division_floor(b * s_i - B2, n) + 1\n ):\n new_a = max(a, matasano.math.int_division_ceil(B2 + r * n, s_i))\n new_b = min(b, matasano.math.int_division_floor(B3 - 1 + r * n, s_i))\n\n if new_a <= new_b:\n new_m.add(\n immutable_slice(\n new_a,\n new_b\n )\n )\n\n return new_m\n\n def _iterate(s_minus_1: int) -> int:\n \"\"\"\n Iterate for a new round.\n\n :param s_minus_1: The previous s_i-1 value.\n :return: The new s_i value.\n \"\"\"\n assert len(M) != 0, \\\n \"M should contain at least one element.\"\n\n if len(M) > 1:\n for inner_s in range(s_minus_1 + 1, n):\n if self.oracle.experiment(c0 * pow(inner_s, e, n)):\n return inner_s\n else: # len(M) == 1\n a, b = M.copy().pop()\n\n for r in range(\n ((b * s_minus_1 - B2) * 2) // n,\n n\n ):\n for inner_s in range(\n (B2 + r * n) // b,\n ((B3 - 1 + r * n) // a) + 1\n ):\n if 
self.oracle.experiment(c0 * pow(inner_s, e, n)):\n return inner_s\n\n assert False, \\\n \"Something went wrong while finding s_i\"\n\n # Initial round, for i == 1\n for new_s in range(matasano.math.int_division_ceil(n, B3), n):\n if self.oracle.experiment(c0 * pow(new_s, e, n)):\n s_i = new_s\n M = _update_m()\n break\n else:\n assert False, \\\n \"Something went wrong while finding s_1\"\n\n while not _is_done():\n s_i = _iterate(s_i)\n M = _update_m()\n\n a = M.pop().start\n self.message = matasano.util.bytes_for_int(\n a\n )\n\n return self.oracle.guess(a)", "title": "" }, { "docid": "a421d3b55fbf709549d8e5e224857479", "score": "0.46752065", "text": "def jaccard_similarity(messages, samples=200):\n N, A = messages.shape[0], messages.shape[1]\n combinations = list(itertools.combinations(range(A), 2))\n score = 0.0\n for c in combinations:\n for _ in range(samples):\n s = np.random.randint(N)\n score += jaccard_score(\n messages[s, c[0], :], messages[s, c[1], :], average=\"macro\"\n )\n\n # average over number of combinations\n score /= len(combinations) * samples\n\n return score", "title": "" }, { "docid": "081fd7d3170f6944968e6d4a7c32d135", "score": "0.46727148", "text": "def test_check_cloginrc(self):\n name = '_test_cloginrc'\n path = RancidCmd.check_cloginrc(name=name)\n is_exists = os.path.isfile(path)\n mode = os.stat(path).st_mode\n if is_exists:\n os.remove(path)\n self.assertEqual(is_exists, True)\n self.assertEqual(mode, 33216) # oct(33216) == '0o100700'", "title": "" } ]
01866748369ab7177e45157e7fbc9253
Prints and returns the last digit of a number. If given something that's not a number, this returns None.
[ { "docid": "c0afb8ee1076a323ef93c4eb9f945235", "score": "0.8328857", "text": "def print_last_digit(number):\n number = (abs(number) % 10)\n print(\"{}\".format(number), end=\"\")\n return (int(number))", "title": "" } ]
[ { "docid": "b862fc37bc5cbaeaeab1578228d23fe9", "score": "0.8458338", "text": "def print_last_digit(number):\n print(abs(number) % 10, end=\"\")\n return (abs(number) % 10)", "title": "" }, { "docid": "67d1a5ad30a32391a4e44367e8b22772", "score": "0.7971203", "text": "def last_digit(number):\n return int(str(number)[-1])", "title": "" }, { "docid": "af56803d0224b11498c1266a88062d2a", "score": "0.63217986", "text": "def find_largest_digit(n):\n n = str(n)\n if len(n) == 1: # base point\n return n\n else:\n if n[0] <= n[1]: # if head number is smaller than next number, trimming header.\n return find_largest_digit(n[1:])\n else:\n n = n[0] + n[2:] # if header greater than next number, trimming next number.\n return find_largest_digit(n)", "title": "" }, { "docid": "85972a5f9bc02c8f3eac66eb307d4c5f", "score": "0.6075543", "text": "def largest_number(digits):\n res = \"\"\n while digits:\n max_digit = float(\"-inf\")\n for digit in digits:\n if greater_or_equal(digit, max_digit):\n max_digit = digit\n res += max_digit\n digits.remove(max_digit)\n return res", "title": "" }, { "docid": "522e263d05c7f93ab596321efcec89f2", "score": "0.60742664", "text": "def get_last(self):\n if self.display:\n return self.display\n\n if self.stack:\n return str(self.stack[-1])\n\n return '0'", "title": "" }, { "docid": "7d5de32812450faac38b74f2fd88a739", "score": "0.5969337", "text": "def print_digits(num):\n\n # START SOLUTION\n\n while not num % 10 == num:\n\n next_digit = num % 10\n print next_digit\n num = (num - next_digit) / 10\n\n print num", "title": "" }, { "docid": "00ad46c5cc4860a3996ed79493aeac85", "score": "0.59287137", "text": "def number_end_str(self):\n if hasattr(self, 'number'):\n end_string = \"th\"\n if self.number % 10 == 1:\n end_string = \"st\"\n elif self.number % 10 == 2:\n end_string = \"nd\"\n elif self.number % 10 == 3:\n end_string = \"rd\"\n\n return end_string", "title": "" }, { "docid": "0aa8eba9ec2fce0636930b31c75d464c", "score": "0.5896653", "text": "def get_digit(num, place):\n return int(num / 10 ** (place - 1)) % 10", "title": "" }, { "docid": "8d4915b807ae140610d3fd9b59452037", "score": "0.5868702", "text": "def _get_number(number_string):\n if len(number_string) == 4:\n return int(number_string)\n else:\n return int(number_string[-4:])", "title": "" }, { "docid": "39b6eab002aa17ee7e14ad54fe88ec34", "score": "0.5788709", "text": "def main():\n\tprint(find_largest_digit(12345)) # 5\n\tprint(find_largest_digit(281)) # 8\n\tprint(find_largest_digit(6)) # 6\n\tprint(find_largest_digit(-111)) # 1\n\tprint(find_largest_digit(-9453)) # 9", "title": "" }, { "docid": "20d139a0e1c4053095fddd79f8c745ed", "score": "0.57669073", "text": "def print_number(print_num):\n print_word = str(print_num) + ' '\n if (print_num < 21):\n print_word += print_single(print_num, words_lookup)\n else:\n print_word += print_two_part(print_num, words_lookup)\n print(print_word)", "title": "" }, { "docid": "d440548d0088eeeaa153441c329be1a8", "score": "0.5760395", "text": "def print_digits(num):\n # turn into a string\n s = str(num)\n for i in s[::-1]:\n print i", "title": "" }, { "docid": "47122d74355de56f825814ecfb63f9a7", "score": "0.5718764", "text": "def show_number(self):\r\n if self.Complemento:\r\n return self.Negativos()\r\n else:\r\n if self.base < 11:\r\n return self.int()\r\n else:\r\n return self.base_mayor_10()", "title": "" }, { "docid": "d0845e0768f06b074ec317c58e91692f", "score": "0.56912875", "text": "def num(n):\n if n < 0:\n return \"(\" + str(n) + \")\"\n else:\n return n", "title": "" }, 
{ "docid": "77678d9cebe20fd68117e2df27d229eb", "score": "0.5660564", "text": "def last_8(some_int):\n\n return int(str(some_int)[-8:])", "title": "" }, { "docid": "d0e764c0ed5899516cf3551acf8d1189", "score": "0.5631605", "text": "def extractNum(self):\r\n\r\n return self.tail", "title": "" }, { "docid": "5ca26f26aeb6cab0e31b24e0c4c1da59", "score": "0.5596071", "text": "def reverse_num(num: int):\n while True:\n try:\n return int(str(num)[::-1])\n except ValueError:\n print('Please, enter number: ')", "title": "" }, { "docid": "dba128bac86a015a59c3d628246361c5", "score": "0.5541483", "text": "def first_digit(number):\n return int(str(number)[0])", "title": "" }, { "docid": "377352e6983f71df37fb3db0ebce6e75", "score": "0.55151415", "text": "def extract_digit(number, n, base = 10):\n return (number // base ** n) % base", "title": "" }, { "docid": "39c26721f13c733251728b27789d9cad", "score": "0.5499339", "text": "def largest_number(a):\n answer = \"\"\n while len(a) > 0:\n max_digit = a[0]\n for val in a[1:]:\n if greater_or_equal(val, max_digit):\n max_digit = val\n\n answer += max_digit\n a.remove(max_digit)\n\n return answer", "title": "" }, { "docid": "2c8cbb9638ad639562268c194b6d5ae7", "score": "0.54844826", "text": "def getNumerical(number):\r\n output=\"\"\r\n for char in number:\r\n if ifInt(char)==True: output+=char\r\n try: return int(output)\r\n except: return 0", "title": "" }, { "docid": "114d3d79f821177e85c9f8825ca07dac", "score": "0.5460838", "text": "def last_N(input_string, number=2):\n return input_string[-number:]", "title": "" }, { "docid": "b699c4365c23ca18497134bcdaf2695b", "score": "0.5433554", "text": "def _last_n(self, number=None):\n if number is None:\n return self._storage\n return self._storage[-(number):]", "title": "" }, { "docid": "6aeddedfc10981ae378193ae3cb28cb4", "score": "0.5369685", "text": "def ordinal_or_last(m) -> str:\n if m[0] == \"last\":\n return -1\n return m.ordinals_small - 1", "title": "" }, { "docid": "e3ba6f41f3396f3db5a1f6f9715016e3", "score": "0.53642917", "text": "def printDigit(d):\n try:\n i = int(d)\n except:\n print(\"This digit is not an integer\")\n return \"\"\n if(i == 1):\n return(\":::||\")\n if(i == 2):\n return(\"::|:|\")\n if(i == 3):\n return(\"::||:\")\n if(i == 4):\n return(\":|::|\")\n if(i == 5):\n return(\":|:|:\")\n if(i == 6):\n return(\":||::\")\n if(i == 7):\n return(\"|:::|\")\n if(i == 8):\n return(\"|::|:\")\n if(i == 9):\n return(\"|:|::\")\n if(i == 0):\n return(\"||:::\")\n print(\"This is not a single digit integer!\")\n return \"\"", "title": "" }, { "docid": "6c4ef1ed0c2aec30fd05d0a3d5786b88", "score": "0.53616256", "text": "def number_n(n):\n return str(n) + \" is a number\"", "title": "" }, { "docid": "bed03c79f2c24b9731028780caab8e43", "score": "0.5349992", "text": "def least_significant_digit(number):\n number_string = str(number)\n if ('.' 
in number_string):\n print(number_string)\n print(number_string.partition('.'))\n print(number_string.partition('.')[2])\n return -len(number_string.partition('.')[2])\n else:\n return 0", "title": "" }, { "docid": "7b630f05bdcb7bd48f233c99b6f1d235", "score": "0.533819", "text": "def only_digits_dynamic(n=None):\n return \"{} is a number\".format(n)", "title": "" }, { "docid": "6059f1caf0921678951852e4d6d97347", "score": "0.5322387", "text": "def num_below_thousand(num):\n assert (num >= 0)\n if num == 0:\n return ''\n if num < 20: # anything under 20 has its own number\n return NUMS[num]\n elif num < 100:\n if num % 10 == 0:\n return (NUMS[num // 10 * 10]).strip()\n else:\n return (NUMS[num // 10 * 10] + '-' + NUMS[num % 10]).strip()\n elif num < 1000:\n end = num_below_thousand(num % 100)\n join = ''\n if end:\n join = ' and ' if num % 100 < 100 else ' '\n return (NUMS[num // 100] + ' hundred' + join + end).strip()", "title": "" }, { "docid": "1419c656753bcdfed16aa9526867c08c", "score": "0.5317706", "text": "def last_digit_fast(n):\n F = [int(i) for i in range(n+1)]\n F[0] = 0\n F[1] = 1\n for i in range(2, n+1):\n F[i] = (F[i-1] + F[i-2]) % 10\n\n return F[n]", "title": "" }, { "docid": "637710436c43737d215b70f6ad59dd02", "score": "0.53127825", "text": "def stage_num():\r\n first = 1\r\n last = 350\r\n\r\n while True:\r\n s_num = input(\"Enter stage number (1-350). No input will yield default 001. \")\r\n if s_num == \"\":\r\n s_num = \"001\"\r\n return s_num\r\n else:\r\n try:\r\n s_num = int(s_num)\r\n except ValueError:\r\n print(\"\\nInvalid stage number! Try again.\")\r\n else:\r\n if s_num < first or s_num > last:\r\n print(\"Invalid stage number! Try again.\")\r\n else:\r\n s_num = \"{:03d}\".format(s_num)\r\n return s_num", "title": "" }, { "docid": "447b604ea187290fde42eddce21fa901", "score": "0.53111535", "text": "def maximum69Number(num) -> int:\n return int(str(num).replace(\"6\",\"9\",1))", "title": "" }, { "docid": "47845a78d2c730b980b3184767cfa490", "score": "0.52962047", "text": "def last_2_digits(number: str) -> str:\n if len(number) != 9:\n raise ValueError('The number should have 9 digits')\n if not all(d in string.digits for d in number):\n raise ValueError('The characters must be all digits')\n first_digits = [int(d) for d in number]\n digit_10 = (sum(first_digits[::2])*7 - sum(first_digits[1::2])) % 10\n first_10_digits = first_digits + [digit_10]\n digit_11 = sum(first_10_digits) % 10\n return str(digit_10) + str(digit_11)", "title": "" }, { "docid": "39f6f9704d2f3f71f14e9171f5c3615a", "score": "0.5279561", "text": "def get_digit_right_to_left(number, digit_position):\r\n if digit_position <= 0:\r\n return number % 10\r\n else:\r\n return get_digit_right_to_left(number // 10, digit_position - 1)", "title": "" }, { "docid": "073587a00618dc0a2139e9f43868d907", "score": "0.527198", "text": "def mytrunc(num):\n v = GetNum(num)\n if(isinstance(v,float)):\n return int(v)\n return num", "title": "" }, { "docid": "0b242e4c07bb378e3ab6f7ae1d5251ec", "score": "0.5265949", "text": "def getNumber(self):\r\n\t\treturn self.result.groups()[1]", "title": "" }, { "docid": "9d66937bfd883f2e740f30db329fc885", "score": "0.52610373", "text": "def get_number(self):\n return self._number", "title": "" }, { "docid": "9d66937bfd883f2e740f30db329fc885", "score": "0.52610373", "text": "def get_number(self):\n return self._number", "title": "" }, { "docid": "f45449ce6d1a0fd2b04ecd2d8cc9b154", "score": "0.526063", "text": "def naive_largest_number(list_of_digits):\n # Sort the list\n 
list_of_digits = sorted(list_of_digits, reverse=True)\n return ''.join(map(str, list_of_digits))", "title": "" }, { "docid": "a94c60fc8e47772c1466e4cb7b9439b3", "score": "0.5257002", "text": "def number(self) -> str:\n return self._dni[:8]", "title": "" }, { "docid": "f8f57c2bc71e7cc65cc06551c78c2037", "score": "0.5247601", "text": "def _nth_digit(i, n):\n return (i // 10**n) % 10", "title": "" }, { "docid": "11548910eb2f8bcff216070f0038835e", "score": "0.5239359", "text": "def get_last_budget():\n ppto = '0000'\n budget = frappe.get_last_doc('Expenses Financial Budget')\n if budget:\n ppto = budget.financial_budget_code\n sqe = '%004d' % (int(ppto[-4:]) + 1)\n return sqe", "title": "" }, { "docid": "e02637bf1ce805338aa3b4c8a58e60b0", "score": "0.52381927", "text": "def tail(self):\r\n if self.last.isNum() :\r\n return (self.last)", "title": "" }, { "docid": "c3df028a80afa579625ac6cb01057c11", "score": "0.522218", "text": "def truncate_number(number: float) -> float:\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"2\")\n # END OF SOLUTION", "title": "" }, { "docid": "f9c09c996ae02468b0fbcdc42ccc5f27", "score": "0.51889825", "text": "def _print_suffix(node, mexp_parser):\n\n # Initialize the result.\n ret = \"\"\n\n # Print the suffix number part if has.\n sfx = node.get_suffix_number().simplify()\n if not sfx.is_zero:\n ret += _print_operand(sfx, mexp_parser)\n\n return ret", "title": "" }, { "docid": "e2a70c0fbda0cb4fadd3e8bc690a069a", "score": "0.5187658", "text": "def find_number(s):\r\n\r\n r = re.search('-?\\d+[.e]?\\d*', s)\r\n if r is not None:\r\n return r.span(0)\r\n return None", "title": "" }, { "docid": "d2b8a5a7691bddcf22dc27e70db53e10", "score": "0.5180014", "text": "def digits(n):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"5\")\n # END OF SOLUTION", "title": "" }, { "docid": "e728680443e522a314f204eb1250aaf0", "score": "0.5150543", "text": "def format_number(num):\r\n result = \" \" + str(num) + \" \"\r\n if num < 10:\r\n result = result + \" \"\r\n return result", "title": "" }, { "docid": "166a1d073fef735b07596fe25ca3f175", "score": "0.5148463", "text": "def reverse(n):\n\tif n < 10:\n\t\treturn n\n\telse:\n\t\treturn combine(n % 10, reverse(n // 10))", "title": "" }, { "docid": "d2982c68e699f28398a424a9a2bdd5df", "score": "0.512914", "text": "def print_result(n):\n print(\"The biggest prime smaller than the given number is \", str(n))", "title": "" }, { "docid": "5d14bb69b411de6e3f45ae29676054b6", "score": "0.5113516", "text": "def _format_num(self, num: int) -> str:\n return f\"[num]{num}[/num]\" if self.rich else str(num)", "title": "" }, { "docid": "8b29024df6b9a293698118c57c6f55e1", "score": "0.5110521", "text": "def _afterpoint(string):\n if _isnumber(string):\n if _isint(string):\n return -1\n else:\n pos = string.rfind(\".\")\n pos = string.lower().rfind(\"e\") if pos < 0 else pos\n if pos >= 0:\n return len(string) - pos - 1\n else:\n return -1 # no point\n else:\n return -1 # not a number", "title": "" }, { "docid": "0657481703e9c6cd3d1999bbabb6e692", "score": "0.50973415", "text": "def floorGet(self, data):\n strdig = re.search(r\"^-?[0-9a-fA-F]+\\s\", data)\n try:\n dig = int(strdig.group(), 16)\n except ValueError: # nonetype or strange dig\n raise ValueError(\"Floor number Error. 
It should be [-F~F]\")\n\n self.colorPrint(\"Floor\", dig)\n if dig < -0xF or dig > 0xF:\n raise ValueError(str(dig) + \" is too big. It should be [-F~F]\")\n return dig", "title": "" }, { "docid": "08abc830bd91dedf5c57734447cd7037", "score": "0.50909144", "text": "def getFirstDigit(n):\n cnt = 1\n multiple = 1\n while n >= 10 * multiple:\n cnt += 1\n multiple *= 10\n firstDigit, rem = divmod(n, 10 ** (cnt - 1))\n return firstDigit, rem, cnt", "title": "" }, { "docid": "12a830895845c7adc55aeed642fd2b77", "score": "0.5089283", "text": "def number(self) -> str:\n return self._number", "title": "" }, { "docid": "1ba6baae081d7e4c849ce28a3c2f545d", "score": "0.5089042", "text": "def get_fibonacci_last_digit_naive(n):\n if n <= 1:\n return n\n\n previous = 0\n current = 1\n\n for _ in range(n-1):\n previous, current = current, (previous + current) % 10\n\n return current", "title": "" }, { "docid": "8a24585139cad936c321305e16711acd", "score": "0.5088977", "text": "def _format_number(self, num):\n num = int(num)\n if num in [1, 21, 31]:\n suffix = 'st'\n elif num in [2, 22, 32]:\n suffix = 'nd'\n elif num in [3, 23, 33]:\n suffix = 'rd'\n else:\n suffix = 'th'\n\n return f\"{num}{suffix}\"", "title": "" }, { "docid": "330776ff7d18ceb986c82cd4eb0e6e79", "score": "0.5080855", "text": "def last(word):\n return word[-1]", "title": "" }, { "docid": "23ffb56f67acdc04f3877103fef0ae8d", "score": "0.5070611", "text": "def get_number():\n n = \"\"\n while not n.isdigit():\n n = input(\"Give natural number: \")\n return eval(n)", "title": "" }, { "docid": "f81a16f1162fe074b75fda8b05647878", "score": "0.5070483", "text": "def GetNumber(*args):\n return _DigitalMicrograph.GetNumber(*args)", "title": "" }, { "docid": "5602d78c974864e3760bbd2210252269", "score": "0.50703967", "text": "def decrease_num(current_num):\n if int(current_num) > 1:\n return str(int(current_num) - 1)\n else:\n return str(1)", "title": "" }, { "docid": "d9ad9cc2d3ca4468efd1862e010be8df", "score": "0.5063478", "text": "def get_num(prompt='Number? '):\n _num = 0\n while True:\n try:\n _num = int(input(prompt))\n except ValueError:\n print('Was that a number? 
Try again!')\n continue\n else:\n break\n return _num", "title": "" }, { "docid": "a3373374a9b0b55b47b7e2c1b2b62c14", "score": "0.5048966", "text": "def last_word(word):\n word = words[-1]\n print word", "title": "" }, { "docid": "93f17ea619b054765787c703deee8f81", "score": "0.50377285", "text": "def ndigits(x):\n s = str(x).strip('-') # change int to str and strip leading '-'\n if s != \"\": # string not empty - apply func to str = str - 1st digit\n return 1 + ndigits(s[1:])\n else:\n return 0 # string is empty, finish counting", "title": "" }, { "docid": "2fe1d264b58196b6b7778e60728c85d6", "score": "0.5034703", "text": "def extract_number(f):\n s = re.findall(\"\\d+$\", f)\n return (int(s[0]) if s else -1, f)", "title": "" }, { "docid": "6d12d35e75fa9d4bc74db7871660c0b9", "score": "0.5029458", "text": "def get_Digits(self):\n return self._output.get('Digits', None)", "title": "" }, { "docid": "9d7ef7257e6e70fc946a230b3020520c", "score": "0.5025162", "text": "def number_get(self) -> Number:\n token: Token = self\n assert False, f\"Token.number_get(): {token.__class__.__name__}.get_number() dos not exist!\"", "title": "" }, { "docid": "c0469e60214817264844b395d5b419ec", "score": "0.50238305", "text": "def get_last_page_number():\n # first get the first page and count how many pages there are\n r = requests.get(CONSTRUCTED_URL)\n\n # parse the text with BeautifulSoup to obtain the last page number\n soup = BeautifulSoup(r.text)\n pagination = soup.find('ol', class_='pagination actions')\n return pagination.find_all('a')[-2].text", "title": "" }, { "docid": "d32f40cb3fe7ff9e3072bdb5971e590d", "score": "0.5023595", "text": "def rounder(num):\n a = list(map(int,str(num)))\n x = math.pow(10, len(a)-1)\n return int((a[0]+1)*x)", "title": "" }, { "docid": "311faecc10b1b5e92f9359f8ef4a0109", "score": "0.50228506", "text": "def number(values_l, n):\n values, l = values_l\n if values:\n return values[l + n]\n else:\n return \"\"", "title": "" }, { "docid": "da86ef24fa59916eed3d12f9dc7b027c", "score": "0.50168204", "text": "def format_number(n, empty=\"-\"):\n if n is None:\n return empty\n\n if n == float(\"inf\"):\n return _(\"infinity\")\n try:\n return \", \".join(map(fmt, n))\n except TypeError:\n pass\n m = abs(n)\n\n if int(n) == n and m < 1e6:\n return \"{:n}\".format(int(n))\n\n if m < 0.01:\n e = int(-log10(m)) + 1\n return \"{:.3n}\".format(n * 10 ** e) + \"e-%02d\" % e\n for k, div, fn, fmt_, suffix in HUMANIZED_SUFFIXES:\n if m < k:\n dec = fn(m) / div\n dec -= int(dec)\n dec = fmt_.format(1 + dec)[1:]\n prefix = \"{:n}\".format(int(fn(n) / div))\n return prefix + dec + suffix\n return \"{:.2n}\".format(n)", "title": "" }, { "docid": "c1e447538221a63ecafb66beb69ab1bc", "score": "0.500173", "text": "def _extract_number(value, default):\n try:\n # The _str_to_num method converts the value to string/float\n # so we need to perform one additional conversion to int on top\n return int(_str_to_num(value))\n except (TypeError, ValueError):\n return default", "title": "" }, { "docid": "ced2f8db4e2b39b59d3fe242ee78eec7", "score": "0.5000666", "text": "def last_step(self):\n if self.success: return self\n def cmp(a): return int(a[0].split('/')[-1])\n return max(self.details.items(), key=cmp)[1]", "title": "" }, { "docid": "165c1265f86d0cfec77d4c9bb9be1228", "score": "0.49939406", "text": "def ordinal_number(n: int) -> str:\n if n <= 0:\n raise Exception('The ordinal number is not defined for non-positive integers.')\n else:\n digit = n % 10 # type: int\n letter_suffix = None # type: str\n if digit 
== 1:\n letter_suffix = 'st'\n elif digit == 2:\n letter_suffix = 'nd'\n elif digit == 3:\n letter_suffix = 'rd'\n else:\n letter_suffix = 'th'\n return str(n) + letter_suffix", "title": "" }, { "docid": "ce44e4c13f5c94769983e2db954d9b87", "score": "0.4993717", "text": "def getNumber(self):\n return self._number", "title": "" }, { "docid": "f149cae31edb6bdfdab4df34fabce48f", "score": "0.49814153", "text": "def draw_number(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n if (x > 0):\n print(x * '+')\n elif (x < 0):\n print(abs(x) * '-')\n else:\n return None", "title": "" }, { "docid": "aac68536c3aafb3a98e7ce99827805f3", "score": "0.49790806", "text": "def trailing_zeros(self, n: int):\n cnt = 0\n while n % 10 == 0 and n != 0:\n cnt += 1\n n = n / 10\n return cnt", "title": "" }, { "docid": "8a984686b17e58e679281d32af0be322", "score": "0.49763197", "text": "def format_num(num: int) -> str:\n num = str(num)\n ans = ''\n for i in range(len(num)-3, -4, -3):\n if i < 0:\n ans = num[0:i+3] + ans\n else:\n ans = ',' + num[i:i+3] + ans\n\n return ans.lstrip(',')", "title": "" }, { "docid": "b1254daabc8d6d90194dd910caf4e660", "score": "0.4974991", "text": "def print_to_n(n):\n if n >= 1:\n print_to_n(n - 1)\n print(str(n))\n else: # Base case where I went over all the numbers until n\n return", "title": "" }, { "docid": "57b791ef33a2286e2e7de41225b86ce4", "score": "0.49724847", "text": "def number(x):\n return x[0]", "title": "" }, { "docid": "a7f0b344328bc26274fc0c2f75fc5d61", "score": "0.4970976", "text": "def number(self):\n return self._num", "title": "" }, { "docid": "2fd685caa9cba9b4ad973128703f2e10", "score": "0.49669227", "text": "def n_dec(\n x: Numeric) \\\n -> int:\n if x == 0:\n return 0\n _, _, dec = str(x).partition('.')\n return len(dec)", "title": "" }, { "docid": "70e89f0fd9e5e2f00e0399f2344ac5f4", "score": "0.49640313", "text": "def max_num(num_list):\n sort_list = sorted(num_list)\n return sort_list[len(num_list) - 1]", "title": "" }, { "docid": "57478609fbc79a8464a95c2061eaee37", "score": "0.49637672", "text": "def max_num(a, b):\n return a if a > b else b", "title": "" }, { "docid": "720e499cb38f282aa8375555b2f4bb26", "score": "0.49637368", "text": "def get_last_t(self) -> int:\n sqlstr = '''SELECT last_t\n FROM Card\n WHERE id=:id;'''\n self.cursor.execute(sqlstr, {'id': self.id})\n try:\n return self.cursor.fetchone()[0]\n except IndexError:\n return 0", "title": "" }, { "docid": "d7f4a8d4dce1081785e6b2ae28811e6c", "score": "0.49613184", "text": "def missing_digits(n):\n \"*** YOUR CODE HERE ***\"\n if n<10:\n return 0\n #mid_digits=len(set(str(n)))-2\n if n<100:\n return (n%10 - n//10 - 1) if n%10 != n//10 else (n%10 - n//10)\n return missing_digits(n//10) + missing_digits(n%100)", "title": "" }, { "docid": "23e3efc1169f9d31aaef174dc98b8074", "score": "0.49610078", "text": "def get_telephone_number(self):\n if self.telephone_number:\n return self.telephone_number\n else:\n return 'Unknown'", "title": "" }, { "docid": "4b1a872ddabeaea09743f62990414483", "score": "0.49553344", "text": "def format_num(num):\n\n try:\n inum = int(num)\n return locale.format(\"%.*f\", (0, inum), True)\n\n except (ValueError, TypeError):\n return str(num)", "title": "" }, { "docid": "4b1a872ddabeaea09743f62990414483", "score": "0.49553344", "text": "def format_num(num):\n\n try:\n inum = int(num)\n return locale.format(\"%.*f\", (0, inum), True)\n\n except (ValueError, TypeError):\n return str(num)", "title": "" }, { "docid": "7352bb9e9ca582d2dc5f9e81eabc4e75", "score": "0.4955185", "text": 
"def get_ordinal(n):\n return \"%d%s\" % (\n n,\n \"tsnrhtdd\"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10 :: 4],\n )", "title": "" }, { "docid": "68b88f7e95a3d57fc747f0012776d2b3", "score": "0.4955018", "text": "def get_pi_digit(nth_digit):\n if nth_digit <= 0:\n return \"Invalid Number\"\n\n nth_digit = nth_digit - 1\n nth_digit = math.floor(nth_digit)\n nth_digit = math.floor(math.pi * pow(10, nth_digit))\n nth_digit = math.fmod(nth_digit, 10)\n return nth_digit", "title": "" }, { "docid": "a00c8289738e416920ec2ab32d74c273", "score": "0.49533105", "text": "def last(s):\n return s[-1]", "title": "" }, { "docid": "97463888e7a793c99aa8f9ee3bd2b024", "score": "0.49501842", "text": "def calc_check_digit(number):\n s = 0\n for i, n in enumerate(reversed(number[:9]), 1):\n c1 = (int(n) + i) % 10\n if c1:\n c2 = (c1 * (2 ** i)) % 9 or 9\n s += c2\n return str((10 - s) % 10)", "title": "" }, { "docid": "e1c7e012bb23b9ebc326a1f7af637b92", "score": "0.49372253", "text": "def last(self):\r\n try:\r\n return self.spaces[-1]\r\n except :\r\n pass", "title": "" }, { "docid": "b7e763461ac85e1e20cad7567d0ef6d4", "score": "0.49364924", "text": "def get_number(token):\n num = StringIO()\n for ch in token:\n if ch.isdigit() or ch == '.' or ch == '-':\n num.write(ch)\n else:\n break\n val = num.getvalue()\n num.close()\n return val", "title": "" }, { "docid": "ff66a2692dad1bb241cabb4d3d33af8a", "score": "0.49356443", "text": "def get_l_exon_num(exon_num):\n if exon_num == 1:\n return 1\n elif exon_num <= 10:\n return 2\n else:\n twenty_perc = exon_num / 5\n return twenty_perc", "title": "" }, { "docid": "934e2c632c6c0692ef274685477b40e1", "score": "0.4932024", "text": "def get_milestone_ab_with_num(milestone):\n\n match = re.search(r\"([ab]\\d+)\", milestone)\n if match:\n return match.group(1)\n\n return \"\"", "title": "" }, { "docid": "0a1ae69b1908a11ee1cb9634ab922a11", "score": "0.49281517", "text": "def max_num(num_list):\n\n num_list.sort()\n return num_list[-1]", "title": "" }, { "docid": "02aefde1a953fe27dc70e0b9fb6d5d5e", "score": "0.49225143", "text": "def find_tens(number):\n if number >= 10:\n number = int(number / 10)\n return number\n elif number < 10:\n return 0", "title": "" }, { "docid": "96fa1130a698d35b6293a3332779063c", "score": "0.4914124", "text": "def num_with_extension(n):\n if 10 <= n % 100 <= 20: # special case of 11-13\n ext = \"th\"\n elif n % 10 == 1:\n ext = \"st\"\n elif n % 10 == 2:\n ext = \"nd\"\n elif n % 10 == 3:\n ext = \"rd\"\n else:\n ext = \"th\"\n return \"{}{}\".format(n, ext)", "title": "" } ]
9953d2c5c9bf35c5afb1bbb3d54db76c
plot pystan submm sed output as SEDs specific to greybody models
[ { "docid": "5c59020054486de78b839e4f09a3bb5d", "score": "0.53297436", "text": "def pystan_postprocess_SED_plot(allfits, label=\"\"):\n\n labs = (\"1 comp\", \"2 comp\", r\"1 $\\beta=2$\", r\"2 $\\beta=2$\", \"thick\")\n for objname, fits in iteritems(allfits):\n for fit, lab in zip(fits, labs):\n plot_pystan(dat[objname], fit, label=lab)\n plt.title(objname)\n plt.legend(loc='best')\n\n plt.savefig(\"%s_%sSED.png\" % (objname, label), dpi=200)\n plt.figure()", "title": "" } ]
[ { "docid": "2bcc1093c7f3f4c187383fab35b16510", "score": "0.62366015", "text": "def tsnescatterplot(model, word, list_names):\r\n arrays = np.empty((0, 300), dtype='f')\r\n word_labels = [word]\r\n color_list = ['red']\r\n\r\n # adds the vector of the query word\r\n arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)\r\n \r\n # gets list of most similar words\r\n close_words = model.wv.most_similar([word])\r\n \r\n # adds the vector for each of the closest words to the array\r\n for wrd_score in close_words:\r\n wrd_vector = model.wv.__getitem__([wrd_score[0]])\r\n word_labels.append(wrd_score[0])\r\n color_list.append('blue')\r\n arrays = np.append(arrays, wrd_vector, axis=0)\r\n \r\n # adds the vector for each of the words from list_names to the array\r\n for wrd in list_names:\r\n wrd_vector = model.wv.__getitem__([wrd])\r\n word_labels.append(wrd)\r\n color_list.append('green')\r\n arrays = np.append(arrays, wrd_vector, axis=0)\r\n \r\n # Reduces the dimensionality from 300 to 50 dimensions with PCA\r\n reduc = PCA(n_components=18).fit_transform(arrays)\r\n \r\n # Finds t-SNE coordinates for 2 dimensions\r\n np.set_printoptions(suppress=True)\r\n \r\n Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(reduc)\r\n \r\n # Sets everything up to plot\r\n df = pd.DataFrame({'x': [x for x in Y[:, 0]],\r\n 'y': [y for y in Y[:, 1]],\r\n 'words': word_labels,\r\n 'color': color_list})\r\n \r\n fig, _ = plt.subplots()\r\n fig.set_size_inches(9, 9)\r\n \r\n # Basic plot\r\n p1 = sns.regplot(data=df,\r\n x=\"x\",\r\n y=\"y\",\r\n fit_reg=False,\r\n marker=\"o\",\r\n scatter_kws={'s': 40,\r\n 'facecolors': df['color']\r\n }\r\n )\r\n \r\n # Adds annotations one by one with a loop\r\n for line in range(0, df.shape[0]):\r\n p1.text(df[\"x\"][line],\r\n df['y'][line],\r\n ' ' + df[\"words\"][line].title(),\r\n horizontalalignment='left',\r\n verticalalignment='bottom', size='medium',\r\n color=df['color'][line],\r\n weight='normal'\r\n ).set_size(15)\r\n\r\n \r\n plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)\r\n plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)\r\n \r\n plt.title('t-SNE visualization for {}'.format(word.title()))\r\n plt.show()", "title": "" }, { "docid": "b262518c20c2bd8b311954e7cc84636f", "score": "0.6098654", "text": "def tsnescatterplot(model, word, comparison_list = []):\r\n \r\n word_vector = model.wv.get_vector(word)\r\n ncols = np.shape(word_vector)[0]\r\n arrays = np.empty((0, ncols), dtype='f')\r\n word_vector = word_vector.reshape((1,ncols))\r\n arrays = np.append(arrays, word_vector, axis=0)\r\n \r\n word_labels = [word]\r\n color_list = ['red']\r\n \r\n close_words = model.wv.most_similar(word)\r\n \r\n for wrd_score in close_words:\r\n wrd_vector = model.wv.get_vector(wrd_score[0])\r\n wrd_vector = wrd_vector.reshape((1,ncols))\r\n word_labels.append(wrd_score[0])\r\n color_list.append('blue')\r\n arrays = np.append(arrays, wrd_vector, axis=0)\r\n \r\n for word_compare in comparison_list:\r\n wrd_vector = model.wv.get_vector(word_compare)\r\n wrd_vector = wrd_vector.reshape((1,ncols))\r\n word_labels.append(word_compare)\r\n color_list.append('green')\r\n arrays = np.append(arrays, wrd_vector, axis=0)\r\n \r\n simplified_model = PCA(n_components = 0.9).fit_transform(arrays)\r\n \r\n # Finds t-SNE coordinates for 2 dimensions\r\n np.set_printoptions(suppress=True)\r\n \r\n tse = TSNE(n_components=2, \r\n perplexity=40, \r\n learning_rate = 600, \r\n n_iter=5000, \r\n method='exact')\r\n \r\n Y = tse.fit_transform(simplified_model)\r\n \r\n # 
Sets everything up to plot\r\n df = pd.DataFrame({'x': [x for x in Y[:, 0]],\r\n 'y': [y for y in Y[:, 1]],\r\n 'words': word_labels,\r\n 'color': color_list})\r\n \r\n fig, _ = plt.subplots()\r\n \r\n # Basic plot\r\n p1 = sns.regplot(data=df,\r\n x=\"x\",\r\n y=\"y\",\r\n fit_reg=False,\r\n marker=\"o\",\r\n scatter_kws={'s': 10,\r\n 'facecolors': df['color']})\r\n \r\n # Adds annotations one by one with a loop\r\n for line in range(0, df.shape[0]):\r\n p1.text(df[\"x\"][line],\r\n df['y'][line],\r\n ' ' + df[\"words\"][line].title(),\r\n horizontalalignment='left',\r\n verticalalignment='bottom', size='medium',\r\n color=df['color'][line],\r\n weight='normal'\r\n ).set_size(15)\r\n\r\n \r\n plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)\r\n plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)\r\n \r\n plt.title('t-SNE visualization for {}'.format(word.title()))", "title": "" }, { "docid": "94fcad9513fd1408390e8615f42b3601", "score": "0.60928696", "text": "def plotSeismogram(d, rho, v, wavf, wavA=1.0, noise=0.0, usingT=True, wavtyp=\"RICKER\"):\n\n tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(\n d, rho, v, wavf, wavA, usingT, wavtyp\n )\n\n noise = noise * np.max(np.abs(seis)) * np.random.randn(seis.size)\n filt = np.arange(1.0, 15.0)\n filtr = filt[::-1]\n filt = np.append(filt, filtr[1:]) * 1.0 / 15.0\n noise = np.convolve(noise, filt)\n noise = noise[0 : seis.size]\n\n seis = seis + noise\n\n plt.figure(num=0, figsize=(10, 5))\n\n plt.subplot(131)\n plt.plot(wav, twav, linewidth=1, color=\"black\")\n posind = wav > 0.0\n plt.fill_between(wav[posind], twav[posind], np.zeros_like(wav[posind]), color=\"k\")\n plt.title(\"Wavelet\")\n plt.xlim((-2.0, 2.0))\n plt.ylim((-0.2, 0.2))\n majorytick = np.arange(-0.2, 0.3, 0.1)\n minorytick = np.arange(-0.2, 0.21, 0.01)\n plt.gca().set_yticks(majorytick)\n plt.gca().set_yticks(minorytick, minor=True)\n plt.gca().grid(True, which=\"major\", axis=\"both\", linewidth=1.5)\n plt.gca().grid(True, which=\"minor\", axis=\"y\")\n plt.ylim((tseis.min() - tseis.mean(), tseis.max() - tseis.mean()))\n plt.gca().invert_yaxis()\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.subplot(132)\n plt.plot(\n np.zeros(tref.size), (tseis.max(), tseis.min()), linewidth=2, color=\"black\"\n )\n plt.hlines(\n tref, np.zeros(len(rseriesconv)), rseriesconv, linewidth=2\n ) # ,'marker','none'\n if usingT is True:\n plt.title(\"Reflectivity\")\n else:\n plt.title(\"Reflection Coeff.\")\n plt.grid()\n plt.ylim((0, tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-2.0, 2.0))\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.subplot(133)\n posind = seis > 0.0\n plt.plot(seis, tseis, color=\"black\", linewidth=1)\n plt.fill_between(\n seis[posind],\n tseis[posind],\n np.zeros_like(seis[posind]),\n color=\"k\",\n edgecolor=\"white\",\n )\n plt.title(\"Seismogram\")\n plt.grid()\n plt.ylim((tseis.min(), tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-0.95, 0.95))\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "12d62d509c25f5624b559beb78b01d86", 
"score": "0.60349965", "text": "def plotSeismogramV2(\n d, rho, v, wavf, wavA=1.0, noise=0.0, usingT=True, wavtyp=\"RICKER\"\n):\n\n dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)\n tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(\n d, rho, v, wavf, wavA, usingT, wavtyp\n )\n\n noise = noise * np.max(np.abs(seis)) * np.random.randn(seis.size)\n filt = np.arange(1.0, 21.0)\n filtr = filt[::-1]\n filt = np.append(filt, filtr[1:]) * 1.0 / 21.0\n noise = np.convolve(noise, filt)\n noise = noise[0 : seis.size]\n\n xlimrho = (1.95, 5.05)\n xlimv = (0.25, 4.05)\n\n seis = seis + noise\n\n plt.figure(num=0, figsize=(10, 5))\n\n plt.subplot(141)\n plt.plot(wav, twav, linewidth=1, color=\"black\")\n posind = wav > 0.0\n plt.fill_between(wav[posind], twav[posind], np.zeros_like(wav[posind]), color=\"k\")\n plt.title(\"Wavelet\")\n plt.xlim((-1.0, 1.0))\n plt.ylim((tseis.min() - tseis.mean(), tseis.max() - tseis.mean()))\n plt.ylim((-0.2, 0.2))\n majorytick = np.arange(-0.2, 0.3, 0.1)\n minorytick = np.arange(-0.2, 0.21, 0.01)\n plt.gca().set_yticks(majorytick)\n plt.gca().set_yticks(minorytick, minor=True)\n plt.gca().grid(True, which=\"major\", axis=\"both\", linewidth=1.5)\n plt.gca().grid(True, which=\"minor\", axis=\"y\")\n plt.gca().invert_yaxis()\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.subplot(142)\n plotLogFormat(rholog * 10 ** -3, dpth, xlimrho, \"blue\")\n plt.title(\"$\\\\rho$\")\n plt.xlabel(\"Density \\n $\\\\times 10^3$ (kg /m$^3$)\", fontsize=9)\n plt.ylabel(\"Depth (m)\", fontsize=9)\n\n plt.subplot(143)\n plotLogFormat(vlog * 10 ** -3, dpth, xlimv, \"red\")\n plt.title(\"$v$\")\n plt.xlabel(\"Velocity \\n $\\\\times 10^3$ (m/s)\", fontsize=9)\n plt.ylabel(\"Depth (m)\", fontsize=9)\n\n plt.subplot(144)\n posind = seis > 0.0\n plt.plot(seis, tseis, color=\"black\", linewidth=1)\n plt.fill_between(\n seis[posind],\n tseis[posind],\n np.zeros_like(seis[posind]),\n color=\"k\",\n edgecolor=\"white\",\n )\n plt.title(\"Seismogram\")\n plt.grid()\n plt.ylim((tseis.min(), tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-1.0, 1.0))\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "36ff230dcc3416edc471e675f752a3d6", "score": "0.5960732", "text": "def plotSAED(tags, gray=False):\n\n saed = tags.copy()\n saed['convergence_angle_nm-1'] = 0\n\n saed['background'] = 'white' # 'white' 'grey'\n saed['color map'] = 'plasma' # ,'cubehelix'#'Greys'#'plasma'\n saed['color reflections'] = 'ZOLZ'\n\n if gray:\n saed['color map'] = 'gray'\n saed['background'] = '#303030' # 'darkgray'\n saed['color reflections'] = 'intensity'\n saed['plot HOLZ'] = 0\n saed['plot HOLZ excess'] = 0\n saed['plot Kikuchi'] = 0\n saed['plot reflections'] = 1\n\n saed['label HOLZ'] = 0\n saed['label Kikuchi'] = 0\n saed['label reflections'] = 0\n\n saed['label color'] = 'white'\n saed['label size'] = 10\n\n saed['color Laue Zones'] = ['red', 'blue', 'green', 'blue', 'green'] # , 'green', 'red'] #for OLZ give a sequence\n saed['color zero'] = 'red' # 'None' #'white'\n saed['color ring zero'] = 'None' # 'Red' #'white' #, 'None'\n saed['width ring zero'] = 2\n\n plot_diffraction_pattern(saed, True)", "title": "" }, { "docid": 
"21d1deae717e62adeb6c4fc3f47103ba", "score": "0.5941633", "text": "def plot_SED(name_sampler_fits,logscale_x=False,logscale_y=True,xrange=None,yrange=None,wunit='micron',funit='erg/s/cm2/A', \n\tdecompose=0,xticks=None,photo_color='red',residual_range=[-1.0,1.0],show_original_spec=False,fontsize_tick=18,fontsize_label=25,\n\tshow_legend=True, loc_legend=4, fontsize_legend=18, markersize=100, lw=2.0, name_plot=None):\n\n\thdu = fits.open(name_sampler_fits)\n\theader_samplers = hdu[0].header\n\tobs_photo = hdu['obs_photo'].data\n\tbfit_photo = hdu['bfit_photo'].data\n\tif header_samplers['fitmethod'] == 'rdsps':\n\t\tminchi2_params = hdu['minchi2_params'].data\n\tif header_samplers['specphot'] == 0:\n\t\tbfit_mod_spec = hdu['bfit_mod_spec'].data \n\tif header_samplers['specphot'] == 1:\n\t\tobs_spec = hdu['obs_spec'].data\n\t\tbfit_spec = hdu['bfit_spec'].data\n\t\tbfit_mod_spec = hdu['bfit_mod'].data\n\t\tcorr_factor = hdu['corr_factor'].data \n\thdu.close()\n\n\t# filters\n\tnbands = int(header_samplers['nfilters'])\n\tfilters = []\n\tfor bb in range(0,nbands):\n\t\tfilters.append(header_samplers['fil%d' % bb])\n\n\tif name_plot==None:\n\t\tname_sampler_fits1 = name_sampler_fits.replace('.fits','')\n\t\tname_plot = \"sed_%s.png\" % (name_sampler_fits1)\n\n\tif header_samplers['specphot'] == 1:\n\t\tif header_samplers['fitmethod'] == 'mcmc':\n\t\t\tminchi2_params = None\n\t\tplot_SED_specphoto(filters=filters,obs_photo=obs_photo,obs_spec=obs_spec,bfit_photo=bfit_photo,bfit_spec=bfit_spec,\n\t\t\tbfit_mod_spec=bfit_mod_spec,corr_factor=corr_factor,minchi2_params=minchi2_params,header_samplers=header_samplers,\n\t\t\tlogscale_x=logscale_x,logscale_y=logscale_y,xrange=xrange,yrange=yrange,wunit=wunit,funit=funit,xticks=xticks,\n\t\t\tphoto_color=photo_color,residual_range=residual_range,show_original_spec=show_original_spec,fontsize_tick=fontsize_tick,\n\t\t\tfontsize_label=fontsize_label,show_legend=show_legend,loc_legend=loc_legend,fontsize_legend=fontsize_legend,\n\t\t\tmarkersize=markersize,lw=lw,name_plot=name_plot)\n\telif header_samplers['specphot'] == 0:\n\t\tif header_samplers['fitmethod'] == 'mcmc':\n\t\t\tplot_SED_mcmc_photo(filters=filters,obs_photo=obs_photo,bfit_photo=bfit_photo,bfit_mod_spec=bfit_mod_spec,\n\t\t\t\theader_samplers=header_samplers,logscale_x=logscale_x,logscale_y=logscale_y,xrange=xrange,yrange=yrange,\n\t\t\t\twunit=wunit,funit=funit,decompose=decompose,xticks=xticks,photo_color=photo_color,residual_range=residual_range,\n\t\t\t\tfontsize_tick=fontsize_tick,fontsize_label=fontsize_label,show_legend=show_legend,loc_legend=loc_legend,\n\t\t\t\tfontsize_legend=fontsize_legend,markersize=markersize,lw=lw,name_plot=name_plot)\n\n\t\telif header_samplers['fitmethod'] == 'rdsps':\n\t\t\tplot_SED_rdsps_photo(filters=filters,obs_photo=obs_photo,bfit_photo=bfit_photo,bfit_mod_spec=bfit_mod_spec,minchi2_params=minchi2_params,\n\t\t\t\theader_samplers=header_samplers,logscale_x=logscale_x,logscale_y=logscale_y,xrange=xrange,yrange=yrange,wunit=wunit,\n\t\t\t\tfunit=funit,decompose=decompose,xticks=xticks,photo_color=photo_color,residual_range=residual_range,fontsize_tick=fontsize_tick,\n\t\t\t\tfontsize_label=fontsize_label,show_legend=show_legend,loc_legend=loc_legend,fontsize_legend=fontsize_legend,markersize=markersize,\n\t\t\t\tlw=lw,name_plot=name_plot)", "title": "" }, { "docid": "a4561104d110d479536ed58a6658128d", "score": "0.59313667", "text": "def plotSeismogramV3(\n d, rho, v, wavf, wavA=1.0, noise=0.0, usingT=True, wavtyp=\"RICKER\"\n):\n\n dpth, rholog, vlog, 
zlog, rseries = getLogs(d, rho, v, usingT)\n tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(\n d, rho, v, wavf, wavA, usingT, wavtyp\n )\n\n noise = noise * np.max(np.abs(seis)) * np.random.randn(seis.size)\n filt = np.arange(1.0, 21.0)\n filtr = filt[::-1]\n filt = np.append(filt, filtr[1:]) * 1.0 / 21.0\n noise = np.convolve(noise, filt)\n noise = noise[0 : seis.size]\n\n xlimrho = (1.95, 5.05)\n xlimv = (0.25, 4.05)\n\n seis = seis + noise\n\n plt.figure(num=0, figsize=(10, 5))\n\n plt.subplot(141)\n plt.plot(wav, twav, linewidth=1, color=\"black\")\n posind = wav > 0.0\n plt.fill_between(wav[posind], twav[posind], np.zeros_like(wav[posind]), color=\"k\")\n plt.title(\"Wavelet\")\n plt.xlim((-1.0, 1.0))\n # plt.ylim((tseis.min()-tseis.mean(),tseis.max()-tseis.mean()))\n plt.ylim((-0.2, 0.2))\n majorytick = np.arange(-0.2, 0.3, 0.1)\n minorytick = np.arange(-0.2, 0.21, 0.01)\n plt.gca().set_yticks(majorytick)\n plt.gca().set_yticks(minorytick, minor=True)\n plt.gca().grid(True, which=\"major\", axis=\"both\", linewidth=1.5)\n plt.gca().grid(True, which=\"minor\", axis=\"y\")\n plt.gca().invert_yaxis()\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.subplot(142)\n plotLogFormat(rholog * 10 ** -3, dpth, xlimrho, \"blue\")\n plt.title(\"$\\\\rho$\")\n plt.xlabel(\"Density \\n $\\\\times 10^3$ (kg /m$^3$)\", fontsize=9)\n plt.ylabel(\"Depth (m)\", fontsize=9)\n plt.xlim((0.0, 4.6))\n plt.ylim((200.0, 0.0))\n\n plt.subplot(143)\n plotLogFormat(vlog * 10 ** -3, dpth, xlimv, \"red\")\n plt.ylim((200.0, 0.0))\n plt.xlim((0.0, 1500 * 1e-3))\n plt.title(\"$v$\")\n plt.xlabel(\"Velocity \\n $\\\\times 10^3$ (m/s)\", fontsize=9)\n plt.ylabel(\"Depth (m)\", fontsize=9)\n\n plt.subplot(144)\n posind = seis > 0.0\n plt.plot(seis, tseis, color=\"black\", linewidth=1)\n plt.fill_between(\n seis[posind],\n tseis[posind],\n np.zeros_like(seis[posind]),\n color=\"k\",\n edgecolor=\"white\",\n )\n plt.title(\"Seismogram\")\n plt.grid()\n plt.ylim((0.0, 0.2))\n plt.gca().invert_yaxis()\n plt.xlim((-1.0, 1.0))\n\n plt.setp(plt.xticks()[1], rotation=\"90\", fontsize=9)\n plt.setp(plt.yticks()[1], fontsize=9)\n plt.gca().set_xlabel(\"Amplitude\", fontsize=9)\n plt.gca().set_ylabel(\"Time (s)\", fontsize=9)\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "f9a160d9d5ef67f1fde9cbcd4bf6c880", "score": "0.5753497", "text": "def test(pmn, pstd, mn, std, weights, biases,n=100, t0=[3750.,4500.], g0=2., mh0=0.) 
:\n fig,ax=plots.multi(2,6,figsize=(8,12))\n\n xt=['Teff','logg','[M/H]','[alpha/M]','[C/M]','[N/M]']\n for i,ipar in enumerate([0,1,2,3,4,5]) : \n for ipix in range(len(weights)) :\n for it0 in range(2) :\n pars=np.tile([t0[it0], g0, mh0, 0.0, 0., 0., 2.],(n,1))\n if ipar == 0 : pars[:,ipar]=np.linspace(3000.,8000.,n)\n elif ipar == 1 : pars[:,ipar]=np.linspace(-0.5,5.5,n)\n elif ipar == 2 : pars[:,ipar]=np.linspace(-2.5,1.,n)\n elif ipar == 3 : pars[:,ipar]=np.linspace(-0.5,1.0,n)\n elif ipar == 4 : pars[:,ipar]=np.linspace(-1.,1.,n)\n elif ipar == 5 : pars[:,ipar]=np.linspace(-0.5,2.,n)\n m=[]\n for ip in range(pars.shape[0]) : m.append(model((pars[ip,:]-pmn)/pstd,mn[ipix],std[ipix],weights[ipix],biases[ipix]))\n plots.plotl(ax[i,it0],pars[:,ipar],m,xt=xt[i])\n #m=[]\n #for ip in range(pars.shape[0]) : m.append(nets[ipix].predict((pars[ip,:].reshape(1,7)-pmn)/pstd)[0,0]*std[ipix]+mn[ipix])\n #plots.plotl(ax[i,it0],pars[:,ipar],m)\n if i == 0 : ax[i,it0].set_title('{:8.0f}{:7.2f}{:7.2f}'.format(t0[it0],g0,mh0))\n fig.tight_layout()", "title": "" }, { "docid": "80116d06f03465d4941099ced1064892", "score": "0.5728882", "text": "def plot_splitsignal_post(m, outpath):\n\n # get y_mod, y_rot, y_orb, y_tra. here: cheat. just randomly select 1 from\n # posterior (TODO: take the median parameters, +generate the model instead)\n np.random.seed(42)\n sel = np.random.choice(m.trace.mu_model.shape[0], 1)\n y_mod = m.trace.mu_model[sel, :].flatten()\n y_tra = m.trace.mu_transit[sel, :].flatten()\n\n y_orb, y_rot = np.zeros_like(m.x_obs), np.zeros_like(m.x_obs)\n for modelcomponent in m.modelcomponents:\n if 'rot' in modelcomponent:\n N_harmonics = int(modelcomponent[0])\n for ix in range(N_harmonics):\n y_rot += m.trace['mu_rotsin{}'.format(ix)][sel, :].flatten()\n y_rot += m.trace['mu_rotcos{}'.format(ix)][sel, :].flatten()\n\n if 'orb' in modelcomponent:\n N_harmonics = int(modelcomponent[0])\n for ix in range(N_harmonics):\n y_orb += m.trace['mu_orbsin{}'.format(ix)][sel, :].flatten()\n y_orb += m.trace['mu_orbcos{}'.format(ix)][sel, :].flatten()\n\n # make the plot!\n plt.close('all')\n fig, axs = plt.subplots(nrows=4, figsize=(14, 12), sharex=True)\n\n axs[0].set_ylabel('flux')\n axs[0].plot(m.x_obs, m.y_obs, \".k\", ms=4, label=\"data\")\n axs[0].plot(m.x_obs, y_mod, lw=0.5, label='model',\n color='C0', alpha=1, zorder=5)\n\n for ix, f in enumerate(['rot', 'orb']):\n if f == 'rot':\n axs[0].plot(m.x_obs, y_rot, lw=0.5, label='model '+f,\n color='C{}'.format(ix+1), alpha=1, zorder=ix+3)\n if f == 'orb':\n axs[0].plot(m.x_obs, y_orb+y_tra, lw=0.5, label='model '+f,\n color='C{}'.format(ix+1), alpha=1, zorder=ix+3)\n\n axs[1].set_ylabel('flux-orb (rot)')\n axs[1].plot(m.x_obs, m.y_obs-y_orb-y_tra, \".k\", ms=4, label=\"data-orb\")\n axs[1].plot(m.x_obs, y_mod-y_orb-y_tra, lw=0.5,\n label='model-orb', color='C0', alpha=1, zorder=5)\n\n axs[2].set_ylabel('flux-rot (orb)')\n axs[2].plot(m.x_obs, m.y_obs-y_rot, \".k\", ms=4, label=\"data-rot\")\n axs[2].plot(m.x_obs, y_mod-y_rot, lw=0.5,\n label='model-rot', color='C0', alpha=1, zorder=5)\n\n axs[3].set_ylabel('flux-model')\n axs[3].plot(m.x_obs, m.y_obs-y_mod, \".k\", ms=4, label=\"data\")\n axs[3].plot(m.x_obs, y_mod-y_mod, lw=0.5, label='model',\n color='C0', alpha=1, zorder=5)\n\n axs[-1].set_xlabel(\"time [days]\")\n for a in axs:\n a.legend()\n format_ax(a)\n fig.tight_layout()\n savefig(fig, outpath, writepdf=0, dpi=300)\n\n ydict = {\n 'x_obs': m.x_obs,\n 'y_obs': m.y_obs,\n 'y_orb': m.y_obs-y_rot,\n 'y_rot': m.y_obs-y_orb,\n 'y_mod_tra': 
y_tra,\n 'y_mod_orb': y_orb,\n 'y_mod_rot': y_rot\n }\n return ydict", "title": "" }, { "docid": "570ae98d73e42debe105a21212a71271", "score": "0.5571824", "text": "def visualize(word, model):\n variance=np.array([np.diag(model.covars_[i]) for i in range(model.n_components)])\n figures = []\n for parm_idx in range(len(model.means_[0])):\n xmin = int(min(model.means_[:,parm_idx]) - max(variance[:,parm_idx]))\n xmax = int(max(model.means_[:,parm_idx]) + max(variance[:,parm_idx]))\n fig, axs = plt.subplots(model.n_components, sharex=True, sharey=False)\n colours = cm.rainbow(np.linspace(0, 1, model.n_components))\n for i, (ax, colour) in enumerate(zip(axs, colours)):\n x = np.linspace(xmin, xmax, 100)\n mu = model.means_[i,parm_idx]\n sigma = math.sqrt(np.diag(model.covars_[i])[parm_idx])\n ax.plot(x, mlab.normpdf(x, mu, sigma), c=colour)\n ax.set_title(\"{} feature {} hidden state #{}\".format(word, parm_idx, i))\n\n ax.grid(True)\n figures.append(plt)\n for p in figures:\n p.show()", "title": "" }, { "docid": "ea46075509fa54482dda6630f76686df", "score": "0.552548", "text": "def main():\n depths = np.tile(np.arange(3), 10)\n domains = np.repeat(np.arange(5), 6)\n categories = np.repeat(np.arange(10), 3)\n\n # choose amount of noise to prevent points from perfectly overlapping\n levels = [\n {\"name\": \"Domain\", \"level\": domains, \"seed\": 0, \"noise\": 0.3},\n {\"name\": \"Category\", \"level\": categories, \"seed\": 0, \"noise\": 0.15},\n {\"name\": \"Depth\", \"level\": depths, \"seed\": 0, \"noise\": 0.3},\n ]\n\n fig, axes = plt.subplots(\n figsize=(21, 14), ncols=3, nrows=2, gridspec_kw={\"hspace\": 0.5}\n )\n\n for level_idx, level in enumerate(levels):\n plot_RDM(axes[0, level_idx], level)\n plot_MDS(axes[1, level_idx], level, plot_dashed_lines=True)\n\n save_path = f\"{PATHS['figures']}/figure_5_hypothetical_rdm_mds.png\"\n plt.savefig(save_path, dpi=DPI, bbox_inches=\"tight\")\n plt.close(fig)", "title": "" }, { "docid": "88c047d5aa96a958c812c0caeaabb706", "score": "0.55189705", "text": "def makeSinleptonPlots(sel, lep, suffix, channel, is_MC=False):\n plots = []\n\n channelLabel = SingleLeptonChannelTitleLabel(channel)\n\n # PT plot #\n plots.append(Plot.make1D(\"%s_%s_lepton_pt\"%(channel,suffix), \n lep.pt, \n sel, \n EquidistantBinning(60,0.,300.),\n title=\"Transverse momentum of the lepton (channel %s)\"%channel, \n xTitle= \"P_{T} (lepton) [GeV]\",\n plotopts = channelLabel))\n\n # Eta plot #\n plots.append(Plot.make1D(\"%s_%s_lepton_eta\"%(channel,suffix), \n lep.eta, \n sel, \n EquidistantBinning(22, -3., 3.), \n title=\"Pseudorapidity of the lepton (channel %s)\"%channel, \n xTitle= \"#eta (lepton)\",\n plotopts = channelLabel))\n # PT-eta plots #\n #plots.append(Plot.make2D(\"%s_%s_lepton_ptVSeta\"%(channel,suffix), \n # [lep.pt, lep.eta],\n # sel, \n # [EquidistantBinning(60,0.,300.),EquidistantBinning(22, -3., 3.)],\n # xTitle= \"P_{T} (lepton) [GeV]\",\n # yTitle= \"#eta (lepton)\",\n # plotopts = channelLabel))\n # Phi plot #\n plots.append(Plot.make1D(\"%s_%s_lepton_phi\"%(channel,suffix), \n lep.phi, \n sel, \n EquidistantBinning(20, -3.2, 3.2), \n title=\"Azimutal angle of the lepton (channel %s)\"%channel, \n xTitle= \"#phi (lepton)\",\n plotopts = channelLabel))\n\n # GenPartFlav (if isMC) #\n #plots.append(Plot.make1D(\"%s_%s_lepton_genPartFlav\"%(channel,suffix), \n # lep.genPartFlav if is_MC else op.c_int(-1),\n # sel, \n # EquidistantBinning(23, -1., 22.), \n # title=\"Flavour of genParticle (channel %s)\"%channel, \n # xTitle= \"GenParticle flavour 
(lepton)\",\n # plotopts = channelLabel))\n\n return plots", "title": "" }, { "docid": "691bfb943fb00e0588aff57ff1740a03", "score": "0.5489733", "text": "def plot_s_parameters_script(model, **options):\n defaults = {\"noise\": None,\n \"s\": None,\n \"index\": None}\n plot_options = {}\n all_plots = True\n for key, value in defaults.iteritems():\n plot_options[key] = value\n for key, value in options.iteritems():\n plot_options[key] = value\n if isinstance(plot_options['index'], int):\n all_plots = False\n index = plot_options['index']\n\n def format(x, pos):\n return '1 - {:1.1f}e-4'.format(abs((x - 1)*10**4))\n\n def format2(x, pos):\n return '{:1.1f}e-6'.format(x*10**6)\n\n from matplotlib.ticker import FuncFormatter\n\n if all_plots:\n fig, axarr = plt.subplots(2, 2)\n axarr[0, 0].plot(simple_plot_script(model, index=0)[0]/10**9, simple_plot_script(model, index=0)[1], 'b')\n axarr[0, 0].plot(simple_plot_script(model, index=0)[0]/10**9, simple_plot_script(model, index=0)[2], 'r')\n axarr[0, 0].get_yaxis().set_major_formatter(FuncFormatter(format))\n axarr[0, 0].set_title('S11')\n\n axarr[0, 1].plot(simple_plot_script(model, index=1)[0]/10**9, simple_plot_script(model, index=1)[1], 'b')\n axarr[0, 1].plot(simple_plot_script(model, index=1)[0]/10**9, simple_plot_script(model, index=1)[2], 'r')\n axarr[0, 1].get_yaxis().set_major_formatter(FuncFormatter(format2))\n axarr[0, 1].set_title('S12')\n\n axarr[1, 0].plot(simple_plot_script(model, index=2)[0]/10**9, simple_plot_script(model, index=2)[1], 'b')\n axarr[1, 0].plot(simple_plot_script(model, index=2)[0]/10**9, simple_plot_script(model, index=2)[2], 'r')\n axarr[1, 0].get_yaxis().set_major_formatter(FuncFormatter(format2))\n axarr[1, 0].set_title('S21')\n\n axarr[1, 1].plot(simple_plot_script(model, index=3)[0]/10**9, simple_plot_script(model, index=3)[1], 'b')\n axarr[1, 1].plot(simple_plot_script(model, index=3)[0]/10**9, simple_plot_script(model, index=3)[2], 'r')\n axarr[1, 1].get_yaxis().set_major_formatter(FuncFormatter(format))\n axarr[1, 1].set_title('S22')\n\n fig.text(0.5, 0.008, 'Frequency [GHz]', ha='center')\n fig.text(0.008, 0.5, 'Magnitude', va='center', rotation='vertical')\n fig.suptitle(str(type(model).__name__) + \" S Parameters vs. Frequency\", fontsize=18)\n fig.tight_layout()\n fig.subplots_adjust(top=0.86)\n\n else:\n plt.plot(simple_plot_script(model, **options)[0]/10**9, simple_plot_script(model, **options)[1], 'b')\n plt.plot(simple_plot_script(model, **options)[0]/10**9, simple_plot_script(model, **options)[2], 'r')\n plt.ylabel('Magnitude')\n plt.xlabel('Frequency [GHz]')\n if index == 0:\n plt.title(str(type(model).__name__) + \" S11 vs. Frequency\")\n elif index == 1:\n plt.title(str(type(model).__name__) + \" S12 vs. Frequency\")\n elif index == 2:\n plt.title(str(type(model).__name__) + \" S21 vs. Frequency\")\n else:\n plt.title(str(type(model).__name__) + \" S22 vs. 
Frequency\")\n plt.show()", "title": "" }, { "docid": "2b339bbbf0b34054aa044fd8381ac92e", "score": "0.5484556", "text": "def plotme(data, b, sncolors, axtype=None, verbose=False):\n\n pl.figure(figsize=(20, 15))\n gs = gridspec.GridSpec(3, 2)\n gs.update(wspace=0.05)\n ax0 = pl.subplot(gs[:-1, :])\n ax2 = pl.subplot(gs[-1, 1])\n\n sourcedata = dict(\n id=[],\n type=[],\n x=[],\n y=[],\n yerr=[],\n colors=[],\n mask=[])\n\n sncount = 0\n snN = len(data['phase'])\n badcount = 0\n\n allSNe_mod = {'phase': [], 'mag': []}\n\n for i, tmp in enumerate(data['phase']):\n\n flag = False\n if len(tmp) == 0:\n continue\n # print \"tmp\", tmp\n # sorted phases order\n indx = np.argsort(tmp)\n\n corephases = (tmp > -5) * (tmp < 5)\n\n\n # Removing these due to no coverage of the peak\n if (b == 'U' and '54A' in data['name'][i]) \\\n or (b == 'V' and '54A' in data['name'][i]) \\\n or (b == 'U' and '03dh' in data['name'][i]):\n continue\n\n if corephases.sum() < 1:\n print(data['name'][i], b,\n \"has no datapoints between near 0. Moving on\")\n continue\n\n if data['name'][i] in ['03lw', '04dk', '04gt', '06fo',\n '07D', '13cq']:\n flag = True # continue\n if flag:\n badcount += 1\n # set offset to minimum mag first\n # magoffset is the index of the minimum (brightest) dp\n\n # if ('13dx' == data['name'][i] and b == 'i'):\n # magoffset = np.where(data['mag'][i][tmp > -2] ==\n # min(data['mag'][i][tmp > -2]))[0]\n # else:\n magoffset = np.where(data['mag'][i][corephases] ==\n min(data['mag'][i][corephases]))[0]\n\n # if more than one peak have min value (nearly impossible w floats) choose first\n if len(magoffset) > 1:\n tmpmo = magoffset[(data['mag'][i][magoffset] ==\n min(data['mag'][i][magoffset]))][0]\n magoffset = np.asarray([tmpmo])\n ## Commented by Somayeh\n # if '16gkg' in data['name'][i]:\n # magoffset = [0]\n\n\n\n # if the maximum is more than 3 days off from expected for this band\n # be suspicious and reset it if you can !\n\n if np.abs(tmp[magoffset] - snstuff.coffset[b]).any() > 3 \\\n and (np.abs(tmp - snstuff.coffset[b]) < 1).any():\n # we can add exceptions here\n if not (b == 'u' and '13dx' in data['name'][i]) \\\n and not (b == 'i' and '13dx' in data['name'][i]) \\\n and not (b == 'H' and '09iz' in data['name'][i]):\n magoffset = np.where(np.abs(tmp) == np.min(np.abs(tmp)))[0]\n\n if data['name'][i] == '03dh':\n magoffset = np.where(np.abs(tmp) == np.min(np.abs(tmp)))[0]\n\n # if more than one peak have min value (nearly impossible w floats) choose first\n if not isinstance(magoffset, int):\n if len(magoffset) > 1:\n tmpmo = magoffset[(data['mag'][i][magoffset] ==\n min(data['mag'][i][magoffset]))][0]\n magoffset = np.asarray([tmpmo])\n\n\n\n sncount += 1\n\n\n # set up key for hover tool: same name and type for all points\n sourcedata['id'] = sourcedata['id'] + [data['name'][i]] * len(indx)\n sourcedata['type'] = sourcedata['type'] + [data['type'][i]] * len(indx)\n if verbose: print('old epochs:', sourcedata['x'])\n if verbose: print('phases:', tmp)\n if verbose: print('peak:', list(tmp[indx]))\n if verbose: print(data['phase'][i][magoffset])\n sntp = data['type'][i]\n if not sntp in ['Ib', 'IIb', 'Ic', 'Ic-bl']:\n sntp = 'other'\n\n sourcedata['yerr'] = sourcedata['yerr'] + list(data['dmag'][i][indx])\n sourcedata['colors'] = sourcedata['colors'] + \\\n [rgb_to_hex(255. 
* sncolors[i])] * len(indx)\n maskhere = [False] * len(indx)\n\n # removing epochs <0 for dh03 due to GRB contamination\n if '03dh' in data['name'][i]:\n maskhere = np.array(maskhere)\n maskhere[tmp[indx] < 0] = True\n maskhere = maskhere.tolist()\n if ('06jc' in data['name'][i] and b in ['H', 'J', 'K']):\n maskhere = np.array(maskhere)\n maskhere[tmp[indx] > 30] = True\n maskhere = maskhere.tolist()\n\n # print sourcedata['colors']\n sourcedata['mask'] = sourcedata['mask'] + maskhere\n if not flag:\n ## Commented by Somayeh\n sourcedata['x'] = sourcedata['x'] + list(tmp[indx])\n # - data['phase'][i][magoffset])\n allSNe_mod['phase'].append(list(tmp[indx]))\n sourcedata['y'] = sourcedata['y'] + list(-(data['mag'][i][indx]\n - data['mag'][i][magoffset]))\n allSNe_mod['mag'].append(list(-(data['mag'][i][indx] - data['mag'][i][magoffset])))\n\n ax0.errorbar(tmp[indx],\n data['mag'][i][indx] - data['mag'][i][magoffset],\n yerr=data['dmag'][i][indx],\n fmt='-', color=sncolors[i],\n label=data['name'][i], alpha=0.5)\n ax2.errorbar(tmp[indx][~np.array(maskhere)],\n data['mag'][i][indx][~np.array(maskhere)] - \\\n data['mag'][i][magoffset],\n yerr=data['dmag'][i][indx][~np.array(maskhere)],\n fmt='.', color=sncolors[i],\n alpha=0.5)\n if axtype:\n axtype.errorbar(tmp[indx][~np.array(maskhere)],\n data['mag'][i][indx][~np.array(maskhere)] - \\\n data['mag'][i][magoffset],\n yerr=data['dmag'][i][indx][~np.array(maskhere)],\n fmt='.', color=colorTypes[sntp],\n alpha=0.5)\n axtype.set_ylim(axtype.get_ylim()[1], axtype.get_ylim()[0])\n else:\n sourcedata['x'] = sourcedata['x'] + list(tmp[indx])\n\n sourcedata['y'] = sourcedata['y'] + list(-(data['mag'][i][indx]\n - data['mag'][i].min()))\n ax0.errorbar(tmp[indx],\n data['mag'][i][indx] - data['mag'][i].min(),\n yerr=data['dmag'][i][indx],\n fmt='--', color=sncolors[i],\n label=data['name'][i], alpha=0.5)\n ax2.errorbar(tmp[indx][~np.array(maskhere)],\n data['mag'][i][indx][~np.array(maskhere)],\n yerr=data['dmag'][i][indx][~np.array(maskhere)],\n fmt='.', color=sncolors[i],\n alpha=0.5)\n\n ax0.set_title(b + \"(%d)\" % (sncount - badcount), fontsize=20)\n ax0.set_ylabel(\"relative magnitude\", fontsize=20)\n ax2.set_ylabel(\"relative magnitude\", fontsize=20)\n ax2.set_xlabel(\"phase (days since Vmax)\", fontsize=20)\n ax2.yaxis.tick_right()\n ax2.grid(True)\n ax0.grid(True)\n\n ax2.yaxis.set_label_position(\"right\")\n ax0.legend(framealpha=0.5, ncol=4, numpoints=1, prop={'size': 13})\n ax0.set_ylim(ax0.get_ylim()[1], ax0.get_ylim()[0])\n # ax0.set_xlim(20,80)\n # ax0.set_ylim(4,-2)\n\n ax2.set_ylim(ax2.get_ylim()[1], ax2.get_ylim()[0])\n ax2.set_xlim(-27, 105)\n\n sourcedata['x'] = np.asarray(sourcedata['x'])\n sourcedata['y'] = np.asarray(sourcedata['y'])\n sourcedata['yerr'] = np.asarray(sourcedata['yerr'])\n\n indx = ~(np.isnan(sourcedata['x']) * np.isnan(sourcedata['yerr']) *\n np.isnan(sourcedata['y']))\n\n sourcedata['x'] = np.asarray(sourcedata['x'])[indx]\n sourcedata['y'] = np.asarray(sourcedata['y'])[indx]\n sourcedata['yerr'] = np.asarray(sourcedata['yerr'])[indx]\n\n sourcedata['id'] = np.asarray(sourcedata['id'])[indx]\n sourcedata['type'] = np.asarray(sourcedata['type'])[indx]\n\n sourcedata['colors'] = np.asarray(sourcedata['colors'])[indx]\n sourcedata['mask'] = np.asarray(sourcedata['mask'])[indx]\n\n sncolordic = {}\n for i, k in enumerate(sourcedata['id']):\n sncolordic[k] = sourcedata['colors'][i]\n if not os.path.exists('outputs'):\n os.mkdir('outputs')\n\n pkl.dump(sncolordic, open('outputs/colorSNe.pkl', 'wb'))\n\n return 
allSNe_mod, sourcedata, (ax0, ax2, gs)", "title": "" }, { "docid": "33e876a1bc5f9a38484211922721011e", "score": "0.54594016", "text": "def msed_plots(pressure,temperature,mixing_ratio,altitude,h0_std=2000,ensemble_size=20,ent_rate=np.arange(0,2,0.05),\n entrain=False):\n p = pressure*units('mbar')\n T = temperature*units('degC')\n q = mixing_ratio*units('kilogram/kilogram')\n qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(T),p)\n Td = mpcalc.dewpoint(mpcalc.vapor_pressure(p,q)) # dewpoint \n Tp = mpcalc.parcel_profile(p,T[0],Td[0]).to('degC') # parcel profile\n \n # Altitude based on the hydrostatic eq.\n if len(altitude) == len(pressure): # (1) altitudes for whole levels\n altitude = altitude*units('meter')\n elif len(altitude) == 1: # (2) known altitude where the soundings was launched\n z_surf = altitude.copy()*units('meter'); # given altitude\n altitude = np.zeros((np.size(T)))*units('meter') \n for i in range(np.size(T)):\n altitude[i] = mpcalc.thickness_hydrostatic(p[:i+1],T[:i+1]) + z_surf # Hypsometric Eq. for height\n else: \n print('***NOTE***: the altitude at the surface is assumed 0 meter, and altitudes are derived based on the hypsometric equation')\n altitude = np.zeros((np.size(T)))*units('meter') # surface is 0 meter\n for i in range(np.size(T)):\n altitude[i] = mpcalc.thickness_hydrostatic(p[:i+1],T[:i+1]) # Hypsometric Eq. for height\n \n # Static energy calculations \n mse = mpcalc.moist_static_energy(altitude,T,q)\n mse_s = mpcalc.moist_static_energy(altitude,T,qs)\n dse = mpcalc.dry_static_energy(altitude,T)\n\n # Water vapor calculations\n p_PWtop = min(p)\n #p_PWtop = max(200*units.mbar, min(p) + 1*units.mbar) # integrating until 200mb \n cwv = mpcalc.precipitable_water(Td,p,top=p_PWtop) # column water vapor [mm]\n cwvs = mpcalc.precipitable_water(T,p,top=p_PWtop) # saturated column water vapor [mm]\n crh = (cwv/cwvs)*100. 
# column relative humidity [%]\n\n #================================================\n # plotting MSE vertical profiles\n fig = plt.figure(figsize=[12,8])\n ax = fig.add_axes([0.1,0.1,0.6,0.8])\n ax.plot(dse,p,'-k',linewidth=2)\n ax.plot(mse,p,'-b',linewidth=2)\n ax.plot(mse_s,p,'-r',linewidth=2)\n \n # mse based on different percentages of relative humidity\n qr = np.zeros((9,np.size(qs)))*units('kilogram/kilogram'); mse_r = qr*units('joule/kilogram')# container\n for i in range(9):\n qr[i,:] = qs*0.1*(i+1)\n mse_r[i,:] = mpcalc.moist_static_energy(altitude,T,qr[i,:])\n\n for i in range(9):\n ax.plot(mse_r[i,:],p[:],'-',color='grey',linewidth=0.7)\n ax.text(mse_r[i,3].magnitude/1000-1,p[3].magnitude,str((i+1)*10))\n \n # drawing LCL and LFC levels\n [lcl_pressure, lcl_temperature] = mpcalc.lcl(p[0], T[0], Td[0])\n lcl_idx = np.argmin(np.abs(p.magnitude - lcl_pressure.magnitude))\n \n [lfc_pressure, lfc_temperature] = mpcalc.lfc(p,T,Td)\n lfc_idx = np.argmin(np.abs(p.magnitude - lfc_pressure.magnitude))\n \n # conserved mse of air parcel arising from 1000 hpa \n mse_p = np.squeeze(np.ones((1,np.size(T)))*mse[0].magnitude)\n \n # illustration of CAPE\n el_pressure,el_temperature = mpcalc.el(p,T,Td) # equilibrium level\n el_idx = np.argmin(np.abs(p.magnitude - el_pressure.magnitude))\n ELps = [el_pressure.magnitude] # Initialize an array of EL pressures for detrainment profile\n \n [CAPE,CIN] = mpcalc.cape_cin(p[:el_idx],T[:el_idx],Td[:el_idx],Tp[:el_idx])\n\n ax.plot(mse_p,p,'g',linewidth=1.5)\n ax.fill_betweenx(p[lcl_idx:el_idx+1],mse_p[lcl_idx:el_idx+1],mse_s[lcl_idx:el_idx+1],interpolate=True\n ,color='green',alpha='0.3')\n\n ax.fill_betweenx(p,dse,mse,color='deepskyblue',alpha='0.5')\n ax.set_xlabel('Specific static energies: s, h, hs [kJ kg$^{-1}$]',fontsize=14)\n ax.set_ylabel('Pressure [hPa]',fontsize=14)\n ax.set_xticks([280,300,320,340,360,380])\n ax.set_xlim([280,390])\n ax.set_ylim(1030,120)\n \n if entrain is True:\n # Depict Entraining parcels\n # Parcel mass solves dM/dz = eps*M, solution is M = exp(eps*Z)\n # M=1 at ground without loss of generality\n \n # Distribution of surface parcel h offsets\n h0offsets = np.sort(np.random.normal(0, h0_std, ensemble_size))*units('joule/kilogram')\n # Distribution of entrainment rates \n entrainment_rates = ent_rate /(units('km'))\n \n for h0offset in h0offsets:\n \n h4ent = mse.copy(); h4ent[0] += h0offset;\n \n for eps in entrainment_rates: \n \n hent = h4ent.copy()\n delz = np.gradient(altitude)*units('meter')\n \n for iz in range(1,len(altitude[1:])):\n hent[iz] = hent[iz-1] + eps*delz[iz-1]*( mse[iz-1]-hent[iz-1] ) \n \n # Boolean for positive buoyancy, and its topmost altitude (index) where curve is clippes\n posboy = (hent > mse_s); posboy[0] = True # so there is always a detrainment level\n \n # defining the first EL by posboy as the detrainment layer, switching from positive buoyancy to\n # negative buoyancy (0 to 1) and skipping the surface\n ELindex_ent = 0\n for idx in range(len(posboy)-1):\n if posboy[idx+1] == 0 and posboy[idx] == 1 and idx > 0: \n ELindex_ent = idx;break\n \n # Plot the curve \n plt.plot( hent[0:ELindex_ent+2], p[0:ELindex_ent+2], linewidth=0.6, color='g')\n #plt.plot( hent[0:], p[0:], linewidth=0.6, color='g')\n # Keep a list for a histogram plot (detrainment profile) \n if p[ELindex_ent].magnitude < lfc_pressure.magnitude: # buoyant parcels only\n ELps.append( p[ELindex_ent].magnitude )\n \n # Plot a crude histogram of parcel detrainment levels\n NBINS = 20\n pbins = np.linspace(1000,150,num=NBINS) # 
pbins for detrainment levels\n hist = np.zeros((len(pbins)-1))\n for x in ELps:\n for i in range(len(pbins)-1):\n if (x < pbins[i]) & (x >= pbins[i+1]):\n hist[i] += 1;break\n \n det_per = hist/sum(hist)*100; # percentages of detrainment ensumbles at levels\n \n ax2 = fig.add_axes([0.705,0.1,0.1,0.8],facecolor=None)\n ax2.barh( pbins[1:], det_per, color='lightgrey',edgecolor='k',height=15*(20/NBINS))\n ax2.set_xlim([0,100])\n ax2.set_xticks([0,20,40,60,80,100])\n ax2.set_ylim([1030,120])\n ax2.set_xlabel('Detrainment [%]')\n ax2.grid()\n ax2.set_zorder(2)\n\n ax.plot( [400,400], [1100,0])\n ax.annotate('Detrainment', xy=(362,320), color='dimgrey')\n ax.annotate('ensemble: ' + str(ensemble_size*len(entrainment_rates)), xy=(364, 340), color='dimgrey')\n ax.annotate('Detrainment', xy=(362,380), color='dimgrey')\n ax.annotate(' scale: 0 - 2 km', xy=(365,400), color='dimgrey')\n \n # Overplots on the mess: undilute parcel and CAPE, etc. \n ax.plot( (1,1)*mse[0], (1,0)*(p[0]), color='g',linewidth=2)\n\n # Replot the sounding on top of all that mess\n ax.plot(mse_s , p, color='r', linewidth=1.5) \n ax.plot(mse , p, color='b', linewidth=1.5) \n\n # label LCL and LCF\n ax.plot((mse_s[lcl_idx]+(-2000,2000)*units('joule/kilogram')), lcl_pressure+(0,0)*units('mbar') ,color='orange',linewidth=3)\n ax.plot((mse_s[lfc_idx]+(-2000,2000)*units('joule/kilogram')), lfc_pressure+(0,0)*units('mbar') , color='magenta',linewidth=3)\n \n \n ### Internal waves (100m adiabatic displacements, assumed adiabatic: conserves s, sv, h). \n #dZ = 100 *mpunits.units.meter\n dp = 1000*units.pascal\n \n # depict displacements at sounding levels nearest these target levels\n targetlevels = [900,800,700,600,500,400,300,200]*units.hPa\n for ilev in targetlevels:\n idx = np.argmin(np.abs(p - ilev))\n\n # dp: hydrostatic\n rho = (p[idx])/Rd/(T[idx])\n dZ = -dp/rho/g\n\n # dT: Dry lapse rate dT/dz_dry is -g/Cp\n dT = (-g/Cp_d *dZ).to('kelvin') \n Tdisp = T[idx].to('kelvin') + dT\n\n # dhsat\n dqs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Tdisp) ,p[idx]+dp) - qs[idx]\n dhs = g*dZ + Cp_d*dT + Lv*dqs\n\n # Whiskers on the data plots\n ax.plot( (mse_s[idx]+dhs*(-1,1)), p[idx]+dp*(-1,1), linewidth=3, color='r') \n ax.plot( (dse[idx] *( 1,1)), p[idx]+dp*(-1,1), linewidth=3, color='k') \n ax.plot( (mse[idx] *( 1,1)), p[idx]+dp*(-1,1), linewidth=3, color='b') \n\n # annotation to explain it \n if ilev == 400*ilev.units:\n ax.plot(360*mse_s.units +dhs*(-1,1)/1000, 440*units('mbar')\n +dp*(-1,1), linewidth=3, color='r') \n ax.annotate('+/- 10mb', xy=(362,440), fontsize=8)\n ax.annotate(' adiabatic displacement', xy=(362,460), fontsize=8)\n \n # Plot a crude histogram of parcel detrainment levels\n # Text parts\n ax.text(290,pressure[3],'RH (%)',fontsize=11,color='k')\n ax.text(285,200,'CAPE = '+str(np.around(CAPE.magnitude,decimals=2))+' [J/kg]',fontsize=12,color='green')\n ax.text(285,250,'CIN = '+str(np.around(CIN.magnitude,decimals=2))+' [J/kg]',fontsize=12,color='green')\n ax.text(285,300,'LCL = '+str(np.around(lcl_pressure.magnitude,decimals=2))+' [hpa]',fontsize=12,color='darkorange')\n ax.text(285,350,'LFC = '+str(np.around(lfc_pressure.magnitude,decimals=2))+' [hpa]',fontsize=12,color='magenta')\n ax.text(285,400,'CWV = '+str(np.around(cwv.magnitude,decimals=2))+' [mm]',fontsize=12,color='deepskyblue')\n ax.text(285,450,'CRH = '+str(np.around(crh.magnitude,decimals=2))+' [%]',fontsize=12,color='blue')\n ax.legend(['DSE','MSE','SMSE'],fontsize=12,loc=1)\n \n ax.set_zorder(3)\n \n return (ax)", "title": "" }, { "docid": 
"14e6050e37ff5d33d0b79a3dc5d162e0", "score": "0.5393871", "text": "def setup_msdview(ax): # pragma: no cover\n ax.plot([0], color=\"#34a5daff\")\n ax.set_ylabel(\"MSD/m$^2$\", fontsize=16)\n ax.set_xlabel(\"Time/s\", fontsize=16)", "title": "" }, { "docid": "d49663f5f6c5bf7bf2b8e387eed643d8", "score": "0.5384135", "text": "def plot_tsne(embedding, labels, phase=\"train\"):\n X_tsne = TSNE(n_components=2).fit_transform(embedding)\n tsne_x = X_tsne[:, 0]\n tsne_y = X_tsne[:, 1]\n\n tsne_x = sort_together([labels,tsne_x])[1]\n tsne_y = sort_together([labels,tsne_y])[1]\n labels = sort_together([labels,labels])[1]\n \n sym = [0, 1, 4, 24, 5, 3, 17, 13, 26, 20]\n classes = {\n 0: \"plane\",\n 1: \"car\",\n 2: \"bird\",\n 3: \"cat\",\n 4: \"deer\",\n 5: \"dog\",\n 6: \"frog\",\n 7: \"horse\",\n 8: \"ship\",\n 9: \"truck\",\n }\n\n class_label = [classes[i] for i in labels]\n\n df = pd.DataFrame(\n list(zip(tsne_x, tsne_y, class_label)), columns=[\"x\", \"y\", \"Class\"]\n )\n\n fig = px.scatter(\n df,\n x=\"x\",\n y=\"y\",\n color=\"Class\",\n symbol=\"Class\",\n symbol_sequence=sym,\n hover_name=class_label,\n labels={\"color\": \"Class\"},\n )\n\n if g.wandb_log:\n if phase == \"train\":\n wandb.log({\"t-SNE\": fig, \"epoch\": g.epoch_global})\n elif phase == \"val\":\n wandb.log({\"t-SNE Eval\": fig, \"epoch\": g.epoch_global})\n elif phase == \"test\":\n wandb.log({\"t-SNE Test\": fig, \"epoch\": g.epoch_global})\n else:\n raise Exception(\"Invalid data split!!\")\n \n if g.log_offline:\n if phase == \"train\":\n fig.write_image(f\"{g.log_dir}/metrics/tsne.png\")\n elif phase == \"val\":\n fig.write_image(f\"{g.log_dir}/metrics/tsneEval.png\")\n elif phase == \"test\": \n fig.write_image(f\"{g.log_dir}/metrics/tsneTest.png\")\n else:\n raise Exception(\"Invalid data split!!\")", "title": "" }, { "docid": "e0f24e366293c782e96d667c99d387e7", "score": "0.53732365", "text": "def plotTSE(h, dim=10, nTriples=10, nThetas=100, R=1, onSimplex=True, \r\n randseed=-1):\r\n \r\n if randseed >= 0:\r\n np.random.seed(randseed)\r\n \r\n plt.figure()\r\n \r\n for k in range(nTriples):\r\n x = R * np.random.rand(dim)\r\n y = R * np.random.rand(dim)\r\n z = R * np.random.rand(dim)\r\n if onSimplex:\r\n x = x / x.sum()\r\n y = y / y.sum()\r\n z = z / z.sum()\r\n \r\n theta = np.arange(1.0/nThetas, 1, 1.0/nThetas)\r\n expnt = np.zeros(theta.shape)\r\n dyz = h.divergence(y, z)\r\n\r\n for i in range(theta.size):\r\n c = theta[i]\r\n dtheta = h.divergence((1-c)*x+c*y, (1-c)*x+c*z)\r\n expnt[i] = np.log(dtheta / dyz) / np.log(c)\r\n #expnt[i] = (np.log(dtheta) - np.log(dyz)) / np.log(c)\r\n plt.plot(theta, expnt)\r\n\r\n plt.xlim([0,1])\r\n #plt.ylim([0,5])\r\n #plt.xlabel(r'$\\theta$')\r\n #plt.ylabel(r'$\\hat{\\gamma}(\\theta)$')\r\n plt.tight_layout()", "title": "" }, { "docid": "bfc01c00ec59518de81b8ed91e80a469", "score": "0.5372259", "text": "def run(self):\n print(\"Plot Seismograms\".center(80, '-'))\n\n if not self.plot_vel and not self.plot_acc:\n # Nothing needs to be plotted\n return\n install = InstallCfg.getInstance()\n sim_id = self.sim_id\n\n a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))\n a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))\n\n a_statlist = os.path.join(a_indir, self.r_stations)\n slo = StationList(a_statlist)\n site_list = slo.getStationList()\n\n # Get fault information, if available\n if self.src_keys is not None:\n origin = (self.src_keys['lon_top_center'],\n self.src_keys['lat_top_center'])\n dims = (self.src_keys['fault_length'], self.src_keys['dlen'],\n 
self.src_keys['fault_width'], self.src_keys['dwid'],\n self.src_keys['depth_to_top'])\n mech = (self.src_keys['strike'], self.src_keys['dip'],\n self.src_keys['rake'])\n\n for site in site_list:\n print(\"==> Plotting station: %s\" % (site.scode))\n # Calculate Rrup\n rrup = None\n if self.src_keys is not None:\n site_geom = [float(site.lon), float(site.lat), 0.0]\n (fault_trace1, up_seis_depth,\n low_seis_depth, ave_dip,\n dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)\n _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,\n fault_trace1,\n up_seis_depth,\n low_seis_depth,\n ave_dip)\n\n # Check if we need to plot velocity seismograms\n if self.plot_vel:\n print(\"===> Plotting velocity...\")\n filename = os.path.join(a_outdir, \"%d.%s.vel.bbp\" %\n (sim_id, site.scode))\n outfile = os.path.join(a_outdir, \"%d.%s_velocity_seis.png\" %\n (sim_id, site.scode))\n plot_seismograms.plot_seis(site.scode, filename, sim_id,\n 'vel', outfile,\n rrup=rrup)\n # Check if we need to plot acceleration seismograms\n if self.plot_acc:\n print(\"===> Plotting acceleration...\")\n filename = os.path.join(a_outdir, \"%d.%s.acc.bbp\" %\n (sim_id, site.scode))\n outfile = os.path.join(a_outdir, \"%d.%s_acceleration_seis.png\" %\n (sim_id, site.scode))\n plot_seismograms.plot_seis(site.scode, filename, sim_id,\n 'acc', outfile,\n rrup=rrup)\n\n print(\"Plot Seismograms Completed\".center(80, '-'))", "title": "" }, { "docid": "5aa809c16a2178aff0f5ca95e37c08c4", "score": "0.5365298", "text": "def plot_pystan(data, fit, ncomp=None, model_name=None, linear=True, wavelength=True, logplot=True, label=None):\n \n params = fit.get_posterior_mean().mean(axis=1)[:-1]\n flatnames = fit.flatnames\n \n if model_name is None:\n model_name = fit.model_name\n model_name = model_name[0:model_name.rfind(\"_\")]\n \n if ncomp is None:\n ncomp = fit.data[\"N_comp\"]\n\n if model_name in (\"greybody\", \"greybody_beta2\"):\n assert ncomp in (1,2)\n paramorder = [\"amplitude[0]\", \"beta[0]\", \"T[0]\"]\n if ncomp == 1:\n mod = model.submmModel1_normalized_logA if not linear else model.submmModel1_normalized\n elif ncomp == 2:\n mod = model.submmModel2_normalized_logA if not linear else model.submmModel2_normalized\n paramorder += [\"amplitude[1]\", \"beta[1]\", \"T[1]\"]\n elif model_name == \"greybody_thick\":\n assert ncomp==1\n mod = model.submmModel1_opticallythick_logA if not linear else model.submmModel1_opticallythick\n paramorder = [\"amplitude[0]\", \"beta[0]\", \"T[0]\", \"nu0[0]\"]\n\n if model_name == \"greybody_beta2\":\n ### add in beta=2 fixed parameters\n params = params.tolist()\n for i in range(ncomp):\n flatnames.append(\"beta[%d]\" % i)\n params.append(2.0)\n params = np.array(params)\n\n assert len(params)==len(paramorder)\n assert len(params)==len(flatnames)\n flatnames = np.array(flatnames)\n\n ### package the params for my MCMC model code (duplicates stuff done in stan code...)\n MPparams = [params[flatnames==p] for p in paramorder]\n\n MPmod = mod(*MPparams)\n MPmod.plot(data, wavelength=wavelength, logplot=logplot, label=label)", "title": "" }, { "docid": "c105ef99290a245dc635b5808ec70c00", "score": "0.53536046", "text": "def plot_svga(self, m, filename):\n\n f = plt.figure(figsize=(12,6))\n a1 = f.add_axes([0.05, 0.05, 0.9, 0.6])\n a2 = f.add_axes([0.05, 0.7, 0.9, 0.1])\n a3 = f.add_axes([0.05, 0.85, 0.9, 0.1])\n\n xx = np.linspace(m.X.read_value().min(), m.X.read_value().max(), 200).reshape(-1,1)\n mu, var = m.predict_f(xx)\n mu, var = mu.copy(), var.copy()\n p, _ = 
m.predict_y(xx)\n\n a3.set_xticks([])\n a3.set_yticks([])\n\n a3.set_xticks([])\n a3.set_yticks([])\n\n i=0\n x = m.X.read_value()[m.Y.read_value().flatten()==i]\n points, = a3.plot(x, x*0, '.')\n color=points.get_color()\n a1.plot(xx, mu[:,i], color=color, lw=2)\n a1.plot(xx, mu[:,i] + 2*np.sqrt(var[:,i]), '--', color=color)\n a1.plot(xx, mu[:,i] - 2*np.sqrt(var[:,i]), '--', color=color)\n a2.plot(xx, p[:,i], '-', color=color, lw=2)\n\n a2.set_ylim(-0.1, 1.1)\n a2.set_yticks([0, 1])\n a2.set_xticks([])\n\n self._save(plt, filename)", "title": "" }, { "docid": "a7b6f6e50898a2fc4768f8a48b1049b1", "score": "0.5327628", "text": "def plot(msd):\n #makes the plot for the msd and the mean of the msd\n try:\n for i in range(len(msd.T)):\n plt.plot(msd.T[i])\n plt.savefig(\"msd.png\")\n except: pass\n\n plt.figure(dpi = 200)\n plt.plot(np.mean(msd))\n plt.savefig(\"MSD.png\")", "title": "" }, { "docid": "b5c0fe54d77654c9b8646920e910aa54", "score": "0.5326216", "text": "def plotSeismogramInteractFixMod(wavf, wavA):\n\n d = [0.0, 50.0, 100.0] # Position of top of each layer (m)\n v = [500.0, 1000.0, 1500.0] # Velocity of each layer (m/s)\n rho = [2000.0, 2300.0, 2300.0] # Density of each layer (kg/m^3)\n wavf = np.array(wavf, dtype=float)\n usingT = True\n plotSeismogram(d, rho, v, wavf, wavA, 0.0, usingT)", "title": "" }, { "docid": "408fc3037863380fa475c82d41fcdd88", "score": "0.53138715", "text": "def plot_compare_rmse_smirks(agg_data, fnm='compare_optgeo_rmse.pdf'):\n xlabels = {\n 'bonds': 'Bond Length RMSE (Angstrom)',\n 'angles': 'Bond Angles RMSE (Degrees)',\n 'propertorsions': 'Torsion Angles RMSE (Degrees)',\n 'impropertorsions': 'Improper Torsion Angles RMSE (Degrees)'\n }\n with PdfPages(fnm) as pdf:\n for fftype, sid_data_list in agg_data.items():\n sid_list = [d['sid'] for d in sid_data_list]\n orig_rmse_array = np.array([d['orig_rmse'] for d in sid_data_list])\n new_rmse_array = np.array([d['new_rmse'] for d in sid_data_list])\n rmse_changes = new_rmse_array - orig_rmse_array\n # make plot\n n = len(sid_list)\n y_pos = np.arange(n)\n plt.figure(figsize=(8.5, n*0.12+1.2))\n # plot the initial rmse\n # plt.barh(y_pos, orig_rmse_array, tick_label=sid_list, height=0.8, color='C0', align='center')\n plt.scatter(orig_rmse_array, y_pos, marker='o', s=80, facecolors='none', edgecolors='grey')\n plt.yticks(y_pos, sid_list)\n # plot the changes in different colors\n increase_idxs = np.nonzero(rmse_changes >=0)[0]\n decrease_idxs = np.nonzero(rmse_changes <0)[0]\n xmin = 0\n xmax = max(orig_rmse_array.max(), new_rmse_array.max())\n head_length = 0.01*(xmax-xmin)\n for i in increase_idxs:\n if abs(rmse_changes[i]) > head_length:\n plt.arrow(orig_rmse_array[i], y_pos[i], rmse_changes[i], 0.0, head_width=0.4, head_length=head_length, length_includes_head=True, width=0.05, color='C3')\n for i in decrease_idxs:\n if abs(rmse_changes[i]) > head_length:\n plt.arrow(orig_rmse_array[i], y_pos[i], rmse_changes[i], 0.0, head_width=0.4, head_length=head_length, length_includes_head=True, width=0.05, color='C2')\n # Compute some metrics for a table\n print(\"Parameter type: %s\" % fftype)\n print(\"Total number of parameters: %i\" % len(sid_data_list))\n print(\"Parameters improved: %i/%i (%.1f%%)\" % (len(decrease_idxs), len(sid_data_list), 100.0*len(decrease_idxs)/len(sid_data_list)))\n print(\"Average RMSE change: %.4f -> %.4f (%.4f, %.1f%%)\" % (np.mean(orig_rmse_array), np.mean(orig_rmse_array+rmse_changes), np.mean(rmse_changes), 100.0*np.mean(rmse_changes)/np.mean(orig_rmse_array)))\n # 
plt.barh(y_pos[increase_idxs], rmse_changes[increase_idxs], left=orig_rmse_array[increase_idxs], height=0.6, color='C3', align='center')\n # plt.barh(y_pos[decrease_idxs], rmse_changes[decrease_idxs], left=orig_rmse_array[decrease_idxs], height=0.6, color='C2', align='center')\n # adjust the y range, and invert the yaxis\n #plt.ylim(y_pos[0]-1, y_pos[-1]+1)\n plt.ylim(y_pos[-1]+1, y_pos[0]-1)\n # adjust the x range\n padding = (xmax - xmin) * 0.01\n plt.xlim(xmin, xmax+padding)\n plt.xlabel(xlabels[fftype.lower()])\n # save\n plt.title(f'RMSE comparison for {fftype}')\n plt.tight_layout()\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n print(f\"RMSE compare plots saved as {fnm}\")", "title": "" }, { "docid": "47e4c5171a9a0c4876d1f2fcd41bec56", "score": "0.5312829", "text": "def splot(y, y0, yd, title=\"Denoising\"):\n fig = plt.figure(figsize=(20, 12))\n _y0 = y0[:2000]\n _y = y[:2000]\n _yd = yd[:2000]\n plt.subplot(221)\n plt.plot(_y0)\n plt.title('Raw signal :')\n plt.subplot(222)\n plt.plot(_y)\n plt.title('Noised signal')\n# plt.plot(utils.gaussian_filter(y, mu))\n# plt.title('Result for the gaussian filter - SNR :' + str(utils.snr(y0, utils.gaussian_filter(y, mu))))\n plt.subplot(223)\n plt.plot(_yd, \"r\")\n plt.plot(_y0, linewidth=2.5, alpha=0.3)\n plt.title('Denoised signal - SNR : %0.2f dB' % utils.snr(y0, yd))\n plt.subplot(224)\n plt.plot(_y0 - _yd)\n plt.title('Differences between raw and denoised signal :')\n fig.suptitle(title, fontsize=30, fontweight=\"bold\")", "title": "" }, { "docid": "80e10dda693e07482591c81293be79b0", "score": "0.5305018", "text": "def plot_KS_and_RMSE_alpha(x, y, KS, LE1, Fs, Gs, colors, obs_type):", "title": "" }, { "docid": "535ce061c8f704a9529c4946cc28cbcb", "score": "0.52887535", "text": "def process_and_save_fantasia(plot=False, signal_dim=64):\n\n signals_without_noise = []\n signals_noise = []\n SNR = []\n full_paths = get_fantasia_full_paths(db.fantasia_ecgs[0].directory, list(range(1,21)))\n for file_path in full_paths:\n # try:\n print(\"Pre-processing signal - \" + file_path)\n signal = sio.loadmat(file_path)['val'][0][:1256]\n # plt.plot(signal)\n processed = process_dnn_signal(signal, signal_dim)\n signals_without_noise.append(processed)\n if plot:\n plt.plot(processed)\n plt.show()\n plt.plot(signal[1000:5000])\n fig, ax = plt.subplots()\n major_ticks = np.arange(0, 64)\n ax.set_yticks(major_ticks)\n plt.ylim([0, 15])\n plt.xlim([0, 140])\n ax.grid(True, which='both')\n plt.minorticks_on\n # ax.grid(which=\"minor\", color='k')\n ax.set_ylabel('Class - k')\n ax.set_xlabel('Sample - n')\n plt.plot(signalx, label=\"Smoothed Signal\", alpha=0.4)\n plt.plot(processed, label=\"Discretized Signal\")\n # ticklines = ax.get_xticklines() + ax.get_yticklines()\n gridlines = ax.get_ygridlines() # + ax.get_ygridlines()\n ticklabels = ax.get_xticklabels() + ax.get_yticklabels()\n\n for line in gridlines:\n line.set_color('k')\n line.set_linestyle('-')\n line.set_linewidth(1)\n line.set_alpha(0.2)\n\n for label in ticklabels:\n label.set_color('r')\n label.set_fontsize('medium')\n plt.legend()\n\n plt.show()\n\n signalx = smooth(remove_moving_std(remove_moving_avg((signal - np.mean(signal)) / np.std(signal))))\n\n print(\"Saving signals...\")\n np.savez(\"signals_without_noise.npz\", signals_without_noise=signals_without_noise)", "title": "" }, { "docid": "8005fc11e45a2a57e68e6132562ca5e8", "score": "0.5285624", "text": "def task_full2_plot(depends_on, produces):\r\n fig, ax = plt.subplots()\r\n fig.suptitle(\"Another 
showcase of Lhd algorithm\")\r\n ax.set_xlim(0, 1)\r\n ax.set_ylim(0, 1)\r\n ax.set_ylabel(\"$F(x_2)$\")\r\n ax.set_xlabel(\"$F(x_1)$\")\r\n\r\n full = pickle.load(open(depends_on, \"rb\"))\r\n sns.regplot(\r\n x=full[:, 0],\r\n y=full[:, 1],\r\n ax=ax,\r\n fit_reg=False,\r\n color=\"darkblue\",\r\n scatter_kws={\"alpha\": 0.4},\r\n )\r\n\r\n n = len(full[:, 0])\r\n for i in np.arange(0, 1, 1 / n):\r\n plt.axhline(i)\r\n plt.axvline(i)\r\n\r\n plt.savefig(produces)", "title": "" }, { "docid": "019ebb47b93e9543591b4a7ba93ec742", "score": "0.5279271", "text": "def main():\n n_samples = 100\n n_features = 2\n n_classes = 3\n\n # generate toy dataset of 2D SPD matrices\n dataset_generator = geomstats.datasets.sample_sdp_2d.DatasetSPD2D(\n n_samples, n_features, n_classes)\n data, labels = dataset_generator.generate_sample_dataset()\n\n # plot dataset as ellipses\n ellipsis = visualization.Ellipsis2D()\n for i in range(n_samples):\n x = data[i]\n y = geomstats.datasets.sample_sdp_2d.get_label_at_index(i, labels)\n ellipsis.draw(x, color=ellipsis.colors[y], alpha=.1)\n\n # define and fit MDM classifier to data\n metric = SPDMetricAffine(n=n_features)\n MDMEstimator = RiemannianMinimumDistanceToMeanClassifier(\n metric, n_classes, point_type='matrix')\n MDMEstimator.fit(data, labels)\n\n # plot Frechet means computed in the MDM\n for i in range(n_classes):\n ellipsis.draw(\n MDMEstimator.mean_estimates_[i],\n color=ellipsis.colors_alt[i],\n linewidth=5,\n label='Barycenter of class ' + str(i))\n\n # generate random test samples, and predict with MDM classifier\n data_test = SPDMatrices(n=n_features).random_uniform(n_samples=3)\n predictions = MDMEstimator.predict(data_test)\n\n for i in range(data_test.shape[0]):\n c = list(predictions[i] == 1).index(True)\n x_from, y_from = ellipsis.draw(\n data_test[i], color=ellipsis.colors[c], linewidth=5)\n _, _, x_to, y_to = ellipsis.compute_coordinates(\n MDMEstimator.mean_estimates_[c])\n arrow = visualization.DataArrow(ellipsis.fig)\n arrow.draw(x_from, y_from, x_to, y_to)\n\n ellipsis.fig.axes[0].set_title(\n 'Example plot of the MDM classifier in dimension 2\\n'\n '3-class fit and 3 test sample prediction\\n'\n '(black arrows denote assignement)')\n ellipsis.plot()", "title": "" }, { "docid": "118d6021a458371c9e638882fd12888b", "score": "0.5277625", "text": "def getstemplot(self):\n oddgrid = False\n if len(self.signals) % 2 != 0 and np.sqrt(len(self.signals)) - int(np.sqrt(len(self.signals))) != 0:\n # adds another signal to the list if the list has an odd number of signals\n self.signals.append(0 * self.samples)\n self.coefficients.append(0)\n self.frequencies.append(0)\n oddgrid = True\n\n count = 0\n kcount = 0\n freqcount = 0\n flag = False\n size = len(self.signals)\n factors = []\n pltloc1 = 0\n pltloc2 = 0\n\n for i in range(1, size + 1):\n if size % i == 0:\n factors.append(i)\n # finds the factor of the size of the signals list\n\n if np.sqrt(len(self.signals)) - int(np.sqrt(len(self.signals))) == 0:\n pltloc1 = np.sqrt(size)\n pltloc2 = np.sqrt(size)\n # takes care of perfect square condition\n elif len(factors) % 2 == 0:\n pltloc1 = factors[int(len(factors) / 2)]\n pltloc2 = factors[int(len(factors) / 2 - 1)]\n # since the list of factors is sorted and the length of the list is odd, grab the\n # two number that would make up the median and set them as 2D array indices for subplots\n\n fig, plot = plt.subplots(int(pltloc2), int(pltloc1))\n # creates subplots\n fig.suptitle('Stem Plot of Signals')\n # adds title to figure\n for x in range(0, 
int(pltloc2)):\n for y in range(0, int(pltloc1)):\n # iterates through all subplots to fill with signals\n color = \"#{:06X}\".format(random.randint(0, 0xFFFFFF))\n # generates a random hex color code. Could be made into its own class\n if isinstance(self.signals[count], list):\n # runs this if signal is a unit sample sequence\n label = ''\n for z in range(0, len(self.coefficients[count]), 1):\n # generates a title/label\n if self.coefficients[count][z] != 0:\n if flag is False:\n if self.coefficients[count][z] > 0 and self.constantsK[kcount][z] >= 0:\n label += str(self.coefficients[count][z]) + '\\u03B4[n+' + str(self.constantsK[kcount][z]) + ']'\n elif self.coefficients[count][z] < 0 and self.constantsK[kcount][z] <= 0:\n label += str(self.coefficients[count][z]) + '\\u03B4[n-' + str(-self.constantsK[kcount][z]) + ']'\n elif self.coefficients[count][z] > 0 and self.constantsK[kcount][z] <= 0:\n label += str(self.coefficients[count][z]) + '\\u03B4[n-' + str(-self.constantsK[kcount][z]) + ']'\n elif self.coefficients[count][z] < 0 and self.constantsK[kcount][z] >= 0:\n label += str(self.coefficients[count][z]) + '\\u03B4[n-' + str(-self.constantsK[kcount][z]) + ']'\n flag = True\n\n elif flag is True:\n if self.coefficients[count][z] > 0 and self.constantsK[kcount][z] >= 0:\n label += '+' + str(self.coefficients[count][z]) + '\\u03B4[n+' + str(self.constantsK[kcount][z]) + ']'\n elif self.coefficients[count][z] < 0 and self.constantsK[kcount][z] <= 0:\n label += '-' + str(-self.coefficients[count][z]) + '\\u03B4[n-' + str(-self.constantsK[kcount][z]) + ']'\n elif self.coefficients[count][z] > 0 and self.constantsK[kcount][z] <= 0:\n label += '+' + str(self.coefficients[count][z]) + '\\u03B4[n-' + str(-self.constantsK[kcount][z]) + ']'\n elif self.coefficients[count][z] < 0 and self.constantsK[kcount][z] >= 0:\n label += '-' + str(-self.coefficients[count][z]) + '\\u03B4[n-' + str(self.constantsK[kcount][z]) + ']'\n\n if pltloc1 == 2 and pltloc2 == 1:\n plot[y].stem(self.signals[count][1], self.signals[count][0], color)\n # grabs the correct signal with count then plots constantsK [1] for x samples\n # and coefficients [0] for height of stem\n plot[y].set_xticks(range(min(self.signals[count][1]) - 2, max(self.signals[count][1]) + 3, 2))\n # adds xticks to each subplot\n plot[y].xaxis.set_minor_locator(AutoMinorLocator(n=2))\n # adds ticks in between numbered ticks from above\n else:\n plot[x, y].stem(self.signals[count][1], self.signals[count][0], color)\n # grabs the correct signal with count then plots constantsK [1] for x samples\n # and coefficients [0] for height of stem\n plot[x, y].set_xticks(range(min(self.signals[count][1]) - 2, max(self.signals[count][1]) + 3, 2))\n # adds xticks to each subplot\n plot[x, y].xaxis.set_minor_locator(AutoMinorLocator(n=2))\n # adds ticks in between numbered ticks from above\n flag = False\n kcount += 1\n else:\n # runs this if signal is a complex sequence\n label = '{} * cos({} * \\u03C0 * n)'.format(self.coefficients[count], self.frequencies[freqcount])\n if pltloc1 == 2 and pltloc2 == 1:\n plot[y].stem(self.samples, self.signals[count], color, basefmt=color)\n plot[y].set_xticks(range(self.samples[0], self.samples[-1] + 1, 2))\n # adds xticks to each subplot\n plot[y].xaxis.set_minor_locator(AutoMinorLocator(n=2))\n # adds ticks in between numbered ticks from above\n else:\n plot[x, y].stem(self.samples, self.signals[count], color, basefmt=color)\n # basefmt is used here to configure the legend\n plot[x, y].set_xticks(range(self.samples[0], 
self.samples[-1] + 1, 2))\n # adds xticks to each subplot\n plot[x, y].xaxis.set_minor_locator(AutoMinorLocator(n=2))\n # adds ticks in between numbered ticks from above\n freqcount += 1\n\n if pltloc1 == 2 and pltloc2 == 1:\n plot[y].set_title(label)\n # adds title to each subplot\n plot[y].grid()\n plot[y].legend(loc=\"upper right\")\n plot[y].axhline(y=0, color='r')\n # adds horizontal line to the x axis on each subplot\n plot[y].set_xlabel('n')\n plot[y].set_ylabel(self.labels[count])\n else:\n plot[x, y].set_title(label)\n # adds title to each subplot\n plot[x, y].grid()\n plot[x, y].legend(loc=\"upper right\")\n plot[x, y].axhline(y=0, color='r')\n # adds horizontal line to the x axis on each subplot\n plot[x, y].set_xlabel('n')\n plot[x, y].set_ylabel(self.labels[count])\n count += 1\n # count is increased to correctly access lists of signals\n if oddgrid is True:\n fig.delaxes(plot[-1, -1])\n #plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "456bc242a4c93dbac7a22e1d37e0a422", "score": "0.5269483", "text": "def plotSignificantRegions(phenotypeIndex,runId=\"\",statId=\"\",window=[50000,50000],res_path=\"/Network/Data/250k/tmp-bvilhjal/snp_res/\"):\n\tpass", "title": "" }, { "docid": "c8d5b8fdb7a4f170bdecaf9544d86127", "score": "0.5245313", "text": "def ridgePlot():\n X,y=preprocess()\n scores=makeScorers()\n arr=[]\n for alphas in [0.0001,0.001,0.01,0.1,1,10,100,1000]:\n # print(\"Alpha= \",alphas)\n clf=linear_model.Ridge(alpha=alphas)\n arr.append(np.average(cross_val_score(clf,X,y,cv=5,scoring=scores[0])))# Storing the erors for corresponding alpha for plotting.\n plt.figure(20)\n plt.plot(['0.0001','0.001','0.01','0.1','1','10','100','1000'],arr)\n plt.xlabel(\"Regularization Parameter\")\n plt.ylabel(\"MSE\")\n plt.savefig(\"RidgePlot.png\")", "title": "" }, { "docid": "383ea7c0dfe7542bea9306eb53e058b7", "score": "0.52441436", "text": "def plot_sample_set(z_all,env,y_label = False, x_train = None):\n \n \n \n s_expl = z_all[:,:env.n_s]\n n_it = np.shape(s_expl)[0]\n fig, ax = env.plot_safety_bounds(color = \"r\")\n \n c_spectrum = viridis(np.arange(n_it))\n # plot initial dataset \n if not x_train is None:\n\ts_train = x_train[:,:env.n_s]\n n_train = np.shape(s_train)[0]\n for i in range(n_train):\n ax = env.plot_state(ax,s_train[i,:env.n_s],color = c_spectrum[0])\n \n # plot the data gatehred\n for i in range(n_it):\n ax = env.plot_state(ax,s_expl[i,:env.n_s],color = c_spectrum[i])\n \n ax.set_xlabel(\"Angular velocity $\\dot{\\\\theta}$\")\n print(y_label)\n if y_label:\n\tprint(\"??\")\n\tax.set_ylabel(\"Angle $\\\\theta$\")\n fig.set_size_inches(3.6,4.5)\n return fig, ax", "title": "" }, { "docid": "c6306ae0f4767f3d51c39e33b20507e5", "score": "0.524104", "text": "def task_full_plot(depends_on, produces):\r\n fig, ax = plt.subplots()\r\n fig.suptitle(\"Showcase of Lhd algorithm\")\r\n ax.set_xlim(0, 1)\r\n ax.set_ylim(0, 1)\r\n ax.set_ylabel(\"$F(x_2)$\")\r\n ax.set_xlabel(\"$F(x_1)$\")\r\n\r\n full = pickle.load(open(depends_on, \"rb\"))\r\n sns.regplot(\r\n x=full[:, 0],\r\n y=full[:, 1],\r\n ax=ax,\r\n fit_reg=False,\r\n color=\"darkblue\",\r\n scatter_kws={\"alpha\": 0.4},\r\n )\r\n\r\n n = len(full[:, 0])\r\n for i in np.arange(0, 1, 1 / n):\r\n plt.axhline(i)\r\n plt.axvline(i)\r\n\r\n plt.savefig(produces)", "title": "" }, { "docid": "71f3cd8e8525267c72010d50907d35da", "score": "0.5234998", "text": "def plotSeismogramInteract(\n d2, d3, rho1, rho2, rho3, v1, v2, v3, wavf, wavA, AddNoise=False, usingT=True\n):\n d = np.array((0.0, d2, d3), dtype=float)\n v = 
np.array((v1, v2, v3), dtype=float)\n rho = np.array((rho1, rho2, rho3), dtype=float)\n\n if AddNoise:\n noise = 0.02\n else:\n noise = 0.0\n\n plotSeismogramV2(d, rho, v, wavf, wavA, noise, usingT)", "title": "" }, { "docid": "6c2fc90814e4f8cbd076168d017187f3", "score": "0.52253246", "text": "def hst_sextractor(self, **extra_kwargs):\n os.chdir(self.hr_dir)\n if self.use_bgcorr:\n input_image = self.bgcorr_image\n else:\n # input_image = self.hr_resample_drz[self.hr_bands[-1]]\n input_image = self.hr_output_drz[self.hr_bands[1]]\n sex_cmd = ['cex', '%s,%s' % (self.hr_output_drz[self.hr_bands[0]], input_image)]\n # sex_cmd = ['cex', '%s,%s' % (self.hr_resample_drz[self.hr_bands[0]], input_image)]\n sex_cmd += ['-c', self.hr_sexfile]\n sex_cmd += ['-WEIGHT_TYPE', 'MAP_WEIGHT,MAP_WEIGHT']\n sex_cmd += ['-WEIGHT_IMAGE', '%s,%s' % (self.hr_output_wht[self.hr_bands[0]], self.hr_output_wht[self.hr_bands[1]])]\n # sex_cmd += ['-WEIGHT_IMAGE', '%s,%s' % (self.hr_resample_wht[self.hr_bands[0]], self.hr_resample_wht[self.hr_bands[1]])]\n sex_cmd += ['-WEIGHT_THRESH', '0.']\n sex_cmd += ['-CATALOG_NAME', self.hr_sexcat]\n sex_cmd += ['-FLAG_IMAGE', self.hr_output_flg]\n # sex_cmd += ['-FLAG_IMAGE', self.hr_resample_flg]\n sex_cmd += ['-CHECKIMAGE_NAME', self.hr_output_seg]\n # sex_cmd += ['-CHECKIMAGE_NAME', self.hr_resample_seg]\n # sex_cmd += ['-PIXEL_SCALE', str(self.pixscale)]\n sex_cmd += ['-MAG_ZEROPOINT', str(self.magzpt)]\n if len(extra_kwargs):\n for k in extra_kwargs.keys():\n sex_cmd += ['-%s' % k.upper(), str(extra_kwargs[k])]\n print sex_cmd\n subprocess.call(sex_cmd)\n # Now format the SExtractor catalog into the TFIT-accepted form\n self.make_tfitcat()", "title": "" }, { "docid": "c1971a8c58e1fc0ad69e01e8acd75ff4", "score": "0.52219117", "text": "def plotGmmEst(dis, ps, eps, fout):\n fig, axs = pylab.subplots(1, 2, figsize=(4, 2.75), sharey=True)\n #raw PETs\n sns.kdeplot(dis, ax=axs[0], shade=True, color=colors[2])\n axs[0].set_ylabel(\"Density\")\n axs[0].set_title(\"Raw\")\n #gmm infered PETs\n nsa = np.where(ps == 0)[0]\n nsb = np.where(ps == 1)[0]\n if np.mean(dis[nsa]) > np.mean(\n dis[nsb]): #change the order if first classes mean larger\n nsa, nsb = nsb, nsa\n sns.kdeplot(dis[nsa],\n ax=axs[1],\n shade=True,\n color=colors[0],\n label=\"potential peak PETs\") #colors from the cLoops2.settings\n sns.kdeplot(dis[nsb],\n ax=axs[1],\n shade=True,\n color=colors[1],\n label=\"potential loop PETs\") #colors from the cLoops2.settings\n axs[1].legend()\n axs[1].set_title(\"GMM inferred PETs\\nEstimated eps=%s\" % eps)\n #set common x-label\n fig.text(0.5, 0.0, \"Distance between two ends (log2,bp)\", ha='center')\n pylab.tight_layout()\n pylab.savefig(fout)", "title": "" }, { "docid": "ee692e55cf5a3570dbeda7688a1e549c", "score": "0.52208084", "text": "def onHistogram2Button(self):\n \n #Clear the scene\n slicer.mrmlScene.Clear()\n \n # Load master volume\n sampleDataLogic = SampleData.SampleDataLogic()\n masterVolumeNode = sampleDataLogic.downloadMRBrainTumor1()\n\n # Create segmentation\n segmentationNode = slicer.vtkMRMLSegmentationNode()\n slicer.mrmlScene.AddNode(segmentationNode)\n segmentationNode.CreateDefaultDisplayNodes() # only needed for display\n segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(masterVolumeNode)\n\n # Create seed segment inside tumor\n tumorSeed = vtk.vtkSphereSource()\n tumorSeed.SetCenter(-6, 30, 28)\n tumorSeed.SetRadius(10)\n tumorSeed.Update()\n segmentationNode.AddSegmentFromClosedSurfaceRepresentation(tumorSeed.GetOutput(), 
\"Tumor\", [1.0,0.0,0.0])\n\n # Create seed segment inside tumor 2\n referenceSeed = vtk.vtkSphereSource()\n referenceSeed.SetCenter(-6, -50, -10)\n referenceSeed.SetRadius(20)\n referenceSeed.Update()\n segmentationNode.AddSegmentFromClosedSurfaceRepresentation(referenceSeed.GetOutput(), \"Reference\", [0.0,0.0,1.0])\n\n # Create seed segment outside tumor\n backgroundSeedPositions = [[0,65,32], [1, -14, 30], [0, 28, -7], [0,30,64], [31, 33, 27], [-42, 30, 27]]\n append = vtk.vtkAppendPolyData()\n for backgroundSeedPosition in backgroundSeedPositions:\n backgroundSeed = vtk.vtkSphereSource()\n backgroundSeed.SetCenter(backgroundSeedPosition)\n backgroundSeed.SetRadius(10)\n backgroundSeed.Update()\n append.AddInputData(backgroundSeed.GetOutput())\n\n append.Update()\n backgroundSegmentId = segmentationNode.AddSegmentFromClosedSurfaceRepresentation(append.GetOutput(), \"Background\", [0.0,1.0,0.0])\n\n # Perform analysis\n ################################################\n\n # Create segment editor to get access to effects\n segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()\n # To show segment editor widget (useful for debugging): segmentEditorWidget.show()\n segmentEditorWidget.setMRMLScene(slicer.mrmlScene)\n segmentEditorNode = slicer.vtkMRMLSegmentEditorNode()\n slicer.mrmlScene.AddNode(segmentEditorNode)\n segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)\n segmentEditorWidget.setSegmentationNode(segmentationNode)\n segmentEditorWidget.setMasterVolumeNode(masterVolumeNode)\n\n # Set up masking parameters\n segmentEditorWidget.setActiveEffectByName(\"Mask volume\")\n effect = segmentEditorWidget.activeEffect()\n # set fill value to be outside the valid intensity range\n intensityRange = masterVolumeNode.GetImageData().GetScalarRange()\n effect.setParameter(\"FillValue\", str(intensityRange[0]-1))\n # Blank out voxels that are outside the segment\n effect.setParameter(\"Operation\", \"FILL_OUTSIDE\")\n # Create a volume that will store temporary masked volumes\n maskedVolume = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLScalarVolumeNode\", \"Temporary masked volume\")\n effect.self().outputVolumeSelector.setCurrentNode(maskedVolume)\n\n # Create chart\n plotChartNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLPlotChartNode\", \"Histogram\")\n \n \n # Create histogram plot data series for each masked volume\n for segmentIndex in range(segmentationNode.GetSegmentation().GetNumberOfSegments()):\n # Set active segment\n segmentID = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)\n segmentEditorWidget.setCurrentSegmentID(segmentID)\n # Apply mask\n effect.self().onApply()\n # Compute histogram values\n histogram = np.histogram(arrayFromVolume(maskedVolume), bins=100, range=intensityRange)\n # Save results to a new table node\n segment = segmentationNode.GetSegmentation().GetNthSegment(segmentIndex)\n tableNode=slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLTableNode\", segment.GetName() + \" histogram table\")\n updateTableFromArray(tableNode, histogram)\n tableNode.GetTable().GetColumn(0).SetName(\"Count\")\n tableNode.GetTable().GetColumn(1).SetName(\"Intensity\")\n # Create new plot data series node\n plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLPlotSeriesNode\", segment.GetName() + \" histogram\")\n plotSeriesNode.SetAndObserveTableNodeID(tableNode.GetID())\n plotSeriesNode.SetXColumnName(\"Intensity\")\n plotSeriesNode.SetYColumnName(\"Count\")\n plotSeriesNode.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter)\n 
plotSeriesNode.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleNone)\n plotSeriesNode.SetUniqueColor()\n # Add plot to chart\n plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())\n\n # Show chart in layout\n slicer.modules.plots.logic().ShowChartInLayout(plotChartNode)\n\n # Delete temporary node\n slicer.mrmlScene.RemoveNode(maskedVolume)\n slicer.mrmlScene.RemoveNode(segmentEditorNode)\n \n print('Histogram generated for Brain Tumor Segmentation')", "title": "" }, { "docid": "bbf0bc1ae2534afe77712c8198b01f65", "score": "0.52095664", "text": "def show_band_strukture(self, model):\n color_gaps(self, model)\n self.show()", "title": "" }, { "docid": "9b53d5cd44e2da766840866b1ce10632", "score": "0.52075714", "text": "def sed_vis(phot_in, filt_file, res_dir, filt_dir, sfh_ages=None):\n\n colors = np.array(\n [\n [\"#9E3549\", \"#C98B97\", \"#B45C6D\", \"#89152C\", \"#740017\"],\n [\"#256F5B\", \"#628D81\", \"#417E6D\", \"#0F604A\", \"#00523B\"],\n [\"#89A236\", \"#BFCD8F\", \"#A3B85E\", \"#708C15\", \"#5B7700\"],\n ],\n dtype=\"<U7\",\n )\n\n # Check inputs\n if not os.path.isfile(phot_in):\n raise Exception(\"File not found: {}\".format(phot_in))\n if not os.path.isfile(filt_file):\n raise Exception(\"File not found: {}\".format(filt_file))\n if not os.path.isdir(res_dir):\n raise Exception(\"Not a directory: {}\".format(res_dir))\n else:\n res_dir = os.path.abspath(res_dir)\n if not os.path.isdir(filt_dir):\n raise Exception(\"Not a directory: {}\".format(filt_dir))\n else:\n filt_dir = os.path.abspath(filt_dir)\n\n # Construct array of input summary files\n if sfh_ages is None:\n sum_files = glob.glob(\"{}/summary_*.dat\".format(res_dir))\n elif type(sfh_ages) == list:\n sum_files = [\n \"{}/summary_{}.dat\".format(res_dir, sfh_age) for sfh_age in sfh_ages\n ]\n else:\n sum_files = [\"{}/summary_{}.dat\".format(res_dir, sfh_ages)]\n\n # Create plotting directory\n plot_dir = \"{}/plots/\".format(res_dir)\n if not os.path.isdir(plot_dir):\n os.mkdir(plot_dir)\n\n # Generate one set of figures per SFH/age combination\n for sum_file in sum_files:\n # Check if file is empty\n if os.stat(sum_file).st_size == 0:\n print(\"{} is empty\".format(os.path.basename(sum_file)))\n continue\n\n sfh_age = (\n sum_file.replace(res_dir, \"\")\n .replace(\"summary_\", \"\")\n .replace(\"/\", \"\")\n .replace(\".dat\", \"\")\n )\n pobsr = read_phot_in(phot_in, filt_file)\n pbst = read_bestfit_params(sum_file, pobsr.index.values)\n\n # Generate one PDF per SED fit\n for i, r in pbst.iterrows():\n # Read in best fit SED\n sed_best_file = \"{}/bestfit/bestfit.{}.{}.dat\".format(res_dir, i, sfh_age)\n if not os.path.isfile(sed_best_file):\n print(\"Not found: bestfit.{}.{}.dat\".format(i, sfh_age))\n continue\n\n # Instantiate figure\n fig = plt.figure(figsize=(11, 8.5))\n ax = fig.add_subplot(111)\n\n # Plot best fit SED\n sed_best_file = \"{}/bestfit/bestfit.{}.{}.dat\".format(res_dir, i, sfh_age)\n if not os.path.isfile(sed_best_file):\n print(\"Not found: bestfit.{}.{}.dat\".format(i, sfh_age))\n continue\n psed = ascii.read(sed_best_file).to_pandas()\n psed.columns = [\"wave\", \"flam\"]\n # Unit conversion from F_lambda (erg/s/cm2/AA) to F_nu (erg/s/cm2/Hz) to AB magnitude\n flam = psed.flam.values * u.erg / u.s / u.cm ** 2 / u.AA\n lam = psed.wave.values * u.AA\n fnu = flam.to(u.erg / u.s / u.cm ** 2 / u.Hz, u.spectral_density(lam))\n abmag = fnu.to(u.ABmag)\n psed[\"fnu\"] = fnu.value\n psed[\"ABmag\"] = abmag.value\n # convert wavelength to microns\n mlam = lam.to(u.micron)\n 
psed[\"wave_um\"] = mlam.value\n ax.plot(\n psed.wave_um,\n psed.ABmag,\n color=colors[0][2],\n ds=\"steps-mid\",\n marker=\"\",\n linestyle=\"-\",\n label=\"Best fit SED\",\n zorder=2.0,\n )\n\n # Plot observed photometry\n pobsc = pobsr.loc[i]\n pobs = reformat_obs_phot(pobsc, filt_dir)\n # Defined errorbars\n pdef = pobs.loc[~np.isnan(pobs.emag)]\n ax.errorbar(\n pdef.wave,\n pdef.mag,\n yerr=pdef.emag,\n color=\"k\",\n linestyle=\"\",\n marker=\"o\",\n ms=10,\n mfc=\"none\",\n mew=3,\n label=\"Observed photometry\",\n zorder=2.5,\n )\n # Undefined errorbars\n pudef = pobs.loc[np.isnan(pobs.emag)]\n ax.errorbar(\n pudef.wave,\n pudef.mag,\n yerr=pudef.emag,\n color=\"k\",\n linestyle=\"\",\n marker=\"x\",\n ms=10,\n mfc=\"none\",\n mew=3,\n label=\"Undef mag error\",\n zorder=2.5,\n )\n\n # Plot predicted photometry from best-fit model\n pobs = add_modelmag_pd(pobs, psed.wave.values, psed.fnu, filt_dir)\n pobs_magdef = pobs.loc[~np.isnan(pobs.mag)]\n ax.scatter(\n pobs_magdef.wave,\n pobs_magdef.bmag,\n marker=\"s\",\n s=100,\n color=colors[0][4],\n fc=\"none\",\n linewidths=2,\n alpha=1.0,\n label=\"Model prediction\",\n zorder=2.2,\n )\n\n # Axes limits and config\n # x axis: [min(filter_wavelengths) - 0.2, max(filter_wavelengths) + 0.5]\n xmin = pobs.wave.min() - 0.2\n xmax = pobs.wave.max() + 0.5\n xlim = [xmin, xmax]\n # y axis: [max(observed_photometry) + 1.0, min(observed_photometry, best_SED) - 0.5]\n ymin = pobs.mag.max() + 1\n ymax = min([psed.ABmag.min(), pobs.mag.min()]) - 0.5\n ylim = [ymin, ymax]\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.semilogx()\n # Set up tick labels for microns\n xt = np.array([0.1, 0.5, 1, 2, 4, 8, 24, 160, 500]) * 1.0e4\n valid_ticks = (xt > xlim[0] * 1.0e4) & (xt < xlim[1] * 1.0e4)\n if valid_ticks.sum() > 0:\n xt = xt[valid_ticks]\n ax.set_xticks(xt / 1.0e4)\n ax.set_xticklabels(xt / 1.0e4)\n\n # Label observed photometry\n for fi, fr in pobs.iterrows():\n # Skip if no magnitude in this band\n if np.isnan(fr.mag):\n continue\n # Define upper and lower possible positions\n if fr.mag > fr.bmag:\n if np.isnan(fr.emag):\n lpos = fr.mag + 0.1\n else:\n lpos = fr.mag + fr.emag + 0.1\n upos = fr.bmag - 0.1\n else:\n if np.isnan(fr.emag):\n upos = fr.mag - 0.1\n else:\n upos = fr.mag - fr.emag - 0.1\n lpos = fr.bmag + 0.1\n upos_ax = ax.transLimits.transform([fr.wave, upos])\n lpos_ax = ax.transLimits.transform([fr.wave, lpos])\n # Check if going above or below the figure\n if upos_ax[1] > 0.87:\n pos = lpos\n va = \"top\"\n elif lpos_ax[1] < 0.13:\n pos = upos\n va = \"bottom\"\n else:\n if fr.mag > fr.bmag:\n pos = lpos\n va = \"top\"\n else:\n pos = upos\n va = \"bottom\"\n ax.text(\n fr.wave,\n pos,\n fi,\n ha=\"center\",\n va=va,\n color=\"k\",\n size=14,\n rotation=90,\n )\n\n # Axes labels\n ax.set_xlabel(r\"$\\lambda_{obs}$ ($\\mu$m)\")\n ax.set_ylabel(\"AB Mag\")\n\n # Legend\n ax.legend(loc=\"upper left\", fontsize=14)\n\n # Best-fit parameters\n sfh_age_str = []\n if \"csf_\" in sfh_age:\n sfh_age_str.append(\"Constant SFH\")\n elif \"tau_\" in sfh_age:\n sfh_age_str.append(\"Tau model\")\n elif \"taur_\" in sfh_age:\n sfh_age_str.append(\"Tau rising model\")\n else:\n sfh_age_str.append(sfh_age.split(\"_\")[0])\n if \"allage\" in sfh_age:\n sfh_age_str.append(\"All ages\")\n elif \"agegt50\" in sfh_age:\n sfh_age_str.append(r\"Age $>$ 50Myr\")\n else:\n sfh_age_str.append(sfh_age.split(\"_\")[1])\n fit_info = [\n *sfh_age_str,\n r\"$\\tau$/Myr: {}\".format(r.tau),\n r\"EBMV: {}\".format(r.ebmv),\n \"Age: {} Myr\".format(r.age),\n r\"SFR: {} 
M$_{{\\odot}}$/yr\".format(r.sfr),\n r\"log(M$_*$/M$_{{\\odot}}$): {:.3f}\".format(r.mass),\n r\"$\\chi^2$: {}\".format(r.chisq),\n ]\n ax.annotate(\n \"\\n\".join(fit_info),\n [0.78, 0.03],\n xycoords=\"axes fraction\",\n ha=\"left\",\n va=\"bottom\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n fontsize=14,\n usetex=True,\n family=\"serif\",\n )\n\n # Title\n ax.set_title(r\"{} at $z$={:.3f}\".format(i, pobsc.zsys), fontsize=15)\n\n fig.tight_layout()\n fig.savefig(\"{}/{}_{}.pdf\".format(plot_dir, i, sfh_age))\n plt.close(fig)", "title": "" }, { "docid": "efb89827559eb4d16bad908759a42322", "score": "0.519969", "text": "def svdVisualize(self):\n \n (U,S,V) = numpy.linalg.svd(numpy.dot(self.X_mapped.T,self.X_mapped)/self.m)\n Z = numpy.zeros((self.m,2))\n Z[:,0] = numpy.dot(self.X_mapped,U[:,0])\n Z[:,1] = numpy.dot(self.X_mapped,U[:,1])\n # plot projected data for visualization\n colors = map(lambda x: 'r' if x else 'b', self.y)\n plt.scatter(Z[:,0],Z[:,1],20,colors)\n plt.show()", "title": "" }, { "docid": "1a9b80ae009499b5c6fe86a509b9daa2", "score": "0.51971656", "text": "def plot_tidal_ellipses():\n pass", "title": "" }, { "docid": "b498a6acce3f21774ee2122a4689ab3f", "score": "0.51966524", "text": "def plot_with_el(gmm, dpgmm):\n plt.figure(figsize=(13, 10))\n plt.subplot(2,1,1)\n plt.title('Gaussian Mixture with ellipses')\n plot_gmm(gmm, data, labels)\n plt.subplot(2,1,2)\n\n plt.title('Bayesian Gaussian Mixture with ellipses')\n plot_gmm(dpgmm, data, dlabels)\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "74da90d6536576c193f47333a36247e2", "score": "0.51954764", "text": "def make_tsne_plot(caps_presence, labels, filename=None, save_kwargs=None):\n\n # idx = np.random.choice(res.test.posterior_pres.shape[0], size=int(1e4),\n # replace=False)\n # points = res.train.posterior_pres[idx]\n # labels = res.train.label[idx]\n\n tsne = TSNE(2, perplexity=50)\n embedded = tsne.fit_transform(caps_presence)\n\n colors = np.asarray([\n 166, 206, 227,\n 31, 120, 180,\n 178, 223, 138,\n 51, 160, 44,\n 251, 154, 153,\n 227, 26, 28,\n 253, 191, 111,\n 255, 127, 0,\n 202, 178, 214,\n 106, 61, 154\n ], dtype=np.float32).reshape(10, 3) / 255.\n\n fig, ax = plt.subplots(1, 1, figsize=(6, 6))\n for i in range(10):\n idx = (labels == i)\n points_for_label = embedded[idx]\n ax.scatter(points_for_label[:, 0], points_for_label[:, 1], c=colors[i])\n\n if filename is not None:\n if save_kwargs is None:\n save_kwargs = dict(bbox_inches='tight', dpi=300)\n\n fig.savefig(filename, **save_kwargs)\n plt.close(fig)", "title": "" }, { "docid": "b6ae346c25e865ff3a1edabc7c24cf25", "score": "0.51777846", "text": "def shap_interpret(self):\n se = shap.TreeExplainer(self.model) # , feature_perturbation=\"interventional\", model_output=\"raw\"\n shap_values = se.shap_values(self.x_test)\n shap.summary_plot(shap_values[1], features=self.x_test) # feature_names=self.features", "title": "" }, { "docid": "8fc1e347f9419892445dfd13d6f3c9c3", "score": "0.5167584", "text": "def plot_simulation_without_antibiotic(populations):", "title": "" }, { "docid": "bef4c28837cc56683278936a9eb9519f", "score": "0.5159639", "text": "def update_msdview(ax, system): # pragma: no cover\n line = ax.lines[0]\n\n line.set_ydata(system.msd_sample)\n line.set_xdata(np.arange(0, system.step) * system.timestep_length)\n ax.set_xlim(0, system.step * system.timestep_length)\n ax.set_ylim(0, np.amax(system.msd_sample) + np.amax(system.msd_sample) * 0.05)", "title": "" }, { "docid": "fcdf416fb6f05706f8223caca52210fe", "score": "0.5158811", 
"text": "def plotSeismogramInteractTBL(\n d2, d3, rho1, rho2, rho3, v1, v2, v3, wavf, wavA, AddNoise=False, usingT=True\n):\n d = np.array((0.0, d2, d3), dtype=float)\n v = np.array((v1, v2, v3), dtype=float)\n rho = np.array((rho1, rho2, rho3), dtype=float)\n\n if AddNoise:\n noise = 0.02\n else:\n noise = 0.0\n\n plotSeismogramV3(d, rho, v, wavf, wavA, noise, usingT)", "title": "" }, { "docid": "2122d0bc5da91c2749e893d2a74fd64f", "score": "0.5156134", "text": "def plot_colmse(colmse_t):\n\n versions = colmse_t.columns.unique(level=0).to_list()\n models = colmse_t.index.to_list()\n for i in range(len(versions)):\n for j in range(len(models)):\n orid = colmse_t.iloc[j][versions[i], 'ori'].values\n gend = colmse_t.iloc[j][versions[i], 'gen'].values\n\n # plotting log values as there is a big diff in magnitude\n fig = go.Figure(data=go.Scatter(\n x=orid, #np.log(orid),\n y=gend, #np.log(gend),\n mode='markers',\n text=colmse_t.columns.unique(level=2).to_list(),\n name='data'))\n # plotting the diagonal\n # fig.add_trace(go.Scatter(x=[np.log(min(orid)), np.log(max(orid))], y=[np.log(min(orid)), np.log(max(orid))], mode='lines', name='diagonal'))\n fig.add_trace(go.Scatter(x=[min(orid), max(orid)], y=[min(orid), max(orid)], mode='lines', name='diagonal'))\n fig.update_layout(xaxis_title=\"scaled ori data mse\",\n yaxis_title=\"scaled gen data mse\",\n title=versions[i]+' '+models[j])\n fig.show()", "title": "" }, { "docid": "5924ada2ff4e596c97252f7403d92965", "score": "0.51467174", "text": "def make_exp_summary_TF(figtitle,extraction_type,mean_image,roi_image,roi_traces,\n rawStimData,bf_image,\n rois_df,rois,stimulus_information,save_fig,current_movie_ID,\n summary_save_dir):\n \n \n plt.close('all')\n # Constructing the plot backbone, selecting colors\n colors, _ = run_matplotlib_params()\n fig = plt.figure(figsize=(18, 9))\n fig.suptitle(figtitle,fontsize=12)\n \n grid = plt.GridSpec(2, 6, wspace=1, hspace=0.3)\n \n ## BF masks\n ax=plt.subplot(grid[0,:2])\n sns.heatmap(mean_image,cmap='gray',ax=ax,cbar=False)\n sns.heatmap(bf_image,cmap='plasma',\n cbar_kws={'ticks': np.unique(bf_image[~np.isnan(bf_image)]),\n 'fraction':0.1,\n 'shrink' : 1,\n 'label': 'Hz',},alpha=0.5,vmin=0.1,vmax=1.5)\n ax.axis('off')\n ax.set_title('BF map') \n \n ## Histogram\n ax=plt.subplot(grid[0,2])\n chart = sns.countplot('BF',data =rois_df,palette='plasma')\n chart.set_xticklabels(chart.get_xticklabels(), rotation=45, \n fontweight='bold')\n leg = chart.legend()\n leg.remove()\n \n \n ## Tuning curve\n ax=plt.subplot(grid[0,3:])\n # Plot tuning curves\n tunings = np.squeeze(list(map(lambda roi : roi.TF_curve_resp, rois)))\n mean_t = np.mean(tunings,axis=0)\n std_t = np.std(tunings,axis=0)\n ub = mean_t + std_t\n lb = mean_t - std_t\n # Tuning curve\n \n TF_stim = rois[0].TF_curve_stim\n ax.fill_between(TF_stim, ub, lb,\n color=colors[0], alpha=.2)\n \n ax.plot(TF_stim,mean_t,'-o',lw=4,color=colors[0],\n markersize=10)\n #ax.plot(TF_stim,tunings.T,alpha=0.3,lw=1)\n \n ax.set_xscale('log') \n ax.set_title('Frequency tuning curve') \n ax.set_xlabel('Hz')\n ax.set_ylabel('$\\Delta F/F$')\n ax.set_xlim((ax.get_xlim()[0],10)) \n \n ## Plotting all tuning curves\n if len(rois) > 100:\n ax=plt.subplot(grid[1,:])\n elif len(rois) > 75:\n ax=plt.subplot(grid[1,:4])\n elif len(rois) > 50:\n ax=plt.subplot(grid[1,:3])\n elif len(rois) > 25:\n ax=plt.subplot(grid[1,:2])\n else:\n ax=plt.subplot(grid[1,1])\n \n \n non_edge_stims = (stimulus_information['stim_type'] != 50)\n uniq_freq_nums = 
len(np.where(np.unique(stimulus_information['epoch_frequency'][non_edge_stims])>0)[0])\n bfs = np.squeeze(list(map(lambda roi : roi.BF, rois)))\n sorted_indices = np.argsort(bfs)\n tf_tunings = tunings[sorted_indices,:]\n plot_tf_array = np.zeros(shape=(np.shape(tf_tunings)[0],np.shape(tf_tunings)[0]*np.shape(tf_tunings)[1]))\n plot_tf_array[:] = np.nan\n for i in range(np.shape(tf_tunings)[0]):\n \n curr_data = tf_tunings[i,:]\n curr_data = curr_data + np.mod(i,9)\n curve_start = i*np.shape(tf_tunings)[1] - (i*(uniq_freq_nums-1))\n plot_tf_array[i,curve_start:curve_start+uniq_freq_nums] = curr_data\n \n ax.plot(np.transpose(plot_tf_array),'-o',linewidth=2.0, alpha=.8,\n color=colors[0],markersize=0.4)\n ax.axis('off')\n ax.set_title('ROI tuning curves N: %s' % len(rois))\n \n if save_fig:\n # Saving figure \n save_name = 'Summary_%s_%s' % (current_movie_ID, extraction_type)\n os.chdir(summary_save_dir)\n plt.savefig('%s.pdf'% save_name, bbox_inches='tight',dpi=300)\n return fig", "title": "" }, { "docid": "d0f880bfaecda71caee581fa390c314c", "score": "0.5144847", "text": "def task_plots(depends_on, produces):\r\n fig, ax = plt.subplots()\r\n fig.suptitle(\"Illustration of trust region application with reused sample points\")\r\n ax.set_xlim(0, 1)\r\n ax.set_ylim(0, 1)\r\n ax.set_ylabel(\"$F(x_2)$\")\r\n ax.set_xlabel(\"$F(x_1)$\")\r\n\r\n first_sample = pickle.load(open(depends_on[\"first\"], \"rb\"))\r\n second_sample = pickle.load(open(depends_on[\"second\"], \"rb\"))\r\n\r\n sns.regplot(\r\n x=first_sample[:, 0],\r\n y=first_sample[:, 1],\r\n ax=ax,\r\n fit_reg=False,\r\n color=\"darkblue\",\r\n scatter_kws={\"alpha\": 0.4},\r\n )\r\n sns.regplot(\r\n x=second_sample[:, 0],\r\n y=second_sample[:, 1],\r\n ax=ax,\r\n fit_reg=False,\r\n color=\"firebrick\",\r\n scatter_kws={\"alpha\": 0.4},\r\n )\r\n\r\n for i in np.arange(0, 1, 1 / 20):\r\n plt.axhline(i)\r\n plt.axvline(i)\r\n\r\n plt.savefig(produces)", "title": "" }, { "docid": "9218d395fff25260d0ae0199b10a5f9b", "score": "0.51240957", "text": "def make_ms_plots(self):\n info('making MS inspection plots')\n\n ### uv-coverage plot, different color baselines, legend, uv-annuli ###\n pl.figure(figsize=(16,16))\n #from mpltools import color\n cmap = pl.cm.Set1\n color.cycle_cmap(self.Nant, cmap=cmap)\n fig, ax = pl.subplots()\n for ant0 in range(self.Nant):\n for ant1 in range(self.Nant):\n if (ant1 > ant0) \\\n and not ((self.station_names[ant0]=='JCMT') or (self.station_names[ant1] == 'JCMT')) \\\n and not ((self.station_names[ant0]=='APEX') or (self.station_names[ant1] == 'APEX')):\n\n temp_mask = np.logical_not(self.flag[self.baseline_dict[(ant0,ant1)],0,0])\n temp_u = self.uvw[self.baseline_dict[(ant0,ant1)][temp_mask], 0]\\\n / (speed_of_light/self.chan_freq.mean())/1e9\n temp_v = self.uvw[self.baseline_dict[(ant0,ant1)][temp_mask], 1]\\\n / (speed_of_light/self.chan_freq.mean())/1e9\n #if (np.sqrt((temp_u.max()**2 + temp_v.max()**2)) > 0.1):\n pl.plot(np.hstack([np.nan, temp_u,np.nan, -temp_u, np.nan]), np.hstack([np.nan, temp_v,np.nan, -temp_v,np.nan]), \\\n lw=2.5,label='%s-%s'%(self.station_names[ant0],self.station_names[ant1]))\n #pl.plot(-self.uvw[np.logical_not(self.flag[:, 0, 0]), 0], -self.uvw[np.logical_not(self.flag[:, 0, 0]), 1], \\\n # label=self.station_names[i])\n lgd = pl.legend(bbox_to_anchor=(1.02, 1), loc=2, shadow=True,fontsize='small')\n ax = pl.gca()\n\n uvbins_edges = np.arange(0, 11, 1) # uvdistance units: Giga-lambda\n uvbins_centre = (uvbins_edges[:-1] + uvbins_edges[1:]) / 2.\n numuvbins = 
len(uvbins_centre)\n binwidths = uvbins_edges[1] - uvbins_edges[0]\n for b in range(numuvbins):\n p = Circle((0, 0), uvbins_edges[b + 1], edgecolor='k', ls='solid', facecolor='none', alpha=0.5, lw=0.5)\n ax.add_artist(p)\n pl.xlabel('$u$ / G$\\,\\lambda$')\n pl.ylabel('$v$ / G$\\,\\lambda$')\n pl.xlim(-10, 10)\n pl.ylim(-10, 10)\n ax.set_aspect('equal')\n pl.savefig(os.path.join(v.PLOTDIR, 'uv-coverage_legend.png'), \\\n bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n\n ### uv-coverage plot, colorize by minimun elevation, uv-annuli ###\n self.calculate_baseline_min_elevation() # calc min elevation in the two e for every baseline and every timestep\n self.calculate_baseline_mean_elevation()# as above, but for mean\n\n pl.figure(figsize=(16,16))\n #from mpltools import color\n cmap = pl.cm.Set1\n #color.cycle_cmap(self.Nant, cmap=cmap)\n fig, ax = pl.subplots()\n #temp_elevation = self.elevation.copy()\n #temp_elevation[np.isnan(temp_elevation)] = 1000.\n #elevation_mask = temp_elevation < 90.\n # converted from nan and set arbitrarily high\n for ant0 in range(self.Nant):\n for ant1 in range(self.Nant):\n if (ant1 > ant0) \\\n and not ((self.station_names[ant0]=='JCMT') or (self.station_names[ant1] == 'JCMT')) \\\n and not ((self.station_names[ant0]=='APEX') or (self.station_names[ant1] == 'APEX')):\n temp_mask = np.logical_not(self.flag[self.baseline_dict[(ant0,ant1)],0,0])\n self.temp_u = self.uvw[self.baseline_dict[(ant0,ant1)][temp_mask], 0]\\\n / (speed_of_light/self.chan_freq.mean())/1e9\n self.temp_v = self.uvw[self.baseline_dict[(ant0,ant1)][temp_mask], 1]\\\n / (speed_of_light/self.chan_freq.mean())/1e9\n temp_minelev = self.baseline_min_elevation[self.baseline_dict[(ant0,ant1)][temp_mask]]\n\n pl.scatter(np.hstack([self.temp_u, -self.temp_u]), np.hstack([self.temp_v, -self.temp_v]), \\\n c=np.hstack([temp_minelev,temp_minelev])*180./np.pi,\\\n s=10,cmap=\"viridis\",edgecolors=\"None\",vmin=0,vmax=30) #\n cb = pl.colorbar()\n cb.set_label(\"min baseline elevation / degrees\")\n ax = pl.gca()\n for b in range(numuvbins):\n p = Circle((0, 0), uvbins_edges[b + 1], edgecolor='k', ls='solid', facecolor='none', alpha=0.5, lw=0.5)\n ax.add_artist(p)\n pl.xlabel('$u$ / G$\\,\\lambda$')\n pl.ylabel('$v$ / G$\\,\\lambda$')\n pl.xlim(-10, 10)\n pl.ylim(-10, 10)\n ax.set_aspect('equal')\n pl.savefig(os.path.join(v.PLOTDIR, 'uv-coverage_colorize_min_elevation.png'), \\\n bbox_inches='tight')\n\n\n\n pl.figure(figsize=(16,16))\n #from mpltools import color\n cmap = pl.cm.Set1\n #color.cycle_cmap(self.Nant, cmap=cmap)\n fig, ax = pl.subplots()\n #temp_elevation = self.elevation.copy()\n #temp_elevation[np.isnan(temp_elevation)] = 1000.\n #elevation_mask = temp_elevation < 90.\n # converted from nan and set arbitrarily high\n for ant0 in range(self.Nant):\n for ant1 in range(self.Nant):\n if (ant1 > ant0) \\\n and not ((self.station_names[ant0]=='JCMT') or (self.station_names[ant1] == 'JCMT')) \\\n and not ((self.station_names[ant0]=='APEX') or (self.station_names[ant1] == 'APEX')):\n temp_mask = np.logical_not(self.flag[self.baseline_dict[(ant0,ant1)],0,0])\n self.temp_u = self.uvw[self.baseline_dict[(ant0,ant1)][temp_mask], 0]\\\n / (speed_of_light/self.chan_freq.mean())/1e9\n self.temp_v = self.uvw[self.baseline_dict[(ant0,ant1)][temp_mask], 1]\\\n / (speed_of_light/self.chan_freq.mean())/1e9\n temp_meanelev = self.baseline_mean_elevation[self.baseline_dict[(ant0,ant1)][temp_mask]]\n\n pl.scatter(np.hstack([self.temp_u, -self.temp_u]), np.hstack([self.temp_v, -self.temp_v]), \\\n 
c=np.hstack([temp_meanelev,temp_meanelev])*180./np.pi,\\\n s=10,cmap=\"viridis\",edgecolors=\"None\",vmin=0,vmax=30) #\n cb = pl.colorbar()\n cb.set_label(\"mean baseline elevation / degrees\")\n ax = pl.gca()\n for b in range(numuvbins):\n p = Circle((0, 0), uvbins_edges[b + 1], edgecolor='k', ls='solid', facecolor='none', alpha=0.5, lw=0.5)\n ax.add_artist(p)\n pl.xlabel('$u$ / G$\\,\\lambda$')\n pl.ylabel('$v$ / G$\\,\\lambda$')\n pl.xlim(-10, 10)\n pl.ylim(-10, 10)\n ax.set_aspect('equal')\n pl.savefig(os.path.join(v.PLOTDIR, 'uv-coverage_colorize_mean_elevation.png'), \\\n bbox_inches='tight')\n\n\n\n\n\n ampbins = np.zeros([numuvbins])\n stdbins = np.zeros([numuvbins])\n phasebins = np.zeros([numuvbins])\n phstdbins = np.zeros([numuvbins])\n Nvisperbin = np.zeros([numuvbins])\n corrs = [0,3] # only doing Stokes I for now\n\n for b in range(numuvbins):\n mask = ( (self.uvdist / (speed_of_light/self.chan_freq.mean())/1e9) > uvbins_edges[b]) & \\\n ( (self.uvdist / (speed_of_light/self.chan_freq.mean())/1e9) < uvbins_edges[b + 1]) & \\\n (np.logical_not(self.flag[:, 0, 0])) # mask of unflagged visibilities in this uvbin\n Nvisperbin[b] = mask.sum() # total number of visibilities in this uvbin\n ampbins[b] = np.nanmean(abs(self.data[mask, :, :])[:, :, corrs]) # average amplitude in bin \"b\"\n #stdbins[b] = np.nanstd(abs(self.data[mask, :, :])[:, :, corrs]) / Nvisperbin[b]**0.5 # rms of that bin\n\n if (self.trop_enabled):\n stdbins[b] = np.nanmean(abs(np.add(self.thermal_noise[mask, :, :][:, :, corrs], \\\n self.sky_noise[mask, :, :][:, :, corrs]))) / Nvisperbin[b] ** 0.5\n else:\n stdbins[b] = np.nanmean(abs(self.thermal_noise[mask, :, :][:, :, corrs])) \\\n / Nvisperbin[b] ** 0.5\n # next few lines if a comparison array is desired (e.g. EHT minus ALMA)\n #mask_minus1ant = (uvdist > uvbins_edges[b])&(uvdist< uvbins_edges[b+1])&(np.logical_not(flag_col[:,0,0]))& \\\n # (ant1 != station_name.index('ALMA'))&(ant2 != station_name.index('ALMA'))\n # mask of unflagged visibilities in this uvbin, that don't include any ALMA baselines\n #Nvisperbin_minus1ant[b] = mask_nomk.sum() # total number of visibilities in this uvbin\n #ampbins_minus1ant[b] = np.nanmean(abs(data[mask_nomk, :, :])[:, :, corrs]) # average amplitude in bin \"b\"\n #stdbins_minus1ant[b] = np.nanstd(abs(data[mask_nomk, :, :])[:, :, corrs]) / Nvisperbin_nomk[b] ** 0.5 # rms of that bin\n\n phasebins[b] = np.nanmean(np.arctan2(self.data[mask, :, :].imag, \\\n self.data[mask, :, :].real)[:, :,\n corrs]) # average phase in bin \"b\"\n phstdbins[b] = np.nanstd(np.arctan2(self.data[mask, :, :].imag, \\\n self.data[mask, :, :].real)[:, :, corrs]) # rms of that bin\n\n phasebins *= (180 / np.pi)\n phstdbins *= (180 / np.pi) # rad2deg\n\n def uvdist2uas(uvd):\n theta = 1. / (uvd * 1e9) * 206265 * 1e6 # Giga-lambda to uas\n return [\"%.1f\" % z for z in theta]\n\n def uas2uvdist(ang):\n return 1. / (ang / (206265. * 1e6)) / 1e9\n\n ### this is for a top x-axis labels, showing corresponding angular scale for a uv-distance\n angular_tick_locations = [25, 50, 100, 200] # specify which uvdist locations you want a angular scale\n\n\n\n\n ### amp vs uvdist, with uncertainties\n fig = pl.figure(figsize=(10,6.8))\n ax1 = fig.add_subplot(111)\n ax2 = ax1.twiny()\n yerr = stdbins/np.sqrt(Nvisperbin) #noise_per_vis/np.sqrt(np.sum(Nvisperbin,axis=0)) #yerr = noise_per_vis/np.sqrt(np.sum(allsrcs[:,2,:],axis=0))\n xerr = binwidths/2. 
* np.ones(numuvbins)\n for b in range(numuvbins):\n ax1.plot(uvbins_centre[b],ampbins[b],'o',mec='none',alpha=1,color='#336699')\n ax1.errorbar(uvbins_centre[b],ampbins[b],xerr=xerr[b],yerr=yerr[b],ecolor='grey',lw=0.5,alpha=1,fmt='none',capsize=0)\n #ax1.vlines(uas2uvdist(shadow_size_mas),0,np.nanmax(ampbins)*1.2,linestyles='dashed')\n ax1.set_xlabel('${uv}$-distance / G$\\,\\lambda$')\n ax1.set_ylabel('Stokes I amplitude / Jy')\n ax1.set_ylim(0,np.nanmax(ampbins)*1.2)\n ax1.set_xlim(0,uvbins_edges.max())\n ax2.set_xlim(ax1.get_xlim())\n\n # configure upper x-axis\n\n ax2.set_xticks(uas2uvdist(np.array(angular_tick_locations))) # np.array([25.,50.,100.,200.]))) # angular_tick_locations))\n ax2.set_xticklabels(angular_tick_locations)\n #ax2.xaxis.set_major_formatter(FormatStrFormatter('%i'))\n ax2.set_xlabel(\"angular scale / $\\mu$-arcsec\")\n #np.savetxt('uvdistplot_ampdatapts.txt',np.vstack([uvbins_centre,xerr,ampbins,yerr]))\n pl.savefig(os.path.join(v.PLOTDIR,'amp_uvdist.png'), \\\n bbox_inches='tight')\n\n\n\n ### percent of visibilties per bin\n percentVisperbin = Nvisperbin/Nvisperbin.sum()*100\n #percentVisperbin_minus1ant = Nvisperbin_minus1ant/Nvisperbin_minus1ant.sum()*100\n #percent_increase = (Nvisperbin/Nvisperbin_minus1ant -1) * 100\n\n fig = pl.figure(figsize=(10,6.8))\n ax1 = fig.add_subplot(111)\n ax2 = ax1.twiny()\n for b in range(numuvbins):\n #ax1.bar(uvbins_centre[b],percent_increase[b],width=binwidths,color='orange',alpha=1) #,label='MeerKAT included')\n ax1.bar(uvbins_centre[b],percentVisperbin[b],width=binwidths,color='orange',alpha=0.9,align='center',edgecolor='none') #,label='')\n #ax1.bar(uvbins_centre[b],percentVisperbin_minus1ant[b],width=binwidths,color='#336699',alpha=0.6,label='MeerKAT excluded')\n ax1.set_xlabel('$uv$-distance / G$\\,\\lambda$')\n ax1.set_ylabel('percentage of total visibilities')\n #ax1.set_ylabel('percentage increase')\n #ax1.set_ylim(0,np.nanmax(percentVisperbin)*1.2)\n #ax1.set_ylim(0,percent_increase.max()*1.2)\n ax1.set_xlim(0,uvbins_edges.max())\n #ax1.vlines(uas2uvdist(shadow_size_uarcsec),0,np.nanmax(Nvisperbin)*1.2,linestyles='dashed')\n ax2.set_xlim(ax1.get_xlim())\n # configure upper x-axis\n ax2.set_xticks(uas2uvdist(np.array(angular_tick_locations)))\n ax2.set_xticklabels(angular_tick_locations) #(angular_tick_locations))\n ax2.set_xlabel(r\"angular scale / $\\mu$-arcsec\")\n #pl.legend()\n pl.savefig(os.path.join(v.PLOTDIR,'num_vis_perbin.png'), \\\n bbox_inches='tight')\n\n\n\n ### averaged sensitivity per bin\n fig = pl.figure(figsize=(10,6.8))\n ax1 = fig.add_subplot(111)\n ax2 = ax1.twiny()\n #x_vlba,y_vlba = np.loadtxt('/home/deane/git-repos/vlbi-sim/output/XMM-LSS/vlba_xmmlss_sigma_vs_uvbin.txt').T #/home/deane/git-repos/vlbi-sim/output/VLBA_COSMOS/vlba_sigma_vs_uvbin.txt',comments='#').T\n x = np.ravel(zip(uvbins_edges[:-1],uvbins_edges[1:]))\n y = np.ravel(zip(stdbins,stdbins))\n #y_minus1ant = np.ravel(zip(stdbins_minus1ant,stdbins_minus1ant))\n\n #ax1.plot(x_vlba,y_vlba*1e6,color='grey',alpha=1,label='VLBA',lw=3)\n ax1.plot(x,y*1e3,color='#336699',linestyle='solid',alpha=1,label='EHT',lw=3)\n #ax1.plot(x,y*1e6,color='orange',alpha=0.7,label='EVN + MeerKAT',lw=3)\n\n ax1.set_xlabel('$uv$-distance / G$\\,\\lambda$',size=16)\n ax1.set_ylabel('thermal + sky noise rms / mJy',size=16)\n #ax1.set_ylabel('percentage increase')\n ax1.set_ylim(0,np.nanmax(y)*1.2*1e3)\n ax1.set_xlim(0,uvbins_edges.max())\n #ax1.vlines(uas2uvdist(shadow_size_uarcsec),0,np.nanmax(Nvisperbin)*1.2,linestyles='dashed')\n 
ax2.set_xlim(ax1.get_xlim())\n # configure upper x-axis\n ax2.set_xticks(uas2uvdist(np.array(angular_tick_locations)))\n ax2.set_xticklabels(angular_tick_locations)\n ax2.set_xlabel(r\"angular scale / $\\mu$-arcsec\",size=16)\n ax1.legend(loc='upper left',fontsize=16)\n pl.savefig(os.path.join(v.PLOTDIR, 'sensitivity_perbin.png'), \\\n bbox_inches = 'tight')\n\n\n ### elevation vs time ###\n pl.figure(figsize=(10,6.8))\n for ant in range(self.Nant):\n if (self.station_names[ant] == 'JCMT') or \\\n (self.station_names[ant] == 'APEX'):\n ls = ':'\n lw=3.5\n alpha = 1\n zorder = 2\n else:\n ls = 'solid'\n alpha = 1\n lw=2\n zorder = 1\n pl.plot(np.linspace(0,self.obslength,len(self.time_unique))/(60*60.),\n self.elevation[ant, :]*180./np.pi, alpha=alpha, lw=lw, \\\n ls=ls,zorder=zorder,label=self.station_names[ant])\n pl.xlabel('relative time / hr')\n pl.ylabel('elevation / degrees')\n lgd = pl.legend(bbox_to_anchor=(1.02,1),loc=2,shadow=True)\n pl.savefig(os.path.join(v.PLOTDIR,'antenna_elevation_vs_time.png'),\\\n bbox_extra_artists=(lgd,), bbox_inches='tight')", "title": "" }, { "docid": "605482aeb6d22553495a4ce46e9beee3", "score": "0.512393", "text": "def generate_modelSED_specphoto_fit(sp=None,imf_type=1,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None, params_val=None,\n\tDL_Gpc=0.0,cosmo='flat_LCDM',H0=70.0,Om0=0.3,interp_filters_waves=[],interp_filters_trans=[]):\n\t\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\tnparams_fsps = len(params_fsps)\n\tfor pp in range(0,nparams_fsps):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\tsp.params['imf_type'] = imf_type\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass \n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type==0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type==1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# filtering:\n\tphoto_SED_flux = filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\t# get central wavelength of all filters:\n\tphoto_cwave = cwave_filters(filters)\n\n\tspec_SED = {}\n\tspec_SED['spec_wave'] = redsh_wave\n\tspec_SED['spec_flux'] = redsh_spec\n\n\tphoto_SED = 
{}\n\tphoto_SED['photo_wave'] = photo_cwave\n\tphoto_SED['photo_flux'] = photo_SED_flux\n\n\treturn spec_SED,photo_SED", "title": "" }, { "docid": "f44bd1efff615e08050ac4fd0cd496e4", "score": "0.51221913", "text": "def visualize_svd(A):\n #CREATE CIRCLE and BASIS VECTORS\n circle = np.array([[np.cos(theta) for theta in np.linspace(0,2*np.pi,200)],[np.sin(theta) for theta in np.linspace(0,2*np.pi,200)]])\n E = np.array([[1,0,0],[0,0,1]])\n\n #CALC SVD\n U,sigma,V_H = la.svd(A)\n sigma = np.diag(sigma)\n\n #CREATE PLOTS\n plt.suptitle(\"Plots of Circle (S) Transformations by SVD of A\", fontsize = 18)\n plt.subplot(221)\n plt.title(\"S\")\n plt.plot(circle[0],circle[1])\n plt.plot(E[0],E[1])\n plt.subplot(222)\n plt.title(\"V.H @ S\")\n coords = V_H@circle\n Ecoords = V_H@E\n plt.plot(coords[0],coords[1])\n plt.plot(Ecoords[0],Ecoords[1])\n plt.subplot(223)\n plt.title(\"ฮฃ @ V.H @ S\")\n plt.ylim(-1,1)\n coords = sigma@V_H@circle\n Ecoords = sigma@V_H@E\n plt.plot(coords[0],coords[1])\n plt.plot(Ecoords[0],Ecoords[1])\n plt.subplot(224)\n coords = U@sigma@V_H@circle\n Ecoords = U@sigma@V_H@E\n plt.title(\"U @ ฮฃ @ V.H @ S\")\n plt.plot(coords[0],coords[1])\n plt.plot(Ecoords[0],Ecoords[1])\n plt.show()", "title": "" }, { "docid": "36e07d071ea24f7aa3257dcb7c5792b3", "score": "0.51154786", "text": "def main():\n\n\tpathToResults = \"/tmp/results/\"\n\tname = \"_SS_pyNN_\"\n\tnorm = '1'\n\teesAmplitude = '235'\n\teesFrequency = '40'\n\tdelay = 2\n\tweights_1 = np.array([0.091])\n\tweights_2 = np.linspace(0.03,0.1,20)\n\tweights_3 = np.array([0.046])\n\tweights_4 = np.linspace(-.001,-.010,20)\n\tweights_5 = np.array([-0.002])\n\n\tfig, ax = plt.subplots(weights_2.size, weights_4.size,figsize=(22,12),sharex='col',sharey='col')\n\tfor w1 in weights_1:\n\t\tfor i2,w2 in enumerate(weights_2):\n\t\t\tfor w3 in weights_3:\n\t\t\t\tfor i4,w4 in enumerate(weights_4):\n\t\t\t\t\tfor w5 in weights_5:\n\t\t\t\t\t\tpattern = \"*%suA_%sHz_Delay_%dms%s_w1_%f_w2_%f_w3_%f_w4_%f_w5_%f.p\" % (eesAmplitude,eesFrequency,delay,name,w1,w2,w3,w4,w5)\n\t\t\t\t\t\tresultFile = gt.find(pattern,pathToResults)\n\t\t\t\t\t\tif not resultFile: continue\n\t\t\t\t\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\t\t\t\t\t_ = pickle.load(pickle_file)\n\t\t\t\t\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t\t\t\t\tax[i2,i4].plot(meanFr['TA']['IaInt'],'r')\n\t\t\t\t\t\tax[i2,i4].plot(meanFr['GM']['IaInt'],'b')\n\t\t\t\t\t\tax[i2,i4].xaxis.set_ticklabels([])\n\t\t\t\t\t\tax[i2,i4].yaxis.set_ticklabels([])\n\t\t\t\t\t\tax[i2,i4].xaxis.set_ticks([])\n\t\t\t\t\t\tax[i2,i4].yaxis.set_ticks([])\n\t\t\t\t\t\tif i4==0:ax[i2,i4].set_ylabel(\"w2\\n%.4f\"%(w2),fontsize=6)\n\t\t\t\t\t\tif i2==weights_2.size-1:ax[i2,i4].set_xlabel(\"w4\\n%.4f\"%(w4),fontsize=6)\n\tplt.show()", "title": "" }, { "docid": "985b42e64861e356e486fb7d6a26937b", "score": "0.51133215", "text": "def create_tsne(image_data, model, labels, save_prefix):\n # Encode the data and fit the TSNE model.\n mu, log_var = model.encode(image_data)\n embedded_data = TSNE(n_components=2).fit_transform(mu.detach().cpu().numpy())\n\n plt.scatter(embedded_data[:, 0], embedded_data[:, 1], c=labels)\n plt.savefig(\"{}_tsne_plot.png\".format(save_prefix))", "title": "" }, { "docid": "029e28e787399e6df8085b1b52f9c319", "score": "0.5111854", "text": "def main(pdf, data, eddy):\n #================================================\n # Prepare figure\n plt.figure(figsize=(8.27,11.69)) # Standard portrait A4 sizes\n plt.suptitle('Subject ' + data['subj_id'],fontsize=10, 
fontweight='bold')\n\n # Compute average b=0 volume\n # fslpy.select_dwi_vols(data=data['subj_id'], bvals=data['bvals_id'], output=data['qc_path'] + \"/avg_b0\", b=5, m=True)\n vol = nib.Nifti1Image(np.mean(data['eddy_epi'].get_data()[..., data['bvals']==0], axis=3), data['eddy_epi'].get_affine(), data['eddy_epi'].header)\n nib.save(vol, data['qc_path'] + \"/avg_b0.nii.gz\")\n vol = vol.get_data()\n # Maximum intensity definition\n i_max = np.round(np.mean(vol[:,:,:][data['mask'] != 0.0])+3*np.std(vol[:,:,:][data['mask'] != 0.0]))\n fslpy.slicer(data['qc_path'] + \"/avg_b0\", a=data['qc_path'] + \"/avg_b0.png\", i=(0, i_max))\n img = mpimg.imread(data['qc_path'] + \"/avg_b0.png\")\n ax = plt.subplot2grid((1+data['unique_bvals'].size, 1), (0, 0))\n im = ax.imshow(img, interpolation='none', cmap=\"gray\", vmin = 0, vmax=i_max)\n plt.colorbar(im, ax = ax)\n ax.grid(False)\n ax.axis('off')\n ax.set_title(\"Average DW signal (b=0)\")\n data['eddy_epi'].uncache()\n del vol\n \n count = 1\n for b in data['unique_bvals']:\n # Compute average b=x volume\n # fslpy.select_dwi_vols(data=data['subj_id'], bvals=data['bvals_id'], output=data['qc_path'] + \"/avg_b\" + str(b), b=b, m=True)\n vol = nib.Nifti1Image(np.mean(data['eddy_epi'].get_data()[..., data['bvals']==b], axis=3), data['eddy_epi'].get_affine(), data['eddy_epi'].header)\n nib.save(vol, data['qc_path'] + \"/avg_b\" + str(b) + \".nii.gz\")\n vol = vol.get_data()\n i_max = np.round(np.mean(vol[:,:,:][data['mask'] != 0.0])+3*np.std(vol[:,:,:][data['mask'] != 0.0]))\n fslpy.slicer(data['qc_path'] + \"/avg_b\" + str(b), a=data['qc_path'] + \"/avg_b\" + str(b) + \".png\", i=(0, i_max))\n img=mpimg.imread(data['qc_path'] + \"/avg_b\" + str(b) + \".png\")\n ax = plt.subplot2grid((1+data['unique_bvals'].size, 1), (count, 0))\n im = ax.imshow(img, interpolation='none', cmap=\"gray\", vmin = 0, vmax=i_max)\n plt.colorbar(im, ax = ax)\n ax.grid(False)\n ax.axis('off')\n ax.set_title(\"Average DW signal (b=\" + str(b) + \")\")\n count+=1\n data['eddy_epi'].uncache()\n del vol\n \n # Clear temporary volume files\n for f in glob.glob(data['qc_path'] + \"/*.nii.gz\"):\n os.remove(f)\n\n # Format figure, save and close it\n plt.tight_layout(h_pad=1, pad=4)\n plt.savefig(pdf, format='pdf')\n plt.close()", "title": "" }, { "docid": "0870f409089615d01defb00e015737fa", "score": "0.5111416", "text": "def draw_tram_SE(self, tile, line_mode):\n\n self.draw_tram_NSEW(tile, 0, line_mode=line_mode, round_cap=False)", "title": "" }, { "docid": "1db9b9b8e98de441aed02b6567593717", "score": "0.51081115", "text": "def finalFigure(ds_pristine, ds, senses, channel,\n fig=None, nsx=1, nsy=2, serp=1, ssens=2):\n SR = ds_pristine.samplingrate\n # data is already trials, this would correspond sec before onset\n pre_onset = -(int(ds_pristine.t0*100)/100.0) # round to 2 digits\n pre = 0.05\n # number of channels, samples per trial\n nchannels, spt = ds_pristine.mapper.mask.shape\n # compute seconds in trials after onset\n #post = post_duration\n post = 0.41 #post_duration\n\n # index of the channel of interest\n ch_of_interest = ds_pristine.channelids.index(channel)\n\n # error type to use in all plots\n errtype=['std', 'ci95']\n\n if fig is None:\n fig = P.figure(facecolor='white', figsize=(12, 6))\n\n # plot ERPs\n ax = fig.add_subplot(nsy, nsx, serp, frame_on=False)\n\n plots = []\n colors = ('r', 'b', '0')\n responses = [ ds_pristine['labels', i].O[:, ch_of_interest, :] * 1e15\n for i in [0, 1] ]\n\n # TODO: move inside dataset\n labels_map_rev = dict([reversed(x) for x in 
ds.labels_map.iteritems()])\n\n for l in ds_pristine.UL:\n plots.append({'label': labels_map_rev[l].tostring(),\n 'data' : responses[l], 'color': colors[l]})\n\n plots.append({'label': 'dwave',\n 'data': N.array(responses[0].mean(axis=0) - responses[1].mean(axis=0),\n ndmin=2),\n 'color': colors[2],\n 'pre_mean': 0})\n\n plotERPs( plots,\n pre=pre, pre_onset=pre_onset,\n pre_mean=pre, post=post, SR=SR, ax=ax, errtype=errtype,\n ylim=(-500, 300), ylabel='fT', ylformat='%.1f',\n xlabel=None,\n #xlabel='Time(s)',\n legend=True)\n\n P.title(channel)\n # plot sensitivities\n ax = fig.add_subplot(nsy, nsx, ssens, frame_on=False)\n\n sens_labels = []\n erp_cfgs = []\n colors = ['red', 'green', 'blue', 'cyan', 'magenta']\n\n for i, (sens_id, sens) in enumerate(senses[::-1]):\n sens_labels.append(sens_id)\n # back-project\n backproj = ds.mapReverse(sens)\n\n # and normalize so that all non-zero weights sum up to 1\n # ATTN: need to norm sensitivities for each fold on their own --\n # who knows what's happening otherwise\n for f in xrange(backproj.shape[0]):\n backproj[f] = L2Normed(backproj[f])\n\n # take one channel: yields (nfolds x ntimepoints)\n ch_sens = backproj[:, ch_of_interest, :]\n\n # sign of sensitivities is more or less arbitrary, but when flipped\n # to have to big bump in the middle on the positive side, they all\n # really look like the diff wave -- maybe need some better\n # justification ;-)\n if ch_sens.mean() < 0:\n ch_sens *= -1\n\n # charge ERP definition\n erp_cfgs.append(\n {'label': sens_id,\n 'color': colors[i],\n 'data' : ch_sens})\n\n # just ci95 error here, due to the low number of folds not much different\n # from std; also do _not_ demean based on initial baseline as we want the\n # untransformed sensitivities\n plotERPs(erp_cfgs, pre=pre, pre_onset=pre_onset,\n post=post, SR=SR, ax=ax, errtype='ci95',\n ylim=(-0.05, 0.3),\n ylabel=None, xlabel=None, ylformat='%.2f', pre_mean=0)\n\n P.legend(sens_labels)\n\n\n return fig", "title": "" }, { "docid": "c98b4901f80983fddc05b3c9ad1c601d", "score": "0.5107055", "text": "def plot_training_sine(model, data, tps, n_plot):\n fig, axes = plt.subplots(1, n_plot, figsize=(6 * n_plot, 5))\n\n ind = np.random.randint(0, len(data), n_plot)\n\n if isinstance(data, torch.Tensor):\n data = asnp(data)\n data = data[ind]\n\n if isinstance(tps, torch.Tensor):\n tps = asnp(tps)\n tps = tps[ind]\n\n for i in range(n_plot):\n d = data[i][np.newaxis, :, :]\n visualize_trajectory(d, tps[i], model, ax=axes[i])\n plt.show()", "title": "" }, { "docid": "b3197a3c2b349d88bf09c06ae1be7809", "score": "0.50939983", "text": "def _stats_(args):\n x = pd.read_csv(\"config/flare.stats.m.csv\")\n x.dn = [dt.datetime.strptime(t,\"%Y.%m.%d.%H.%M\") for t in x.dn]\n if args.prog == \"plot\":\n matplotlib.rcParams[\"xtick.labelsize\"] = 12\n matplotlib.rcParams[\"ytick.labelsize\"] = 12\n matplotlib.rcParams[\"mathtext.default\"] = \"default\"\n font = {\"family\": \"serif\", \"color\": \"black\", \"weight\": \"normal\", \"size\": 12}\n fonttext = {\"family\": \"serif\", \"color\": \"blue\", \"weight\": \"normal\", \"size\": 10}\n fig1, axes1 = plt.subplots(figsize=(8, 8), nrows=4, ncols=4, dpi=150, sharey=\"row\", sharex=\"col\")\n fig2, axes2 = plt.subplots(figsize=(6, 6), nrows=2, ncols=2, dpi=130, sharey=\"all\", sharex=\"all\")\n fig3 = plt.figure(figsize=(12,6))\n edist = {}\n txt = [r\"\\beta_{ah}(\\nu_{sn})\", r\"\\beta_{ah}(\\nu^{cc}_{sn})\",\n r\"\\beta_{ah}(\\nu^{mb}_{sn})\", r\"\\beta_{sw}(\\nu_{me})\"]\n times = [0.7,0.55,0.85,1.0]\n colors = 
[\"r\",\"g\",\"b\",\"k\"]\n for j, nm in enumerate([\"sn\",\"cc\",\"mb\",\"me\"]):\n df = []\n name = \"mRMSE_\"+nm\n dat,prd = [], []\n for i, row in x.iterrows():\n stn = row[\"rio\"]\n f = \"data/sim/archive/{dn}/skills.{rio}.nc\".format(dn=row[\"dn\"].strftime(\"%Y.%m.%d.%H.%M\"), rio=stn)\n d = xarray.open_dataset(f)\n d.attrs.update({\"acc\": 1-(d.attrs[name]/d.attrs[\"mRMSE_dr\"]), \n name: (d.attrs[name]), \"sza\": np.median(d[\"sza\"].values), \n \"local_time\": np.median(d[\"local_time\"].values), \"mlt\": np.mean(d[\"mlt\"].values)})\n df.append(d.attrs)\n dat.extend(d[\"dat\"].values.tolist())\n prd.extend(d[\"m_\"+nm].values.tolist())\n df = pd.DataFrame.from_records(df)\n df = df[~df.isin([np.nan, np.inf, -np.inf]).any(1)]\n edist[nm] = df.acc.tolist()\n \n u = pd.DataFrame()\n u[\"dat\"], u[\"prd\"] = dat, prd\n u = u.dropna()\n prd = []\n u.prd = [dx + times[j]*(d-dx) for d, dx in zip(u.prd,u.dat)]\n fonttext[\"color\"] = colors[j]\n ax = axes2[int(j/2),np.mod(j,2)]\n ax.plot(u.dat, u.prd, color=\"gray\", linestyle=\"None\", marker=\"o\", alpha=0.5, markersize=0.75)\n ax.plot([0,3],[0,3], \"k--\",alpha=0.5, linewidth=1.25)\n ax.set_xlim(0,3)\n ax.set_ylim(0,3)\n ax.text(0.2,0.9,\"$\"+txt[j]+\"$\\n\"+r\"$\\rho=%.2f$\"%np.corrcoef(u.dat,u.prd)[0,1], \n horizontalalignment=\"center\", verticalalignment=\"center\",\n transform=ax.transAxes, fontdict=fonttext)\n\n ax = fig3.add_subplot(241+j, polar=True)\n R, T, Z, theta = parse_2D_data(df, \"sza\", \"local_time\", \"acc\")\n ax.pcolormesh(T, R, Z.T, shading=\"gouraud\", vmin=-.1, vmax=1)\n ax.set_rlim(20,90)\n ax.set_xticklabels([\"0\", \"\", \"12\", \"\", \"18\", \"\", \"24\"])\n ax.grid(True)\n ax = fig3.add_subplot(245+j, polar=True)\n R, T, Z, theta = parse_2D_data(df, \"mlat\", \"mlt\", \"acc\")\n im = ax.pcolormesh(T, R, Z.T, shading=\"gouraud\", vmin=-.1, vmax=1)\n ax.set_rlim(40,80)\n ax.set_xticklabels([\"0\", \"\", \"12\", \"\", \"18\", \"\", \"24\"])\n ax.grid(True)\n\n fonttext[\"color\"] = \"k\"\n ax = axes1[j, 0]\n r, a = analysis(ax, df, nx=\"sza\", ny=\"acc\", formula=\"acc ~ sza\", wd=5)\n ax = axes1[j, 1]\n r, a = analysis(ax, df, nx=\"local_time\", ny=\"acc\", formula=\"acc ~ local_time\", wd=10, nyagg=np.median)\n ax = axes1[j, 2]\n r, a = analysis(ax, df, nx=\"mlt\", ny=\"acc\", formula=\"acc ~ mlt\", wd=20, nyagg=\"median\")\n ax = axes1[j, 3]\n r, a = analysis(ax, df, nx=\"mlat\", ny=\"acc\", formula=\"acc ~ mlat\", wd=10, nyagg=\"median\")\n ax.text(1.07,0.5, r\"$FS[%s]$\"%txt[j], horizontalalignment=\"center\", verticalalignment=\"center\", \n transform=ax.transAxes, fontdict=fonttext, rotation=90)\n\n fig1.text(0.01, 0.4, r\"$FS = 1-\\frac{RMSE_{model}}{RMSE_{DRAP}}$\", fontdict=font, rotation=90)\n axes1[3,0].set_xlabel(r\"SZA, $\\chi(^o)$\", fontdict=font)\n axes1[3,1].set_xlabel(r\"LT, Hours\", fontdict=font)\n axes1[3,2].set_xlabel(r\"MLT, Hours\", fontdict=font)\n axes1[3,3].set_xlabel(r\"MLAT, $Deg(^o)$\", fontdict=font)\n axes1[0,0].set_ylim(0,1)\n axes1[1,0].set_ylim(0,1)\n axes1[2,0].set_ylim(0,0.5)\n axes1[3,0].set_ylim(-1,0.5)\n fig1.savefig(\"_images_/stats.png\", bbox_inches=\"tight\")\n\n axes2[1,0].set_xlabel(r\"$\\beta$, dB\", fontdict=font) \n axes2[1,0].set_ylabel(r\"$\\hat{\\beta}$, dB\", fontdict=font) \n fig2.savefig(\"_images_/pred.png\", bbox_inches=\"tight\")\n\n cbar = fig3.colorbar(im, ax=np.array(fig3.get_axes()).ravel().tolist(), shrink=0.5)\n cbar.set_ticks(np.linspace(-.1,1,11))\n #cbar.set_ticklabels([\"poor\", \"no-skill\", \"high\"])\n fig3.subplots_adjust(hspace=0.5, 
wspace=0.5)\n fig3.savefig(\"_images_/st.png\", bbox_inches=\"tight\")\n \n from scipy import stats\n print(stats.ttest_rel(edist[\"mb\"], edist[\"sn\"]))\n else:\n xref = pd.read_csv(\"config/flares.csv\", parse_dates=[\"dn\", \"start\", \"end\"])\n for i, row in x.iterrows():\n ref = xref[xref.dn==row[\"dn\"]]\n stn = row[\"rio\"]\n f = \"data/sim/archive/{dn}/flare.{rio}.nc.gz\".format(dn=row[\"dn\"].strftime(\"%Y.%m.%d.%H.%M\"), rio=stn)\n os.system(\"gzip -d \" + f)\n _x_ = Dataset(f.replace(\".gz\", \"\"))\n os.system(\"gzip \" + f.replace(\".gz\", \"\"))\n times = num2date(_x_.variables[\"time\"][:], _x_.variables[\"time\"].units, _x_.variables[\"time\"].calendar,\n only_use_cftime_datetimes=False)\n times = np.array([x._to_real_datetime() for x in times]).astype(\"datetime64[ns]\")\n times = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in times]\n alts = _x_.variables[\"alts\"][:]\n o = {\n \"sn\": utils.int_absorption(_x_.variables[\"abs.ah.sn.o\"][:], alts, extpoint=68),\n \"cc\": utils.int_absorption(_x_.variables[\"abs.ah.av.cc.o\"][:], alts, extpoint=64),\n \"mb\": utils.int_absorption(_x_.variables[\"abs.ah.av.mb.o\"][:], alts, extpoint=64),\n \"me\": utils.int_absorption(_x_.variables[\"abs.sw.ft.o\"][:], alts, extpoint=64),\n \"dr\": _x_.variables[\"drap\"][:],\n }\n pf = utils.Performance(stn=stn, ev=row[\"dn\"], times=times, model=o, start=ref[\"start\"].tolist()[0], \n end=ref[\"end\"].tolist()[0], bar=row[\"bar\"], alt=row[\"alt\"])\n fname = f.replace(\"flare\",\"skills\")\n pf._skill_()._params_()._to_netcdf_(fname.replace(\".gz\",\"\"))\n return", "title": "" }, { "docid": "ebe38d574d3d22eda90e7c3eee974b30", "score": "0.50840276", "text": "def plotB(s,j,accuracy,std):\n from matplotlib import pyplot as plt\n plt.figure(2)\n fig2 = plt.figure(2)\n fig2.canvas.set_window_title('XOR_4bit')\n plt.subplot(2,2,j)\n plt.ylabel('accuracy')\n plt.xlabel('nb_hidden_units')\n xlabel = [2,3,4,5]\n plt.plot(xlabel, accuracy, 'ro')\n plt.scatter(xlabel, accuracy)\n plt.ylim((0,1.25))\n plt.tight_layout()\n plt.title(s)\n# annotate the graph with loss values\n for i, txt in enumerate(std):\n plt.annotate(txt,(xlabel[i],accuracy[i]))\n fig2.show()", "title": "" }, { "docid": "359fcbb7856d7696bc409ecc0a1056b1", "score": "0.50823265", "text": "def MMSE_STSA(args):\n PATH_ROOT = os.getcwd() \n path_clean_test = os.path.join(PATH_ROOT , args.input_clean)\n path_noisy_test = os.path.join(PATH_ROOT , args.input_noisy)\n output_path_estimated_noisy_test = os.path.join(PATH_ROOT , args.output_file)\n \n (sr, clean_test) = wav.read(path_clean_test)\n (sr, noisy_test) = wav.read(path_noisy_test)\n\n maxPosteriorSNR= 100 \n minPosteriorSNR= 1\n \n #NFFT=256 \n #hop_length_sample = 128 \n #winfunc = 'hamming' \n NFFT=args.num_FFT\n hop_length_sample = args.hop_size\n winfunc = args.window\n \n smoothFactorDD=0.99\n\n # the variance of the speech; lambda_x(k)\n #noisy\n stft_noisy_test = librosa.stft(noisy_test, n_fft=NFFT, hop_length=hop_length_sample, window=winfunc) \n magnitude_noisy_test, phase_noisy_test = divide_magphase(stft_noisy_test, power=1)\n \n pSpectrum = magnitude_noisy_test**2 \n \n # estimate the variance of the noise using minimum statistics noise PSD estimation ; lambda_d(k). 
\n estNoise = estnoisem(pSpectrum,hop_length_sample/sr) \n estNoise = estNoise\n \n aPosterioriSNR=pSpectrum/estNoise \n aPosterioriSNR=aPosterioriSNR\n aPosterioriSNR[aPosterioriSNR > maxPosteriorSNR] = maxPosteriorSNR\n aPosterioriSNR[aPosterioriSNR < minPosteriorSNR] = minPosteriorSNR\n\n previousGainedaPosSNR=1 \n (nFrames,nFFT2) = pSpectrum.shape \n totalGain =[]\n for i in range(nFFT2): \n aPosterioriSNR_frame = aPosterioriSNR[:,i] \n \n #operator [2](52)\n oper=aPosterioriSNR_frame-1\n oper[oper < 0] = 0 \n smoothed_a_priori_SNR = smoothFactorDD * previousGainedaPosSNR + (1-smoothFactorDD) * oper\n \n #V for MMSE estimate ([2](8)) \n V=smoothed_a_priori_SNR*aPosterioriSNR_frame/(1+smoothed_a_priori_SNR) \n \n #Calculate Gain function which results from the MMSE [2](7),(12).\n gain= smoothed_a_priori_SNR/(1+smoothed_a_priori_SNR) \n if any(V<1):\n gain[V<1] = (math.gamma(1.5) * np.sqrt(V[V<1])) / aPosterioriSNR_frame[V<1] * np.exp(-1 * V[V<1] / 2) * ((1 + V[V<1]) * bessel(0, V[V<1] / 2) + V[V<1] * bessel(1, V[V<1] / 2))\n \n previousGainedaPosSNR = (gain**2) * aPosterioriSNR_frame\n totalGain.append(gain)\n \n totalGain=np.array(totalGain)\n\n magnitude_estimated_clean = totalGain.T * magnitude_noisy_test\n stft_reconstructed_clean = merge_magphase(magnitude_estimated_clean, phase_noisy_test)\n signal_reconstructed_clean =librosa.istft(stft_reconstructed_clean, hop_length=hop_length_sample, window=winfunc)\n signal_reconstructed_clean=signal_reconstructed_clean.astype('int16')\n \n wav.write(output_path_estimated_noisy_test,sr,signal_reconstructed_clean)\n \n #display signals, spectrograms\n show_signal(clean_test,noisy_test,signal_reconstructed_clean,sr)\n show_spectrogram(clean_test,noisy_test, signal_reconstructed_clean,sr,NFFT,hop_length_sample)", "title": "" }, { "docid": "c714e452089b52556c59c144abd2c048", "score": "0.5080722", "text": "def main():\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n # initialize variables from command line + defaults\n FIG = {} # plot dictionary\n FIG['demag'] = 1 # demag is figure 1\n in_file = pmag.get_named_arg_from_sys(\"-f\", default_val=\"measurements.txt\")\n plot_by = pmag.get_named_arg_from_sys(\"-obj\", default_val=\"loc\")\n name_dict = {'loc': 'location', 'sit': 'site',\n 'sam': 'sample', 'spc': 'specimen'}\n plot_key = name_dict[plot_by]\n LT = \"LT-\" + pmag.get_named_arg_from_sys(\"-LT\", \"AF\") + \"-Z\"\n if LT == \"LT-T-Z\":\n units, dmag_key = 'K', 'treat_temp'\n elif LT == \"LT-AF-Z\":\n units, dmag_key = 'T', 'treat_ac_field'\n elif LT == 'LT-M-Z':\n units, dmag_key = 'J', 'treat_mw_energy'\n else:\n units = 'U'\n no_norm = pmag.get_flag_arg_from_sys(\"-N\")\n norm = 0 if no_norm else 1\n no_plot = pmag.get_flag_arg_from_sys(\"-sav\")\n plot = 0 if no_plot else 1\n fmt = pmag.get_named_arg_from_sys(\"-fmt\", \"svg\")\n XLP = pmag.get_named_arg_from_sys(\"-XLP\", \"\")\n dir_path = pmag.get_named_arg_from_sys(\"-WD\", os.getcwd())\n spec_file = pmag.get_named_arg_from_sys(\"-fsp\", default_val=\"specimens.txt\")\n samp_file = pmag.get_named_arg_from_sys(\"-fsa\", default_val=\"samples.txt\")\n site_file = pmag.get_named_arg_from_sys(\"-fsi\", default_val=\"sites.txt\")\n\n # create contribution and add required headers\n fnames = {\"specimens\": spec_file, \"samples\": samp_file, 'sites': site_file}\n contribution = nb.Contribution(dir_path, single_file=in_file,\n custom_filenames=fnames)\n file_type = list(contribution.tables.keys())[0]\n print(len(contribution.tables['measurements'].df), ' records read from ', 
in_file)\n # add plot_key into measurements table\n if plot_key not in contribution.tables['measurements'].df.columns:\n contribution.propagate_name_down(plot_key, 'measurements')\n data_container = contribution.tables[file_type]\n # pare down to only records with useful data\n # grab records that have the requested code\n data_slice = data_container.get_records_for_code(LT)\n # and don't have the offending code\n data = data_container.get_records_for_code(XLP, incl=False, use_slice=True,\n sli=data_slice, strict_match=False)\n\n # make sure quality is in the dataframe\n if 'quality' not in data.columns:\n data['quality'] = 'g'\n # get intensity key and make sure intensity data is not blank\n intlist = ['magn_moment', 'magn_volume', 'magn_mass']\n IntMeths = [col_name for col_name in data.columns if col_name in intlist]\n # get rid of any entirely blank intensity columns\n for col_name in IntMeths:\n if not data[col_name].any():\n data.drop(col_name, axis=1, inplace=True)\n IntMeths = [col_name for col_name in data.columns if col_name in intlist]\n if len(IntMeths) == 0:\n print('No intensity headers found')\n sys.exit()\n\n int_key = IntMeths[0] # plot first intensity method found - normalized to initial value anyway - doesn't matter which used\n data = data[data[int_key].notnull()]\n # make list of individual plots\n # by default, will be by location_name\n plotlist = data[plot_key].unique()\n plotlist.sort()\n pmagplotlib.plot_init(FIG['demag'], 5, 5)\n # iterate through and plot the data\n for plt in plotlist:\n plot_data = data[data[plot_key] == plt].copy()\n if plot:\n print(plt, 'plotting by: ', plot_key)\n if len(plot_data) > 2:\n title = plt\n spcs = []\n spcs = plot_data['specimen'].unique()\n for spc in spcs:\n INTblock = []\n spec_data = plot_data[plot_data['specimen'] == spc]\n for ind, rec in spec_data.iterrows():\n INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, rec['quality']])\n if len(INTblock) > 2:\n pmagplotlib.plotMT(FIG['demag'], INTblock,\n title, 0, units, norm)\n\n if not plot:\n files = {}\n for key in list(FIG.keys()):\n files[key] = title + '_' + LT + '.' + fmt\n pmagplotlib.saveP(FIG, files)\n #sys.exit()\n else:\n pmagplotlib.drawFIGS(FIG)\n prompt = \" S[a]ve to save plot, [q]uit, Return to continue: \"\n ans = input(prompt)\n if ans == 'q':\n sys.exit()\n if ans == \"a\":\n files = {}\n for key in list(FIG.keys()):\n files[key] = title + '_' + LT + '.' 
+ fmt\n pmagplotlib.saveP(FIG, files)\n pmagplotlib.clearFIG(FIG['demag'])", "title": "" }, { "docid": "a44adc2b8fc3f03111bfbbc7c3c73c44", "score": "0.5079008", "text": "def visualize(self):", "title": "" }, { "docid": "7f37ebdf34057481eaada32313e7ccf7", "score": "0.5078298", "text": "def fig_test_nhi():\n from sklearn.linear_model import Ridge\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n\n outfile = 'fig_test_nhi.pdf'\n # Load ML\n ml_abs = pred_to_tbl('../Vetting/data/test_dlas_5k96451_predictions.json.gz')\n # Load Test\n test_dlas = test_to_tbl('../Vetting/data/test_dlas_5k96451.json.gz')\n # Load vette\n vette_5k = ltu.loadjson('../Vetting/vette_5k.json')\n\n # Scatter plot of NHI\n test_ml_idx = np.array(vette_5k['test_idx'])\n any_abs = test_ml_idx != -99999\n #dz = ml_abs['zabs'][test_ml_idx[match]] - test_dlas['zabs'][match]\n abs_idx = np.abs(test_ml_idx)\n\n # Grab columns\n pred_NHI = ml_abs['NHI'][abs_idx[any_abs]] - ml_abs['biasNHI'][abs_idx[any_abs]]\n true_NHI = test_dlas['NHI'][any_abs]\n\n\n # Ridge regression & plot\n # p = np.polyfit(x,y,degree)\n degree, alpha = 3, 1\n model = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=alpha))\n model.fit(pred_NHI.reshape(-1,1), true_NHI)\n rval = np.linspace(20.0, 22.25, 1000)\n r_pred = model.predict(rval.reshape(-1, 1))\n # plt.plot(r, r_pred, linewidth=2, color='green')\n #plt.plot(r, np.polyval(p, r), linewidth=2, color='green')\n\n # Get polynomial from the model\n p = np.flipud(model.get_params()['ridge'].coef_)\n p[-1] += model.get_params()['ridge'].intercept_\n np.set_printoptions(precision=52)\n print(p)\n print(np.polyval(p, 21.0))\n\n # Start the plot\n fig = plt.figure(figsize=(5, 5))\n plt.clf()\n gs = gridspec.GridSpec(1,1)\n #xlim = (3820., 4750)\n #ylim = (-2., 18.)\n\n ax = plt.subplot(gs[0])\n ax.scatter(pred_NHI, true_NHI, s=0.1)\n\n # One-to-one line\n ax.plot(rval, rval, ':', color='gray')\n\n # Fit\n ax.plot(rval, r_pred, 'b--')\n\n ax.set_xlabel(r'Predicted $\\log \\, N_{\\rm HI}$ (Uncorrected)')\n ax.set_ylabel(r'True $\\log \\, N_{\\rm HI}$')\n ax.yaxis.set_major_locator(plt.MultipleLocator(0.5))\n #ax.set_xlim(0.6, 200)\n\n\n set_fontsize(ax, 15.)\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.1, w_pad=0.2)\n plt.savefig(outfile)\n plt.close()\n print(\"Wrote {:s}\".format(outfile))", "title": "" }, { "docid": "18aceef6eaf3d8d0778b55072a9b70d6", "score": "0.5075967", "text": "def nn_overlays(ws, save=False):\r\n\r\n # Setup the figure\r\n fig, ax = plt.subplots(dpi=dpi, figsize=(inches, inches))\r\n best = ws.summary['Simulation'].loc[ws.summary['RMSE'] == np.nanmin(ws.summary['RMSE'])].values[0]\r\n\r\n # Add a grid and a line for MHW\r\n ax.grid(color='lightgrey', linewidth=0.5, zorder=0)\r\n ax.axhline(y=0.34, color='darkblue', linewidth=2, linestyle='--', zorder=2, label='MHW')\r\n\r\n # Add a line for the fence if necessary\r\n if ws.adjusts['Fence'] is None:\r\n pass\r\n else:\r\n ax.axvline(x=ws.adjusts['Fence'], color='sienna', linewidth=1, zorder=4, label='Fence')\r\n\r\n # Plot the initial and final field profiles\r\n ax.plot(ws.profiles['X'], ws.profiles['Field Init'], color='black', linewidth=2, linestyle='--', zorder=4, label='Field$_{0}$')\r\n ax.plot(ws.profiles['X'], ws.profiles['Field Final'], color='black', linewidth=2, linestyle='-', zorder=30, label='Field$_{f}$')\r\n\r\n # Plot the initial model profile and best model profile\r\n ax.plot(ws.profiles['X'], ws.profiles['Model Init'], color='red', linewidth=2, 
linestyle='--', zorder=4, label='Model$_{0}$')\r\n ax.plot(ws.profiles['X'], ws.profiles[f'Run {best}'], color='red', linewidth=2, linestyle='-', zorder=40, label='Model$_{f}$')\r\n\r\n # Plot all the other model results\r\n for ii in range(1, ws.runs+1):\r\n ax.plot(ws.profiles['X'], ws.profiles[f'Run {ii}'],\r\n color='darkgray', linewidth=1, linestyle='-', zorder=6 + ii)\r\n\r\n # Add a legend\r\n ax.legend(loc='upper right', fancybox=False, edgecolor='black')\r\n\r\n # Set the X-Axis\r\n ax.set_xlim(left=0, right=ws.adjusts['Right'])\r\n ax.set_xlabel('Cross-Shore Distance (m)', **font)\r\n\r\n # Set the Y-Axis\r\n ax.set_ylim(bottom=-1, top=ws.adjusts['Top'])\r\n ax.set_ylabel('Elevation (m NAVD88)', **font)\r\n\r\n # Save and close the figure\r\n title = f'BGB{ws.p} Neural Network Final Profiles'\r\n save_and_close(fig, title, save)", "title": "" }, { "docid": "3898eb51c5d54f059a48f78710f99977", "score": "0.5064971", "text": "def vis_segmentation(image, seg_map,path):\n\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n \n plt.imshow(seg_image)\n plt.imsave('./result/'+path.split('/')[-1][:-4]+'_color.', seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.4)\n# seg_image=Image.open('./result/'+path.split('/')[-1][:-4]+'_color.png').convert(\"RGB\")\n seg_image=Image.fromarray(seg_image) \n img_mix = np.asarray(Image.blend(image, seg_image, 0.4))\n plt.imsave('./result/'+path.split('/')[-1][:-4]+'_color_image.', img_mix)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()", "title": "" }, { "docid": "2847f9be9f65a79df445f89ce54326ae", "score": "0.50518787", "text": "def simple_plot_script(model, **options):\n defaults = {\"noise\": None,\n \"s\": None,\n \"index\": None,\n \"fit_stats\": None}\n plot_options = {}\n for key, value in defaults.iteritems():\n plot_options[key] = value\n for key, value in options.iteritems():\n plot_options[key] = value\n if plot_options['index']:\n index = plot_options['index']\n else:\n index = 0\n if plot_options['fit_stats']:\n fit_stats = plot_options['fit_stats']\n else:\n fit_stats = False\n if plot_options['noise']:\n noise = plot_options['noise']\n else:\n if index == 0 or index == 3:\n noise = 1E-5 if type(model) == ShortModel else 1E-7\n else:\n noise = 5E-6 if index % 3 == 0 else 5E-7\n if plot_options['s']:\n sim_s = plot_options['s']\n else:\n sim_s = model.s_parameters()[index] + noise*np.random.randn(len(model.f))\n\n p = lmfit.Parameters()\n\n if type(model) == ShortModel and not model.complex:\n p.add_many(('l', model.l), ('z0', model.z0))\n # Calc S11\n if index == 0:\n def residual(param):\n v = param.valuesdict()\n return (v['z0'] - 2*math.pi*model.f*v['l']) / (v['z0'] + 2*math.pi*model.f*v['l']) - sim_s\n # Calc S22\n elif index == 3:\n def residual(param):\n v = param.valuesdict()\n return (2*math.pi*model.f*v['l'] - v['z0']) / (v['z0'] + 2*math.pi*model.f*v['l']) - sim_s\n # Calc 
S12/21\n else:\n p.add('s', 0.98)\n def residual(param):\n v = param.valuesdict()\n return (v['z0']/(2*np.pi*model.f*v['l']))**(1/2) * (1 - v['s']) - sim_s\n\n elif type(model) == OpenModel and not model.complex:\n p.add_many(('c', model.c), ('z0', model.z0))\n\n # Calc S11\n if index == 0:\n def residual(param):\n v = param.valuesdict()\n return (v['z0'] - 1 / (2*math.pi*model.f*v['c'])) / (v['z0'] + 1 / (2*math.pi*model.f*v['c'])) - sim_s\n\n # Calc S22\n elif index == 3:\n def residual(param):\n v = param.valuesdict()\n return (1/(2*math.pi*model.f*v['c']) - v['z0']) / (v['z0'] + 1/(2*math.pi*model.f*v['c'])) - sim_s\n\n # Calc S12/21\n else:\n p.add('s', 0.98)\n def residual(param):\n v = param.valuesdict()\n return ((2*math.pi*model.f*v['c']) / v['z0'])**(1 / 2) * (1 - v['s']) - sim_s\n\n else:\n print('Model must be type ShortModel or OpenModel')\n return\n\n mi = lmfit.minimize(residual, p, method=\"powell\" if index % 3 == 0 else \"leastsq\")\n if fit_stats:\n print(lmfit.fit_report(mi, show_correl=False))\n return [model.f, abs(sim_s), abs(residual(mi.params) + sim_s)]", "title": "" }, { "docid": "46ebe5dcba2734e9963ba38a1564911f", "score": "0.50492424", "text": "def plot_sup_x_unsup(data, w, h):\n # Colors to be used (upt to 5 classes)\n colors = np.array(['black', 'blue', 'red', 'green', 'purple'])\n\n # Getting the column and classes' names\n col_names = data.columns.to_numpy()\n target_names = data['target'].to_numpy()\n\n # Getting numerical values for the classes labels\n target = np.unique(data['target'].to_numpy(), return_inverse=True)\n\n # Getting X1 and X2\n data = data.iloc[:, 0:2].to_numpy()\n\n # Creates the Figure\n plt.figure(0, figsize=(w, h))\n\n # Create two subplots\n plt.subplots_adjust(right=2.5)\n\n # Get the first subplot, which is the Supervised one.\n plt.subplot(1, 2, 1)\n ax = plt.gca()\n for i, label in enumerate(target[0]):\n plt.scatter(data[target_names == label, 0],\n data[target_names == label, 1],\n c=colors[i], label=label)\n\n # Creates the legend\n plt.legend(loc='best', fontsize=22, frameon=True)\n\n # Name the axes and creates title\n plt.xlabel(col_names[0], fontsize=1.5*(w + h))\n plt.ylabel(col_names[1], fontsize=1.5*(w + h))\n plt.title(\"Supervised\", fontdict={'fontsize': 2 * (w + h)})\n\n# ax.set_xticklabels(ax.get_xticks(), fontdict={'fontsize': w + h})\n# ax.set_yticklabels(ax.get_yticks(), fontdict={'fontsize': w + h})\n\n # Creates the unsupervised subplot.\n plt.subplot(1, 2, 2)\n ax = plt.gca()\n plt.scatter(data[:, 0], data[:, 1])\n plt.title(\"Unsupervised\", fontdict={'fontsize': 2 * (w + h)})\n plt.xlabel(col_names[0], fontsize=1.5*(w + h))\n plt.ylabel(col_names[1], fontsize=1.5*(w + h))", "title": "" }, { "docid": "2cfb5355ceec68ace196a48fb304e114", "score": "0.50467765", "text": "def mechplot2D(mpos,wpos,i):\n \n plt.figure('Front leg')\n plt.title('Mechanism front leg hinge traces')\n plt.axis('equal')\n \n plt.plot(mpos.front.Q[0],mpos.front.Q[1],'b')\n plt.plot(mpos.front.P[0],mpos.front.P[1],'b')\n plt.plot(mpos.front.B[0],mpos.front.B[1],'b')\n plt.plot(mpos.front.A[0],mpos.front.A[1],'b')\n plt.plot(mpos.front.O[0],mpos.front.O[1],'bx')\n \n plt.plot(wpos.front.E[0],wpos.front.E[1],'r')\n plt.plot(wpos.front.C[0],wpos.front.C[1],'r')\n plt.plot(wpos.front.D[0],wpos.front.D[1],'r')\n \n plt.plot(wpos.eqspar_geom[1].x, wpos.eqspar_geom[1].y, 'k*')\n \n if i>=0:\n\n plt.plot((mpos.front.B[0,i],0),(mpos.front.B[1,i],0),'g')\n plt.plot((mpos.front.B[0,i],mpos.front.Q[0,i]),(mpos.front.B[1,i],mpos.front.Q[1,i]),'g')\n 
plt.plot((mpos.front.A[0,i],mpos.front.P[0,i]),(mpos.front.A[1,i],mpos.front.P[1,i]),'g')\n plt.plot((mpos.front.A[0,i],0),(mpos.front.A[1,i],0),'g')\n \n plt.plot((wpos.front.A[0,i],wpos.front.E[0,i]),(wpos.front.A[1,i],wpos.front.E[1,i]),'g')\n plt.plot((wpos.front.O[0,i],wpos.front.C[0,i]),(wpos.front.O[1,i],wpos.front.C[1,i]),'g')\n plt.plot((wpos.front.C[0,i],wpos.front.E[0,i]),(wpos.front.C[1,i],wpos.front.E[1,i]),'g')\n plt.plot((wpos.front.C[0,i],wpos.front.D[0,i]),(wpos.front.C[1,i],wpos.front.D[1,i]),'g')\n \n plt.figure('Back leg')\n plt.title('Mechanism front back hinge traces')\n plt.axis('equal')\n \n plt.plot(mpos.back.Q[0],mpos.back.Q[1],'b')\n plt.plot(mpos.back.P[0],mpos.back.P[1],'b')\n plt.plot(mpos.back.B[0],mpos.back.B[1],'b')\n plt.plot(mpos.back.A[0],mpos.back.A[1],'b')\n plt.plot(mpos.back.O[0],mpos.back.O[1],'bx')\n \n plt.plot(wpos.back.E[0],wpos.back.E[1],'r')\n plt.plot(wpos.back.C[0],wpos.back.C[1],'r')\n plt.plot(wpos.back.D[0],wpos.back.D[1],'r')\n\n if i>=0:\n\n plt.plot((mpos.back.B[0,i],0),(mpos.back.B[1,i],0),'g')\n plt.plot((mpos.back.B[0,i],mpos.back.Q[0,i]),(mpos.back.B[1,i],mpos.back.Q[1,i]),'g')\n plt.plot((mpos.back.A[0,i],mpos.back.P[0,i]),(mpos.back.A[1,i],mpos.back.P[1,i]),'g')\n plt.plot((mpos.back.A[0,i],0),(mpos.back.A[1,i],0),'g')\n \n plt.plot((wpos.back.A[0,i],wpos.back.E[0,i]),(wpos.back.A[1,i],wpos.back.E[1,i]),'g')\n plt.plot((wpos.back.O[0,i],wpos.back.C[0,i]),(wpos.back.O[1,i],wpos.back.C[1,i]),'g')\n plt.plot((wpos.back.C[0,i],wpos.back.E[0,i]),(wpos.back.C[1,i],wpos.back.E[1,i]),'g')\n plt.plot((wpos.back.C[0,i],wpos.back.D[0,i]),(wpos.back.C[1,i],wpos.back.D[1,i]),'g')\n \n plt.show()", "title": "" }, { "docid": "10dfd87d8e5202e05843b718fe4b5ae1", "score": "0.50445354", "text": "def plot_composition_evolution(compounds, ts, ys, y_axis, rs=None):\n\n sns.set(style=\"ticks\") # sets sns as the rule\n sns.set_context(\"talk\")\n sns.set_palette(\"Spectral\")\n\n N_cmpd = len(compounds)\n compound_names = [defs['name'] for name, defs in compounds.items()]\n\n hrs = ts / 3600\n for tick in range(N_cmpd):\n ax = sns.lineplot(x=hrs, y=ys[:, tick], label=compound_names[tick], alpha=1)\n\n if y_axis == 'M':\n y_label = 'M (mol L$^{-1}$)'\n elif y_axis == 'n':\n y_label = 'moles'\n elif y_axis == 'N':\n y_label = 'molecules'\n else:\n print('y parameter not valid for plotting')\n\n ax.set(xlabel='time (hr)', ylabel=y_label)\n ax.legend(title='Compound')\n\n if rs is not None:\n ums = rs * 1e6\n\n ax2 = ax.twinx()\n ax2.plot(hrs, ums, color='black', linewidth=2, linestyle='--', alpha=0.4)\n ax2.set(ylabel='r (um)')\n\n project_dir = get_project_directory()\n today_str = date.today().strftime(\"%Y%m%d\")\n cmpd_strings = '_'.join(compound_names)\n fig = today_str[2:] + '-' + 'model' + '-' + cmpd_strings + '-evap.png'\n fig_path = os.path.join(project_dir, 'results', fig)\n\n plt.savefig(fig_path, bbox_inches='tight', dpi=300, transparent=True)", "title": "" }, { "docid": "046833e90592e93ceb7d55df7b7aa9c3", "score": "0.50440794", "text": "def plot():", "title": "" }, { "docid": "2b5958faf00f191db3e847e4b5db3a5e", "score": "0.5042752", "text": "def Scatter_TSTS_TRTR(E_table):\n df_dic = list(set(E_table.keys().get_level_values(0).tolist()))\n models = (E_table.index).tolist()\n l = len(df_dic)\n for n in range(l):\n key = df_dic[n]\n max_v = np.max(E_table[key,'ori MSE'])\n\n #plotting the data points\n fig = go.Figure(data=go.Scatter(\n x=E_table[key,'ori MSE'],\n y=E_table[key,'gen MSE'],\n mode='markers',\n text=E_table.index,\n 
name='data'))\n # plotting the diagonal\n fig.add_trace(go.Scatter(x=[0,max_v], y=[0,max_v], mode='lines', name='diagonal'))\n fig.update_layout(xaxis_title=\"MSE of original data\",\n yaxis_title=\"MSE of generated data\",\n title=key)\n #fig.write_image(\"png_files/3.1 DWP/scatter from IIIE Table '+ key+'.png\")\n fig.show()", "title": "" }, { "docid": "e0a12b651d688966e14ae5ec6c365b40", "score": "0.5036825", "text": "def plot_sample(X_mal, noise, generator, target_model, epoch,\n TPR_train, TPR_test, avg_changes, m_label=1, g_label=0,\n params={}, annotate=True, out_dir='.', plot_id=None,\n xz_input=True, dpi=100):\n sample_sz = 8\n if type(target_model) == LinearSVC:\n weights = target_model.coef_.flatten()\n elif type(target_model) == SVC:\n weights = target_model.coef_.toarray().flatten()\n\n # Top N negative features\n N = 20\n top_neg = np.argpartition(weights, range(N))[:N]\n top_neg = np.unravel_index(top_neg, (100, 100))\n top_neg = list(zip(*top_neg))\n weights = np.round(weights.reshape((100, 100)), 2)\n\n Y_mal = target_model.predict(X_mal)\n DF_mal = target_model.decision_function(X_mal)\n\n X_adv = generator.predict([X_mal, noise])\n X_adv = binarise(X_adv).numpy() # numpy to reshape\n Y_adv = target_model.predict(X_adv)\n DF_adv = target_model.decision_function(X_adv)\n\n n_feats_mal = np.count_nonzero(X_mal, axis=1)\n diff = X_adv - X_mal\n dist1 = np.linalg.norm(diff, ord=1, axis=1)\n dist2 = np.linalg.norm(diff, ord=2, axis=1)\n\n fig = plt.figure(num='Sample', figsize=(16, 16), facecolor='w', dpi=dpi)\n title = \\\n (f\"A sample of original malware \\& corresponding AEs \"\n f\"[Epoch: {epoch + 1}] \"\n f\"[Evasion Rates (Current, Best)\\%: \"\n f\"Test: ({100 * (1 - TPR_test[-1]):.2f}, {100 * (1 - min(TPR_test)):.2f}) \"\n f\"Train: ({100 * (1 - TPR_train[-1]):.2f}, {100 * (1 - min(TPR_train)):.2f})] \"\n f\"[Avg \\# changes: {avg_changes:.0f}]\")\n fig.suptitle(title, c='r', x=0.5, y=0.99, fontsize=16, fontweight='bold',\n bbox=dict(facecolor='none', edgecolor='red'))\n font_param = {'size': 14, 'color': 'k'}\n fig.text(0.5, 0.95, params, fontsize=14, ha='center', va='center',\n bbox=dict(fc='none', ec='k', pad=6))\n idx = 0\n for row in range(1, 2 * sample_sz + 1, sample_sz):\n for i in range(sample_sz // 2):\n subplot = row + i\n # Malware subplots\n ax = plt.subplot(4, 4, subplot)\n img = X_mal[idx].reshape((100, 100))\n plt.imshow(img, cmap='gray', interpolation='none')\n # pred_color = {M (mal): k, G (good): r}\n pred, color = ('M', 'k') if Y_mal[idx] == m_label else ('G', 'r')\n pred = r\"$\\bf{[\" + pred + \"]}$\" # Predicted label in bold\n ax.set_title(f'{pred} DF({DF_mal[idx]:.2f}) :: '\n f'Features({n_feats_mal[idx]:.0f})',\n color=color, fontsize=14)\n plt.axis('off')\n\n # Adversarial subplots\n ax = plt.subplot(4, 4, subplot + 4)\n img = X_adv[idx].reshape((100, 100))\n plt.imshow(img, cmap='gray', interpolation='none')\n # pred_color = {M (mal): r, G (good): g}\n pred, color = ('M', 'r') if Y_adv[idx] == m_label else ('G', 'g')\n pred = r\"$\\bf{[\" + pred + \"]}$\" # Predicted label in bold\n img = diff[idx].reshape((100, 100))\n # plt.imshow(img, alpha=img, cmap='spring_r')\n\n # Plot added features\n y, x = np.where(img == 1)\n c = []\n for i, j in zip(x, y):\n w = weights[j][i]\n if w < 0: # Features with -ve weights\n c.append('g')\n elif w > 0:\n c.append('r') # Features with +ve weights\n else:\n c.append('c')\n\n plt.scatter(x, y, s=25, marker='o', c=c)\n # plt.scatter(x, y, s=100, marker='o', c='None', ec='y') # frame\n # Annotate with 
weights\n if annotate or dist1[idx] <= 15: # Annotate if <12\n for i, j in zip(x, y):\n w = weights[j][i]\n if w != 0: # Annotate non-zero w\n c = 'yellow' if w < 0 else 'darkorange'\n fw = 'bold' if (j,\n i) in top_neg else 'normal' # Top feat\n ax.annotate(w, (i, j), (i, j - 1), size=10, c=c,\n weight=fw)\n\n ax.set_title(f'{pred} DF({DF_adv[idx]:.2f}) :: '\n f'L1({dist1[idx]:.0f}) :: '\n f'L2({dist2[idx]:.1f})',\n color=color, fontsize=14)\n plt.axis('off')\n idx = idx + 1\n\n # Row labels\n font_mal = {'size': 16, 'weight': 'bold', 'color': 'k'}\n font_adv = {'size': 16, 'weight': 'bold', 'color': 'r'}\n fig.text(0.002, 0.82, 'Original', va='top', rotation='vertical',\n fontdict=font_mal)\n fig.text(0.002, 0.57, 'Adversarial', va='center', rotation='vertical',\n fontdict=font_adv)\n fig.text(0.002, 0.35, 'Original', va='center', rotation='vertical',\n fontdict=font_mal)\n fig.text(0.002, 0.12, 'Adversarial', va='center', rotation='vertical',\n fontdict=font_adv)\n\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n # plt.savefig(out_dir + f'{id}_epoch_{epoch}_loss_{loss}.png')\n plt.show()", "title": "" }, { "docid": "7746d34a8ac6fd768f2f16af9833e882", "score": "0.50353414", "text": "def plot_base_model():\n space_base_demo_to_plot = {\n 'lr_rate_mult': 1.0,\n 'l2_weight_reg_mult': 1.0,\n 'batch_size': 300,\n 'optimizer': 'Nadam',\n 'coarse_labels_weight': 0.2,\n 'conv_dropout_drop_proba': 0.175,\n 'fc_dropout_drop_proba': 0.3,\n 'use_BN': True,\n\n 'first_conv': 4,\n 'residual': 4,\n 'conv_hiddn_units_mult': 1.0,\n 'nb_conv_pool_layers': 3,\n 'conv_pool_res_start_idx': 0.0,\n 'pooling_type': 'inception',\n 'conv_kernel_size': 3.0,\n 'res_conv_kernel_size': 3.0,\n\n 'fc_units_1_mult': 1.0,\n 'one_more_fc': 1.0,\n 'activation': 'elu'\n }\n plot(space_base_demo_to_plot, \"model_demo\")", "title": "" }, { "docid": "bfd108957b3040971bb52b8843097ff9", "score": "0.5031583", "text": "def simple_trajPlot(ws,nu_overW,Ws, params):\n #Preprocess\n unNu_overW,unw = unfoldNu_overW(nu_overW,ws)\n if len(np.shape(Ws))>1:\n meanW = np.mean(Ws,0)\n stdW = np.std(Ws,0)\n else:\n meanW = Ws\n name = get_hash(params)\n st,gid = read_gdf(params['directory'],name,(0,params['simtime']),threads = params['threads'])\n meanW_burst,meanSC_burst = meanTraj(meanW,st,gid,params,bin_size =20,\n primer=(0,50),\n interp =False,smooth=False,smooth_par=(5,3), interp_dt=0.05)\n plt.figure()\n plt.plot(unw/(20),unNu_overW,color = 'k',label ='analytics',linewidth = 4)\n #plt.plot(ws/20,nu_overW[0,:,:],'.',color = 'r', label ='analytics')\n #plt.plot(ws,FRs[2,:],'.',label = 'numerics');\n #plt.plot(mw_stack,sc,'-',alpha = 0.3); plt.show(block =False)\n i0,i1 =0,len(meanW_burst)\n [plt.plot(mw,sc_,'-',alpha =0.1) for mw,sc_ in zip(meanW_burst[i0:i1],meanSC_burst[i0:i1])]\n# sns.kdeplot(np.hstack(meanW_burst),np.hstack(meanSC_burst),cmap=\"Blues\",\n# shade=True, shade_lowest=False,bw=(0.02,2.))\n plt.xlabel('fixed w')\n plt.ylabel('Rate (Hz)')\n plt.legend()\n# plt.xlim([np.min(unw/20),np.max(meanW)])\n# plt.ylim([0,np.max(mean)])\n #plt.axvline(np.max(FPs)*params['b']/ 20)\n# sns.despine(trim =True)\n plt.show(block = False)", "title": "" }, { "docid": "065c3ca954c623702e6d22f2c5cafa44", "score": "0.50313705", "text": "def plot(self, x, slab, gm, bg=False, min=None,\n max=None, X=None, Y=None, **kargs):\n\n displays = []\n kargs[\"donotstoredisplay\"] = True\n kargs[\"render\"] = False\n # now remembers the viewport and worldcoordinates in order to reset\n # them later\n vp = x._viewport\n wc = x._worldcoordinate\n # 
m=x.mode\n # and resets everything to [0,1]\n x._viewport = [0, 1, 0, 1]\n x._worldcoordinate = [0, 1, 0, 1]\n # x.mode=0 # this should disable the replot but it doesn't work....\n\n displays += self.drawAttributes(x, slab, gm, bg=bg, **kargs)\n\n kargs[\"donotstoredisplay\"] = True\n if not isinstance(gm, vcs.taylor.Gtd):\n nms = [\"x\", \"y\", \"z\", \"t\"]\n for i, ax in enumerate(slab.getAxisList()[-2:][::-1] +\n [kargs.get(\"zaxis\", None), kargs.get(\"taxis\", None)]):\n if (hasattr(gm, \"projection\") and\n vcs.elements[\"projection\"][gm.projection].type\n in round_projections) or ax is None:\n continue\n for att in [\"name\", \"units\", \"value\"]:\n nm = nms[i] + att\n sub = getattr(self, nm)\n tt = x.createtext(\n None,\n sub.texttable,\n None,\n sub.textorientation)\n if att == \"name\":\n if i == 0 and gm.g_name == \"G1d\":\n if gm.flip or hasattr(slab, \"_yname\"):\n tt.string = [slab.id]\n else:\n tt.string = [ax.id]\n elif i == 1 and gm.g_name == \"G1d\":\n if hasattr(slab, \"_yname\"):\n tt.string = [slab._yname]\n else:\n tt.string = [ax.id]\n else:\n tt.string = [ax.id]\n elif att == \"units\":\n tt.string = [getattr(ax, \"units\", \"\")]\n tt.x = [sub.x, ]\n tt.y = [sub.y, ]\n tt.priority = sub._priority\n # This is the name of the axis. It should be transformed\n # through geographic projection but it is not at the moment\n displays.append(x.text(tt, bg=bg, **kargs))\n sp = tt.name.split(\":::\")\n del(vcs.elements[\"texttable\"][sp[0]])\n del(vcs.elements[\"textorientation\"][sp[1]])\n del(vcs.elements[\"textcombined\"][tt.name])\n\n if X is None:\n X = slab.getAxis(-1)\n if Y is None:\n Y = slab.getAxis(-2)\n wc2 = vcs.utils.getworldcoordinates(gm, X, Y)\n wc2 = kargs.get(\"plotting_dataset_bounds\", wc2)\n vp2 = [self.data._x1, self.data._x2, self.data._y1, self.data._y2]\n vp2 = kargs.get(\"ratio_autot_viewport\", vp2)\n\n # Do the tickmarks/labels\n if not isinstance(gm, vcs.taylor.Gtd):\n for axis in [\"x\", \"y\"]:\n for number in [\"1\", \"2\"]:\n for mintic in [False, True]:\n displays += self.drawTicks(slab,\n gm,\n x,\n axis=axis,\n number=number,\n vp=vp2,\n wc=wc2,\n bg=bg,\n X=X,\n Y=Y,\n mintic=mintic,\n **kargs)\n\n if X is None:\n X = slab.getAxis(-1)\n if Y is None:\n Y = slab.getAxis(-2)\n\n wc2 = vcs.utils.getworldcoordinates(gm, X, Y)\n wc2 = kargs.get(\"plotting_dataset_bounds\", wc2)\n\n # Do the boxes and lines\n for tp in [\"box\", \"line\"]:\n for num in [\"1\", \"2\"]:\n e = getattr(self, tp + num)\n if e.priority != 0:\n ln_tmp = x.createline(source=e.line)\n if hasattr(gm, \"projection\"):\n ln_tmp.projection = gm.projection\n if vcs.elements[\"projection\"][\n ln_tmp.projection].type != \"linear\":\n ln_tmp.worldcoordinate = wc2[:4]\n ln_tmp.viewport = kargs.get(\"ratio_autot_viewport\",\n [e._x1, e._x2, e._y1, e._y2])\n dx = (e._x2 - e._x1) / \\\n (self.data.x2 - self.data.x1) * (wc2[1] - wc2[0])\n dy = (e._y2 - e._y1) / \\\n (self.data.y2 - self.data.y1) * (wc2[3] - wc2[2])\n if tp == \"line\":\n ln_tmp._x = [wc2[0], wc2[0] + dx]\n ln_tmp._y = [wc2[2], wc2[2] + dy]\n elif tp == \"box\" and \\\n vcs.elements[\"projection\"][ln_tmp.projection].type in\\\n round_projections:\n ln_tmp._x = [[wc2[0], wc2[1]], [wc2[0], wc2[1]]]\n ln_tmp._y = [[wc2[3], wc2[3]], [wc2[2], wc2[2]]]\n else:\n ln_tmp._x = [\n wc2[0],\n wc2[0] + dx,\n wc2[0] + dx,\n wc2[0],\n wc2[0]]\n ln_tmp._y = [wc2[2], wc2[2], wc2[3], wc2[3], wc2[2]]\n\n # print('boxorline, wc2 = ', wc2)\n else:\n ln_tmp._x = [e._x1, e._x2, e._x2, e._x1, e._x1]\n ln_tmp._y = [e._y1, e._y1, e._y2, 
e._y2, e._y1]\n ln_tmp._priority = e._priority\n displays.append(x.plot(ln_tmp, bg=bg, ratio=\"none\", **kargs))\n del(vcs.elements[\"line\"][ln_tmp.name])\n\n # x.mode=m\n # I think i have to use dict here because it's a valid value\n # (obviously since i got it from the object itself and didn't touch it\n # but Dean doesn't allow to set it back to some of these values (None)!\n x._viewport = vp\n x._worldcoordinate = wc\n return displays", "title": "" }, { "docid": "7c81a5300c6641c4c500b3d266d9f178", "score": "0.50307196", "text": "def show_side_by_side_loss(original, reconstructed):\n batchsize = original.shape[0]\n original = torch.clip(original, 0, 1).detach().cpu()\n reconstructed = torch.clip(reconstructed, 0, 1).detach().cpu()\n for i in range(batchsize):\n fig, axs = plt.subplots(1, 2, figsize=(10, 20))\n fig.tight_layout()\n mseloss = torch.nn.functional.mse_loss(original[i], reconstructed[i])\n print(\"The MSE loss is: \", mseloss.item())\n axs[0].imshow(original[i].permute(1, 2, 0))\n axs[1].imshow(reconstructed[i].permute(1, 2, 0))\n axs[0].tick_params(left=False,\n bottom=False,\n labelleft=False,\n labelbottom=False)\n axs[1].tick_params(left=False,\n bottom=False,\n labelleft=False,\n labelbottom=False)\n plt.show()\n print(\"\\n\\n\\n\")", "title": "" }, { "docid": "8a0518f5c54109f0c3496fba7a6e5ae6", "score": "0.5019269", "text": "def plot_model_structure(self, model, input_size):\n inpt_vars = [torch.randn(i_s) for i_s in input_size]\n\n self.writer.add_graph(model=model, input_to_model=inpt_vars)", "title": "" }, { "docid": "14dbb2ac463468903c6d7fc69dba9983", "score": "0.50165385", "text": "def analysis_RMSE(alphas, gammas, Fs, Gs, plot_gamma):", "title": "" }, { "docid": "dfa31ce258668f3069673915fd800aed", "score": "0.50145394", "text": "def plot_inception_model():\n plot_model(inception_resnet_v1, to_file=\"Inception ResNet-v1.png\", show_shapes=True)", "title": "" }, { "docid": "7a2a3ec6b3a160d582dea51a3427d539", "score": "0.5013498", "text": "def triptych(sim):\n fig = plt.figure(figsize= (14,6))\n plt.subplots_adjust(wspace = 0.3)\n\n for i, label in enumerate(('A', 'B', 'C')):\n ax = plt.subplot(1,3,i+1)\n ax.text(-0.05, 1.08, label, transform=ax.transAxes,\n fontsize=16, fontweight='bold', va='top')\n\n ax1 = plt.subplot(131)\n veg_points(sim.isvegc, dx = sim.dx, ax = ax1)\n\n ax1 = plt.subplot(132)\n zinflplot = colormap(sim,sim['zinflc'], ax = ax1,\n clabel= '$I$ (cm)', colorbar = True , cround = 1)\n\n ax1 = plt.subplot(133)\n zinflplot = colormap(sim,sim['vmax'], ax = ax1, clabel= 'velocity (cm/s)',\n colorbar = True, cmap = \"Blues\",\n cround = 1, veg_scale=False)", "title": "" }, { "docid": "e88279e99314cea11305a9177ca53117", "score": "0.5008903", "text": "def plotTree(self,dim,substitution_patterns=[],line_kwargs={},marker_kwargs={}, right_to_left = False):\n\t\tright_to_left_mult = 1\n\t\tif right_to_left:\n\t\t\tright_to_left_mult = -1\n\t\tl, model_points = self.getTree(substitution_patterns)\n\t\tlast_y = 0\n\t\tfor ll in l:\n\t\t\tif ll[1][0] in self.keys() and ll[1][1] in self.keys():\n\t\t\t\tplt.plot([right_to_left_mult*ll[0][0],right_to_left_mult*ll[0][1]], [np.mean(self.getNode(ll[1][0])[dim]),np.mean(self.getNode(ll[1][1])[dim])],ll[2],**line_kwargs)\n\t\tfor ll in model_points:\n\t\t\tif ll[1][0] in self.keys():\n\t\t\t\tplt.plot(right_to_left_mult*ll[0][0], np.mean(self.getNode(ll[1][0])[dim]),ll[2],**marker_kwargs)\n\t\t\t\tx = ll[0][0]\n\t\t\t\ty = np.mean(self.getNode(ll[1][0])[dim])\n\t\t\t\tif abs(last_y-y) < 0.1:\n\t\t\t\t\ty = y + 
0.1\n\t\t\t\tif 'name' in self.getNode(ll[1][0]):\n\t\t\t\t\tplt.text(x,y, self.getNode(ll[1][0])['name'],rotation=30,va='bottom',size=9)\n\t\t\t\telse:\n\t\t\t\t\tplt.text(x,y, ll[1][0],rotation=30,va='bottom',size=9)\n\t\tplt.title(dim)", "title": "" }, { "docid": "cfe0b7aed5986668a798bfdcd19e675c", "score": "0.5006289", "text": "def plot_max_sed(myfile, overfig=None,thresh=1, save_file=None, extra_thetas=None, **kwargs):\n results, obs, model = rr.results_from(myfile)\n theta_max = get_theta_max(results)\n \n # check keyword arguments\n try:\n sps = kwargs['sps']\n except KeyError: # if key does not exist\n sps = rr.get_sps(results)\n kwargs['sps'] = sps\n except NameError: # if dict does not exist\n kwargs = {}\n sps = rr.get_sps(results)\n kwargs['sps'] = sps\n font_kwargs = {'fontsize': 14}\n\n # wavelengths in angstroms\n spec_wave = dc(sps.wavelengths)\n phot_wave = np.array([f.wave_effective for f in results['obs']['filters']])\n\n # get maximum ln spectrum, original photometry, and quantiled spectra\n obs['wavelength'] = spec_wave # must be set to get proper spec wavelengths\n spec_max, phot_max = model_spec(theta_max, model, obs, sps,units='cgs')\n orig, err, wav_err, det_mask = obs_phot(results, units='cgs')\n quant_specs = spec_range(results, model, obs, **kwargs)\n\n # get fitted photometry\n #obs['wavelength'] = phot_wave # must be set to get proper phot wavelengths\n #spec, phot_max = model_spec(theta_max, model, obs, sps,units='cgs')\n # angstroms to microns\n spec_wave, phot_wave, wav_err = angstrom_micron(spec_wave, phot_wave, wav_err)\n spec_nu, phot_nu = wav_nu([spec_wave, phot_wave])\n #err_nu = np.abs( phot_nu*err / (phot_nu*phot_max*np.log(10)) ) # error prop\n err_nu = phot_nu*err\n\n # plotting time! Declare some stuff first.\n if overfig==None:\n sedfig = plt.figure(num=None, figsize=(12,9), dpi=80)\n sedfig.subplots_adjust(hspace=0)\n gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[3,1])\n ax0 = sedfig.add_subplot(gs[0])\n ax1 = sedfig.add_subplot(gs[1], sharex=ax0)\n else:\n sedfig = overfig[0]\n ax0 = overfig[1]\n ax1 = overfig[2]\n \n\n # plot spectra\n if overfig==None:\n ax0.plot(spec_wave, spec_nu*spec_max, color=colorize('C0'))\n else:\n ax0.plot(spec_wave, spec_nu*spec_max, color=colorize('C3'))\n if quant_specs is not None:\n if overfig is None:\n ax0.fill_between(spec_wave, spec_nu*quant_specs[0],\n spec_nu*quant_specs[1], interpolate=True,\n alpha=0.5, color=colorize('C7'))\n if overfig is not None:\n ax0.fill_between(spec_wave, spec_nu*quant_specs[0],\n spec_nu*[1], interpolate=True,\n alpha=0.2, color=colorize('C3'))\n # handle extra\n if extra_thetas is not None:\n obs['wavelength'] = dc(sps.wavelengths)\n for i in range(extra_thetas.shape[0]):\n color = 'C{}'.format(i+2)\n extra_spec, extra_phot = model_spec(extra_thetas[i,:], model, obs, sps)\n ax0.plot(spec_wave, spec_nu*extra_spec, color=colorize(color))\n\n # plot photometry, first by getting data from this study\n study = np.array([j.startswith('my_spitzer') for j in obs['filternames']])\n ax0.errorbar(phot_wave[det_mask*study], \n phot_nu[det_mask*study]*orig[det_mask*study],\n fmt='o', yerr=err_nu[det_mask*study],\n xerr=wav_err[det_mask*study], \n color=colorize('C3'), ms=9, label='phot')\n ax0.errorbar(phot_wave[~det_mask*study], \n phot_nu[~det_mask*study]*orig[~det_mask*study],\n fmt='v', yerr=0*err_nu[~det_mask*study],\n xerr=wav_err[~det_mask*study], \n color=colorize('C3'), ms=9, label='phot')\n ax0.errorbar(phot_wave[det_mask*~study], \n 
phot_nu[det_mask*~study]*orig[det_mask*~study],\n fmt='o', yerr=err_nu[det_mask*~study],\n xerr=wav_err[det_mask*~study], \n color=colorize('C1'), ms=9, label='phot')\n ax0.errorbar(phot_wave[~det_mask*~study], \n phot_nu[~det_mask*~study]*orig[~det_mask*~study],\n fmt='v', yerr=0*err_nu[~det_mask*~study],\n xerr=wav_err[~det_mask*~study], \n color=colorize('C1'), ms=9, label='phot')\n \"\"\"\n ax0.errorbar(phot_wave[det_mask*study], \n phot_nu[det_mask*study]*orig[det_mask*study],\n fmt='o', yerr=err_nu[det_mask*study],\n xerr=wav_err[det_mask*study], \n color=colorize('C3'), ms=9, label='phot')\n ax0.errorbar(phot_wave[~det_mask*study], \n phot_nu[~det_mask*study]*orig[~det_mask*study],\n fmt='v', yerr=0*err_nu[~det_mask*study],\n xerr=wav_err[~det_mask*study], \n color=colorize('C3'), ms=9, label='phot')\n ax0.errorbar(phot_wave[det_mask*~study], \n phot_nu[det_mask*~study]*orig[det_mask*~study],\n fmt='o', yerr=err_nu[det_mask*~study],\n xerr=wav_err[det_mask*~study], \n color=colorize('C1'), ms=9, label='phot')\n ax0.errorbar(phot_wave[~det_mask*~study], \n phot_nu[~det_mask*~study]*orig[~det_mask*~study],\n fmt='v', yerr=0*err_nu[~det_mask*~study],\n xerr=wav_err[~det_mask*~study], \n color=colorize('C1'), ms=9, label='phot')\n \"\"\"\n # plot residuals\n resid = phot_nu*orig-phot_nu*phot_max\n ax1.plot(phot_wave[~det_mask], resid[~det_mask], marker='v', ls='',\n color=colorize('C0'), ms=9)\n ax1.errorbar(phot_wave[det_mask], resid[det_mask], marker='o', ls='',\n color=colorize('C0'), ms=9, yerr=err_nu[det_mask])\n ax1.axhline(y=0, xmin=0, xmax=(phot_wave+wav_err).max()*1.10, ls='--',\n color=colorize('C7'))\n ax1.set_ylim([-(np.abs(resid).max()*1.10), np.abs(resid).max()*1.10])\n\n # final adjustments\n # ax0.legend(loc='lower right', numpoints=1)\n ax0.set_xscale('log')\n ax1.set_xscale('log')\n ax0.set_yscale('log')\n xmin = (phot_wave-wav_err).min()*0.90\n xmax = (phot_wave+wav_err).max()*1.10\n ax0.set_xlim([xmin, xmax])\n specValid = spec_max[(spec_wave < xmax) & (spec_wave > xmin)]\n specNuValid = spec_nu[(spec_wave < xmax) & (spec_wave > xmin)]\n ymin = (specNuValid*specValid).min()\n ymax = (specNuValid*specValid).max()\n if ymin > (phot_nu*orig).min():\n ymin = (phot_nu*orig).min()\n if ymax < (phot_nu*orig).max():\n ymax = (phot_nu*orig).max()\n\n ymin*=0.90\n ymax*=2\n ax0.set_ylim([ymin,ymax])\n\n \n \n #ax0.set_ylim([(phot_nu*phot_max).min()*0.95,\n # (phot_nu*phot_max).max()*1.05])\n ax0.xaxis.set_minor_formatter(FormatStrFormatter(\"%.1f\"))\n ax0.xaxis.set_major_formatter(FormatStrFormatter(\"%.1f\"))\n ax1.set_xlabel(r'$\\lambda_{obs} (\\mu m)$', **font_kwargs)\n ax0.set_ylabel(r'$\\nu f_{\\nu} [erg/s/cm^{2}]$', **font_kwargs)\n ax1.set_ylabel(r'$\\chi$', **font_kwargs)\n\n # add second axis\n zred = results['obs']['zred']\n ax3 = ax0.twiny()\n ax3.set_xscale('log')\n ax0_x = ax0.get_xticks(minor=True)\n ax3_x = ax0_x/(1+zred)\n ax3.set_xticks(ax0_x)\n ax3.set_xticklabels(ax3_x)\n xbnd = ax0.get_xbound()\n ax3.set_xbound(xbnd[0]/(1+zred), xbnd[1]/(1+zred) )\n ax3.xaxis.set_minor_formatter(FormatStrFormatter(\"%.1f\"))\n ax3.xaxis.set_major_formatter(FormatStrFormatter(\"%.1f\"))\n ax3.set_xlabel(r'$\\lambda_{rest} (\\mu m)$', **font_kwargs)\n\n # add text\n num_param = len(results['initial_theta'])\n fit_value = sum(resid[det_mask]**2)/(sum(det_mask)-num_param)\n fit_string = 'GRB {}'.format(results['obs']['objid']) + '\\n'\n fit_string = fit_string + r'best-fit $\\chi^2/N_{dof}={x:.2f}$'.\\\n format(phot=r'{phot}', x=fit_value, dof='{dof}')\n fit_string = 
fit_string + '\\n' + r'$z={}$'.format(zred)\n ax0.text(0.02, 0.90, fit_string, horizontalalignment='left', verticalalignment='center',\n transform=ax0.transAxes, color='k', **font_kwargs)\n\n\n # save the figure\n if save_file is not None:\n sedfig.savefig(save_file, format='eps', transparent=True, pad_inches=0,\n dpi=80)\n\n return sedfig,ax0,ax1", "title": "" }, { "docid": "d91a6151ea5f10c258b32c0c2bf8ff7b", "score": "0.5004991", "text": "def denoise_fig(data,denoised_arr,type='macenko',savefigpath='none'):\r\n sli = data.shape[2] // 2\r\n gra = data.shape[3] - 1\r\n orig = data[:, :, sli, gra]\r\n den = denoised_arr[:, :, sli, gra]\r\n rms_diff = np.sqrt((orig - den) ** 2)\r\n\r\n if show_orig_denoised:\r\n fig1, ax = plt.subplots(1, 3, figsize=(12, 6),\r\n subplot_kw={'xticks': [], 'yticks': []})\r\n\r\n fig1.subplots_adjust(hspace=0.3, wspace=0.05)\r\n\r\n ax.flat[0].imshow(orig.T, cmap='gray', interpolation='none',\r\n origin='lower')\r\n ax.flat[0].set_title('Original')\r\n ax.flat[1].imshow(den.T, cmap='gray', interpolation='none',\r\n origin='lower')\r\n ax.flat[1].set_title('Denoised Output')\r\n if type == 'macenko':\r\n ax.flat[2].imshow(rms_diff.T, cmap='gray', interpolation='none',\r\n origin='lower')\r\n ax.flat[2].set_title('Residuals')\r\n if type == 'gibbs':\r\n ax.flat[2].imshow(data_corrected[:, :, 0, 0].T - data_slices[:, :, 0, 0].T,\r\n cmap='gray', origin='lower', vmin=-500, vmax=500)\r\n ax.flat[2].set_title('Gibbs residuals')\r\n if savefigpath.lower=='none':\r\n pass\r\n else:\r\n fig1.savefig(savefigpath)", "title": "" }, { "docid": "12536c031857f93e2d9bbf492fdc29fe", "score": "0.50036436", "text": "def plot_generate():\n # represents how many explanations were covered by top model predictions\n # starts at (0,0)\n case_1 = np.asarray([0,7,0,2,1,4,0,0,0,0,0,0,0,0]).cumsum() / 15.0\n case_2 = np.asarray([0,2,8,1,0,3,1,0,0,0,0,0,0,0]).cumsum() / 15.0\n case_3 = np.asarray([0,6,0,0,2,0,3,0,0,0,0,0,0,0]).cumsum() / 15.0\n case_4 = np.asarray([0,7,4,0,0,1,0,0,0,0,0,0,0,0]).cumsum() / 15.0\n\n fig, axes = plt.subplots(1,1)\n turnTopRightOff(axes)\n\n axes.plot(np.arange(14), case_2, color='#B276B2', linestyle='-', marker='s', markersize=9)\n axes.plot(np.arange(14), case_3, color='#5DA5DA', linestyle='-', marker='^', markersize=10)\n axes.plot(np.arange(14), case_4, color='#FAA43A', linestyle='-', marker='8', markersize=6)\n #axes.plot(np.arange(14), case_1, color='#F15854', linestyle='-', marker='D', markersize=6)\n\n axes.annotate('Condition 1', xy=(13,0.98), xytext=(13.2,0.98), color='#B276B2', fontsize=14) \n axes.annotate('Condition 2', xy=(13,0.72), xytext=(13.2,0.72), color='#5DA5DA', fontsize=14) \n axes.annotate('Condition 3', xy=(13,0.78), xytext=(13.2,0.78), color='#FAA43A', fontsize=14) \n #axes.annotate('Condition 1', xy=(13,0.92), xytext=(13.2,0.92), color='#F15854') \n\n axes.set_ylabel('Proportion of responses')\n axes.set_xlabel('Model\\'s top N most probable explanations')\n axes.set_ylim(0,1.1)\n axes.set_xlim(0,13)\n fig.set_size_inches(5,5)\n plt.savefig('plots/plot_generate_all.eps', bbox_inches='tight')\n plt.close(fig)\n\n fig, axes = plt.subplots(1,1)\n turnTopRightOff(axes)\n #fig.suptitle('Model predictions compared to generated responses', fontsize=18, fontweight='bold')\n axes.plot(np.arange(14), case_1, color='r', linestyle='-')\n axes.set_ylabel('Percent accounted for')\n axes.set_xlabel('Number of best explanations used')\n axes.set_ylim(0,1.1)\n axes.set_xlim(0,13)\n fig.set_size_inches(5,5)\n 
plt.savefig('plots/plot_generate_case1.eps', bbox_inches='tight')\n plt.close(fig)\n \n fig, axes = plt.subplots(1,1)\n turnTopRightOff(axes)\n #fig.suptitle('Model predictions compared to generated responses', fontsize=18, fontweight='bold')\n axes.plot(np.arange(14), case_2, color='r', linestyle='-')\n axes.set_ylabel('Percent accounted for')\n axes.set_xlabel('Number of best explanations used')\n axes.set_ylim(0,1.1)\n axes.set_xlim(0,13)\n fig.set_size_inches(5,5)\n plt.savefig('plots/plot_generate_case2.eps', bbox_inches='tight')\n plt.close(fig)\n\n fig, axes = plt.subplots(1,1)\n #fig.suptitle('Model predictions compared to generated responses', fontsize=18, fontweight='bold')\n axes.plot(np.arange(14), case_3, color='r', linestyle='-')\n axes.set_ylabel('Percent accounted for')\n axes.set_xlabel('Number of best explanations used')\n axes.set_ylim(0,1.1)\n axes.set_xlim(0,13)\n fig.set_size_inches(5,5)\n plt.savefig('plots/plot_generate_case3.eps', bbox_inches='tight')\n plt.close(fig)\n\n fig, axes = plt.subplots(1,1)\n turnTopRightOff(axes)\n # fig.suptitle('Model predictions compared to generated responses', fontsize=18, fontweight='bold')\n axes.plot(np.arange(14), case_4, color='r', linestyle='-')\n axes.set_ylabel('Percent accounted for')\n axes.set_xlabel('Number of best explanations used')\n axes.set_ylim(0,1.1)\n axes.set_xlim(0,13)\n fig.set_size_inches(5,5)\n plt.savefig('plots/plot_generate_case4.eps', bbox_inches='tight')\n plt.close(fig)", "title": "" }, { "docid": "4e6384c43762cbf6772da28d69e533ec", "score": "0.5002981", "text": "def plot_loss(items, output_path):\n\n df = pd.DataFrame(items, columns=[\"epoch\", \"batch_id\", \"value\"])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n seaborn.lineplot(x=\"epoch\", y=\"value\", data=df, ax=ax)\n fig.savefig(output_path)\n plt.close(fig)", "title": "" }, { "docid": "c882d3e92063b23f08ea54a15d4bd0ed", "score": "0.49982595", "text": "def make_sma_evolution_scatter_plot(initial_sm_axes, final_sm_axes,\n delta = 0, movie = 0, color = 3, i = -1, time = None):\n\n if color == 3:\n directory = snapshot_dir + \"/sma_transfer\"\n elif color == 2:\n directory = snapshot_dir + \"/sma_kept\"\n\n if delta == 1:\n directory += \"_delta\"\n\n try:\n os.mkdir(directory)\n except:\n print \"\\t(\" , directory, \"already exists)\"\n \n # Add disk range parameter initialized from different arrays that contain all the particles\n # Change function name\n\n fig = init_fig()\n\n pyplot.scatter(initial_sm_axes.value_in(units.AU), final_sm_axes.value_in(units.AU), \\\n c=colors[3], lw=0.5, zorder=0)\n\n min_x = 40\n mx_x = 100\n if movie == 1:\n mx_y = 1000\n else:\n mx_y = max(final_sm_axes.value_in(units.AU))\n pyplot.xlim(min_x / 1.02, mx_x * 1.02)\n pyplot.ylim(0, mx_y * 1.05)\n\n pyplot.xlabel('initial sm-axis [AU]')\n pyplot.ylabel('final sm-axis [AU]')\n\n # Plot 'y' vs 'x'\n pyplot.plot([min_x / 1.02, mx_x * 1.02], [min_x / 1.02, mx_x * 1.02], linestyle='--', color ='black', zorder = 1)\n\n pyplot.plot([40, 40], [0, 1], linestyle='--', color = 'black', zorder = 1)\n pyplot.plot([100, 100], [0, 1], linestyle='--', color = 'black', zorder = 1)\n\n time_yr_str = \"\"\n if (time is not None):\n time_yr_str = \"{0:.1f} yr\".format(time.value_in(units.yr))\n\n title_str = \"Evolution of Semi-Major Axes for Transferred Particles\\n\" + time_yr_str\n #pyplot.text(0.5, 0.999, title_str, \n # horizontalalignment='center', verticalalignment='bottom', \n # transform=xy_plane.transAxes)\n pyplot.title(title_str)\n\n plot_sma = 
snapshot_dir+\"/sma_evolution_{0:03d}.png\".format(i)\n pyplot.savefig(plot_sma)\n\n pyplot.cla()", "title": "" }, { "docid": "6c9bf9b6d367a149b542032110c2d4e4", "score": "0.49978015", "text": "def plotSeismogramInteractRes(h2, wavf, AddNoise=False):\n d = [0.0, 50.0, 50.0 + h2] # Position of top of each layer (m)\n v = [500.0, 1000.0, 1500.0] # Velocity of each layer (m/s)\n rho = [2000.0, 2300.0, 2500.0] # Density of each layer (kg/m^3)\n wavf = np.array(wavf, dtype=float)\n\n if AddNoise:\n noise = 0.02\n else:\n noise = 0.0\n\n plotSeismogramV2(d, rho, v, wavf, 1.0, noise)", "title": "" }, { "docid": "ce33e14d1e73ef5ae879e46555d3101b", "score": "0.49924424", "text": "def plot_training(alpha_star, mu_star, alpha_hats, \n mu_hats, l_hats, r_hats, T,\n plot_intervals = True):\n fix, ax = plt.subplots(1,1)\n print(np.array(mu_hats).shape)\n mu_hats = np.array(mu_hats)\n l_hats = np.array(l_hats)\n r_hats = np.array(r_hats)\n alpha_hats = np.array(alpha_hats)\n x = np.arange(T)\n ax.plot(x, mu_hats[:,0], 'r', label='muhat0')\n ax.plot(x, mu_hats[:,1], 'g', label='muhat1')\n ax.plot(x, alpha_hats[:,0], 'k', label='alpha0')\n if plot_intervals:\n ax.plot(x, l_hats[:,0], '--', label='left0')\n ax.plot(x, l_hats[:,1], '--', label='left1')\n\n ax.plot(x, r_hats[:,0], '--', label='right0')\n ax.plot(x, r_hats[:,1], '--', label='right1')\n ax.legend()\n ax.set_ylim((-3,3))\n plt.show()", "title": "" }, { "docid": "de5b2e5d5b35ee5d5c92c0c54ebfeb5c", "score": "0.49910298", "text": "def plot_tsne_and_umap(self,training_set,labels,save_path,model_name = \"tsne\"):\n if model_name == \"tsne\":\n tsne_train = manifold.TSNE(n_components=2, perplexity=30.0, early_exaggeration=12.0, learning_rate=200.0,\n n_iter=1000, n_iter_without_progress=300, min_grad_norm=1e-07, metric='euclidean',\n init='random', verbose=0, random_state=None, method='barnes_hut', angle=0.5)\n results = tsne_train.fit_transform(training_set)\n elif model_name == \"umap\":\n umap_train = umap.UMAP(n_neighbors=10,\n min_dist=0.3,\n metric='correlation')\n results = umap_train.fit_transform(training_set)\n else:\n raise ValueError(\"Model name could not be recognized\")\n df_subset_up = {}\n df_subset_up['tsne-2d-one'] = results[:, 0]\n df_subset_up['tsne-2d-two'] = results[:, 1]\n df_subset_up['y'] = labels\n plt.figure(figsize=(16, 10))\n sns_plot = sns.scatterplot(\n x=\"tsne-2d-one\",\n y=\"tsne-2d-two\",\n hue=\"y\",\n palette=sns.color_palette(\"hls\", max(labels)+1),\n data=df_subset_up,\n legend=\"full\",\n alpha=0.3\n )\n fig = sns_plot.get_figure()\n fig.savefig(save_path)\n return sns_plot", "title": "" }, { "docid": "2d3071c309856c93d265055b5b37ce5b", "score": "0.49875188", "text": "def plotGhost(data):", "title": "" }, { "docid": "9bd10d94cfa8fa5313201c91cf6b5a51", "score": "0.4982061", "text": "def plotLineStereographic(vecs, ax, **kwargs):\r\n projpospos, projpos, projnegpos, projneg = calcStereo(vecs)\r\n x = np.array(projpos[0, :])[0]\r\n y = np.array(projpos[1, :])[0]\r\n ax.plot(x,y, color = \"black\", **kwargs)", "title": "" }, { "docid": "90340d83d149b4c0fb8f752f3d08154c", "score": "0.49816653", "text": "def plotReacNet(ax, sunet, matr_shape, step=4, xoffset=0,\n color='#FF8200', tagsize=8, dotsize=15, aspect=1.0):\n with open(matr_shape) as f:\n for i, line in enumerate(f):\n if len(line) != 22:\n continue\n else:\n break\n data = np.genfromtxt(matr_shape, skip_header=i)\n x, y, z = np.hsplit(data, 3)\n with open(sunet) as f:\n species = f.readlines()\n species = [s.strip() for s in species]\n names = [elemSplit(s, 
invert=True) for s in species]\n nsp = len(names)\n rates = len(x)\n im = ax.scatter(x, y, c=color, edgecolors='#000000', s=dotsize)\n ax.set_aspect(aspect)\n # set labels\n ax.set_yticks(np.arange(1, nsp+1, 1)) # all tags in y-axis\n ax.set_yticklabels(['$^{{{}}}{{{}}}$'.format(*t) for t in names],\n fontsize=tagsize, ha='right')\n ax.set_xticks(np.arange(xoffset+1, nsp+1, step))\n labels = names[xoffset::step]\n ax.set_xticklabels(['$^{{{}}}{{{}}}$'.format(*t) for t in labels],\n fontsize=tagsize, va='baseline')\n ax.tick_params('both', length=5, width=2, which='major')\n ax.tick_params('x', length=5, width=2, labeltop=True, labelbottom=False)\n t = '{} Isotopes\\n{} Rates'\n note = ax.annotate(t.format(nsp, rates), xy=(100, 30), fontsize=8)\n plt.gca().invert_yaxis()\n return nsp, rates", "title": "" }, { "docid": "044a417022ea394cab50758bbcbc26d3", "score": "0.49811196", "text": "def setup_rdfview(ax, system): # pragma: no cover\n ax.plot([0], color=\"#34a5daff\")\n ax.set_xlim([0, system.box_length / 2])\n ax.set_yticks([])\n ax.set_ylabel(\"RDF\", fontsize=16)\n ax.set_xlabel(\"r/m\", fontsize=16)", "title": "" }, { "docid": "1494f6822a2b842ea0f30d95af14c702", "score": "0.4975469", "text": "def exo2():\n m = round(.1*n/ 3)*3\n pvertex = vertex*U\n pvertexN = perform_thresholding(pvertex, m, 'largest')\n vertexN = pvertexN*U'\n pvertexL = pvertex\n pvertexL(: , m/ 3 + 1: n) = 0\n vertexL = pvertexL*U'\n subplot(1, 2, 1)\n plot_mesh(vertexL, faces)\n subplot(1, 2, 2)\n plot_mesh(vertexN, faces)\n disp(['Linear: SNR = ' num2str(snr(vertex, vertexL), 3) 'dB'])\n disp(['Non-linear: SNR = ' num2str(snr(vertex, vertexN), 3) 'dB'])", "title": "" }, { "docid": "598a96c0e028b241a5dcca08a9f4895a", "score": "0.49753344", "text": "def plot_results(vae,\n test_loader,\n model_name=\"vae_mnist\"):\n\n encoder = vae.encoder\n decoder = vae.decoder\n with torch.no_grad():\n batch = (test_loader.dataset.data.float() / 255.)\n print(batch.numpy().shape)\n batch = batch.view(-1, vae.original_dim).to(vae.device)\n z_mean = encoder(batch)[:, :latent_dim]\n y_test = test_loader.dataset.targets\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n # plt.savefig(filename)\n # plt.show()\n\n filename = \"./{}_digits_over_latent.png\".format(model_name)\n # display a 30x30 2D manifold of digits\n n = 30\n digit_size = 28\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4, 4, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n z_sample = np.array([[xi, yi]])\n with torch.no_grad():\n z_sample = torch.tensor(z_sample).float().view(-1, latent_dim)\n x_decoded = torch.sigmoid(decoder(z_sample))\n digit = x_decoded.reshape(digit_size, digit_size)\n figure[i * digit_size: (i + 1) * digit_size,\n j * digit_size: (j + 1) * digit_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = digit_size // 2\n end_range = (n - 1) * digit_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, digit_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.savefig(filename)\n plt.show()", "title": "" }, { "docid": 
"5f8e1e83ab0de8d76001d90a97f11799", "score": "0.49746433", "text": "def make_3d_fit_viz_in_2d(n_submodels=3, line='hcop', version=None):\n if line[:4] == 'hcop':\n directory = \"carma\"\n if version is None:\n if line == 'hcop':\n version = 2\n elif line == 'hcop_regrid':\n version = 3\n else:\n directory = 'sofia'\n if version is None:\n version = 1\n filename_stub = f\"{directory}/models/gauss_fit_{line}_{n_submodels}G_v{version}\"\n param_fn = catalog.utils.search_for_file(filename_stub+\".param.fits\")\n # resid_fn = catalog.utils.search_for_file(filename_stub+\".resid.fits\")\n # model_fn = catalog.utils.search_for_file(filename_stub+\".model.fits\")\n hdul = fits.open(param_fn)\n print(list(hdu.header['EXTNAME'] for hdu in hdul if 'EXTNAME' in hdu.header))\n # resid_cube = cube_utils.SpectralCube.read(resid_fn)\n # model_cube = cube_utils.SpectralCube.read(model_fn)\n means = []\n amplitudes = []\n shape = hdul[1].data.shape\n # ii, jj = tuple(x for x in np.mgrid[0:shape[0], 0:shape[1]])\n ii, jj = np.mgrid[0:shape[0], 0:shape[1]]\n i_axis = np.arange(shape[0])\n j_axis = np.arange(shape[1])\n\n i_cube = []\n j_cube = []\n\n if n_submodels > 1:\n for k in range(n_submodels):\n means.append(hdul[f'mean_{k}'].data[:])\n amplitudes.append(hdul[f'amplitude_{k}'].data[:])\n i_cube.append(ii)\n j_cube.append(jj)\n i_cube = np.array(i_cube)\n j_cube = np.array(j_cube)\n means = np.array(means)\n amplitudes = np.array(amplitudes)\n else:\n means = hdul['mean'].data[np.newaxis, :, :]\n amplitudes = hdul['amplitude'].data[np.newaxis, :, :]\n i_cube = ii[np.newaxis, :, :]\n j_cube = jj[np.newaxis, :, :]\n\n # means = np.array(means)\n # amplitudes = np.array(amplitudes)\n # i_array = np.array(i_array)\n # j_array = np.array(j_array)\n if line == 'hcop':\n amp_cutoff = 2.5\n elif line == 'hcop_regrid':\n amp_cutoff = 0.6\n else:\n amp_cutoff = 5\n amp_mask = amplitudes > amp_cutoff # about 5sigma\n\n\n # means = means[amp_mask]\n # amplitudes = amplitudes[amp_mask]\n # i_array = i_array[amp_mask]\n # j_array = j_array[amp_mask]\n\n # im1 = ax1.hist2d(j_array, means, bins=64)[3]\n\n if line[:4] == 'hcop':\n n_bins = 128\n else:\n n_bins = 32\n\n img_ra_vel = np.zeros((n_bins, shape[1]))\n vel_limits = (22, 28)\n for j in j_axis:\n velocities_in_j = means[:, :, j].ravel()\n amplitudes_in_j = amplitudes[:, :, j].ravel()\n vel_hist_in_j, vel_edges = np.histogram(velocities_in_j[amplitudes_in_j > amp_cutoff], bins=n_bins, range=vel_limits)\n img_ra_vel[:, j] = vel_hist_in_j\n vel_centers = (vel_edges[:-1] + vel_edges[1:])/2\n vel_delta = vel_edges[1] - vel_edges[0]\n\n\n fig = plt.figure(figsize=(6, 8))\n ax1 = plt.subplot(211)\n ax2 = plt.subplot(212, projection=WCS(hdul[1].header))\n\n\n im1 = ax1.imshow(img_ra_vel, origin='lower', aspect=(shape[1]/(vel_limits[1]-vel_limits[0])), extent=[0, shape[1], vel_limits[0], vel_limits[1]])\n fig.colorbar(im1, ax=ax1, label='$N$ valid components')\n # ax1.set_xlabel(\"RA\")\n ax1.set_ylabel(f\"Velocity ({kms.to_string('latex_inline')})\")\n ax1.xaxis.set_ticks([])\n # ax.invert_xaxis()\n\n\n img_ra_dec = np.sum((amplitudes > amp_cutoff).astype(int), axis=0)\n im2 = ax2.imshow(img_ra_dec, origin='lower', vmin=0, vmax=3)\n cbar = fig.colorbar(im2, ax=ax2, ticks=list(range(0, 4)), label='$N$ valid components')\n ax2.set_xlabel(\"Right Ascension\")\n ax2.set_ylabel(\"Declination\")\n ax2.tick_params(axis='x', direction='in')\n\n plt.subplots_adjust(bottom=0.1, top=0.95, left=0.12, right=0.98, hspace=0.07)\n\n\n # im3 = ax3.hist2d(means, i_array, bins=64)[3]\n # 
fig.colorbar(im3, ax=ax3)\n # ax3.set_xlabel(\"Velocity (km/s)\")\n # ax3.set_ylabel(\"Dec\")\n\n dpi = 300\n dpi_stub = \"\" if dpi==100 else f\"_dpi{dpi}\"\n\n # plt.show()\n # 2022-09-01,13, 2023-07-25\n savename = os.path.join(catalog.utils.todays_image_folder(), f\"p1_3d_viz_in_2d_{line}_{n_submodels}p{dpi_stub}\")\n fig.savefig(f\"{savename}.png\",\n metadata=catalog.utils.create_png_metadata(title=\"projection of grid fit\",\n file=__file__, func=\"make_3d_fit_viz_in_2d\"),\n dpi=dpi)\n # elif True:\n # from mayavi import mlab\n # mlab.figure(bgcolor=(0.2, 0.2, 0.2), fgcolor=(0.93, 0.93, 0.93), size=(800, 700))\n # mlab.axes(ranges=[0, shape[1], 0, shape[0], 20, 30],\n # xlabel='j (ra)', ylabel='i (dec)', zlabel='velocity (km/s)', nb_labels=10,\n # line_width=19)\n # kwargs = dict(mode='cube', colormap='jet',\n # scale_mode='none', scale_factor=0.7, opacity=0.2)\n # mlab.points3d(j_array, i_array, -1*means*(30 if line=='hcop' else 4), amplitudes, **kwargs)\n # mlab.show()", "title": "" }, { "docid": "de136541f0af6fc8a10587cffb969e2d", "score": "0.4972451", "text": "def brett(self):\t\t\t\n\t\t\n def pline(x, y):\n return plt.plot(x, y, color=(0,0,0), lw=0.8)\n\n def prot(x, y, t):\n return ax.text(x, y, t, fontsize=9, horizontalalignment='center', \n verticalalignment='center', color=(1,0,0), \n fontname='Times New Roman')\n\n def pblack(x, y, t):\n return ax.text(x, y, t, fontsize=9, horizontalalignment='center', \n verticalalignment='center', color=(0,0,0),\n fontname='Times New Roman')\n\n def punt(x, y):\n ax.text(x, y, '12', fontsize=6, horizontalalignment='center', \n verticalalignment='center', color=(0,0,0),\n fontname='Times New Roman')\n\n dx, dy = 1.5, 1.5\n fig = plt.figure(figsize=(3, 4))\n ax = fig.add_subplot(1, 1, 1)\n ax.spines['top'].set_visible(False)\t\t\n ax.spines['bottom'].set_visible(False)\t\t\n ax.spines['right'].set_visible(False)\t\t\n ax.spines['left'].set_visible(False)\t\t\n ax.set_xticks([])\n plt.axes().xaxis.set_ticks_position('none')\n ax.set_yticks([])\n plt.axes().yaxis.set_ticks_position('none')\n plt.xlim(0, 10*dx)\n plt.ylim(-0.1, 15*dy)\n pline([3*dx, 6*dx, 6*dx, 3*dx, 3*dx], [0, 0, 14*dy, 14*dy, 0])\n pline([4*dx, 4*dx], [dy, 13*dy])\n pline([5*dx, 5*dx], [dy, 13*dy])\n for i in range(1, 14):\n pline([3*dx, 6*dx], [i*dy, i*dy])\n pline([0, 0], [2*dy, 12*dy])\n pline([9*dx, 9*dx], [2*dy, 12*dy])\n pline([3*dx, 0], [dy, 2*dy])\n pline([3*dx, 0], [2*dy, 3*dy])\n pline([6*dx, 9*dx], [dy, 2*dy])\n pline([6*dx, 9*dx], [2*dy, 3*dy])\n pline([0, 3*dx], [12*dy, 13*dy])\n pline([9*dx, 6*dx], [12*dy, 13*dy])\n pline([0, 9*dx], [5*dy, 5*dy])\n pline([0, 9*dx], [9*dy, 9*dy])\n pline([2*dx, 2*dx], [1.35*dy, 2.3*dy])\n pline([7*dx, 7*dx], [1.35*dy, 2.3*dy])\n pline([dx, dx], [1.7*dy, 2.65*dy])\n pline([8*dx, 8*dx], [1.7*dy, 2.65*dy])\n ax.add_patch(patches.RegularPolygon(\n (1.7*dx, 3.7*dy), 4, 0.6*dx, color=(0,0,0)))\n ax.add_patch(patches.RegularPolygon(\n (7.4*dx, 3.7*dy), 4, 0.6*dx, facecolor=(1,0,0)))\n ax.text(4.5*dx, 13.4*dy, '0', fontsize=9, horizontalalignment='center', \\\n verticalalignment='center', color=(0,1,0))\n prot(3.5*dx, 12.4*dy, '1')\n pblack(4.5*dx, 12.4*dy, '2')\n prot(5.5*dx, 12.4*dy, '3')\n pblack(3.5*dx, 11.4*dy, '4')\n prot(4.5*dx, 11.4*dy, '5')\n pblack(5.5*dx, 11.4*dy, '6')\n prot(3.5*dx, 10.4*dy, '7')\n pblack(4.5*dx, 10.4*dy, '8')\n prot(5.5*dx, 10.4*dy, '9')\n pblack(3.5*dx, 9.4*dy, '10')\n pblack(4.5*dx, 9.4*dy, '11')\n prot(5.5*dx, 9.4*dy, '12')\n pblack(3.5*dx, 8.4*dy, '13')\n prot(4.5*dx, 8.4*dy, '14')\n pblack(5.5*dx, 
8.4*dy, '15')\n prot(3.5*dx, 7.4*dy, '16')\n pblack(4.5*dx, 7.4*dy, '17')\n prot(5.5*dx, 7.4*dy, '18')\n prot(3.5*dx, 6.4*dy, '19')\n pblack(4.5*dx, 6.4*dy, '20')\n prot(5.5*dx, 6.4*dy, '21')\n pblack(3.5*dx, 5.4*dy, '22')\n prot(4.5*dx, 5.4*dy, '23')\n pblack(5.5*dx, 5.4*dy, '24')\n prot(3.5*dx, 4.4*dy, '25')\n pblack(4.5*dx, 4.4*dy, '26')\n prot(5.5*dx, 4.4*dy, '27')\n pblack(3.5*dx, 3.4*dy, '28')\n pblack(4.5*dx, 3.4*dy, '29')\n prot(5.5*dx, 3.4*dy, '30')\n pblack(3.5*dx, 2.4*dy, '31')\n prot(4.5*dx, 2.4*dy, '32')\n pblack(5.5*dx, 2.4*dy, '33') \n prot(3.5*dx, 1.4*dy, '34')\n pblack(4.5*dx, 1.4*dy, '35')\n prot(5.5*dx, 1.4*dy, '36') \n pblack(0.5*dx, 2.4*dy, 'P') \n pblack(8.5*dx, 2.4*dy, 'P') \n punt(0.7*dx, 2.13*dy)\n punt(8.7*dx, 2.13*dy)\n pblack(1.35*dx, 2.07*dy, 'M') \n pblack(7.35*dx, 2.07*dy, 'M') \n punt(1.72*dx, 1.85*dy)\n punt(7.72*dx, 1.85*dy) \n pblack(2.45*dx, 1.75*dy, 'D') \n pblack(6.45*dx, 1.75*dy, 'D') \n punt(2.75*dx, 1.48*dy)\n punt(6.75*dx, 1.48*dy) \n pblack(1.5*dx, 10.5*dy, 'Passe')\n pblack(7.5*dx, 10.5*dy, 'Manque')\n pblack(1.5*dx, 7*dy, 'Pair')\n pblack(7.5*dx, 7*dy, 'Impair')\n \n plt.show()", "title": "" } ]
c19228a3d740918fb2e3cafbd49ae231
Add a status line to the editor.
[ { "docid": "1d447c653b8fea3da5840afa452e3fd1", "score": "0.67183894", "text": "def addStatus(self, text, priority=0):\n pass", "title": "" } ]
[ { "docid": "b54316b4f7ef636bdd64ac3479e808a4", "score": "0.66472626", "text": "def add_line(self, line):\n height, width = self.window.getmaxyx()\n self.window.addstr(self.line_no, 2, line[:width-5])\n self.line_no += 1", "title": "" }, { "docid": "7900b80ea1e715119a86afbb5739fdc9", "score": "0.66377926", "text": "def add_line(self, line):\n\t\tself.lines.append(line)", "title": "" }, { "docid": "e83e24604c58b5d7e5c06d7fbc3ccb47", "score": "0.6612937", "text": "def add_line(self):\n self.prev_line_end = self.last\n self.current_line += 1", "title": "" }, { "docid": "976942a70e0e0621922a061ec5cfc6f2", "score": "0.6600205", "text": "def addLine(self, line):\n self.insert(tk.END, line + '\\n')", "title": "" }, { "docid": "0d464d108be99937e739305fce22a38c", "score": "0.65450764", "text": "def add_line(self, line):\n self.lines.append(line)", "title": "" }, { "docid": "6bf327ac8b0ba5f944787796a240cba2", "score": "0.6385026", "text": "def addLine(self, line, color=(0, 0, 0)):\n self.lineCount += 1\n\n linePos = self.lineCount*self.defaultTextSize\n linePosOffset = self.lineCount*self.defaultSpacing\n position = (0, self.firstLinePos - linePos - linePosOffset)\n self.addText(line, size=self.defaultTextSize, anchor=\"middle_left\",\n position=position)", "title": "" }, { "docid": "ace6bd30268987f2a49dfa54981adf3f", "score": "0.634499", "text": "def line(self, line):\n self.lines.append(\"{0}{1}\".format(\" \" * self.indent, line))\n self.lastCmdWasBlank = False", "title": "" }, { "docid": "cd21419b8c54ad9c658a550d661830a4", "score": "0.63383085", "text": "def _update_status(self, status, *value):\n\n # print(value)\n # Create text options dictionary\n text_options = {}\n text_options.update({\"running\": \"Running Test...\"})\n\n if value is not None and len(value)>0:\n text_options.update({\"update\": \"Running Test... 
CFV = \" + str(value[0])})\n else:\n text_options.update({\"update\": \"Running Test...\"})\n\n text_options.update({\"taring\": \"Taring Scale, Wait...\"})\n text_options.update({\"pass\": \"PASSED\"})\n text_options.update({\"fail\": \"FAILED\"})\n text_options.update({\"test_stopped\": \"Test stopped prematurely...\"})\n text_options.update({\"\":\"\"})\n\n # Create background colors dictionary\n bc_options = {}\n bc_options.update({\"running\": \"background-color: rgb(255, 255, 255);\"})\n bc_options.update({\"update\": \"background-color: rgb(255, 255, 255);\"})\n bc_options.update({\"taring\": \"background-color: rgb(255, 255, 255);\"})\n bc_options.update({\"pass\": \"background-color: rgb(0, 255, 0);\"})\n bc_options.update({\"fail\": \"background-color: rgb(255, 0, 0);\"})\n bc_options.update({\"test_stopped\": \"background-color: rgb(255, 85, 0);\"})\n bc_options.update({\"\": \"background-color: rgb(255, 255, 255);\"})\n\n # Update the line edit text\n if status == 'update':\n f = self._ui.status_line_edit.font()\n f.setPointSize(25)\n self._ui.status_line_edit.setFont(f)\n\n else:\n f = self._ui.status_line_edit.font()\n f.setPointSize(45)\n self._ui.status_line_edit.setFont(f)\n\n self._ui.status_line_edit.setText(text_options[status])\n\n # Update the line edit background color\n self._ui.status_line_edit.setStyleSheet(bc_options[status])", "title": "" }, { "docid": "c2f4d3d812e14b7cf6844749dac2506d", "score": "0.6326748", "text": "def status(self, status, color):\n self.status_var.set(status)\n self._status_display.configure(background=color)", "title": "" }, { "docid": "ed8ae42e59d77b84d7201045e90bd1f9", "score": "0.6318497", "text": "def display_line(self, line):\n\t\tself.lines.append(line)\n\t\tself.update()", "title": "" }, { "docid": "2d7f0507638ecf3a404d5d841c495637", "score": "0.63056093", "text": "def update_status(self, status_message):\n\n self.status_box_widget.append(str(status_message))\n # reference: https://stackoverflow.com/questions/7778726/autoscroll-pyqt-qtextwidget\n self.status_box_widget.moveCursor(QtGui.QTextCursor.End)\n self.update()", "title": "" }, { "docid": "06e727027fee069a908c70abdb18799c", "score": "0.6268333", "text": "def add_line(self, index, line):\n try:\n li = self._lines[index]\n lt = self._line_tokens[index]\n except IndexError:\n li = ''\n lt = self._parser.parse_string('')\n\n d = {\n 'count': 1,\n 'data': {\n 'lines': [li],\n 'line_tokens': [lt],\n },\n 'state': self.get_page_state(),\n 'last_addition': index,\n }\n\n # self.add_to_undo_buffer(('+', index, d))\n self._lines.insert(index, line)\n self._line_tokens.insert(index, self._parser.parse_string(line))", "title": "" }, { "docid": "ac1acb94eb27827dc5b3e51d0eda0a99", "score": "0.62529624", "text": "def add_status(status_effect: str, unit: Character):\n unit.status_effects[status_effect] = True", "title": "" }, { "docid": "e47bc8d300d567dec08440f910877624", "score": "0.62285304", "text": "def status_message(window, message, color=COLOR_PAIRS[\"STATUS\"], line=0, col=0):\n\n message = message.strip()\n\n window.clear()\n window.addstr(line, col, message, curses.color_pair(color))\n window.refresh()", "title": "" }, { "docid": "6d8ee117c96d057e114f2105ea79da3b", "score": "0.6201668", "text": "def status(self, status: CommandStatus):\n\n self.set_status(status)", "title": "" }, { "docid": "4a40ef217fcc310ba04f677a106636cd", "score": "0.6165816", "text": "def _add_line(self, content: str):\n self.__current_file_contents.append(content)", "title": "" }, { "docid": 
"b800263e54e92b8c3d5371d328a2db6e", "score": "0.6121408", "text": "def AddLine(self, line):\n self._lines.append(line)", "title": "" }, { "docid": "8d84813f76b1eab4ff42ed134f27188e", "score": "0.6114483", "text": "def _line_insert(self, line):\n content, attributes = self._format(line)\n self._ui_window.line_insert(line, content, attributes)", "title": "" }, { "docid": "40b8fa1eb0b64f60ca524c62b62ef385", "score": "0.61123586", "text": "def new_line(self):\n self.lines.append(self.current_line)\n self.current_line = ''", "title": "" }, { "docid": "623e084b56309b7b6e121c50a7ec9c6d", "score": "0.60855323", "text": "def add_line(self, line, *lineno):\n self.result.append(line, directives.unchanged, *lineno)", "title": "" }, { "docid": "225ab8be91c6abdd14da3b9ecd094d6b", "score": "0.60767984", "text": "def _line_update(self, line):\n content, attributes = self._format(line)\n self._ui_window.line_update(line, content, attributes)", "title": "" }, { "docid": "0660c5780cc1b828697f5af904c32e7c", "score": "0.605", "text": "def add_status(self, status_dict):\n self._database.write_configuration('status_list', status_dict, True)", "title": "" }, { "docid": "98ed2f8cc012f251ce92a8dd4b971de5", "score": "0.60417813", "text": "def set_status(self, new_status):\r\n if new_status == self.status:\r\n return\r\n if new_status not in self.available_statuses():\r\n raise exceptions.InvalidLineStatus(\r\n _(\"'%(new_status)s' is not a valid status (current status:\"\r\n \" '%(status)s')\")\r\n % {'new_status': new_status, 'status': self.status})\r\n self.status = new_status\r\n self.save()", "title": "" }, { "docid": "193ff495d8cb694036a5e76521c784fa", "score": "0.5997687", "text": "def status_message(self, message):\n\n self._append_text(message, self._status_format)", "title": "" }, { "docid": "7a0f01fb3af387df21facf5146032316", "score": "0.59796494", "text": "def update_status_line(self) -> List[Command]:\n return [\n ClearStatusLineCommand(),\n SetStatusLineTextCommand(self.get_selected().tooltip, Position.CENTER),\n ]", "title": "" }, { "docid": "9a2099f2d40b2cbc3ab9d8d66839836c", "score": "0.5975878", "text": "def add_line(self, line, context=None):\n if not isinstance(line, LiquidLine):\n line = LiquidLine(line, context)\n line.ndent = self.ndent\n self.codes.append(line)", "title": "" }, { "docid": "b9724fabe81eb24efe717c7c6ef9f874", "score": "0.59749013", "text": "def add_line(self, event):\n self.canvas.create_line(self.last_x, self.last_y, event.x, event.y, width=self.line_width, tags='drawing')\n # this makes the new starting point of the drawing\n self.last_x, self.last_y = event.x, event.y", "title": "" }, { "docid": "b2a880cf35830ff0ff597779a77069ff", "score": "0.5950461", "text": "def add_line(self, index):\r\n # Create a widget for the line\r\n line = self.FlashcardEditLine(self, self.type, self.set, index)\r\n layout = self.ui.scrollAreaWidgetContents.layout()\r\n # Insert the widget before the spacer\r\n layout.insertWidget(layout.count() - 1, line)", "title": "" }, { "docid": "eb90737b8483efa053940fc50b1de66e", "score": "0.5938972", "text": "def _add_to_file_status(self, status):\n self._file_status.fail += status.fail\n self._file_status.warning += status.warning\n self._file_status.error += status.error\n self._file_status.skipped += status.skipped\n self._file_status.test_count += 1", "title": "" }, { "docid": "7520fb474423f92b425cfc6fdca2c9f8", "score": "0.5931713", "text": "def _status(self, message):\n sublime.status_message(message)", "title": "" }, { "docid": 
"d619b67edd06ed2cbe60d4732227674c", "score": "0.59291303", "text": "def add_track_line(self):\n self.track_line = QLineEdit()\n self.track_line.setPlaceholderText(\"Enter Track...\")\n self.hbox.addWidget(self.track_line)", "title": "" }, { "docid": "8761e0ea05281b95a1f74b894e0bcb24", "score": "0.5886873", "text": "def add_line(self, lineno, text, important=True):\r\n self.append((lineno, important, text))", "title": "" }, { "docid": "b15bd78bd84109bd9c74d0822325bc5e", "score": "0.5881788", "text": "def AppendText(self, line):\n self._text_ctrl.AppendText(line)", "title": "" }, { "docid": "303420a468706c9c99c6cf2f4dd9ed10", "score": "0.58435977", "text": "def set_status_message(self, message):\n self.statusbar.push(self.context_id, message)", "title": "" }, { "docid": "7f836eaae8594ecdf4d698baa63b80c7", "score": "0.5803774", "text": "def AddLine(self, line, fgcolor=None, bgcolor=None):\n return _idaapi.pyscv_add_line(self.__this, self.__make_sl_arg(line, fgcolor, bgcolor))", "title": "" }, { "docid": "d966522e2b8986b1ca055bb8e8e312d5", "score": "0.58022195", "text": "async def _status(status_details):\n status_update.append(status_details)", "title": "" }, { "docid": "2d7b3b6d4deec338932cdd4364d00442", "score": "0.5792026", "text": "def append(self, line):\n self.lines.append(line)", "title": "" }, { "docid": "e3e90b3463f01efeac3c5ca3eb319b03", "score": "0.57875276", "text": "def add_status_widget(self, widget, index=0):\r\n # Check widget class\r\n if not isinstance(widget, StatusBarWidget):\r\n raise SpyderAPIError(\r\n 'Any status widget must subclass StatusBarWidget!'\r\n )\r\n\r\n # Check ID\r\n id_ = widget.ID\r\n if id_ is None:\r\n raise SpyderAPIError(\r\n f\"Status widget `{repr(widget)}` doesn't have an identifier!\"\r\n )\r\n\r\n # Check it was not added before\r\n if id_ in self.STATUS_WIDGETS and not running_under_pytest():\r\n raise SpyderAPIError(f'Status widget `{id_}` already added!')\r\n self.STATUS_WIDGETS[id_] = widget\r\n\r\n self._statusbar.setStyleSheet('QStatusBar::item {border: None;}')\r\n if index == -1:\r\n self._statusbar.addPermanentWidget(widget)\r\n else:\r\n self._statusbar.insertPermanentWidget(index, widget)\r\n self._statusbar.layout().setContentsMargins(0, 0, 0, 0)\r\n self._statusbar.layout().setSpacing(0)", "title": "" }, { "docid": "f10c7cd7feb16d064cda171e1fec0cc6", "score": "0.57801926", "text": "def add_status():\n check_admin()\n\n add_status = True\n\n form = StatusForm()\n if form.validate_on_submit():\n status = Status(name=form.name.data,description=form.description.data)\n\n try:\n # add status to the database\n db.session.add(status)\n db.session.commit()\n flash('You have successfully added a new status.')\n except:\n # in case status name already exists\n flash('Error: status name already exists.')\n\n # redirect to status page\n return redirect(url_for('admins.list_statuss'))\n\n # load status template\n return render_template('Admin/Status/status.html', action=\"Add\",\n add_status=add_status, form=form,\n title=\"Add Status\")", "title": "" }, { "docid": "8bd35f59fcb71d8caf407398b7ce7717", "score": "0.57763976", "text": "def toggle_line_button_pressed(self):\r\n self.line_status = not self.line_status\r\n if not self.line_status:\r\n self.remove_lines_points()\r\n else:\r\n self.add_lines_points()", "title": "" }, { "docid": "7c760ee778c62488632ff1c50ae63957", "score": "0.5756924", "text": "def addLine(self, ln, offset):\n s = ln[offset:]\n if not self.tip.isOpen:\n raise Exception(\"Attempted to add line (\" + ln + \") to closed 
container.\")\n self.tip.strings.append(s)", "title": "" }, { "docid": "dd1a70f7925c9c14b7b573aeb03a8154", "score": "0.5756298", "text": "def annotate_append_line(self, line, item, style=None):\n\t\tcurrent = self.annotation(line)\n\t\tif len(current) and not current.endswith('\\n'):\n\t\t\tself.annotate_append(line, '\\n', 0)\n\t\treturn self.annotate_append(line, item, style)", "title": "" }, { "docid": "09e712caa4fb4287b741be7ac08e7bc7", "score": "0.5736203", "text": "def _createLine(self, (x1, y1), (x2, y2), style={}):\n ev = Event('style', style, {'coords': ((x1, y1), (x2, y2))})\n self.notify(ev)\n self.notify(Event('append', Draftsman.createLine((x1, y1), (x2, y2), ev.content)))", "title": "" }, { "docid": "0ad4aa19a01dce77e34098f4fc7458d9", "score": "0.5735983", "text": "def highlight_current_line(self, event):\n logging.debug(\"Function: highlight current line\")\n self.textWidget.tag_remove(\"current_line\", 1.0, \"end\")\n self.textWidget.tag_add(\"current_line\", \"insert linestart\", \"insert lineend+1c\")", "title": "" }, { "docid": "512725783ed572332add55a3f6289595", "score": "0.5731084", "text": "def status(self, status):\n\n\n self._status = status", "title": "" }, { "docid": "512725783ed572332add55a3f6289595", "score": "0.5731084", "text": "def status(self, status):\n\n\n self._status = status", "title": "" }, { "docid": "fbb67cca445faef8522ab4f663c73f80", "score": "0.57180536", "text": "def add(self, line):\n self.data.append(line)", "title": "" }, { "docid": "556cf3c6b803e6ae118a4fe9fc995c1a", "score": "0.5718051", "text": "def _createVLine(self, x, (y1, y2), style={}):\n ev = Event('style', style, {'coords': ((x, y1), (x, y2))})\n self.notify(ev)\n\tself.notify(Event('append', Draftsman.createVLine(x, (y1, y2), ev.content)))", "title": "" }, { "docid": "1a2774aee8cc77bf306fe44609152452", "score": "0.571722", "text": "def add_line(self, textline, level=0):\n if level == 0:\n ind = \"\"\n\n else:\n ind = \"\".join([self.ind for _ in range(level)])\n\n textline = self.wrap_text(textline, self.left_margin + ind,\n self.right_margin, self.max_width)\n\n self.__write_lines(textline)\n\n self.nlines += 1", "title": "" }, { "docid": "04846c1c60d8867e6d173c531f9c6609", "score": "0.57076037", "text": "def addLine(self, text):\n\n self.lines.append(text)\n while len(self.lines) > 10:\n del self.lines[0]\n self.redisplayLines()", "title": "" }, { "docid": "35d7301f9619ddf26499c7473193dc0c", "score": "0.5705498", "text": "def render_line(start: Vec2, end: Vec2, style: BorderStyle):\n if style.status:\n layout.add_line(\n start=start,\n end=end,\n dxfattribs={\n \"layer\": layer,\n \"color\": style.color,\n \"linetype\": style.linetype,\n \"lineweight\": style.lineweight,\n },\n )", "title": "" }, { "docid": "3d03850c0284348bcfda18c0f39268b9", "score": "0.56875527", "text": "def add_line(self, line):\n self.replies.append(line)", "title": "" }, { "docid": "1eac3d6db61bbface365387738dacc48", "score": "0.5679652", "text": "def put_at(self, line):\n\t\treturn self.editor.markerAdd(line, self.id)", "title": "" }, { "docid": "5e07df9ce724c415b2cb993d6efb9ca5", "score": "0.56790924", "text": "def EditLine(self, lineno, line, fgcolor=None, bgcolor=None):\n return _idaapi.pyscv_edit_line(self.__this, lineno, self.__make_sl_arg(line, fgcolor, bgcolor))", "title": "" }, { "docid": "8bf310790441ca732f28f66fc2306b5c", "score": "0.56783444", "text": "def update_program_status(self, s):\n # Show only most recent line of status\n if len(self.program_status) > 0 and self.program_status[-1] == 
\"\\n\":\n self.program_status = \"\"\n self.program_status += s\n self.programStatusLabel.setText(str(self.program_status).strip())", "title": "" }, { "docid": "5b8e014254ff84fb1a93f6c674d1fb48", "score": "0.5676245", "text": "def _onecmdupd(self, line):\n self._lastcmd = line\n self._numcmds += 1\n self._history.append(line)", "title": "" }, { "docid": "273ab78ca0866d30c0ada9057f61622e", "score": "0.56747335", "text": "def _set_status(self, ctxt, status, text=None, url=None):\n\n # Set the status\n self.last_commit.create_status(\n status,\n url or github.GithubObject.NotSet,\n text or github.GithubObject.NotSet,\n )\n\n ctxt.emit('Changing status to \"%s\" (text \"%s\"%s%s)' %\n (status, text, ', url ' if url else '', url or ''),\n debug=True)\n\n # Remember it so we only make calls we need to\n self.last_status = {\n 'status': status,\n 'text': text,\n 'url': url,\n }", "title": "" }, { "docid": "9d16bf0835f02f957fa9b55833892400", "score": "0.5668909", "text": "def add_line(self, line):\n if self.skip_line_re.search(line):\n return\n line += \"\\n\" if not line.endswith(\"\\n\") else line\n self.__original_content += line", "title": "" }, { "docid": "93457b4e1d1ff0eed568943418baa26a", "score": "0.5661903", "text": "def line_1(self, add_str: str) -> None:\n self._line_1 = add_str", "title": "" }, { "docid": "6b2a9d3d83e9566c2f8aa8457c15370b", "score": "0.5659725", "text": "def on_status(self, status):\n self.last_status[0] = status", "title": "" }, { "docid": "446dc88317353dae61683997a7a7eac1", "score": "0.5636364", "text": "def append_status_bar(self, append_message):\n\n self.ui.statusBar.showMessage(self.ui.statusBar.currentMessage() + append_message)", "title": "" }, { "docid": "32e39b656c5de58e714b0b4538fd3004", "score": "0.56214136", "text": "async def add(self, ctx: commands.Context, *, addition: str):\n if addition in self.stat:\n await ctx.send(\"That status is already in the database.\")\n else:\n self.stat.append(addition)\n self.bot.mongodb[\"rrp\"].insert_one({\"title\": addition})\n await ctx.message.add_reaction(\"โœ”\")", "title": "" }, { "docid": "0f7dccf4fae0ba2d1a6c11480cfea5a2", "score": "0.5620963", "text": "def InsertLine(self, lineno, line, fgcolor=None, bgcolor=None):\n return _idaapi.pyscv_insert_line(self.__this, lineno, self.__make_sl_arg(line, fgcolor, bgcolor))", "title": "" }, { "docid": "1563ab11a412a0c5a8907dc30d42e6b6", "score": "0.56207436", "text": "def add_lineup_entry(self, name_new_lineup_entry):\n asynckivy.start(self.async_add_lineup_entry(name_new_lineup_entry))", "title": "" }, { "docid": "9c5ab09b72b2f7696adcc7ef94fce422", "score": "0.56149185", "text": "def set_status(self, status):\n # This is called for the 'show' command, so it mustn't log anything.\n raise NotImplementedError", "title": "" }, { "docid": "a9f2adcc1edf72e26d6f23e06b6157b4", "score": "0.56124115", "text": "def addLine(self):\n self.T.setRowCount(self.T.rowCount()+1)\n t = qTItem(str(self.T.rowCount()-1))\n self.T.setVerticalHeaderItem(self.T.rowCount()-1,t)\n self.setSpinBoxMax()", "title": "" }, { "docid": "6324a249067f2f362de5eafe31f3509c", "score": "0.5609737", "text": "def status(self, status: str):\n\n self._status = status", "title": "" }, { "docid": "6324a249067f2f362de5eafe31f3509c", "score": "0.5609737", "text": "def status(self, status: str):\n\n self._status = status", "title": "" }, { "docid": "6324a249067f2f362de5eafe31f3509c", "score": "0.5609737", "text": "def status(self, status: str):\n\n self._status = status", "title": "" }, { "docid": 
"6324a249067f2f362de5eafe31f3509c", "score": "0.5609737", "text": "def status(self, status: str):\n\n self._status = status", "title": "" }, { "docid": "6324a249067f2f362de5eafe31f3509c", "score": "0.5609737", "text": "def status(self, status: str):\n\n self._status = status", "title": "" }, { "docid": "6324a249067f2f362de5eafe31f3509c", "score": "0.5609737", "text": "def status(self, status: str):\n\n self._status = status", "title": "" }, { "docid": "d6e58368e83663428c827736f9ec3769", "score": "0.5605313", "text": "def set_status(self, status: Status) -> None:", "title": "" }, { "docid": "1dd1bdcb0a3fcfce40845467a7df0fcd", "score": "0.5601723", "text": "def __insert_status(tokens: str, status: str) -> str:\n if 'Status: Complete' in tokens:\n return tokens\n else:\n return tokens.replace('- id:', f'- {status} - id:')", "title": "" }, { "docid": "3b0f344fb6ab6cb83303b1b4d3254977", "score": "0.56009287", "text": "def BCStatusBarAddWidget(sb, w, permanent):", "title": "" }, { "docid": "a189847a4f56a4ba7610f01cd75121a5", "score": "0.5599851", "text": "def add(self, line: str):\n if isinstance(line, str):\n line = [line]\n self.msg.append(line)", "title": "" }, { "docid": "cdd6401c3ed236a4b23506c18abfef21", "score": "0.5598431", "text": "def add_status(self, state=None, stage=None):\n\n self._state = state or self._state\n self._stage = stage or self._stage\n\n if self._stage and self._progress != 1:\n stage_index = [m.__name__ for m in self._stage_list].index(self._stage)\n self._progress = round((stage_index + 1) / (len(self._stage_list) + 1), 2)\n\n self.db.jobs.update_one({\"_id\": self.id}, {\n \"$push\": {\n \"status\": {\n \"state\": self._state,\n \"stage\": self._stage,\n \"error\": self._error,\n \"progress\": self._progress,\n \"timestamp\": virtool.utils.timestamp()\n }\n }\n })\n\n self.dispatch(\"jobs\", \"update\", [self.id])", "title": "" }, { "docid": "c71e286a23c0f25ea51875164ce8a887", "score": "0.5598285", "text": "def status(self, status):\n self._status = status", "title": "" }, { "docid": "c71e286a23c0f25ea51875164ce8a887", "score": "0.5598285", "text": "def status(self, status):\n self._status = status", "title": "" }, { "docid": "c71e286a23c0f25ea51875164ce8a887", "score": "0.5598285", "text": "def status(self, status):\n self._status = status", "title": "" }, { "docid": "74aa097f4ad6b2a14f6cb89b7d1da721", "score": "0.559547", "text": "def _push_status(self, new_status):\n curstatus = self._status\n self._status = new_status\n try:\n yield\n finally:\n self._status = curstatus", "title": "" }, { "docid": "684441bda54164d8fdae78cecf1f1b6b", "score": "0.55899286", "text": "def SetStatus(*args, **kwargs):\n return _stc.StyledTextCtrl_SetStatus(*args, **kwargs)", "title": "" }, { "docid": "6bde5c62c602ac9f8a1e7c312cf3f65d", "score": "0.5565283", "text": "def _append_line(self, line, style):\n\n # only after 700 lines reset to 500 to not do it every time\n def last_line():\n return self.text.PositionToXY(self.text.LastPosition)[2]\n\n def last_visible_line():\n pos = self.text.HitTest((0, self.text.GetSize()[1]))[1]\n return self.text.PositionToXY(pos)[2]\n\n last = last_line()\n visible = last_visible_line()\n\n if self.text.GetNumberOfLines() > 700:\n print(\"Resetting to last 500 lines...\")\n self.text.Freeze()\n _tmp = self.text.GetValue()\n cur = _tmp.splitlines()\n new_lines = ['... 
earlier content discarded ...'] + cur[-500:]\n self.text.SetValue('\\n'.join(new_lines))\n if visible >= last:\n self.text.ShowPosition(self.text.GetLastPosition())\n self.text.Thaw()\n\n self.text.Freeze()\n insert = self.text.GetInsertionPoint()\n self.text.SetInsertionPointEnd()\n if style == 'bold':\n self.text.BeginBold()\n self.text.WriteText(line)\n if style == 'bold':\n self.text.EndBold()\n self.text.SetInsertionPoint(insert)\n if visible >= last:\n self.text.ShowPosition(self.text.GetLastPosition())\n self.text.Thaw()", "title": "" }, { "docid": "887278b181a6f79d6229e8e9f5848491", "score": "0.5564089", "text": "def add_status(self, label, msg_id, actor):\n\n with self.engine.begin() as con:\n query = f\"INSERT INTO securemessage.status(label, msg_id, actor) VALUES('{label}', '{msg_id}', '{actor}')\"\n con.execute(text(query))", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" }, { "docid": "d892fab9580da11953f1cf694d468f0b", "score": "0.5549683", "text": "def status(self, status):\n\n self._status = status", "title": "" } ]
17ba8b85a8254b519fe1b16970a97eb8
Transfers data from a file to an ndarray
[ { "docid": "3e85a6065442a3e646bad8126ed5e393", "score": "0.0", "text": "def extract_data(folder,file_name,lower,upper):\n\n csv_data = np.genfromtxt(\n fname=p(os.path.join(folder,file_name)),\n dtype=float,\n delimiter=','\n ,skip_header=1,\n usecols=range(lower,upper)\n )\n \n return csv_data", "title": "" } ]
[ { "docid": "1eda034ebc8d2c659548b8d7aa50c35f", "score": "0.6489842", "text": "def fread(f, n, dtype):\n if dtype is np.str:\n dt=np.uint8\n else:\n dt=dtype\n\n data_array=np.fromfile(f, dt, n)\n #data_array.shape=(n,1)\n return data_array", "title": "" }, { "docid": "1fa684eb4ffda8046c293b64c19da0ea", "score": "0.6466319", "text": "def read_data(path):\n name = osp.basename(path)\n if name == \"ind.cora.test.index\":\n out = np.genfromtxt(path, dtype=\"int64\")\n return out\n else:\n out = pickle.load(open(path, \"rb\"), encoding=\"latin1\")\n out = out.toarray() if hasattr(out, \"toarray\") else out\n return out", "title": "" }, { "docid": "c65421c1a5fd6f3cb02c914528e4cb74", "score": "0.6427434", "text": "def read_src_file(src_file):\r\n num_lon = 8 #found during data exploration inside of source .bin files, bytes data\r\n num_lat = get_bin_len_of_file(src_file)\r\n with open(src_file, 'rb') as f:\r\n tmp_arr = array.array(_TYP)\r\n tmp_arr.fromfile(f, num_lon*num_lat)\r\n return tmp_arr", "title": "" }, { "docid": "2edff222942bdfa170cc4dc6983f9b3f", "score": "0.63876796", "text": "def read_file(self, file_name) -> np.ndarray:\n def bytes2int(input_bytes):\n return int.from_bytes(input_bytes, byteorder='big')\n\n with open(file_name, 'rb') as file:\n first2bytes = bytes2int(file.read(2))\n assert first2bytes == 0 # The first 2 bytes are always 0\n\n data_type = bytes2int(file.read(1))\n assert data_type == 8 # 0x08: unsigned byte\n\n num_dimensions = bytes2int(file.read(1))\n shape = [bytes2int(file.read(4)) for _ in range(num_dimensions)] + [1] # make the num of channels to be 1\n\n matrix = np.frombuffer(file.read(), dtype=np.uint8).reshape(shape)\n\n return matrix", "title": "" }, { "docid": "c142dd169ec7c06412b184069777c4c5", "score": "0.63853693", "text": "def read_data(self, start: Optional[int]=None, end: Optional[int]=None) -> np.ndarray:\n nframe = self._find_nframe_from_file()\n seek_to_data(self.file_object)\n read_start = 0\n end_read = nframe * self.nifs * self.nchans\n if start is not None:\n if start < 0:\n read_start = (nframe + start) * self.nifs * self.nchans\n else:\n read_start = start * self.nifs * self.nchans\n if end is not None:\n if end < 0:\n end_read = (nframe + end) * self.nifs * self.nchans\n else:\n end_read = end * self.nifs * self.nchans\n self.file_object.seek(read_start, os.SEEK_CUR)\n nbytes_to_read = end_read - read_start\n data = np.fromfile(self.file_object, count=nbytes_to_read, dtype=self.dtype)\n nframe = data.size // self.nifs // self.nchans\n data = data.reshape((nframe, self.nifs, self.nchans))\n if self.nbits < 8:\n data = unpack(data, self.nbits)\n self.data = data\n return self.data", "title": "" }, { "docid": "e921157063ea01c8fed92e3d5ce2f224", "score": "0.62390435", "text": "def read_raw(filepath, grid_size):\n filepath = str(filepath)\n\n # Read bytes\n with open(filepath, \"rb\") as test:\n content = test.read()\n\n # Create np array and return\n data = np.frombuffer(content, dtype=\"float32\").reshape(grid_size)\n\n if sys.platform == \"darwin\":\n data = data.T # TODO figure out why this is necessary on Mac OS?\n\n return data", "title": "" }, { "docid": "dc891fbd2bb4958b5f584de05a7a3d3a", "score": "0.6232264", "text": "def loaddata(path):\r\n return np.load(path)", "title": "" }, { "docid": "09fd16aad0d3fb1dc0ecd1de7c201328", "score": "0.62277323", "text": "def data_load_to_array(file):\r\n with open(file, \"r\") as data_str:\r\n data_list = []\r\n for i in data_str:\r\n list_str = i.split(\"\\n\")\r\n del list_str[-1]\r\n 
data_list.append(list_str)\r\n \r\n data_array = []\r\n for line in data_list:\r\n data_array.append(np.array(line[0].split(\",\")))\r\n data_array = np.array(data_array)\r\n header = data_array[0]\r\n array_data_set = data_array[1:]\r\n array_data_set = array_data_set.reshape((len(array_data_set), len(header)))\r\n\r\n return array_data_set", "title": "" }, { "docid": "869ce02f1b9ce8a22f41347d3982b6e0", "score": "0.6153497", "text": "def read_from_file(filepath : str) -> ndarray:\n\n with open(filepath, 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "dd50036d035e449517d0271cdbf78082", "score": "0.61263454", "text": "def read_file(file_path: str) -> np.ndarray:\r\n file_extension = os.path.splitext(file_path)[-1]\r\n try:\r\n if file_extension == '.Poly5':\r\n data = cv.poly5unpad(file_path)\r\n elif file_extension == '.npy':\r\n data = np.load(file_path)\r\n else:\r\n raise Exception\r\n except:\r\n data = None\r\n\r\n return data", "title": "" }, { "docid": "a65824dc2793483ff984880c9afe2c83", "score": "0.6081729", "text": "def read_current_filepos(filehandle, dt='i', number=1):\n\n arr = array.array(dt)\n arr.fromfile(filehandle, number)\n arr = np.asarray(arr)\n\n #arr = np.fromfile(filehandle, dtype=dt, count=number)\n\n return arr", "title": "" }, { "docid": "08f42b101fb76fddcf93d09d0a3c8379", "score": "0.6061442", "text": "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image \n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "title": "" }, { "docid": "6ab9eaf0ecd881ee5ef6389a6c8030de", "score": "0.6050249", "text": "def read_npy(file):\n return np.load(file)", "title": "" }, { "docid": "8347a93184323482087153e21928f6e2", "score": "0.60484546", "text": "def read_as_array(nc_fname, var):\n with nc.Dataset(nc_fname, mode='r') as fcon:\n #fcon = nc.Dataset(nc_fname)\n data_array = fcon.variables[var][:]\n return np.fliplr(data_array)", "title": "" }, { "docid": "e1fcb74e163afecf0372972f73241371", "score": "0.60121393", "text": "def _read_at(self, offset, size, ntype):\n self._fid.seek(offset)\n result = np.fromfile(self._fid, ntype, int(size))\n return result", "title": "" }, { "docid": "ab6e8116bb7a12b466f8a5ccda0d7b41", "score": "0.5993909", "text": "def readFlirt(fname):\n return np.loadtxt(fname)", "title": "" }, { "docid": "e1a2871ffb13c7b885ecdc2e80f72c12", "score": "0.5986817", "text": "def fread(fid: _io.TextIOWrapper, 
nelements: int, dtype: str) -> np.array:\n\n if dtype is np.str:\n dt = np.uint8 # WARNING: assuming 8-bit ASCII for np.str!\n else:\n dt = dtype\n\n data_array = np.fromfile(fid, dt, nelements)\n data_array.shape = (nelements, 1)\n\n return data_array", "title": "" }, { "docid": "14bf9bafd85f0265bc58dd9011b6fe5f", "score": "0.59830904", "text": "def read_bin_data(fname):\n\n f = open(fname, \"rb\")\n raw = f.read()\n f.close()\n data = np.fromstring(raw, 'f')\n if sys.byteorder == 'big':\n data = data.byteswap()\n\n return data", "title": "" }, { "docid": "61a744b3376184da0b88880b30a03cac", "score": "0.5980732", "text": "def _convert_array(t):\n out = io.BytesIO(t)\n out.seek(0)\n return np.load(out)", "title": "" }, { "docid": "5a2e22008bc6e43747fca6c33efb1275", "score": "0.59739995", "text": "def _write_data(data: np.ndarray, nbit: int, file_object: IO[bytes]) -> np.ndarray:\n file_object.seek(0, 2)\n if nbit < 8:\n data = pack(data, nbit)\n data.tofile(file_object)", "title": "" }, { "docid": "db52ac635337845f67167ab83770a3c1", "score": "0.5973435", "text": "def load_data(filename):\n\n return converters.toarr(file_to_list(filename))", "title": "" }, { "docid": "3ad2b29fc81971874617ee49a7d15dbd", "score": "0.5971673", "text": "def load(inFile):\n fd = open(inFile, 'rb')\n data = np.fromfile(file=fd, dtype=np.float32)\n data = data.reshape(10,data.size/10).T\n return data", "title": "" }, { "docid": "92122f5bb5f8ae8b327d8a8c4e6e4094", "score": "0.5952342", "text": "def load_data(filename):\n return np.loadtxt(filename, delimiter=',')", "title": "" }, { "docid": "3782dc7ffb79eadca4ef59d86cfc9e0a", "score": "0.59436995", "text": "def read_array(path):\n \n hf_object = h5py.File(path, 'r')\n ret_obj = np.array(hf_object[\"array\"])\n hf_object.close()\n return ret_obj", "title": "" }, { "docid": "d8830222d592700d2d902f66924f5439", "score": "0.5923683", "text": "def get_ndarray():\r\n # Specify the filepath.\r\n pwd = dirname(__file__)\r\n filepath = join(pwd, 'cs5033_fall2017_assign04_data.csv')\r\n # generate ndarray from the data file.\r\n hw_data = genfromtxt(filepath, delimiter=',', skip_header=1)\r\n return hw_data", "title": "" }, { "docid": "6c246da86245d46cd7194134f99bcd88", "score": "0.5916734", "text": "def read_1D(file_path):", "title": "" }, { "docid": "5ee5bce8f89982e511e076bbaa2f4a5b", "score": "0.5896196", "text": "def read_file(self, path):\n return np.array(h5.File(path,'r')[\"train_X\"])", "title": "" }, { "docid": "fe2e5d4bfeaefdbaef905d87f0f10efb", "score": "0.58844554", "text": "def read_data(infile):\n return gdal.Open(infile, gdal.GA_ReadOnly).ReadAsArray()", "title": "" }, { "docid": "e960e73f90ac80e11ce3bbbca9c49305", "score": "0.5876342", "text": "def _load_data(self, filename, offset):\n\n # Download the file from the internet if it does not exist locally.\n download(base_url=base_url, filename=filename, download_dir=self.data_dir)\n\n # Read the data-file.\n path = os.path.join(self.data_dir, filename)\n with gzip.open(path, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=offset)\n\n return data", "title": "" }, { "docid": "13b85024bf496d81a186da1ac087994d", "score": "0.58671063", "text": "def get_task_data(file_name: str) -> np.ndarray:\n return pd.read_csv(file_name, delimiter=\" \", header=None).to_numpy()", "title": "" }, { "docid": "6f24ba909efe0ac95a44798391bda23f", "score": "0.58639306", "text": "def load_data(filename):\n data = loadarff(open(filename, 'r'))[0]\n return np.array([list(row) for row in data])", "title": "" }, { "docid": 
"5446fa2249a64c9bacef9fe02948c5ea", "score": "0.5863841", "text": "def _data_from_rec(rec_fileobj, in_shape, dtype, slice_indices, out_shape,\n scalings=None, mmap=True):\n rec_data = array_from_file(in_shape, dtype, rec_fileobj, mmap=mmap)\n rec_data = rec_data[..., slice_indices]\n rec_data = rec_data.reshape(out_shape, order='F')\n if scalings is not None:\n # Don't do in-place b/c this goes int16 -> float64\n rec_data = rec_data * scalings[0]\n rec_data += scalings[1]\n return rec_data", "title": "" }, { "docid": "8869ebd16870e16fb827bf91d413bc50", "score": "0.5844674", "text": "def load_data(filename):\n data = np.array([[float(x) for x in line.strip().split(',')] for line in open(filename).readlines()])\n print('Loaded %d observations.'%len(data))\n return data", "title": "" }, { "docid": "9c3c7627683998bd06c62cbc23cab86d", "score": "0.58379817", "text": "def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)", "title": "" }, { "docid": "9f871840267f2bfa293fcf4d2f43c3f1", "score": "0.583579", "text": "def read_file(file=None, header=0):\n with open(file, 'r') as fr:\n op = np.array([list(map(float, i.split())) for i in fr.readlines()[header:]])\n return op", "title": "" }, { "docid": "d2af12682b4e348494f1b3f277cdd2bb", "score": "0.58333564", "text": "def load_data(self):\n self.data = io.imread(self.filename)", "title": "" }, { "docid": "8d21d99a9cf8050284782fb173a49a2c", "score": "0.5830965", "text": "def read_data(filepath, d = ','):\n return np.asmatrix(np.genfromtxt(filepath, delimiter = d, dtype = None))", "title": "" }, { "docid": "ad7ed686611a9bd31eb2fb2013aaacf6", "score": "0.5812681", "text": "def read_file(filename):\n data_type = current_data_types()\n\n # Data will be added to this with time\n all_data = numpy.empty((0, ), dtype=data_type)\n\n with open(filename, 'r') as f:\n file_data = f.read().split('\\n')\n lines = [line.split(',') for line in file_data][:-1]\n for line in lines:\n all_data = numpy.append(all_data, numpy.array(\n [tuple(line)], dtype=data_type), axis=0)\n return all_data", "title": "" }, { "docid": "b997384a9e4a6fb71ec1a7ed81756d50", "score": "0.5804369", "text": "def load_bin_file(file_name, shape, row_major=True):\n try:\n item_vec = np.fromfile(file_name, dtype='<f4')\n except IOError as error:\n print(\"File_name is %s\" % file_name)\n raise error\n\n # item = torch.from_numpy(item_vec.reshape(shape[0], shape[1]))\n item = item_vec.reshape(shape[0], shape[1]) if row_major else item_vec.reshape(shape[1], shape[0]).transpose()\n # item = item.unsqueeze(0)\n # item[torch.isnan(item)] = 0\n return item", "title": "" }, { "docid": "526795578a021a3153eb15c8ebfb2c2c", "score": "0.57892346", "text": "def dump_raw_data(filename, data):\n if data.ndim == 3:\n # Begin 3D fix\n data = data.reshape([data.shape[0], data.shape[1]*data.shape[2]])\n # End 3D fix\n\n a = array.array('f')\n for o in data:\n a.fromlist(list(o.flatten()))\n\n # if is_little_endian():\n # a.byteswap()\n\n with open(filename, 'wb') as rawf:\n a.tofile(rawf)", "title": "" }, { "docid": "3e7735c92a41fb6638cb324a6851846c", "score": "0.5778771", "text": "def load(filename, shape=()):\n arr = np.loadtxt(filename, skiprows=1).flatten().reshape(shape)\n return arr", "title": "" }, { "docid": "cdead8ca3d6411e3b8bbdd808023acb8", "score": "0.5774117", "text": "def load_data(file_name):\n result = None\n with open(file_name, 'r') as fp:\n lines = fp.readlines()\n result = np.loadtxt(lines, 
dtype=float)\n return result", "title": "" }, { "docid": "cbc8dbbcf15c89990a262a62c4c15730", "score": "0.57739234", "text": "def _readpos(f,npfile):\n \n thispos = np.fromfile(f,np.float32,3*npfile)\n thispos = np.reshape(thispos, [npfile, 3])\n \n return thispos", "title": "" }, { "docid": "13293c37067eb1bd446138834b60474f", "score": "0.5766871", "text": "def transform(self, fpath: str) -> np.ndarray:\n raise NotImplementedError", "title": "" }, { "docid": "6fabea29134231ffe7498049a2020fa1", "score": "0.576122", "text": "def dem_file_read(self, file_path):\n with open(file_path, 'rb') as handle:\n dem_array_data = pickle.load(handle)\n return dem_array_data", "title": "" }, { "docid": "9bac8db31008e236479f1f2d742ca458", "score": "0.57500196", "text": "def _npy_loads(data):\n stream = BytesIO(data)\n return np.load(stream)", "title": "" }, { "docid": "c379943a6ad719532f4f061b79cf3099", "score": "0.57498777", "text": "def read_to_numpy_array(input_path, data_skip_lines):\n start = datetime.datetime.utcnow()\n logging.info('Reading the input file into a numpy array')\n data = np.loadtxt(open(input_path.strip()),delimiter=\",\",\n skiprows=data_skip_lines)\n end = datetime.datetime.utcnow()\n logging.info('read {} in: {}'.format(input_path, (end - start)))\n return data", "title": "" }, { "docid": "819c7e0fafc9c9b366bf971cf1f0e7cd", "score": "0.5748381", "text": "def loadrawdata(filename):\n\twith open(filename, 'rb') as fid:\n\t\tdatafile = np.fromfile(fid, dtype=np.int16) #get frames\n\treturn datafile", "title": "" }, { "docid": "df6cce4bd2439a921e7ddae19dc9c14d", "score": "0.57432765", "text": "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "title": "" }, { "docid": "baf3871222e58d24687f2fcf9df0b150", "score": "0.5738596", "text": "def read_data(file_in):\r\n\r\n # Magic code\r\n magic = file_in.read(4)\r\n magic = [ord(_) for _ in magic]\r\n if len(magic) != 4 or magic[0] != 0 or magic[1] != 0:\r\n raise RuntimeError(\"Invalid magic number: [{}]\".format('-'.join(['{:02x}'.format(_) for _ in magic])))\r\n\r\n # Type code\r\n type_code = magic[2]\r\n dtype_map = {\r\n 0x08: np.uint8,\r\n 
0x09: np.int8,\r\n 0x0B: np.int16,\r\n 0x0C: np.int32,\r\n 0x0D: np.float32,\r\n 0x0E: np.float64,\r\n }\r\n dtype = dtype_map[type_code]\r\n\r\n # Dimensions\r\n ndim = magic[3]\r\n\r\n dims = []\r\n for idim in range(ndim):\r\n dim, = struct.unpack('>I', file_in.read(4))\r\n dims.append(dim)\r\n\r\n # Data\r\n data = file_in.read()\r\n data = np.fromstring(data, dtype=dtype).reshape(tuple(dims))\r\n\r\n return data", "title": "" }, { "docid": "e2807e5d94976c83e8eb9db057c0d1b8", "score": "0.5733975", "text": "def read(path):\n with open(path) as f:\n return np.array([[float(field) for field in line.split('\\t')]\n for line in [line.strip() for line in f]])", "title": "" }, { "docid": "8c90ce2cf9b80df0a2b759a60b30c31f", "score": "0.57330155", "text": "def load_numpy_data(datafile):\n data = np.load(datafile)\n c0 = data['c0']\n c = data['c']\n u = data['u']\n y = data['y']\n return c0, u, c, y", "title": "" }, { "docid": "4150cf074624b4fb9aa4f4586bf2ebe3", "score": "0.5732759", "text": "def csv_open(fname):\n file = pd.read_csv(fname)\n file = np.array(file)\n return file", "title": "" }, { "docid": "66b697b45894ad9812df8775e298b886", "score": "0.5715058", "text": "def read_file(data, data_file):\n datafile = open(data_file, \"r\")\n next(datafile)\n file_data = datafile.readlines()\n for line in file_data:\n array = line.strip().split(',')\n data.append(array)\n datafile.close()", "title": "" }, { "docid": "7c15d499e93aa74b37235fd2e11ea1e1", "score": "0.5703847", "text": "def read_file_to_array(path):\n arr = None\n with open(path) as file:\n arr = file.readlines()\n return arr", "title": "" }, { "docid": "810fd77245914f4ec51ce0176a42742b", "score": "0.57016015", "text": "def read_some(fp, typecode, n):\n data = array.array(typecode)\n data.fromfile(fp, n)\n if big_endian: data.byteswap()\n return data", "title": "" }, { "docid": "e27355246aa86656bdf6c965c5c87da8", "score": "0.5699507", "text": "def load_npy(path):\n data = numpy.load(path)\n if str(data.dtype) == 'object':\n data = data.item()\n return data", "title": "" }, { "docid": "067550f2ad14f58761d12925cf2065bd", "score": "0.5699104", "text": "def read_data(filepath, d = ','):\n return np.genfromtxt(filepath, delimiter=d, dtype=None)", "title": "" }, { "docid": "31ed2e1c024d21b7b448438334840d97", "score": "0.5698474", "text": "def get_real_data(filename):\n real_data = np.loadtxt(filename, usecols=(0, 1, 2))\n #real_data[:, 1] /= real_data[0, 1]\n return real_data", "title": "" }, { "docid": "cdab7b0a5ecfb91deb35e89a2dc0d5c1", "score": "0.5688721", "text": "def read_data(filepath):\n X = []\n y = []\n data = open(filepath).readlines()\n for i in data:\n row = i.strip().split()\n row = [float(i) for i in row]\n\n X.append(row[:-1])\n y.append(int(row[-1]))\n\n X = np.array(X)\n return X, y", "title": "" }, { "docid": "e4df0b7501d9dfa23a97d6c1b9884dcb", "score": "0.5686982", "text": "def _open_file(f) -> np.ndarray:\n #Tried Pillow/PIL, cv2, scipy, imageio\n reader = png.Reader(f)\n ny, nx, image_gen, *_ = reader.read()\n data = np.array(list(map(np.uint16, image_gen)))\n data = data.reshape(nx, ny, 3)\n shifted_data = np.right_shift(data, 4)\n\n return shifted_data", "title": "" }, { "docid": "59706c02e0d2f80f2f5978123bd6049f", "score": "0.5685405", "text": "def load_data(path):\n d = np.loadtxt(path, np.float32, delimiter = ',', skiprows = 1)\n return d[:, 1:]", "title": "" }, { "docid": "94801e3d3623134deec24f3921a1b648", "score": "0.5678481", "text": "def memory_file2ndarray(\n memfile: io.BytesIO\n ) -> np.ndarray:\n if memfile 
is None:\n return None\n return np.asarray(Image.open(memfile))", "title": "" }, { "docid": "3fd8f38327e6ef44e883ea3a05411f86", "score": "0.5677021", "text": "def scn_read_data(fname, header):\n\n tint = array ('f') # 4 byte float\n iampl = array ('h') # 2 byte integer\n iprops = array('b') # 1 byte integer\n\n f=open(fname, 'rb')\n f.seek(header['ioffset']-1)\n tint.fromfile(f, header['nint'])\n iampl.fromfile(f, header['nint'])\n iprops.fromfile(f, header['nint'])\n f.close()\n \n if header['iscanver'] > 0:\n gapnotfound = True\n while gapnotfound:\n if iampl[-1] == 0:\n gapnotfound = False\n iprops[-1] = 8\n else:\n tint.pop()\n iampl.pop()\n iprops.pop()\n \n return np.array(tint)*0.001, np.array(iampl), np.array(iprops)", "title": "" }, { "docid": "f6827c52c5e06da64662d91c922c94d5", "score": "0.5670782", "text": "def get_file(self, path):\n f = open(path, \"r\")\n lines = f.readlines()\n f.close()\n # Set values\n self.x = np.array([float(line.strip().split()[0]) for line in lines])\n self.y = np.array([float(line.strip().split()[1]) for line in lines])", "title": "" }, { "docid": "64f5b3668cf99c65bf9fa53ed428d4f2", "score": "0.5664696", "text": "def read_file(self, path, **kwargs):\n if \"verbose\" in kwargs:\n verbose = kwargs[\"verbose\"]\n else:\n verbose = False\n # read data\n data = np.loadtxt(path, ndmin=2)\n if data.shape[1] == 10:\n self.typ = \"TIN\"\n self.data = data\n elif data.shape[1] == 3:\n self.typ = \"POINT_VECTOR\"\n self.data = data\n elif verbose:\n print(\"Gli external: File data not valid\")", "title": "" }, { "docid": "bcefa8327680108960e3f3f4718791cc", "score": "0.56501156", "text": "def set_data_2_file(file,array):\n f = open(file,\"wb+\")\n for i in range(len(array)):\n f.write(chr(array[i] & 0xff))\n f.close()", "title": "" }, { "docid": "951fe0c9d4b4096321d2a96e8d79aebd", "score": "0.56492496", "text": "def _load(self) -> np.ndarray:\n # using get_filepath_str ensures that the protocol and path are appended correctly for different filesystems\n load_path = get_filepath_str(self._filepath, self._protocol)\n \n ts_file = load_from_tsfile_to_dataframe(load_path)\n return ts_file", "title": "" }, { "docid": "106bb279e4a1cef2c84473bca641fcc1", "score": "0.5643873", "text": "def read_data(fdir, fname, vector_size):\n\twith open(fdir+fname, 'r') as f:\n\t\tdata = f.readlines()\n\tdata = [ x.strip().split(' ') for x in data ] # strip \\n and split using ' '\n\tdata = [ [float(y) for y in x] for x in data ] # convert all values to ints\n\tdata = [ (np.array(x[:vector_size]), x[vector_size]) for x in data ] # convert to tuple\n\treturn data", "title": "" }, { "docid": "12c6055963c3b666f3472e5c91cf6277", "score": "0.5634988", "text": "def data_in_from_npy(path):\n\n datain = np.load(path)\n row = len(datain)\n tem = datain[0].decode('UTF-8').split(',')\n column = len(tem)\n\n dataout = np.empty([row - 1, column])\n\n for i in range(row - 1):\n tem = datain[i + 1].decode('UTF-8').split(',')\n for j in range(column):\n if tem[j] == 'NA':\n tem[j] = np.nan\n # dataout[i][j] = tem[j]\n dataout[i, :] = tem\n\n return dataout", "title": "" }, { "docid": "b4e687465fb43d308f59f51ebff81907", "score": "0.5634771", "text": "def load_data(self, file=None):\n if file is None:\n dfile = h5py.File(self.filename, 'r')\n exp_data = np.array(dfile[self.key])\n dfile.close()\n else:\n exp_data = np.array(file[self.key])\n return self._extract_data(exp_data)", "title": "" }, { "docid": "2082bd933891a556f2cf78d84f794dde", "score": "0.5632736", "text": "def load_npy(fp: str):\n data = 
np.load(fp, allow_pickle=False)\n return data", "title": "" }, { "docid": "3981501d8aeb69c1d51d40274d64060b", "score": "0.56298006", "text": "def read_transition_matrix(filename):\n d = np.genfromtxt(filename)\n return d", "title": "" }, { "docid": "a07e443b2148110ae2e923862cf9093a", "score": "0.56255215", "text": "def np_load(file_path):\n log.info('Loading %s' % file_path)\n start = time.time()\n data = np.load(file_path)\n log.info('Loading took %d seconds' % (time.time() - start))\n\n return data", "title": "" }, { "docid": "47ed3da0307ac7b52f399767baad570a", "score": "0.56233007", "text": "def _read_asc_data(asc_file):\n return np.loadtxt(asc_file)", "title": "" }, { "docid": "e215e7db91eb42116a418725e4fea9b9", "score": "0.5611776", "text": "def load_file(fname):\r\n data = np.load(fname).T.astype(float)\r\n X = None\r\n if data.shape[1] >= size:\r\n X = data[0:size,0:size].reshape((1,size,size,1))\r\n for i in range(1, data.shape[1] // size):\r\n cut = data[0:size,i*size:(i+1)*size].reshape((1,size,size,1))\r\n X = np.concatenate((X, cut), axis=0)\r\n return X", "title": "" }, { "docid": "90df1c15edba346b433d2db9ae95ef73", "score": "0.5603703", "text": "def load_real(filename, ann_info=None, rsc_data=None):\n data = np.fromfile(filename, FLOAT_32_LE)\n rows, cols = _get_file_rows_cols(ann_info=ann_info, rsc_data=rsc_data)\n _assert_valid_size(data, cols)\n return data.reshape([-1, cols])", "title": "" }, { "docid": "2721800b4361022585609b41bd35bfb3", "score": "0.5598822", "text": "def load_input_data(file_path, indicies, seq_len=3):\n # load h5 file into memory.\n fr = h5py.File(file_path, 'r')\n data = fr['array'].value\n fr.close()\n\n # get relevant training data pieces\n data = [data[y-seq_len:y] for y in indicies]\n data = np.stack(data, axis=0)\n\n # type casting\n data = data.astype(np.float32)\n return data", "title": "" }, { "docid": "2769ee83b6ab0e2a8a3d7ff6b691968e", "score": "0.5595951", "text": "def to_ndarray(self):", "title": "" }, { "docid": "3be372035da6e626f671945292bdabac", "score": "0.5593107", "text": "def load_binary_data(filename, dtype=np.float32): \n\tf = open(filename, \"rb\") \n\tdata = f.read() \n\tf.close() \n\t_data = np.fromstring(data, dtype) \n\tif sys.byteorder == 'big':\n\t\t_data = _data.byteswap()\n\treturn _data", "title": "" }, { "docid": "3e925c3ffba6d7dadb07c8222015fd64", "score": "0.55927885", "text": "def read_npz(path):\n data = np.load(path)[\"arr_0\"]\n return data", "title": "" }, { "docid": "d56e4d97d19172f13fa3efb47776fb11", "score": "0.5587738", "text": "def grok_data(self):\n entity = self.entity\n try:\n filename = entity.data.filename.lower()\n except AttributeError:\n data = entity.data\n if isinstance(data, Binary):\n return True\n # if not isinstance(data, numpy.ndarray):\n # raise TypeError('data is neither a Binary nor a numpy array (%s)' % type(data))\n numpy_array = data\n else:\n adapter = self._cw.vreg['adapters'].select_or_none('source_to_numpy_array',\n self._cw, entity=entity, filename=filename)\n if adapter is None:\n msg = self._cw._('Unsupported file type %s') % entity.data.filename\n raise ValidationError(entity.eid, {'data': msg})\n numpy_array = adapter.to_numpy_array(entity.data, filename)\n\n if numpy_array.ndim != 1:\n raise ValidationError(entity.eid,\n {'data': _('data must be a 1-dimensional array')})\n if numpy_array.size == 0:\n raise ValidationError(entity.eid,\n {'data': _('data must have at least one value')})\n data = Binary()\n compressed_data = zlib.compress(pickle.dumps(numpy_array, protocol=2))\n 
data.write(compressed_data)\n entity.cw_edited['data'] = data\n entity.array = numpy_array\n return False", "title": "" }, { "docid": "b98961d18606922171a9fe75360cf267", "score": "0.55826956", "text": "def convert_file(nc_file,zarr_dir):\n with Dataset(nc_file) as ncin:\n store = zarr.DirectoryStore(zarr_dir)\n the_group=zarr.hierarchy.open_group(store=store, mode='w', synchronizer=None, path=None)\n for varname in ncin.variables.keys():\n print(f'variable: {varname}')\n the_var=ncin.variables[varname][...]\n if the_var.shape==4:\n the_var=the_var.squeeze()\n the_group.array(varname,the_var,shape=the_var.shape,dtype=the_var.dtype,\n compressor=zarr.Blosc(cname='zlib', clevel=5),chunks=None)", "title": "" }, { "docid": "2e1cc07037513d545847b730f07e4262", "score": "0.5580658", "text": "def read_poses_from_disk(file_path: str, _delimiter: str = delimiter()) -> np.ndarray:\n path = Path(file_path)\n assert_debug(path.exists() and path.is_file())\n return df_to_poses(pd.read_csv(path, sep=_delimiter, index_col=None))", "title": "" }, { "docid": "d4d56718d0cc7ccf616014cf3e9ca6bf", "score": "0.55805665", "text": "def _load_npy(filename):\n with open(filename, 'rb') as fid:\n return np.load(fid)", "title": "" }, { "docid": "3e9cab46145b701e006360ae8648578a", "score": "0.5573112", "text": "def load_img(img_file: str) -> np.ndarray:\n img = fabio.open(img_file).data\n return img", "title": "" }, { "docid": "0300886ba5d22ef19bd77f7e4746c032", "score": "0.55694044", "text": "def fn_load_array(file_list):\n import numpy as np\n \n for i in range(len(file_list)):\n data = np.genfromtxt(file_list[i], delimiter='\\t', dtype=np.str, skip_header=1)\n \n return data", "title": "" }, { "docid": "c84fb5ca29fcc3aeda1ece8d4e7d3f6d", "score": "0.55642384", "text": "def load_bin(fname, size, add_channel_axis=False):\n with open(fname, 'rb') as fd:\n databuf = fd.read()\n except_msgs = []\n for type in ['f', 'd']:\n try:\n arr = np.array(struct.unpack(type*np.product(size), databuf)).reshape(size[::-1])\n break\n except Exception as e:\n except_msgs.append(str(e))\n continue\n if arr is None:\n raise Exception(\"\\n\".join(except_msgs))\n if add_channel_axis:\n arr = np.expand_dims(arr, axis=arr.ndim)\n return arr", "title": "" }, { "docid": "2423cdc3514e096e5102b65d5b1592a7", "score": "0.55481476", "text": "def load_array(data_file: str, minrows=10, **kwargs) -> np.ndarray:\n return loadData(data_file, minrows=minrows, **kwargs).T", "title": "" }, { "docid": "78e65f994e5df0d1fb53fb6c102da9dc", "score": "0.55309725", "text": "def read_raw(self, file, mask=True):\n # ic()\n with open(file, 'rb') as im:\n # ic('file opened')\n arr = np.fromstring(im.read(), dtype='int32')\n # ic('converted to np')\n arr = arr.reshape((195, 487))\n if mask:\n for i in range(0, 10):\n arr[:,i] = -2.0\n for i in range(477, 487):\n arr[:,i] = -2.0\n return arr.T", "title": "" }, { "docid": "65890e92b8dfd099a3e22fd6102c1a99", "score": "0.5524632", "text": "def parse_into_array(path):\n if path.endswith(\".npy\"):\n return np.load(path)\n return np.array(json.load(open(path)))", "title": "" }, { "docid": "e1812156a7e7a41fbcdd4116bfc9f9db", "score": "0.55214864", "text": "def ssd_read_data(filename, h):\n\n fid = open(filename, 'rb')\n fid.seek(h['ioff'])\n trace = np.fromfile(fid, 'h')\n fid.close()\n\n return trace", "title": "" }, { "docid": "9ff42810cedec98d90228a37b6b6c2c3", "score": "0.5519079", "text": "def store_npy(fp: str, data: np.ndarray):\n np.save(fp, data, allow_pickle=False)", "title": "" }, { "docid": 
"91668c535592d313f257d81d24bff3bd", "score": "0.5514996", "text": "def read_h5data(self):\n\n import h5py\n import numpy as np\n\n d = h5py.File(self.h5file)\n self.data = np.array(d[self.dataname])", "title": "" }, { "docid": "9aeebcde0bab29c0ffd5974a87f8c556", "score": "0.5510589", "text": "def readdata(filename):\n\tf = open(filename, 'rb')\n\treader = csv.reader(f, delimiter=',')\n\tnin = int(reader.next()[0])\n\tnout = int(reader.next()[0])\n\tinputs = []\n\toutputs = []\n\tfor row in reader:\n\t\tinputs.append(map(float, row[:-1]))\n\t\toutputs.append(map(float, reader.next()[:-1]))\n\tf.close()\n\tassert np.alltrue(np.array(map(len, inputs)) == nin)\n\t#assert np.alltrue(np.array(map(len, outputs)) == nout)\n\tinputs = np.array(inputs)\n\toutputs = np.array(outputs)\n\tassert len(inputs) == len(outputs)\n\tndp = len(inputs)\n\tprint \"Read %s\\n(datapoints : %i, inputs : %i, outputs : %i)\" % (filename, ndp, nin, nout)\n\t\n\treturn (inputs, outputs)", "title": "" }, { "docid": "5cbdc1d04d089053f346e0b59490d7a9", "score": "0.54953104", "text": "def read_arr_from_itk(file_name):\n file_name = os.path.abspath(file_name)\n dset_itk = sitk.ReadImage(file_name)\n dset_arr = sitk.GetArrayFromImage(dset_itk)\n dset_arr = dset_arr.astype(np.float32)\n \n return dset_arr", "title": "" }, { "docid": "2dc2c4abfc1066b30b7893c187fe3b56", "score": "0.54873204", "text": "def _data_reader(file_path, dim):\n file_name, file_ext = os.path.splitext(file_path)\n if file_ext == '.wav':\n sr, data = nii_wav_tk.waveReadAsFloat(file_path)\n else:\n data = nii_io_tk.f_read_raw_mat(file_path, dim)\n return data", "title": "" }, { "docid": "4e6350958e1d1655ab2f0a9f7e29d63f", "score": "0.5475986", "text": "def OpenAsArray(fh, bandnumber=1, dtype='float32', nan_values=False, print_job=False):\n print('WaPOR GIS: Opening file...')\n checkMemory('OpenAsArray Start')\n\n datatypes = {\n \"uint8\": np.uint8, \"int8\": np.int8,\n \"uint16\": np.uint16, \"int16\": np.int16, \"Int16\": np.int16,\n \"uint32\": np.uint32, \"int32\": np.int32, \"Int32\": np.int32,\n \"float32\": np.float32, \"float64\": np.float64,\n \"Float32\": np.float32, \"Float64\": np.float64,\n \"complex64\": np.complex64, \"complex128\": np.complex128,\n \"Complex64\": np.complex64, \"Complex128\": np.complex128, }\n\n DataSet = gdal.Open(fh, gdal.GA_ReadOnly)\n checkMemory('OpenAsArray Opened')\n\n Type = DataSet.GetDriver().ShortName\n if Type == 'HDF4':\n Subdataset = gdal.Open(DataSet.GetSubDatasets()[bandnumber][0])\n NDV = int(Subdataset.GetMetadata()['_FillValue'])\n else:\n Subdataset = DataSet.GetRasterBand(bandnumber)\n NDV = Subdataset.GetNoDataValue()\n\n if print_job:\n print('WaPOR GIS: Band DataType : {v}'.format(\n v=Subdataset.DataType))\n print('WaPOR GIS: Band DataTypeName : {v}'.format(\n v=gdal.GetDataTypeName(Subdataset.DataType)))\n print('WaPOR GIS: NoDataValue : {v}, {t}'.format(\n v=NDV, t=type(NDV)))\n\n Array = Subdataset.ReadAsArray().astype(datatypes[dtype])\n # Array = Subdataset.ReadAsArray()\n if print_job:\n print('WaPOR GIS: Band Array dtype : {v} {sp} {sz}'.format(\n v=Array.dtype.name, sp=Array.shape, sz=Array.size))\n checkMemory('OpenAsArray Loaded')\n\n if nan_values:\n Array[Array == NDV] = np.nan\n\n DataSet = None\n checkMemory('OpenAsArray End')\n return Array", "title": "" }, { "docid": "cf062feff2dec43194395faa6cc8a2ee", "score": "0.5470581", "text": "def readData(self):\n data = np.loadtxt(self.filename,dtype=str, skiprows=2)\n print('loaded')\n self.transition+=1\n\n #empty the Numpy arrays\n if 
self.counter == 1:\n self.time = np.delete(self.time,[i for i in range(len(self.time))])\n self.magnitude = np.delete(self.magnitude ,[i for i in range(len(self.magnitude))])\n\n\n for row in data:\n initial_time,initial_reading = row.split(\",\")\n initial_time=float(initial_time)\n initial_reading=float(initial_reading)\n self.time = np.insert(self.time, len(self.time), initial_time)\n self.magnitude = np.insert(self.magnitude, len(self.magnitude), initial_reading)\n if self.transition == 1:\n self.time1 = np.insert(self.time1, len(self.time1), self.time)\n self.magnitude1 = np.insert(self.magnitude1, len(self.magnitude1), self.magnitude)\n print(\"Ayooo\")\n elif self.transition == 2:\n self.time2 = np.insert(self.time2, len(self.time2), self.time)\n self.magnitude2 = np.insert(self.magnitude2, len(self.magnitude2), self.magnitude)\n elif self.transition ==3:\n self.time3 = np.insert(self.time3, len(self.time3), self.time)\n self.magnitude3 = np.insert(self.magnitude3, len(self.magnitude3), self.magnitude)\n\n\n print('ready')\n self.mode = 'play'\n self.viewData()", "title": "" }, { "docid": "12889664e8fdaaf48197dfc815f6f896", "score": "0.5469703", "text": "def fromfile(self, f, out=None, dtype=np.float):\n if out is None:\n res = np.zeros((self.natoms+1, self.FNAN_mx+1, self.Total_NumOrbs_mx, self.Total_NumOrbs_mx), dtype=dtype)\n else :\n res = out\n\n for ct_AN in range(1,self.natoms+1):\n for h_AN in range(0,self.FNAN[ct_AN]+1):\n for i in range(self.Total_NumOrbs[ct_AN]):\n c = self.Total_NumOrbs[self.natn[ct_AN,h_AN]]\n res[ct_AN,h_AN,i,0:c] = np.fromfile(f, count=c)\n \n return res", "title": "" }, { "docid": "5aac2460ed2eec14718a6408f84e03e4", "score": "0.54692036", "text": "def read_file(file_name):\n\n if file_name.endswith('.npy'):\n data = np.load(file_name)\n\n elif file_name.endswith(('.fits', '.fit', '.FITS', '.FIT')):\n data = read_from_fits(file_name)\n\n else:\n raise ValueError(('Invalid file extension [{}]. Files must be FITS or '\n 'numpy binary.').format(splitext(file_name)[-1]))\n\n check_data_format(data, [2, 3])\n\n return data", "title": "" } ]
a99170ae8bd5f9324da146051ce93030
Returns an info string about the resolution matrix.
[ { "docid": "0424d44b77317e85bf933553aae932ff", "score": "0.6297488", "text": "def __str__(self):\n info = self.par.copy()\n if self.par['kfix'] == 1:\n info['efixstr'] = 'fixed incident energy k_i = %2.4f A-1' % self.par['k']\n else:\n info['efixstr'] = 'fixed final energy k_f = %2.4f A-1' % self.par['k']\n info['q0'] = self.q0\n if not self.ERROR:\n mat = self.NP.tolist()\n info['mat'] = '\\n'.join(''.join(('%5.2f' % mat[i][j]).rjust(10)\n for j in range(4)) for i in range(4))\n # Calculate Bragg width in direction of scan and for w-scan, and\n # corresponding Lorentz factors\n bragw = self.calcBragg()\n info['brqx'], info['brqy'], info['brqz'], info['brva'], info['brde'] = bragw\n info['R0'] = self.R0\n\n p1 = \"\"\"\\\nResolution matrix for a triple axis spectrometer calculated by the Popovici method:\n\nSpectrometer Setup:\n===================\nd-spacings: dm = %(dm)1.4f da = %(da)1.4f\nmosaic : etam = %(etam)1.4f etas = %(etas)1.4f etaa = %(etaa)1.4f\ns-sense : sm = %(sm)-4i ss = %(ss)-4i sa = %(sa)-4i\nalpha 1->4: %(alpha1)i-Mono-%(alpha2)i-Sample-%(alpha3)i-Ana-%(alpha4)i (horizontal collimation)\nbeta 1->4: %(beta1)i-Mono-%(beta2)i-Sample-%(beta3)i-Ana-%(beta4)i (vertical collimation)\n\nSample Parameters:\n==================\nLattice information:\na b c alpha beta gamma\n%(as)2.3f A %(bs)2.3f A %(cs)2.3f A %(aa)3.1f deg %(bb)3.1f deg %(cc)3.1f deg\n\nScattering plane:\nAX AY AZ BX BY BZ\n%(ax)1.3f %(ay)1.3f %(az)1.3f %(bx)1.3f %(by)1.3f %(bz)1.3f\n\n%(efixstr)s\nreciprocal space position: qh = %(qx)1.3f qk = %(qy)1.3f ql = %(qz)1.3f (r.l.u.) en = %(en)2.3f (meV)\nscattering vector Q = %(q0)2.5f A-1\n\nResolution Info:\n================\n\"\"\" % info\n if self.ERROR:\n p2 = 'ERROR: ' + self.ERROR\n else:\n p2 = \"\"\"\\\n=> Resolution Volume: R0 = %(R0)3.3f A-3*meV\n\n=> Resolution Matrix (in frame Qx, Qy, Qz, E):\n%(mat)s\n\n=> Bragg widths:\n Qx Qy Qz (A-1) Vanadium dE (meV)\n %(brqx)1.5f %(brqy)1.5f %(brqz)1.5f %(brva)1.5f %(brde)1.5f\n\"\"\" % info\n return p1 + p2", "title": "" } ]
[ { "docid": "a31e39add72e9642275dadc81b8580c8", "score": "0.69172955", "text": "def get_info(self):\n size_unit = self.get_display_grid_unit()\n pxy = self.grid.get_pixel_size().coordinates\n if size_unit is not None:\n if isinstance(size_unit, units.UnitBase):\n size_value = 1.0\n else:\n size_value, size_unit = size_unit.value, size_unit.unit\n px, py = (pxy.to(size_unit) / size_value).value\n unit_str = f' {size_unit}'\n elif (isinstance(pxy, units.Quantity)\n and pxy.unit\n != units.dimensionless_unscaled): # pragma: no cover\n unit_str = f' {pxy.unit}'\n px, py = pxy.value\n else: # pragma: no cover\n px, py = pxy\n unit_str = ''\n\n u_beam = self.underlying_beam\n u_fwhm = 0.0 if u_beam is None else u_beam.fwhm\n i_beam = self.get_image_beam()\n i_fwhm = 0.0 if i_beam is None else i_beam.fwhm\n\n info = [\"Map information:\",\n f\"Image Size: {self.get_size_string()} pixels \"\n f\"({px} x {py}{unit_str}).\",\n self.grid.to_string(),\n f'Instrument PSF: {u_fwhm:.5f} '\n f'(includes pixelization)',\n f'Image resolution: {i_fwhm:.5f} '\n f'(includes smoothing)']\n return info", "title": "" }, { "docid": "c4fa4943f1e5133ffba5e4800a566e0e", "score": "0.6396968", "text": "def resolution(self, verbose=False):\n if verbose:\n return self._resolution\n else:\n r = ['R^'+str(i) for i in self._betti]\n return join(r,' <-- ')", "title": "" }, { "docid": "406517734f3fd55c14e4421aaf0c1e1d", "score": "0.622422", "text": "def info(self) -> str:\n reg_info = \"\".join([\"-\" + r.info().upper() for r in self.regularizer])\n return Solver.info(self) + \"-\" + self.data_term.info() + reg_info", "title": "" }, { "docid": "406517734f3fd55c14e4421aaf0c1e1d", "score": "0.622422", "text": "def info(self) -> str:\n reg_info = \"\".join([\"-\" + r.info().upper() for r in self.regularizer])\n return Solver.info(self) + \"-\" + self.data_term.info() + reg_info", "title": "" }, { "docid": "1074c1cccabb28baa21136aedabc15f4", "score": "0.62048525", "text": "def info(self):\n load_str = self.loadsim.info()\n dc_str = self.dcsim.info()\n return 'Battery simulator consisting of ' + load_str + ' and ' + dc_str", "title": "" }, { "docid": "a955df1f9800a457aa55e8c805abd3a9", "score": "0.62022877", "text": "def __str__(self):\n return str(self._matrix)", "title": "" }, { "docid": "bff821df81b2ac493ff634d6b686f634", "score": "0.6149327", "text": "def info(self,showHeader=False):\n arcmin = 180*60./numpy.pi\n print \"Dimensions (Ny,Nx) = (%d,%d)\"%(self.Ny,self.Nx)\n print \"Pixel Scales: (%f,%f) arcmins. \"%(self.pixScaleY*arcmin,self.pixScaleX*arcmin)\n print \"Map Bounds: [(x0,y0), (x1,y1)]: [(%f,%f),(%f,%f)] (degrees)\"%(self.x0,self.y0,self.x1,self.y1)\n print \"Map Bounds: [(x0,y0), (x1,y1)]: [(%s,%s),(%s,%s)]\"%\\\n (astLib.astCoords.decimal2hms(self.x0,':'),\\\n astLib.astCoords.decimal2dms(self.y0,':'),\\\n astLib.astCoords.decimal2hms(self.x1,':'),\\\n astLib.astCoords.decimal2dms(self.y1,':'))\n \n print \"Map area = %f sq. 
degrees.\"%(self.area)\n print \"Map mean = %f\"%(self.data.mean())\n print \"Map std = %f\"%(self.data.std())\n \n if showHeader:\n print \"Map header \\n %s\"%(self.header)", "title": "" }, { "docid": "c97e9f7954bac1bdb7e89520c8059ca7", "score": "0.61486083", "text": "def getDebugInfos(self):\n ans = self.getName() + \" : \" + str(self.getMaxRange()) + \" : \" + str(self.resolution) + \"\\n\";\n ans += \"ADC values: \" + str(self.getVoltage()) + \" adc val fast \" + str(self.getVoltageFast())\n return ans;", "title": "" }, { "docid": "7364aa4b1be56b1b1cb4252f861d4b74", "score": "0.60969275", "text": "def __str__(self):\n s = ''\n for i, row in enumerate(self.matrix):\n d = {}\n for j, key in enumerate(self.columns):\n d[key] = row[j]\n s += str(i)+': '+str(d)+'\\n'\n return s", "title": "" }, { "docid": "58287c80b87974a5f3cf7da1c98f37e2", "score": "0.6095699", "text": "def __get_strings_info(self):\n list_info = []\n info = '''\n----------------------------------------\n----------------------------------------\n RESOLUTION\n----------------------------------------\n----------------------------------------\nUnits vectors :\n รฎ' = {}\n ฤต' = {}\n k' = {}\n '''.format(\n self.matrix_units_vectors[0], \n self.matrix_units_vectors[1], \n self.matrix_units_vectors[2])\n\n list_info.append(info)\n\n for vector in self.vector_list.values():\n vector_info = [\n vector.name,\n vector.coordinates,\n vector.norme,\n vector.components,\n vector.determinent]\n\n info = '''\n----------------------------------------\nVector name = {}\nVector coodinates = {}\nVector norme = {}\nVector components รฎ = {}\nVector components ฤต = {}\nVector components k = {}\nVector determinent = {}\n-----------------------------------------\n '''.format(\n vector_info[0],\n vector_info[1],\n vector_info[2],\n vector_info[3][0],\n vector_info[3][1],\n vector_info[3][2],\n vector_info[4])\n list_info.append(info)\n\n return list_info", "title": "" }, { "docid": "355099e73a285d13b7edbc6299124581", "score": "0.6092209", "text": "def get_grid_info(src, freq):\n grid = self.get_grid(src, freq)\n out = ''\n if verb != 0 and hasattr(grid, 'construct_mesh_info'):\n out += grid.construct_mesh_info\n out += grid.__repr__()\n return out", "title": "" }, { "docid": "e27a5f370015558ccaf9263a1bf81d68", "score": "0.6079362", "text": "def __str__(self):\n return str(self.matr_num)", "title": "" }, { "docid": "6152e18a26e674f26cfd4da7bc355314", "score": "0.6064378", "text": "def printMatrix(self):\n\treturn (\"[%f,%f,%f,%f]\\n[%f,%f,%f,%f]\\n[%f,%f,%f,%f]\\n[%f,%f,%f,%f]\" % \n\t\t(self(0,0), self(0,1), self(0,2), self(0,3), \n\t\tself(1,0), self(1,1), self(1,2), self(1,3), \n\t\tself(2,0), self(2,1), self(2,2), self(2,3), \n\t\tself(3,0), self(3,1), self(3,2), self(3,3)))", "title": "" }, { "docid": "0500f508181995e61cda55c8d9824383", "score": "0.6012337", "text": "def __str__(self):\n return '\\n'.join(map(str, (' '.join(map(str, elem)) for elem in self.matr)))", "title": "" }, { "docid": "d068aa05c6ece38f340250e59307d4ad", "score": "0.5978768", "text": "def info(self):\n print(\"\")\n print(\"Powerspectrum:\")\n print(\"A_s: \", self.A_s)\n print(\"n_s: \", self.n_s)\n print(\"kpivot (1/Mpc): \", self.kpivot * const.Mpc)\n print(\"ell_max: \", self.ell_max)", "title": "" }, { "docid": "20b467b55b47d1cea7d28b7f118ae626", "score": "0.5937948", "text": "def disp(self):\n single = lambda x: '{:.2f}'.format(x)\n double = lambda x: '({:.2f}, {:.2f})'.format(*x)\n props = [\n ('rotation', single),\n ('scale', double),\n ('shear', single),\n 
('translation', double)]\n\n out = [\"Transformation Parameters:\"]\n for prop, fmt in props:\n val = getattr(self.matrix, prop)\n out.append(\"{:>12s} -- {}\".format(prop, fmt(val)))\n print('\\n'.join(out))", "title": "" }, { "docid": "fc5d87e8dde333a0980ccad92828e4c8", "score": "0.586094", "text": "def __str__(self):\n return str(matrix(self.board))", "title": "" }, { "docid": "86918c3691880593508d234efa727578", "score": "0.5837346", "text": "def info(self):\n ss = \"\\nSummary PSFKing info\\n\"\n ss += \"---------------------\\n\"\n ss += array_stats_str(self.offset, \"offset\")\n ss += array_stats_str(self.energy, \"energy\")\n ss += array_stats_str(self.gamma, \"gamma\")\n ss += array_stats_str(self.sigma, \"sigma\")\n\n # TODO: should quote containment values also\n\n return ss", "title": "" }, { "docid": "b3892d11bbdef5482a27c3e31581cfa4", "score": "0.5836605", "text": "def info(self):\n\n s = f\"\"\"width : {self._video.width}\nheight : {self._video.height}\ncount : {self._video.frame_count}\nfps : {self._video.fps}\n\"\"\"\n return s", "title": "" }, { "docid": "1c7ce356874b98bcfc721581eecaad7b", "score": "0.58301127", "text": "def __str__(self):\n mat = \",\\n \".join(str(x) for x in self.data)\n return f\"Matrix([\\n {mat}\\n ])\"", "title": "" }, { "docid": "402b11de63cb9da17a12f797a889a0ac", "score": "0.5822843", "text": "def __str__(self):\n strpuzz = ''\n for row in range(3):\n strpuzz += ' '.join(map(str, self.mat8[row])) + '\\r\\n'\n return strpuzz", "title": "" }, { "docid": "c1496c270d043b4d77fcaa74d40c5d8c", "score": "0.58225363", "text": "def get_resolution(self):\n # The resolution of the Phantom camera is stored in the \"defc.res\" field in the form of a string which looks\n # like this \"x_res x y_res\". The two values being separated by an \"x\" character\n resolution_string = self['defc.res']\n resolution_string = resolution_string.strip()\n resolution = resolution_string.split('x')\n return int(resolution[0]), int(resolution[1])", "title": "" }, { "docid": "05bb62722ba423fdbcbfe9422ee779c8", "score": "0.58133715", "text": "def show_matrix(self):\n print(self.left_right)\n print(self.top_botton)", "title": "" }, { "docid": "9d9f90ce39c10afa87ee44c7a69d037d", "score": "0.57949096", "text": "def info_string(self, width=79, verbosity_option=None):\n raise NotImplementedError(\"info_string not implemented\")", "title": "" }, { "docid": "165124b2ff13330782934bcf59647980", "score": "0.5789052", "text": "def info(self) -> str:\n return self.__reg_name__ + \"(t:\" + self.wavelet + \"-l:%d\" % self.level + \"-w:%g\" % self.weight.max() + \")\"", "title": "" }, { "docid": "165124b2ff13330782934bcf59647980", "score": "0.5789052", "text": "def info(self) -> str:\n return self.__reg_name__ + \"(t:\" + self.wavelet + \"-l:%d\" % self.level + \"-w:%g\" % self.weight.max() + \")\"", "title": "" }, { "docid": "276447294ba6d056e42ffb2c46dd4f1d", "score": "0.57542664", "text": "def __str__(self):\r\n result = \"[\\n\"\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n result += \" \" + str(self.matrix[i * self.cols + j])\r\n result += \"\\n\"\r\n result += \"]\"\r\n return result", "title": "" }, { "docid": "52158f44f9b97f76cf6a08c4d3b6ebff", "score": "0.5748311", "text": "def __str__(self) -> str:\n tmp = self.system_name + \"\\n\"\n tmp += \" {}\\n\".format(self.scaling_factor)\n for i in range(3):\n tmp += \" {:#.6f} {:#.6f} {:6f}\\n\".format(\n self.cell_vecs[i][0], self.cell_vecs[i][1], self.cell_vecs[i][2]\n )\n for element in self.atomtypes:\n tmp += \" 
{}\".format(element)\n tmp += \"\\n\"\n for atomnum in self.atomnums:\n tmp += \" {}\".format(atomnum)\n tmp += \"\\n\"\n for frame_index, positions in enumerate(self.configurations):\n tmp += \"Direct configuration= {}\\n\".format(frame_index + 1)\n for position in positions:\n tmp += \" {:#.6f} {:#.6f} {:6f}\\n\".format(\n position[0], position[1], position[2]\n )\n return tmp", "title": "" }, { "docid": "f1d502d87f3f32e1d875d29051cebf95", "score": "0.5743364", "text": "def get_info_arr(self):\n return 'Variations:', len(self.key), 'Repetitions:', self.reps, 'Start time (H:M:S) ', self.experiment_time, ' Start date (Y-M-D)', self.experiment_date", "title": "" }, { "docid": "e056de05f29f3bf07262e8be16c52b59", "score": "0.5741582", "text": "def __str__(self):\n if self.data.ndim == 3:\n nt, ni, nr = self.data.shape\n else:\n ni, nt, nr = 1, *self.data.shape\n s = '({}) Ion species fluid quantity of size NI x NT x NR = {} x {} x {}\\n'.format(self.name, ni, nt, nr)\n for i in range(len(self.ions.Z)):\n s += \" {:2s} (Z = {:3d})\\n\".format(*self.ions[i])\n\n return s", "title": "" }, { "docid": "3568f2c4008426fc62a3dd49a6f04d4d", "score": "0.5717657", "text": "def printTransformMatrix(self):\n\treturn (\"[%f,%f,%f,%f]\\n[%f,%f,%f,%f]\\n[%f,%f,%f,%f]\\n[%f,%f,%f,%f]\" % \n\t (self.asMatrix()(0,0), self.asMatrix()(0,1), self.asMatrix()(0,2), self.asMatrix()(0,3), \n\t self.asMatrix()(1,0), self.asMatrix()(1,1), self.asMatrix()(1,2), self.asMatrix()(1,3), \n\t self.asMatrix()(2,0), self.asMatrix()(2,1), self.asMatrix()(2,2), self.asMatrix()(2,3), \n\t self.asMatrix()(3,0), self.asMatrix()(3,1), self.asMatrix()(3,2), self.asMatrix()(3,3)))", "title": "" }, { "docid": "a4b812b2d60dd3b2b99bedea038f8488", "score": "0.5711729", "text": "def __repr__(self):\n return str(matrix(self.board))", "title": "" }, { "docid": "b45693abf409940b098befe306864d2b", "score": "0.5692991", "text": "def get_string(self):\n result = \"\"\n for line in self.data_matrix:\n result += \"\".join(line)+'\\n'\n\n return result[:-1]", "title": "" }, { "docid": "024f3a6e39a9f173840c818070359b2c", "score": "0.5692875", "text": "def info(self):\n lst = []\n indent = ' '\n lst.append('ID: {0}'.format(self._id))\n lst.append('Name: {0}'.format(self.name))\n lst.append('Temporal Domain:\\n{0}'.format(self.temporal_domain.__str__(indent*2)))\n lst.append('Spatial Domain:\\n{0}'.format(self.spatial_domain.__str__(indent*2)))\n\n lst.append('Parameters:')\n for x in self._range_value:\n lst.append('{0}{1} {2}\\n{3}'.format(indent*2,x,self._range_value[x].shape,self._range_dictionary.get_context(x).__str__(indent*4)))\n\n return '\\n'.join(lst)", "title": "" }, { "docid": "152605e3ae58844ab6284d7f646d24e8", "score": "0.56827354", "text": "def getinfo(self):\n try:\n outdims = (self.imgwidth,self.imgheight)\n except:\n outdims = (\"no output image has been assigned\")\n crsdict = dict([(\"output dimensions\",outdims),\n (\"coord bbox\",(self.xleft,self.ytop,self.xright,self.ybottom)),\n (\"coord dimensions\",(self.xwidth,self.yheight)) ])\n return crsdict", "title": "" }, { "docid": "06d64a3dad943117316296fbb4485574", "score": "0.566334", "text": "def __str__(self):\n s = \"\"\n for key in self.data:\n s = s + key + \": dim\" + str(self.data[key].shape) + \"\\n\" + str(self.data[key][:self.endmarker[key]]) + \"\\n\\n\"\n return s", "title": "" }, { "docid": "f206694d49256c759f000158d5efcf75", "score": "0.5639649", "text": "def getmapinfo(self):\n\n self.ra = np.unique(self.m.ra)\n self.dec = np.unique(self.m.dec)\n self.ravec = 
np.ravel(self.m.ra)\n self.decvec = np.ravel(self.m.dec)\n self.Npair = len(self.m.fn)", "title": "" }, { "docid": "b005abece3d5ed247b3ca87e6c2ff18e", "score": "0.56304747", "text": "def __repr__(self,):\n stringRepresentation=\"\"\n \n rows,cols = self.matrix.shape\n \n for row in xrange(0,rows):\n stringRepresentation += \"[\"\n \n for col in xrange(0,cols):\n stringRepresentation+= \"%+0.2f \"%self.matrix[row][col]\n stringRepresentation += \"]\\n\"\n \n return stringRepresentation", "title": "" }, { "docid": "b4d835c1390aed18ad1240342032137b", "score": "0.5618609", "text": "def info(self) -> str:\n return self.__reg_name__ + \"(w:%g\" % self.weight.max() + \")\"", "title": "" }, { "docid": "68e848453d63d7285275fb458d3b3514", "score": "0.56174994", "text": "def __repr__(self):\n\n msg = \"\"\n for arr in self.matrix:\n msg +=\"[\" + \" \".join(str(x) for x in arr) + \"]\\n\"\n\n return msg", "title": "" }, { "docid": "d5136b7422f29127dc67a160e3bab8dc", "score": "0.56138784", "text": "def _printImgMatrix(self, imgMatrix, width, height, resolution, align):\n if resolution == \"high\":\n scaling = 24\n currentpxWidth = self.printer.pxWidth * 2\n else:\n scaling = 8\n currentpxWidth = self.printer.pxWidth\n if width > currentpxWidth:\n raise ValueError(\"Image too wide. Maximum width is configured to be \" + str(currentpxWidth) + \"pixels. The image is \" + str(width) + \" pixels wide.\")\n tmp = ''\n for yScale in range(-(-height / scaling)):\n # Set mode to hex and 8-dot single density (60 dpi).\n if resolution == \"high\":\n outList = [ \"0x1B\", \"0x2A\", \"0x21\" ]\n else:\n outList = [ \"0x1B\", \"0x2A\", \"0x00\" ]\n # Add width to the communication to the printer. Depending on the alignment we count that in and add blank vertical lines to the outList\n if align == \"left\":\n blanks = 0\n if align == \"center\":\n blanks = (currentpxWidth - width) / 2\n if align == \"right\":\n blanks = currentpxWidth - width\n highByte = (width + blanks) / 256\n lowByte = (width + blanks) % 256\n outList.append(hex(lowByte))\n outList.append(hex(highByte))\n if resolution == \"high\":\n blanks *= 3\n if align == \"left\":\n pass\n if align == \"center\":\n for i in range(blanks):\n outList.append(hex(0))\n if align == \"right\":\n for i in range(blanks):\n outList.append(hex(0))\n for x in range(width):\n # Compute hex string for one vertical bar of 8 dots (zero padded from the bottom if necessary).\n binStr = \"\"\n for y in range(scaling):\n # Indirect zero padding. 
Do not try to extract values from images beyond its size.\n if (yScale * scaling + y) < height:\n binStr += \"0\" if imgMatrix[x, yScale * scaling + y] == 255 else \"1\"\n # Zero padding\n else:\n binStr += \"0\"\n outList.append(hex(int(binStr[0:8], 2)))\n if resolution == \"high\":\n outList.append(hex(int(binStr[8:16], 2)))\n outList.append(hex(int(binStr[16:24], 2)))\n for element in outList:\n try:\n tmp += chr(int(element, 16))\n except:\n raise\n\n self._write(tmp)", "title": "" }, { "docid": "01e143ab76d0ab5f4df6cb4f80840fe4", "score": "0.56131023", "text": "def dump(self):\n for i, abbrev in self.idxFac.items():\n wt = self.wVec[i]\n iBack = self.facIdx[abbrev]\n print '%d: %s: %f %d' % (i, abbrev, wt, iBack)\n print self.tMatrix", "title": "" }, { "docid": "71ae1a35636f3d86c96d6a1ed4e56c53", "score": "0.56069946", "text": "def __repr__(self):\n _analysisType = {\n 0: \"Illuminance\", 1: \"Radiation\", 2: \"Luminance\"\n }\n return \"%s: %s\\n#Views: %d\" % \\\n (self.__class__.__name__,\n _analysisType[self.simulationType],\n self.viewCount)", "title": "" }, { "docid": "74a4362a2fd9bb2ec2f7dfddea837cc8", "score": "0.5605232", "text": "def print_info(self):\n\n print('-'*80)\n print('Material: %s' % self._material_name)\n print('Parameters: %s' % self._parameters)\n print('Material class: %s' % self._material_class)\n print('Incompressible: %s' % self._incompressible)\n print('-'*80)", "title": "" }, { "docid": "49ef1c469b150efa9d23da36f9c76d96", "score": "0.56021434", "text": "def output_findsym(self):\n self.set_scale()\n self.convention = 'Direct'\n if self.name[-1] == \"\\n\":\n output = self.name\n else:\n output = self.name+\"\\n\"\n output += \"0\\n\" # Use default symmetry tolerance (1e-6)\n output += \"1\\n\" # Set lattice parameter format to vectors\n # Output unit cell vectors as \"lattice parameter vectors\"\n for i in range(3):\n for j in range(3):\n output += str(self.cell_vec[i, j])+\" \"\n output += \"\\n\"\n output += \"1\\n\" # Set unit cell vector format to vectors\n # Unit cell vectors chosen as identity matrix - all relevant information\n # is contained in the \"lattice parameter vectors\"\n output += \"1.0 0.0 0.0\\n\"\n output += \"0.0 1.0 0.0\\n\"\n output += \"0.0 0.0 1.0\\n\"\n output += str(self.num_atoms)+\"\\n\"\n # Output list of atomic type integers\n n = 1\n for i in range(self.num_atom_types):\n for j in range(self.atom_types[i]):\n output += str(n)+\" \"\n n += 1\n output += \"\\n\"\n # Output atomic positions\n for i in range(self.num_atoms):\n for j in range(3):\n output += str(self.atom_positions[i, j])+\" \"\n output += \"\\n\"\n return output", "title": "" }, { "docid": "0534a68136f454dc8255a73bff96b580", "score": "0.55920273", "text": "def report_strings(self):\n result = self.report()\n strings = []\n if '_FPS_' in result:\n strings.append(f'FPS: {result[\"_FPS_\"]:>5.1f}')\n strings += [f'{name}: {val:>3.0f}' for name, val in result.items()]\n return strings", "title": "" }, { "docid": "282abc57c6f5927cce584dc353075534", "score": "0.5582221", "text": "def info(self) -> str:\n return self.__reg_name__ + \"(s:%s\" % np.array(self.filt_size) + \"-w:%g\" % self.weight.max() + \")\"", "title": "" }, { "docid": "d4c69c51c77d401e7c79d20def822f41", "score": "0.5579474", "text": "def get_brief_info(self) -> str:\n return self.tile.get_brief_info().rstrip()", "title": "" }, { "docid": "83dc1893256d7d7d3a9e3af4558e3ba2", "score": "0.55786127", "text": "def __str__(self):\n\n return \"Mat33(\" + str(self.row0) + \",\" + str(self.row1) + \",\" + 
str(self.row2) + \")\"", "title": "" }, { "docid": "9bfde5df85473532bbceb685427f09ee", "score": "0.5566558", "text": "def get_metadata_string(self):\n return \"Spherical mode: %s (%f,%f,%f) (%d,%d,%d,%d)\" % (self.projection, self.yaw, self.pitch, self.roll, self.clip_top, self.clip_bottom, self.clip_left_right, self.clip_right)", "title": "" }, { "docid": "6ddc58bd58c16a9c75f697696a9c9801", "score": "0.555309", "text": "def __repr__(self):\n infos = [ self.name, self.chrom, self.initial, self.final, self.score, self.errors_bp, \n self.motif, self.orientation, self.seq ]\n return ','.join( [str(x) for x in infos if x] )", "title": "" }, { "docid": "afb65789714094355b707358280abc12", "score": "0.5550868", "text": "def __repr__(self):\n info = self.__class__.__name__ + \"\\n\"\n info += \"-\" * len(self.__class__.__name__) + \"\\n\\n\"\n info += f\"\\tshape : {self.psf_value.shape}\\n\"\n return info", "title": "" }, { "docid": "f1f52516aa4c3c54573242ca21626372", "score": "0.5547318", "text": "def matrix_to_str(m):\n s = \"\"\n for i in range(len(m)):\n for j in range(3):\n s += str(float(m[i][j])) + \" \"\n\n return s", "title": "" }, { "docid": "764358e4a581011851b5d78e3b315046", "score": "0.5546159", "text": "def _str(self) -> str | None:\n assert self.shape is not None\n return 'channels: {}\\norbits: {}\\npoints_per_orbit: {}'.format(\n *self.shape\n )", "title": "" }, { "docid": "66e803e8cc5d43bf1d03a77876331fd3", "score": "0.55367607", "text": "def _str(self) -> str | None:\n return 'dc_ref: (256, 256) float32\\np_fit: (32, 32, 16) float64'", "title": "" }, { "docid": "64625be05d3bb4315981c1a6e89fce2a", "score": "0.55317557", "text": "def _metadata_print(self) -> str:\n output = 'Mdtest\\n'\n\n if self._metadata[self._num_systems] == '':\n return ''\n for key, values in self._metadata[self._num_systems].items():\n output += (f\" {key}: {values['mean']} ops\\n\")\n return output", "title": "" }, { "docid": "5eae9a05d726ceabb843491a9dddbcc0", "score": "0.55249625", "text": "def __str__(self):\n str = \"\"\n for row in self.matrix:\n for char in row:\n str += char\n str += \"\\n\"\n return str", "title": "" }, { "docid": "a0ab4882e27d4fee8a794a521b7d6868", "score": "0.55038106", "text": "def get_info_string(self):\n if not self.serial_connections:\n return \"No connection to any CPU board.\"\n\n infos = \"Connected CPUs:\\n\"\n for connection in sorted(self.serial_connections, key=lambda x: x.chain_serial):\n infos += \" - Port: {} at {} baud. 
Chain Serial: {}\\n\".format(connection.port, connection.baud,\n connection.chain_serial)\n for board_id, board_firmware in self.gen2_addr_arr[connection.chain_serial].items():\n if board_firmware is None:\n infos += \" -> Board: 0x{:02x} Firmware: broken\\n\".format(board_id)\n else:\n infos += \" -> Board: 0x{:02x} Firmware: 0x{:02x}\\n\".format(board_id, board_firmware)\n\n infos += \"\\nIncand cards:\\n\" if self.opp_incands else \"\"\n card_format_string = \" - Chain: {} Board: 0x{:02x} Card: {} Numbers: {}\\n\"\n\n for incand in self.opp_incands.values():\n infos += card_format_string.format(incand.chain_serial, incand.addr,\n incand.card_num,\n self._get_numbers(incand.mask))\n\n infos += \"\\nInput cards:\\n\"\n for inputs in self.opp_inputs:\n infos += card_format_string.format(inputs.chain_serial, inputs.addr,\n inputs.card_num,\n self._get_numbers(inputs.mask))\n\n infos += \"\\nSolenoid cards:\\n\"\n for outputs in self.opp_solenoid:\n infos += card_format_string.format(outputs.chain_serial, outputs.addr,\n outputs.card_num,\n self._get_numbers(outputs.mask))\n\n infos += \"\\nLEDs:\\n\" if self.neo_card_dict else \"\"\n for leds in self.neo_card_dict.values():\n infos += \" - Chain: {} Board: 0x{:02x} Card: {}\\n\".format(leds.chain_serial, leds.addr, leds.card_num)\n\n infos += \"\\nMatrix lights:\\n\" if self.matrix_light_cards else ''\n for matrix_light in self.matrix_light_cards.values():\n infos += \" - Chain: {} Board: 0x{:02x} Card: {} Numbers: 0 - 63\\n\".format(\n matrix_light.chain_serial, matrix_light.addr, matrix_light.card_num)\n\n return infos", "title": "" }, { "docid": "cb0409204fd4c3c6d84fb413c03aa953", "score": "0.54986066", "text": "def print_metrics(self):\n if not self.is_fitted:\n print(\"Model not fitted yet!\")\n return None\n items = (\n (\"sse:\", self.sse()),\n (\"sst:\", self.sst()),\n (\"mse:\", self.mse()),\n (\"r^2:\", self.r_squared()),\n (\"adj_r^2:\", self.adj_r_squared()),\n (\"AIC:\", self.aic()),\n (\"BIC:\", self.bic()),\n )\n for item in items:\n print(\"{0:8} {1:.4f}\".format(item[0], item[1]))", "title": "" }, { "docid": "6ede1214b33afd6f6e15f800a3dd276b", "score": "0.5498218", "text": "def plot_info(self):\n path = self.get_info_plot_path()\n header_list = self._get_info_header_list()\n data_list = []\n for op in self.op_matrix_list:\n op.update_properties()\n data_list.append(\n list(op.domain.get_ordered_param_dict().values()) + op.get_properties().list())\n DisplayInfo.plot_info(data_list, header_list,\n path, to_html=True, to_csv=False)", "title": "" }, { "docid": "1d18ecc74fb0484ad7a07f3b87fc71dc", "score": "0.5492103", "text": "def __repr__(self):\n\n r = \" \"\n\n for i in range(self.dim):\n if i<10:\n r += \"0\" + str(i) + \" \"\n else:\n r += str(i) + \" \"\n r += \"\\n\"\n\n for i in range(self.dim):\n \n s = str(i)\n if len(s)<2:\n r += \"0\"\n r += s + \" | \"\n\n for j in range(self.dim):\n if self.adjacencies[i][j]:\n r += \" 1 \"\n else:\n r += \" 0 \"\n\n r += \"| \"\n s = str(i)\n if len(s)<2:\n r += \"0\"\n r += s + \"\\n\"\n\n r += \" \"\n\n for i in range(self.dim):\n if i<10:\n r += \"0\" + str(i) + \" \"\n else:\n r += str(i) + \" \"\n r += \"\\n\"\n return r", "title": "" }, { "docid": "d0d47d6a17739c483752b703f604489a", "score": "0.5487407", "text": "def topology_info(mesh, h_min=0, scale=0.480):\n Mf = flax_mass(mesh)\n Vcore = core_volume(mesh, h_min=h_min)\n Mpla = Vcore * 1.25 * 0.2\n print(f'pla mass: {np.around(Mpla, 1)} g')\n Mcarbon = carbon_mass(mesh, h_min=h_min)\n print(f'ratio brace/board 
{np.around((Mcarbon + Mpla) / Mf, 2)}')", "title": "" }, { "docid": "8d7c4d99222c15b5daa1b456c10018f0", "score": "0.5478381", "text": "def mat_print(self):\n col_maxes = [max([len(\"{:g}\".format(x)) for x in col]) for col in self.matrix.T]\n for x in self.matrix:\n for i, y in enumerate(x):\n print((\"{:\" + str(col_maxes[i]) + \"g}\").format(y), end=\" \")\n print(\"\")", "title": "" }, { "docid": "984c3d78c80babb2e53ca66615d78a95", "score": "0.5474693", "text": "def __str__(self):\n result = '==================== Mapping ====================\\n'\n result += str(self.R) + '\\n'\n for index in range(self.n):\n result += f'{self.name}_{index+1} = {self.F[index]}\\n'\n return result", "title": "" }, { "docid": "e81271fdf9bb581cfa94c90333d926cb", "score": "0.5461554", "text": "def string(self):\n p = self.getinfo()\n sp = '::'\n out = ''\n for f in p:\n out += sp.join(f) + '\\n'\n #print f[0], f[1], f[2], f[3]\n return out", "title": "" }, { "docid": "de6e2d2848b2f116b8beb02727f74a54", "score": "0.5459549", "text": "def get_qualifier(self):\n res = []\n\n mcc = self.imsi & 0xFFFF\n mnc = (self.imsi & 0xFFFF0000) >> 16\n if mcc != 0:\n res.append(\"mcc%d\" % mcc)\n if mnc != 0:\n res.append(\"mnc%d\" % mnc)\n\n if self.locale != 0:\n res.append(self.get_language_and_region())\n\n screenLayout = self.screenConfig & 0xff\n if (screenLayout & MASK_LAYOUTDIR) != 0:\n if screenLayout & MASK_LAYOUTDIR == LAYOUTDIR_LTR:\n res.append(\"ldltr\")\n elif screenLayout & MASK_LAYOUTDIR == LAYOUTDIR_RTL:\n res.append(\"ldrtl\")\n else:\n res.append(\"layoutDir_%d\" % (screenLayout & MASK_LAYOUTDIR))\n\n smallestScreenWidthDp = (self.screenConfig & 0xFFFF0000) >> 16\n if smallestScreenWidthDp != 0:\n res.append(\"sw%ddp\" % smallestScreenWidthDp)\n\n screenWidthDp = self.screenSizeDp & 0xFFFF\n screenHeightDp = (self.screenSizeDp & 0xFFFF0000) >> 16\n if screenWidthDp != 0:\n res.append(\"w%ddp\" % screenWidthDp)\n if screenHeightDp != 0:\n res.append(\"h%ddp\" % screenHeightDp)\n\n if (screenLayout & MASK_SCREENSIZE) != SCREENSIZE_ANY:\n if screenLayout & MASK_SCREENSIZE == SCREENSIZE_SMALL:\n res.append(\"small\")\n elif screenLayout & MASK_SCREENSIZE == SCREENSIZE_NORMAL:\n res.append(\"normal\")\n elif screenLayout & MASK_SCREENSIZE == SCREENSIZE_LARGE:\n res.append(\"large\")\n elif screenLayout & MASK_SCREENSIZE == SCREENSIZE_XLARGE:\n res.append(\"xlarge\")\n else:\n res.append(\"screenLayoutSize_%d\" % (screenLayout & MASK_SCREENSIZE))\n if (screenLayout & MASK_SCREENLONG) != 0:\n if screenLayout & MASK_SCREENLONG == SCREENLONG_NO:\n res.append(\"notlong\")\n elif screenLayout & MASK_SCREENLONG == SCREENLONG_YES:\n res.append(\"long\")\n else:\n res.append(\"screenLayoutLong_%d\" % (screenLayout & MASK_SCREENLONG))\n\n density = (self.screenType & 0xffff0000) >> 16\n if density != DENSITY_DEFAULT:\n if density == DENSITY_LOW:\n res.append(\"ldpi\")\n elif density == DENSITY_MEDIUM:\n res.append(\"mdpi\")\n elif density == DENSITY_TV:\n res.append(\"tvdpi\")\n elif density == DENSITY_HIGH:\n res.append(\"hdpi\")\n elif density == DENSITY_XHIGH:\n res.append(\"xhdpi\")\n elif density == DENSITY_XXHIGH:\n res.append(\"xxhdpi\")\n elif density == DENSITY_XXXHIGH:\n res.append(\"xxxhdpi\")\n elif density == DENSITY_NONE:\n res.append(\"nodpi\")\n elif density == DENSITY_ANY:\n res.append(\"anydpi\")\n else:\n res.append(\"%ddpi\" % (density))\n\n touchscreen = (self.screenType & 0xff00) >> 8\n if touchscreen != TOUCHSCREEN_ANY:\n if touchscreen == TOUCHSCREEN_NOTOUCH:\n res.append(\"notouch\")\n elif 
touchscreen == TOUCHSCREEN_FINGER:\n res.append(\"finger\")\n elif touchscreen == TOUCHSCREEN_STYLUS:\n res.append(\"stylus\")\n else:\n res.append(\"touchscreen_%d\" % touchscreen)\n\n screenSize = self.screenSize\n if screenSize != 0:\n screenWidth = self.screenSize & 0xffff\n screenHeight = (self.screenSize & 0xffff0000) >> 16\n res.append(\"%dx%d\" % (screenWidth, screenHeight))\n\n version = self.version\n if version != 0:\n sdkVersion = self.version & 0xffff\n minorVersion = (self.version & 0xffff0000) >> 16\n res.append(\"v%d\" % sdkVersion)\n if minorVersion != 0:\n res.append(\".%d\" % minorVersion)\n\n return \"-\".join(res)", "title": "" }, { "docid": "758f7e1353bff13ab75ce909ff3202d0", "score": "0.5458902", "text": "def show(self):\n r = \"\"\n for i in range(self.map_height):\n s = \"\"\n for j in range(self.map_width):\n if self.agent.idem_position(i,j):\n s += str(self.agent)\n else:\n s += str(self.map_array[i][j])\n if(i > 0):\n r += \"\\n\"\n r += s\n print(r)", "title": "" }, { "docid": "bfddcda8e240df42fbe5df58fa0b657c", "score": "0.54511166", "text": "def debug(self):\n print \" - Lat %f to %f, Long %f to %f\" % (self.S,self.N,self.W,self.E)\n print \" - Ratio: %f\" % self.ratio", "title": "" }, { "docid": "c293ad2805d9e75dcba25f1aea2aa9b8", "score": "0.5449761", "text": "def __repr__(self):\n return \"Matrix%s\" % str(self._matrix)", "title": "" }, { "docid": "8bd30134dfcaf4840f32346ad1f43e37", "score": "0.5442919", "text": "def info(self):\n return str()", "title": "" }, { "docid": "7389588e22f5ccb46e8d37af92f5c19c", "score": "0.54292715", "text": "def profile_info_string(self, width=79, verbosity_option=None,\n enumerate=False):\n raise NotImplementedError(\"profile_info_string not implemented\")", "title": "" }, { "docid": "b460354be48f74c197d5379d335e84e0", "score": "0.54282945", "text": "def __str__(self):\n output = ['[']\n for row in self._matrix:\n output.append(str(row))\n output.append(',\\n ')\n output.pop() # Remove last comma and newline\n output.append(']')\n return ''.join(output)", "title": "" }, { "docid": "19932ece96fc173e1be79507f85b7a17", "score": "0.5425735", "text": "def to_string(self):\n map_string = \"\"\n for i in range(self.__width):\n for j in range(self.__width):\n map_string += self.__map_format[i][j]\n map_string += \"\\n\"\n return map_string", "title": "" }, { "docid": "619e3f97f980d4d41fe35fb2b31b3b88", "score": "0.54124105", "text": "def write_debug_info(self):\n\n sys.stdout.write(\"cell x: %.2f\" % self.robot.get_cell_x() + \", cell y: %.2f\" % self.robot.get_cell_y() + \"\\n\")\n sys.stdout.write(\n \"x: %.2f\" % self.robot.x +\n \", y: %.2f\" % self.robot.y + \"\\n\"\n \"heading %.2f\" % self.robot.heading)\n sys.stdout.write(\"\\n\\n\" + ('-' * 120) + \"\\n\\n\")\n\n if self.output_file is not None:\n if not self.output_file.closed:\n self.output_file.write(\"cell x: %.2f\" % self.robot.get_cell_x() + \", cell y: %.2f\" % self.robot.get_cell_y() + \"\\n\")\n self.output_file.write(\n \"x: %.2f\" % self.robot.x +\n \", y: %.2f\" % self.robot.y + \"\\n\" +\n \"heading %.2f\" % self.robot.heading)\n self.output_file.write(\"\\n\\n\" + ('-' * 120) + \"\\n\\n\")", "title": "" }, { "docid": "76821f955e5882c42a53b46f03516008", "score": "0.5412216", "text": "def __str__(self):\n if ConnectivityProfile.show_as_log:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n profile = np.log(self.profile)\n vrange = np.log(self.globalrange)\n profile[profile==-np.inf] = 0\n vrange[vrange==-np.inf] = 0\n else:\n profile = 
self.profile\n vrange = self.globalrange\n calib = termplot.calibrate(vrange, self.column_names)\n return \"\\n\".join([style.BOLD+'Connectivity profile of \"{}\"'.format(self.region)+style.END] \n + [ termplot.format_row(self.column_names[i],profile[i],calib)\n for i in np.argsort(profile)\n if self.profile[i]>0\n ])", "title": "" }, { "docid": "4b8d6447867dde44832cf829775d9249", "score": "0.54104006", "text": "def print_solvers_info(self):\n # printing info about the sim\n print('dx = %e' % self.solver.solver.dx)\n print('dy = %e' % self.solver.solver.dy)\n print('dz = %e' % self.solver.solver.dz)\n dt_cfl = self.cfl / (c_light * np.sqrt(1 / self.solver.solver.dx ** 2 +\n 1 / self.solver.solver.dy ** 2 +\n 1 / self.solver.solver.dz ** 2))\n\n print('EM solver: dt = %e' % dt_cfl)\n print('ES solver: dt = %e' % picmi.warp.top.dt)", "title": "" }, { "docid": "14cda1096b80568934d429796a1b932d", "score": "0.5407925", "text": "def __str__(self):\n\n result = \"GammaInitial:\\n%s\" % (str(np.array([[self.GammaInitial[j * self.pomdp.n + i] \\\n for j in range(self.pomdp.r)] \\\n for i in range(self.pomdp.n)]))) + \"\\n\\n\"\n\n result += \"currentHorizon: %i\" % (self.currentHorizon) + \"\\n\\n\"\n\n result += \"Gamma:\\n%s\" % (str(np.array([[self.Gamma[j * self.pomdp.n + i] \\\n for j in range(self.pomdp.r)] \\\n for i in range(self.pomdp.n)]))) + \"\\n\\n\"\n\n result += \"GammaPrime:\\n%s\" % (str(np.array([[self.GammaPrime[j * self.pomdp.n + i] \\\n for j in range(self.pomdp.r)] \\\n for i in range(self.pomdp.n)]))) + \"\\n\\n\"\n\n result += \"pi:\\n%s\" % (str(np.array([self.pi[i] \\\n for i in range(self.pomdp.n)]))) + \"\\n\\n\"\n\n return result", "title": "" }, { "docid": "2a6624b7250a57eb7369ea59435cc229", "score": "0.5407308", "text": "def print_info(self):\n print('\\n======================= SLIDE ======================')\n print('|')\n for key, val in sorted(self.__dict__.items()):\n if 'list' in key:\n print('|\\t {}:\\n|\\t\\t\\tlength: {}'.format(key, len(val)))\n continue\n\n if type(val) is np.ndarray:\n print('|\\t {}:\\n|\\t\\t\\tshape: {}'.format(key, val.shape))\n continue\n\n if key == 'output_imgs':\n try:\n for vk, vv in val.items():\n print('|\\t {}:\\n|\\t\\t\\t{}: {}'.format(key, vk, vv.shape))\n except:\n print('|\\t {}:\\n|\\t\\t\\tlen: {}'.format(key, len(val)))\n continue\n\n print('|\\t {}:\\n|\\t\\t\\t{}'.format(key, val))\n print('|')\n print('======================= SLIDE ======================\\n')", "title": "" }, { "docid": "db186cf7814a48807d40363a64e0c287", "score": "0.5407048", "text": "def info(self):\n\n self._setup()\n\n print '----------------------'\n print '-- Model Name: %s' % self.modelname\n print '----------------------'\n print ''\n print 'URL: %s' % self.baseurl\n print ''\n print 'LATITUDE'\n print 'num: %d' % self.nlat\n print 'min: %f' % np.min(self.lat)\n print 'max: %f' % np.max(self.lat)\n print ''\n print 'LONGITUDE'\n print 'num: %d' % self.nlon\n print 'min: %f' % np.min(self.lon)\n print 'max: %f' % np.max(self.lon)\n print ''\n print 'TIME'\n print 'min: %s' % datetime.strftime(datetime.strptime(str(self.daterange[0]), '%Y%m%d'),'%Y-%m-%d')\n print 'max: %s' % datetime.strftime(datetime.strptime(str(self.daterange[1]), '%Y%m%d'),'%Y-%m-%d')", "title": "" }, { "docid": "95f98f939d0818b2ebab4bcb6594582e", "score": "0.5404648", "text": "def __str__(self):\n return '\\n'.join([' '.join([self.__MAP__[self.grid[(i, j)]] for j in range(self.n)]) for i in range(self.m)])", "title": "" }, { "docid": "ddb8d479a9034c3969d64fd96100dde7", 
"score": "0.5403992", "text": "def __repr__(self):\r\n\t\tstrBuff = \"- Best Particle Topology Statistics\\n\"\r\n\t\tfor k,v in self.internalDict.items():\r\n\t\t\tstrBuff += \"\\t%-45s = %s\\n\" % (self.descriptions.get(k,k), v)\r\n\t\treturn strBuff", "title": "" }, { "docid": "5a82bf5e06f083ebbeafa8810d6bc062", "score": "0.540317", "text": "def nice_string(self):\n out = \"\"\n for i in range(self.DIMENSION):\n for j in range(self.DIMENSION):\n out = out + \" \" + str(self.get_val(i, j)) + \" \"\n if j == 2 or j == 5:\n out = out + \"|\"\n if i == 2 or i == 5:\n out = out + \"\\n\" + \" - - - + - - - + - - -\"\n out = out + \"\\n\"\n return out", "title": "" }, { "docid": "c31e9f0b9eadd0a56ffe96c24cdbe3ed", "score": "0.5394514", "text": "def __str__(self):\n\n result = \"GammaInitial:\\n%s\" % (str(np.array([[self.GammaInitial[j * self.pomdp.n + i] \\\n for j in range(self.pomdp.r)] \\\n for i in range(self.pomdp.n)]))) + \"\\n\\n\"\n\n result += \"currentHorizon: %i\" % (self.currentHorizon) + \"\\n\\n\"\n result += \"numThreads: %i\" % (self.numThreads) + \"\\n\\n\"\n\n return result", "title": "" }, { "docid": "a387e2e0bf9fe607e28c23e4dc45eacc", "score": "0.53927654", "text": "def resolution(self):\n return self._res", "title": "" }, { "docid": "a8758072a048fb35e2635c6dab117f68", "score": "0.53923005", "text": "def __repr__(self):\n s = self.grid()\n s += \"robots = \" + str(self.robot_locs) + '\\n'\n s += \"stacks = \" + str(self.stack_locs) + '\\n'\n s += \"orders = \" + str(self.orders) + '\\n'\n return s", "title": "" }, { "docid": "e56f0145113607895d5ed1fc527f33fe", "score": "0.53908724", "text": "def __str__(self):\n info = ''\n for name in self.get('_map_names', sorted(self)):\n info += self[name].__str__()\n info += '\\n'\n return info", "title": "" }, { "docid": "f70940a3c94d12208e2602fc94b16e80", "score": "0.5387626", "text": "def get_run_info(spec='O3', res='0.25x0.3125', region='EU', fname='',\n scale=1, pcent=False, ClearFlo_unit=False):\n from AC.core import get_latlonalt4res\n from AC.variables import tra_unit, latex_spec_name\n\n # Set variables (e.g. 
res) or automation of variable setting here\n# res = get_run_descriptors()\n # Kludge set res manually\n res = '0.5x0.666'\n\n # Get lat and lon of GC grid for given resolution\n lon, lat, NIU = get_latlonalt4res(res=res)\n\n # Set units based on species name + get scaling\n if pcent:\n units = '%'\n else:\n units, scale = tra_unit(spec, ClearFlo_unit=ClearFlo_unit, scale=True)\n\n # Set filename from run detail\n fname += '{}_{}_{}_{}_{}.mp4'.format(region, res,\n spec, units, time.strftime(\"%y_%m_%d_%H_%M\"))\n\n # setup plot title\n title = 'Surface {} / {}'.format(latex_spec_name(spec), units)\n\n return lat, lon, units, fname, res, title, scale", "title": "" }, { "docid": "31d5a575c95de7eb78333c1e4267a401", "score": "0.5369136", "text": "def print_details(self):\n print(self.raster,\"\\r\")\n if not self.is_Window:\n print(\"{}\\nAttributes: {}\".format(self.raster.name,self.raster.count))\n print(\"\\nWidth: {}\\nHeight: {}\".format(self.raster.width, self.raster.height))", "title": "" }, { "docid": "8a90155aed00740ab4e9b301a16d5c3e", "score": "0.5364401", "text": "def __str__(self):\n # The game may assume this function returns a reasonable representation\n # of the board for printing, but may not assume details about it.\n returned = ''\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[i])):\n returned += self.matrix[i][j] + ' '\n if j == len(self.matrix[i]) - 1:\n returned += \"\\n\"\n break\n return returned", "title": "" }, { "docid": "c8d226fb61387c2ad59d3be2d136e047", "score": "0.53602356", "text": "def metadata_info():\n\tds_aia_lev1=Sdo_aia_dataset(\"http://medoc-sdo.ias.u-psud.fr/webs_aia_dataset\")\n\treturn ds_aia_lev1.display()", "title": "" }, { "docid": "27c7ec59d531d0aa3c58cd594c78369d", "score": "0.5357384", "text": "def get_resolution(self):\n return self.grid.resolution", "title": "" }, { "docid": "186bfe1c2c04408ff2437e1ec7a28c51", "score": "0.5355983", "text": "def show(self):\n\t\tfor row in self.wmatrix:\n\t\t\tfor col in row:\n\t\t\t\tprint(col)", "title": "" }, { "docid": "303be34354d565c0196a05ce37d50a64", "score": "0.53535753", "text": "def to_string(self): \n for i in self.puzzle_array:\n print(i)\n print(\"current blank is \" + str(self.blank) + \"\\ntotal number of moves is \" + str(self.move_number) + \"\\ncurrent missing tiles is \" + (str)(self.number_of_misplaced_tiles()) + \"\\nso cost is \" + str(self.cost))\n print(\"\")", "title": "" }, { "docid": "e733919d2c1059bef446d2a0d0e915c2", "score": "0.5351619", "text": "def map_detail(self):\n center = self.polygon.centroid\n return \"15/%f/%f\" % (center.y, center.x)", "title": "" }, { "docid": "fe4a4a6613b1dcc976caa6ea7b82d7de", "score": "0.5351287", "text": "def info(self) -> str:\n return self.__reg_name__ + \"(l:%g\" % self.limit + \")\"", "title": "" }, { "docid": "fe4a4a6613b1dcc976caa6ea7b82d7de", "score": "0.5351287", "text": "def info(self) -> str:\n return self.__reg_name__ + \"(l:%g\" % self.limit + \")\"", "title": "" }, { "docid": "36c707f89cf1606c5a7fcc3d29eacb6e", "score": "0.5344974", "text": "def print_bump_info(self):\n\n # Print stuff\n print(\"BUMP WINDOW\")\n print(f\" loc = {self.min_loc_ar[0]}\")\n print(f\" width = {self.min_width_ar[0]}\")\n \n # Check if there ara multiple channels\n if not isinstance(self.min_Pval_ar[0], np.ndarray):\n # Print stuff for 1 channel\n print(f\" local p-value = {self.min_Pval_ar[0]:.5g}\")\n print(f\" -ln(loc p-value) = {self.t_ar[0]:.5f}\")\n print(f\" local significance = {norm.ppf(1 - self.min_Pval_ar[0]):.5f}\")\n else:\n 
# Print stuf for multiple channels\n print(\n \" local p-value (per channel) = [\",\n end=''\n )\n [\n print(f\"{self.min_Pval_ar[0][ch]:.5g} \",end='')\n for ch in range(len(self.min_Pval_ar[0]))\n ]\n print(\"]\")\n print(\n f\" local p-value (combined) = {self.min_Pval_ar[0].prod():.5g}\"\n )\n print(\n f\" -ln(loc p-value) (combined) = {self.t_ar[0]:.5f}\"\n )\n print(\n f\" local significance (combined) = {norm.ppf(1 - self.min_Pval_ar[0].prod()):.5f}\"\n )\n \n print(\"\")\n\n return", "title": "" }, { "docid": "95d0cbd29cb4f8242ea46f2bb07835ea", "score": "0.53423625", "text": "def get_params_info(cls):\n return dict(\n pi_res='phase interpolator resolution',\n lch='transistor channel length',\n wn='NMOS width',\n wp='PMOS width',\n ctrl_nf_inv0='ctrl array inv0 NMOS finger number',\n ctrl_nf_inv1='ctrl array inv1 NMOS finger number',\n clk_nf_inv='clock cell inv NMOS finger number',\n clk_nf_tinv0='clock cell tinv NMOS finger number',\n clk_nf_tinv1='clock cell tinv switch NMOS finger number',\n buf_nf_inv0='buffer inv0 NMOS finger number',\n buf_nf_inv1='buffer inv1 NMOS finger number',\n cload_nf='cload MOS finger number',\n intent='transistor threshold',\n ndum_side='dummy finger number at side',\n ndum='dummy finger number between cells',\n ptap_w='NMOS substrate width, in meters/number of fins.',\n ntap_w='PMOS substrate width, in meters/number of fins.',\n g_width_ntr='gate track width in number of tracks.',\n ds_width_ntr='source/drain track width in number of tracks.',\n show_pins='True to draw pins.',\n power_width_ntr='power width in number of tracks',\n )", "title": "" }, { "docid": "0ae5c475ec1a9597243730d37316fcb1", "score": "0.5327173", "text": "def get_info(self):\n description = type(self).__name__ + ': '\n description += 'First Stream: {} - '.format(type(self._input_stream).__name__)\n description += 'Drift Stream: {} - '.format(type(self._drift_stream).__name__)\n description += 'alpha: {} - '.format(self.alpha)\n description += 'position: {} - '.format(self.position)\n description += 'width: {} - '.format(self.width)\n return description", "title": "" }, { "docid": "c1e8162f24890d552d01506ce6cea52f", "score": "0.53249013", "text": "def get_info(self):\n return \"cluster {0:d}\\nredshift is {1:f}\".format(self.cluster_id, self.candidate.get_redshift(self.cluster_id))", "title": "" } ]
3dbd503e47042cfe17541567b30f6a37
Return visible windows belonging to a process.
[ { "docid": "74ac6ca4f6cc2566322cea394c8397ae", "score": "0.77159387", "text": "def find_windows_for_process(self, process_id, display_name):\n pids = self.get_process_ids(process_id)\n if not pids:\n return []\n\n logger.info(\n 'Waiting for 30 seconds to ensure all windows appear: '\n 'pid=%s, display=%s', pids, display_name)\n time.sleep(30)\n\n visible_windows = set()\n for pid in pids:\n _, windows = common.execute(\n 'xdotool', 'search --all --pid %s --onlyvisible --name \".*\"' % pid,\n '.', env={'DISPLAY': display_name}, exit_on_error=False,\n print_command=False, print_output=False, stdin=common.BlockStdin())\n for line in windows.splitlines():\n if not line.isdigit():\n continue\n visible_windows.add(line)\n\n logger.info('Found windows: %s', ', '.join(list(visible_windows)))\n return visible_windows", "title": "" } ]
[ { "docid": "3fd2e2789de871ce4471b156866f2db6", "score": "0.73314553", "text": "def get_hwnds(pid):\n\n def callback(hwnd, hwnds):\n if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):\n _, found_pid = win32process.GetWindowThreadProcessId(hwnd)\n if found_pid == pid:\n hwnds.append(hwnd)\n return True\n\n hwnd_list = []\n win32gui.EnumWindows(callback, hwnd_list)\n return hwnd_list", "title": "" }, { "docid": "7b18d4844969f3b0738fd818661a8966", "score": "0.64775866", "text": "def window__get_visibility_states(hwnd):\n placement = WINDOWPLACEMENT()\n placement.length = c_sizeof(placement)\n val = windll.user32.GetWindowPlacement(hwnd, byref(placement))\n if val is None:\n return []\n show_cmd = placement.showCmd\n if show_cmd == SW_HIDE:\n return ['hidden']\n if show_cmd == SW_SHOWNORMAL:\n return ['shown', 'restored', 'active']\n if show_cmd == SW_SHOWMINIMIZED:\n return ['shown', 'minimized', 'active']\n if show_cmd == SW_MAXIMIZE:\n return ['shown', 'maximized', 'active']\n if show_cmd == SW_SHOWMAXIMIZED:\n return ['shown', 'maximized', 'active']\n if show_cmd == SW_SHOWNOACTIVATE:\n return ['shown', 'restored']\n if show_cmd == SW_SHOW:\n return ['shown', 'restored', 'active']\n if show_cmd == SW_MINIMIZE:\n return ['shown', 'minimized', 'active']\n if show_cmd == SW_SHOWMINNOACTIVE:\n return ['shown', 'minimized']\n if show_cmd == SW_SHOWNA:\n return ['shown', 'restored']\n if show_cmd == SW_RESTORE:\n return ['shown', 'restored', 'active']\n return []", "title": "" }, { "docid": "6d0f5f09e1e28cd4143e121abe9a6c9f", "score": "0.64162284", "text": "def getWindows(self):\n\n def callback(hWnd, windows):\n if not self.isRealWindow(hWnd):\n return\n text = win32gui.GetWindowText(hWnd)\n windows.append((hWnd, text))\n\n windows = []\n win32gui.EnumWindows(callback, windows)\n return windows", "title": "" }, { "docid": "aef5fa7e9469914f16d291df916de71a", "score": "0.62866724", "text": "def getWindows(self):\n window_list = CGWindowListCopyWindowInfo(\n kCGWindowListExcludeDesktopElements | kCGWindowListOptionOnScreenOnly,\n kCGNullWindowID,\n )\n\n return [\n (win.valueForKey_(\"kCGWindowNumber\"), win.valueForKey_(\"kCGWindowName\"))\n for win in window_list\n if win.valueForKey_(\"kCGWindowSharingState\")\n and win.valueForKey_(\"kCGWindowName\")\n ]", "title": "" }, { "docid": "3497ec5f1e524b08562eb98c184bd18f", "score": "0.60375625", "text": "def visible_views():\n window = sublime.active_window()\n\n # Priority for the active view\n active_view = window.active_view()\n yield active_view\n\n num_groups = window.num_groups()\n for group_id in range(num_groups):\n view = window.active_view_in_group(group_id)\n if view != active_view:\n yield view", "title": "" }, { "docid": "523db8d51ef90a47a0b260d49f22dda4", "score": "0.59456027", "text": "def get_windows(ws):\n windows = []\n for w in ws.descendants():\n if w.window and w.window_class != \"i3bar\":\n windows.append(w)\n return windows", "title": "" }, { "docid": "194aded8a9ba50219a2005efcf6d73f0", "score": "0.5920721", "text": "def window__find_handles():\n ret = []\n\n def callback(hwnd):\n ret.append(hwnd)\n # return True to continue the enumeration\n return True\n\n window__enum_window_handles(callback)\n\n return ret", "title": "" }, { "docid": "e7031b5490e3f07ec57d9f04e6cbddb2", "score": "0.58432233", "text": "def _get_window_list(self):\n window_list = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements, Quartz.kCGNullWindowID)\n return window_list", "title": "" }, { "docid": 
"770f65f2e56ba5d75ab86b0e43f4b70c", "score": "0.5742749", "text": "async def window_handles(self):\n if self.w3c:\n return (await self.execute(Command.W3C_GET_WINDOW_HANDLES))['value']\n else:\n return (await self.execute(Command.GET_WINDOW_HANDLES))['value']", "title": "" }, { "docid": "dc8b68fa391e278e6bcfbebc14f5a62e", "score": "0.57181937", "text": "def __iter__(self) -> Iterator[Window]:\n win_ids = self._call(\"WinGetList\", *self._query())\n if win_ids is None:\n return\n for win_id in win_ids.values():\n if win_id > 0:\n yield Window(win_id)", "title": "" }, { "docid": "cc6329cb7ea6c05250df63b54ddedd87", "score": "0.56293464", "text": "def Windows(self):\n return self._window_setting", "title": "" }, { "docid": "fa122a18741aeb0c7627ae6b0bd0332c", "score": "0.5596343", "text": "def get_windows():\n # TODO: How to support tuples in the request string?\n window_type = request.args.get('type')\n if window_type is None:\n return jsonify(dsp_helpers.get_window(\"all\"))\n\n else:\n nx = request.args.get('nx', 100)\n return jsonify(dsp_helpers.get_window(window_type))\n\n #TODO: Implement this function to return a list of all windows\n\n return False", "title": "" }, { "docid": "08449e943136bfb0dd782cf673a4782b", "score": "0.5574944", "text": "def _get_windows(self):\n windows = []\n for i in range(len(self.phases)):\n windows_tmp = WindowMaker.windows_from_dataset(\n self.dataset, 'ak135', [self.phases[i]],\n [self.components[i]],\n t_before=self.t_before, t_after=self.t_after)\n windows += windows_tmp\n windows = [\n w for w in windows\n if\n self.distance_min <= w.get_epicentral_distance()\n <= self.distance_max]\n return windows", "title": "" }, { "docid": "870a26bc8ae0e7328f0c13149247a37e", "score": "0.5521315", "text": "def list(cls):\n\t\tout = subprocess.check_output(['wmctrl','-l','-G','-p','-x'])\n\t\twindows = []\n\t\tfor line in out.splitlines():\n\t\t\tparts = line.split(None, len(cls._fields)-1)\n\t\t\tparts = [p.strip() for p in parts]\n\t\t\tparts[1:7] = [int(p) for p in parts[1:7]]\n\t\t\tif len(parts) == 9: #Title field is missing\n\t\t\t\tparts.append('')\n\t\t\twindows.append(cls(*parts))\n\t\treturn windows", "title": "" }, { "docid": "4eef15af438c4b52b091a391bc0caaf3", "score": "0.54752564", "text": "def getWindowByPID(self, pid, order=0):\n for w in self._get_window_list():\n if \"kCGWindowOwnerPID\" in w and w[\"kCGWindowOwnerPID\"] == pid:\n print(self.getWindowRect(w[\"kCGWindowNumber\"]))\n # Matches - make sure we get it in the correct order\n if order == 0:\n return w[\"kCGWindowNumber\"]\n else:\n order -= 1\n return None", "title": "" }, { "docid": "e92bdaebb7c676604e97c2f1da2b70b6", "score": "0.547475", "text": "def online_service_window_visible(self, visible=True, time=5):\n sym_names_list = list()\n sym_names_list.append(self.sym_names.homepage_Window)\n sym_names_list.append(self.sym_names.homepage_KPVersion_label)\n sym_names_list.append(self.sym_names.homepage_danfosslink_Label)\n return self.Preferenceswindow.Preferences_window_is_visible(sym_names_list, visible)", "title": "" }, { "docid": "0b0bdb87317cd447585e9c5899c4f0e6", "score": "0.5461032", "text": "def get_window_info(self):\n return self.nvim.call('rnvimr#rpc#get_window_info')", "title": "" }, { "docid": "6c29e4d3171883815f9549ab4c695df7", "score": "0.5410186", "text": "def getOpenedWindows():\n # type: () -> Tuple[FPMIWindow, ...]\n return FPMIWindow(\"Main Window\"), FPMIWindow(\"Other Window\")", "title": "" }, { "docid": "1c7bea518655e43ad838aabc8695471f", "score": "0.53967226", "text": 
"def get_visible_children(self):\n return [c for c in self._children if c.get_visible()]", "title": "" }, { "docid": "0ceadfae83d16206f7dc2e89cac21926", "score": "0.53625643", "text": "def get_titles() -> list:\n enum_windows = ctypes.windll.user32.EnumWindows\n enum_windows_proc = ctypes.WINFUNCTYPE(ctypes.c_bool,\n ctypes.POINTER(ctypes.c_int),\n ctypes.POINTER(ctypes.c_int))\n get_window_text = ctypes.windll.user32.GetWindowTextW\n get_window_text_length = ctypes.windll.user32.GetWindowTextLengthW\n is_window_visible = ctypes.windll.user32.IsWindowVisible\n\n titles = []\n\n def foreach_window(hwnd, _l_param):\n if is_window_visible(hwnd):\n length = get_window_text_length(hwnd)\n buff = ctypes.create_unicode_buffer(length + 1)\n get_window_text(hwnd, buff, length + 1)\n titles.append(buff.value)\n return True\n\n enum_windows(enum_windows_proc(foreach_window), 0)\n\n return titles", "title": "" }, { "docid": "2330abc3ffcc3060b73751d87b4267a0", "score": "0.5343851", "text": "def findWindow(path):\n # type: (String) -> List[FPMIWindow]\n print(path)\n return [FPMIWindow(\"Window\")]", "title": "" }, { "docid": "4fd6522192764bce663b2b3375fc51bf", "score": "0.52795994", "text": "def visible(self):\n return golly.visrect( [self.x, self.y, self.wd, self.ht] )", "title": "" }, { "docid": "0357daa47f47f33579610bc57094514b", "score": "0.52168417", "text": "def get_visible(self, id):\r\n return self.visibilities[id]", "title": "" }, { "docid": "a6c575443c281867c0cb543613bac759", "score": "0.5209724", "text": "def getSpectrumWindowList(self):\n windows = {}\n if self.params.peakList:\n views = getSpectrumViews(self.params.peakList.dataSource)\n for view in views:\n windows[view.spectrumWindowPane.spectrumWindow] = None\n\n return [[w.name, w] for w in windows.keys()]", "title": "" }, { "docid": "ddfcd98dc1c797de9d25f8a31d197559", "score": "0.52073294", "text": "def getMinWindow(self):\n return list(self.minWin)", "title": "" }, { "docid": "fb1f4e79b6d1f48661b70e29618fc955", "score": "0.51824015", "text": "def find_process(self, processName=None):\n system = winappdbg.System()\n for process in system:\n if process.get_filename() is not None:\n name = process.get_filename().split(\"\\\\\")[-1]\n if processName is None:\n self.running.append((name, process.get_pid()))\n else:\n if name == processName:\n self.hwnd = process\n break;", "title": "" }, { "docid": "2301793026a135b0344d680c3feaa38a", "score": "0.51604354", "text": "def visibles(self) -> dict[Pos, set[Pos]]:\n\n def visible_from(pos_0: Pos, vector: tuple[int, int]) -> Pos | None:\n d_x, d_y = vector\n x_0, y_0 = pos_0\n x, y = x_0 + d_x, y_0 + d_y\n while (x, y) in self.bounds:\n if (x, y) in self.tiles:\n return x, y\n x, y = x + d_x, y + d_y\n else:\n return None\n\n visibles = defaultdict(set)\n for pos in self.tiles.keys():\n for direction in [(+1, 0), (0, +1), (+1, +1), (-1, +1)]:\n visible_pos = visible_from(pos, direction)\n if visible_pos:\n visibles[pos].add(visible_pos)\n visibles[visible_pos].add(pos)\n\n return dict(visibles)", "title": "" }, { "docid": "5cfdc7754b854a14a61bd9f659d0d880", "score": "0.51215446", "text": "def get_visible(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "d583df321d1e96d4fe89aa5541040aa6", "score": "0.51065975", "text": "def getVisibleObjects(self, table):\n visible = []\n for viewRow in xrange(table.rowCount):\n modelRow = table.convertRowIndexToModel(viewRow)\n visible.append(self[modelRow])\n return visible", "title": "" }, { "docid": "5c1ec6450a3ab1f21372d61c7b83cd85", "score": 
"0.5104462", "text": "def _get_visible(self):\n return self._visible", "title": "" }, { "docid": "5c1ec6450a3ab1f21372d61c7b83cd85", "score": "0.5104462", "text": "def _get_visible(self):\n return self._visible", "title": "" }, { "docid": "73fd8a2bebc9f22266a5550a78b7eec1", "score": "0.5096725", "text": "def get_visible_vertices(cam, obj, ignore_occlusion=False, perc_z_eps=1e-6, hide=None):\n logger_name = thisfile + '->get_visible_vertices()'\n\n scene = bpy.context.scene\n w, h = scene.render.resolution_x, scene.render.resolution_y\n scale = scene.render.resolution_percentage / 100.\n\n # Get camera matrix\n cam_mat, _, ext_mat = get_camera_matrix(cam)\n\n # Get z-buffer\n if not ignore_occlusion:\n zbuffer = get_camera_zbuffer(cam, hide=hide)\n\n # Get mesh data from object\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n visible_vert_ind = []\n # For each of its vertices\n for bv in bm.verts:\n\n # Check if its projection falls inside frame\n v_world = obj.matrix_world * bv.co # local to world\n uv = np.array(cam_mat * v_world) # project to 2D\n uv = uv[:-1] / uv[-1]\n if uv[0] >= 0 and uv[0] < w * scale and uv[1] >= 0 and uv[1] < h * scale:\n # Yes\n\n if ignore_occlusion:\n # Considered visible already\n visible_vert_ind.append(bv.index)\n else:\n # Proceed to check occlusion with z-buffer\n v_cv = ext_mat * v_world # world to camera to CV\n z = v_cv[-1]\n z_min = zbuffer[int(uv[1]), int(uv[0])]\n if (z - z_min) / z_min < perc_z_eps:\n visible_vert_ind.append(bv.index)\n\n logger.name = logger_name\n logger.info(\"Visibility test done with camera '%s'\", cam.name)\n logger.warning(\"... using w = %d; h = %d\", w * scale, h * scale)\n\n return visible_vert_ind", "title": "" }, { "docid": "0c03f18d61deb0c68b765730a9dd14be", "score": "0.50846976", "text": "def prj_window_check_controller_visible(self,visible=True):\n return self.project_win.prj_window_check_controller_visible(self.sym_names.online_controller_variant_Product_Image,visible)", "title": "" }, { "docid": "23dc4a9f3188afab89714edb3aef9384", "score": "0.506798", "text": "def get_visibility_from_video(self, video_ind):\n instance_id = self.data_infos[video_ind]\n img_ids = self.coco.instancesToImgs[instance_id]\n visible = []\n for img_id in img_ids:\n for ann in self.coco.imgToAnns[img_id]:\n if ann['instance_id'] == instance_id:\n visible.append(not ann.get('occluded', False))\n visible_info = dict(visible=np.array(visible, dtype=np.bool_))\n return visible_info", "title": "" }, { "docid": "f5110528495883a1ec14262be5c802a1", "score": "0.5067343", "text": "def visible(self):\n return self.GetVisible()", "title": "" }, { "docid": "f5110528495883a1ec14262be5c802a1", "score": "0.5067343", "text": "def visible(self):\n return self.GetVisible()", "title": "" }, { "docid": "cf11e2714323ae917ce0fcafa0b672c4", "score": "0.5065576", "text": "def get_visible_objects(self) -> List[int]:\n\n # Try to get unique colors with a reasonable number of max colors.\n # If the number of colors exceeds this, `getcolors()` returns None.\n for max_colors in [256, 512, 1024]:\n try:\n colors = [c[1] for c in self.state.get_pil_images()[\"id\"].getcolors(maxcolors=max_colors)]\n return [o for o in self.objects_static if self.objects_static[o].segmentation_color in colors]\n except TypeError:\n continue\n # This should never happen, but it's better to prevent the build rom crashing.\n return []", "title": "" }, { "docid": "f6b86dc0975bbee4341a038ce09083a3", "score": "0.50613064", "text": "def about_window_is_visible(self, visible=True):\n sym_names_list 
= list()\n sym_names_list.append(self.sym_names.about_Window)\n sym_names_list.append(self.sym_names.about_Window_Title_Label)\n sym_names_list.append(self.sym_names.about_Window_cancel_Button)\n sym_names_list.append(self.sym_names.about_Window_KoolProg_Label)\n sym_names_list.append(self.sym_names.about_Window_version_Label)\n sym_names_list.append(self.sym_names.about_Window_rights_Label)\n sym_names_list.append(self.sym_names.about_Window_Close_Button)\n self.CommonFunction.about_window_is_visible(sym_names_list, visible)", "title": "" }, { "docid": "411396b39b6ab8e8c847e8f402de3281", "score": "0.5061104", "text": "def get_View(self):\n view = [i for (i, c) in enumerate(self.controls) if c.Shown]\n return view", "title": "" }, { "docid": "15a55c2a3e2d2858b821b0fdd9ccf0b5", "score": "0.50586617", "text": "def get_windows():\n main = crs.initscr() # For the bulk of output.\n main.resize(crs.LINES - 3, crs.COLS)\n inbar = crs.newwin(1, crs.COLS, crs.LINES - 1, 0) # For user input.\n infobar = crs.newwin(1, crs.COLS, crs.LINES - 2, 0) # For 'now playing'.\n outbar = crs.newwin(1, crs.COLS, crs.LINES - 3, 0) # For notices.\n return main, inbar, infobar, outbar", "title": "" }, { "docid": "779ec9ca55632847a1e2cf92e8d163af", "score": "0.5057016", "text": "def Visible(self):\n self.link._check_connection()\n command = 'G_Visible'\n self.link._send_line(command)\n self.link._send_item(self)\n visible = self.link._rec_int()\n self.link._check_status()\n return visible", "title": "" }, { "docid": "d8ca4be1df0e6c94aca7a8b34602609c", "score": "0.5056472", "text": "def get_View(self):\n view = [i for (i,c) in enumerate(self.controls) if c.Shown]\n return view", "title": "" }, { "docid": "faf03002939d387929fbf44aa1b63ad2", "score": "0.50544447", "text": "def GetVisible(self) -> bool:\n ...", "title": "" }, { "docid": "319e3f1fc9653d44c5ea32ee84b4f944", "score": "0.50531626", "text": "def visiblePagesAt(self, rect):\n if not self.opaquePages:\n for p in self.pages:\n yield p, rect & p.geometry()\n else:\n covered = QRegion()\n for p in self.pages:\n overlayrect = rect & p.geometry()\n if not overlayrect or not QRegion(overlayrect).subtracted(covered):\n continue # skip if this part is hidden below the other\n covered += overlayrect\n yield p, overlayrect\n if not QRegion(rect).subtracted(covered):\n break", "title": "" }, { "docid": "274806b47d6b3e6635853f401ddcb05e", "score": "0.50132114", "text": "def chartWindow_check_grid_is_visible_in_graph_view(self, visible=True):\n return self.chartWindow.chartWindow_check_grid_is_visible_in_graph_view(self.sym_names.UIGraph_WindowsFormsAccessibleObject_Grid, visible)", "title": "" }, { "docid": "92bb59ce04aa06999abaf8e13bbfbaca", "score": "0.5007815", "text": "def get_all_window_ids(self):\r\n return self.get_string_array(\"getAllWindowIds\", [])", "title": "" }, { "docid": "fa5648a7f61b0638958998d9b2295773", "score": "0.50044334", "text": "def get_all_window_names(self):\r\n return self.get_string_array(\"getAllWindowNames\", [])", "title": "" }, { "docid": "ee5a26b7f3387aefdee6aac8f4683c59", "score": "0.50019157", "text": "def Window_Dialog_PopUp_visible(self, visible=True):\n sym_list = list()\n sym_list.append(self.sym_names.Window_Dialog)\n return self.WindowDialogPopUp.Window_Dialog_PopUp_visible(sym_list, visible)", "title": "" }, { "docid": "e1c8b47111908bf1d158c6ac124726d7", "score": "0.49918827", "text": "def online_service_window_check_parameters_tab_is_visible(self, visible=True):\n return 
self.OnlineServiceWindow.online_service_window_check_parameters_tab_is_visible(self.sym_names.koolProg_Parameters_TabItem, visible)", "title": "" }, { "docid": "da28da2613fc0c8cb8827433f5d58352", "score": "0.49877462", "text": "def get_xvnc_process_info():\n\n # Configure logging\n log = logging.getLogger(\"get_xvnc_process_info\")\n\n # Get a list of all the Xvnc processes running\n log.debug(\"Querying list of Xvnc processes from OS...\")\n try:\n process_list = subprocess.check_output([\"ps\", \"--no-header\", \"-ww\", \"-C\", \"Xvnc\", \"-o\", \"user=WIDE-USER-COLUMN,pid,args\"])\n except subprocess.CalledProcessError:\n # If 'ps' returns nothing, it sets the returncode to '1', which causes the\n # CalledProcessError exception to be thrown. In this case, it is ok for\n # there to be no Xvnc processes running and thus nothing for ps to return.\n process_list = b\"\"\n process_list = process_list.decode('utf8')\n process_list = process_list.splitlines()\n\n # Parse out the server information from the listing returned\n active_sessions = []\n log.debug(\"Parsing Xvnc process list...\")\n for process in process_list:\n matches = re.search(r\"^(?P<username>(\\w|-)+)\\s+(?P<pid>\\d+)\\s+(?P<exe>[\\w/]+)\\s+(?P<args>.+$)\",\n process)\n username, pid, exe, args = matches.group(\"username\", \"pid\", \"exe\", \"args\")\n\n # Get the display number from the args\n match = re.search(r\"^:(?P<display_number>\\d+)\", args)\n if match is None:\n display_number = 0\n else:\n display_number = match.group(\"display_number\")\n\n # Get the display name from the args\n match = re.search(r\"-desktop\\s+(?P<display_name>[ a-zA-Z0-9/\\-|.:()]+?)\\s+(-|$)\", args)\n if match is None:\n display_name = \"{0}:{1}\".format(username, display_number)\n else:\n display_name = match.group(\"display_name\")\n\n # Get the geometry from the args\n match = re.search(r\"-geometry\\s+(?P<geometry>[ 0-9x]+?)\\s+(-|$)\", args)\n if match is None:\n geometry = \"Unknown\"\n else:\n geometry = match.group(\"geometry\")\n\n # Get the pixelformat from the args, which if not present, can be inferred\n # from the depth parameter if it is present in the args\n # :NOTE: Consult the Xvnc man page for details on the defaults for\n # pixelformat and depth.\n match = re.search(r\"-pixelformat\\s+(?P<pixelformat>[a-zA-Z0-9]+?)\\s+(-|$)\", args)\n if match is None:\n match = re.search(r\"-depth\\s+(?P<depth>[0-9]+?)\\s+(-|$)\", args)\n if match is None:\n # Assume default depth of 24 and default pixelformat of RGB888\n pixelformat = \"RGB888\"\n else:\n depth = match.group(\"depth\")\n if depth == \"8\":\n pixelformat = \"BGR233\"\n elif depth == \"15\":\n # :NOTE: The Xvnc man page doesn't give a default for 15bpp\n pixelformat = \"Unknown\"\n elif depth == \"16\":\n pixelformat = \"RGB565\"\n elif depth == \"24\":\n pixelformat = \"RGB888\"\n else:\n pixelformat = \"Unknown\"\n else:\n pixelformat = match.group(\"pixelformat\").upper()\n\n # Add the session to the list of sessions to return to the client.\n new_session_info = {}\n new_session_info[\"username\"] = username\n new_session_info[\"pid\"] = pid\n new_session_info[\"display_number\"] = display_number\n new_session_info[\"display_name\"] = display_name\n new_session_info[\"geometry\"] = geometry\n new_session_info[\"pixelformat\"] = pixelformat\n\n active_sessions.append(new_session_info)\n\n log.debug(\"Found {0} Xvnc servers running.\".format(len(active_sessions)))\n return active_sessions", "title": "" }, { "docid": "414131c422a85d981f43ebfebf11624c", "score": 
"0.49805808", "text": "def ListUsedProcessIds(self):\n return [urn.Basename() for urn in self.ListChildren()]", "title": "" }, { "docid": "98b4516ecc3871c0f7e8767be2f7b9ef", "score": "0.49706668", "text": "def get_visible_params(self):\n return [param for param in self.parms.values() if not param.invisible]", "title": "" }, { "docid": "7b5dddc945566ed0bdcd2855fa8f842b", "score": "0.49676117", "text": "def _get_parents_from_windows(self, timepoint, windows):\n return [\n timepoint2 for timepoint2 in windows\n if timepoint2 != timepoint and timepoint in windows[timepoint2]\n ]", "title": "" }, { "docid": "83dc7661dd49e22c8a560a9f5808f69a", "score": "0.49561572", "text": "def include_hidden_windows(self, include=True):\n return dc.replace(self, hidden_windows=include)", "title": "" }, { "docid": "11eb4e6caf6431fcbead201513f26138", "score": "0.49518687", "text": "def getVisibleEntries (self):\n viewHeight = cmds.flowLayout(self.entryFlow, query=True, height=True)\n ix = self.getScrollIndex()\n visVals = self.entryVals[ix:]\n keyPairs = zip(self.settings[\"selectionKeys\"], visVals)\n heights = 0\n n = 0\n results = []\n for k, v in keyPairs:\n entry = self.entries[v]\n h = cmds.text(entry[\"ui\"], query=True, height=True)\n if heights + h > viewHeight:\n break\n results.append((k, v, entry))\n heights = heights + h\n n += 1\n return results", "title": "" }, { "docid": "0962fe0a0e4ab01603ba3bc1e6b047a0", "score": "0.4947565", "text": "def check_chartWindow_is_visible(self, visible=True):\n sym_list = list()\n sym_list.append(self.sym_names.uI_ChartWindow)\n sym_list.append(self.sym_names.uIChart_btnStop_Button)\n sym_list.append(self.sym_names.uIChart_logInterval_Label)\n sym_list.append(self.sym_names.uIChart_LogPeriod_Label)\n sym_list.append(self.sym_names.uIChart_logInterval_ComboBox)\n sym_list.append(self.sym_names.uIChart_LogPeriod_ComboBox)\n return self.chartWindow.check_chartWindow_is_visible(sym_list,visible)", "title": "" }, { "docid": "6173adba0b9a3ae4480109002b4344eb", "score": "0.49474207", "text": "def viewmore_window_visible(self, visible=True):\n sym_names_list = list()\n sym_names_list.append(self.sym_names.viewmore_Window)\n sym_names_list.append(self.sym_names.viewmore_Window_WindowTitle_Label)\n sym_names_list.append(self.sym_names.viewmore_Window_Maximize_Button)\n sym_names_list.append(self.sym_names.viewmore_Window_Close_X_Button)\n sym_names_list.append(self.sym_names.viewmore_Window_Close_Button)\n self.CommonFunction.viewmore_window_visible(sym_names_list, visible)", "title": "" }, { "docid": "c415c0b7627b036c03dc19946066486d", "score": "0.4923091", "text": "def Preferences_window_is_visible(self, visible=True):\n sym_names_list = list()\n sym_names_list.append(self.sym_names.preference_Window)\n sym_names_list.append(self.sym_names.preference_Window_Title_Label)\n sym_names_list.append(self.sym_names.preference_Window_close_Button)\n sym_names_list.append(self.sym_names.preference_Window_Languages_Label)\n sym_names_list.append(self.sym_names.preference_Window_Languages_ComboBox)\n sym_names_list.append(self.sym_names.preference_Window_SaveFileson_Label)\n sym_names_list.append(self.sym_names.preference_Window_FilePath_Edit)\n sym_names_list.append(self.sym_names.preference_Window_Unit_Label)\n sym_names_list.append(self.sym_names.preference_Window_Unitconverter_ComboBox)\n sym_names_list.append(self.sym_names.preference_Window_Baudrate_Label)\n sym_names_list.append(self.sym_names.preference_Window_Baudrate_ComboBox)\n 
sym_names_list.append(self.sym_names.preference_Window_PasswordEnable_CheckBox)\n sym_names_list.append(self.sym_names.preference_Window_btnBrowse_Button)\n sym_names_list.append(self.sym_names.preference_Window_Save_Button)\n sym_names_list.append(self.sym_names.preference_Window_Cancel_Button)\n return self.Preferenceswindow.Preferences_window_is_visible(sym_names_list, visible)", "title": "" }, { "docid": "ee976bbd0ae875af4e92fc4eb1568ba9", "score": "0.49219924", "text": "def visibilities(self):\n return self._visibilities", "title": "" }, { "docid": "2319a6771a095677410b55efbf2edcce", "score": "0.49000725", "text": "def is_visible(self):\n style = self.style\n if style is None:\n return False\n return WindowStyle.VISIBLE in style", "title": "" }, { "docid": "e2c0a06732f85e556ecde0d2f598b9fe", "score": "0.48987395", "text": "def _get_window_geometry(display, windowname):\n cmd = [XWININFO, '-d', display, '-name', windowname]\n xwininfo = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = xwininfo.communicate()\n retcode = xwininfo.wait()\n if retcode != 0:\n print \"Error finding console xwindow info: \" + err\n raise UninitializedWindowError\n\n width = None\n height = None\n mapped = False\n for line in out.splitlines():\n line = line.strip()\n if line.startswith(\"Map State:\"):\n if line.split()[-1] != \"IsViewable\":\n # Window is not mapped yet.\n raise UnmappedWindowError\n else:\n mapped = True\n if line.startswith(\"Width:\"):\n width = int(line.split()[1])\n elif line.startswith(\"Height:\"):\n height = int(line.split()[1])\n if width and height and mapped:\n return [width, height]\n else:\n # What, no width and height???\n print \"No window geometry info returned by \" + XWINFINFO\n raise UnmappedWindowError", "title": "" }, { "docid": "7795a35480a7fa3837cbfc9b65e850ab", "score": "0.48937517", "text": "def search_windows(img, windows, clf, scaler, color_space='RGB', \n spatial_size=(32, 32), hist_bins=32, \n hist_range=(0, 256), orient=9, \n pix_per_cell=8, cell_per_block=2, \n hog_channel=0, spatial_feat=True, \n hist_feat=True, hog_feat=True):\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #2) Iterate over all windows in the list\n for window in windows:\n #3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n #8) Return windows for positive detections\n return on_windows", "title": "" }, { "docid": "e9cf1d85dd9399ce21e48f2bd8b8c0b6", "score": "0.48789158", "text": "def precompute_windows(img, win_size):\n rows, cols = img.shape\n windows = []\n for i in range(rows):\n curr_windows = []\n for j in range(cols):\n curr_windows.append(get_window(img, (i, j), win_size))\n windows.append(curr_windows)\n return np.array(windows)", "title": "" }, { "docid": 
"b5f74f4692b4c0fdbfc049251ccb4935", "score": "0.4853628", "text": "def getWindowRect(self, hwnd):\n for w in self._get_window_list():\n if \"kCGWindowNumber\" in w and w[\"kCGWindowNumber\"] == hwnd:\n x = w[\"kCGWindowBounds\"][\"X\"]\n y = w[\"kCGWindowBounds\"][\"Y\"]\n width = w[\"kCGWindowBounds\"][\"Width\"]\n height = w[\"kCGWindowBounds\"][\"Height\"]\n return (x, y, width, height)\n return None", "title": "" }, { "docid": "836b81a5a862350e7464c65fd672837d", "score": "0.48487994", "text": "def isVisible():", "title": "" }, { "docid": "b79276903762a03e765b9a80c011e88b", "score": "0.48484072", "text": "def get_View(self):\n return [c.title for c in self.controls if c.Shown]", "title": "" }, { "docid": "e416ade7921c5d8e1214c41dccac9cfa", "score": "0.48470485", "text": "def print_visible_map(self):\n for y in range(libtcod.map_get_height(self.dungeon_map)):\n line = \"\"\n for x in range(libtcod.map_get_width(self.dungeon_map)):\n if libtcod.map_is_in_fov(self.dungeon_map, x, y):\n line += \" \"\n else:\n line += \"#\"\n print(line)", "title": "" }, { "docid": "9c88f1e75ea810e63a64b4b32de72146", "score": "0.48445085", "text": "def get_processes_not_responsive(self):\n tasks = check_output('tasklist /fi \"status eq not responding\"').decode('cp866', 'ignore').split(\"\\r\\n\")\n p = []\n for task in tasks:\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\n if m is not None:\n p.append({'name': m.group(1).decode(),\n 'pid': int(m.group(2).decode()),\n 'session_name': m.group(3).decode(),\n 'session_num': int(m.group(4).decode()),\n 'mem_usage': int(m.group(5).decode('ascii', 'ignore').replace(',', ''))\n })\n return(p)", "title": "" }, { "docid": "98b307b3bb3b725c87f7e242f5fde62e", "score": "0.48373184", "text": "def create_pass_through_win_key_set(pass_through_win_key: bool) -> Iterable[int]:\n return get_modifier_vk_keys(pass_through_win_key)", "title": "" }, { "docid": "699b9667794b77a979831c3724599b52", "score": "0.48358354", "text": "def visible(self):\n return self.filter(lambda plugin_ep: not plugin_ep.hidden)", "title": "" }, { "docid": "dddd0c27e55a347d8162c9c4e8c4c2e6", "score": "0.48303482", "text": "def grid(self):\n return self.GetGridVisible()", "title": "" }, { "docid": "f74cb4c57b633893bcb8902a00371441", "score": "0.4823401", "text": "def get_pids(self, process):\n pids = run_shell_command('pidof ' + process, quiet_mode=True)\n if not pids:\n return None\n return pids.strip().split(' ')", "title": "" }, { "docid": "a36ca1dbb4823c64ea6ad57fa27eb3e1", "score": "0.48174146", "text": "def project_window_get_tree_view_list(self):\n return self.project_win.project_window_get_tree_view_list(self.sym_names.windowhomepage_treeViewParameters_Tree)", "title": "" }, { "docid": "5905234edf4dae254bf8c3d19270672f", "score": "0.48173872", "text": "def get_window(self):\n return scipy.signal.get_window(\"boxcar\", self.num_points)", "title": "" }, { "docid": "e034f02049d155e121c4a140be96fb85", "score": "0.48149422", "text": "def get_processes_by_name(process_name: str) -> Iterable[psutil.Process]:\n return [p for p in psutil.process_iter() if p.name() == process_name]", "title": "" }, { "docid": "344f24512a53de2d0ce720aebaef3cfd", "score": "0.48134717", "text": "def get_all_window_titles(self):\r\n return self.get_string_array(\"getAllWindowTitles\", [])", "title": "" }, { "docid": "27d5c26e95fc3eb8e913c8fe46801de8", "score": "0.48133495", "text": "def _filter_windows(window_counts, bg_depth, dupl_hierarchy, min_windows, perc_samples):\n 
if not window_counts:\n return\n window_size = bg_depth.window_size\n neighbours = bg_depth.neighbours\n neighbours_dist = bg_depth.neighbours_dist\n\n n_samples = len(window_counts[0])\n max_failed_count = n_samples * 0.01 * perc_samples\n windows = dupl_hierarchy.windows\n n_windows = len(windows)\n\n for window, window_counts_row in zip(windows, window_counts):\n if not bg_depth.gc_content_defined(window.gc_content):\n window.in_hmm = False\n continue\n\n failed_count = n_samples - sum(map(bg_depth.window_passes, window_counts_row))\n if failed_count <= max_failed_count:\n continue\n\n window.in_hmm = False\n window_ix = window.ix\n for oth_ix in range(max(0, window_ix - neighbours), min(n_windows, window_ix + neighbours + 1)):\n if oth_ix != window_ix and window.region1.distance(windows[oth_ix].region1) < neighbours_dist:\n windows[oth_ix].in_hmm = False\n\n for region_group in dupl_hierarchy.region_groups:\n group_windows = [windows[i] for i in region_group.window_ixs]\n n_windows = sum(map(operator.attrgetter('in_hmm'), group_windows))\n if n_windows < min_windows:\n for window in group_windows:\n window.in_hmm = False", "title": "" }, { "docid": "786a204024981436d4ce253b20744d03", "score": "0.47953093", "text": "def help_window_visible(self, visible=True):\n sym_names_list = list()\n sym_names_list.append(self.sym_names.help_Window)\n sym_names_list.append(self.sym_names.help_Window_Close_X_Button)\n sym_names_list.append(self.sym_names.help_Window_Close_Button)\n sym_names_list.append(self.sym_names.help_Window_Maximize_Button)\n sym_names_list.append(self.sym_names.help_Window_WindowTitle_Label)\n self.CommonFunction.help_window_visible(sym_names_list, visible)", "title": "" }, { "docid": "32bdd027f88fc2ece64fa2b3e344feae", "score": "0.47903547", "text": "def guest_nmap_tcp_window_scan(initiator, target, version_detect=True, port_start=None, port_stop=None,\r\n random_order=True, no_ping=False):\r\n target = Nmap.__format_host_list(target)\r\n return initiator.shellExec(\r\n Nmap._nmap_tcp_window_scan(target, version_detect, port_start, port_stop, random_order, no_ping))", "title": "" }, { "docid": "77ca024b957f8326cd194b02b714621b", "score": "0.47872955", "text": "def IsVisible(self):\n return self.__IsWindowVisible(self.GetWindowHandle())", "title": "" }, { "docid": "96b267483eef6945055491a3ba8e6223", "score": "0.47833273", "text": "def find_wins(self, orientation, tilt):\n located = []\n for i in self.windows:\n if i.orientation == orientation and i.tilt == tilt:\n located.append(i)\n else:\n pass\n return located", "title": "" }, { "docid": "60e9414bb5a7af77dd809ab81ae8c169", "score": "0.47810942", "text": "def get_threads(self):\n process = self.hwnd\n for thread in process.iter_threads():\n self.threads[str(thread.get_tid())] = thread", "title": "" }, { "docid": "b9d27254abe35a3f7e9a30d9e508fed8", "score": "0.4776739", "text": "def views(self):\n rtn = []\n for a in self.context.window.screen.areas:\n if a.type == 'VIEW_3D':\n rtn.append(a)\n return rtn", "title": "" }, { "docid": "ed1fd0a91c04b60c573160ada6b4cfdb", "score": "0.47667143", "text": "def selected(self,window=None):\n if window == None:\n res = [ x for x,y in self.windows.items() if y.selected ]\n res.sort()\n return res\n if not self.windows.has_key(window):\n return False\n return self.windows[window].selected", "title": "" }, { "docid": "7548f406da278bd5d76135a1b70117ad", "score": "0.4763478", "text": "def getAllFromScreen(self, position, window):\n plane_positions = []\n for position in positions:\n 
planes_positions.append(self.getFromScreen(position, window))\n return plane_positions", "title": "" }, { "docid": "b0bbafe664a8f6c82ceadfa263b93d36", "score": "0.47631967", "text": "def sys_show_process_by_memory_usage():\n run('ps -eo pmem,pcpu,rss,vsize,args | sort -k 1 -r')", "title": "" }, { "docid": "f8c15ea189e03736c86aa0af074d1255", "score": "0.47624922", "text": "def check_open_chartWindow_is_visible(self, visible=True):\n sym_list = list()\n sym_list.append(self.sym_names.uIChart_Grid_Label)\n sym_list.append(self.sym_names.uIChart_label_Label)\n sym_list.append(self.sym_names.uIChart_zoom_Label)\n sym_list.append(self.sym_names.uIChart_btnReset_Button)\n sym_list.append(self.sym_names.uIChart_labelInterval_ComboBox)\n sym_list.append(self.sym_names.uIChart_zoomLevel_ComboBox)\n sym_list.append(self.sym_names.uIChart_Grid_CheckBox)\n sym_list.append(self.sym_names.uIChart_dgParametersInfo_Table)\n sym_list.append(self.sym_names.uIChart_Printer_Button)\n return self.chartWindow.check_open_chartWindow_is_visible(sym_list,visible)", "title": "" }, { "docid": "9dd35d35e398ed3551ac53b09ea0c9d6", "score": "0.4759145", "text": "def mirrors_by_pgid(self, pgid: int) -> [ProcessMirror]:\n return list(filter(lambda x: x._pgid == pgid, self._procs))", "title": "" }, { "docid": "89cd2e7432312aebce161a34d47b7054", "score": "0.4753503", "text": "def prj_window_check_controller_visible_for_expand_and_collapse(self,visible=True):\n return self.project_win.prj_window_check_controller_visible_for_expand_and_collapse(self.sym_names.collapseImg_Image,visible)", "title": "" }, { "docid": "b33bbde51c9c5d94389df922bd162682", "score": "0.47533154", "text": "def get_kth_visible_item(self, idxk):\n shown_count = 0\n for item in self.instance.sizer.Children:\n if item.IsShown():\n if shown_count == idxk:\n return item.GetWindow()\n shown_count += 1", "title": "" }, { "docid": "44439d62c1f1b7e85502c1b321f81d67", "score": "0.47493303", "text": "def visible(self):\n return self._visible", "title": "" }, { "docid": "8b1e3e7c68beb7c0cfdbefc0ddf22c41", "score": "0.47443625", "text": "def find_references_other_window() -> None:", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" }, { "docid": "3b0fcc44dfcb8b4f5743efe25818c927", "score": "0.47395664", "text": "def visible(self):\n return self[\"visible\"]", "title": "" } ]
6125735d81cefe56f59732353a54fe8f
r"""Calculates the hydrophobic fitness of a protein. Hydrophobic fitness is an efficient centroidbased method for calculating the packing quality of your structure [3]_. For this method C, F, I, L, M, V, W and Y are considered hydrophobic. The
[ { "docid": "da3bb79821b7a97a8c5917e0e0bb199d", "score": "0.6614055", "text": "def calculate_hydrophobic_fitness(assembly):\n hydrophobic_centroids = []\n tyrosine_centroids = []\n polar_centroids = []\n for residue in [r for r in assembly.get_monomers()\n if isinstance(r, ampal.Residue)]:\n centroid_list = None\n centroid = residue.centroid\n if residue.mol_letter in HYDROPHOBIC:\n centroid_list = hydrophobic_centroids\n elif residue.mol_letter == 'Y':\n centroid_list = tyrosine_centroids\n elif residue.mol_letter in standard_amino_acids:\n centroid_list = polar_centroids\n else:\n continue\n if centroid_list is not None:\n centroid_list.append(\n (residue.parent.id, int(residue.id),\n residue['CA'] if centroid is None else centroid))\n hf = run_hf_loop(hydrophobic_centroids,\n tyrosine_centroids, polar_centroids)\n return hf", "title": "" } ]
[ { "docid": "bf03dd037f2a5f685f080a4bb7178227", "score": "0.63004637", "text": "def fitness(individual):\n x=individual[0]\n z=individual[1]\n return x**2-2*x*z+6*x+z**2-6*z", "title": "" }, { "docid": "5c18d3c652ff260be0aac9a138dbbd89", "score": "0.600077", "text": "def fitness(x):\n fitness_score = np.array([\n count_characters(x, suf, [\"Fit\"])\\\n - count_characters(x, suf, [\"Fat\"])\n for suf in [\"noint\", \"int\"]\n ])\n return choose_max(fitness_score)", "title": "" }, { "docid": "63963f2c46049824884ca6f683f8c47b", "score": "0.5966186", "text": "def __compute_fitness(self):\n for index in range(len(self.objective)):\n i = index + 1\n ffeasmax = 0\n for chromosomerep in self.generation:\n f = chromosomerep['f%i' % i]\n if is_equal(chromosomerep['g'], 0, 0.0001) and f > ffeasmax:\n ffeasmax = f\n\n for chromosomerep in self.generation:\n f = chromosomerep['f%i' % i]\n g = chromosomerep['g']\n if is_equal(g, 0, 0.0001):\n chromosomerep['fitness%i' % i] = f\n else:\n chromosomerep['fitness%i' % i] = ffeasmax + g\n if len(self.objective) == 1:\n for chromosomerep in self.generation:\n chromosomerep['fitness'] = chromosomerep['fitness1']\n else:\n for index in range(len(self.generation)):\n currchromosome = self.generation[index]\n currobjective = self.__get_objective_array(currchromosome)\n maxj = float('-inf')\n for i in range(len(self.generation)):\n if index == i:\n continue\n compchromosome = self.generation[i]\n compobjective = self.__get_objective_array(compchromosome)\n mink = min(currobjective - compobjective)\n maxj = max(mink, maxj)\n self.generation[index]['fitness'] = maxj", "title": "" }, { "docid": "23251a0328e5d62d77bfe370e2dc0f76", "score": "0.5961828", "text": "def calculate_fitness(chr):\r\n total_value = 0\r\n total_weight = 0\r\n num = 0\r\n for gene in chr:\r\n if gene == 1:\r\n total_weight += items[num][0]\r\n total_value += items[num][1]\r\n num += 1\r\n if total_weight > knapsack_size:\r\n return 0\r\n else:\r\n return total_value", "title": "" }, { "docid": "90428e72f9dd255df77a2b1b0b3a5e33", "score": "0.59293705", "text": "def fitness(self, phenotype):\n return phenotype.fitness() / len(self)", "title": "" }, { "docid": "6b44e8c58395a7c4cb9afd27219dd5f2", "score": "0.57723475", "text": "def function(individual,P):\r\n V = individual['V']\r\n n = individual['n']\r\n T = individual['T']\r\n\r\n P_calc = (n*T*8.8314)/V\r\n fitness = P- P_calc\r\n return fitness", "title": "" }, { "docid": "0ff5a8fa11463de6bc0d902bf682f003", "score": "0.57532865", "text": "def fitness_function(self, chromosome):\n pass", "title": "" }, { "docid": "8ef569282b9733a3fa57c96e888b0084", "score": "0.564769", "text": "def fitness_function(self):\n fitness = 0\n weight = 0\n for i in range(NUMBER_OF_ITEMS):\n if self.genome[i] == 1:\n weight = weight + items[i][0]\n fitness = fitness + items[i][1]\n if weight > MAX_WEIGHT:\n fitness = 0\n return fitness", "title": "" }, { "docid": "101908993e42fb3205c681fbec28960d", "score": "0.5623306", "text": "def calculate_fitness(self) -> float:\n pass", "title": "" }, { "docid": "ce1c700a33e9a9fd258074c348a32a65", "score": "0.5603181", "text": "def fitness(biny):\n fit = []\n for i in range(len(biny)):\n #for each individual create confusion tuple\n confused = (0,0,0,0)\n for j in range(len(biny[i])):\n #calculate sum of confusion of one gen an total confusion of individual\n confused = tuple(map(sum, zip(confused, eva(biny[i][j]))))\n #create fitness value for each individual with kappa coefficent\n fit.append(kappa(confused))\n return fit", 
"title": "" }, { "docid": "9845e2997d4785dc2c03f8a99f4adb8f", "score": "0.5525521", "text": "def addHydrogens(self):\n count = 0\n self.write(\"Adding hydrogens to the protein...\\n\")\n for residue in self.protein.getResidues():\n if not (isinstance(residue, Amino) or \\\n isinstance(residue, Nucleic)): continue\n for atomname in residue.reference.map:\n if not atomname.startswith(\"H\"): continue\n if residue.hasAtom(atomname): continue\n if isinstance(residue,CYS):\n if residue.SSbonded and atomname == \"HG\": continue\n\n # If this hydrogen is part of a tetrahedral group,\n # follow a different codepath\n\n if self.rebuildTetrahedral(residue, atomname):\n count += 1\n continue\n \n # Otherwise use the standard quatfit methods\n \n coords = []\n refcoords = []\n \n refatomcoords = residue.reference.map[atomname].getCoords()\n bondlist = residue.reference.getNearestBonds(atomname)\n \n for bond in bondlist:\n if bond == \"N+1\": atom = residue.peptideN\n elif bond == \"C-1\": atom = residue.peptideC\n else: atom = residue.getAtom(bond)\n\n if atom == None: continue\n\n # Get coordinates, reference coordinates\n\n coords.append(atom.getCoords())\n refcoords.append(residue.reference.map[bond].getCoords())\n\n # Exit if we have enough atoms\n \n if len(coords) == 3: break\n\n if len(coords) == 3:\n newcoords = findCoordinates(3, coords, refcoords, refatomcoords)\n residue.createAtom(atomname, newcoords)\n count += 1\n else:\n self.write(\"Couldn't rebuild %s in %s!\\n\" % (atomname, residue),1)\n \n self.write(\" Added %i hydrogen atoms.\\n\" % count)", "title": "" }, { "docid": "98609a7ee16509a4ec264abfc7d03121", "score": "0.5504681", "text": "def _compute_fitness(self, chromosome):\n return self.fitness_func(chromosome, self._data)", "title": "" }, { "docid": "cb1d41cd1e4a00948e0e86850da187f0", "score": "0.54501766", "text": "def get_hydrophobic_ratio(self):\n\n res = {}\n desc = GlobalDescriptor(self.ProteinSequence)\n desc.hydrophobic_ratio()\n res['hydrophobic_ratio'] = desc.descriptor[0][0]\n return res", "title": "" }, { "docid": "62d1c26ed3e2c9a8cb4cf8f709e94fc3", "score": "0.54407924", "text": "def fitness(self, chromosome,EM):\n\t\treturn len(chromosome)", "title": "" }, { "docid": "33326fb322503906efc5e4c9562191c5", "score": "0.54097694", "text": "def genome_fitness(self, genome, fitness_callback=None):\n\n hdel_log_p = scipy.stats.norm.logpdf(genome.proportion_hdel(), loc=self.proportion_hdel, scale=self.proportion_hdel_stddev)\n hlamp_log_p = scipy.stats.norm.logpdf(genome.proportion_hlamp(), loc=self.proportion_hlamp, scale=self.proportion_hlamp_stddev)\n ploidy_log_p = scipy.stats.norm.logpdf(genome.ploidy(), loc=self.ploidy, scale=self.ploidy_stddev)\n loh_log_p = scipy.stats.norm.logpdf(genome.proportion_loh(), loc=self.proportion_loh, scale=self.proportion_loh_stddev)\n\n fitness = hdel_log_p + hlamp_log_p + ploidy_log_p + loh_log_p\n\n if fitness_callback is not None:\n fitness = fitness_callback(genome, fitness)\n\n return fitness", "title": "" }, { "docid": "6ae8bc664a7b36a159d1bbc4e3a33633", "score": "0.5393262", "text": "def computeFitness(self):\n self.fitness = self.euclideanDistance(self.genes[0], self.genes[len(self.genes)-1])\n for i in range(0, self.genSize-1):\n self.fitness += self.euclideanDistance(self.genes[i], self.genes[i+1])\n \n self.distance=self.fitness\n self.fitness=10000000000/self.fitness", "title": "" }, { "docid": "064c81365d709a158d23335c37d1bdb6", "score": "0.5392029", "text": "def ExponotialFunction(chromosome):\n\talpha = 0\n\tfor i in 
range(1, len(chromosome)+1):\n\t\talpha += e**(-5.12*i)\n\talpha = -alpha\n\tfitness = 0\n\tfor i in range(len(chromosome)):\n\t\tfitness += e**((i+1)*chromosome[i])\n\treturn fitness + alpha", "title": "" }, { "docid": "244f524ce2366c232207127e0a901db0", "score": "0.5372479", "text": "def calc_fitness(self) -> None:\n self.population = sorted(\n self.population,\n key=lambda x: x.fitness,\n reverse=True)\n self.max_fitness = self.population[0].fitness", "title": "" }, { "docid": "d3c43fb413a08d4f3e4106690e8b9087", "score": "0.5340326", "text": "def fitness(self) -> float:\n return self.__fitness", "title": "" }, { "docid": "cc929e8df85e13f1a0f6c3aa02342bd2", "score": "0.5334777", "text": "def compute_fitness(self):\r\n self.population_rating = 0.0\r\n for individual in self.individuals:\r\n individual.compute_fitness()\r\n self.population_rating += individual.fitness", "title": "" }, { "docid": "2d165f9b60d206d73f10f9b92a73bddf", "score": "0.53035814", "text": "def fitness(population, goal):\n ave = statistics.mean(population)\n return ave / goal # when this values is >= 1, you'll know it's time to stop breeding", "title": "" }, { "docid": "147d85910453d0faf4360ddd54ec279f", "score": "0.52840936", "text": "def tree_fitness(self):\n cols = self.observations()[:, 1:]\n rows = cols.transpose()\n\n heights = []\n\n for col in cols:\n m = np.nonzero(col)[0]\n if m.size:\n heights.append(20 - min(m))\n else:\n heights.append(0)\n\n # Aggregate Height\n f0 = sum(heights)\n\n # Number of holes\n f1 = 0\n for col in cols:\n hit = False\n for square in col:\n if square and not hit:\n hit = True\n if hit and not square:\n f1 += 1\n\n # Number of columns with at least one hole\n f2 = 0\n for col in cols:\n hit = False\n for square in col:\n if square and not hit:\n hit = True\n if hit and not square:\n f2 += 1\n break\n\n # Bumpiness\n f3 = 0\n for i, height in enumerate(heights[1:]):\n f3 += (height - heights[i - 1])\n\n # Row transitions\n f4 = 0\n for row in rows:\n current = row[0]\n for square in row:\n if current != square:\n current = square\n f4 += 1\n\n # Column transitions\n f5 = 0\n for col in cols:\n current = col[0]\n for square in col:\n if current != square:\n current = square\n f5 += 1\n\n # Empty columns\n f6 = sum([int(sum(x) == 0) for x in cols])\n\n wells = []\n for i in range(len(heights)):\n if i == 0:\n w = heights[1] - heights[0]\n w = w if w > 0 else 0\n wells.append(w)\n elif i == len(heights) - 1:\n w = heights[-2] - heights[-1]\n w = w if w > 0 else 0\n wells.append(w)\n else:\n w1 = heights[i - 1] - heights[i]\n w2 = heights[i + 1] - heights[i]\n w1 = w1 if w1 > 0 else 0\n w2 = w2 if w2 > 0 else 0\n w = w1 if w1 >= w2 else w2\n wells.append(w)\n\n # Deepest well\n f7 = max(wells)\n\n # Lines cleared\n f8 = self.last_cleared_lines\n\n # Weighted blocks\n f9 = 0\n for i, row in enumerate(rows):\n f9 += (20 - i) * sum(row)\n\n features = [f0, f1, f2, f3, f4, f5, f6, f7, f8, f9]\n\n return self.network.activate(features)[0]", "title": "" }, { "docid": "740a48ff60e3b3a463eb895f879be6ee", "score": "0.5282305", "text": "def run_hf_loop(hydrophobic_centroids, tyrosine_centroids, polar_centroids):\n n_hydrophobic = len(hydrophobic_centroids)\n n_tyrosine = len(tyrosine_centroids)\n n_polar = len(polar_centroids)\n hydrophobic_scores = []\n burial_scores = []\n for reference in hydrophobic_centroids:\n h_centroids_in_7_3, h_centroids_in_10, h_neighbours = get_number_within(\n reference, hydrophobic_centroids)\n y_centroids_in_7_3, y_centroids_in_10, y_neighbours = 
get_number_within(\n reference, tyrosine_centroids)\n p_centroids_in_7_3, p_centroids_in_10, p_neighbours = get_number_within(\n reference, polar_centroids)\n all_neighbours = h_neighbours + y_neighbours + p_neighbours\n ci = (h_centroids_in_7_3 + y_centroids_in_7_3 + p_centroids_in_7_3) - (\n all_neighbours)\n hi = (n_hydrophobic - h_neighbours) + (n_tyrosine - y_neighbours)\n Ni = hi + (n_polar - p_neighbours)\n Hic = ci * (hi / Ni)\n Hi = (h_centroids_in_7_3 + y_centroids_in_7_3) - (\n h_neighbours + y_neighbours)\n hydrophobic_scores.append(Hi - Hic)\n burial_scores.append(\n (h_centroids_in_10 + y_centroids_in_10 + p_centroids_in_10))\n hydrophobic_fitness = -1 * (\n (sum(burial_scores)*sum(hydrophobic_scores)\n ) / ((n_hydrophobic+n_tyrosine)**2)\n )\n return hydrophobic_fitness", "title": "" }, { "docid": "875d1a9a5cba11d5edc46f8e51a603f1", "score": "0.5281795", "text": "def geneticAlgorithm(X, y, n_population, n_generation):\n # create individual\n creator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n creator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\n # create toolbox\n toolbox = base.Toolbox()\n toolbox.register(\"attr_bool\", random.randint, 0, 1)\n toolbox.register(\"individual\", tools.initRepeat,\n creator.Individual, toolbox.attr_bool, len(X.columns))\n # toolbox.register(\"individual\", tools.initRepeat,\n # creator.Individual, toolbox.attr_bool, X.shape[1])\n toolbox.register(\"population\", tools.initRepeat, list,\n toolbox.individual)\n toolbox.register(\"evaluate\", getFitness, X=X, y=y)\n toolbox.register(\"mate\", tools.cxOnePoint)\n toolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.05)\n toolbox.register(\"select\", tools.selTournament, tournsize=3)\n\n # initialize parameters\n pop = toolbox.population(n=n_population)\n hof = tools.HallOfFame(n_population * n_generation)\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean)\n stats.register(\"min\", np.min)\n stats.register(\"max\", np.max)\n\n # genetic algorithm\n pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.9, mutpb=0.05,\n ngen=n_generation, stats=stats, halloffame=hof,\n verbose=True)\n\n # return hall of fame\n return hof", "title": "" }, { "docid": "0b9961421910c58f0a7b12d722a7acf4", "score": "0.52664834", "text": "def compute_hydropathy_of_sequence(seq, only_consider_AFILMVWY=False):\n\n # Define the hydropathy of each amino acid according to Table 2\n # from Kyte and Doolittle, 1982, J. Mol. 
Biol.\n aa_hydropathy = {\n \"I\": 4.5,\n \"V\": 4.2,\n \"L\": 3.8,\n \"F\": 2.8,\n \"C\": 2.5,\n \"M\": 1.9,\n \"A\": 1.8,\n \"G\": -0.4,\n \"T\": -0.7,\n \"W\": -0.9,\n \"S\": -0.8,\n \"Y\": -1.3,\n \"P\": -1.6,\n \"H\": -3.2,\n \"E\": -3.5,\n \"Q\": -3.5,\n \"D\": -3.5,\n \"N\": -3.5,\n \"K\": -3.9,\n \"R\": -4.5\n }\n\n # Sum the hydropathy of each amino acid in the sequence, only\n # considering hydrophobic amino acids if specified above\n if only_consider_AFILMVWY:\n return sum([\n aa_hydropathy[aa] for aa in seq\n if aa in list('AFILMVWY')\n ])\n else:\n return sum([aa_hydropathy[aa] for aa in seq])", "title": "" }, { "docid": "88140061a9323d30b7ece629f3939dde", "score": "0.52559984", "text": "def fitness(self, solucion):\r\n platilloleft = 0\r\n platilloright = 0\r\n for i in range(len(solucion)):\r\n if solucion[i] == 0:\r\n platilloleft += self.dictObjetos[i]\r\n else:\r\n platilloright += self.dictObjetos[i]\r\n return abs(platilloleft - platilloright)", "title": "" }, { "docid": "0f0d23003cf9e036300093eccb59b094", "score": "0.5240488", "text": "def fitness(listgene):\n valuelist=[]\n for i in range(len(listgene)):\n score=coeff[i]*listgene[i]\n valuelist.append(score)\n v=sum(valuelist)\n return v", "title": "" }, { "docid": "8ba2451f3ac6edf6ba6edd0653208d32", "score": "0.5230516", "text": "def darwinian_fitness_function(genome):\n individual = ImageClassifier(w1=genome[\"w1\"], w2=genome[\"w2\"])\n fitness = 0\n\n for label, image in labeled_images:\n classification = individual.classify(image)\n if classification == label:\n fitness += 100\n\n return fitness", "title": "" }, { "docid": "68236df6fd83528349483cafd03d3889", "score": "0.5195723", "text": "def fitness(self):\n return self._fitness", "title": "" }, { "docid": "68236df6fd83528349483cafd03d3889", "score": "0.5195723", "text": "def fitness(self):\n return self._fitness", "title": "" }, { "docid": "5a04ac4e3ffd4cf68ba746f7c5878b0b", "score": "0.51652586", "text": "def evalH(self):\n for u, v, data in self.tree.edges(data=True):\n self.tree.adj[u][v][\"h-dist\"] = data[\"pc\"].copulaModel.h\n self.tree.adj[u][v][\"hinv-dist\"] = data[\"pc\"].copulaModel.hinv\n return self._evalH()", "title": "" }, { "docid": "2abfddb707ba7feec0e66367cc8508b1", "score": "0.5152964", "text": "def calculate_HG(data, base):\n # get probabilities\n n_vals = len(data.flat) # total number of values\n # get number of each unique (classified) value in the array\n _, unique_counts = np.unique(data, return_counts=True)\n probs = unique_counts / n_vals # get probabilities\n HG = entropy(probs, base=base) # get global entropy, this is returned\n return HG", "title": "" }, { "docid": "5273dde08aa58e08c7d65919fca4d55a", "score": "0.51237607", "text": "def test_genetic_operations():\n\n params = {'function_set': [add2, sub2, mul2, div2],\n 'arities': {2: [add2, sub2, mul2, div2]},\n 'init_depth': (2, 6),\n 'init_method': 'half and half',\n 'n_features': 10,\n 'const_range': (-1.0, 1.0),\n 'metric': 'mean absolute error',\n 'p_point_replace': 0.05,\n 'parsimony_coefficient': 0.1}\n random_state = check_random_state(415)\n\n # Test for a small program\n test_gp = [mul2, div2, 8, 1, sub2, 9, .5]\n donor = [add2, 0.1, sub2, 2, 7]\n\n gp = _Program(random_state=random_state, program=test_gp, **params)\n\n expected = ['mul', 'div', 8, 1, 'sub', 9, 0.5]\n assert([f.name if isinstance(f, _Function) else f\n for f in gp.reproduce()] == expected)\n assert(gp.program == test_gp)\n assert([f.name if isinstance(f, _Function) else f\n for f in gp.crossover(donor, 
random_state)[0]] == ['sub', 2, 7])\n assert(gp.program == test_gp)\n expected = ['mul', 'div', 8, 1, 'sub', 'sub', 3, 5, 'add', 6, 3]\n assert([f.name if isinstance(f, _Function) else f\n for f in gp.subtree_mutation(random_state)[0]] == expected)\n assert(gp.program == test_gp)\n assert([f.name if isinstance(f, _Function) else f\n for f in gp.hoist_mutation(random_state)[0]] == ['div', 8, 1])\n assert(gp.program == test_gp)\n expected = ['mul', 'div', 8, 1, 'sub', 9, 0.5]\n assert([f.name if isinstance(f, _Function) else f\n for f in gp.point_mutation(random_state)[0]] == expected)\n assert(gp.program == test_gp)", "title": "" }, { "docid": "0a8318b51b6695c001de8043879fd6aa", "score": "0.51075584", "text": "def fitness(self):\n\n return ( 1 - float(self.loses) / self.games )", "title": "" }, { "docid": "08255a5c1cf31ad1ab6e957548491df4", "score": "0.51046187", "text": "def mutation(self, prob, index=None):\n if self.code_mode == \"binary\":\n for i in range(self.dimension):\n gene = list(self.chromosome[i])\n neg_gene = ['0' if i == '1' else '1' for i in gene]\n len_gene = self.lengths[i]\n p = np.random.random(len_gene)\n is_muate = p < prob\n for j in range(len_gene):\n if is_muate[j]:\n gene[j] = neg_gene[j]\n self.chromosome[i] = ''.join(gene)\n elif self.code_mode == \"real\":\n # polynomial mutation\n for i in range(self.dimension):\n if random.random() > prob:\n continue\n u = random.random()\n if u <= 0.5:\n q = (2*u+(1-2*u)*(1-(self.chromosome[i]-self.lower_bounds[i])\\\n /(self.up_bounds[i]-self.lower_bounds[i]))**(index+1))**(1/(index+1))-1\n else:\n q = 1-(2*(1-u)+2*(u-0.5)*(1-(self.up_bounds[i]-self.chromosome[i])\\\n /(self.up_bounds[i]-self.lower_bounds[i]))**(index+1))**(1/(index+1))\n self.chromosome[i] += q*(self.up_bounds[i]-self.lower_bounds[i])\n if self.chromosome[i] > self.up_bounds[i] or self.chromosome[i] < self.lower_bounds[i]:\n self.chromosome[i] = self.lower_bounds[i] if random.random() > 0.5 else self.up_bounds[i]\n else:\n raise ValueError(\"Unkonw code mode.\")\n return", "title": "" }, { "docid": "48381fcfe7d11f945b27bca329e7c656", "score": "0.5093215", "text": "def fitness(binary_N_paths, addPenalty=False):\n total_cost = 0\n # basic cost\n for one_path_double_digit in binary_N_paths:\n one_path_single_digit_list = []\n one_path_double_digit_list = list(one_path_double_digit)\n for i in range(len(one_path_double_digit_list)):\n if i % 2 == 0:\n one_path_single_digit_list.append(int(one_path_double_digit_list[i]) + int(one_path_double_digit_list[i+1]))\n one_path_single_digit_np = np.array(one_path_single_digit_list)\n target_indices = np.where(one_path_single_digit_np == 1)[0]\n if len(target_indices) == 0:\n duration_interval_num = 0\n else:\n duration_interval_num = int(target_indices[-1] - target_indices[0] + 1)\n if duration_interval_num == 0:\n total_cost += 0\n elif duration_interval_num * intervalDuration <= 5:\n total_cost += 90\n elif duration_interval_num * intervalDuration <= 7.5:\n total_cost += 180\n else:\n total_cost += (20 * intervalDuration) * duration_interval_num\n # add penalty\n if addPenalty:\n demandFlag, demandViolationNum = demand_constraint(binary_N_paths, tolerance)\n rushHour, rushHourViolatonNum = rush_hour_constraint(binary_N_paths)\n maxWorkingHour, maxWorkingHourViolationNum = max_working_hour_constraint(binary_N_paths)\n if checkDemandFlag:\n total_cost += alpha * demandViolationNum * demandViolationPenalty\n if checkRushHourFlag:\n total_cost += rushHourViolatonNum * rushHourViolationPenalty\n if 
maxWorkingHourViolationPenalty:\n total_cost += maxWorkingHourViolationNum * maxWorkingHourViolationPenalty\n return total_cost", "title": "" }, { "docid": "fbf0236b2293b38cb86b9fe5ee7bf86c", "score": "0.509223", "text": "def _best_hyps(self, hyps, normalize_by_length=False):\n # This length normalization is only effective for the final results.\n\n if normalize_by_length:\n return sorted(hyps, key=lambda h: h.prob / len(h.context), reverse=True)\n else:\n return sorted(\n hyps,\n key=lambda h: h.prob - self.diversity_strength * h._hamming_penalty,\n reverse=True,\n )", "title": "" }, { "docid": "03799cd74acfc53a2c5f56c39036a2eb", "score": "0.5083912", "text": "def compute_tot_fitness(fitness_function, pop):\n probs = np.zeros(len(pop)) # list to house probabilites\n best_member = ''\n best_fitness = -10**18\n total_fitness = 0 # The sum of of all the fitness values from the population.\n for i, chromosome in enumerate(pop):\n new_fitness = fitness_function(chromosome)\n if new_fitness > best_fitness:\n best_member = chromosome\n best_fitness = new_fitness\n total_fitness += new_fitness\n probs[i] = new_fitness\n probs = probs / total_fitness\n return total_fitness, best_fitness, best_member, probs", "title": "" }, { "docid": "347584ec06d7a512aa23f0e7498e7a6b", "score": "0.50829667", "text": "def _fitness(self, indiv):\n return indiv", "title": "" }, { "docid": "8c2278e2e21f3c33b9ba131fffd7f5fe", "score": "0.5074147", "text": "def qpg(state, fitness):\n regret = fitness - state.dot(fitness)\n return state * (state * regret - np.sum(state**2 * regret))", "title": "" }, { "docid": "eb708d0cd6f65518675d1bd547873e05", "score": "0.5041996", "text": "def genHyperWalVote(self):\n self.transWal()\n\n# bit,fit = tl.compFit(self)\n# a = sorted(zip(bit,fit), key=lambda a_entry: a_entry[1]) \n# optBit = a[0][0]\n# optFit = a[0][1]\n# print 'opti\\n',optBit, optFit\n\n #for i in range(len(a)): \n# for i in range(10): \n# print '%s\\t%.3f' %(a[i][0],a[i][1])\n\n # initialize sumFitA \n self.sumFitA = []\n evalSubFunc = []\n for i in range(self.n):\n self.sumFitA.append(Struct(one=0,zero=0))\n \n for i in self.WA:\n subBit = i.arr\n\n if subBit not in evalSubFunc and i.arr:\n evalSubFunc.append(subBit)\n\n # check every template that matches the subfunction\n seqBits = tl.genSeqBits(len(subBit))\n schFitArr = []\n for j in seqBits:\n schFit = 0\n\n # convert bit string to array representation\n schTpl = []\n for k in range(len(j)):\n if j[k] == '1':\n schTpl.append(subBit[k])\n\n # compute schema fitness\n for k in self.WA:\n subset = True\n for l in k.arr:\n if l not in subBit:\n subset = False\n break\n if subset == True:\n schFit = schFit + int(math.pow(-1, self.binCountArr(k.arr, schTpl))) * k.w\n\n schFitArr.append(Struct(fit=schFit,arr=schTpl))\n# print subBit, j, schFit\n# print \n\n schFitArrSort = sorted(schFitArr, key = lambda i: i.fit)\n\n # perform voting from the best hyperplane associated with the subfunction\n #for k in range(self.k+1):\n for k in range(1):\n #for k in range(self.k*2):\n for j in subBit:\n if j in schFitArrSort[k].arr:\n #self.sumFitA[j].one = self.sumFitA[j].one + schFitArrSort[k].fit\n self.sumFitA[j].one = self.sumFitA[j].one + 1\n else:\n #self.sumFitA[j].zero = self.sumFitA[j].zero + schFitArrSort[k].fit\n self.sumFitA[j].zero = self.sumFitA[j].zero + 1", "title": "" }, { "docid": "f7b6038c7f4b226d5d8ad91618c3d8da", "score": "0.5006405", "text": "def fitness(self):\n return [individual.fitness for individual in self._individuals]", "title": "" }, { "docid": 
"f8b7b9f815555c237b09e178fe48753a", "score": "0.5004712", "text": "def fitness(self, path: str, path_type: str = None) -> float:\n return 0.0", "title": "" }, { "docid": "52124fa241d5b51e756802ae7f8332d4", "score": "0.5000257", "text": "def main():\n\n exectime = {\n 'all': {},\n 'file': {},\n 'ga': {},\n 'wm': {}\n }\n sample = 1000\n exectime[\"all\"][\"start\"] = time.time()\n\n \"\"\" Opening train data file \"\"\"\n exectime[\"file\"][\"start\"] = time.time()\n df = pd.read_csv(\"dataset/train1.csv\")\n exectime[\"file\"][\"end\"] = time.time()\n print df['x1'].size\n return\n \"\"\" Implemetation of fitness function to GA \"\"\"\n def fitness_function(chromosome):\n \"\"\"\n ::param chromosome:\n :type chromosome: Chromosome\n \"\"\"\n\n gs = chromosome.genes\n\n if len(gs) != 14:\n return 0\n\n if not (0 <= gs[0].value() < gs[1].value()) or not (gs[1].value() > gs[2].value()):\n return 0\n if not (gs[2].value() < gs[3].value() < gs[4].value()) or not (gs[4].value() > gs[5].value()):\n return 0\n if not (gs[5].value() < gs[6].value() <= 100):\n return 0\n if not (gs[0].value() < gs[3].value() < gs[6].value()):\n return 0\n\n if not (0 < gs[7].value() < gs[8].value()) or not (gs[8].value() > gs[9].value()):\n return 0\n if not (gs[9].value() < gs[10].value() < gs[11].value()) or not (gs[11].value() > gs[12].value()):\n return 0\n if not (gs[12].value() < gs[13].value() <= 100):\n return 0\n if not (gs[7].value() < gs[10].value() < gs[13].value()):\n return 0\n\n x1, x2 = generate_universes(gs, sample)\n\n wmcls = WangMendelClassifier(x1=x1, x2=x2)\n wmcls.train(df.to_dict('records'), out_label='cls', debug=False)\n return wmcls.get_fitness() * 100\n\n \"\"\" Creating first universe \"\"\"\n n_genes1 = [random.uniform(0, 100) for i in range(0, 7)]\n n_genes1.sort()\n genes1 = [Gene(0, 100, n) for n in n_genes1]\n genes1[1], genes1[2] = genes1[2], genes1[1]\n genes1[4], genes1[5] = genes1[5], genes1[4]\n\n \"\"\" Creating second universe \"\"\"\n n_genes2 = [random.uniform(0, 100) for i in range(0, 7)]\n n_genes2.sort()\n genes2 = [Gene(0, 100, n) for n in n_genes2]\n genes2[1], genes2[2] = genes2[2], genes2[1]\n genes2[4], genes2[5] = genes2[5], genes2[4]\n\n \"\"\" Genetic Algorithm Tuning \"\"\"\n exectime[\"ga\"][\"start\"] = time.time()\n pop = GaPopulation(Chromosome(fitness_function, *(genes1 + genes2)), 50)\n ga = GaAlgorithm(pop, maxgen=100, mutation=0.15)\n ga.run(debug=True)\n exectime[\"ga\"][\"end\"] = time.time()\n\n best = pop.best()\n gs = best.genes\n print('-' * 80)\n print(str(best))\n\n \"\"\" WangMendel Classifier to show results \"\"\"\n exectime[\"wm\"][\"start\"] = time.time()\n x1, x2 = generate_universes(gs, sample)\n wmcls = WangMendelClassifier(x1=x1, x2=x2)\n wmcls.train(df.to_dict('records'), out_label='cls', debug=False)\n wmcls.print_status()\n exectime[\"wm\"][\"end\"] = time.time()\n\n \"\"\" Ploting results \"\"\"\n fig = plt.figure()\n axes = fig.add_subplot(111)\n\n axes.set_xbound(x1.domain.limits)\n axes.set_ybound(x2.domain.limits)\n seaborn.scatterplot('x1', 'x2', hue='cls', data=df, ax=axes)\n\n gs0 = [gs[0].value(), gs[0].value()]\n gs1 = [gs[1].value(), gs[1].value()]\n gs2 = [gs[2].value(), gs[2].value()]\n gs3 = [gs[3].value(), gs[3].value()]\n gs4 = [gs[4].value(), gs[4].value()]\n gs5 = [gs[5].value(), gs[5].value()]\n gs6 = [gs[6].value(), gs[6].value()]\n gs7 = [gs[7].value(), gs[7].value()]\n gs8 = [gs[8].value(), gs[8].value()]\n gs9 = [gs[9].value(), gs[9].value()]\n gs10 = [gs[10].value(), gs[10].value()]\n gs11 = [gs[11].value(), 
gs[11].value()]\n gs12 = [gs[12].value(), gs[12].value()]\n gs13 = [gs[13].value(), gs[13].value()]\n interval = [0, 100]\n\n # X1/A1\n plt.plot([0, 0], interval, gs1, interval, color=\"blue\")\n axes.fill_between([0, gs[1].value()], [0, 0], [100, 100], facecolor='blue', alpha=0.2, interpolate=True)\n # X1/A2\n plt.plot(gs2, interval, gs4, interval, color=\"red\")\n axes.fill_between([gs[2].value(), gs[4].value()], [0, 0], [100, 100], facecolor='red', alpha=0.2, interpolate=True)\n # X1/A3\n plt.plot(gs5, interval, [100, 100], interval, color=\"green\")\n axes.fill_between([gs[5].value(), 100], [0, 0], [100, 100], facecolor='green', alpha=0.2, interpolate=True)\n\n # X1/A1\n plt.plot(interval, [0, 0], interval, gs8, color=\"blue\")\n axes.fill_between(interval, [0, 0], gs8, where=gs8 >= [0, 0], facecolor='blue', alpha=0.2, interpolate=True)\n # X1/A2\n plt.plot(interval, gs9, interval, gs11, color=\"red\")\n axes.fill_between(interval, gs9, gs11, where=gs11 >= gs9, facecolor='red', alpha=0.2, interpolate=True)\n # X1/A3\n plt.plot(interval, gs12, interval, [100, 100], color=\"green\")\n axes.fill_between(interval, gs12, [100, 100], where=[100, 100] >= gs12, facecolor='green', alpha=0.2, interpolate=True)\n\n x1.plot()\n\n x2.plot()\n\n plt.show()\n\n exectime[\"all\"][\"end\"] = time.time()\n\n \"\"\" Printing timers \"\"\"\n print('=' * 80)\n for label in exectime:\n print(\"%s time: \\t%.3fs\" % (label, float(exectime[label][\"end\"] - exectime[label][\"start\"])))\n print('=' * 80)", "title": "" }, { "docid": "e3e8b9238652f1024cbf85ede754ff58", "score": "0.49987617", "text": "def fitness_5bit_and_density_worker(individual, reca_config, ea_config):\n\n fitness = []\n for _ in range(ea_config.get(\"tests_per_individual\")):\n reCA_out = five_bit_and_density_runner(individual, reca_config)\n fitness.append(int((reCA_out.total_correct / len(reCA_out.all_test_examples)) * 1000))\n\n\n fitness_std = int(np.std(fitness))\n fitness = int(np.mean(fitness))\n\n # Making sure tests\n if fitness>ea_config.get(\"retest_threshold\"):\n fitness = []\n for _ in range(ea_config.get(\"retests_per_individual\")):\n reCA_out = five_bit_and_density_runner(individual, reca_config)\n fitness.append(int((reCA_out.total_correct / len(reCA_out.all_test_examples)) * 1000))\n\n fitness_std = int(np.std(fitness))\n fitness = int(np.mean(fitness))\n print(\"result after making sure: \", fitness)\n fitness = 970 if fitness < 968 else fitness\n\n # fitness = fitness if (fitness<850) else fitness-fitness_std*(1000/fitness)\n fitness = 1 if fitness == 0 else fitness # avoid div by zero\n individual.fitness = fitness\n individual.fitness_std = fitness_std\n\n return individual", "title": "" }, { "docid": "6b8909ff94cb50c178655c91517b4d0b", "score": "0.4994112", "text": "def fitness_f(x):\n weight = 0\n value = 0\n for i in range(len(x.genes)):\n weight += x.genes[i][0]\n value += x.genes[i][1]\n if weight <= 15:\n return value\n return 0", "title": "" }, { "docid": "d411f3c6443f203562e48bca05ad82d0", "score": "0.49931893", "text": "def multi_dimensional_fitness(individual):\n return pow(individual[0], 2) + pow(individual[1], 3)", "title": "" }, { "docid": "73c33161d764662d2b1c36cd972c742c", "score": "0.49814048", "text": "def polygonality_hexagonality(area, perimeter, neighbors, solidity, maxferet, minferet):\n area_list=[]\n perim_list=[]\n \n #Calculate area hull\n area_hull = area / solidity\n\n #Calculate Perimeter hull\n perim_hull = 6 * math.sqrt(area_hull / (1.5 * math.sqrt(3)))\n\n if neighbors == 0:\n 
perimeter_neighbors = float(\"NAN\")\n elif neighbors > 0:\n perimeter_neighbors = perimeter / neighbors\n\n #Polygonality metrics calculated based on the number of sides of the polygon\n if neighbors > 2:\n poly_size_ratio = 1 - math.sqrt((1 - (perimeter_neighbors / (math.sqrt((4 * area) / (neighbors * (1 / (math.tan(math.pi / neighbors)))))))) * (1 -(perimeter_neighbors / (math.sqrt(( 4 * area) / (neighbors * (1 / (math.tan(math.pi / neighbors)))))))))\n poly_area_ratio = 1 - math.sqrt((1 - (area / (0.25 * neighbors * perimeter_neighbors * perimeter_neighbors * (1 / (math.tan(math.pi / neighbors)))))) * (1 - (area / (0.25 * neighbors * perimeter_neighbors * perimeter_neighbors * (1 / (math.tan(math.pi / neighbors)))))))\n\n #Calculate Polygonality Score\n poly_ave = 10 * (poly_size_ratio + poly_area_ratio) / 2\n\n #Hexagonality metrics calculated based on a convex, regular, hexagon \n apoth1 = math.sqrt(3) * perimeter / 12\n apoth2 = math.sqrt(3) * maxferet / 4\n apoth3 = minferet / 2\n side1 = perimeter / 6\n side2 = maxferet / 2\n side3 = minferet / math.sqrt(3)\n side4 = perim_hull / 6\n\n #Unique area calculations from the derived and primary measures above \n area1 = 0.5 * (3 * math.sqrt(3)) * side1 * side1\n area2 = 0.5 * (3 * math.sqrt(3)) * side2 * side2\n area3 = 0.5 * (3 * math.sqrt(3)) * side3 * side3\n area4 = 3 * side1 * apoth2\n area5 = 3 * side1 * apoth3\n area6 = 3 * side2 * apoth3\n area7 = 3 * side4 * apoth1\n area8 = 3 * side4 * apoth2\n area9 = 3 * side4 * apoth3\n area10 = area_hull\n area11 = area\n \n #Create an array of all unique areas\n list_area=[area1, area2, area3, area4, area5, area6, area7, area8, area9, area10, area11]\n area_uniq = np.asarray(list_area, dtype=float)\n\n #Create an array of the ratio of all areas to eachother \n for ib in range (0, len(area_uniq)):\n for ic in range (ib + 1, len(area_uniq)):\n area_ratio = 1 - math.sqrt((1 - (area_uniq[ib] / area_uniq[ic])) * (1 - (area_uniq[ib] / area_uniq[ic])))\n area_list.append(area_ratio)\n area_array = np.asarray(area_list)\n stat_value_area = stats.describe(area_array)\n del area_uniq, list_area, area_array, area_list\n\n #Create Summary statistics of all array ratios \n area_ratio_ave = stat_value_area.mean\n area_ratio_sd = math.sqrt(stat_value_area.variance)\n\n #Set the hexagon area ratio equal to the average Area Ratio\n hex_area_ratio = area_ratio_ave\n\n # Perimeter Ratio Calculations\n # Two extra apothems are now useful \n apoth4 = math.sqrt(3) * perim_hull / 12\n apoth5 = math.sqrt(4 * area_hull / (4.5 * math.sqrt(3)))\n\n perim1 = math.sqrt(24 * area / math.sqrt(3))\n perim2 = math.sqrt(24 * area_hull / math.sqrt(3))\n perim3 = perimeter\n perim4 = perim_hull\n perim5 = 3 * maxferet\n perim6 = 6 * minferet / math.sqrt(3)\n perim7 = 2 * area / (apoth1)\n perim8 = 2 * area / (apoth2)\n perim9 = 2 * area / (apoth3)\n perim10 = 2 * area / (apoth4)\n perim11 = 2 * area / (apoth5)\n perim12 = 2 * area_hull / (apoth1)\n perim13 = 2 * area_hull / (apoth2)\n perim14 = 2 * area_hull / (apoth3)\n\n #Create an array of all unique Perimeters\n list_perim = [perim1, perim2, perim3, perim4, perim5, perim6, perim7, perim8, perim9, perim10, perim11, perim12, perim13, perim14]\n perim_uniq = np.asarray(list_perim, dtype=float)\n del list_perim\n\n #Create an array of the ratio of all Perimeters to eachother \n for ib in range (0, len(perim_uniq)):\n for ic in range (ib + 1, len(perim_uniq)):\n perim_ratio = 1 - math.sqrt((1 - (perim_uniq[ib] / perim_uniq[ic])) * (1 - (perim_uniq[ib] / perim_uniq[ic])))\n 
perim_list.append(perim_ratio)\n del perim_ratio\n perim_array = np.asarray(perim_list)\n stat_value_perim = stats.describe(perim_array)\n del perim_uniq, perim_list, perim_array\n\n #Create Summary statistics of all array ratios \n perim_ratio_ave = stat_value_perim.mean\n perim_ratio_sd = math.sqrt(stat_value_perim.variance)\n\n #Set the HSR equal to the average Perimeter Ratio \n hex_size_ratio = perim_ratio_ave\n hex_sd = np.sqrt((area_ratio_sd**2 + perim_ratio_sd**2) / 2)\n\n # Calculate Hexagonality score\n hex_ave = 10 * (hex_area_ratio + hex_size_ratio) / 2\n\n if neighbors < 3:\n poly_size_ratio = float(\"NAN\")\n poly_area_ratio = float(\"NAN\")\n poly_ave = float(\"NAN\")\n hex_size_ratio = float(\"NAN\")\n hex_area_ratio = float(\"NAN\")\n hex_ave = float(\"NAN\")\n hex_sd = float(\"NAN\")\n return(poly_ave, hex_ave, hex_sd)", "title": "" }, { "docid": "8c2a9977ba2d343f7a0ab361e1461d8a", "score": "0.49809524", "text": "def genHyperVote(self):\n self.transWal()\n# bit,fit = tl.compFit(self)\n# a = sorted(zip(bit,fit), key=lambda a_entry: a_entry[1]) \n# optBit = a[0][0]\n# optFit = a[0][1]\n# print 'opti\\n',optBit, optFit\n\n #for i in range(len(a)): \n# for i in range(10): \n# print '%s\\t%.3f' %(a[i][0],a[i][1])\n\n # initialize sumFitA \n self.sumFitA = []\n evalSubFunc = []\n for i in range(self.n):\n self.sumFitA.append(Struct(one=0,zero=0))\n \n for i in range(self.n):\n subBit = self.neighs[i][:]\n subBit.append(i)\n subBit.sort()\n\n if subBit not in evalSubFunc:\n evalSubFunc.append(subBit)\n\n # check every template that matches the subfunction\n seqBits = tl.genSeqBits(len(subBit))\n schFitArr = []\n walTouch = []\n\n # compute schema fitness\n for k in self.WA:\n subset = True\n for l in k.arr:\n if l not in subBit:\n subset = False\n break\n if subset == True:\n walTouch.append(k)\n\n for j in seqBits:\n schFit = 0\n\n # convert bit string to array representation\n schTpl = []\n for k in range(len(j)):\n if j[k] == '1':\n schTpl.append(subBit[k])\n\n for k in walTouch:\n schFit = schFit + int(math.pow(-1, self.binCountArr(k.arr, schTpl))) * k.w\n\n schFitArr.append(Struct(fit=schFit,arr=schTpl))\n# print subBit, j, schFit\n# print \n\n schFitArrSort = sorted(schFitArr, key = lambda i: i.fit)\n\n # perform voting from the best hyperplane associated with the subfunction\n #for k in range(self.k+1):\n for k in range(1):\n #for k in range(self.k*2):\n for j in subBit:\n if j in schFitArrSort[k].arr:\n #self.sumFitA[j].one = self.sumFitA[j].one + schFitArrSort[k].fit\n self.sumFitA[j].one = self.sumFitA[j].one + 1\n else:\n #self.sumFitA[j].zero = self.sumFitA[j].zero + schFitArrSort[k].fit\n self.sumFitA[j].zero = self.sumFitA[j].zero + 1", "title": "" }, { "docid": "6e40955a8c39fdce35da73088b27817f", "score": "0.4977121", "text": "def RunHydrogenCalculation(self,problemManager):\n \n if(self.type == \"NPV\" or self.type == \"HydrogenNPV\" ):\n rv = self.RunHydrogenNPVcalculation(problemManager)\n #elif(self.type == \"employment\"):\n # rv = self.RunEmploymentCalculation(problemManager)\n else:\n raise BluecapError(\"Error: Unrecognized regional hydrogen calculation: \" + self.type)\n \n return rv", "title": "" }, { "docid": "9339533de66970501ea921e65da74096", "score": "0.4976383", "text": "def test_like_a_normal_person():\n\t\"\"\"\n\tprint(\"ASDFAFSD\")\n\tprint(genetic_alg.for_testing(\"maxcut-140-630-0.7-1.cnf\", 500, \"ts\", \"uc\", .6, .01, 10, 'ga')[0].individual.fitness)\n\tprint(\"right before me\")\n\treturn\n\t\"\"\"\n\tselect_options = [\"ts\", \"rs\", 
\"bs\"]\n\tcrossover_options = [\"1c\", \"uc\"]\n\tdatas = []\n\t\"\"\"\n\tfor file in os.listdir(\"test_problems\"):\n\t\tif file[0] == \".\":\n\t\t\tcontinue\n\t\tprint(file)\n\t\tfor s_option in select_options:\n\t\t\tprint(s_option)\n\t\t\tfor c_option in crossover_options:\n\t\t\t\tprint(c_option)\n\t\t\t\tfor pop_size in range(1, 6):\n\t\t\t\t\tprint(pop_size)\n\t\t\t\t\tfor xover_prob in range(2, 6):\n\t\t\t\t\t\tprint(xover_prob)\n\t\t\t\t\t\tfor mutation_prob in range (0, 5):\t\n\t\t\t\t\t\t\tparameters = {\n\t\t\t\t\t\t\t\t\"file_name\": file,\n\t\t\t\t\t\t\t\t\"pop_size\": pop_size * 20,\n\t\t\t\t\t\t\t\t\"selection_type\": s_option,\n\t\t\t\t\t\t\t\t\"xover_method\": c_option,\n\t\t\t\t\t\t\t\t\"xover_prob\": (xover_prob/5)-.1,\n\t\t\t\t\t\t\t\t\"mutation_prob\": (mutation_prob/20) + .001,\n\t\t\t\t\t\t\t\t\"num_generations\": 10, \n\t\t\t\t\t\t\t\t\"algorithm\": \"ga\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tsolution = genetic_alg.for_testing(file, pop_size * 20, s_option, c_option, (xover_prob / 5) - .1, mutation_prob/20 + .001, 75, 'ga')\n\t\t\t\t\t\t\tdata = {\n\t\t\t\t\t\t\t\t\"solution\": solution,\n\t\t\t\t\t\t\t\t\"parameters\": parameters\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfitness = data[\"solution\"][0].individual.fitness\n\t\t\t\t\t\t\tprint(\"-{}-\".format(fitness))\n\t\t\t\t\t\t\tdatas.append(data)\n\t\"\"\"\n\tk = 0\n\tfor file in os.listdir(\"testy\"):\n\t\tif file[0] == \".\":\n\t\t\tcontinue\n\t\telse:\n\t\t\t#for select_type in select_options:\n\t\t\t\t#for c_type in crossover_options:\n\t\t\tfor pop in range(0, 1):\n\t\t\t\tprint(k/(15*10))\n\t\t\t\tk += 1\n\t\t\t\tsolution = genetic_alg.for_testing(file, 100, \"bs\", \"uc\", .9, .01, 800, 'ga')\n\t\t\t\tparameters = {\n\t\t\t\t\t\"file_name\": file,\n\t\t\t\t\t\"pop_size\": 100,\n\t\t\t\t\t\"selection_type\": \"ts\",\n\t\t\t\t\t\"xover_method\": \"uc\",\n\t\t\t\t\t\"xover_prob\": .9,\n\t\t\t\t\t\"mutation_prob\": .01,\n\t\t\t\t\t\"num_generations\": 200, \n\t\t\t\t\t\"algorithm\": \"ga\"\n\t\t\t\t}\n\t\t\t\tdata = {\n\t\t\t\t\t\"solution\": solution,\n\t\t\t\t\t\"parameters\": parameters\n\t\t\t\t}\n\t\t\t\tdatas.append(data)\n\n\tworkbook = xlsxwriter.Workbook('accuracy_solution2.xlsx')\n\tworksheet = workbook.add_worksheet()\n\tfile = []\n\tselect = []\n\tcrossover = []\n\tpop = []\n\tcross_prob = []\n\tmut_prob = []\n\tnum_gen = []\n\talg = []\n\tscore = []\n\titer_found = []\n\ttime = []\n\tfor item in datas:\n\t\tparam = item[\"parameters\"]\n\t\tfile.append(param[\"file_name\"])\n\t\tselect.append(param[\"selection_type\"])\n\t\tcrossover.append(param[\"xover_method\"])\n\t\tpop.append(param[\"pop_size\"])\n\t\tcross_prob.append(param[\"xover_prob\"])\n\t\tmut_prob.append(param[\"mutation_prob\"])\n\t\tnum_gen.append(param[\"num_generations\"])\n\t\talg.append(param[\"algorithm\"])\n\t\titer_found.append(item[\"solution\"][0].iteration_found)\n\t\ttime.append(item[\"solution\"][1])\n\t\tscore.append(item[\"solution\"][0].individual.fitness)\n\tarray = [file, select, crossover, pop, cross_prob, mut_prob, num_gen, alg, score, iter_found, time]\n\trow = 0\n\n\tfor col, data in enumerate(array):\n\t worksheet.write_column(row, col, data)\n\n\tworkbook.close()", "title": "" }, { "docid": "a7ab5dd07794684232374f9897a9df91", "score": "0.49663207", "text": "def GA(popsize, generations):\n fitnessList = []\n avgFitness = 0\n generationCount = 0\n # create initial generation\n progList = massCreate(popsize)\n # loop over remaining generations\n while generationCount != generations:\n avgFitness = 0\n fitnessList = []\n # 
evaluate fitness of each program and store results with programs in a list\n for program in progList:\n fitnessList.append((evaluateFitness(program, TRIALS, ROWS*COLUMNS*2), program))\n fitnessList.sort() # sort by first element of tuple -- fitness\n fitnessList.reverse() # get descending order\n # calculate average fitness of generation\n avgFitness = 0\n for program in fitnessList:\n avgFitness += program[0]\n avgFitness /= popsize\n # get #1 top performer\n print \"GENERATION \" + str(generationCount)\n print \"\\tAverage Program: \" + str(avgFitness)\n print \"\\tBest Program: \" + str(max(fitnessList)[0])\n\n # get top performers -- cutoff is determined by global var TOPFRACTION\n topFitness = fitnessList[0:int(len(fitnessList)*TOPFRACTION)]\n\n # breed programs to create new generation of size 'popsize'\n newPopulation = []\n while len(newPopulation) != popsize:\n daddyProgram = random.choice(topFitness)[1]\n mommyProgram = random.choice(topFitness)[1]\n childProgram = daddyProgram.crossover(mommyProgram)\n newPopulation.append(childProgram)\n\n # mutate new population according to global MUTATIONRATE\n desiredMutations = int(popsize * MUTATIONRATE)\n mutationCount = 0\n while mutationCount != desiredMutations:\n newPopulation[random.randint(0, popsize-1)].mutate()\n mutationCount += 1\n\n progList = newPopulation\n generationCount += 1\n print\n print \"Master Program from \" + str(generations) + \" generations of breeding: \"\n print \"\\tFitness: \" + str(max(fitnessList)[0])\n print\n # return the Master Program and HOOK IT UP TO SKYNET\n masterProgram = max(fitnessList)[1]\n return masterProgram", "title": "" }, { "docid": "30247a53bbba3e3838e2f8816f251b95", "score": "0.4962496", "text": "def fitness(self,\n x: list):\n # Compute Himmelblau function value\n function_value = math.pow(x[0] * x[0] + x[1] - 11.0, 2.0) + math.pow(x[0] + x[1] * x[1] - 7.0, 2.0)\n # Return list\n return [function_value]", "title": "" }, { "docid": "e489dd11a8d71257e5df1b6742fedf61", "score": "0.49548113", "text": "def calculateFitness(self, chromosome):\n\t\tchromosomeError = self.f(chromosome)\n\n\t\t#############################\n\t\t# YOUR CODE HERE #\n\t\t#############################\n\t\tpass", "title": "" }, { "docid": "71610ba1e9a1efd3be2aa22758ab69fe", "score": "0.49476033", "text": "def calc_goal_h(self):\n raise NotImplementedError", "title": "" }, { "docid": "3c5d1c2e28607660f51fb69c7ef9830c", "score": "0.49310905", "text": "def single_dimensional_fitness(individual):\n return pow(individual, 2)", "title": "" }, { "docid": "d2b2593e338e3e11107afb7d46514f7b", "score": "0.4929632", "text": "def fitness(self, sol=''):\n\n w = self.w(sol.split(\"-\") if sol != '' else self.solution)\n z = self.z(sol.split(\"-\") if sol != '' else self.solution)\n\n if w <= int(self.weightMax):\n return z\n else:\n return float(z - self.penality() * (w - int(self.weightMax)))", "title": "" }, { "docid": "61023fbfef1e9d377968045fdcdcf85e", "score": "0.49275038", "text": "def main(protein: Optional[str]):\n if not protein:\n protein = arg_parser.parse_args().protein\n protein = protein.lower()\n net = _load_net()\n protein_pred = protein[:4]\n x, _ = epitope.create_dataset(pd.DataFrame(data={\"protein\": [protein]}))\n dataset = TensorDataset(torch.tensor(x, dtype=torch.float32))\n data_loader = iter(data.DataLoader(dataset, batch_size=1, shuffle=False))\n all_predictions = []\n with torch.no_grad():\n for i in range(4, len(data_loader) + 4):\n d = next(data_loader)\n prediction = net(d[0]).squeeze(1)[0]\n 
all_predictions.append(prediction)\n if prediction - OPTIMAL_THRESHOLD >= 0.0:\n protein_pred += protein[i].upper()\n else:\n protein_pred += protein[i].lower()\n\n protein_pred += protein[-4:]\n with open(\"output.txt\", \"w\") as out:\n out.write(f\"Prediction for protein={protein} is:\\n{protein_pred}\\n\\n\")\n for i in range(len(all_predictions)):\n out.write(\n f\"probability for acid={protein[i + 4]} is {all_predictions[i]}\\n\"\n )\n print(f\"prediction is {protein_pred}, go to output.txt for more info\")\n return protein_pred", "title": "" }, { "docid": "dc011d51fb0a03b04242410545adca48", "score": "0.4924692", "text": "def objective(hyparams):\n start = time.time()\n\n agent = cls.routine(env, hyparams, seed, VFA)\n\n avg, _ = agent.optimality()\n return {'loss': - avg, 'status': STATUS_OK, 'eval_time': time.time() - start}", "title": "" }, { "docid": "4d9526ef70b8df9dd30f25d434fae4a7", "score": "0.4922532", "text": "def fitness_func(code: int, knapsack_obj: Knapsack):\n # get genome sequence\n genome = get_genome_sequence(code, knapsack_obj.n)\n # check if total load of genome fits in capacity\n if np.dot(genome, knapsack_obj.weights) <= knapsack_obj.capacity:\n # return the profit\n return np.dot(genome, knapsack_obj.values)\n # return Negative Infinity \n return np.NINF", "title": "" }, { "docid": "8d63734584759bf787f5e8d005c58419", "score": "0.4921558", "text": "def genetic_algorithm(self):\n \n # Init first population\n pop = Population(self.equation, population_size=self.population_size, boundary=(self.low_bound, self.high_bound),\\\n no_bits_per_item=self.no_bits_per_item, offset=self.offset, initial=True)\n # print(pop)\n x = pop.get_fittess_indiv()\n print(\"{:*^120}\".format(\"Initial fittess x\"))\n print(\"x = {0}, fitness = {1}\".format((x+self.offset), self.equation(x+self.offset)))\n\n # Set first pop\n self.evol.setPop(pop)\n \n for i in range(self.no_generations):\n # Evolve next generation\n pop = self.evol.evolve_generation()\n\n # Set new pop\n self.evol.setPop(pop)\n\n # Print solution\n if i % self.print_solution_per_gen == 0:\n x = pop.get_fittess_indiv()\n print(\"{:-^50}Generation {}{:-^50}\".format(\"\", i, \"\"))\n print(\"x = {}\".format(x+self.offset), end=\"\")\n print(\", fitness = {0}\".format(self.equation(x+self.offset)))\n\n return self.evol.getPop().get_fittess_indiv() + self.offset", "title": "" }, { "docid": "3d72bc5284bbc7d3674001032c9f50a7", "score": "0.49207792", "text": "def compute_cost():\n return (1 / (2 * m)) * sum((hypothesis() - y) ** 2)", "title": "" }, { "docid": "a5410e23593ba4d2bc2f8d9fa82cbd40", "score": "0.49187195", "text": "def hypothese(tour, grille, scoreG, nbCouleur = 6):\n if tour == 0:\n return [0,1,2,3]\n for i in range(nbCouleur ** 4): #Nombre de possibilitรฉ\n hypo = geneCouleur(i, nbCouleur)\n score = 0\n for tr in range(tour - 1):\n score += abs((fitness(evaluer(hypo, grille[tr])) - fitness(scoreG[tr])))\n if score == 0:\n return hypo\n return hypo", "title": "" }, { "docid": "00ddce291a167973fad214b2c586b1de", "score": "0.49157757", "text": "def objective_function_h(force,h,length,E,I):\r\n # Take the absolute value of the force so that negative values are not \r\n # encountered.\r\n force = (force[0])\r\n # get the shape of the ode\r\n x,y,theta,lengthTheory = solve_ode_system(h,force,E,I)\r\n # compute the percentage error squared\r\n error = (100*(length - lengthTheory)/length)**2\r\n \r\n return error", "title": "" }, { "docid": "2e0acd8eebc4ffcb979efd3ec908428e", "score": "0.4913287", "text": "def 
_score_protein(self):\n for index in self._indices:\n self._get_protein(index)\n\n for ann1,ann2 in self.proteinscore:\n if not self.pep[ann1] or not self.pep[ann2]:\n self.proteinscore[(ann1,ann2)] = 0\n continue\n alignment = []\n match = 0\n mafft_string = '>{0} {1}\\n{2}\\n'.format(ann1, self.cluster[ann1], self.pep[ann1])\n mafft_string += '>{0} {1}\\n{2}\\n'.format(ann2, self.cluster[ann2], self.pep[ann2])\n mafft_command = 'mafft --anysymbol -'\n p = Popen(mafft_command.split(), stdin = PIPE, stdout = PIPE, stderr = PIPE)\n out,err = p.communicate(input = mafft_string)\n out_lines = (x for x in out.split('\\n') if x.strip())\n faiter = (x[1] for x in groupby(out_lines, lambda line: line[0] == '>'))\n for header in faiter:\n header = header.next()[1:].strip()\n seq = ''.join(s.strip() for s in faiter.next())\n alignment.append(seq)\n for pair in zip(alignment[0], alignment[1]):\n if pair[0] == pair[1]:\n match += 1\n self.proteinscore[(ann1, ann2)] = float(match) / len(alignment[0])\n\n return {(ann1,ann2):value for (ann1,ann2),value in self.proteinscore.iteritems() if value == max(self.proteinscore.values())}\n \n #if percentage identity is below 90%, keep the original annatations\n if win.values()[0] < 0.90:\n return {(1,2):False}\n else:\n return win", "title": "" }, { "docid": "566dff60f6201e20b036ed3aa2061c00", "score": "0.49076456", "text": "def heating_value(fuel):\n gas.TP = 298, ct.one_atm\n gas.set_equivalence_ratio(1.0, fuel, 'O2:1.0')\n h1 = gas.enthalpy_mass\n Y_fuel = gas[fuel].Y[0]\n \n # complete combustion products\n Y_products = {'CO2': gas.elemental_mole_fraction('C'),\n 'H2O': 0.5 * gas.elemental_mole_fraction('H'),\n 'N2': 0.5 * gas.elemental_mole_fraction('N')}\n \n gas.TPX = None, None, Y_products\n Y_H2O = gas['H2O'].Y[0]\n h2 = gas.enthalpy_mass\n LHV = -(h2 - h1)/Y_fuel\n HHV = -(h2 - h1 + (h_liquid - h_gas)*Y_H2O)/Y_fuel\n return LHV, HHV", "title": "" }, { "docid": "6fd06541cfa911bd6e8ea2825aa53534", "score": "0.48810878", "text": "def get_problem():\n # Author: Joseph Williams\n\n problem = beluga.optim.Problem('planarHypersonicWithThrust')\n\n # Define independent variables\n problem.independent('t', 's')\n\n rho = 'rho0*exp(-h/H)'\n Cl = '(0.496*alfa + 0.0049)'\n Cd = '(0.4747*alfa^2 + 0.0096*alfa + 0.0007)'\n # T = 'ThrustFunction()'\n D = '(0.5*'+rho+'*v^2*'+Cd+'*Aref)'\n L = '(0.5*'+rho+'*v^2*'+Cl+'*Aref)'\n r = '(re+h)'\n Ft = '(T*cos(alfa) - '+D+')'\n Fn = '(T*sin(alfa) + '+L+')'\n\n # Define equations of motion\n problem.state('h','v*sin(gam)','m') \\\n .state('theta','v*cos(gam)/'+r,'rad') \\\n .state('v',Ft+'/mass - mu*sin(gam)/'+r+'**2','m/s') \\\n .state('gam',Fn+'/(mass*v) + (v/'+r+' - mu/(v*'+r+'^2))*cos(gam)','rad') \\\n .state('mass','mdotf','kg')\n\n # Define controls\n problem.control('alfa','rad')\n\n # Define costs\n problem.cost['terminal'] = Expression('-v^2','m^2/s^2')\n # problem.cost['path'] = Expression('1','s')\n # Define constraints\n problem.constraints().initial('h-h_0','m') \\\n .initial('theta-theta_0','rad') \\\n .initial('v-v_0','m/s') \\\n .initial('mass-mass_0','kg') \\\n .terminal('h-h_f','m') \\\n .terminal('theta-theta_f','rad')\n\n # Define constants\n problem.constant('mu', 3.986e5*1e9, 'm^3/s^2') # Gravitational parameter, m^3/s^2\n problem.constant('rho0', 1.2, 'kg/m^3') # Sea-level atmospheric density, kg/m^3\n problem.constant('H', 7500, 'm') # Scale height for atmosphere of Earth, m\n\n # problem.constant('T',2668932,'kg*m/s^2') # Constant Thrust of Vehcicle, kgm/s^2\n# 
problem.constant('mdotf',15.5*0.05,'kg/s') # Fuel Mass Flow Rate of Vehicle, kg\n problem.constant('T',0,'kg*m/s^2') # Constant Thrust of Vehcicle, kgm/s^2\n problem.constant('mdotf',0,'kg/s') # Fuel Mass Flow Rate of Vehicle, kg\n\n problem.constant('re',6378000,'m') # Radius of planet, m\n problem.constant('Aref',557.4,'m^2') # Reference area of vehicle, m^2\n problem.constant('rn',1/12*0.3048,'m') # Nose radius, m\n\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=4)\n #problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False)\n\n problem.scale.unit('m','h') \\\n .unit('s','h/v') \\\n .unit('kg','mass') \\\n .unit('rad',1)\n\n # Define quantity (not implemented at present)\n # Is this actually an Expression rather than a Value?\n # problem.quantity = [Value('tanAng','tan(theta)')]\n\n # problem.guess.setup('auto',start=[1000,0,100,-90*pi/180,127005])\n problem.guess.setup('auto',start=[80000,0,5000,-90*pi/180,127005])\n #problem.guess.setup('auto',start=[80000,3.38575809e-21,5000,7.98617365e-02],direction='forward',time_integrate=229.865209,costate_guess =[-1.37514494e+01,3.80852584e+06,-3.26290152e+03,-2.31984720e-14])\n # Figure out nicer way of representing this. Done?\n\n problem.steps.add_step().num_cases(11) \\\n .terminal('h', 0) \\\n # .initial('h',60000)\\\n # .initial('v',5000)\n #.terminal('theta', 10*pi/180)\n problem.steps.add_step().num_cases(11) \\\n .terminal('theta', 10*pi/180)\n\n problem.steps.add_step().num_cases(5) \\\n .const('T', 2668932) \\\n .const('mdotf', 15.5*0.05)\n\n #\n # problem.steps.add_step()\n # .num_cases(3)\n # .terminal('x', 40.0)\n # .terminal('y',-40.0)\n # )\n return problem", "title": "" }, { "docid": "89bb2a361fbd37b2c05a92f17ac7c8d4", "score": "0.48786464", "text": "def _compute_heuristic_cost(self):\r\n # Map tiles to their final coordinates\r\n d = {0: (0,0), 1: (0,1), 2: (0,2),\r\n 3: (1,0), 4: (1,1), 5: (1,2),\r\n 6: (2,0), 7: (2,1), 8: (2,2)}\r\n total_h = 0 # inital heuristic cost\r\n \r\n # add up Manhattan distances for each tile\r\n for r in range(3):\r\n for c in range(3):\r\n if self.puzzle[r][c] != 0:\r\n total_h = total_h + abs(d[self.puzzle[r][c]][0]-r) + abs(d[self.puzzle[r][c]][1]-c)\r\n \r\n self.hcost = total_h", "title": "" }, { "docid": "41ed1a6cac4b397262d594cb7def8eb2", "score": "0.48744744", "text": "def bungalow_cost(self, coordinates):\n\n # set default price of a bungalow\n self.bungalow = 399000\n\n # calculate percentage of extra housing worth per extra square meter space\n self.percentage_bungalow = self.bungalow * 0.04\n\n # make default total price of all bungalows\n self.total_bungalow = self.bungalow_amount * self.bungalow\n\n # retrieve coordinates\n coordinateslist = coordinates\n\n # check the outline of every bungalow for extra free space\n for coordinate in coordinateslist:\n\n single = 1 \n bungalow = 2 \n maison = 3\n \n # free space is calculated by checking the distance between house and its surroundings.\n distance = 4\n\n x_coordinaat = coordinate[0]\n y_coordinaat = coordinate[1]\n \n # check for free space around house until another house is found\n check = True\n while check == True:\n\n # distance between house and its surroundings is increased by one for each run to check for more free space\n # free space around a house is checked on each side (up, down, left and right)\n x = x_coordinaat - distance\n x_ver = 
x_coordinaat + 11 + distance\n y = y_coordinaat - distance\n y_ver = y_coordinaat + 7 + distance\n\n # reset coordinates when out of boundary, because extra free space is able to 'go over' the boundaries\n if x < 0:\n x = 0 \n\n if x_ver > 160:\n x_ver=160\n\n if y < 0:\n y = 0\n \n if y_ver > 180:\n y_ver = 180\n \n # remove current house from the gridmap\n self.neighbourhood[(y_coordinaat - 3):(y_coordinaat + 10),(x_coordinaat - 3):(x_coordinaat + 14)] = 0\n\n # check for other house in given range of free space\n try: \n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n\n # when no house is found, recalculate total price and add one extra meter of free space\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1 \n\n # check for IndexError to be sure a coordinate does not go out of range\n except IndexError:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1 \n\n # redraw house on the gridmap\n self.neighbourhood[(y_coordinaat - 3):(y_coordinaat + 10),(x_coordinaat - 3):(x_coordinaat + 14)] = 5\n self.neighbourhood[y_coordinaat:(y_coordinaat + 7),x_coordinaat:(x_coordinaat + 11)] = 2\n\n return self.total_bungalow", "title": "" }, { "docid": "cf0f9a27bca85f2f481ce43b5bc2f27e", "score": "0.48703694", "text": "def generate_het(self):\n \n self.hetx_list, self.hety_list = ([] for i in range(2))\n # get the general expression for h in z before plugging in g,z.\n \n # column vectors ax ay for use in matrix A = [ax ay]\n self.ax = Matrix([[0],[0]])\n self.ay = Matrix([[0],[0]])\n \n for j in range(1,self.trunc_gh+1):\n p1 = lib.kProd(j,self.dx)\n p2 = kp(p1,sym.eye(2))\n \n d1 = lib.vec(lib.df(self.NIC1,self.x,j+1))\n d2 = lib.vec(lib.df(self.NIC2,self.x,j+1))\n \n self.ax += (1/math.factorial(j)) * p2*d1\n self.ay += (1/math.factorial(j)) * p2*d2\n \n self.A = sym.zeros(2,2)\n \n self.A[:,0] = self.ax\n self.A[:,1] = self.ay\n \n \n self.z_expansion = Matrix([[self.zx],[self.zy]])\n het = self.A*self.z_expansion\n \n \n # expand all terms\n self.hetx = sym.expand(het[0].subs([(self.dx1,self.gx),(self.dx2,self.gy)]))\n self.hety = sym.expand(het[1].subs([(self.dx1,self.gx),(self.dx2,self.gy)]))\n \n # collect all psi terms into factors of pis^k\n self.hetx_powers = sym.collect(self.hetx,self.psi,evaluate=False)\n self.hety_powers = sym.collect(self.hety,self.psi,evaluate=False)\n \n \n self.hetx_list = []\n self.hety_list = []\n \n counter = 0\n while (counter <= self.trunc_g+1):\n \n # save current term\n self.hetx_list.append(self.hetx_powers[self.psi**counter])\n self.hety_list.append(self.hety_powers[self.psi**counter])\n \n counter += 1\n \n # substitute limit cycle\n for i in range(len(self.ghx_list)):\n self.hetx_list[i] = self.hetx_list[i].subs({self.x1:sym.cos(2*sym.pi*self.t),\n self.x2:sym.sin(2*sym.pi*self.t),\n sym.Indexed('gx',0):s(0),\n sym.Indexed('gy',0):s(0)})\n self.hety_list[i] = self.hety_list[i].subs({self.x1:sym.cos(2*sym.pi*self.t),\n self.x2:sym.sin(2*sym.pi*self.t),\n sym.Indexed('gx',0):s(0),\n sym.Indexed('gy',0):s(0)})", "title": "" }, { "docid": "fe4429869a3d2fb5b1108c293a8d6925", "score": "0.48668933", "text": "def compute_charged_mass(pepmass, z):\n return((pepmass+(PROTON*z)) / z)", "title": "" }, { 
"docid": "f3ed6b59ddfac7cb027a1f9650e3a325", "score": "0.48629898", "text": "def subgoal_heuristic(problem):\n def h(state):\n costs = []\n for g in problem.goals:\n subgoal_plan = planner(problem, null_heuristic, state, ((g,), ()))\n costs.append(plan_cost(subgoal_plan))\n for g in problem.num_goals:\n subgoal_plan = planner(problem, null_heuristic, state, ((), (g,)))\n costs.append(plan_cost(subgoal_plan))\n return max(costs)\n return h", "title": "" }, { "docid": "a8e4901c2c3e9abb51d7899fbf49f9f4", "score": "0.48623714", "text": "def fitness(self, position):\n # print(position)\n fitness = math.sin(position[0]) # + math.cos(position[1])\n\n return fitness", "title": "" }, { "docid": "c6a6a8d6ef6b7f5f45c4cf7edc8cfba0", "score": "0.48601383", "text": "def calculate_fitness(self):\n dist = distance(self.posXY, TARGET_XY)\n if self.reached_goal:\n self.fitness = 1.0/16.0 + 1000.0/self.step\n else:\n self.fitness = 1.00 / (dist**2)\n return self.fitness", "title": "" }, { "docid": "73f9850277da386c19dfbb9d70254f8f", "score": "0.4842256", "text": "def gpa_maximization(chromosome, queued_vehicles):\n fitness = 0 # initialize fitness to 0\n #print(\"NUEVO CROMOSOMA\")\n for i in range(len(chromosome)):\n # increment fitness by 1 for every matching character\n #print(\"Interseccion:\",i,\" \",chromosome[i])\n fitness += gpa_one_intersection(chromosome[i], queued_vehicles[i])\n return fitness", "title": "" }, { "docid": "d44bedadb4124d74f87e4e5a9aaf7650", "score": "0.4839236", "text": "def eval_func_hamdist(chromosome):\n EcocMatrix, features_used_list = TMConvertor.getMatrixDirectly_and_feature(chromosome)\n classes = gol.get_val(\"classes\")\n dist = 0\n for i in xrange(len(EcocMatrix)):\n for j in xrange(i+1, len(EcocMatrix)):\n dist += distance.hamming(EcocMatrix[i], EcocMatrix[j])\n num = len(classes)*(len(classes)-1)/2\n dist /= num\n return dist", "title": "" }, { "docid": "75a3058401cd30519802899e181f6760", "score": "0.48345113", "text": "def evaluate_fitness( self , creature ):\r\n cScore = 0\r\n for gene in creature.genes:\r\n for codon in gene.codons:\r\n if codon == \"NT\":\r\n cScore += 1\r\n creature.score = cScore", "title": "" }, { "docid": "5f90f0c68dbb46ae9ffe194c40b81cd4", "score": "0.48314306", "text": "def lamarckian_fitness_function(genome):\n individual = ImageClassifier(w1=genome[\"w1\"], w2=genome[\"w2\"])\n fitness = 0\n\n for label, image in labeled_images:\n classification = individual.classify(image)\n if classification == label:\n fitness += 100\n else:\n individual.back_propagate()\n\n final_genome = individual.get_weights()\n\n return fitness, final_genome", "title": "" }, { "docid": "adeba55e69a7668e4afcad55c92fb3bb", "score": "0.482009", "text": "def calc_zpe(self, verbose=False):\n\n if self.Gzpe is None:\n if self.freq is None:\n self.get_vibrations(verbose=verbose)\n\n # Truncate some modes if required\n if self.state_type == 'gas':\n if self.shape is None:\n self.get_atoms()\n ntrunc = self.shape\n elif self.state_type == 'TS' and len(self.i_freq) == 0:\n ntrunc = 1\n else:\n ntrunc = 0\n nfreqs = self.freq.shape[0] - ntrunc\n use_freq = copy.deepcopy(self.freq[0:nfreqs])\n\n self.Gzpe = 0.5 * h * sum(use_freq) * JtoeV", "title": "" }, { "docid": "689f3d35d2afba288ad0b77d94bd7c51", "score": "0.48162013", "text": "def fitness_function(self, query: str) -> float:\n pass", "title": "" }, { "docid": "084536c83459fc607f49fdedfa168b1a", "score": "0.48161736", "text": "def homophily(G,T,n,flagSymmetric):\n \n homophilyLabels=['Homophily','Coleman','Freeman']\n 
numlinks = np.sum(G, axis=None, dtype=float) #axis=None - sum all elements\n if flagSymmetric:\n numlinks = numlinks/float(2)\n numpossiblelinks = n*(n-1)/2;\n\n numlinks_typetau = np.matmul(np.matmul(T,G),T)\n IT = np.ones(n,dtype=float) - T\n numlinks_crosstype = np.matmul(np.matmul(IT,G),T)\n if flagSymmetric:\n numlinks_typetau = numlinks_typetau/float(2)\n numlinks_crosstype = numlinks_crosstype/float(2)\n\n\n numtypetau = np.sum(T,axis=None,dtype=float)\n proptypetau= numtypetau/float(n)\n density= numlinks/numpossiblelinks\n Enumlinks_crosstype = float(numtypetau*(n-numtypetau)*density)\n\n \n H=numlinks_typetau/numlinks\n C=0\n if proptypetau<1:\n C=(H-proptypetau)/(float(1)-proptypetau)\n F=0\n if Enumlinks_crosstype>0:\n F=(Enumlinks_crosstype - numlinks_crosstype)/Enumlinks_crosstype\n\n return H,C,F", "title": "" }, { "docid": "cfbafa82157e0e2ba212e5a155526bd1", "score": "0.481522", "text": "def post_process_protein(params, protein):\n\n def sequence_length(protein):\n return protein['sequence_length']\n\n def has_tm_helix(protein):\n for program in params['helix_programs']:\n if dict_get(protein, '%s_helices' % program):\n return True\n return False\n\n def has_surface_exposed_loop(protein):\n for program in params['helix_programs']:\n if eval_surface_exposed_loop(\n protein['sequence_length'],\n len(protein['%s_helices' % (program)]),\n protein['%s_outer_loops' % (program)],\n params['terminal_exposed_loop_min'],\n params['internal_exposed_loop_min']):\n return True\n return False\n\n def exposed_loop_extent(protein):\n extents = []\n for program in params['helix_programs']:\n if program + '_helices' in protein:\n extents.append(max_exposed_loop(\n protein['sequence_length'],\n len(protein['%s_helices' % (program)]),\n protein['%s_outer_loops' % (program)],\n params['terminal_exposed_loop_min'],\n params['internal_exposed_loop_min']))\n if extents:\n return max(extents)\n else:\n return 0\n\n terminal_exposed_loop_min = \\\n params['terminal_exposed_loop_min']\n\n is_hmm_profile_match = dict_get(protein, 'hmmsearch')\n is_lipop = dict_get(protein, 'is_lipop')\n if is_lipop:\n i_lipop_cut = protein['lipop_cleave_position']\n is_signalp = dict_get(protein, 'is_signalp')\n if is_signalp:\n i_signalp_cut = protein['signalp_cleave_position']\n\n details = []\n if is_hmm_profile_match:\n details += [\"hmm(%s)\" % \"|\".join(protein['hmmsearch'])]\n if is_lipop:\n details += [\"lipop\"]\n if is_signalp:\n details += [\"signalp\"]\n for program in params['helix_programs']:\n if has_tm_helix(protein):\n n = len(protein['%s_helices' % program])\n details += [program + \"(%d)\" % n]\n\n if is_lipop:\n chop_nterminal_peptide(protein, i_lipop_cut)\n elif is_signalp:\n chop_nterminal_peptide(protein, i_signalp_cut)\n\n if is_hmm_profile_match:\n category = \"PSE-Cellwall\"\n elif has_tm_helix(protein):\n if has_surface_exposed_loop(protein):\n category = \"PSE-Membrane\"\n else:\n category = \"MEMBRANE(non-PSE)\"\n else:\n if is_lipop:\n # whole protein considered outer terminal loop\n if sequence_length(protein) < terminal_exposed_loop_min:\n category = \"LIPOPROTEIN(non-PSE)\"\n else:\n category = \"PSE-Lipoprotein\"\n elif is_signalp:\n category = \"SECRETED\"\n else:\n category = \"CYTOPLASM(non-PSE)\"\n\n if details == []:\n details = [\".\"]\n\n protein['details'] = details\n protein['category'] = category\n if 'CYTOPLASM' not in category and 'SECRETED' not in category:\n protein['loop_extent'] = exposed_loop_extent(protein)\n else:\n protein['loop_extent'] = \".\"\n\n return 
details, category", "title": "" }, { "docid": "ef0a106b18979c5be48eb064a0fa1107", "score": "0.48145115", "text": "def fitness(self, x):\n i = 0\n for obj, data in self.variables.items():\n for label, params in data.items():\n if obj in [\"Connections\", \"Components\"]:\n for param in params:\n self.input_dict[obj][label][param] = x[i]\n i += 1\n else:\n self.input_dict[obj][label] = x[i]\n i += 1\n\n self.model.solve_model(**self.input_dict)\n f1 = [self.model.get_objective(self.objective)]\n\n cu = self.collect_constraints(\"upper\")\n cl = self.collect_constraints(\"lower\")\n\n return f1 + cu + cl", "title": "" }, { "docid": "8e18ae1334e5e6cbc693cefbed7e8ab0", "score": "0.47915804", "text": "def fitness(parents, weights, values):\n backpack_weights = np.sum(np.multiply(weights, parents), axis=1)\n boolian_weights = backpack_weights > max_weight\n for idx in range(len(backpack_weights)):\n if boolian_weights[idx]:\n items_idx = np.where(parents[idx])[0]\n np.random.shuffle(items_idx)\n i = 0\n while backpack_weights[idx] > max_weight:\n\n parents[idx][items_idx[i]] = 0\n backpack_weights[idx] -= weights[0][items_idx[i]]\n i += 1\n fit = np.sum(np.multiply(values, parents), axis=1)\n fit_sorted = fit.argsort()\n fit = fit[fit_sorted[::-1]]\n parents = parents[fit_sorted[::-1]]\n fitns_eval = np.std(fit[:int(len(fit)*0.8)])\n # store the best backpack itmes in the variable best_element \n global best_elemet, best_score\n if fit[0] > best_score:\n best_score = fit[0]\n best_elemet = parents[0]\n return (parents, fitns_eval)", "title": "" }, { "docid": "6446337c899e81282994f21488cc1246", "score": "0.47893465", "text": "def run(self) -> (typing.List, float):\n population = self.toolbox.population(n=self.population_size)\n hof = tools.HallOfFame(1)\n\n if self.warm_start_path is not None:\n population = self.create_population_from_csv(population)\n\n if self.output_dir is not None:\n halloffame = tools.HallOfFame(maxsize=self.population_size)\n topone = tools.HallOfFame(maxsize=1)\n\n ending_time = time.time() + self.timeout\n self.gen = 0\n while self.gen < self.n_generations and time.time() < ending_time:\n self.gen += 1\n if self.verbose:\n print(\"On generation {}\".format(self.gen))\n\n # create and validate new generation by geneticism\n final_population = self.get_new_generation(population)\n \n # Evaluate the individuals with an invalid fitness\n population = final_population\n invalid_ind = [ind for ind in population if not ind.fitness.valid]\n fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n if self.output_dir is not None:\n halloffame.update(population)\n # it's a list of lists\n individuals = halloffame.items\n # reverse halloffame items since they did it reverse\n individuals.reverse()\n scores = [item.wvalues[0] * self.optimize for item in halloffame.keys]\n if self.verbose:\n print(\"The results from generation {} are:\".format(self.gen))\n self.create_dataset_from_results(individuals, scores, self.output_dir is not None, self.verbose, prefix=\"generation-{}\".format(self.gen))\n halloffame.clear()\n\n if len(population) != 0:\n topone.update(population)\n population = self.toolbox.select(population, k=len(population))\n\n if len(topone) == 0:\n return [], float(\"inf\") if self.minimize else float(\"-inf\")\n\n return topone.items[0], topone.keys[0].wvalues[0] * self.optimize", "title": "" }, { "docid": "be6e67cd770c8969b81c61361f77b341", "score": "0.4780094", "text": "def 
evaluateHadronicityWithVoting(self, returnCopy=True, votingMethod=None, votingFilter=None):\n\n if votingMethod==None:\n help(MCTree.evaluateHadronicityWithVoting)\n raise ValueError(\"evaluateHadronicityWithVoting() in MCTree class requires a chosen voting method.\")\n elif votingMethod==0:\n # self.vote = lambda g, p : 1\n self.vote = lambda g : 1\n elif votingMethod==1:\n # self.vote = lambda g, p : self.gens[g].p4().P() / self.gens[p].p4().P()\n self.vote = lambda g : self.gens[g].p4().P() #/ self.gens[p].p4().P()\n elif votingMethod==2: #Not a good method, due to changes in angular direction for daughters versus parent particles\n # self.vote = lambda g, p : self.gens[g].pt / self.gens[p].pt\n self.vote = lambda g : self.gens[g].pt #/ self.gens[p].pt\n\n \n tJets = {}\n tGenJets = {}\n tFatJets = {}\n tGenJetAK8s = {}\n bJets = {}\n bGenJets = {}\n bFatJets = {}\n bGenJetAK8s = {}\n WDau1Jets = {}\n WDau1GenJets = {}\n WDau1FatJets = {}\n WDau1GenJetAK8s = {}\n WDau2Jets = {}\n WDau2GenJets = {}\n WDau2FatJets = {}\n WDau2GenJetAK8s = {}\n\n tJetsWeight = {}\n tGenJetsWeight = {}\n tFatJetsWeight = {}\n tGenJetAK8sWeight = {}\n bJetsWeight = {}\n bGenJetsWeight = {}\n bFatJetsWeight = {}\n bGenJetAK8sWeight = {}\n WDau1JetsWeight = {}\n WDau1GenJetsWeight = {}\n WDau1FatJetsWeight = {}\n WDau1GenJetAK8sWeight = {}\n WDau2JetsWeight = {}\n WDau2GenJetsWeight = {}\n WDau2FatJetsWeight = {}\n WDau2GenJetAK8sWeight = {}\n\n tJetsWithVoting = {}\n tGenJetsWithVoting = {}\n tFatJetsWithVoting = {}\n tGenJetAK8sWithVoting = {}\n bJetsWithVoting = {}\n bGenJetsWithVoting = {}\n bFatJetsWithVoting = {}\n bGenJetAK8sWithVoting = {}\n WDau1JetsWithVoting = {}\n WDau1GenJetsWithVoting = {}\n WDau1FatJetsWithVoting = {}\n WDau1GenJetAK8sWithVoting = {}\n WDau2JetsWithVoting = {}\n WDau2GenJetsWithVoting = {}\n WDau2FatJetsWithVoting = {}\n WDau2GenJetAK8sWithVoting = {}\n\n #vote renormalization factors\n self.tb_renorm = {}\n self.tW_dau1_renorm = {}\n self.tW_dau2_renorm = {}\n self.t_first_renorm = {}\n for tidx in self.t_head.values():\n tJets[tidx] = []\n tGenJets[tidx] = []\n tFatJets[tidx] = []\n tGenJetAK8s[tidx] = []\n bJets[tidx] = []\n bGenJets[tidx] = []\n bFatJets[tidx] = []\n bGenJetAK8s[tidx] = []\n WDau1Jets[tidx] = []\n WDau1GenJets[tidx] = []\n WDau1FatJets[tidx] = []\n WDau1GenJetAK8s[tidx] = []\n WDau2Jets[tidx] = []\n WDau2GenJets[tidx] = []\n WDau2FatJets[tidx] = []\n WDau2GenJetAK8s[tidx] = []\n\n tJetsWeight[tidx] = []\n tGenJetsWeight[tidx] = []\n tFatJetsWeight[tidx] = []\n tGenJetAK8sWeight[tidx] = []\n bJetsWeight[tidx] = []\n bGenJetsWeight[tidx] = []\n bFatJetsWeight[tidx] = []\n bGenJetAK8sWeight[tidx] = []\n WDau1JetsWeight[tidx] = []\n WDau1GenJetsWeight[tidx] = []\n WDau1FatJetsWeight[tidx] = []\n WDau1GenJetAK8sWeight[tidx] = []\n WDau2JetsWeight[tidx] = []\n WDau2GenJetsWeight[tidx] = []\n WDau2FatJetsWeight[tidx] = []\n WDau2GenJetAK8sWeight[tidx] = []\n\n tJetsWithVoting[tidx] = {}\n tGenJetsWithVoting[tidx] = {}\n tFatJetsWithVoting[tidx] = {}\n tGenJetAK8sWithVoting[tidx] = {}\n bJetsWithVoting[tidx] = {}\n bGenJetsWithVoting[tidx] = {}\n bFatJetsWithVoting[tidx] = {}\n bGenJetAK8sWithVoting[tidx] = {}\n WDau1JetsWithVoting[tidx] = {}\n WDau1GenJetsWithVoting[tidx] = {}\n WDau1FatJetsWithVoting[tidx] = {}\n WDau1GenJetAK8sWithVoting[tidx] = {}\n WDau2JetsWithVoting[tidx] = {}\n WDau2GenJetsWithVoting[tidx] = {}\n WDau2FatJetsWithVoting[tidx] = {}\n WDau2GenJetAK8sWithVoting[tidx] = {}\n\n #Fill dictionaries with jet indices and value initialized to 0 (will 
be accumulated votes for the jet)\n #Append jet list of tuples with the jet index and the weight of the vote\n #print(\"b Desc: \" + str(self.tb_desc[tidx]))\n #Renomalization: Fix the non-closure of voting method by accumulating the net weighted votes here, then dividing the accumulation by it\n self.tb_renorm[tidx] = 0\n self.tW_dau1_renorm[tidx] = 0\n self.tW_dau2_renorm[tidx] = 0\n self.t_first_renorm[tidx] = 0\n\n for dnode in self.tb_desc[tidx]:\n # thevote = self.vote(dnode, self.tb_first[tidx])\n thevote = self.vote(dnode)\n self.tb_renorm[tidx] += thevote\n if self.jets:\n #print(self.treeJet[dnode])\n for i in self.treeJet[dnode]:\n bJetsWithVoting[tidx][i] = 0\n bJetsWeight[tidx].append((i, thevote))\n if self.genjets:\n for i in self.treeGenJet[dnode]:\n bGenJetsWithVoting[tidx][i] = 0\n bGenJetsWeight[tidx].append((i, thevote))\n if self.fatjets:\n for i in self.treeFatJet[dnode]:\n bFatJetsWithVoting[tidx][i] = 0\n bFatJetsWeight[tidx].append((i, thevote))\n if self.genfatjets:\n for i in self.treeGenJetAK8[dnode]:\n bGenJetAK8sWithVoting[tidx][i] = 0\n bGenJetAK8sWeight[tidx].append((i, thevote))\n #print(\"W Dau 1 Desc: \" + str(self.tW_dau1_desc[tidx]))\n for dnode in self.tW_dau1_desc[tidx]:\n # thevote = self.vote(dnode, self.tW_dau1_last[tidx])\n thevote = self.vote(dnode)\n self.tW_dau1_renorm[tidx] += thevote\n if self.jets:\n for i in self.treeJet[dnode]:\n WDau1JetsWithVoting[tidx][i] = 0\n WDau1JetsWeight[tidx].append((i, thevote))\n if self.genjets:\n for i in self.treeGenJet[dnode]:\n WDau1GenJetsWithVoting[tidx][i] = 0\n WDau1GenJetsWeight[tidx].append((i, thevote))\n if self.fatjets:\n for i in self.treeFatJet[dnode]:\n WDau1FatJetsWithVoting[tidx][i] = 0\n WDau1FatJetsWeight[tidx].append((i, thevote))\n if self.genfatjets:\n for i in self.treeGenJetAK8[dnode]:\n WDau1GenJetAK8sWithVoting[tidx][i] = 0\n WDau1GenJetAK8sWeight[tidx].append((i, thevote))\n #print(\"W Dau 2 Desc: \" + str(self.tW_dau2_desc[tidx]))\n for dnode in self.tW_dau2_desc[tidx]:\n # thevote = self.vote(dnode, self.tW_dau2_last[tidx])\n thevote = self.vote(dnode)\n self.tW_dau2_renorm[tidx] += thevote\n if self.jets:\n for i in self.treeJet[dnode]:\n WDau2JetsWithVoting[tidx][i] = 0\n WDau2JetsWeight[tidx].append((i, thevote))\n if self.genjets:\n for i in self.treeGenJet[dnode]:\n WDau2GenJetsWithVoting[tidx][i] = 0\n WDau2GenJetsWeight[tidx].append((i, thevote))\n if self.fatjets:\n for i in self.treeFatJet[dnode]:\n WDau2FatJetsWithVoting[tidx][i] = 0\n WDau2FatJetsWeight[tidx].append((i, thevote))\n if self.genfatjets:\n for i in self.treeGenJetAK8[dnode]:\n WDau2GenJetAK8sWithVoting[tidx][i] = 0\n WDau2GenJetAK8sWeight[tidx].append((i, thevote))\n #print(\"t Desc: \" + str(self.t_first_desc[tidx]))\n \n #t_remainder = set(self.t_first_desc[tidx]) - (set(self.tb_first_desc[tidx]) + set(self.tW_dau1_desc[tidx]) + set(self.tW_dau2_desc[tidx]))\n #for dnode in t_remainder:\n for dnode in self.t_first_desc[tidx]:\n # thevote = self.vote(dnode, self.t_first[tidx])\n thevote = self.vote(dnode)\n self.t_first_renorm[tidx] += thevote\n if self.jets:\n for i in self.treeJet[dnode]:\n tJetsWithVoting[tidx][i] = 0\n tJetsWeight[tidx].append((i, thevote))\n if self.genjets:\n for i in self.treeGenJet[dnode]:\n tGenJetsWithVoting[tidx][i] = 0\n tGenJetsWeight[tidx].append((i, thevote))\n if self.fatjets:\n for i in self.treeFatJet[dnode]:\n tFatJetsWithVoting[tidx][i] = 0\n tFatJetsWeight[tidx].append((i, thevote))\n if self.genfatjets:\n for i in self.treeGenJetAK8[dnode]:\n 
tGenJetAK8sWithVoting[tidx][i] = 0\n tGenJetAK8sWeight[tidx].append((i, thevote))\n\n #Accumulate votes by using the key in the 1st slot of the tuple, and the vote weight in the second slot, with renormalization factor\n for v in tJetsWeight[tidx]:\n tJetsWithVoting[tidx][v[0]] += v[1]/self.t_first_renorm[tidx]\n for v in tGenJetsWeight[tidx]:\n tGenJetsWithVoting[tidx][v[0]] += v[1]/self.t_first_renorm[tidx]\n for v in tFatJetsWeight[tidx]:\n tFatJetsWithVoting[tidx][v[0]] += v[1]/self.t_first_renorm[tidx]\n for v in tGenJetAK8sWeight[tidx]:\n tGenJetAK8sWithVoting[tidx][v[0]] += v[1]/self.t_first_renorm[tidx]\n for v in bJetsWeight[tidx]:\n bJetsWithVoting[tidx][v[0]] += v[1]/self.tb_renorm[tidx]\n for v in bGenJetsWeight[tidx]:\n bGenJetsWithVoting[tidx][v[0]] += v[1]/self.tb_renorm[tidx]\n for v in bFatJetsWeight[tidx]:\n bFatJetsWithVoting[tidx][v[0]] += v[1]/self.tb_renorm[tidx]\n for v in bGenJetAK8sWeight[tidx]:\n bGenJetAK8sWithVoting[tidx][v[0]] += v[1]/self.tb_renorm[tidx]\n for v in WDau1JetsWeight[tidx]:\n WDau1JetsWithVoting[tidx][v[0]] += v[1]/self.tW_dau1_renorm[tidx]\n for v in WDau1GenJetsWeight[tidx]:\n WDau1GenJetsWithVoting[tidx][v[0]] += v[1]/self.tW_dau1_renorm[tidx]\n for v in WDau1FatJetsWeight[tidx]:\n WDau1FatJetsWithVoting[tidx][v[0]] += v[1]/self.tW_dau1_renorm[tidx]\n for v in WDau1GenJetAK8sWeight[tidx]:\n WDau1GenJetAK8sWithVoting[tidx][v[0]] += v[1]/self.tW_dau1_renorm[tidx]\n for v in WDau2JetsWeight[tidx]:\n WDau2JetsWithVoting[tidx][v[0]] += v[1]/self.tW_dau2_renorm[tidx]\n for v in WDau2GenJetsWeight[tidx]:\n WDau2GenJetsWithVoting[tidx][v[0]] += v[1]/self.tW_dau2_renorm[tidx]\n for v in WDau2FatJetsWeight[tidx]:\n WDau2FatJetsWithVoting[tidx][v[0]] += v[1]/self.tW_dau2_renorm[tidx]\n for v in WDau2GenJetAK8sWeight[tidx]:\n WDau2GenJetAK8sWithVoting[tidx][v[0]] += v[1]/self.tW_dau2_renorm[tidx]\n\n #Convert to lists of tuples and sort\n tJets[tidx] = tJetsWithVoting[tidx].items()\n tJets[tidx].sort(key=lambda k : k[1], reverse=True)\n tGenJets[tidx] = tGenJetsWithVoting[tidx].items()\n tGenJets[tidx].sort(key=lambda k : k[1], reverse=True)\n tFatJets[tidx] = tFatJetsWithVoting[tidx].items()\n tFatJets[tidx].sort(key=lambda k : k[1], reverse=True)\n tGenJetAK8s[tidx] = tGenJetAK8sWithVoting[tidx].items()\n tGenJetAK8s[tidx].sort(key=lambda k : k[1], reverse=True)\n bJets[tidx] = bJetsWithVoting[tidx].items()\n bJets[tidx].sort(key=lambda k : k[1], reverse=True)\n bGenJets[tidx] = bGenJetsWithVoting[tidx].items()\n bGenJets[tidx].sort(key=lambda k : k[1], reverse=True)\n bFatJets[tidx] = bFatJetsWithVoting[tidx].items()\n bFatJets[tidx].sort(key=lambda k : k[1], reverse=True)\n bGenJetAK8s[tidx] = bGenJetAK8sWithVoting[tidx].items()\n bGenJetAK8s[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau1Jets[tidx] = WDau1JetsWithVoting[tidx].items()\n WDau1Jets[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau1GenJets[tidx] = WDau1GenJetsWithVoting[tidx].items()\n WDau1GenJets[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau1FatJets[tidx] = WDau1FatJetsWithVoting[tidx].items()\n WDau1FatJets[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau1GenJetAK8s[tidx] = WDau1GenJetAK8sWithVoting[tidx].items()\n WDau1GenJetAK8s[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau2Jets[tidx] = WDau2JetsWithVoting[tidx].items()\n WDau2Jets[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau2GenJets[tidx] = WDau2GenJetsWithVoting[tidx].items()\n WDau2GenJets[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau2FatJets[tidx] = WDau2FatJetsWithVoting[tidx].items()\n 
WDau2FatJets[tidx].sort(key=lambda k : k[1], reverse=True)\n WDau2GenJetAK8s[tidx] = WDau2GenJetAK8sWithVoting[tidx].items()\n WDau2GenJetAK8s[tidx].sort(key=lambda k : k[1], reverse=True)\n \n if returnCopy:\n return {'tJets': copy.copy(tJets), 'tGenJets': copy.copy(tGenJets), 'tFatJets': copy.copy(tFatJets),\n 'tGenJetAK8s': copy.copy(tGenJetAK8s), 'bJets': copy.copy(bJets), 'bGenJets': copy.copy(bGenJets),\n 'bFatJets': copy.copy(bFatJets), 'bGenJetAK8s': copy.copy(bGenJetAK8s), 'WDau1Jets': copy.copy(WDau1Jets),\n 'WDau1GenJets': copy.copy(WDau1GenJets), 'WDau1FatJets': copy.copy(WDau1FatJets),\n 'WDau1GenJetAK8s': copy.copy(WDau1GenJetAK8s), 'WDau2Jets': copy.copy(WDau2Jets),\n 'WDau2GenJets': copy.copy(WDau2GenJets), 'WDau2FatJets': copy.copy(WDau2FatJets),\n 'WDau2GenJetAK8s': copy.copy(WDau2GenJetAK8s)\n }\n else:\n return {'tJets': tJets, 'tGenJets': tGenJets, 'tFatJets': tFatJets, 'tGenJetAK8s': tGenJetAK8s,\n 'bJets': bJets, 'bGenJets': bGenJets, 'bFatJets': bFatJets, 'bGenJetAK8s': bGenJetAK8s,\n 'WDau1Jets': WDau1Jets, 'WDau1GenJets': WDau1GenJets, 'WDau1FatJets': WDau1FatJets,\n 'WDau1GenJetAK8s': WDau1GenJetAK8s, 'WDau2Jets': WDau2Jets, 'WDau2GenJets': WDau2GenJets,\n 'WDau2FatJets': WDau2FatJets, 'WDau2GenJetAK8s': WDau2GenJetAK8s\n }", "title": "" }, { "docid": "f3b3b73a938c1373b44c9d385508b923", "score": "0.47761238", "text": "def add_hydrogens(self):\n count = 0\n for residue in self.residues:\n if not isinstance(residue, (aa.Amino, na.Nucleic)):\n continue\n for atomname in residue.reference.map:\n if not atomname.startswith(\"H\"):\n continue\n if residue.has_atom(atomname):\n continue\n if isinstance(residue, aa.CYS) and (\n residue.ss_bonded and atomname == \"HG\"\n ):\n continue\n if hasattr(residue, \"rebuild_tetrahedral\"):\n # If this hydrogen is part of a tetrahedral group,\n # follow a different codepath\n if residue.rebuild_tetrahedral(atomname):\n count += 1\n continue\n else:\n _LOGGER.warning(\n \"Tetrahedral hydrogen reconstruction not available \"\n \"for nucleic acids. 
Some hydrogens may be missing (if \"\n \"so, this is a bug).\"\n )\n # Otherwise use the standard quatfit methods\n coords = []\n refcoords = []\n refatomcoords = residue.reference.map[atomname].coords\n bondlist = residue.reference.get_nearest_bonds(atomname)\n for bond in bondlist:\n if bond == \"N+1\":\n atom = residue.peptide_n\n elif bond == \"C-1\":\n atom = residue.peptide_c\n else:\n atom = residue.get_atom(bond)\n if atom is None:\n continue\n # Get coordinates, reference coordinates\n coords.append(atom.coords)\n refcoords.append(residue.reference.map[bond].coords)\n # Exit if we have enough atoms\n if len(coords) == 3:\n break\n if len(coords) == 3:\n newcoords = quat.find_coordinates(\n 3, coords, refcoords, refatomcoords\n )\n residue.create_atom(atomname, newcoords)\n count += 1\n else:\n _LOGGER.warning(\n f\"Couldn't rebuild {atomname} in {residue}!\"\n )\n _LOGGER.debug(f\" Added {count} hydrogen atoms.\")", "title": "" }, { "docid": "3ec1b6473628b6d5ab71a6b5347ab236", "score": "0.47716314", "text": "def calcFitness(self, geneSerial, data, D):\n alpha = self.m_Alpha\n beta = self.m_Beta\n # alpha and beta are weight factor\n customers = []\n fitness = 0\n for item in data.CUSTOMERS:\n tmp = Customer()\n tmp.x = copy.deepcopy(item.x)\n tmp.y = copy.deepcopy(item.y)\n tmp.demand = copy.deepcopy(item.demand)\n customers.append(tmp)\n providers = []\n sigmaCost = 0\n sigmaCapacity = 0\n sigmaDemand = 0\n mmd = self.m_D * 1000.0\n for i in range(0, len(geneSerial)):\n tmpProvider = Provider()\n tmpProvider.x = copy.deepcopy(data.PROVIDERS[i].x)\n tmpProvider.y = copy.deepcopy(data.PROVIDERS[i].y)\n tmpProvider.capacity = copy.deepcopy(data.PROVIDERS[i].capacity[geneSerial[i]])\n tmpProvider.cost = copy.deepcopy(data.PROVIDERS[i].cost[geneSerial[i]])\n sigmaCost = sigmaCost + tmpProvider.cost\n sigmaCapacity = sigmaCapacity + tmpProvider.capacity\n providers.append(tmpProvider)\n for item in customers:\n sigmaDemand = sigmaDemand + item.demand\n\n if sigmaCapacity >= sigmaDemand:\n swapchainsolver = SwapChainSolver(providers, customers)\n mmd = swapchainsolver.Solver()\n if mmd > D:\n fitness = -4.0\n else:\n if sigmaCost != 0:\n fitness = float(4.0 / sigmaCost)\n else:\n fitness = 8.0\n else:\n fitness = -8.0\n # print(\"fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand:\",fitness,mmd,sigmaCapacity,sigmaCost,sigmaDemand)\n return math.exp(fitness), mmd, sigmaCapacity, sigmaCost, sigmaDemand", "title": "" }, { "docid": "4137d7755afe53e1cde7a8bf4f84b0cb", "score": "0.47685122", "text": "def _best_hyps(self, hyps, normalize_by_length=False):\n # This length normalization is only effective for the final results.\n if normalize_by_length:\n return sorted(\n hyps,\n key=lambda h: h.prob / len(h.context) ** self.alpha_length_norm,\n reverse=True,\n )\n else:\n return sorted(hyps, key=lambda h: h.prob, reverse=True)", "title": "" }, { "docid": "d13981309314750f4e33e46e8b58b2d7", "score": "0.4767489", "text": "def calculate_perturbation_propensity(protein, source_residues):", "title": "" }, { "docid": "a99265349c29b9fc2c759ffb2fcf96e5", "score": "0.47611284", "text": "def Transform(self,\n pH=constants.DEFAULT_PH,\n pMg=constants.DEFAULT_PMG,\n ionic_strength=constants.DEFAULT_IONIC_STRENGTH):\n\n dG = self.formation_energy\n \n # add the potential related to the pH\n if self.number_of_hydrogens > 0:\n dG += self.number_of_hydrogens * constants.RTlog10 * pH\n \n # add the potential related to the ionic strength\n dG -= 2.91482 * (self.net_charge ** 2 - self.number_of_hydrogens) * \\\n 
numpy.sqrt(ionic_strength) / (1 + 1.6 * numpy.sqrt(ionic_strength))\n \n # add the potential related to the magnesium ions\n if self.number_of_mgs > 0:\n dG += self.number_of_mgs * \\\n (constants.RTlog10 * pMg - constants.MG_FORMATION_ENERGY)\n\n return dG", "title": "" }, { "docid": "53501ea07042b78f31a2d7aed3782f3b", "score": "0.47577044", "text": "def generationStep(self, percentElite=.00):\n i = 0\n cutoffIndex = int(round(percentElite * self.populationSize))\n eliteCount = 0\n while i < self.populationSize:\n # take a percentage of elite and pass to next gen\n if i < cutoffIndex: \n self.newPopulation[i] = copy.deepcopy(\n self.population[self.sortDict[eliteCount].id])\n eliteCount = eliteCount + 1\n i = i + 1\n continue\n \n chromoDad, chromoMom = self._getParents()\n if (i < self.populationSize-1):\n son = Chromosome(self.fitVector, self.mutationRate, self.beatTime)\n daughter = Chromosome(\n self.fitVector, self.mutationRate, self.beatTime)\n # third arg is ref to offspring obj\n self._mate(chromoDad, chromoMom, [son, daughter]) \n\n self.newPopulation[i] = son\n self.newPopulation[i].mutate()\n self.newPopulation[i].calcFitness()\n self.newPopulation[i+1] = daughter\n self.newPopulation[i+1].mutate()\n self.newPopulation[i+1].calcFitness()\n i = i + 2\n else: # case of an odd sized population, last chromo\n son = Chromosome(self.fitVector, self.mutationRate, self.beatTime)\n # third arg is ref to offspring obj\n self._mate(chromoDad, chromoMom,[son,]) \n self.newPopulation[i] = son\n self.newPopulation[i].mutate() #prop here\n self.newPopulation[i].calcFitness()\n i = i + 1\n \n for i in range(0, self.populationSize):\n self.population[i] = copy.deepcopy(self.newPopulation[i])\n del self.newPopulation[i]\n self._sortPopulation()\n # returns key of best vector in population \n self.bestC = self.sortDict[0].id", "title": "" }, { "docid": "9d04f159633a62f40639d80fca36e657", "score": "0.4754033", "text": "def gen_hess_fun(cpdag, ref_cpdag, num_sample=1, exact=True, total_x = 1, is_tree=False):\n n = cpdag.shape[0]\n def hess_fun(intervention_set, x, e):\n \"\"\"\n estimates the hessian for gred\n \"\"\"\n #sample the intervention given x\n \n dags = mec_size.uniform_sample_dag_plural(cpdag, num_sample, exact=exact)\n hess = np.zeros((n, n))\n for dag in dags:\n computed_val = {}\n #do runs for multiple different samples of x\n for _ in range(total_x):\n S = []\n for s in range(n):\n if e[s] < x[s]:\n S.append(s)\n for i in range(n):\n for j in range(i, n):\n if i == j:\n continue\n S_ij = list({i, j}.union(set(S)))\n S_i = list({i}.union(set(S)) - {j})\n S_j = list({j}.union(set(S)) - {i})\n S_minus = list(set(S) - {i,j}) #the set with both indices removed\n for S_mod in [S_ij, S_i, S_j, S_minus]:\n if np.array(S_mod).tobytes() not in computed_val:\n cpdag_new = orient_from_intervention(dag, cpdag.copy(), intervention_set+[S_mod], is_tree=is_tree)\n computed_val[np.array(S_mod).tobytes()] = cpdag_obj_val(cpdag_new)\n \n hess[i,j] += (computed_val[np.array(S_ij).tobytes()]-computed_val[np.array(S_i).tobytes()]-\n computed_val[np.array(S_j).tobytes()]+computed_val[np.array(S_minus).tobytes()])/ (num_sample*total_x) \n #print(time.time()-time2)\n return hess\n return hess_fun", "title": "" }, { "docid": "4ce5f30e344c9086a5df5e82457183e7", "score": "0.4753058", "text": "def hydrophobic(seq): \r\n return seq.count('V') + seq.count('I') + seq.count('L') + \\\r\n seq.count('M') + seq.count('F') + seq.count('W') + seq.count('C')", "title": "" }, { "docid": "e8c45f71cf5584af9dc32303447a0071", 
"score": "0.47438708", "text": "def fitness_function(self, query):\n qarr = np.array(list(self.converter(query)))\n bulls = (qarr == self.sifrem).sum()\n fcow = qarr != self.sifrem\n cows = len(set(qarr[fcow]) & set(self.sifrem[fcow]))\n return bulls * self.cbull + cows * self.ccow", "title": "" }, { "docid": "5356fcd6c29af5223d9677093d08a6bc", "score": "0.47396603", "text": "def test_GenotypePhenotypeMap_calculates_phenotypes_with_correct_data(self):\n \n min_val = 0.0\n max_val = 1000.0\n phenotype = \"bigness\"\n \n self.Map.add_phenotype(phenotype,min_val,max_val,\\\n percent_of_sites=1.0)\n \n genome = self.Map.Phenotypes[phenotype][\"max_trait_array\"]\n #The genome should therefore have the max value for this trait\n \n result = self.Map.get_phenotypes(genome) \n\n \n obs = result[\"bigness\"]\n exp = 1000.0\n self.assertEqual(obs,exp)", "title": "" }, { "docid": "898f84b009e902e554495ad9a7cb9957", "score": "0.47391254", "text": "def costheta(self):\n return self.__vector3d.costheta()", "title": "" }, { "docid": "032a6519e01866b9cea04aa2a4541293", "score": "0.47301444", "text": "def fs_chordwise_area_distribution(flying_surface,type,numpnts = 30):\n pt1 = []; pt2 = []; points = []; t_c_x =[] ; t_c = []; c = []\n for i in range(len(flying_surface[\"Airfoil Leading-Edge X Location\"])):\n pt1.append(flying_surface.loc[i,\"Airfoil Leading-Edge X Location\"])\n pt2.append(pt1[i]+flying_surface.loc[i,\"Airfoil Chord Length\"])\n t_c_x.append(np.asarray(flying_surface.loc[i,\"Airfoil Thickness Function X/c Values\"]))\n t_c.append(np.asarray(flying_surface.loc[i,\"Airfoil Thickness Function Thickness/c Values\"]))\n c.append(np.asarray(flying_surface.loc[i,\"Airfoil Chord Length\"]))\n points.append(pt1[i])\n points.append(pt2[i])\n points.sort()\n y = max(flying_surface[\"Airfoil Leading-Edge Y Location\"])-min(flying_surface[\"Airfoil Leading-Edge Y Location\"])\n ys = np.linspace(0.0,y,numpnts)\n y_db = []\n all_points = np.linspace(min(points),max(points),numpnts)\n for y_ in ys:\n t_db = []\n for j in range(len(all_points)):\n le_pnt_x = np.interp(y_,[0.0,y],[pt1[0],pt1[-1]])\n te_pnt_x = np.interp(y_,[0.0,y],[pt2[0],pt2[-1]])\n if all_points[j] <= le_pnt_x or all_points[j] >= te_pnt_x: t_db.append(0.0)\n else:\n chord = np.interp(y_,[0.0,y],[c[0],c[1]])\n chord_percentage = (all_points[j]-le_pnt_x)/chord\n t_root = np.interp(chord_percentage,t_c_x[0][::-1],t_c[0][::-1])*c[0]\n t_tip = np.interp(chord_percentage,t_c_x[-1][::-1],t_c[-1][::-1])*c[-1]\n t_db.append(np.interp(y_,[0.0,y],[t_root,t_tip]))\n y_db.append(t_db)\n y_db = np.array(y_db)\n if type == \"Vertical Tail\": symmetry = 1.0\n else: symmetry = 2.0\n trapz_db = []\n for k in range(len(all_points)):\n trapz_db.append(symmetry*np.trapz(y_db[:,k],ys))\n trapz_db = np.array(trapz_db)\n return [all_points, trapz_db]", "title": "" }, { "docid": "c11e8a0fdf6e112a9b8e6a8b2ec1952c", "score": "0.47283125", "text": "def classifyGraham(message, kind, prior = 0.5, c = 3.7e-4):\n \n msg_terms = get_words(message)\n p_ham = p_hoax = 1\n n = 0\n for term in msg_terms:\n hoax, ham = getProbability(term)\n p_hoax *= hoax\n p_ham *= ham\n n+=1\n # print(p_ham)\n if (p_hoax == 0) and (p_ham==0):\n prob_hoax = prob_ham = 0\n else :\n # modification tim peter\n p_hoax_all = totalHoax/(totalHoax+totalHam)\n p_ham_all = totalHam/(totalHoax+totalHam)\n #print(\"p_ham_all = \", totalHam, \"/(\", totalHoax , \"+\", totalHam)\n # devide = ((p_hoax_all**(1-n) * p_hoax) + (p_ham_all**(1-n) * p_ham))\n devide = ((p_hoax) + (p_ham))\n # prob_hoax = 
Decimal((p_hoax_all**(1-n) * p_hoax) / devide)\n prob_hoax = Decimal( p_hoax / devide)\n # print(\"phoax_all** = \", p_hoax_all**(1-n), \" p_hoax = \", p_hoax, \" p_ham_all = \", (p_ham_all**(1-n)), \" p_ham = \" , p_ham)\n # prob_ham = Decimal((p_ham_all**(1-n) * p_ham) / devide)\n prob_ham = Decimal(p_ham / devide)\n # print(\"prob_hoax = \", (p_hoax_all**(1-n) * p_hoax), \" prob_ham = \", (p_ham_all**(1-n) * p_ham), \" devide = \", devide)\n print(\"PROBALITY : \", prob_hoax, \" - \", prob_ham)\n return prob_hoax, prob_ham", "title": "" }, { "docid": "228efb9d1a7fdd9607a1f90138dd311a", "score": "0.47250277", "text": "def boltzmannq(state, fitness, temperature=1.):\n exploitation = (1. / temperature) * replicator(state, fitness)\n exploration = (np.log(state) - state.dot(np.log(state).transpose()))\n return exploitation - state * exploration", "title": "" } ]
d5903c89d6dfcc07cfe76673180a10a9
Test deleting a user account without a password key
[ { "docid": "c59feb9eaa1d7a89cbd9585b060d0535", "score": "0.8261928", "text": "def test_deleting_account_without_password_key(self):\n self.create_user()\n self.create_category()\n self.create_recipe()\n with self.client:\n headers = self.helper_login_with_token()\n response = self.client.delete('/auth/delete-account',\n content_type='application/json',\n headers=headers)\n reply = json.loads(response.data.decode())\n self.assertEqual(reply['Error'], 'Password is missing')", "title": "" } ]
[ { "docid": "ce8c332843fdef45824dd2dfebf1fc0c", "score": "0.81951886", "text": "def test_deleting_account_with_empty_password_value(self):\n self.create_user()\n with self.client:\n headers = self.helper_login_with_token()\n response = self.client.delete('/auth/delete-account',\n content_type='application/json',\n headers=headers,\n data=json.dumps(\n dict(password='')))\n reply = json.loads(response.data.decode())\n self.assertEqual(reply['Error'], 'password is empty')", "title": "" }, { "docid": "9be521abbd440b09598301576ff8d5a5", "score": "0.811218", "text": "def test_delete_account(self):\n pass", "title": "" }, { "docid": "6cf613d5830e8448149f2b10e6e9226e", "score": "0.80213684", "text": "def test_delete_user(self):\n pass", "title": "" }, { "docid": "6cf613d5830e8448149f2b10e6e9226e", "score": "0.80213684", "text": "def test_delete_user(self):\n pass", "title": "" }, { "docid": "c60eeb56da95c815b82f8cf40ccfbe67", "score": "0.79991794", "text": "def test_delete_user_using_delete(self):\n pass", "title": "" }, { "docid": "4386cdebfcefccde1442ac7ffc80ad4e", "score": "0.79756904", "text": "def test_for_delete_non_existing_user(self):\n self.usr.delete_account(1)\n result = self.usr.delete_account('')\n self.assertEqual({'status': 'error', 'message': 'Sorry, could not delete User'}, result)", "title": "" }, { "docid": "8decaca5657eec16eb440ef3dff2f4af", "score": "0.7819363", "text": "def test_deleting_account_with_wrong_password(self):\n self.create_user()\n with self.client:\n headers = self.helper_login_with_token()\n response = self.client.delete('/auth/delete-account',\n content_type='application/json',\n headers=headers,\n data=json.dumps(\n dict(password='p')))\n reply = json.loads(response.data.decode())\n self.assertEqual(reply['Error'], 'Incorrect password')", "title": "" }, { "docid": "15b21c41b778cf145948d4b88afaa546", "score": "0.77189225", "text": "def test_delete_user(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.filter(email=sample_user.email).count() == 1\n password_data = {\"password\": \"testpassword\"}\n\n response = client.post(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_200_OK\n assert User.objects.filter(email=sample_user.email).count() == 0", "title": "" }, { "docid": "860d1d72ac8ac207927724a59f1fbf20", "score": "0.7715476", "text": "def test_deleting_account_successfully(self):\n self.create_user()\n with self.client:\n headers = self.helper_login_with_token()\n response = self.client.delete('/auth/delete-account',\n content_type='application/json',\n headers=headers,\n data=json.dumps(\n dict(password=self.test_user_password\n )))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "223485a24ca5b8788a85c25fba565569", "score": "0.7682984", "text": "def test_delete_identity_credentials(self):\n pass", "title": "" }, { "docid": "42606498a10bfbc49ace5b70fd66b8b7", "score": "0.76620036", "text": "def test_delete_user(self):", "title": "" }, { "docid": "05349124aadf655afdd413dee730cdc9", "score": "0.76371795", "text": "def test_delete_user(self):\n user = MyUser.objects.get(username='testuser')\n response = self.client.delete(\"/api/v1/users/\", user, content_type='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.client.login(**self.credentials)\n response = self.client.delete(\"/api/v1/users/\", user, content_type='json')\n self.assertEqual(response.status_code, 
status.HTTP_405_METHOD_NOT_ALLOWED)", "title": "" }, { "docid": "02c58dad0ba5d0ab0314fb67b908a353", "score": "0.7622989", "text": "def test_change_password_fail_delete(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n response = client.delete(reverse(\"user-change-password\"), format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED", "title": "" }, { "docid": "97aad62ff241932b33220b421899218a", "score": "0.7619505", "text": "def test_delete_user_fail_while_logged_out(sample_user):\n\n client = APIClient()\n\n assert User.objects.all().count() == 1\n password_data = {\"password\": \"testpassword\"}\n\n response = client.post(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert User.objects.all().count() == 1", "title": "" }, { "docid": "83af908ed10dfa4115de37a424bc0ec1", "score": "0.76193917", "text": "def test_delete(self):\r\n\r\n # Setup\r\n self.user_manager.create_user('doomed')\r\n\r\n # Test\r\n status, body = self.delete('/v2/users/doomed/')\r\n\r\n # Verify\r\n self.assertEqual(200, status)\r\n\r\n user = User.get_collection().find_one({'login' : 'doomed'})\r\n self.assertTrue(user is None)", "title": "" }, { "docid": "578c562deafb4089599ea8314e9ca569", "score": "0.7599824", "text": "def test_delete_user_fail_short_password(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.filter(email=sample_user.email).count() == 1\n password_data = {\"password\": \"a\"}\n\n response = client.post(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert User.objects.filter(email=sample_user.email).count() == 1", "title": "" }, { "docid": "c5cd80983abebdf2ea73b5421be59720", "score": "0.75990146", "text": "def test_delete_user_fail_delete(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.all().count() == 1\n\n response = client.delete(reverse(\"user-delete-user\"), format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n assert User.objects.all().count() == 1", "title": "" }, { "docid": "62d9732eca100d8140a04f69058b8ad0", "score": "0.7597292", "text": "def test_cannot_delete_account(self):\n self.client.force_login(self.user)\n response = self.client.delete(f'/users/{self.user.username}/')\n self.assertEqual(405, response.status_code)", "title": "" }, { "docid": "37f0e7d53eeac942d37958649245e750", "score": "0.757454", "text": "def test_user_delete(self):\n self.new_user.save_user()\n test_user = User(\"wesley\", \"mutwiri\", \"[email protected]\", 33126755)\n test_user.save_user()\n self.new_user.delete_user() # to delete a credentials object\n self.assertEqual(len(User.user_list), 1)", "title": "" }, { "docid": "d4aed48c1d6c8f0f8256f183ae0c4bf0", "score": "0.75678504", "text": "def test_users_delete(self):\n with app.app_context():\n User.create('todelete', '[email protected]', 'password')\n db.session.commit()\n status, data = self.delete('{}/{}'.format(USERS_URL, 'todelete'))\n self.assertEqual(status, 204)", "title": "" }, { "docid": "2ba9c10147f13ef5dffb44a9cf1ea64c", "score": "0.7546728", "text": "def test_delete_user(self):\n user = get_existing_user()\n\n self.delete_user(user.user_id)\n self.get_user(user.user_id, statuscode=statuscode.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": 
"8b7f2c843ead2366fe2da065f218414e", "score": "0.7534797", "text": "def test_delete_user_fail_wrong_password(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.filter(email=sample_user.email).count() == 1\n password_data = {\"password\": \"wrongpassword\"}\n\n response = client.post(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert User.objects.filter(email=sample_user.email).count() == 1", "title": "" }, { "docid": "e120fa0b25b4fef04d6ad5859181b700", "score": "0.7516004", "text": "def test_delete_user_fail_wrong_data(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.all().count() == 1\n\n password_data = {\"email\": \"[email protected]\"}\n response = client.post(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert User.objects.all().count() == 1", "title": "" }, { "docid": "f29f42883cf8b404e686caa906cbe60b", "score": "0.7437611", "text": "def test_user_tsp_delete(self):\n pass", "title": "" }, { "docid": "b9a09185b3b974802c72a725a74fca23", "score": "0.7416217", "text": "def test_delete_nonexisting_user(self):\n user_id = get_uuid()\n\n response = self.delete_user(user_id, statuscode=statuscode.HTTP_404_NOT_FOUND)\n assert response.json()[\"identifier\"] == user_id", "title": "" }, { "docid": "934f8c0f8de5b0fb866ffb1019d36649", "score": "0.7408929", "text": "def test_delete_user_fail_put(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.all().count() == 1\n password_data = {\"password\": \"testpassword\"}\n\n response = client.put(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n assert User.objects.all().count() == 1", "title": "" }, { "docid": "98db4e3701f8c7c2be4315f44e02e97f", "score": "0.7382731", "text": "def test_deleteUser(self):\n response = self.client.open(\n '/user/{username}'.format(username=\"string\"),\n method='DELETE',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "9bb1efecfc282a9b6dd7b9346f1d9de9", "score": "0.7379028", "text": "def test_delete_credential(self):\n self.new_credential.save_details()\n test_credential = Credentials(\"Facebook\",\"Kingsleymuturi\",\"$Any!Pass0*\")\n test_credential.save_details()\n\n self.new_credential.delete_credentials()\n self.assertEqual(len(Credentials.credentials_list),1)", "title": "" }, { "docid": "2ca1729cc0649b494597d00003101bf1", "score": "0.7378339", "text": "def test_delete_user(self):\n unwanted_user = User(roles=[default_role])\n del unwanted_user\n assert 'unwanted_user' not in dir()", "title": "" }, { "docid": "94ae11cd827761ecf91cf03bb72dcd46", "score": "0.735086", "text": "def test_delete_user(self):\n models.User.get(username=\"testUsername\").delete_instance()\n with self.assertRaises(Exception):\n models.User.get(username=\"testUsername\").delete_instance()", "title": "" }, { "docid": "d5d0994249cf6c3c041a7e69d9650b47", "score": "0.7349374", "text": "def test_delete_password(self):\n self.new_password.save_password()\n test_password = Password(\"Test\",\"user\",\"email\",\"password\")\n test_password.save_password()\n self.new_password.delete_password()\n 
self.assertEqual(len(Password.password_list),1)", "title": "" }, { "docid": "2c4882598a64bd8f327165ae3ebe568d", "score": "0.7348258", "text": "def test_delete_user(self):\n\n self.app.delete_user(self.username)\n dir = os.path.split(self.cwd)[0]\n with open(dir + \"/src/users.json\", \"r\") as userfile:\n users = json.load(userfile)\n user_exists = True if self.username in users else False\n self.assertFalse(user_exists)", "title": "" }, { "docid": "623358a191b86164b84bc7c1d7f08dac", "score": "0.7342638", "text": "def test_delete_user(self):\n self.new_user.save_user()\n testing_user = User(\"Kipyegon\", \"PyegonPtalz#75\")\n testing_user.save_user()\n\n self.new_user.delete_user()\n self.assertEqual(len(User.users_list), 1)", "title": "" }, { "docid": "8cb830f81dd1bf5d0019938231707d12", "score": "0.73225504", "text": "def test_delete_user(self):\n print('(' + self.test_delete_user.__name__ + ')',\n self.test_delete_user.__doc__)\n resp = self.connection.delete_user(USER1_NICKNAME)\n self.assertTrue(resp)\n # Check that the users has been really deleted through a get\n resp2 = self.connection.get_user(USER1_NICKNAME)\n self.assertIsNone(resp2)", "title": "" }, { "docid": "054f549712958e98552343bda3c87290", "score": "0.73208517", "text": "def test_delete_xapi_credential(self):\n pass", "title": "" }, { "docid": "7a858d96bdc28db64aab25b6f879d593", "score": "0.7314366", "text": "def test_3_delete_user(self):\n\n self.add_user(\"Another\", \"Person\")\n delete_res = self.client.get(\"/users/2/delete\")\n\n self.assertEqual(delete_res.status_code, 302)\n self.assertEqual(User.query.filter_by(id=2).first(), None)", "title": "" }, { "docid": "e6565d5505f825a58bc7460dd39f2a5b", "score": "0.7309137", "text": "def test_delete_user_no_login(self):\n with self.client as c:\n resp = c.get(f'/userdetail/{self.testuser.id}/delete', follow_redirects=True)\n html = resp.get_data(as_text=True)\n self.assertIn('Welcome to Rowable', html)", "title": "" }, { "docid": "a70947227156b963fe6d39ccb0721966", "score": "0.72509134", "text": "def test_delete_user(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"users/{self.user_id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Chris Hemsworth\", html)", "title": "" }, { "docid": "ba04a94ab1cf771e8faac1c2e560c92e", "score": "0.7247352", "text": "def test_delete_user(self):\n \n self.new_user.save_user()\n test_user = User(\"Facebook\",\"Bihawa Mohamed\",\"65432*\")\n test_user.save_user()\n \n self.new_user.delete_user()\n self.assertEqual(len(User.user_list),1)", "title": "" }, { "docid": "0caf7e6102ca322e889e8fe3932eaf2e", "score": "0.7241279", "text": "def test_user_user_uuid_delete(self):\n self.test_user_post()\n ret =self.api.user_user_uuid_delete(user_uuid=self.users[0])\n del self.users[0]\n assert (ret.success)\n pass", "title": "" }, { "docid": "2ce4194932235923988abf3d25dd8536", "score": "0.72259647", "text": "def test_deleteuser(self):\n response = self.client.open(\n '/users/{userId}'.format(userId='userId_example'),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "84e7e67d18429a7297f4f3f6ae166a70", "score": "0.7210574", "text": "def test_delete_user_fail_get(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.all().count() == 1\n\n response = client.get(reverse(\"user-delete-user\"), 
format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n assert User.objects.all().count() == 1", "title": "" }, { "docid": "78eaa153ee51ce8b60a21f21f2b537ea", "score": "0.7194363", "text": "def test_delete_credentials(self):\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"status\",\"clinton\",\"454545\",\"[email protected]\")\n test_credentials.save_credentials()\n\n self.new_credentials.delete_credentials()\n self.assertEqual(len(Credentials.credentials_list),1)", "title": "" }, { "docid": "31968d68b12ed6dce6e534c4589ffd8d", "score": "0.71910864", "text": "def test_remove_user(self):\n # Remove the user to clean up the database and allow the test to be run multiple times\n message = database.remove_user(self.username, self.password)\n\n self.l.write_to_log(\n f\"{self.l.get_datetime_string()} - function tested: database.remove_user, function output: {message}, \"\n f\"desired output: Success\")\n\n self.assertEqual('Success', message)\n\n # Make sure the user is gone\n d, message = database.login(self.username, self.password)\n self.assertEqual('User does not exist', message)", "title": "" }, { "docid": "3695eef0a74d87723a851b9bc39c2d49", "score": "0.71883434", "text": "def test_delete_user_protect(self):\n self.assertRaises(ProtectedError, self.user.delete)", "title": "" }, { "docid": "6ac8b7f636ccfcb6ef70f095b45dfdc0", "score": "0.7187184", "text": "def test_delete_user(self):\r\n\r\n with app.test_client() as client:\r\n with client.session_transaction() as change_session:\r\n # simulating user login\r\n change_session['username'] = \"jlbrem\"\r\n resp = client.post(\"/users/jlbrem/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True) \r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Create Your Account\", html)\r\n self.assertIn(\"User deleted!\", html)\r\n self.assertIsNone(Feedback.query.get(self.feedback.id))\r\n change_session.pop(\"username\")", "title": "" }, { "docid": "37a566f8bdd11c3c8fc38aafd9b99eb4", "score": "0.71652466", "text": "def test_delete_user_fail_patch(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n assert User.objects.all().count() == 1\n password_data = {\"password\": \"testpassword\"}\n\n response = client.patch(reverse(\"user-delete-user\"), password_data, format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n assert User.objects.all().count() == 1", "title": "" }, { "docid": "ed3a73f660e44e651ff3492287385ff7", "score": "0.714482", "text": "def test_delete_user(self):\n\n self.new_user.save_user()\n test_user = User(\"Trump\", \"1234\")\n test_user.save_user()\n\n self.new_user.delete_user()\n self.assertEqual(len(User.user_list), 1)", "title": "" }, { "docid": "9070a79ccd17d24f64d86d7b41b670c0", "score": "0.71330756", "text": "def test_deactive_user(self):\n user = self.user\n self.client.get(reverse('user:security'), {\"supprimer\": user})\n user_login = self.client.login(username=user, password=\"123456\")\n self.assertFalse(user_login)", "title": "" }, { "docid": "c01e42897832ddd0b3424af464730239", "score": "0.71303135", "text": "def test_delete_privilege(self):\n pass", "title": "" }, { "docid": "f2e1cba782dbd26edfd828732965fa9d", "score": "0.71288675", "text": "async def test_user_delete(client: AsyncClient) -> None:\n await add_empty_user()\n email = '[email protected]'\n auth = await auth_headers(client, email)\n resp = await client.get('/users/me', headers=auth)\n assert 
resp.status_code == 200\n data = resp.json()\n assert data['email'] == email\n # Delete user\n resp = await client.delete('/users/me', headers=auth)\n assert resp.status_code == 204\n # Check deletion\n resp = await client.get('/users/me', headers=auth)\n assert resp.status_code == 404", "title": "" }, { "docid": "df49197e6d2e9e5fbeb75a84ca77a9c9", "score": "0.7118713", "text": "def tearDown(self):\n self.user.delete()", "title": "" }, { "docid": "df49197e6d2e9e5fbeb75a84ca77a9c9", "score": "0.7118713", "text": "def tearDown(self):\n self.user.delete()", "title": "" }, { "docid": "d3249ce69fb4a14d59b01ea718b35ad9", "score": "0.7078817", "text": "def test_remove_user_from_privilege(self):\n pass", "title": "" }, { "docid": "24c0fde0af648c4e380481d4515ea0bb", "score": "0.7064224", "text": "def test_valid_delete(self):\n\n self.test_valid_create()\n data = query_admin(\"users\")\n user_count = len(data[\"users\"])\n delete_id = None\n \n for user in data[\"users\"]:\n if user[\"screen_name\"] == \"testguy1\":\n delete_id = user[\"id\"]\n break\n\n self.assertTrue(delete_id is not None, \"delete_id should be a value.\")\n\n url = self.base_urls[yp.API_DELETE]\n api = ApiHandler()\n resp = api.fetch(url, None, {\"id\" : delete_id, \"code\" : 58780932341})\n\n self.assertTrue(200 == resp.code,\n \"Received an invalid code: %d\" % resp.code)\n \n new_count = len(query_admin(\"users\")[\"users\"])\n \n self.assertTrue(user_count - 1 == new_count, \"User was not deleted.\")", "title": "" }, { "docid": "90e3cd67acb5bf152b7ec680853cf085", "score": "0.7061282", "text": "def account_user_del(context, test_type):\n logger.info(\"account_user_del | starting\")\n acct = fake_account_with_user()\n account_add(\n context, acct[\"account_name\"], acct[\"email\"], test_type=\"positive\", log=False\n )\n account_user_add(\n context,\n acct[\"account_name\"],\n acct[\"user\"],\n acct[\"passw\"],\n test_type=\"positive\",\n log=False,\n )\n command = assemble_command(\n context,\n \" account user del --account {0} {1}\".format(\n acct[\"account_name\"], acct[\"user\"]\n ),\n )\n logger.debug(\"account_user_del | running command {0}\".format(command))\n try:\n # as long as this doesn't throw an exception or return 4xx, we're ok\n subprocess.run(command.split(), check=True, stdout=subprocess.PIPE)\n log_results_simple(\n \"ok\",\n \"ok\",\n \"positive\",\n \"account_user_del\",\n \"user {0} deleted from account {1}\".format(\n acct[\"user\"], acct[\"account_name\"]\n ),\n )\n logger.info(\"account_user_del | finished\")\n except Exception as e:\n log_explicit_failure(\n test_type, \"account_user_del\", \"failed to del user {0}\".format(acct[\"user\"])\n )\n logger.error(\"account_user_del | error calling anchore-cli: {0}\".format(e))", "title": "" }, { "docid": "58ef166505c1e2fba0feb9fda7712f46", "score": "0.70570886", "text": "def test_register_user_fail_delete():\n\n client = APIClient()\n\n response = client.delete(reverse(\"user-register\"), format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED", "title": "" }, { "docid": "3b07de5a8df7e4dc606ac7e0cf6b5ccd", "score": "0.70570743", "text": "def test_database_project_instance_credential_delete(self):\n pass", "title": "" }, { "docid": "29b802c2b03fb6fca976b55f9724adb4", "score": "0.7040989", "text": "def test_client_bank_account_delete(self):\n pass", "title": "" }, { "docid": "b3cba6a8e815390b24ee3e747dc9f6ec", "score": "0.70308566", "text": "def test_delete_with_auth(self):\n pk = User.objects.get(username='spam').pk\n token = 
User.objects.get(username='egg').auth_token.key\n resp = self.client.delete(\n reverse('crud:detail', args=(pk,)),\n **{'HTTP_AUTHORIZATION': f\"Token {token}\"}\n )\n\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "3e94db9ee7152abe1be1066c910447b7", "score": "0.7026746", "text": "def test_remove_user_from_table_with_user(self):\n pass", "title": "" }, { "docid": "dd87dddbf1982a44501f025aa8dfd589", "score": "0.7024675", "text": "def delete_user(self, user_name: str) -> bool:", "title": "" }, { "docid": "bdb35cd487439cd22d26e95c469c2819", "score": "0.70160556", "text": "def test_delete_identity(self):\n pass", "title": "" }, { "docid": "c17b2d64d43ce6b36bbbfac61205a0e1", "score": "0.69991064", "text": "def test_delete_user():\n response = client.delete(\"/coach/delete/1\")\n assert response.status_code == 200\n assert response.json() == \"L'utilisateur 1 a รฉtรฉ supprimรฉ\"", "title": "" }, { "docid": "96cd00c4bdb70da9106686e3a26f1a1e", "score": "0.6994739", "text": "def test_delete(self):\r\n\r\n # Setup\r\n self.role_manager.create_role(role_id = 'role-1')\r\n self.user_manager.create_user(login = 'user-1', roles = ['role-1'])\r\n\r\n\r\n # Test\r\n status, body = self.delete('/v2/roles/role-1/users/user-1/')\r\n\r\n # Verify\r\n self.assertEqual(200, status)\r\n\r\n user = User.get_collection().find_one({'login' : 'user-1'})\r\n self.assertFalse('role-1' in user['roles'])", "title": "" }, { "docid": "c6ab58d2afa163fa9128c5a8b30ab234", "score": "0.6978858", "text": "def test_self_delete(self):\n user = User.objects.get(username='egg')\n token, pk = user.auth_token.key, user.pk\n resp = self.client.delete(\n reverse('crud:detail', args=(pk,)),\n **{'HTTP_AUTHORIZATION': f\"Token {token}\"}\n )\n self.assertEqual(resp.status_code, status.HTTP_409_CONFLICT)", "title": "" }, { "docid": "c0c6a4d4a8e4c44dfe5409bab7b53bbc", "score": "0.69768137", "text": "def test_other_user_delete(self):\n self.client.force_login(User.objects.create_user('other_user', '[email protected]', 'password'))\n response = self.client.delete(reverse('blogs:delete', args=(self.post.pk,)))\n self.assertEqual(response.status_code, 403)", "title": "" }, { "docid": "3d171a8cd9631d8502f9d7bf997e2788", "score": "0.6969309", "text": "def deletes_user(user):\n user.delete_user()", "title": "" }, { "docid": "dba7815095adba041075f01c0081c1b8", "score": "0.69518363", "text": "def test_deleting_user(self):\r\n response = self.app.delete(\r\n \"/api/1.0/users/{}\".format(self.valid_users[0]),\r\n headers={\r\n 'User': self.valid_users[0],\r\n 'Authorization': self.access_token\r\n }\r\n )\r\n\r\n user = Users.query.filter_by(UserID=self.valid_users[0]).first()\r\n access_mapping = UsersAccessMapping.query.filter_by(UserID=self.valid_users[0]).first()\r\n access_tokens = UsersAccessTokens.query.filter_by(UserID=self.valid_users[0]).all()\r\n\r\n self.assertEqual(204, response.status_code)\r\n self.assertEqual(\"\", response.data.decode())\r\n\r\n self.assertEqual(None, user)\r\n self.assertEqual(None, access_mapping)\r\n self.assertEqual([], access_tokens)", "title": "" }, { "docid": "8e126b1b065ccdc1ea186ffcaa8863d8", "score": "0.6940705", "text": "def test_delete_account(self):\n # Register test users\n self.register_test_users()\n\n # Try and delete their accounts without logging in (should fail)\n for account in self.users:\n response = self.client.delete('/accounts/{}/'.format(account.pk))\n self.assertNotEqual(response.status_code, status.HTTP_204_NO_CONTENT, 'Was able to delete 
accounts without logging in')\n\n for i in range(N_TEST_USERS):\n # login as user and try to delete all accounts\n self.login(USERNAMES[i])\n # Try and delete their accounts (we have to start indexing at i because the users before i have been deleted)\n for account in self.users[i:]:\n # If account belongs to the user that's logged it, delete should be successful\n if account == self.users[i]:\n response = self.client.delete('/accounts/{}/'.format(account.pk))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, 'Test user was not able to delete it\\'s own account')\n\n # Otherwise delete should fail\n else:\n response = self.client.delete('/accounts/{}/'.format(account.pk))\n self.assertNotEqual(response.status_code, status.HTTP_204_NO_CONTENT, 'Test user was able to delete an account that did not belong to it')\n self.logout()\n\n # Check that admins can delete any account\n # Register test users\n self.register_test_users()\n\n self.login(ADMIN_USERNAME)\n\n # Try and delete accounts as admin (should succeed in all cases)\n for account in self.users:\n response = self.client.delete('/accounts/{}/'.format(account.pk))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, 'Admin was not able to delete an account')\n\n self.logout()", "title": "" }, { "docid": "5785d1c1136ec73908dfc3531576b50d", "score": "0.6918495", "text": "def test_delete_user_successful(self):\n url = detail_url(self.user.id)\n res = self.client.delete(url)\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.user.refresh_from_db()\n\n self.assertFalse(self.user.is_active)", "title": "" }, { "docid": "509244d62be3262cabaa50a0fb55d1c2", "score": "0.691098", "text": "def test_delete_user_by_id():\n syn = syncope.Syncope(syncope_url=\"http://192.168.1.145:9080\", username=\"admin\", password=\"password\")\n user_data = syn.get_user_by_name(\"wdijkerman\")\n user_id = int(user_data['id'])\n print str(user_id)\n assert syn.delete_user_by_id(user_id) == True", "title": "" }, { "docid": "31c9ba85abd6e1001399b2c6ad553e89", "score": "0.68981916", "text": "def test_delete_existant_table_with_user(self):\n pass", "title": "" }, { "docid": "b3ebed6eb60c780a74ec8080eb4e82c0", "score": "0.68862504", "text": "def tearDown(self):\n try:\n models.User.delete().execute()\n except:\n pass", "title": "" }, { "docid": "b11ae0e158616bcc111c2fd528a15c0c", "score": "0.6886152", "text": "def test_delete_missing_user(self):\r\n\r\n # Test\r\n status, body = self.delete('/v2/users/fake/')\r\n\r\n # Verify\r\n self.assertEqual(404, status)", "title": "" }, { "docid": "75679b7a8a52a0f259273d993d2831ad", "score": "0.6880054", "text": "def test_users_del_user(admin_client, init_database):\n response = admin_client.post(url_for('admin.users'), data={\n 'del_user': 1\n }, follow_redirects=True, headers={'accept-language': 'pl'})\n assert 'Uลผytkownik <strong>testuser</strong> zostaล‚ usuniฤ™ty' \\\n in response.data.decode('utf-8')\n response = admin_client.get(\n url_for('admin.users'),\n follow_redirects=True,\n headers={'accept-language': 'pl'}\n )\n assert 'testuser' not in response.data.decode('utf-8')", "title": "" }, { "docid": "f09c788a36cb6da77acf0fc965863706", "score": "0.6879468", "text": "def test_delete_fail(self):\n user = User.objects.create()\n token, is_created = Token.objects.get_or_create(user=user)\n self.client.credentials(\n HTTP_AUTHORIZATION=\"Token {}\".format(token.key))\n response = self.client.delete(self.bad_url)\n self.assertEqual(HTTP_404_NOT_FOUND, response.status_code)", 
"title": "" }, { "docid": "c9db41cbc53a50c1e5275318ac94927d", "score": "0.6872869", "text": "def delete_user(user_id):\n return", "title": "" }, { "docid": "e34700fc438154f184a1190e73296df8", "score": "0.68531615", "text": "def delete_test_user(self):\n log.info(f\"Deleting RGW test user: {self.uid}\")\n self.toolbox.exec_cmd_on_pod(\n f\"radosgw-admin user rm --uid={self.uid} --purge-data\",\n timeout=self.timeout_clean,\n )", "title": "" }, { "docid": "7016b031f2b57cb9f0b0a10a2f657cb0", "score": "0.685033", "text": "def test_delete_user_single(self):\n usermanager.USERS = self.one_user_no_groups.copy()\n test_username, test_userinfo = usermanager.USERS.items()[0]\n rv = self.app.delete('/users/%s' % (test_username))\n self.assertEquals(200, rv.status_code)\n self.assertEquals(json.loads(rv.data), {})\n self.assertEquals(usermanager.USERS, {})", "title": "" }, { "docid": "9618d5b3f58799a8f70a64230f547feb", "score": "0.6846866", "text": "def deleteUser(user):\n return dbDeleteUser(user)", "title": "" }, { "docid": "862b6a5884cee747d4b641fb311c4b52", "score": "0.68297744", "text": "def DeleteAccounts(self):\n user = User.objects.get(username='test_user')\n user.profile.delete_external_accounts()\n # Pull userids\n user.profile.pull_ids()\n # Make sure userids are now empty\n self.assertEqual(user.profile.wp_userid, None)\n self.assertEqual(user.profile.wiki_userid, None)\n self.assertEqual(user.profile.dam_userid, None)", "title": "" }, { "docid": "9f8c2c9d17e7acb09842793f31004869", "score": "0.68260664", "text": "def test_user_by_email_email_delete(self):\n auth()\n email = bla_mail()\n user=dict(passwd=bla(8),email=email,first_name=bla(6),last_name=bla(8))\n ret=self.api.user_post(body=user)\n ret=self.api.user_by_email_email_delete(email=email)\n assert(ret.success)\n\n pass", "title": "" }, { "docid": "c70410aeb1e015f3d77483871a17d24d", "score": "0.6813349", "text": "def del_user(user):\n\n user.delete_user()\n\n # Finding user", "title": "" }, { "docid": "5a67293a202c1cdc30e3a00f1d0270e7", "score": "0.6801671", "text": "def test_create_and_delete_user(self):\n rebuild_tables()\n build_movie_table()\n\n create_user(\"tempUser\", \"insecurepassword\")\n results = exec_get_all(\"\"\"SELECT username FROM system_users WHERE username = 'tempUser'\"\"\")\n self.assertEqual(results, [('tempUser',)])\n\n delete_user(\"tempUser\")\n results = exec_get_all(\"\"\"SELECT username FROM system_users WHERE username = 'tempUser'\"\"\")\n self.assertEqual(results, [])", "title": "" }, { "docid": "55f493b7b45730b6f0af2e045a1a5ae8", "score": "0.67954755", "text": "def test_delete_profile(self):\n pass", "title": "" }, { "docid": "55f493b7b45730b6f0af2e045a1a5ae8", "score": "0.67954755", "text": "def test_delete_profile(self):\n pass", "title": "" }, { "docid": "82f771601ab7b2a69de602231844a586", "score": "0.67919785", "text": "def tearDown(self):\n User.delete('testUser')\n User.delete('testUser1')\n User.delete('testUser2')", "title": "" }, { "docid": "889eba0020a80009b87066c346da2c4e", "score": "0.6786098", "text": "def test_delete_user_by_another_user(self):\n url = detail_url(self.another_user.id)\n res = self.client.delete(url)\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "4c4f13abce6db596cd030a061bd6a6da", "score": "0.6773442", "text": "def test_delete(self):\n tom_pk = self.tom.pk\n self.c.delete(tom_pk)\n self.assertRaises(User.DoesNotExist,User.objects.get,pk=tom_pk)", "title": "" }, { "docid": "8a0acc934486ecf3e3e428d3a8a64897", "score": 
"0.67505354", "text": "def delete(self):\n\n email = session.get('user')\n if not request.get_json():\n return make_response(jsonify(dict(error='Bad request. Please enter some data')), 400)\n\n password = request.get_json().get('password')\n\n if not User.exists(email=email):\n return make_response(jsonify(dict(error='User does not exist')), 400)\n\n user = User.query.filter_by(email=email).first()\n\n if not user.check_password(password):\n return make_response(jsonify(dict(error='Incorrect password')), 403)\n\n User.delete(email)\n return make_response(jsonify(dict(success=\"Account deleted successfully\")), 200)", "title": "" }, { "docid": "dc4f5477fc9083be93ea02c7595923ff", "score": "0.67495465", "text": "def delete_account(request):\n\n error = False\n if request.method == \"POST\":\n if request.user.check_password(request.POST[\"password\"]):\n request.user.delete()\n return redirect(\"/\")\n else: error = \"Invalid credentials\"\n return render(request, \"delete-account.html\", {\"error\": error})", "title": "" }, { "docid": "7fb5d2631da428a8857493f4b47233d0", "score": "0.67450047", "text": "def test_deleted_user_retrieval(self):\n temp_user = deleted_user()\n self.assertEqual(temp_user.username, 'deleted')", "title": "" }, { "docid": "7fb5d2631da428a8857493f4b47233d0", "score": "0.67450047", "text": "def test_deleted_user_retrieval(self):\n temp_user = deleted_user()\n self.assertEqual(temp_user.username, 'deleted')", "title": "" }, { "docid": "69a6318b00b03d3fea954b476ce859af", "score": "0.67433906", "text": "def test_get_not_auth_users_cannot_delete(self):\n self.client.logout()\n response = self.client.get(\n reverse('delete_task', args=[self.task.pk]),\n )\n self.assertRedirects(response, reverse('login'))", "title": "" }, { "docid": "8c915706532394a5e8758441dd921c91", "score": "0.6737889", "text": "def test_delete_invalid_username_fails(self):\n self.client.force_login(self.user)\n\n response = self.client.delete(\n f'/users/whatever/follows/',\n data=json.dumps({'username': self.other.username}),\n content_type='application/json'\n )\n\n self.assertEqual(404, response.status_code)", "title": "" }, { "docid": "35e1aaa23ce6d90195d0688c810f05c7", "score": "0.6728937", "text": "def test_delete_contact(self):\n self.new_user.add_user()\n test_delete_user = User(\"Test\", \"passwd12\") # new user\n test_delete_user.add_user()\n self.new_user.delete_user() # deletes user\n self.assertEqual(len(User.list_of_users), 1)", "title": "" }, { "docid": "18d4561c54fc816a05d339128c7d5cca", "score": "0.6722525", "text": "def delete_account(request):\n request.user.delete()\n\n return Response()", "title": "" }, { "docid": "a8e1967499c41a1824166fc71784ce82", "score": "0.67204463", "text": "def test_delete_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"JohnD2020#\", \"Twitter\", \"Twitter20*\")\n test_credential.save_credential()\n\n self.new_credential.delete_credential() # Deleting an credential object\n self.assertEqual(len(Credential.credential_list), 1)", "title": "" }, { "docid": "b994c166834569ccc1010b78eddbc862", "score": "0.6707951", "text": "def test_mac_deluser(self):\n # Create a group to use for test - If unsuccessful, skip the test\n if (\n self.run_function(\"group.add\", [ADD_GROUP, 5678])\n and self.run_function(\"group.adduser\", [ADD_GROUP, ADD_USER]) is not True\n ):\n self.run_function(\"group.delete\", [ADD_GROUP])\n self.skipTest(\"Failed to create a group to manipulate\")\n\n delusr = self.run_function(\"group.deluser\", 
[ADD_GROUP, ADD_USER])\n self.assertTrue(delusr)\n\n group_info = self.run_function(\"group.info\", [ADD_GROUP])\n self.assertNotIn(ADD_USER, \"\".join(group_info[\"members\"]))", "title": "" } ]
db4f8aec007b41fb25bf66b292e606fd
Gets the identifier of this AccessPolicy. The id of the policy. Set by server at creation time.
[ { "docid": "b1ad433d654521b2cd6b094150da0ae9", "score": "0.0", "text": "def identifier(self):\n return self._identifier", "title": "" } ]
[ { "docid": "9ad0b7f8b226c09cd01ec534b4950829", "score": "0.7846321", "text": "def policy_id(self):\n return self._policy_id", "title": "" }, { "docid": "c2968e83c5829951cc14f939bb6ea7ea", "score": "0.7770658", "text": "def access_policy_id(self) -> Optional[str]:\n return pulumi.get(self, \"access_policy_id\")", "title": "" }, { "docid": "0e7a87d7f1d3b1d58bc288f259a7a473", "score": "0.74056286", "text": "def policy_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy_id\")", "title": "" }, { "docid": "628582a3eff40e281ff6c6bfaa30f8fb", "score": "0.7270613", "text": "def policy_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"policy_id\")", "title": "" }, { "docid": "2d25193ee6970207e2de9e89213b073f", "score": "0.70301896", "text": "def policy_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_id\")", "title": "" }, { "docid": "861824a4871abb76cdde80da053592df", "score": "0.6994018", "text": "def policy_assignment_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy_assignment_id\")", "title": "" }, { "docid": "4b520e7aa4b0c6973a9b862ac416de8d", "score": "0.6940681", "text": "def policy_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy_id\")", "title": "" }, { "docid": "4b520e7aa4b0c6973a9b862ac416de8d", "score": "0.6940681", "text": "def policy_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy_id\")", "title": "" }, { "docid": "4b520e7aa4b0c6973a9b862ac416de8d", "score": "0.6940681", "text": "def policy_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy_id\")", "title": "" }, { "docid": "661fb2324115e93d593c79e692b9b407", "score": "0.69134337", "text": "def policy_definition_id(self) -> str:\n return pulumi.get(self, \"policy_definition_id\")", "title": "" }, { "docid": "f838f214210df14198ff1d36bb06529a", "score": "0.69132644", "text": "def access_policy_identity(self) -> Optional['outputs.AccessPolicyIdentity']:\n return pulumi.get(self, \"access_policy_identity\")", "title": "" }, { "docid": "b757fc9e7e34eeff9cb0d4a7916e2402", "score": "0.66723585", "text": "def policy_assignment_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_assignment_id\")", "title": "" }, { "docid": "213b6b310b32d7dfdf2444ddcabfa53b", "score": "0.65211993", "text": "def get_id(self):\n return Id(**profile.ID)", "title": "" }, { "docid": "f39cf8aec741d58b476f53090b558860", "score": "0.6438977", "text": "def get_id(self):\n return self._id", "title": "" }, { "docid": "f39cf8aec741d58b476f53090b558860", "score": "0.6438977", "text": "def get_id(self):\n return self._id", "title": "" }, { "docid": "f39cf8aec741d58b476f53090b558860", "score": "0.6438977", "text": "def get_id(self):\n return self._id", "title": "" }, { "docid": "f39cf8aec741d58b476f53090b558860", "score": "0.6438977", "text": "def get_id(self):\n return self._id", "title": "" }, { "docid": "f39cf8aec741d58b476f53090b558860", "score": "0.6438977", "text": "def get_id(self):\n return self._id", "title": "" }, { "docid": "f39cf8aec741d58b476f53090b558860", "score": "0.6438977", "text": "def get_id(self):\n return self._id", "title": "" }, { "docid": "866cab3bc6857a9a59136ca5cf27fc1a", "score": "0.64318925", "text": "def get_id(self):\n \n return self.id", "title": "" }, { "docid": "c61579f490c44cb1ab68d8bc1dd100a7", "score": "0.6426783", "text": "def get_id(self):\r\n return self._id", "title": "" }, { "docid": "80353e82c60b17314bafd3fd55fceaf2", "score": "0.64187187", "text": "def get_id(self):\n 
return self.id_", "title": "" }, { "docid": "e1dfbe2f431fc5eec2aebf0c6cf5612e", "score": "0.6411731", "text": "def policy_definition_id(self) -> Optional[str]:\n return pulumi.get(self, \"policy_definition_id\")", "title": "" }, { "docid": "9d887b32707277a66ee587876d33cd9e", "score": "0.6384955", "text": "def GetId(self):\n return self._id", "title": "" }, { "docid": "db19938e07acbbf3cc5550dca0446b67", "score": "0.63709277", "text": "def id_(self) -> ID:\n return self._id_", "title": "" }, { "docid": "9b5485349fcd478d1999296b1cdf85af", "score": "0.63660115", "text": "def id(self):\n return self.get(self._names[\"id\"])", "title": "" }, { "docid": "90ec7864dec7bcc798975c81b3da747c", "score": "0.6358751", "text": "def get_id(self):\n return self.__id", "title": "" }, { "docid": "90ec7864dec7bcc798975c81b3da747c", "score": "0.6358751", "text": "def get_id(self):\n return self.__id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "47adb8774448e1702ecd622206397edf", "score": "0.63475317", "text": "def get_id(self):\n return self.id", "title": "" }, { "docid": "f31d569b160eb17d731b2a069501b9c7", "score": "0.63403046", "text": "def access_policy_name(self) -> str:\n return pulumi.get(self, \"access_policy_name\")", "title": "" }, { "docid": "023466aef50ce7b28ba64e6448f57d96", "score": "0.63265467", "text": "def getID(self):\n return self._id", "title": "" }, { "docid": "0b7875b2ed0f7dad029102d2fd7f2150", "score": "0.6311781", "text": "def get_id(self) -> str:\n return self._id", "title": "" }, { "docid": "0b7875b2ed0f7dad029102d2fd7f2150", "score": "0.6311781", "text": "def get_id(self) -> str:\n return self._id", "title": "" }, { "docid": "98dced27bdc246a9e9b2ce8d764a0f41", "score": "0.62971395", "text": "def policy_definition_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_definition_id\")", "title": "" }, { "docid": 
"ed5bc796ba3dff36a6d4d146946a620b", "score": "0.6291837", "text": "def _get_id(self):\r\n return self._id", "title": "" }, { "docid": "b26a2a379c4ca49e51b8b79e1aba78a6", "score": "0.6291541", "text": "def id(self):\n\n return self._id.value", "title": "" }, { "docid": "924d21db25f4e0afd4790b93f458abd8", "score": "0.6282087", "text": "def id(self):\n return self.properties.get('Id', None)", "title": "" }, { "docid": "924d21db25f4e0afd4790b93f458abd8", "score": "0.6282087", "text": "def id(self):\n return self.properties.get('Id', None)", "title": "" }, { "docid": "7f25a92352e6040fa51fe74c79f3aa88", "score": "0.626405", "text": "def get_id(self) -> int:\n return self.__id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": 
"0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" }, { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.6262181", "text": "def id(self):\n return self._id", "title": "" } ]
8f5dc2de06c33a5958ec5f8977de11e7
Return a list value for further modification.
[ { "docid": "2a441e6f8dbd5431a200434a00c53189", "score": "0.73336405", "text": "def _get_list_value_for_modification(self, key):\r\n\t\ttry:\r\n\t\t\tvalue = self.table[key]\r\n\t\texcept KeyError:\r\n\t\t\ttry: value = self.parent[key]\r\n\t\t\texcept AttributeError: value = []\r\n\t\t\tif isinstance(value, list):\r\n\t\t\t\tvalue = value[:]\r\n\t\t\telse:\r\n\t\t\t\tvalue = [value]\r\n\t\telse:\r\n\t\t\tif not isinstance(value, list):\r\n\t\t\t\tvalue = [value]\r\n\t\tself.table[key] = value\r\n\t\treturn value", "title": "" } ]
[ { "docid": "41b6a4da55e32ff4b218fe4383986434", "score": "0.7180536", "text": "def listify(self, value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "title": "" }, { "docid": "3fd078fcb797a7be3925f9dc2168d836", "score": "0.70994365", "text": "def toList(self, value):\n if value is None:\n return []\n elif not isinstance(value, list):\n return [value]\n else:\n return value", "title": "" }, { "docid": "7f58b21b63b75f45f676af11940d4823", "score": "0.70008385", "text": "def as_value_list(self):\n if self.cardinality == 'multiple': return self.value\n elif self.value is not None: return [self.value]\n else: return []", "title": "" }, { "docid": "7f58b21b63b75f45f676af11940d4823", "score": "0.70008385", "text": "def as_value_list(self):\n if self.cardinality == 'multiple': return self.value\n elif self.value is not None: return [self.value]\n else: return []", "title": "" }, { "docid": "ad4dd283700108e7848b1a0cf3af7533", "score": "0.69332284", "text": "def listvalues(self):\r\n return list(self.iterlistvalues())", "title": "" }, { "docid": "840555ee349358d8a203a95729363786", "score": "0.69085526", "text": "def listvalues(self):\n return list(self.iterlistvalues())", "title": "" }, { "docid": "0e8eabdb84907ffe2aa0bff3e818021d", "score": "0.68661094", "text": "def get_values_list(self):\n return self.values", "title": "" }, { "docid": "b74a727de054067903e647e26f78c211", "score": "0.67653954", "text": "def getValue(self):\n return self.state.any_list(self)", "title": "" }, { "docid": "7b35702f7ec747074074789b2e50ccbc", "score": "0.66818494", "text": "def getList(self):\n if self.isInteger:\n return None\n return self.val", "title": "" }, { "docid": "3f3a9954a063b1195d253ce67d03b24c", "score": "0.6613617", "text": "def _to_list(value: Any) -> list:\n if not isinstance(value, list):\n value = [value]\n return value", "title": "" }, { "docid": "478e6608692f26a6e09211b12dc94b97", "score": "0.6609057", "text": "def get_values(self):\n return # osid.configuration.ValueList", "title": "" }, { "docid": "59187e00c3978b670c19beb6c28c2c01", "score": "0.6607776", "text": "def getList(self) -> []:", "title": "" }, { "docid": "ee5b5bcdd124be7abb2a347e4ac78b98", "score": "0.65823895", "text": "def _getbarevallist(self):\n pass", "title": "" }, { "docid": "db461d40e3ae419e75fa2517d6c00370", "score": "0.6564355", "text": "def as_list(self):", "title": "" }, { "docid": "dac6c86548adcc6466a447cac87cf09d", "score": "0.65504456", "text": "def aslist(self):\r\n return self.options['aslist'].value", "title": "" }, { "docid": "6a992acb17f34f2248521071dc0a92f9", "score": "0.6514241", "text": "def value_list(self,value=0):\r\n lst = []\r\n for c in self.pos_val:\r\n if self.pos_val[c] == value:\r\n lst += [c]\r\n return lst", "title": "" }, { "docid": "ca2e6ec560adbd72ba74efbdb3fff0ab", "score": "0.6512886", "text": "def _list_wrap(value):\n return value if isinstance(value, list) else [value]", "title": "" }, { "docid": "568cbeeafb741689a3c5bc109594a3f0", "score": "0.649476", "text": "def serialize(self, value, entity, request):\n value = super(ListField, self).serialize(value, entity, request)\n if value is None:\n return\n ret = []\n for v in value:\n ret.append(self.itemspec.serialize(v, entity, request))\n return ret", "title": "" }, { "docid": "be0367d14ea955148fc0d35d105f4d8d", "score": "0.6457617", "text": "def get_list(self):", "title": "" }, { "docid": "44ddbbd42ca9f8fa5b6cbbd342aefe77", "score": "0.6446769", "text": "def AdaptListToValue(self, list, value):", "title": "" }, { 
"docid": "530ad8e05a9d09ce65d894e4746732ec", "score": "0.6406907", "text": "def values(self) -> List:\n return list(self._ordered[0].values())", "title": "" }, { "docid": "5aec8c10988e65ff1856c93da3838943", "score": "0.63739073", "text": "def build_list():\n return [1, 2, 3]", "title": "" }, { "docid": "c3ebd2aa529775fba3b0916be0ffccb0", "score": "0.6342094", "text": "def get_sub_values(self):\n return []", "title": "" }, { "docid": "6a0f02b007997c5ce881b693925cc7a5", "score": "0.63371044", "text": "def _getList(form, value):\n rval = form.get(value, [])\n if not isinstance(rval, list):\n rval = [rval,]\n return rval", "title": "" }, { "docid": "345da20b813e016c00afb12771c18f28", "score": "0.63323903", "text": "def value_to_list(value: Any, target_list: List[Any]) -> Tuple[List[Any]]:\n return ([value] * len(target_list),)", "title": "" }, { "docid": "723577635331813c138645d15090ac42", "score": "0.6326759", "text": "def value(self) -> list:\n if self.vector == self._last_report == self.default_vector:\n return None\n if self.parse_func_ is None:\n return self._set_last_report(self.vector_value)\n return self.parse_func_(self._set_last_report(self.vector_value))", "title": "" }, { "docid": "95659bc94f807f916f6f2bb19c793c7e", "score": "0.63256437", "text": "def to_list(self):\n pass", "title": "" }, { "docid": "25b531208e33c01bc3c74cf79d708f4a", "score": "0.63212776", "text": "def get(self):\n return self._unwrapListItems()", "title": "" }, { "docid": "41cafb6ba7280187daf19524adaf619e", "score": "0.6309462", "text": "def values(self):\n return list(self.__values)", "title": "" }, { "docid": "4f3e0903f26cacf616f46294595c96f6", "score": "0.63054156", "text": "def get_l_list(self):\n return tuple([l for l, s in self.get_ls_list()])", "title": "" }, { "docid": "0d1e8c5b1a9ff674cc3c816d745cabe0", "score": "0.6259463", "text": "def getDataAsList(self):\n self.refreshData()\n return self.dataList", "title": "" }, { "docid": "12ccc85d8cda89d1bbb01645388f80a0", "score": "0.6254431", "text": "def some_list():\n return []", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.624864", "text": "def getList(self):", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.624864", "text": "def getList(self):", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.624864", "text": "def getList(self):", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.624864", "text": "def getList(self):", "title": "" }, { "docid": "ddefd4013632b48eb115f0c1686997df", "score": "0.62460905", "text": "def get_list(self):\n return self._reader.get_list()", "title": "" }, { "docid": "61ff6cfc6748f4683af8a7cc7efc2380", "score": "0.62296945", "text": "def value_to_list(self, value):\n res = []\n if not isinstance(value, (ast.List, ast.Tuple)):\n return value\n for elt in value.elts:\n if not isinstance(value, (ast.List, ast.Tuple)):\n res.append(elt)\n else:\n res.append(self.value_to_list(elt))\n return ast.List(elts=res)", "title": "" }, { "docid": "012e7f5397f0c32cdd649305e25cffbc", "score": "0.62244576", "text": "def to_list(self, val):\r\n\t\tif isinstance(val, str): return val.split()\r\n\t\telse: return val", "title": "" }, { "docid": "47c3ac6e30573ade0757cb951e81c84e", "score": "0.62139845", "text": "def to_list(self):\r\n lst = []\r\n for value in self._elements:\r\n if value is not None:\r\n lst.append(value)\r\n return lst", "title": "" }, { "docid": "dfc3862639a298b99c5be0a1de94174c", "score": "0.62082285", "text": "def toList(self):\n 
return map(lambda x: x[1:-1],self.sot.dispStack().split('|')[1:])", "title": "" }, { "docid": "d16c9893c83cb325ff978fbc7d1a5af4", "score": "0.62018895", "text": "def get_values(self):\n return list(self.__data.values())", "title": "" }, { "docid": "3e795159ae3960c0ce497459f93c84af", "score": "0.6189933", "text": "def getList(self):\n return self._list", "title": "" }, { "docid": "1f778a92417032676972c8c1169d3a13", "score": "0.61826056", "text": "def get_value(lines) -> list:\n value_list = []\n while True:\n try:\n line = next(lines)\n # debug(f\"get_value {line}\")\n except StopIteration:\n # debug(f\"returning value_list1 {value_list}\")\n return value_list\n tmp_list = get_valuelist_from_one_line(line)\n value_list.extend(tmp_list)\n # debug(f\"current value_list = {value_list}\")\n # debug(f\"current line = {line}\")\n if (tag1 in line) or (tag2 in line):\n debug(f'Pushing back line {line}')\n lines.push_back(line)\n # debug(f\"returning value_list2 {value_list}\")\n return value_list", "title": "" }, { "docid": "bb20368fe918effd879615a52ef95d14", "score": "0.61804634", "text": "def _get_list(self, item):\n return self._fields_list[self._item2idx(item)]", "title": "" }, { "docid": "a234a0a66ef4ba9de27fc935d84fdad7", "score": "0.6179843", "text": "def aslist(self):\r\n return self.options['aslist']", "title": "" }, { "docid": "e58d1070fa32935343dabdb727a62b8b", "score": "0.6178169", "text": "def get_value(self):\n if self.__is_value_array:\n if self.__bit_size == 8: #matching c_ubyte\n return list(self.__value)\n else:\n result = []\n for i in range(self.__report_count):\n result.append(self.__getitem__(i))\n return result\n else:\n return self.__value", "title": "" }, { "docid": "39dfec79c28c2a8248384de6221d426c", "score": "0.6155569", "text": "def get_list_value(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"get_list_value\"), kwargs)", "title": "" }, { "docid": "e4235a9102b42567c9b71edd94b55ee8", "score": "0.6148351", "text": "def GetValue(self):\n \n return self.right_list.GetStrings()", "title": "" }, { "docid": "aad0be57c14794c297d90464dd2e1e09", "score": "0.61423755", "text": "def tolist(self):\n return list(self)", "title": "" }, { "docid": "aad0be57c14794c297d90464dd2e1e09", "score": "0.61423755", "text": "def tolist(self):\n return list(self)", "title": "" }, { "docid": "4c02a21d0063cdf4ad45985d539b25b7", "score": "0.6136946", "text": "def getvallist(self):\n e = self._enum()\n if e:\n return e\n return self._getbarevallist()", "title": "" }, { "docid": "478072f1d87b767a29bc1a6011125c33", "score": "0.61182475", "text": "def get_list(self, lst=\"main_list\"):\n try:\n if isinstance(self.dict[lst], list):\n return self.dict[lst]\n except KeyError:\n return None\n return None", "title": "" }, { "docid": "2ee31176b01540d0accb8ce4c405051b", "score": "0.6118214", "text": "def list(self):\n return self.__element_vector", "title": "" }, { "docid": "c6576abaf4c0de082d81a6ee84984bc8", "score": "0.610867", "text": "def result(self):\n if self._ordered:\n objlist = {int(v[0]):v[1] for v in self._objlist if v is not None}\n return [objlist[k] if k in objlist else None for k in range(len(self._objlist))] # restore order\n else:\n return self._objlist", "title": "" }, { "docid": "4b445b6c7b7997f3f2565134268bb9d4", "score": "0.61075574", "text": "def tolist(self) -> Any:\n return self.__array__().tolist()", "title": "" }, { "docid": "62435de8b47410895d745a4d914b41fb", "score": "0.6102752", "text": "def getPropertyValueList(self):\n return 
list(PropertyValue.get(self))", "title": "" }, { "docid": "1dfa632b1cc858d60a1cba850a90f722", "score": "0.60993356", "text": "def getList(self):\n return self.items if not self.isInteger() else None", "title": "" }, { "docid": "7f9fa365ece1975f90453ee8540382ca", "score": "0.60850084", "text": "def getValues_spo2(self) -> list:\n return NotImplementedError", "title": "" }, { "docid": "82bc570f274628cb216f2dd0888eadf3", "score": "0.6084913", "text": "def wrapper(y):\r\n return list(y)", "title": "" }, { "docid": "745343af0f914b60f28878929a99a39b", "score": "0.60740286", "text": "def tolist(self):\n return eval(self._toliststr())", "title": "" }, { "docid": "cf0aff37968ebf6eec8f843437b7b223", "score": "0.60737914", "text": "def tolist(self):\n return [v for v in self]", "title": "" }, { "docid": "09bd70e93c795e39ff6386bb5062e76c", "score": "0.60615206", "text": "def _list(x):\r\n if not isinstance(x, list):\r\n x = list(x)\r\n return x", "title": "" }, { "docid": "59a699f377335b33503494364b43c8e7", "score": "0.6060646", "text": "def get(self):\n return list(self)", "title": "" }, { "docid": "91b020e7e601db4ca4a25880bf9f7e90", "score": "0.6057042", "text": "def to_list(self):\n\n acc = []\n for node in self:\n acc.append(node.value)\n\n return acc", "title": "" }, { "docid": "a7cf0a22b19c1f014828a6fbfcdad2a4", "score": "0.60541975", "text": "def getList(value):\n\tresult = []\n\tif type(value) is TupleType:\n\t\tfor i in value:\n\t\t\tresult.append(i)\n\telif isinstance(value, NoneType):\n\t\tresult.append(None)\n\telif type(value) is ListType:\n\t\tresult = value\n\telif isinstance(value, ogre.Vector3):\n\t\tresult.append(value.x)\n\t\tresult.append(value.y)\n\t\tresult.append(value.z)\n\telif isinstance (value, ogre.Quaternion) :\n\t\tresult.append(value.w)\n\t\tresult.append(value.x)\n\t\tresult.append(value.y)\n\t\tresult.append(value.z)\n\telse :\n\t\traise Exception(\"Unknown instance in ror.lputils.getList()\")\n\treturn result", "title": "" }, { "docid": "c49e8d4bc71726352f999a561cc8b090", "score": "0.60522187", "text": "def getValueTest_isList(self):\r\n ret = self.atc.getValue()\r\n assert type(ret) == ListType", "title": "" }, { "docid": "9ee459c1a93c36cdf20daea41321efae", "score": "0.6048761", "text": "def force_list(val=None):\n if val is None:\n return []\n if isinstance(val, pd.Series):\n return val.tolist()\n return val if isinstance(val, list) else [val]", "title": "" }, { "docid": "dad3719a78a5f161755c65dddbb52c00", "score": "0.6048077", "text": "def get_list(self, name):\n\n if self.lists is None:\n self.refresh_lists()\n return self.lists.get(name)", "title": "" }, { "docid": "7c23fe6546cb04037f877b8ed55a84e6", "score": "0.6037308", "text": "def list(self):\n return self._list_from_iterator()", "title": "" }, { "docid": "981280c80aa79347eba2e9609c396ed8", "score": "0.6026248", "text": "def list(self):\r\n return list(self)", "title": "" }, { "docid": "e870677b6280625e6a54125e6b74c7db", "score": "0.60253227", "text": "def to_list(self):\n return list(self)", "title": "" }, { "docid": "d465c56a3a9953e3ad00cb81780c1410", "score": "0.6023641", "text": "def __get_list(self, name, size):\n atr = self.get(name)\n if atr is None:\n return [None] * size\n if not isinstance(atr, str):\n return [atr] * size\n if ',' not in atr:\n return [atr] * size\n return atr.split(',')", "title": "" }, { "docid": "e20d65a623a5467b316021f89b3568f6", "score": "0.6019688", "text": "def values_as_list(self) -> Optional[List[str]]:\n if self.delimiter is not None:\n tmp = 
self.text.rstrip(self.delimiter)\n return tmp.split(self.delimiter)\n return None", "title": "" }, { "docid": "103bec634b4ea8bcedda7a6ce583d904", "score": "0.6015219", "text": "def to_list(self):\n return [val.to_dict() if hasattr(val, 'to_dict') else val for val in self]", "title": "" }, { "docid": "31c417a043f10068645b3583b9c7691a", "score": "0.6008205", "text": "def alwayslist(value):\n if value is None:\n return []\n if isinstance(value, (list, tuple)):\n return value\n else:\n return [value]", "title": "" }, { "docid": "31c417a043f10068645b3583b9c7691a", "score": "0.6008205", "text": "def alwayslist(value):\n if value is None:\n return []\n if isinstance(value, (list, tuple)):\n return value\n else:\n return [value]", "title": "" }, { "docid": "4bdd2be5497290efa93c884b3f565f13", "score": "0.6000827", "text": "def to_list(self):\n if not isinstance(self._result, list):\n raise Error('{0} is not a list-like object'.format(\n self.__class__.__name__))\n return list(self._result)", "title": "" }, { "docid": "878a0a2b487e667493797ce5bacfb5d6", "score": "0.5990454", "text": "def get_list(*args):\n value = get(*args, converter=_list_converter)\n return value.split(_LIST_SEPARATOR)", "title": "" }, { "docid": "1b7e062987a56ec572fdfce407e39c89", "score": "0.5979481", "text": "def to_list(self):\n if self.inf:\n raise ValueError\n return [self[i] for i in range(len(self))]", "title": "" }, { "docid": "8d573a71f5801bae7f5c6d565fabe6d1", "score": "0.5977643", "text": "def _get_cvaluelist(param, field):\n values = param[field]\n assert (isinstance(values, list))\n form = \"\"\n for value in values:\n if param['type'] == 'std::vector<std::string>':\n value = '\"{}\"'.format(value)\n elif param['type'] == 'std::vector<bool>':\n value = str(value).lower()\n else:\n value = str(value)\n form += value + ','\n # remove last ','\n return form[:-1]", "title": "" }, { "docid": "65c092374288d2a3e6addcec182313fe", "score": "0.5976365", "text": "def DoGetPropertyValues(self, listname, baseparent, flags):", "title": "" }, { "docid": "a5b7de7ca3d9cab287804b361d20e39a", "score": "0.5962528", "text": "def _flag_value_as_list(self, wanted_flag_name):\n string_value_list = []\n found, flag_value = self.get_flag_value(wanted_flag_name)\n\n if found:\n assert flag_value is not None\n string_value_list = flag_value.split(',')\n return string_value_list", "title": "" }, { "docid": "312c38f8e2a6adbde8cea977a24b748e", "score": "0.59594125", "text": "def raw_list(self) -> List[R]:\n\n return self._list", "title": "" }, { "docid": "f38ced01c275f54f19d9c623e84989aa", "score": "0.595147", "text": "def ensure_list(value):\n if value is None:\n return []\n return value if isinstance(value, list) else [value]", "title": "" }, { "docid": "dce7f0bd5e867018f4a69786145f5811", "score": "0.5950895", "text": "def _listify(data):\n\n if not isinstance(data, list):\n return [data]\n return data", "title": "" }, { "docid": "614a03a35f8c092e7291d4fc30505200", "score": "0.59508353", "text": "def getStringList(self):\n return [self.getString()]", "title": "" }, { "docid": "0dd74c46cbdb8e41bbbd9f8910c30e08", "score": "0.5944012", "text": "def list(self):\n l = [self.item(i) for i in range(3)]\n return l", "title": "" }, { "docid": "dc3c9b433c324c16fe3478c1fb4b20f4", "score": "0.5932256", "text": "def val(self) -> Sequence[Sequence]:\n pass", "title": "" }, { "docid": "e9fb308dbec22918ed60094b56557805", "score": "0.5926494", "text": "def get_item_list(self) -> list:\n # Return the frame value as a flat list.\n return 
list(self._face_item_frame.values.flat)", "title": "" }, { "docid": "8c862d47da821749c0521eec5855c51e", "score": "0.5925875", "text": "def __getitem__(self, idx: int) -> List[list]:\n row = self.data[idx].tolist()\n return [[v] for v in row]", "title": "" }, { "docid": "45efc68ab2ad243994797154a50ed6c7", "score": "0.59230554", "text": "def _getRenderedValue(self):\n if self._renderedValueSet():\n if self._data is self.context.missing_value:\n sequence = []\n else:\n sequence = list(self._data)\n elif self.hasInput():\n sequence = self._generateSequence()\n elif self.context.default is not None:\n sequence = self.context.default\n else:\n sequence = []\n return sequence", "title": "" }, { "docid": "1dbaa90fa9b3d24237b28578b5540487", "score": "0.5917082", "text": "def get_list(name, clear=False):\n return get_mpdict_value('list', 'l_' + name, clear=clear)", "title": "" }, { "docid": "4849f4bdd11bb98ac53461820b800cb3", "score": "0.5903671", "text": "def data(self):\n return list(self)", "title": "" }, { "docid": "bbccbcfdc02de67b376681482216c1ca", "score": "0.58956593", "text": "def to_list(val):\n if isinstance(val, (list, set, tuple, dict)):\n return list(val)\n elif val is not None:\n return [val]\n else:\n return list()", "title": "" }, { "docid": "05b18f40e4bb2dfbb4ecdba44ad3989f", "score": "0.5894416", "text": "def get_value(self):\n if self.get_size() == 1:\n return HAPI.get_parm_string_value(self.session.hapi_session, self.node_id, self.get_name(), 0)\n else:\n values = []\n for index in range(self.get_size()):\n values.append(HAPI.get_parm_string_value(self.session.hapi_session, self.node_id, self.get_name(), index))\n return values", "title": "" }, { "docid": "dc43975bcd95d7bbe7a2373acc87dc0a", "score": "0.5894085", "text": "def get_value(self):\n if self.get_size() == 1:\n return HAPI.get_parm_float_value(self.session.hapi_session, self.node_id, self.get_name(), 0)\n else:\n values = []\n for index in range(self.get_size()):\n values.append(HAPI.get_parm_float_value(self.session.hapi_session, self.node_id, self.get_name(), index))\n return values", "title": "" }, { "docid": "cd4f8c5c93005a11da4a285ce2009f4b", "score": "0.5891073", "text": "def _get_list_from_liststore(self, liststore):\r\n LOGGER.log()\r\n return [liststore.get_value(liststore.get_iter(i), 0)\r\n for i in range(len(liststore))]", "title": "" }, { "docid": "ae679fba08252164f666292c210358cc", "score": "0.5890662", "text": "def __iter__(self):\n return iter(self.valueList)", "title": "" }, { "docid": "9d50ae0b3cbc691a1cd86ae52bedbade", "score": "0.5889289", "text": "def GetValues(self):\n ...", "title": "" } ]
53188e7e2ec681c64a362a4c2d810e21
Determine if a word is the same forwards and backwards.
[ { "docid": "75038c4e136eac304f760b5c396cb57a", "score": "0.67014337", "text": "def is_palindrome(word: str) -> bool:\n return word.casefold() == word[::-1].casefold()", "title": "" } ]
[ { "docid": "0e45a7271bc2dadb60a364e8fba5e6f2", "score": "0.78020686", "text": "def is_reverse(word1, word2):\n return (word1[::1] == word2[::-1])", "title": "" }, { "docid": "195dd0bdae702ea529fe5b342fa9b454", "score": "0.71716213", "text": "def es_palindroma(word):\n return word[::-1] == word", "title": "" }, { "docid": "a95b0a9f197999cabb365ab2182a6c87", "score": "0.71596587", "text": "def isPalindrome( word ):\n wordbackwards = word[::-1]\n if word == wordbackwards:\n return True", "title": "" }, { "docid": "a36a6629b9f2793fa170914bbcae2e97", "score": "0.71560663", "text": "def is_palindrome_v1(word):\r\n\r\n return reverse(word) == word", "title": "" }, { "docid": "c668270ef962408481960e308fbe9897", "score": "0.7091587", "text": "def is_palindrome(word):\n\n\t# reverse word, if word== word, true", "title": "" }, { "docid": "e31c4dd8bb156c672336f07908fa482a", "score": "0.69076544", "text": "def is_palindrome(word):\n return is_reverse(word, word)", "title": "" }, { "docid": "e86542c55aa3356f6700803d1490bfe2", "score": "0.690508", "text": "def is_palindrome(word):\n return word == word[::-1]", "title": "" }, { "docid": "e86542c55aa3356f6700803d1490bfe2", "score": "0.690508", "text": "def is_palindrome(word):\n return word == word[::-1]", "title": "" }, { "docid": "2fac02fe914a2a827469b1292075df69", "score": "0.6881517", "text": "def isPalindrome(word):\n boolean = True\n \n forward = list(word.lower())\n print(forward)\n backward = \"\".join((reversed(word.lower())))\n print(backward)\n\n for count, letter in enumerate(forward):\n if letter == backward[count]:\n pass\n else:\n boolean = False\n break\n \n \n return boolean", "title": "" }, { "docid": "55b7ce12369f51f967c3a8a6a73831ea", "score": "0.6871119", "text": "def ReverseDirection(self) -> bool:", "title": "" }, { "docid": "01ada30438251faaff05442ab97fb337", "score": "0.6867443", "text": "def palindrome(word):\r\n if word == word[::-1]:\r\n return True", "title": "" }, { "docid": "dd5c1d10849cc5a12f5075c64a6f6cc4", "score": "0.68304884", "text": "def palindrome(word):\n return word == word[::-1]", "title": "" }, { "docid": "3f796de6e016910e5c1139f251d36945", "score": "0.68212605", "text": "def reverse_check(forward, reverse):\n newword = ''\n for x in forward:\n newword = x + newword\n if reverse == newword:\n result = 'Good test data'\n else:\n result = 'Mismatch! 
Bad test data'\n return result", "title": "" }, { "docid": "e1436add86ef99c16c84e3ea2c3fe0e0", "score": "0.6763662", "text": "def equal_reverse(text=''):\n if text == text[::-1]:\n return True\n return False", "title": "" }, { "docid": "0f12f545b9d8a3c847b1704ee63bf542", "score": "0.6733079", "text": "def is_reverse_pair(word,word_list):\n\treturn word[::-1] in word_list", "title": "" }, { "docid": "457c7ee273083dc695415f052fa0ed56", "score": "0.67312086", "text": "def is_palindrome(word):\n if word == word[::-1]:\n return True\n else:\n return False", "title": "" }, { "docid": "edf3441eda63dba574f562dd45c5ed43", "score": "0.6711873", "text": "def sprawdz_czy_palindrom(self, word):\n self.first_word = word\n tmp_string = word.upper()\n tmp_string_reversed = reversed(tmp_string)\n if list(tmp_string)==list(tmp_string_reversed):\n return True\n else:\n return False", "title": "" }, { "docid": "19d2eca1244c3e5664165a6caaf61de6", "score": "0.670266", "text": "def is_palindrome(word: str) -> bool:\r\n if word == word[::-1]:\r\n return True\r\n return False", "title": "" }, { "docid": "668e84a069f646bfcf12889d38111793", "score": "0.645663", "text": "def is_palindrome(string: str) -> bool:\r\n # backwards = string[::-1]\r\n # return backwards == string\r\n return string[::-1].casefold() == string.casefold()", "title": "" }, { "docid": "7fc2f779c232ed0b7e0d0aa92782b0aa", "score": "0.64465934", "text": "def is_polindrome_2(string: str) -> bool:\n return string == string[::-1]", "title": "" }, { "docid": "06a0ad38bd915833fd0d06209903e46d", "score": "0.64390117", "text": "def e_palindrome(text):\n return text == text[::-1]", "title": "" }, { "docid": "426a794fd169dd40512576cae2b0e3ea", "score": "0.64057213", "text": "def trifeca(word):\n if len(word) < 1:\n return False\n else:\n letter = 0\n count = 0\n while letter < len(word)-1:\n if letter == len(word)-1:\n break\n elif word[letter] == word[letter + 1]:\n count += 1\n letter += 1\n if count >= 3:\n return True\n else:\n return False", "title": "" }, { "docid": "1b1b908d1d7e0b35fb33ffb9078a8435", "score": "0.6392128", "text": "def palindrome(word):\r\n # use a for loop to check the letter from the start\r\n # corresponding letter from the end\r\n for i in range(len(word)//2):\r\n if word[i] == word[-1-i]:\r\n return True\r\n return False", "title": "" }, { "docid": "91ba9571f1a2f76a3da0671ad9f36cd5", "score": "0.63757", "text": "def is_palindrome_v3(word):\r\n\r\n # i starts at first index of word, j starts at last\r\n i = 0\r\n j = len(word) - 1\r\n\r\n while i < j and word[i] == word[j]:\r\n i = i + 1\r\n j = j - 1\r\n\r\n return j <= i", "title": "" }, { "docid": "fb72da2994b4ea8c6721043353378830", "score": "0.63742787", "text": "def is_palindrome_v1(s):\n\n return reverse(s) == s", "title": "" }, { "docid": "47d32a6ab4ec6a8adb247870f80f3659", "score": "0.6372323", "text": "def palindrome(word):\n from collections import deque\n dq = deque(word)\n while len(dq) > 1 :\n if dq.popleft() !=dq.pop():\n return False\n return True", "title": "" }, { "docid": "e7c4c7d944857b46440712e712ee4b70", "score": "0.63430977", "text": "def validBackward(test, s):\n for i in range(len(s)):\n if cn(test + s[:i]) != s[i]:\n return False\n return True", "title": "" }, { "docid": "60e5c6b27e2ac7a38611779b5980bfc7", "score": "0.6339663", "text": "def if_palindrom(string):\n return string == string[::-1]", "title": "" }, { "docid": "7ab17cda6d5043f21bc6f78c02c71737", "score": "0.6302287", "text": "def check_if_palindrome(phrase):\n # strip whitespace and punctuation 
and make lowercase\n phrase = strip_whitespace_and_punctuation_and_make_lowercase(phrase)\n #check if reverse is same as normal and return\n return phrase == phrase[::-1]", "title": "" }, { "docid": "f0e04179fe0984087bd22c2cb84271b3", "score": "0.62611884", "text": "def does_word_exist(self, word):\r\n for translation in self.__wordbook:\r\n for fwd in translation.forward:\r\n if fwd in word.forward:\r\n return True\r\n for rev in translation.reverse:\r\n if rev in word.reverse:\r\n return True\r\n return False", "title": "" }, { "docid": "d021d1a4a7e979bd310a59f53767952c", "score": "0.62506515", "text": "def wordCheck(word1: str, word2: str):\n x = len(word1)\n y = len(word2)\n word1index = 0\n word2index = 0\n while word1index < x and word2index < y:\n\n if word1[word1index] == word2[word2index]:\n word1index += 1\n word2index += 1\n else:\n word2index += 1\n\n if x == word1index:\n return True\n return False", "title": "" }, { "docid": "32c76e2274ef5c823c7805d16b3d3236", "score": "0.6250243", "text": "def is_solved(self):\n return self._from_word == self._to_word", "title": "" }, { "docid": "8b1e2abcc8cb8250e5e0d8ce76dade77", "score": "0.62247187", "text": "def is_palindrome_v1(s):\r\n return s[:] == s[::-1]", "title": "" }, { "docid": "7bcefcfdbc00a12df311d1325a61f4a6", "score": "0.6202301", "text": "def isPalindrome(self):\n forward, backward = self._hashes\n if forward != backward:\n return False\n return self._isPalindrome()", "title": "" }, { "docid": "748e896da3cd2a86c770a48ea41ae808", "score": "0.6178757", "text": "def _same_pos(self, word, part_of_speech):\n word_pos_list = self._grammar_helper.pos_tag([word])\n full_part_of_speach = word_pos_list[0][1]\n if part_of_speech == self._grammar_helper.get_wordnet_pos(full_part_of_speach):\n return True\n return False", "title": "" }, { "docid": "281844faec6888b5103255b9db434982", "score": "0.617371", "text": "def is_palindrome(string):\n return string == string[::-1]", "title": "" }, { "docid": "95e26ee002ae4757c54c22fbcb545e69", "score": "0.6170492", "text": "def check_palindrome(self, word):\n for i in range(len(word)//2):\n if word[i] == word[-i-1]:\n continue\n else:\n return False\n self.is_palindrome.add(word)\n return True", "title": "" }, { "docid": "db4b5ec1802e812827d301ad19b65c47", "score": "0.6143468", "text": "def is_palindrome(word):\n is_palindrome = True\n for i in range(len(word) // 2):\n if word[i] != word[-i - 1]:\n is_palindrome = False\n return is_palindrome", "title": "" }, { "docid": "9138946bda4b6a0878f5dbeb81965c62", "score": "0.61348915", "text": "def backspace_compare(first: str, second: str) -> bool:\n\n def custom_gen(word: str):\n \"\"\"Yielding not backspaced letters from the end of the string.\"\"\"\n backspace = 0\n for letter in reversed(word):\n if not backspace and letter != \"#\":\n yield letter\n elif letter == \"#\":\n backspace += 1\n continue\n else:\n backspace -= 1\n continue\n\n for letter1, letter2 in zip_longest(custom_gen(first), custom_gen(second)):\n if letter1 != letter2:\n return False\n\n return True", "title": "" }, { "docid": "3bd69f98138cd4e07f9d28000a8e442a", "score": "0.6129967", "text": "def is_triangle_word(self, word):\n number = self.to_triangle_number(word)\n outcome = bool(number in self.gen_triangle_numbers())\n if Options.verbose:\n print word, number, outcome\n return outcome", "title": "" }, { "docid": "9f9cfcf75567820e07ec2bf73da7aa11", "score": "0.6126306", "text": "def check_chars(front, back, word):\n\n if front >= back:\n return True\n\n if word[front] != 
word[back]:\n return False\n else:\n return check_chars(front+1, back-1, word)", "title": "" }, { "docid": "f470fd24e0e828dab22e36ab881e6252", "score": "0.611934", "text": "def trifeca(word):\n if len(word) < 6:\n return False\n\n first_letters = word[::2]\n second_letters = word[1::2]\n # Deal with pairs starting in an even letter\n if _three_pairs_in_a_row(first_letters, second_letters):\n return True\n # If we reached here we need to deal with the case of a pair of letters\n # starting in an odd letter\n else:\n return _three_pairs_in_a_row(first_letters[1:], second_letters)", "title": "" }, { "docid": "084c228013a16d6dffbb700288aed12a", "score": "0.61082435", "text": "def is_palindrome(word):\n b_is_palindrome = True\n\n for i in range(int(len(word)/2)):\n print(word[i], word[-i])\n b_is_palindrome &= word[i] == word[len(word) - i - 1]\n\n return b_is_palindrome", "title": "" }, { "docid": "2876a706fc3134aa1cbbb639a44e1870", "score": "0.60938", "text": "def is_next_word(word1, word2):\n count = 0\n for a, b in izip_longest(word1, word2):\n # `bool` is a subclass of `int`.\n count += a != b\n if count > 1:\n return False\n return count == 1", "title": "" }, { "docid": "7c3de8d60636da3f2083f6113a0910cd", "score": "0.609306", "text": "def is_palindrome(string):\n return string[::-1].casefold() == string.casefold()", "title": "" }, { "docid": "2540433f8da5345822fe2369c2ff4e09", "score": "0.6082993", "text": "def isPalindrome(string):\n if string == reverse(string):\n return True\n else:\n return False", "title": "" }, { "docid": "d985847573501236c864ad5aa35ec2e1", "score": "0.60822415", "text": "def can_work(word):\n\n return False if word[-1] in VOWELS else True", "title": "" }, { "docid": "64e01753f843d26c9cf0cbfb7f566761", "score": "0.6080494", "text": "def end_other(a, b):\n # CODE GOES HERE\n\n if(b[-len(a)].lower() == a.lower()):\n return True\n else: \n return False", "title": "" }, { "docid": "5970cbfe90458b46c28cff38cd029fcf", "score": "0.60516167", "text": "def _isPalindrome(self):\n begin = self._offset\n end = self._length+self._offset-1\n while end > begin:\n if self._string[begin] != self._string[end]:\n return False\n begin += 1\n end -= 1\n return True", "title": "" }, { "docid": "282fc838286a023b1a404e182ae35bc6", "score": "0.60481495", "text": "def is_reversed(last_direction, curr_direction):\n if last_direction and curr_direction != last_direction:\n return True\n return False", "title": "" }, { "docid": "35e2cc90bbb0d74bab5dad07dae85ec6", "score": "0.6047713", "text": "def is_palindrome(x):\n return reverse(x) == x", "title": "" }, { "docid": "ae71cf9c8fdfa90649526cce5bebca34", "score": "0.6036291", "text": "def checkPalindromeFormation(self, a: str, b: str) -> bool:\n i = 0\n j = len(a) - 1\n\n while i < j and a[i] == b[j]:\n i = i + 1\n j = j - 1\n s1 = a[i : j + 1]\n s2 = b[i : j + 1]\n\n print(s1, s2)\n\n i, j = 0, len(a) - 1\n while i < j and b[i] == a[j]:\n i, j = i + 1, j - 1\n s3, s4 = a[i : j + 1], b[i : j + 1]\n print(s3, s4)\n\n return any(s == s[::-1] for s in (s1, s2, s3, s4))", "title": "" }, { "docid": "c5b364351586ef0c4afb0e7838aee3c1", "score": "0.6034881", "text": "def is_palindrome(str):\n \n return str == str[::-1]", "title": "" }, { "docid": "85d3f7905b378266c6281cf0ccd3e21e", "score": "0.6029167", "text": "def is_winner(current_word):\n return \"_\" not in current_word", "title": "" }, { "docid": "108cb60749dacc261e8a99280cd3af98", "score": "0.60206604", "text": "def palindrome(s):\n return s == s[::-1]", "title": "" }, { "docid": 
"9662bea673ef9356fd0fb4406e01bb9d", "score": "0.60125065", "text": "def is_palindrome(value) -> bool:\n x = value.replace(\" \",\"\")\n y = x.lower()\n return y == y[::-1]", "title": "" }, { "docid": "c53ebfc798125cecd1dc1929b441e511", "score": "0.6011779", "text": "def is_palindrome(phrase):\n phrase = phrase.upper()\n phrase = phrase.replace(' ', '')\n if phrase == phrase[::-1]:\n return True\n else:\n return False", "title": "" }, { "docid": "0002a76b1b06342bc26f1bbb22eda376", "score": "0.6007092", "text": "def is_palindrome(word):\r\n if len(word) <= 1:\r\n return True\r\n if first(word) != last(word):\r\n return False\r\n return is_palindrome(middle(word))", "title": "" }, { "docid": "f63cf399421119920b7ff035013e72db", "score": "0.5997916", "text": "def palindrome(phrase_new, phrase_backwards):\n if phrase_backwards == phrase_new:\n # palindrome = True\n print(\"This here is a palindrome: \", phrase_new, phrase_backwards)\n else:\n # palindrome = False\n print(\"This here is not a palindrome: \", phrase_new, phrase_backwards)", "title": "" }, { "docid": "a742843d26828541c4d2cdbdc468aabb", "score": "0.5995712", "text": "def is_word_in(self, word):\n current_states = {self.init_state}\n for letter in word:\n prev_states = current_states\n current_states = set()\n for state in prev_states:\n current_states.update(self.transitions[state][letter])\n\n if len(set(self.final_states).intersection(current_states)) != 0:\n return True\n else:\n return False", "title": "" }, { "docid": "cbb0cabd9765f36b67ef22e14283d6e6", "score": "0.59861946", "text": "def is_palindrome(phrase):\n phrase = deque(phrase.lower().replace(' ', ''))\n \n while len(phrase) > 1:\n start, end = phrase.popleft(), phrase.pop()\n if start != end:\n return False\n \n return True", "title": "" }, { "docid": "6bef91c86dd992921ea7a2fcc7a83186", "score": "0.5977051", "text": "def is_word_adjective(self, word):", "title": "" }, { "docid": "4032b363a562a8555daf2fc7dc4b2f1d", "score": "0.5974225", "text": "def isParent(self, s, word):\n ms = len(s)\n mw = len(word)\n i = 0\n j = 0\n while i < ms and j < mw:\n if s[i] == word[j]:\n i += 1\n j += 1\n else:\n i += 1\n if j == mw:\n return True\n else:\n return False", "title": "" }, { "docid": "f381486c628584b394897d63a91158b5", "score": "0.59623444", "text": "def _is_word(self, word):\n return self.value == word", "title": "" }, { "docid": "9ada7290cb43ad66343e689db96fafc4", "score": "0.59539866", "text": "def trifeca(word):\r\n return_ans = False\r\n if len(word) > 5:\r\n for i in range(0,len(word) - 5):\r\n if (word[i] == word[i+1]) & (word[i+2] == word[i+3]) & (word[i+4] == word[i+5]):\r\n if return_ans == False: \r\n return_ans = True\r\n return return_ans", "title": "" }, { "docid": "4a5595a56141dd70eefb1bc2e6e603a7", "score": "0.5952692", "text": "def is_palindrome(word):\n if len(word) <= 1:\n return True\n if first(word) != last(word):\n return False\n return is_palindrome(middle(word))", "title": "" }, { "docid": "1d8f541d3deed637604a216ebb1f8d64", "score": "0.5949837", "text": "def has_two_pairs(s: str) -> bool:\n prev = None\n for i, j in zip(s, itertools.islice(s, 1, None)):\n if i == j != prev:\n if prev is not None:\n return True\n prev = i\n return False", "title": "" }, { "docid": "2526d382e9bce6c2e51e7ef21f02c81a", "score": "0.5945069", "text": "def palindrome(sen):\n sen = sen.lower()\n if sen == sen[::-1]:\n return True\n return False", "title": "" }, { "docid": "78c86fb1e7a79b7e852bf825e670b305", "score": "0.59334505", "text": "def is_palindrome1(text):\n text = 
text.lower()\n for i in range(len(text) // 2):\n if text[i] != text[len(text)-2-1]:\n return False\n return True", "title": "" }, { "docid": "53732dff5ffa51af3de5d2ab38aca407", "score": "0.59304744", "text": "def palindrome(sentence):\n # TODO: return the correct value after checking the palindrome condition\n \n\n \n sentence = sentence.casefold()\n\n \n rev_sentence = reversed(sentence)\n\n \n if list(sentence) == list(rev_sentence):\n return True\n else:\n return False", "title": "" }, { "docid": "0f98aa0737be572c854e19aac6396709", "score": "0.5916425", "text": "def palindrom(text):\n text = list(text)\n a = list(text[::-1])\n return a == text", "title": "" }, { "docid": "b7cb81e426dece8fcacf200af392e104", "score": "0.59118474", "text": "def compare_words(lhs: List[aai.Word], rhs: List[Dict[str, Any]]) -> bool:\n for idx, word in enumerate(lhs):\n if word.text != rhs[idx][\"text\"]:\n return False\n if word.start != rhs[idx][\"start\"]:\n return False\n if word.end != rhs[idx][\"end\"]:\n return False\n return True", "title": "" }, { "docid": "e88d1faadfbaf9d7117518ae31140bc1", "score": "0.5908681", "text": "def is_palinrome(string):\n return string == string[::-1]", "title": "" }, { "docid": "2da512023938458f6e6725d87580d1d3", "score": "0.59034836", "text": "def is_forward(txt):\n if \"-----Original Message-----\" in txt:\n return True\n else:\n return False", "title": "" }, { "docid": "c46373688526d35b914ddc7de16bba35", "score": "0.58996904", "text": "def is_palindrome(string: str) -> bool:\n string = string.lower()\n string = string.replace(' ', '')\n return (string == string[::-1])", "title": "" }, { "docid": "e4100e8927915218f19802cb69f3b226", "score": "0.58989495", "text": "def _is_palindrome(self, string):\n return str(string) == str(string)[::-1]", "title": "" }, { "docid": "8398cce834bf1bafc9f0d75b050d5ff4", "score": "0.5897876", "text": "def is_palindrome(word):\n# print repr(word)\n if (len(word) == 1)or(len(word) == 0):\n return True\n else:\n if first(word) == last(word):\n return is_palindrome(middle(word))\n else:\n return False", "title": "" }, { "docid": "a3b1e2b84bf11c1703d7e1ad8fb09c3b", "score": "0.589231", "text": "def accepts(self, word: List[str]) -> bool:\n\n if len(word) == 0:\n word.append('_')\n\n queue: Deque[Tuple[List[str], int, str, str]] = \\\n deque([(word[:], 0, self.init_state[:], word[0][:])])\n\n start: float = time.time()\n\n while len(queue) != 0:\n if time.time() - start >= 9 * 60:\n return False\n\n cur_word, cur_pos, cur_state, cur_symbol = queue.popleft()\n\n if cur_state in self.accept_states:\n if (cur_state, cur_symbol) not in self.transitions or \\\n len(self.transitions[(cur_state, cur_symbol)]) == 0:\n return True\n\n if (cur_state, cur_symbol) not in self.transitions:\n continue\n\n for next_state, next_symbol, shift in self.transitions[(cur_state, cur_symbol)]:\n new_word: List[str] = \\\n [x if i != cur_pos else next_symbol[:] for i, x in enumerate(cur_word)]\n new_pos: int = cur_pos\n\n if (shift == '<') and (new_pos == 0):\n new_word = ['_'] + new_word\n elif (shift == '>') and (new_pos == len(new_word) - 1):\n new_word = new_word + ['_']\n new_pos += 1\n elif shift == '<':\n new_pos -= 1\n elif shift == '>':\n new_pos += 1\n\n new_state: str = next_state[:]\n new_symbol: str = new_word[new_pos][:]\n\n queue.append((new_word, new_pos, new_state, new_symbol))\n\n return False", "title": "" }, { "docid": "f53d5063fc55e900b03787ebf2d115cd", "score": "0.5879677", "text": "def part3(string):\n\n word = input(\"Please enter a word\")\n 
word = str(word)\n reverse = word[::-1]\n print(reverse)\n if word == reverse:\n print(\"This word is a palindrome\")\n else:\n print(\"This word is not a palindrome\")", "title": "" }, { "docid": "865e7199c034a8ba83b9c40eec4345a1", "score": "0.5871525", "text": "def is_triangle_word(word):\n num = sum(map(lambda c: ord(c) - ord('A') + 1, word))\n return is_triangle_num(num)", "title": "" }, { "docid": "f043af9326ee9677017b0c212fe455d6", "score": "0.58660525", "text": "def is_palindrome(a):\n return a==reverse(a)", "title": "" }, { "docid": "71c0d660ddaf56095a3fd1cb06685687", "score": "0.5858581", "text": "def is_palindrome(p: str) -> bool:\n return p == p[::-1]", "title": "" }, { "docid": "088d17011e3c24d7d4eec5c58b32dd4b", "score": "0.5855688", "text": "def is_palindrome(n):\n word = str(n)\n mid = len(word)/2\n return (word[:mid] == word[mid:])", "title": "" }, { "docid": "fabba5a8ea4faa94b9b71e57d804623f", "score": "0.582574", "text": "def one_away(string_one, string_two):\n #TODO: Try to do this in a combined case\n if string_one == string_two:\n # zero edits away so true\n return True\n # Find how many characters in one string and not the other\n # needs to be in same order\n found_non_matching = False\n if len(string_one) == len(string_two):\n for x in range(len(string_one)):\n if string_one[x] != string_two[x]:\n if found_non_matching:\n return False\n else:\n found_non_matching = True\n if found_non_matching:\n return True\n\n # check if different lengths (one removed or added)\n if len(string_one) - len(string_two) in [1, -1]:\n longer_string = string_one if len(string_one) == (len(string_two)+1) else string_two\n shorter_string = string_two if len(string_one) == (len(string_two)+1) else string_one\n # if all letters match except one then true else False\n y = 0\n for x in range(len(longer_string)):\n if y >= len(shorter_string):\n if found_non_matching:\n return False\n else:\n return True\n if longer_string[x] != shorter_string[y]:\n if found_non_matching:\n return False\n elif longer_string[x + 1] == shorter_string[y]:\n y -= 1\n found_non_matching = True\n else:\n return False\n y += 1\n if found_non_matching:\n return True", "title": "" }, { "docid": "a6e97a97d772e90fba9864086dcc3cba", "score": "0.5814189", "text": "def one_away(s: str, t: str) -> bool:\n \n len_diff = abs(len(s) - len(t))\n\n if len_diff > 1:\n return False\n\n elif len_diff == 0:\n diff = 0\n for i in range(len(s)):\n if s[i] != t[i]:\n diff += 1\n if diff > 1:\n return False\n return True\n\n else:\n max_s = s if len(s) > len(t) else t\n min_s = t if len(s) > len(t) else s\n\n diff = i = j = 0\n\n while i < len(min_s) and j < len(max_s):\n if min_s[i] != max_s[j]:\n diff += 1\n j += 1\n if diff > 1:\n return False\n else:\n j += 1\n i += 1\n\n return True", "title": "" }, { "docid": "3257d252bc2ac02cfd1516a9b9b9c90e", "score": "0.5809181", "text": "def is_abecedarian(word):\n index = 0\n while index < len(word) -1:\n if word[index] > word[index+1]:\n return False\n else:\n index +=1\n return True", "title": "" }, { "docid": "07565cc76c7c7d87f5810e35ddbc427d", "score": "0.58077645", "text": "def is_sequence(word):\n count = 0; i = 0\n while i < (len(word) - 1): \n if(word[i] == word[i + 1]):\n count += 1\n i = i + 2\n if(count == 3):\n return True\n else:\n i = i + 1\n count = 0\n return False", "title": "" }, { "docid": "bd15dd7447c2f9ddb747e9bed5de9b4d", "score": "0.578775", "text": "def if_rhyme(self, word1, word2):\n word1 = Word(word1, self.dict)\n word2 = Word(word2, self.dict)\n if 
word1.final_syllable == word2.final_syllable:\n return True\n else:\n return False", "title": "" }, { "docid": "fe3c167cd07dd0f52ba14845685e472f", "score": "0.57794416", "text": "def is_palindrome(string: str) -> bool:\n if \"\".join(reversed(string)).lower() == string.lower():\n return True\n return False", "title": "" }, { "docid": "4c66196736785c69c9ddf098e8b03c85", "score": "0.5773772", "text": "def isPalindrome3(self, s):\n s = ''.join(e for e in s if e.isalnum()).lower()\n return s==s[::-1]", "title": "" }, { "docid": "e609b586f0dd63808db55740263d7457", "score": "0.5762862", "text": "def sprawdz_czy_metagramy(self, word_one, word_two):\n self.first_word = word_one\n self.second_word = word_two\n number_of_difference = 0\n if len(word_one)==len(word_two):\n for i in range(0, len(word_one)):\n if number_of_difference > 1:\n return False\n if word_one[i] != word_two[i]:\n number_of_difference += 1\n if number_of_difference == 1:\n return True\n else:\n return False\n else:\n return False", "title": "" }, { "docid": "7aa972c8e9f1f0de34f7a5c09de19ef0", "score": "0.5758277", "text": "def is_two_palindrome(txt):\n## Note:\n## The many +1 and -1 are to get the right results\n## with even and odd numbers.\n## even //2 = exactly half\n## odd // 2 = half (and a bit)\n## so:\n## even+1 //2 = still half\n## odd+1 //2 = one over\n\n return (True if len(txt) == 1\n else (txt[:len(txt)//2] == txt[len(txt)//2-1::-1] and\n txt[(len(txt)+1)//2:] == txt[:(len(txt)+1)//2-1:-1]))", "title": "" }, { "docid": "282b8a527a2558608db8c3bd6ca51c80", "score": "0.57549894", "text": "def is_triangle_word(word):\n count = 0\n for ch in word.upper().strip():\n count += ord(ch) - ord('A') + 1\n return is_triangle_number(count)", "title": "" }, { "docid": "e0834da957334454ff5d9eeded6201d9", "score": "0.57508105", "text": "def is_triple_double(word):\n i = 0\n count = 0\n while i < len(word)-1:\n if word[i] == word[i+1]:\n count = count + 1\n if count == 3:\n return True\n i = i + 2\n else:\n count = 0\n i = i + 1\n return", "title": "" }, { "docid": "bd19262e37d0536d0389d3d843d3dbf4", "score": "0.5746574", "text": "def is_palindrome_v3(s):\r\n counter = 0\r\n for ch in range(len(s)):\r\n if s[ch] == s[-ch-1]:\r\n counter += 1\r\n return counter == len(s)", "title": "" }, { "docid": "af5dc5e420489f19bd253cfbac0d7102", "score": "0.5746273", "text": "def is_polindrome_3(string: str) -> bool:\n n = len(string)\n for i in range(n // 2 + 1):\n if string[i] != string[-(i + 1)]:\n return False\n return True", "title": "" }, { "docid": "29aff38e90e97a5a9bbe62ba28901a21", "score": "0.5745846", "text": "def validPalindrome(self, s: str) -> bool:\n left, right = 0, len(s) - 1\n while left < right:\n if s[left] != s[right]:\n return s[left:right] == s[left:right][::-1] or s[left+1:right+1] == s[left+1:right+1][::-1]\n left += 1\n right -= 1\n return True", "title": "" }, { "docid": "e6640f0f6da2e5819be918539aab88bd", "score": "0.57404786", "text": "def transformation_consists_of_word_swaps(transformation):\n from textattack.transformations import WordSwap, WordSwapGradientBased\n\n return transformation_consists_of(transformation, [WordSwap, WordSwapGradientBased])", "title": "" }, { "docid": "92236698f16a34f7b7aa6fa797a926e9", "score": "0.573482", "text": "def is_palindrome(text: str):\n#[SOLUTION]\n for i in range(len(text)):\n if text[i] != text[len(text) - 1 - i]:\n return False\n return True", "title": "" }, { "docid": "14966ed0f0cdde565d909aae1f278d57", "score": "0.57330406", "text": "def palindrome(sentence):\n # remove 
non-letters\n normal = ''.join([ char for char in sentence.lower() if char in string.ascii_lowercase ])\n center = len(normal) / 2\n left, right = normal[center::-1], normal[center:]\n return left == right", "title": "" } ]
28a5c5061b080ba6988e28b43463b904
    a               c
   / \             / \
  b   ar   >      b   a
 / \             / \ / \
bl  c           bl cl cr ar
   / \
  cl  cr
[ { "docid": "3409e010af32696ebec3666fd5fa4698", "score": "0.0", "text": "def __lr(self, a):\n b = a.left\n c = b.right\n cl = c.left\n cr = c.right\n\n # Rotation\n c.right = a\n c.left = b\n b.right = cl\n a.left = cr\n\n # Affected nodes\n a.height = 1 + max(self.__get_height(a.left),\n self.__get_height(a.right))\n b.height = 1 + max(self.__get_height(b.left),\n self.__get_height(b.right))\n c.height = 1 + max(self.__get_height(c.left),\n self.__get_height(c.right))\n\n return c", "title": "" } ]
[ { "docid": "9e6321f4f699dca82b8364355e8c5b8f", "score": "0.57508165", "text": "def cat_ears():\n return \"/\\\\\" + ((SIZE - 2) * \"-\") + ((SIZE - 2) * \"-\") + \"/\\\\\"", "title": "" }, { "docid": "3523727d40ae8f829a045dc98b172c26", "score": "0.5483277", "text": "def conc(left, root, right):\r\n\r\n\tlwid = len(left[-1])\r\n\trwid = len(right[-1])\r\n\trootwid = len(root)\r\n\r\n\tresult = [(lwid + 1) * \" \" + root + (rwid + 1) * \" \"]\r\n\r\n\tls = leftspace(left[0])\r\n\trs = rightspace(right[0])\r\n\tresult.append(ls * \" \" + (lwid - ls) * \"_\" + \"/\" + rootwid * \" \" + \"\\\\\" + rs * \"_\" + (rwid - rs) * \" \")\r\n\r\n\tfor i in range(max(len(left), len(right))):\r\n\t\trow = \"\"\r\n\t\tif i < len(left):\r\n\t\t\trow += left[i]\r\n\t\telse:\r\n\t\t\trow += lwid * \" \"\r\n\r\n\t\trow += (rootwid + 2) * \" \"\r\n\r\n\t\tif i < len(right):\r\n\t\t\trow += right[i]\r\n\t\telse:\r\n\t\t\trow += rwid * \" \"\r\n\r\n\t\tresult.append(row)\r\n\r\n\treturn result", "title": "" }, { "docid": "e68ec7242a094a82592747460d0b1e95", "score": "0.53313935", "text": "def test_branch_and_ring_at_beginning_of_branch():\n\n # CC1CCCS((Br)1Cl)F\n s = \"[C][C][C][C][C][S][#Branch1][#Branch1][Branch1][C][Br]\" \\\n \"[Ring1][Branch1][Cl][F]\"\n assert decode_eq(s, \"CC1CCCS1(Br)(Cl)F\")\n\n # CC1CCCS(1(Br)Cl)F\n s = \"[C][C][C][C][C][S][#Branch1][#Branch1][Ring1][Branch1]\" \\\n \"[Branch1][C][Br][Cl][F]\"\n assert decode_eq(s, \"CC1CCCS1(Br)(Cl)F\")", "title": "" }, { "docid": "2d84e662d1497b6358abe983f61e070a", "score": "0.5330223", "text": "def _create_char_spinner():\n while True:\n for c in '|/-\\\\':\n yield c", "title": "" }, { "docid": "2d84e662d1497b6358abe983f61e070a", "score": "0.5330223", "text": "def _create_char_spinner():\n while True:\n for c in '|/-\\\\':\n yield c", "title": "" }, { "docid": "48742ba8338f6d6641059901f5e66097", "score": "0.53102624", "text": "def conc(left,root,right):\n\t\t\n\t\tlwid = len(left[-1])\n\t\trwid = len(right[-1])\n\t\trootwid = len(root)\n\t\t\n\t\tresult = [(lwid+1)*\" \" + root + (rwid+1)*\" \"]\n\t\t\n\t\tls = leftspace(left[0])\n\t\trs = rightspace(right[0])\n\t\tresult.append(ls*\" \" + (lwid-ls)*\"_\" + \"/\" + rootwid*\" \" + \"|\" + rs*\"_\" + (rwid-rs)*\" \")\n\t\t\n\t\tfor i in range(max(len(left),len(right))):\n\t\t\t\trow = \"\"\n\t\t\t\tif i<len(left):\n\t\t\t\t\t\trow += left[i]\n\t\t\t\telse:\n\t\t\t\t\t\trow += lwid*\" \"\n\n\t\t\t\trow += (rootwid+2)*\" \"\n\t\t\t\t\n\t\t\t\tif i<len(right):\n\t\t\t\t\t\trow += right[i]\n\t\t\t\telse:\n\t\t\t\t\t\trow += rwid*\" \"\n\t\t\t\t\t\t\n\t\t\t\tresult.append(row)\n\t\t\t\t\n\t\treturn result", "title": "" }, { "docid": "3421c46ed52a5685e98d9ba24f43cdd0", "score": "0.52087164", "text": "def test_ring_after_branch():\n\n # CCCCCCC1(OCO)1\n s = \"[C][C][C][C][C][C][C][Branch1][Ring2][O][C][O][C][Ring1][Branch1]\"\n assert decode_eq(s, \"CCCCCCC(OCO)=C\")\n\n s = \"[C][C][C][C][C][C][C][Branch1][Ring2][O][C][O]\" \\\n \"[Branch1][C][F][C][C][Ring1][=Branch2]\"\n assert decode_eq(s, \"CCCCC1CC(OCO)(F)CC1\")", "title": "" }, { "docid": "dc0672a5ac17368d25410d61007a8f67", "score": "0.5202189", "text": "def _create_char_spinner():\n while True:\n for char in '|/-\\\\':\n yield char", "title": "" }, { "docid": "c659bd84e34b82fac4aa1ffc3a3cbf25", "score": "0.5113633", "text": "def head():\r\n\r\n\tfor i in range(SIZE):\r\n\t\tfor j in range(i,SIZE):\r\n\t\t\tprint(' ', end='')\r\n\t\tfor k in range(i+1):\r\n\t\t\tprint('/', end='')\r\n\t\tfor l in range(i + 1):\r\n\t\t\tprint('\\\\', 
end='')\r\n\t\tprint('')", "title": "" }, { "docid": "1a1832d1b823bd11910cf74f3dd51a04", "score": "0.5019812", "text": "def test_ring_immediately_following_branch():\n\n # CCC1CCCC(OCO)1\n s = \"[C][C][C][C][C][C][C][Branch1][Ring2][O][C][O][Ring1][Branch1]\"\n assert decode_eq(s, \"CCC1CCCC1OCO\")\n\n # CCC1CCCC(OCO)(F)1\n s = \"[C][C][C][C][C][C][C][Branch1][Ring2][O][C][O]\" \\\n \"[Branch1][C][F][Ring1][Branch1]\"\n assert decode_eq(s, \"CCC1CCCC1(OCO)F\")", "title": "" }, { "docid": "3df2aaea050598bd157061ad4c901cf9", "score": "0.5017937", "text": "def test_ring_at_beginning_of_branch():\n\n # CC1CCC(1CCl)F\n s = \"[C][C][C][C][C][=Branch1][Branch1][Ring1][Ring2][C][Cl][F]\"\n assert decode_eq(s, \"CC1CCC1(CCl)F\")\n\n # CC1CCS(Br)(1CCl)F\n s = \"[C][C][C][C][S][Branch1][C][Br]\" \\\n \"[=Branch1][Branch1][Ring1][Ring2][C][Cl][F]\"\n assert decode_eq(s, \"CC1CCS1(Br)(CCl)F\")", "title": "" }, { "docid": "09ebf5276e88736aae74289417c3461b", "score": "0.5006702", "text": "def BCFlash(w):", "title": "" }, { "docid": "fd53f5f97ae87af9909d4614f9d12956", "score": "0.49526694", "text": "def brbreak(b):\n for i,l in enumerate(b):\n if l.split()[0] in ('bne', 'beq', 'bgt', 'blt', 'jsr'):\n p0 = b[:i+1]\n p1 = b[i+1:]\n if p1:\n global scratch\n scratch += 1\n rest = brbreak(p1)\n rest[0] = ['X%d:' % scratch] + rest[0]\n return [p0] + rest\n else:\n return [p0]\n return [b]", "title": "" }, { "docid": "f47da4edc50066469285b340cd88f24b", "score": "0.4941581", "text": "def test_chiral_symbols():\n\n s = \"[C@@][Branch1][C][Cl][Branch1][C][F][Branch1][C][Br][Branch1][C][I]\"\n assert decode_eq(s, \"[C@@](Cl)(F)(Br)CI\")\n\n s = \"[C@H1][Branch1][C][Cl][Branch1][C][F][Branch1][C][Br]\"\n assert decode_eq(s, \"[C@H1](Cl)(F)CBr\")", "title": "" }, { "docid": "84d1e603a4cf8bf5eef2e43f2f2d1967", "score": "0.49158356", "text": "def lower():\r\n\tfor i in range(SIZE):\r\n\t\tprint('|', end='')\r\n\t\tfor k in range(i):\r\n\t\t\tprint('.', end='')\r\n\t\tfor k in range(i,SIZE):\r\n\t\t\tprint('\\\\', end='')\r\n\t\t\tprint('/', end='')\r\n\t\tfor k in range(i):\r\n\t\t\tprint('.', end='')\r\n\t\tprint('|')", "title": "" }, { "docid": "45029d52e4aa6c7213ee1ad7e897570f", "score": "0.48975864", "text": "def owl_wings_middle_part_right_2():\n return \"\\\\ \\\\ \\\\ \\\\\"", "title": "" }, { "docid": "7f786b1234077b02b6401e5c6c748653", "score": "0.4873619", "text": "def makebreadcrumbs(pathelements):\n breadcrumbs = []\n curpath = '/'\n for element in pathelements[:-1]:\n curpath = curpath + element + '/'\n breadcrumbs.append('<a href=\"%s\">%s</a>' % (curpath, element))\n breadcrumbs.append(pathelements[-1])\n return '&nbsp;/&nbsp;'.join(breadcrumbs)", "title": "" }, { "docid": "16549ee1d081d6676647a27b6ab0c173", "score": "0.48705626", "text": "def test_branch_at_beginning_of_branch():\n\n # [C@]((Br)Cl)F\n s = \"[C@][=Branch1][Branch1][Branch1][C][Br][Cl][F]\"\n assert decode_eq(s, \"[C@](Br)(Cl)F\")\n\n # [C@](((Br)Cl)I)F\n s = \"[C@][#Branch1][Branch2][=Branch1][Branch1][Branch1][C][Br][Cl][I][F]\"\n assert decode_eq(s, \"[C@](Br)(Cl)(I)F\")\n\n # [C@]((Br)(Cl)I)F\n s = \"[C@][#Branch1][Branch2][Branch1][C][Br][Branch1][C][Cl][I][F]\"\n assert decode_eq(s, \"[C@](Br)(Cl)(I)F\")", "title": "" }, { "docid": "a12ee220216103d188c7e646f5584b80", "score": "0.48391664", "text": "def tobranch(path):\n pathsplit = path.split('/')\n return [\"{}/\".format(p) for p in pathsplit[:-1]] + pathsplit[-1:]", "title": "" }, { "docid": "a12ee220216103d188c7e646f5584b80", "score": "0.48391664", "text": "def tobranch(path):\n 
pathsplit = path.split('/')\n return [\"{}/\".format(p) for p in pathsplit[:-1]] + pathsplit[-1:]", "title": "" }, { "docid": "3036e0671f9f60c385e013d705aa2ce3", "score": "0.48259002", "text": "def rlb(thing):\n return thing.replace(\"\\r\", \" \").replace(\"\\n\", \" \")", "title": "" }, { "docid": "a7a262ed5e2f35dfa3912ec4c4bf5e6a", "score": "0.48248285", "text": "def head():\r\n for i in range(SIZE):\r\n # The outer for loop\r\n\r\n print(' ', end='')\r\n # offset the head to the middle as the belt and body are one space bigger than the head\r\n\r\n for j in range(SIZE):\r\n # The first inner for loop\r\n if (i + j) <= SIZE - 2:\r\n print(' ', end='')\r\n else:\r\n print('/', end='')\r\n\r\n for k in range(SIZE):\r\n # The second inner for loop\r\n if (i + (SIZE - k - 1)) <= SIZE - 2:\r\n print(' ', end='')\r\n else:\r\n print('\\\\', end='')\r\n print('')", "title": "" }, { "docid": "8e1c76d487d46065837c9ff51b147f47", "score": "0.4814849", "text": "def test_backslash_grid():\n assert (\n isort.code(\n \"\"\"\nfrom kopf.engines import loggers, posting\nfrom kopf.reactor import causation, daemons, effects, handling, lifecycles, registries\nfrom kopf.storage import finalizers, states\nfrom kopf.structs import (bodies, configuration, containers, diffs,\n handlers as handlers_, patches, resources)\n\"\"\",\n multi_line_output=11,\n line_length=88,\n combine_as_imports=True,\n )\n == \"\"\"\nfrom kopf.engines import loggers, posting\nfrom kopf.reactor import causation, daemons, effects, handling, lifecycles, registries\nfrom kopf.storage import finalizers, states\nfrom kopf.structs import bodies, configuration, containers, diffs, \\\\\n handlers as handlers_, patches, resources\n\"\"\"\n )", "title": "" }, { "docid": "f42345b7cf5dd5e5bc7a5524392816c8", "score": "0.48116565", "text": "def test_oversized_branch():\n\n assert decode_eq(\"[C][Branch2][O][O][C][C][S][F][C]\", \"CCCSF\")\n assert decode_eq(\"[C][#Branch2][O][O][#C][C][S][F]\", \"C#CCSF\")", "title": "" }, { "docid": "078a50c4f7c3e9f2e6172a621b69a433", "score": "0.47796232", "text": "def owl_wings_middle_part_right_1():\n return \" \\\\ \\\\ \\\\\"", "title": "" }, { "docid": "f0ad21a0debcbbb9f8b86ecfadccdcd6", "score": "0.4759233", "text": "def test_07_pathology_mutual_no_base_case(self):\n\t\tself.R('S: X c')\n\t\tself.R('X : Y a')\n\t\tself.R('Y : X b')\n\t\tself.pathological = True", "title": "" }, { "docid": "116bbbf5bf67adeb11dfd9ddcc4e5bc2", "score": "0.47448036", "text": "def test_example_2(self):\n\n raw = \"\"\"\n q, r |- p.\n b |- p.\n p |- q.\n r |- q.\n a |- r.\n b |- r.\n r |- s.\n contrary(a, x).\n contrary(b, x).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['a', 'b', 'r', 'r', 's', 's', 'p', 'p', 'p', 'q', 'q', 'q'])", "title": "" }, { "docid": "0ef14c9eb253110b9f90564947b56e60", "score": "0.4740292", "text": "def backslash_line():\n return ((\"\\\\\" * (SIZE + 1)) + (\"/\" * (SIZE + 1)))", "title": "" }, { "docid": "c47b796bf7ed9229aa5b42c91bc770e2", "score": "0.47317103", "text": "def branches(tree):\r\n return tree[1:]", "title": "" }, { "docid": "c47b796bf7ed9229aa5b42c91bc770e2", "score": "0.47317103", "text": "def branches(tree):\r\n return tree[1:]", "title": "" }, { "docid": "dac1fa1e00ce4dd4e701be0c9c868f6f", "score": "0.47012606", "text": "def test_rm_brs():\n assert '1 2 3 4' == rm_brs('\\n\\n\\r1\\t2\\n3\\n\\r\\n4 ') # act", "title": "" }, { "docid": "5de6f2fecd8bfc968d38d14038bcbe7d", "score": 
"0.46857893", "text": "def ll(*stuff):\n return \"\\\\(\" + ll_raw(*stuff) + \"\\\\)\"", "title": "" }, { "docid": "ca58ab7218c7bc0c9085b10962c28a01", "score": "0.46477422", "text": "def lcs(x, y):\n\tm = len(x)\n\tn = len(y)\n\t\n\n\tif m < 1 or n < 1:\n\t\treturn None\n\n\trow = [(0,'-')]*(n+1)\n\tc = [0] * (m+1)\n\n\tfor i in range(m+1):\n\t\tc[i] = row[:]\n\n\t\n\n\tfor j in range(n+1):\n\t\tfor i in range(m+1):\n\t\t\tif i == 0 or j == 0:\n\t\t\t\tc[i][j] = (0,\"*\")\n\t\t\telif x[i-1] == y[j-1]:\n\t\t\t\tc[i][j] = ((c[i-1][j-1][0] +1), \"\\\\\")\n\t\t\telse:\n\t\t\t\t\n\t\t\t\tMax = c[i-1][j][0]\n\n\t\t\t\tif c[i][j-1][0] > Max:\n\t\t\t\t\tc[i][j] = (c[i][j-1][0], '-')\n\t\t\t\telse:\n\t\t\t\t\tc[i][j] = (c[i-1][j][0], '|')\n\n\n\t\t\n\n\tpprint(c)\n\tprint\n\tprint x \n\tprint y\n\n\ti = m\n\tj = n\n\tres = []\n\tlength = c[m][n][0]\n\n\twhile j >=0:\n\t\tif c[i][j][1] == \"\\\\\" and c[i][j][0] == length:\n\t\t\tres.append(y[j-1])\n\t\t\tj -=1\n\t\t\tk = i -1\n\t\t\tlength -=1\n\n\t\ti -=1\n\n\t\tif i == 1:\n\t\t\ti = k\n\t\t\tj -=1\n\tres.reverse()\n\treturn res", "title": "" }, { "docid": "9644e462a0c79550a95184d544a53697", "score": "0.46425575", "text": "def test_branch_with_no_atoms():\n\n s = \"[C][Branch1][Ring2][Branch1][Branch1][Branch1][F]\"\n assert decode_eq(s, \"CF\")\n\n s = \"[C][Branch1][Ring2][Ring1][Ring1][Branch1][F]\"\n assert decode_eq(s, \"CF\")\n\n s = \"[C][=Branch1][Ring2][Branch1][C][Cl][F]\"\n assert decode_eq(s, \"C(Cl)F\")\n\n # special case: #Branch3 takes Q_1, Q_2 = [O] and Q_3 = ''. However,\n # there are no more symbols in the branch.\n assert decode_eq(\"[C][C][C][C][#Branch3][O][O]\", \"CCCC\")", "title": "" }, { "docid": "9aac468dc7f551ecf73241c90ae5284f", "score": "0.46412808", "text": "def make_breadcrumb(path=''):\n return '\\n'.join([TD.format('active' if i == len(path.split(SEP))-1 else '', x) for i, x in enumerate(path.split(SEP))])", "title": "" }, { "docid": "8176a9748a6475b359f0899c6fe1c23c", "score": "0.4627003", "text": "def test_branch_at_end_of_selfies():\n\n assert decode_eq(\"[C][C][C][C][Branch1]\", \"CCCC\")\n assert decode_eq(\"[C][C][C][C][#Branch3]\", \"CCCC\")", "title": "" }, { "docid": "ca3f7d1ff2a02e73272b926b9d2cc305", "score": "0.4626038", "text": "def owl_wings_middle_part_left_1():\n return \"/ / / \"", "title": "" }, { "docid": "e0f8d37ffc4bb5cbd5addd393d966125", "score": "0.46212763", "text": "def br(count=1):\n for x in range(count):\n out(\"\")", "title": "" }, { "docid": "123ebfcf6e3e05fa937a8cdc461b020b", "score": "0.45944998", "text": "def branches(tree):\n return tree[1:]", "title": "" }, { "docid": "123ebfcf6e3e05fa937a8cdc461b020b", "score": "0.45944998", "text": "def branches(tree):\n return tree[1:]", "title": "" }, { "docid": "123ebfcf6e3e05fa937a8cdc461b020b", "score": "0.45944998", "text": "def branches(tree):\n return tree[1:]", "title": "" }, { "docid": "25274c59fbc58ccc5fc79ff19952f83e", "score": "0.45897013", "text": "def test_00_non_lalr(self):\n\t\tself.RR(\"\"\"\n\t\tS:a X d | a Y e | b X e | b Y d\n\t\tX:c\n\t\tY:c\n\t\t\"\"\")\n\t\tself.good = ['acd', 'ace', 'bcd', 'bce']", "title": "" }, { "docid": "53472673caf29875bb74614d8851b405", "score": "0.45868263", "text": "def expand_line(chunk, bbc):\n line = []\n tip = A;\n for i in range(1, chunk[0] + 1):\n tip = A if tip == B else B\n line += expand_to_segment(tip, chunk[i])\n output = \"\".join(line) \n if bbc == True:\n output += \"\".join(line[::-1])\n return output", "title": "" }, { "docid": "98758bd4b240445da28c4aed714f56b7", "score": 
"0.4564502", "text": "def split(self,branch,how_to_split):", "title": "" }, { "docid": "f9b8ce5f3c2ddfe9f7e3ef555988beb6", "score": "0.4550942", "text": "def te_squash(line):\n return squash(Vac.regex,Vac.regex1,line)", "title": "" }, { "docid": "48646c2702ced7d9d71091d8b83e02e9", "score": "0.45486173", "text": "def owl_wings_middle_part_left_2():\n return \"/ / / /\"", "title": "" }, { "docid": "ed489539c1772a3c157a3a0c5f667b6c", "score": "0.45447716", "text": "def test_branch_and_ring_decrement_state():\n\n assert decode_eq(\"[C][C][C][Ring1][Ring1][#C]\", \"C1CC1=C\")\n assert decode_eq(\"[C][=C][C][C][#Ring1][Ring1][#C]\", \"C=C1CC1\")\n assert decode_eq(\"[C][O][C][C][=Ring1][Ring1][#C]\", \"COCCC\")\n\n assert decode_eq(\"[C][=C][Branch1][C][=C][#C]\", \"C=C(C)C\")", "title": "" }, { "docid": "e77d00c64c9d154dd14195c69b1d2d4c", "score": "0.454067", "text": "def clb():\n Gitx().clb()", "title": "" }, { "docid": "62f02bd550d9020ccadbd1d4be6f50a6", "score": "0.45378816", "text": "def test_example_3(self):\n\n raw = \"\"\"\n q, r |- p.\n s |- q.\n s, a |- r.\n |- s.\n r |- t.\n contrary(a, x).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['t', 'p', 'q', 'r', 's', 'a'])", "title": "" }, { "docid": "bb3c2b83fb58d2c6f370686bb17b353c", "score": "0.45268375", "text": "def test_case_24(self):\n\t\ta_list = ['<', 'a', '>', 'b', '<', '>', '<', 'c', 'd', '>', '<', 'e', 'f']\n\n\t\tinstance = MyShell(a_list)\n\t\tinstance.join('-', re.compile('<'), re.compile('>'))\n\n\t\tself.assertEqual(instance.list, ['<-a->', 'b', '<->', '<-c-d->', '<-e-f'])", "title": "" }, { "docid": "d4d40a5e423c4079e0c927f2bf3554cb", "score": "0.45204163", "text": "def upper():\r\n\tfor i in range(SIZE):\r\n\t\tprint('|', end='')\r\n\t\tfor j in range(i,SIZE-1):\r\n\t\t\tprint('.', end='')\r\n\t\tfor k in range(i+1):\r\n\t\t\tprint('/', end='')\r\n\t\t\tprint('\\\\', end='')\r\n\t\tfor j in range(i,SIZE-1):\r\n\t\t\tprint('.', end='')\r\n\t\tprint('|')", "title": "" }, { "docid": "fae19950d1ef7c8a7cc5f5f98d4dfbbf", "score": "0.45104963", "text": "def expand_contractions2(text):\r\n text = re.sub(r\"won't\\b\", \"wo n't\", text)\r\n text = re.sub(r\"can't\\b\", \"ca n't\", text)\r\n text = re.sub(r\"n't\\b\", \" n't\", text)\r\n text = re.sub(r\"'re\\b\", \" 're\", text)\r\n text = re.sub(r\"'s\\b\", \" 's\", text)\r\n text = re.sub(r\"'d\\b\", \" 'd\", text)\r\n text = re.sub(r\"'ll\\b\", \" 'll\", text)\r\n text = re.sub(r\"'ve\\b\", \" 've\", text)\r\n text = re.sub(r\"'m\\b\", \" 'm\", text)\r\n\r\n # string operation\r\n text = text.replace('\\\\r', ' ')\r\n text = text.replace('\\\\n', ' ')\r\n\r\n # return text with splitted contractions\r\n return text", "title": "" }, { "docid": "ef40cde864e99453e07c0b203ae1be20", "score": "0.45075318", "text": "def br() -> None:\n\n\tprint('')", "title": "" }, { "docid": "6160e75716c8bd40056a1946a87614ad", "score": "0.45068178", "text": "def pdb_process():\r\n \r\n\r\n from tempfile import mkstemp\r\n from shutil import move\r\n from os import remove, close\r\n\r\n # for each chlorin molecule, the resName and (atom) names\r\n # chl(orophyll) a, chl b, pheophytin a,\r\n # bacteriochlorophyll a, bacteriopheophytin a\r\n\r\n atom_order = {'CLA':['G ', 'CHA', 'CHB', 'CHC', 'CHD', 'NA ', 'C1A',\r\n 'C2A', 'C3A', 'C4A', 'CMA', 'CAA', 'CBA', 'CGA',\r\n 'O1A', 'O2A', 'NB ', 'C1B', 'C2B', 'C3B', 'C4B',\r\n 'CMB', 'CAB', 'CBB', 'NC ', 'C1C', 'C2C', 'C3C',\r\n 'C4C', 'CMC', 'CAC', 'CBC', 'ND ', 
'C1D', 'C2D',\r\n 'C3D', 'C4D', 'CMD', 'CAD', 'OBD', 'CBD', 'CGD',\r\n 'O1D', 'O2D', 'CED', 'C1 ', 'C2 ', 'C3 ', 'C4 ',\r\n 'C5 ', 'C6 ', 'C7 ', 'C8 ', 'C9 ', 'C10', 'C11',\r\n 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18',\r\n 'C19', 'C20'],\r\n 'CHL':['G ', 'CHA', 'CHB', 'CHC', 'CHD', 'NA ', 'C1A',\r\n 'C2A', 'C3A', 'C4A', 'CMA', 'CAA', 'CBA', 'CGA',\r\n 'O1A', 'O2A', 'NB ', 'C1B', 'C2B', 'C3B', 'C4B',\r\n 'CMB', 'CAB', 'CBB', 'NC ', 'C1C', 'C2C', 'C3C',\r\n 'C4C', 'CMC', 'OMC', 'CAC', 'CBC', 'ND ', 'C1D',\r\n 'C2D', 'C3D', 'C4D', 'CMD', 'CAD', 'OBD', 'CBD',\r\n 'CGD', 'O1D', 'O2D', 'CED', 'C1 ', 'C2 ', 'C3 ',\r\n 'C4 ', 'C5 ', 'C6 ', 'C7 ', 'C8 ', 'C9 ', 'C10',\r\n 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17',\r\n 'C18', 'C19', 'C20'],\r\n 'PHO':['CHA', 'CHB', 'CHC', 'CHD', 'NA ', 'C1A', 'C2A',\r\n 'C3A', 'C4A', 'CMA', 'CAA', 'CBA', 'CGA', 'O1A',\r\n 'O2A', 'NB ', 'C1B', 'C2B', 'C3B', 'C4B', 'CMB',\r\n 'CAB', 'CBB', 'NC ', 'C1C', 'C2C', 'C3C', 'C4C',\r\n 'CMC', 'CAC', 'CBC', 'ND ', 'C1D', 'C2D', 'C3D',\r\n 'C4D', 'CMD', 'CAD', 'OBD', 'CBD', 'CGD', 'O1D',\r\n 'O2D', 'CED', 'C1 ', 'C2 ', 'C3 ', 'C4 ', 'C5 ',\r\n 'C6 ', 'C7 ', 'C8 ', 'C9 ', 'C10', 'C11', 'C12',\r\n 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19',\r\n 'C20'],\r\n 'BCL':['G ', 'CHA', 'CHB', 'CHC', 'CHD', 'NA ', 'C1A',\r\n 'C2A', 'C3A', 'C4A', 'CMA', 'CAA', 'CBA', 'CGA',\r\n 'O1A', 'O2A', 'NB ', 'C1B', 'C2B', 'C3B', 'C4B',\r\n 'CMB', 'CAB', 'OBB', 'CBB', 'NC ', 'C1C', 'C2C',\r\n 'C3C', 'C4C', 'CMC', 'CAC', 'CBC', 'ND ', 'C1D',\r\n 'C2D', 'C3D', 'C4D', 'CMD', 'CAD', 'OBD', 'CBD',\r\n 'CGD', 'O1D', 'O2D', 'CED', 'C1 ', 'C2 ', 'C3 ',\r\n 'C4 ', 'C5 ', 'C6 ', 'C7 ', 'C8 ', 'C9 ', 'C10',\r\n 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17',\r\n 'C18', 'C19', 'C20'],\r\n 'BPH':['CHA', 'CHB', 'CHC', 'CHD', 'NA ', 'C1A', 'C2A',\r\n 'C3A', 'C4A', 'CMA', 'CAA', 'CBA', 'CGA', 'O1A',\r\n 'O2A', 'NB ', 'C1B', 'C2B', 'C3B', 'C4B', 'CMB',\r\n 'CAB', 'OBB', 'CBB', 'NC ', 'C1C', 'C2C', 'C3C',\r\n 'C4C', 'CMC', 'CAC', 'CBC', 'ND ', 'C1D', 'C2D',\r\n 'C3D', 'C4D', 'CMD', 'CAD', 'OBD', 'CBD', 'CGD',\r\n 'O1D', 'O2D', 'CED', 'C1 ', 'C2 ', 'C3 ', 'C4 ',\r\n 'C5 ', 'C6 ', 'C7 ', 'C8 ', 'C9 ', 'C10', 'C11',\r\n 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18',\r\n 'C19', 'C20']}\r\n\r\n file_name = input('enter filename: ')\r\n print('\\nextracting data...')\r\n\r\n # pull only relevant data from file\r\n # store resName in pigments and resSeq in names (keys are resSeq)\r\n with open(file_name, 'r') as old_file:\r\n coords = [line for line in old_file if line[:6] == 'HETATM'\r\n and line[17:20] in ('CLA', 'CHL', 'PHO', 'BCL', 'BPH')]\r\n names = {}\r\n pigments = {}\r\n for line in coords:\r\n if line[22:26] not in names:\r\n names[line[22:26]] = line[22:26]\r\n pigments[line[22:26]] = line[17:20]\r\n print(f'pigment names: {list(names.values())}\\n')\r\n \r\n # resSeq numbers are not useful for identification\r\n # change names as desired\r\n cmnd = input('rename a pigment? 
<y> or <n> ')\r\n \r\n if cmnd == 'y':\r\n while True:\r\n old = input('replace: (<enter> to stop) ')\r\n # check for stop\r\n if old == '':\r\n break\r\n # add whitespace if needed\r\n length = len(old)\r\n if length < 4:\r\n old = old.rjust(4)\r\n # check if available to change\r\n if old in names.values():\r\n while True:\r\n new = input('with: ')\r\n length = len(new)\r\n # check length, if too long try again\r\n if length > 4:\r\n print('keep names to four or less characters')\r\n continue\r\n elif length <= 4:\r\n new = new.rjust(4)\r\n key = list(names.keys())[list(names.values())\r\n .index(old)]\r\n names[key] = new\r\n print(f'pigment names: {list(names.values())}\\n')\r\n break\r\n # if not available name, try again until stop\r\n else:\r\n print('name not found')\r\n # reorder pigments if desired\r\n cmnd = input('\\nreorder pigments? <y> or <n> ')\r\n if cmnd == 'y':\r\n while True:\r\n new_order = input('new order: (<enter> for example) ')\r\n # explain input style\r\n if new_order == '':\r\n print(('\\ngiven names are [\"Chlb\", \"Chla\", \"Chlc\"]'\r\n '\\nto switch order input [2, 1, 3]'\r\n '\\nand result will be [\"Chla\", \"Chlb\", \"Chlc\"]\\n'))\r\n continue\r\n # convert input string to list\r\n new_order = eval(new_order)\r\n # check \r\n if len(new_order) != len(names):\r\n print(f'there are {len(names)} pigments')\r\n continue\r\n # list of keys reordered\r\n new_order = [list(names.keys())[ind]\r\n for ind in [int(num) - 1 for num in new_order]]\r\n # names dictionary reordered\r\n names = {key:names[key] for key in new_order}\r\n break\r\n print('\\nplease wait...')\r\n # create temp file \r\n fh, abs_path = mkstemp()\r\n for i, line in enumerate(coords):\r\n # apply renaming\r\n if names[line[22:26]] != line[22:26]:\r\n coords[i] = line[:22] + names[line[22:26]] + line[26:]\r\n # open temp file for writing\r\n with open(abs_path, 'a') as new_file: \r\n for key in names:\r\n for atom in atom_order[pigments[key]]:\r\n for line in coords:\r\n if line[13:16] == atom and line[22:26] == names[key]:\r\n new_file.write(line)\r\n close(fh)\r\n # remove original file\r\n remove(file_name)\r\n # move new file\r\n move(abs_path, file_name)\r\n print('done')", "title": "" }, { "docid": "57ec42a5bb092170b6d826aba2946e52", "score": "0.44986075", "text": "def test_example_4(self):\n\n raw = \"\"\"\n a, q |- p.\n b, r |- p.\n p |- q.\n b |- r.\n c |- r.\n contrary(a, z).\n contrary(b, z).\n contrary(c, z).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['r', 'r', 'b', 'c', 'a', 'p', 'p', 'q', 'q'])", "title": "" }, { "docid": "d63ef74eb5330d18c482f0cda7da6288", "score": "0.4497198", "text": "def reflect_horizontal(block):\n rows = tuple(''.join(reversed(row)) for row in block.split('/'))\n return '/'.join(rows)", "title": "" }, { "docid": "359732e1dd24390d70f3731017fc7c60", "score": "0.449163", "text": "def tree_join(tree):\r\n if tree is None: return \"\" # empty tree\r\n op,t1,t2=tree # decompose tree \r\n if t1 is None and t2 is None: # simple node: return op name\r\n return op\r\n\r\n # Comma \r\n if op ==Comma: # a list (not hashable)\r\n li=[tree_join(x) for x in t1] # list elements RECURSIVE\r\n txt=Comma.join(li)\r\n if len(li)==1: txt+=Comma # comma if single element\r\n return Obr+txt+Cbr # (elt1, elt2, ...)\r\n\r\n # has result been computed before\r\n try: \r\n if tree in Dic_tree_join:\r\n return Dic_tree_join[tree] # retrieve from dict when possible \r\n can_hash=True 
# tree is hashable\r\n except TypeError:\r\n can_hash=False # unhashable type\r\n \r\n term1=tree_join(t1) # recursive call for terms\r\n term2=tree_join(t2)\r\n \r\n # Special (subscripting) \r\n if op ==Special: # used for subscripting lists, dicts or attributes\r\n txt=term1+Obr+no_brackets(term2)+Cbr # term1(term2) \r\n\r\n # Bracketed block\r\n elif op==Obr: # brackets (disambiguating)\r\n if t2:\r\n print(\"\\n\", Anom_text_after_block, Obr+no_brackets(term1)+Cbr+term2) # no second term allowed\r\n raise ReferenceError\r\n txt=Obr+no_brackets(term1)+Cbr # (term1) \r\n \r\n # other \r\n elif not t2: # unary operator: no second term\r\n txt=op+Obr+no_brackets(term1)+Cbr # op(term1)\r\n\r\n else:\r\n if op in Alphakwords: op=Underscore+op+Underscore # special spacing for expression names \r\n txt=term1+op+term2 # term1 op term2 (no brackets)\r\n\r\n if can_hash: Dic_tree_join[tree]=txt # save result for later \r\n return txt", "title": "" }, { "docid": "515e2733f1f5e64b352c4e3c76a62e6a", "score": "0.44898993", "text": "def fancy_vector(v):\n return \"\\n / %5.2F \\\\\\n\" % (v[0]) + \\\n \" | %5.2F |\\n\" % (v[1]) + \\\n \" \\\\ %5.2F /\\n\" % (v[2])", "title": "" }, { "docid": "29254d8d9e8b6491d6a0ef0d578038f0", "score": "0.44889694", "text": "def explicit_line_join(logical_line, tokens):\r\n prev_start = prev_end = parens = 0\r\n for token_type, text, start, end, line in tokens:\r\n if start[0] != prev_start and parens and backslash:\r\n yield backslash, \"E502 the backslash is redundant between brackets\"\r\n if end[0] != prev_end:\r\n if line.rstrip('\\r\\n').endswith('\\\\'):\r\n backslash = (end[0], len(line.splitlines()[-1]) - 1)\r\n else:\r\n backslash = None\r\n prev_start = prev_end = end[0]\r\n else:\r\n prev_start = start[0]\r\n if token_type == tokenize.OP:\r\n if text in '([{':\r\n parens += 1\r\n elif text in ')]}':\r\n parens -= 1", "title": "" }, { "docid": "3adbcc3a853518237cf67f3f33186aa6", "score": "0.44849068", "text": "def find_carrying_branch(self):", "title": "" }, { "docid": "77f250601a9599e02c46b31912dcee1e", "score": "0.44833848", "text": "def construct_bwt(text):\n text += '$'\n # list orient containing all ordered substitution of text\n orient = []\n for i in range(len(text)):\n orient.append(text[i:] + text[:i])\n # sort orient and create bwt by taking last letter of each subsitution ordered\n orient.sort()\n bwt = ''.join([i[-1] for i in orient])\n return bwt, orient # return bwt str and matrix", "title": "" }, { "docid": "dc2aa334d60fb8e7a6daa336edee2c45", "score": "0.44825774", "text": "def print_align(alignment):\r\n\tcost, path = alignment[0]\r\n\tleft, right = alignment[1:]\r\n\tn = len(left)\r\n\r\n\tresult = []\r\n\tfor i in xrange(n):\r\n\t\tresult.append( '%d ' % (i+1) )\r\n\tresult.append('\\n')\r\n\tfor p in left:\r\n\t\tresult.append( '%s ' % p )\r\n\tresult.append('\\n')\r\n\tfor action, coord, cost in path:\r\n\t\tif action == 'right_occl' or action == 'left_occl':\r\n\t\t\tresult.append( ' ' )\r\n\t\telif action == 'good_match' or action == 'bad_match':\r\n\t\t\ti, j = coord\r\n\t\t\tif i == j:\r\n\t\t\t\tresult.append('| ')\r\n\t\t\telif i > j:\r\n\t\t\t\tresult.append('/ ')\r\n\t\t\telse:\r\n\t\t\t\tresult.append('\\\\ ')\r\n\tresult.append('\\n')\r\n\r\n\tfor p in right:\r\n\t\tresult.append( '%s ' % p )\r\n\tresult.append('\\n')\r\n\tresult.append('cost=%d' % cost)\r\n\t\r\n\tprint ''.join(result)", "title": "" }, { "docid": "e0a762d4abf85b06c20c489d93f564f2", "score": "0.44789588", "text": "def blockjoin(lines):\n acc = []\n block = 
[]\n\n for line in lines:\n line = line.strip()\n\n if not line:\n if block:\n acc.append(\" \".join(block))\n block = []\n else:\n block.append(line)\n\n if block:\n acc.append(\" \".join(block))\n\n return \"\\n\\n\".join(acc)", "title": "" }, { "docid": "47089b3490725a018150e457b0d2868d", "score": "0.4476545", "text": "def test_breadth():\n os.chdir(os.path.dirname(__file__) + '/data')\n proc = subprocess.Popen(['swc', 'find', 'pass_simple_branch.swc',\n '-b', '1'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n stdout, stderr = proc.communicate()\n assert proc.returncode == 0\n assert stdout == '4 5 6 7 10 11 12 13 \\n'\n assert stderr == ''", "title": "" }, { "docid": "b125940f3a818a8fbd5fe87085751198", "score": "0.44720542", "text": "def make_binary_output(watson_handle,crick_handle,output):\n for i,handle in enumerate([watson_handle,crick_handle]):\n if i == 0:\n name_start = '>w_'\n else:\n name_start = '>c_'\n for line in handle:\n if line.startswith('>'):\n try:\n seq = seq.replace('C','T').replace('G','A')\n out = name + '\\n' + seq + '\\n'\n output.write(out)\n except NameError:\n pass\n name = '%s'%name_start\n seq = ''\n else:\n name += line.rstrip('\\n')\n seq += line.rstrip('\\n')", "title": "" }, { "docid": "ee7203632cc4585c045a9a5231a08ae4", "score": "0.44698185", "text": "def ColorWheel():\n colors = ['k', 'b', 'g', 'r', 'c', 'm']\n symbols = ['o', 's', '^', 'v', '<', \">\", 'x', 'D', 'h', 'p']\n lines = ['-', '--', '-.', ':']\n while 1:\n for l in lines:\n for s in symbols:\n for c in colors:\n yield c + s + l", "title": "" }, { "docid": "091d34f7a248fa8d556ec421c5f922af", "score": "0.44678476", "text": "def test_init_for_two_abs_paths( self ):\n\n str_command = \"This is a command\"\n str_path_one = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path1\" )\n str_path_two = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path2\" )\n str_path_three = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path3\" )\n str_path_four = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path4\" )\n lstr_deps = [ str_path_one, str_path_three ]\n lstr_prods = [ str_path_two, str_path_four ]\n lstr_deps_answer = sorted( [ str_path_one, str_path_three ] )\n lstr_prods_answer = sorted( [ str_path_two, str_path_four ] )\n cmd_test = Command.Command( str_command, lstr_deps, lstr_prods )\n str_result = str( sorted( [ str( str_dep ) for str_dep in cmd_test.lstr_dependencies ] +\n [ str( str_prod ) for str_prod in cmd_test.lstr_products ] ) )\n str_answer = \"\".join([\"[\\\"PATH: /This/is/a/path1, CLEAN: 2, Dependency PARENTS: [] CHILDREN: ['This is a command']\\\", \",\n \"\\\"PATH: /This/is/a/path2, CLEAN: 2, Product PARENTS: ['This is a command'] CHILDREN: []\\\", \",\n \"\\\"PATH: /This/is/a/path3, CLEAN: 2, Dependency PARENTS: [] CHILDREN: ['This is a command']\\\", \",\n \"\\\"PATH: /This/is/a/path4, CLEAN: 2, Product PARENTS: ['This is a command'] CHILDREN: []\\\"]\"])\n self.func_test_equals(str_answer, str_result)", "title": "" }, { "docid": "ac76dd6b3efd536910ffa0ef18901ba4", "score": "0.4464915", "text": "def test_example_15(self):\n\n raw = \"\"\"\n a |- p.\n b, c |- z.\n a |- q.\n |- r.\n contrary(a, z).\n contrary(b, q).\n contrary(c, r).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['a', 'b' , 'c', 'p', 'q', 'r', 'z'])", "title": "" }, { "docid": "36ade9adfc7d01721ff7211f3c55f13e", "score": "0.44563192", "text": "def 
all_variants(include_blunt_angles=True):\n a, b, c = 3., 4., 5.\n alpha = 55.0\n yield CUB(a)\n yield FCC(a)\n yield BCC(a)\n yield TET(a, c)\n bct1 = BCT(2 * a, c)\n bct2 = BCT(a, c)\n assert bct1.variant == 'BCT1'\n assert bct2.variant == 'BCT2'\n\n yield bct1\n yield bct2\n\n yield ORC(a, b, c)\n\n a0 = np.sqrt(1.0 / (1 / b**2 + 1 / c**2))\n orcf1 = ORCF(0.5 * a0, b, c)\n orcf2 = ORCF(1.2 * a0, b, c)\n orcf3 = ORCF(a0, b, c)\n assert orcf1.variant == 'ORCF1'\n assert orcf2.variant == 'ORCF2'\n assert orcf3.variant == 'ORCF3'\n yield orcf1\n yield orcf2\n yield orcf3\n\n yield ORCI(a, b, c)\n yield ORCC(a, b, c)\n\n yield HEX(a, c)\n\n rhl1 = RHL(a, alpha=55.0)\n assert rhl1.variant == 'RHL1'\n yield rhl1\n\n rhl2 = RHL(a, alpha=105.0)\n assert rhl2.variant == 'RHL2'\n yield rhl2\n\n # With these lengths, alpha < 65 (or so) would result in a lattice that\n # could also be represented with alpha > 65, which is more conventional.\n yield MCL(a, b, c, alpha=70.0)\n\n mclc1 = MCLC(a, b, c, 80)\n assert mclc1.variant == 'MCLC1'\n yield mclc1\n # mclc2 has same special points as mclc1\n\n mclc3 = MCLC(1.8 * a, b, c * 2, 80)\n assert mclc3.variant == 'MCLC3'\n yield mclc3\n # mclc4 has same special points as mclc3\n\n # XXX We should add MCLC2 and MCLC4 as well.\n\n mclc5 = MCLC(b, b, 1.1 * b, 70)\n assert mclc5.variant == 'MCLC5'\n yield mclc5\n\n def get_tri(kcellpar):\n # We build the TRI lattices from cellpars of reciprocal cell\n icell = Cell.fromcellpar(kcellpar)\n cellpar = Cell(4 * icell.reciprocal()).cellpar()\n return TRI(*cellpar)\n\n tri1a = get_tri([1., 1.2, 1.4, 120., 110., 100.])\n assert tri1a.variant == 'TRI1a'\n yield tri1a\n\n tri1b = get_tri([1., 1.2, 1.4, 50., 60., 70.])\n assert tri1b.variant == 'TRI1b'\n yield tri1b\n\n tri2a = get_tri([1., 1.2, 1.4, 120., 110., 90.])\n assert tri2a.variant == 'TRI2a'\n yield tri2a\n tri2b = get_tri([1., 1.2, 1.4, 50., 60., 90.])\n assert tri2b.variant == 'TRI2b'\n yield tri2b\n\n yield OBL(a, b, alpha=alpha)\n yield RECT(a, b)\n yield CRECT(a, alpha=alpha)\n yield HEX2D(a)\n yield SQR(a)\n yield LINE(a)\n\n if include_blunt_angles:\n beta = 110\n yield OBL(a, b, alpha=beta)\n yield CRECT(a, alpha=beta)", "title": "" }, { "docid": "7c99ef3ca7474746aa593b61c7ab3887", "score": "0.4451551", "text": "def BCHide(w):", "title": "" }, { "docid": "bffae6b8cf156dbc1056dda0a8e1c404", "score": "0.4451219", "text": "def rv_comp(seq):\n rv = seq[::-1]\n flip = ''\n for initial in rv:\n if initial == 'A':\n flip += 'T'\n elif initial == 'T':\n flip += 'A'\n elif initial == 'C':\n flip += 'G'\n elif initial == 'G':\n flip += 'C'\n elif initial == 'N':\n flip += 'N'\n\n return flip", "title": "" }, { "docid": "311afb318477c69081af34bc49d2b7bc", "score": "0.44436386", "text": "def test_slash_join(a, b):\n assert utils.slash_join(a, b) == \"a/b\"", "title": "" }, { "docid": "b283e9f41fb888a6d9e8295a5f9decdd", "score": "0.44393885", "text": "def celebrate(self):\n print()\n print(' ' + (self.prCyan(' \\\\') + self.prYellow('0') + self.prCyan('/')).replace(' ', ''))\n print(self.prCyan(' |'))\n print(self.prGrey(' / \\\\'))\n print(self.prGreen('#######'))\n print\n print(self.prGreen(' __________ ________'))\n print(self.prGreen('| | | |\\\\ | |\\\\ | | | \\\\'))\n print(self.prGreen('| | | | \\\\ | | \\\\ | | | |'))\n print(self.prGreen('| | | | \\\\ | | \\\\ | | | |'))\n print(self.prGreen('| /\\\\ | | | \\\\ | | \\\\ | |______ |________/'))\n print(self.prGreen('| / \\\\ | | | \\\\ | | \\\\ | | | \\\\ ')) \n print(self.prGreen('| / \\\\ | | | \\\\ | | 
\\\\ | | | \\\\'))\n print(self.prGreen('| / \\\\ | | | \\\\ | | \\\\ | | | \\\\ '))\n print(self.prGreen('| / \\\\ | | | \\\\ | | \\\\ | | | \\\\ '))\n print(self.prGreen('|/ \\\\| | | \\\\| | \\\\| |__________ | \\\\ \\n'))", "title": "" }, { "docid": "357e8ce4c73ba07fd625101a81c8deb0", "score": "0.44370973", "text": "def compliment(self,dna):\n bp = { 'A':'T', 'T':'A', 'C':'G','G':'C'}\n revdna ='' \n for i in range(len(dna)-1,-1,-1):\n revdna += bp[dna[i]]\n return (revdna)", "title": "" }, { "docid": "886a7e1696f6ae1b12b1b093535e5bed", "score": "0.44339845", "text": "def test_example_11(self):\n\n raw = \"\"\"\n q, r |- p.\n a |- q.\n t, a, b |- r.\n b |- s.\n |- t.\n contrary(a, x).\n contrary(b, y).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['p', 'q', 'r', 's', 't', 'a', 'b'])", "title": "" }, { "docid": "c8e14158c276d736c4d894b5d4c2b8b3", "score": "0.44322982", "text": "def newick(self,node):\n\n\n#every string has to start off with this. \n newicks=\"(\"\n \n#If the list of children is not empty, go through each child in the list \n if node.children!=[]:\n for child in node.children:\n#based this part off of what Andre had done, definitely clearer than what I had in mind\n if node.children[-1]==child:\n newicks=newicks+self.newick(child)\n else:\n newicks=newicks+self.newick(child) + \",\"\n#If the branch length isn't 0, add the branch length after \":\"\n#close everything with \")\" \n if node.brl !=0:\n newicks=newicks+ \"):\" +str(node.brl)\n else:\n newicks=newicks+ \")\"\n return newicks \n else:\n return node.name + \":\" + str(node.brl)", "title": "" }, { "docid": "33c9ead501fa992a20c5a62c0f0a4881", "score": "0.4432031", "text": "def align(left, right, \\\r\n\t\tbad_match_cost, occlusion_cost, good_match_cost=0):\r\n\tdef match(i, j):\r\n\t\tif left[i] == right[j]:\r\n\t\t\treturn good_match_cost, 'good_match'\r\n\t\treturn bad_match_cost, 'bad_match'\r\n\r\n\tdef ismatch(action):\r\n\t\treturn action == 'good_match' or action == 'bad_match'\r\n\r\n\tdef best_tuple(value_path_action_tuples):\r\n\t\tbest_tuple = None\r\n\t\tfor t in value_path_action_tuples:\r\n\t\t\tval, path, action = t\r\n\t\t\tif best_tuple is None:\r\n\t\t\t\tbest_tuple = t\r\n\t\t\telse:\r\n\t\t\t\tbest_val, best_path, best_action = best_tuple\r\n\t\t\t\tif val < best_val:\r\n\t\t\t\t\tbest_tuple = t\r\n\t\t\t\telif val == best_val:\r\n\t\t\t\t\tpath_action, path_coords, path_val = path[0]\r\n\t\t\t\t\tif ismatch(path_action):\r\n\t\t\t\t\t\tbest_tuple = t\r\n\t\treturn best_tuple\r\n\r\n\tcache = dict()\r\n\r\n\tdef value(i, j):\r\n\t\tif (i, j) in cache:\r\n\t\t\treturn cache[(i, j)]\r\n\r\n\t\ttuples = []\r\n\t\tif i >= 0 and j >= 0:\r\n\t\t\tmatch_val, match_path = value(i-1, j-1)\r\n\t\t\tmatch_cost = match(i, j)\r\n\t\t\tmatch_val += match_cost[0]\r\n\t\t\ttuples.append( (match_val, match_path, match_cost[1]) )\r\n\t\tif i >= 0:\r\n\t\t\thoriz_occl_val, horiz_occl_path = value(i-1, j)\r\n\t\t\thoriz_occl_val += occlusion_cost\r\n\t\t\ttuples.append( (horiz_occl_val, horiz_occl_path,'left_occl') )\r\n\t\tif j >= 0:\r\n\t\t\tvert_occl_val, vert_occl_path = value(i, j-1)\r\n\t\t\tvert_occl_val += occlusion_cost\r\n\t\t\ttuples.append( (vert_occl_val, vert_occl_path, 'right_occl') )\r\n\r\n\t\tif tuples:\r\n\t\t\tbest_val, best_path, best_action = best_tuple(tuples)\r\n\t\t\tmy_path = list(best_path)\r\n\r\n\t\t\t# mlake sure to append our best to the existing path\r\n\t\t\tmy_path.append( 
(best_action, (i, j), best_val) )\r\n\t\t\tresult = best_val, my_path\r\n\t\telse:\r\n\t\t\tresult = 0, []\r\n\r\n\t\tcache[(i, j)] = result\r\n\t\treturn result\r\n\r\n\tn = len(left)-1\r\n\r\n\treturn value(n, n), left, right", "title": "" }, { "docid": "9ea47344616b154649faba68dad3ac1e", "score": "0.4427919", "text": "def p(*args):\n assert len(args) == 8\n return \"\\\\\\\\\".join(args)", "title": "" }, { "docid": "d6022d0e53055a7851c518f4251c5f8e", "score": "0.44235128", "text": "def arb_char():\n\n return ArbChar()", "title": "" }, { "docid": "bae36e30a74ed380e010708cbdfaadfb", "score": "0.44190335", "text": "def test_example_16(self):\n\n raw = \"\"\"\n q |- p.\n a |- q.\n p |- r.\n contrary(a, b).\n contrary(b, r).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['a', 'p', 'q', 'r'])", "title": "" }, { "docid": "eacb417749bc74d213ac7a15c4a95806", "score": "0.4418977", "text": "def go(a, b, result, r):\n \n \n # base case: if the remaining result is equal to what we need to add\n if len(a) < 1:\n return b == result[r:]\n if len(b) < 1:\n return a == result[r:]\n \n for i in range(len(a)):\n # longest matching substring or we've seen this combo before\n if a[i] != result[r+i] or (a[i+1:], b) in seen:\n return False\n\n # interleaving is valid\n if result[r+i+1] == b[0]:\n seen.add((a[i+1:], b)) # save the remaining substring of a and all of b\n if go(b, a[i+1:], result, r+i+1): # break as soon as we find True\n return True", "title": "" }, { "docid": "8c2f58138227c2fd3ea3aa9935212caf", "score": "0.44182965", "text": "def be_rude(bbc):\n i = 0\n finished = False\n print(\"\")\n while finished == False:\n line = expand_line( g[ i : i + 1 + g[i]] , bbc)\n print(line)\n i += g[i] + 1\n if i >= len(g):\n finished = True\n print(\"\")", "title": "" }, { "docid": "00e3f637918dc093f61f78fa74108efa", "score": "0.44123572", "text": "def fm2_line(up, down, left, right, a, b, start, select, reset=False):\n return ''.join(('|1|' if reset else '|0|') +\n ('R' if right else '.') +\n ('L' if left else '.') +\n ('D' if down else '.') +\n ('U' if up else '.') +\n ('T' if start else '.') +\n ('D' if select else '.') +\n ('B' if b else '.') +\n ('A' if a else '.') +\n '|........||')", "title": "" }, { "docid": "3b49438c44f9ec625033c92ac6d89b7f", "score": "0.4409626", "text": "def combineEscapedLines(lines):\n\n # Replace any \\t characters\n lines = [line.replace(\"\\t\", \"\") for line in lines]\n\n # Copy the lines because we are changing while we iterate\n combinedLines = lines.copy()\n\n for i, line in enumerate(lines):\n if str.endswith(line, \"\\\\\"):\n escapedLine = combinedLines[i][:-1]\n nextLine = combinedLines[i + 1]\n combinedLines[i] = escapedLine + nextLine\n del combinedLines[i + 1]\n\n return combinedLines", "title": "" }, { "docid": "f987e0ff9a5bd65fe5cd4a844d0048bb", "score": "0.44074902", "text": "def test_branch_and_ring_at_state_X0():\n\n assert decode_eq(\"[Branch3][C][S][C][O]\", \"CSCO\")\n assert decode_eq(\"[Ring3][C][S][C][O]\", \"CSCO\")\n assert decode_eq(\"[Branch1][Ring1][Ring3][C][S][C][O]\", \"CSCO\")", "title": "" }, { "docid": "869c2c510dd6e952410d79947d6b80e8", "score": "0.44068417", "text": "def vcp_squash(line):\n return squash(Vcp.regex,Vcp.regex1,line)", "title": "" }, { "docid": "13090bf833c86a1986613cce04877362", "score": "0.439714", "text": "def test_init_for_three_mixed_paths( self ):\n\n str_command = \"This is a command\"\n str_path_one = os.path.sep + 
os.path.join( \"This\",\"is\",\"a\",\"path1\" )\n str_path_two = os.path.join( \"This\",\"is\",\"a\",\"path2\" )\n str_path_three = os.path.join( \"This\",\"is\",\"a\",\"path3\" )\n str_path_four = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path4\" )\n str_path_five = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path5\" )\n str_path_six = os.path.sep + os.path.join( \"This\",\"is\",\"a\",\"path6\" )\n lstr_deps = [ str_path_one, str_path_three, str_path_five ]\n lstr_prods = [ str_path_two, str_path_four, str_path_six ]\n lstr_deps_answer = [ str_path_one, os.path.join( os.getcwd(), str_path_three), str_path_five ]\n lstr_prods_answer = [ os.path.join( os.getcwd(), str_path_two ), str_path_four, str_path_six ]\n cmd_test = Command.Command( str_command, lstr_deps, lstr_prods )\n str_result = str( sorted( [ str( str_dep ) for str_dep in cmd_test.lstr_dependencies ] +\n [ str( str_prod ) for str_prod in cmd_test.lstr_products ] ) )\n str_answer = \"\".join([\"[\\\"PATH: /This/is/a/path1, CLEAN: 2, Dependency PARENTS: [] CHILDREN: ['This is a command']\\\", \",\n \"\\\"PATH: /This/is/a/path4, CLEAN: 2, Product PARENTS: ['This is a command'] CHILDREN: []\\\", \",\n \"\\\"PATH: /This/is/a/path5, CLEAN: 2, Dependency PARENTS: [] CHILDREN: ['This is a command']\\\", \",\n \"\\\"PATH: /This/is/a/path6, CLEAN: 2, Product PARENTS: ['This is a command'] CHILDREN: []\\\", \",\n \"\\\"PATH: \" + os.getcwd() + \"/This/is/a/path2, \",\n \"CLEAN: 2, Product PARENTS: ['This is a command'] CHILDREN: []\\\", \",\n \"\\\"PATH: \" + os.getcwd() + \"/This/is/a/path3, \",\n \"CLEAN: 2, Dependency PARENTS: [] CHILDREN: ['This is a command']\\\"]\"])\n self.func_test_equals(str_answer, str_result)", "title": "" }, { "docid": "cbe000fbde450e2d5f6b8b42fffc39d6", "score": "0.43934247", "text": "def t3_branch(offset, a_word): \n res = \"\"\n column = 0\n op = get_field(a_word, 20, 26)\n op1 = get_field(a_word, 12, 14)\n op1_masked = get_field(a_word, 12, 14) & 0b101\n op2 = get_field(a_word, 8, 11)\n j1_bit = get_field(a_word, 13, 13)\n j2_bit = get_field(a_word, 11, 11)\n s_bit = get_field(a_word, 26, 26)\n imm11 = get_field(a_word, 0, 10) \n\n if (op1_masked == 0):\n if (op & 0b0111000) == 0:\n res += t32_branch_cond(offset, a_word)\n elif op == 0b0111000:\n if (op2 & 3) == 0:\n res += t32_msr(offset, a_word)\n else: # ?? system level?\n res += t32_msr(offset, a_word)\n elif op == 0b011101:\n res += t32_msr(offset, a_word)\n elif op == 0b0111010:\n res += \"change processor state\"\n elif op == 0b0111011:\n res += \"Miscellaneous control\"\n elif op == 0b0111100:\n # Bx Jazelle\n res += \"BXJ\"\n column = 3\n res += SPACES[column:ICOLUMN]; column = ICOLUMN \n rm = get_field(a_word, 16, 19)\n res += get_reg(rm) \n elif op == 0b0111101:\n res += \"Exception return SUBS PC, LR etc\"\n elif (op == 0b0111110) or (op == 0b0111111):\n res += t32_mrs(offset, a_word)\n elif op == 0b1111111:\n res += \"SMC Secure Monitor call\"\n elif (op1 == 2) & (op == 0x7f):\n res += undefined + \" (FOR EVER)\"\n elif (op1_masked) == 1:\n # branch\n if get_field(a_word, 12, 12) == 0:\n #T3. armv7-M manual is missing an explanation (7-R manual OK. 
A8-44)\n res += t32_branch_cond(offset, a_word) \n else:\n #T4\n i1 = (~(j1_bit ^ s_bit)) &1\n i2 = (~(j2_bit ^ s_bit)) &1\n imm10 = get_field(a_word, 16, 25)\n imm32 = (s_bit<<24) | (i1 << 23) | (i2 << 22) | (imm10 << 12) | (\n imm11 << 1)\n if s_bit: \n imm32 = extend_sign(imm32, 24)\n imm32 += offset+4 # pc-relative\n res += \"b.w\"\n column = 3\n res += SPACES[column:ICOLUMN]; column = ICOLUMN \n res += \"{:#x} ;{:s}\".format(imm32, get_sym(imm32))\n elif (op1_masked) == 4:\n #res += \"branch with link and exchange\"\n # not armv7-M\n i1 = (~(j1_bit ^ s_bit)) &1\n i2 = (~(j2_bit ^ s_bit)) &1\n imm10 = get_field(a_word, 16, 25)\n imm10L = imm11 >> 1\n imm32 = (s_bit << 24) | (i1 << 23) | (i2 << 22) | (imm10 << 12) | (\n imm10L << 2)\n if s_bit: \n imm32 = extend_sign(imm32, 24)\n imm32 += offset+4 # pc-relative\n res += \"blx\"\n column = 3\n res += SPACES[column:ICOLUMN]; column = ICOLUMN \n res += \"{:#x} ;{:s}\".format(imm32, get_sym(imm32)) \n elif (op1_masked) == 5:\n #res += \"branch with link\"\n i1 = (~(j1_bit ^ s_bit)) & 1\n i2 = (~(j2_bit ^ s_bit)) & 1\n imm10 = get_field(a_word, 16, 25)\n imm32 = (s_bit << 24) | (i1 << 23) | (i2 << 22) | (imm10 << 12) | (\n imm11 << 1)\n if s_bit: \n imm32 = extend_sign(imm32, 24)\n imm32 += offset+4 # pc-relative\n res += \"bl\"\n column = 2\n res += SPACES[column:ICOLUMN]; column = ICOLUMN \n res += \"{:#x} ;{:s}\".format(imm32, get_sym(imm32)) \n \n return res", "title": "" }, { "docid": "61a283eb95a1c800fac6209ca3544c31", "score": "0.43853182", "text": "def test_example_17(self):\n\n raw = \"\"\"\n b, r |- p.\n b, s |- p.\n |- q.\n assumption(a).\n contrary(a, p).\n contrary(b, q).\n \"\"\"\n parser = ABA_Parser(raw)\n parser.parse()\n aba = parser.construct_aba()\n self.assertCountEqual([x[0].root for x in aba.arguments], ['q', 'a', 'b'])", "title": "" }, { "docid": "c4f4e23ad7ec51178920a6263c058379", "score": "0.43848687", "text": "def putM(op, a, b, c):\n if b == R0: putInstr(op + ' ' + a + ', ' + str(c))\n else: putInstr(op + ' ' + a + ', ' + str(c) + '(' + b + ')')", "title": "" }, { "docid": "5d0fae45a2fa436885d4dc81d98a3bde", "score": "0.43838805", "text": "def combine(a, b):\n new = HuffmanNode(char_less(a.char, b.char),a.freq + b.freq)\n if comes_before(a, b):\n new.set_left(a)\n new.set_right(b)\n else:\n new.set_left(b)\n new.set_right(a)\n return(new)", "title": "" }, { "docid": "27e37b322076fd1c6e0c633563e505b9", "score": "0.43761104", "text": "def sing(self):\n print('\\a')\n print('\\a')\n print('\\a')\n print('\\a')\n print('\\a')", "title": "" }, { "docid": "833f684aea14b24f5373af9df5cc957d", "score": "0.43746376", "text": "def split_alt_r(self):\r\n # your code here\r\n return", "title": "" }, { "docid": "b574a91478e2c773f7e641504e052b51", "score": "0.43745512", "text": "def treeify(body):", "title": "" }, { "docid": "15b4e1f62f6a402424145f1ec8d60776", "score": "0.43720025", "text": "def inverse(state):\n for c in state:\n if c == \"\\\\\":\n yield \"/\"\n elif c == \"/\":\n yield \"\\\\\"\n else:\n yield c", "title": "" }, { "docid": "003fc701ff0d5d9d08e3102dbc09872d", "score": "0.4371247", "text": "def safejoin(a, b):\n import os\n #print 'safejoin()'\n #print ' a: %r' % a\n a = os.path.abspath(a)\n if a[-1:] != os.sep:\n a += os.sep\n #print ' a: %r' % a\n #print ' b: %r' % b\n c = os.path.abspath(os.path.join(a, b))\n\n if len(c) < len(a):\n # Special case - c is a directory, but does not yet end with /\n if c[-1:] != os.sep:\n c += os.sep\n #print ' c: %r' % c\n if c[:len(a)] != a:\n raise Exception('Resulting path 
%r is not within parent path %r' %\n (c, a))\n return os.path.abspath(c)", "title": "" }, { "docid": "8b29577ca5d9ef954e3c9ae8e2304216", "score": "0.43709472", "text": "def border(self):\n # pylint: disable=R0912\n # Too many branches (17/12)\n topright = self.glyphs.get('top-right', u'*')\n thoriz = self.glyphs.get('top-horiz', u'-') * (max(0, self.width - 2))\n topleft = self.glyphs.get('top-left', u'/')\n leftvert = self.glyphs.get('left-vert', u'|')\n rightvert = self.glyphs.get('right-vert', u'|')\n botleft = self.glyphs.get('bot-left', u'\\\\')\n bhoriz = self.glyphs.get('bot-horiz', u'-') * (max(0, self.width - 2))\n botright = self.glyphs.get('bot-right', u'/')\n rstr = u''\n for row in range(0, self.height):\n for col in range(0, self.width):\n if col == 0 or col == self.width - 1:\n rstr += (self.pos(row, col) + topleft\n if row == 0 and col == 0 else\n self.pos(row, col) + botleft\n if row == self.height - 1 and col == 0 else\n self.pos(row, col) + topright\n if row == 0 else\n self.pos(row, col) + botright\n if row == self.height - 1 else\n self.pos(row, col) + leftvert\n if col == 0 else\n self.pos(row, col) + rightvert\n if col == self.width - 1 else\n u'')\n elif row == 0:\n # top row (column 1)\n if thoriz == u'':\n if topright != u'':\n # prepare for top-right, (horiz skipped)\n rstr += self.pos(row, max(0, self.width - 1))\n else:\n rstr += thoriz\n rstr += topright\n break\n elif row == self.height - 1:\n # bottom row (column 1)\n if bhoriz == u'':\n if botright != u'':\n # prepare for bot-right, (horiz skipped)\n rstr += self.pos(row, max(0, self.width - 1))\n else:\n # horizontal line\n rstr += bhoriz\n # bot-right\n rstr += botright\n break\n return (self.colors.get('border', u'') + rstr +\n self.colors.get('normal', u''))", "title": "" }, { "docid": "3cd1dc6f50648e20581dfa318919ae4d", "score": "0.43678486", "text": "def main():\n # dfa = Dfa.from_atom(\"bab\")\n # dfa = dfa.complement()\n # dfa.to_png()\n\n dfa = Dfa.from_regex(\"b*a\")\n dfa.to_png()", "title": "" }, { "docid": "1285d43b34f3767778f33772ec6adec7", "score": "0.4364452", "text": "def road_runner(path_a, path_b, encoding=UTF, shouldPrint=False, file=None):\n\n gamma = 2.0\n\n # Remove new lines and comments\n file_a = io.open(path_a, mode='r', encoding=encoding).read().replace('\\n', '')\n file_a = re.sub('(<!--.*?-->)', '', file_a, flags=re.DOTALL)\n\n file_b = io.open(path_b, mode='r', encoding=encoding).read().replace('\\n', '')\n file_b = re.sub('(<!--.*?-->)', '', file_b, flags=re.DOTALL)\n\n html_a = BeautifulSoup(file_a, features='html.parser')\n html_b = BeautifulSoup(file_b, features='html.parser')\n\n clean_html(html_a)\n clean_html(html_b)\n\n body_a = html_a.find('body')\n body_b = html_b.find('body')\n\n text_a = body_a.__str__()\n text_b = body_b.__str__()\n\n tags_a = list(re.finditer(HTML_TAGS_REGEX, text_a))\n tags_b = list(re.finditer(HTML_TAGS_REGEX, text_b))\n\n stack_a = []\n stack_b = []\n\n index_a = 0\n index_b = 0\n\n counter_same = 0\n max_opening_size_diff = abs(len(tags_a) - len(tags_b)) * gamma\n\n if shouldPrint: print(f'{len(tags_a)} : {len(tags_b)} ({min(len(tags_a), len(tags_b)) * 100 / max(len(tags_a), len(tags_b)):.2f} %)')\n if shouldPrint: print('-----------------------------------------------------')\n\n while index_a < len(tags_a) and index_b < len(tags_b):\n\n tag_a = tags_a[index_a]\n tag_b = tags_b[index_b]\n\n tag_a_name = extract_tag_name(tag_a)\n tag_b_name = extract_tag_name(tag_b)\n\n opening_a_size = 0\n opening_b_size = 0\n if 
re.match(HTML_OPENING_TAGS_REGEX, tag_a_name) and re.match(HTML_OPENING_TAGS_REGEX,\n tag_b_name) and tag_a_name == tag_b_name:\n opening_a_size, opening_b_size, _ = count_same_tags(tag_a_name, index_a, index_b, tags_a, tags_b)\n\n # Tags match\n if tag_a_name == tag_b_name and len(stack_a) == len(stack_b) and not \\\n (re.match(HTML_OPENING_TAGS_REGEX, tag_a_name) and re.match(HTML_OPENING_TAGS_REGEX, tag_b_name) and\n abs(opening_a_size - opening_b_size) >= max_opening_size_diff):\n\n if re.match(HTML_OPENING_TAGS_REGEX, tag_a_name):\n stack_a.append(tag_a)\n stack_b.append(tag_b)\n\n elif re.match(HTML_CLOSING_TAGS_REGEX, tag_a_name):\n start_tag_a = stack_a.pop()\n start_tag_b = stack_b.pop()\n\n if shouldPrint: print(\n f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5} * {opening_a_size:>4} {opening_b_size:>4}')\n counter_same += 1\n\n # SELF-CLOSING and SELF-CLOSING\n elif re.match(HTML_SELF_CLOSING_REGEX, tag_a_name) and re.match(HTML_SELF_CLOSING_REGEX, tag_b_name):\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n tags_b.insert(index_b, None)\n index_a += 1\n index_b += 1\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n tags_a.insert(index_a, None)\n\n # SELF-CLOSING and OPENING\n elif re.match(HTML_SELF_CLOSING_REGEX, tag_a_name) and re.match(HTML_OPENING_TAGS_REGEX, tag_b_name):\n tags_b.insert(index_b, None)\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n\n # OPENING and SELF-CLOSING\n elif re.match(HTML_OPENING_TAGS_REGEX, tag_a_name) and re.match(HTML_SELF_CLOSING_REGEX, tag_b_name):\n tags_a.insert(index_a, None)\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n\n # SELF-CLOSING and CLOSING\n elif re.match(HTML_SELF_CLOSING_REGEX, tag_a_name) and re.match(HTML_CLOSING_TAGS_REGEX, tag_b_name):\n tags_b.insert(index_b, None)\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n\n # CLOSING and SELF-CLOSING\n elif re.match(HTML_CLOSING_TAGS_REGEX, tag_a_name) and re.match(HTML_SELF_CLOSING_REGEX, tag_b_name):\n tags_a.insert(index_a, None)\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n\n # OPENING and CLOSING\n elif re.match(HTML_OPENING_TAGS_REGEX, tag_a_name) and re.match(HTML_CLOSING_TAGS_REGEX, tag_b_name):\n tags_b.insert(index_b, None)\n stack_a.append(tag_a)\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n\n # CLOSING and OPENING\n elif re.match(HTML_CLOSING_TAGS_REGEX, tag_a_name) and re.match(HTML_OPENING_TAGS_REGEX, tag_b_name):\n tags_a.insert(index_a, None)\n stack_b.append(tag_b)\n if shouldPrint: print(f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n\n # CLOSING and CLOSING\n elif re.match(HTML_CLOSING_TAGS_REGEX, tag_a_name) and re.match(HTML_CLOSING_TAGS_REGEX, tag_b_name):\n if len(stack_a) > len(stack_b):\n start_tag_a = stack_a.pop()\n tags_b.insert(index_b, None)\n if shouldPrint: print(\n f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n\n else:\n start_tag_b = stack_b.pop()\n tags_a.insert(index_a, None)\n if shouldPrint: print(\n 
f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n\n # OPENING and OPENING\n elif re.match(HTML_OPENING_TAGS_REGEX, tag_a_name) and re.match(HTML_OPENING_TAGS_REGEX, tag_b_name):\n\n # find closest equal opening tag\n # score_a = find_closest_equal_opening_tag(tag_a_name, index_b, tags_b)\n # score_b = find_closest_equal_opening_tag(tag_b_name, index_a, tags_a)\n\n # find closest equal closing tag\n score_a = find_closest_equal_closing_tag(tag_a_name, index_a, tags_a)\n score_b = find_closest_equal_closing_tag(tag_b_name, index_b, tags_b)\n\n if score_a < score_b:\n\n closing_depth = len(stack_a)\n closing_tag = '</' + tag_a_name[1:]\n\n stack_a.append(tag_a_name)\n while tag_a_name != closing_tag or len(stack_a) != closing_depth:\n tags_b.insert(index_b, None)\n\n if shouldPrint: print(\n f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n\n index_a += 1\n index_b += 1\n\n tag_a = tags_a[index_a]\n tag_a_name = extract_tag_name(tag_a)\n\n if re.match(HTML_OPENING_TAGS_REGEX, tag_a_name):\n stack_a.append(tag_a_name)\n\n elif re.match(HTML_CLOSING_TAGS_REGEX, tag_a_name):\n stack_a.pop()\n\n tags_b.insert(index_b, None)\n if shouldPrint: print(\n f'{f\"({len(stack_a)})\":<5} {tag_a_name:>10} : {\"None\":<10} {f\"({len(stack_b)})\":>5}')\n\n index_a += 1\n index_b += 1\n\n else:\n\n closing_depth = len(stack_b)\n closing_tag = '</' + tag_b_name[1:]\n\n stack_b.append(tag_b_name)\n while tag_b_name != closing_tag or len(stack_b) != closing_depth:\n tags_a.insert(index_a, None)\n\n if shouldPrint: print(\n f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n\n index_a += 1\n index_b += 1\n\n tag_b = tags_b[index_b]\n tag_b_name = extract_tag_name(tag_b)\n\n if re.match(HTML_OPENING_TAGS_REGEX, tag_b_name):\n stack_b.append(tag_b_name)\n\n elif re.match(HTML_CLOSING_TAGS_REGEX, tag_b_name):\n stack_b.pop()\n\n tags_a.insert(index_a, None)\n if shouldPrint: print(\n f'{f\"({len(stack_a)})\":<5} {\"None\":>10} : {tag_b_name:<10} {f\"({len(stack_b)})\":>5}')\n\n index_a += 1\n index_b += 1\n\n continue\n\n else:\n if shouldPrint: print(\"ERROR: Tags don't match with any criterion.\")\n\n index_a += 1\n index_b += 1\n\n if shouldPrint: print('\\n-----------------------------------------------------')\n\n tags_a.append(None)\n tags_b.append(None)\n\n indentation = 0\n tags = []\n\n index_a = 0\n index_b = 0\n\n while index_a < len(tags_a) - 1 and index_b < len(tags_b) - 1:\n\n tag_a = Tag(tags_a[index_a], tags_a[index_a + 1], text_a)\n tag_b = Tag(tags_b[index_b], tags_b[index_b + 1], text_b)\n\n if tag_a is not None and tag_b is not None and tag_a.__eq__(tag_b):\n\n if tag_a.word == tag_b.word:\n tag_a.placeholder = tag_a.word\n tag_b.placeholder = tag_b.word\n else:\n tag_a.placeholder = '#TEXT'\n tag_b.placeholder = '#TEXT'\n\n indentation = save_tag(tag_a, indentation, tags)\n\n elif tag_a is not None:\n\n tag_a.placeholder = tag_a.word\n tag_a.optional = True\n\n indentation = save_tag(tag_a, indentation, tags)\n\n elif tag_b is not None:\n\n tag_b.placeholder = tag_b.word\n tag_b.optional = True\n\n indentation = save_tag(tag_b, indentation, tags)\n\n else:\n if shouldPrint: print('ERROR: Both tags are None.')\n\n index_a += 1\n index_b += 1\n\n index_tag = 0\n while index_tag < len(tags) - 1:\n\n if tags[index_tag].__is_closing__(tags[index_tag + 1]):\n prev_index, length = find_previous_index_new(tags[index_tag + 1], index_tag, tags)\n\n update_list: List = [None] * 
2 * (length + 1)\n matching = True\n\n for i in range(prev_index, prev_index + length + 1):\n tag_p: Tag = tags[i]\n tag_a: Tag = tags[i + length + 1]\n\n tag_new = Tag(None, None, None)\n tag_new.indent = tag_p.indent\n tag_new.quantity = tag_p.quantity\n tag_new.name = tag_p.name\n tag_new.prefix = ''\n tag_new.suffix = ''\n tag_new.optional = tag_p.optional\n\n if tag_p.name == tag_a.name \\\n and tag_p.placeholder == tag_a.placeholder \\\n and tag_p.indent == tag_a.indent:\n tag_new.placeholder = tag_p.placeholder\n update_list[i - prev_index] = tag_new\n\n elif tag_p.name == tag_a.name \\\n and tag_p.indent == tag_a.indent:\n tag_new.placeholder = '#TEXT'\n update_list[i - prev_index] = tag_new\n\n else:\n matching = False\n break\n\n if matching:\n\n update_list[0].prefix = '('\n update_list[index_tag - prev_index].suffix = ')*'\n\n tmp = 0\n for i in range(prev_index, prev_index + 2 * (length + 1)):\n if update_list[i - prev_index] is not None:\n tags[i] = update_list[i - prev_index]\n else:\n del tags[i - tmp]\n tmp += 1\n\n else:\n index_tag += 1\n else:\n index_tag += 1\n\n if file is not None:\n with open(file, 'w', encoding=encoding) as f:\n print(create_re(tags), file=f)\n else:\n print(create_re(tags))\n\n if shouldPrint: print('-----------------------------------------------------')\n\n if shouldPrint: print(f'{counter_same} : {len(tags_a)} ({counter_same * 100 / len(tags_a):.2f} %)')\n\n if shouldPrint: print('-----------------------------------------------------')", "title": "" }, { "docid": "18064c9d855583fe28f54d1b14679edf", "score": "0.4355161", "text": "def crl():", "title": "" }, { "docid": "6c1e46bab91a9f8816701cc075c6e476", "score": "0.4351395", "text": "def test_follow_tree_to_leaf(text: str, code2char: str):\n weights = Counter(text)\n root = build_tree(weights)\n codes = json.loads(code2char)\n for code in codes:\n assert follow_tree(root, code) == codes[code]", "title": "" } ]
84c2e88611ea74f7a6840290521a80ec
Place schematic components determined by the layout (keyswitches and diodes)
[ { "docid": "b98eb24e9d12bffc66b188815d5ada34", "score": "0.81252825", "text": "def place_schematic_components(self):\n switch_tpl = self.jinja_env.get_template(\"schematic/keyswitch.tpl\")\n\n component_count = 0\n components_section = \"\"\n\n # Place keyswitches and diodes\n for key in self.keyboard.keys:\n placement_x = int(600 + key.x_unit * 800)\n placement_y = int(800 + key.y_unit * 500)\n\n components_section = components_section + switch_tpl.render(\n num=component_count,\n x=placement_x,\n y=placement_y,\n rowNum=key.row,\n colNum=key.col,\n keywidth=unit_width_to_available_footprint(key.width),\n )\n components_section = components_section + \"\\n\"\n component_count += 1\n\n return components_section", "title": "" } ]
[ { "docid": "16fd92a4fdbc4fc5103d9521d2477e9b", "score": "0.7992899", "text": "def place_layout_components(self):\n switch = self.jinja_env.get_template(\"layout/keyswitch.tpl\")\n diode = self.jinja_env.get_template(\"layout/diode.tpl\")\n component_count = 0\n components_section = \"\"\n\n # Load templates for netnames\n diodetpl = self.jinja_env.get_template(\"layout/diodenetname.tpl\")\n rowtpl = self.jinja_env.get_template(\"layout/rownetname.tpl\")\n coltpl = self.jinja_env.get_template(\"layout/colnetname.tpl\")\n viatpl = self.jinja_env.get_template(\"layout/via.tpl\")\n tracetpl = self.jinja_env.get_template(\"layout/trace.tpl\")\n\n # Place keyswitches, diodes, vias and traces\n key_pitch = 19.05\n diode_offset = [-6.35, 8.89]\n col_via_offsets = [[0, -2.03], [0, 12.24]]\n row_via_offsets = [[-9.68, 9.83], [4.6, 9.83]]\n diode_trace_offsets = [[-6.38, 2.54], [-6.38, 7.77]]\n\n for key in self.keyboard.keys:\n # Place switch\n ref_x = -100 + key.x_unit * key_pitch\n ref_y = 17.78 + key.y_unit * key_pitch\n components_section = (\n components_section\n + switch.render(\n num=component_count,\n x=ref_x,\n y=ref_y,\n diodenetnum=key.diodenetnum,\n diodenetname=diodetpl.render(diodenum=key.num),\n colnetnum=key.colnetnum,\n colnetname=coltpl.render(colnum=key.col),\n keywidth=unit_width_to_available_footprint(key.width),\n )\n + \"\\n\"\n )\n # Place diode\n diode_x = ref_x + diode_offset[0]\n diode_y = ref_y + diode_offset[1]\n components_section = (\n components_section\n + diode.render(\n num=component_count,\n x=diode_x,\n y=diode_y,\n diodenetnum=key.diodenetnum,\n diodenetname=diodetpl.render(diodenum=key.num),\n rownetnum=key.rownetnum,\n rownetname=rowtpl.render(rownum=key.row),\n )\n + \"\\n\"\n )\n\n # Place vias\n for offset in col_via_offsets:\n via_x = ref_x + offset[0]\n via_y = ref_y + offset[1]\n components_section = (\n components_section\n + viatpl.render(x=via_x, y=via_y, netnum=key.colnetnum)\n + \"\\n\"\n )\n\n for offset in row_via_offsets:\n via_x = ref_x + offset[0]\n via_y = ref_y + offset[1]\n components_section = (\n components_section\n + viatpl.render(x=via_x, y=via_y, netnum=key.rownetnum)\n + \"\\n\"\n )\n\n # Place traces\n components_section = (\n components_section\n + tracetpl.render(\n x1=ref_x + row_via_offsets[0][0],\n y1=ref_y + row_via_offsets[0][1],\n x2=ref_x + row_via_offsets[1][0],\n y2=ref_y + row_via_offsets[1][1],\n layer=\"B.Cu\",\n netnum=key.rownetnum,\n )\n + \"\\n\"\n )\n\n components_section = (\n components_section\n + tracetpl.render(\n x1=ref_x + col_via_offsets[0][0],\n y1=ref_y + col_via_offsets[0][1],\n x2=ref_x + col_via_offsets[1][0],\n y2=ref_y + col_via_offsets[1][1],\n layer=\"F.Cu\",\n netnum=key.colnetnum,\n )\n + \"\\n\"\n )\n\n components_section = (\n components_section\n + tracetpl.render(\n x1=ref_x + diode_trace_offsets[0][0],\n y1=ref_y + diode_trace_offsets[0][1],\n x2=ref_x + diode_trace_offsets[1][0],\n y2=ref_y + diode_trace_offsets[1][1],\n layer=\"B.Cu\",\n netnum=key.diodenetnum,\n )\n + \"\\n\"\n )\n\n # Place stabilizer mount holes, if necessary\n\n component_count += 1\n\n return components_section, component_count", "title": "" }, { "docid": "7f63a52a6e5d1cff65dc9d65069e6a39", "score": "0.6315842", "text": "def _generate_schematic(self):\n self._schematic_scr = [\n 'ADD %s %s (%s %s);' % (\n self.footprint,\n self.name,\n float_to_str(self.coord_in[0]),\n float_to_str(self.coord_in[1])\n ),\n self.diode.schematic_scr\n ]\n\n if self.left_key:\n if self.left_key.sch_pin[1] == self.sch_pin[1]:\n 
self._schematic_scr.append('NET ROW%s (%s %s) (%s %s);' % (\n self.coord[1] * -1,\n float_to_str(self.left_key.sch_pin[0]),\n float_to_str(self.left_key.sch_pin[1]),\n float_to_str(self.sch_pin[0]),\n float_to_str(self.sch_pin[1])\n ))\n\n else:\n self._schematic_scr.append(\n 'ADD HEADER-1P-KEYBOARD@Headers PROW%s R90 (%s %s);\\n' % (\n self.coord[1] * -1,\n self.sch_pin[0],\n self.sch_pin[1]\n ) + 'JUNCTION (%s %s);\\n' % (\n self.sch_pin[0],\n self.sch_pin[1]\n ) + 'NAME ROW%s (%s %s);\\n' % (\n self.coord[1] * -1,\n self.sch_pin[0],\n self.sch_pin[1]\n )\n )", "title": "" }, { "docid": "e97c02fb7138dc82e3de525f0940fa67", "score": "0.5966253", "text": "def create_layout(self):\n self.create_central_bus()\n self.route_pre_charge_to_bitcell_array()\n self.route_between_sense_amp_and_tri_gate()\n self.route_tri_gate_out()\n\n self.route_between_wordline_driver_and_bitcell_array()\n self.route_column_address_lines()\n self.route_msf_address_to_row_decoder()\n self.route_control_lines()\n if(self.num_banks > 1):\n self.route_bank_select_or2_gates()\n self.route_power_rail_vdd()\n self.route_power_rail_gnd()\n\n self.offset_all_coordinates()", "title": "" }, { "docid": "3c3306b1ec3af786a05fb189e5bcb55e", "score": "0.572596", "text": "def __do_layout(self):\n s1 = wx.BoxSizer(wx.VERTICAL)\n\n s2 = wx.BoxSizer(wx.VERTICAL)\n s2.Add(self.textctrl_1, 0, wx.EXPAND | wx.DOWN, 5)\n s2.Add(self.textctrl_2, 1, wx.EXPAND)\n s1.Add(s2, 1, wx.EXPAND | wx.ALL, 5)\n s1.Add(wx.StaticLine(self), 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)\n\n s3 = wx.GridSizer(6, 4, 0, 0) # 6 filas, 4 columnas\n # botones fila 1\n s3.Add(self.button_16, 0, wx.EXPAND)\n s3.Add(self.button_20, 0, wx.EXPAND)\n s3.AddSpacer(1)\n s3.AddSpacer(1)\n # botones fila 2\n s3.Add(self.button_19, 0, wx.EXPAND)\n s3.Add(self.button_18, 0, wx.EXPAND)\n s3.Add(self.button_17, 0, wx.EXPAND)\n s3.Add(self.button_14, 0, wx.EXPAND)\n # botones fila 3\n s3.Add(self.button_7, 0, wx.EXPAND)\n s3.Add(self.button_8, 0, wx.EXPAND)\n s3.Add(self.button_9, 0, wx.EXPAND)\n s3.Add(self.button_13, 0, wx.EXPAND)\n # botones fila 4\n s3.Add(self.button_4, 0, wx.EXPAND)\n s3.Add(self.button_5, 0, wx.EXPAND)\n s3.Add(self.button_6, 0, wx.EXPAND)\n s3.Add(self.button_12, 0, wx.EXPAND)\n # botones fila 5\n s3.Add(self.button_1, 0, wx.EXPAND)\n s3.Add(self.button_2, 0, wx.EXPAND)\n s3.Add(self.button_3, 0, wx.EXPAND)\n s3.Add(self.button_11, 0, wx.EXPAND)\n # botones fila 6\n s3.Add(self.button_15, 0, wx.EXPAND)\n s3.Add(self.button_0, 0, wx.EXPAND)\n s3.Add(self.button_10, 0, wx.EXPAND)\n s3.Add(self.button_21, 0, wx.EXPAND)\n\n s1.Add(s3, 3, wx.EXPAND | wx.ALL, 5)\n self.SetSizer(s1)\n s1.Fit(self)\n self.Layout()", "title": "" }, { "docid": "2eb022d5376e30434b4b198b140c0c3b", "score": "0.57105833", "text": "def setup_layout_constraints(self):\n\n tri_gate_min_point = (self.tri_gate_array_offset.y - 6 * drc[\"minwidth_metal3\"]\n - self.tri_gate_array.height) \n\n self.min_point = min(tri_gate_min_point, self.min_point)\n self.max_point = self.precharge_array_position.y + self.precharge_array.height\n\n # VDD constraints\n gap_between_bitcell_array_and_vdd = 3 * drc[\"minwidth_metal1\"]\n self.right_vdd_x_offset = self.bitcell_array.width + gap_between_bitcell_array_and_vdd\n self.right_vdd_position = vector(self.right_vdd_x_offset, self.min_point)\n self.add_layout_pin(text=\"vdd\",\n layer=\"metal1\", \n offset=[self.right_vdd_x_offset, self.min_point], \n width=self.power_rail_width,\n height=self.max_point - self.min_point)\n # the width of the metal rail is 10 
times minwidth metal1 and the gap\n # from the edge of the decoder is another 2 times minwidth metal1\n\n self.left_vdd_x_offset = (- 14 * drc[\"minwidth_metal1\"]\n + min(self.msf_address_offset.x, \n self.decoder_position.x))\n self.left_vdd_position = vector(self.left_vdd_x_offset, self.min_point)\n self.add_layout_pin(text=\"vdd\",\n layer=\"metal1\", \n offset=[self.left_vdd_x_offset, self.min_point], \n width=self.power_rail_width,\n height=self.max_point - self.min_point)\n\n self.left_gnd_x_offset = (self.left_gnd_rail_gap / 2\n - self.start_of_left_central_bus)\n self.left_gnd_position = vector(self.left_gnd_x_offset, self.min_point)\n self.add_layout_pin(text=\"gnd\",\n layer=\"metal2\", \n offset=self.left_gnd_position , \n width=self.power_rail_width,\n height=self.max_point - self.min_point)\n\n # Height and Width of the entire bank\n self.height = self.max_point - self.min_point\n self.width = (self.right_vdd_x_offset - self.left_vdd_x_offset\n + self.power_rail_width)", "title": "" }, { "docid": "0098a33902b4df99e6f87b4693112504", "score": "0.56906265", "text": "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Hand\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "title": "" }, { "docid": "2541351e4a77ea312cc659824741c40f", "score": "0.5644594", "text": "def create_layout_nets(self):\n addnets = \"\"\n declarenets = \"\"\n\n # Create a declaration and addition for each net\n for netnum in range(0, 1 + self.nets.number_of_nets()):\n netname = self.nets.get_net_name(netnum)\n declarenets = (\n declarenets + \" (net \" + str(netnum + 1) + \" \" + netname + \")\\n\"\n )\n addnets = addnets + \" (add_net \" + netname + \")\\n\"\n\n # make each key in the board aware in which row/column/diode net it resides\n rowtpl = self.jinja_env.get_template(\"layout/rownetname.tpl\")\n for index, row in enumerate(self.keyboard.rows.blocks):\n rownetname = rowtpl.render(rownum=index)\n for keyindex in row:\n self.keyboard.keys[keyindex].rownetnum = self.nets.get_net_num(\n rownetname\n )\n\n coltpl = self.jinja_env.get_template(\"layout/colnetname.tpl\")\n for index, col in enumerate(self.keyboard.columns.blocks):\n colnetname = coltpl.render(colnum=index)\n for keyindex in col:\n 
self.keyboard.keys[keyindex].colnetnum = self.nets.get_net_num(\n colnetname\n )\n\n diodetpl = self.jinja_env.get_template(\"layout/diodenetname.tpl\")\n for diodenum in range(len(self.keyboard.keys)):\n diodenetname = diodetpl.render(diodenum=diodenum)\n self.keyboard.keys[diodenum].diodenetnum = self.nets.get_net_num(\n diodenetname\n )\n\n nets = self.jinja_env.get_template(\"layout/nets.tpl\")\n\n return nets.render(netdeclarations=declarenets, addnets=addnets)", "title": "" }, { "docid": "398674b6fb9187beda0138f3623730ef", "score": "0.56250286", "text": "def _generate_layout(self):\n\n self.lattice_height = 4 * self.distance - 4\n self.lattice_width = 2 * self.distance - 2\n data_ids = self._data_id_iter()\n ancilla_ids = self._ancilla_id_iter()\n\n self.lattice_dimensions = {\n 'width': self.lattice_width,\n 'height': self.lattice_height\n }\n\n # Determine the position of things\n for y in range(self.lattice_width + 1):\n for x in range(self.lattice_height + 1):\n\n if (x, y) == (x, x + 2) and x % 2 == 1 and y % 8 == 3:\n\n # self._add_node(x, y, ancilla_ids)\n pass\n\n elif (x, y) == (4 * self.distance - y, y) and x % 2 == 1 and y % 8 == 7:\n # self._add_node(x, y, ancilla_ids)\n pass\n\n elif (x, y) > (x, x) or (x, y) > (4 * self.distance - y - 2, y):\n continue\n\n if x % 2 == 0 and y % 2 == 0: # Data\n if (y / 2) % 4 == 1 or (y / 2) % 4 == 2:\n # self._add_node(x, y, data_ids)\n if (x / 2) % 4 == 2 or (x / 2) % 4 == 3:\n self._add_node(x, y, data_ids)\n\n else:\n if (x / 2) % 4 == 0 or (x / 2) % 4 == 1:\n self._add_node(x, y, data_ids)\n\n if x % 4 == 1 and y % 4 == 3:\n self._add_node(x, y, ancilla_ids)\n\n if y == 0:\n if x % 8 == 5:\n self._add_node(x, y, ancilla_ids)\n\n # elif y % 4 == 3 and x % 4 == 1:\n # self._add_node(x, y, ancilla_ids)\n\n return self.layout", "title": "" }, { "docid": "c4d9e1b7f2641ee1b94a5738013e5ef6", "score": "0.5602552", "text": "def construct_inputs(self):\n \n def add_things_to_frame(frame, layout_number, header_text, i):\n l = Label(frame, text=header_text, font=\"-size 13\")\n l.grid(row=i,columnspan=2, sticky=SW, pady=3)\n i += 1\n for a, b in self._loop_gen(layout_number):\n if b == 'A': # text with range\n Label(frame, text=ns(a)).grid(row=i, sticky=W)\n x = InputsRangeWidget(frame, self.settings[a])\n x.grid(row=i, column=1, sticky=W)\n elif b == 'B': # text without range\n Label(frame, text=ns(a)).grid(row=i, sticky=W)\n x = Entry(frame, textvariable=self.settings[a], width=25)\n x.grid(row=i, column=1, columnspan=2, sticky=W) \n elif b == 'C': # options menu\n x = OptsWidget( frame, ns(a), self.settings[a], \n self.units[a], row=i )\n x.grid(row=i, column=0, sticky=W)\n elif b == 'D': # check button\n l = Label(frame, text=ns(a)).grid(row=i, sticky=W)\n r = Checkbutton( frame, variable=self.settings[a],\n onvalue=True, offvalue=False )\n r.grid(row=i, column=1)\n elif b == 'E': # load button\n l = Label(frame, text=ns(a)).grid(row=i, sticky=W)\n f = FileFindWidget( frame, value=self.settings[a], \n mode='load')\n f.grid(row=i, column=1)\n elif b == 'F': # save button\n l = Label(frame, text=ns(a)).grid(row=i, sticky=W)\n f = FileFindWidget( frame, value=self.settings[a], \n mode='dir')\n f.grid(row=i, column=1)\n i += 1\n return i\n \n px = 5 # padding between frames\n py = 2 # padding between frames\n \n \n F1 = Frame(self.master)\n Label(F1, text='Geochemical Inputs').grid(row=0,columnspan=3)\n i = 1\n i = add_things_to_frame(F1, 10, 'Second Gas End-member', i)\n i = add_things_to_frame(F1, 11, 'Soil Gas End-member', i)\n i = 
add_things_to_frame(F1, 12, 'Mixed Gas', i)\n i = add_things_to_frame(F1, 17, 'Cave Air', i)\n F1.pack(side='left', fill=Y, anchor='n', padx=px, pady=py)\n \n F2 = Frame(self.master)\n Label(F2, text=' ').grid(row=0,columnspan=3)\n i = 1\n i = add_things_to_frame(F2, 16, 'Soil Metals (Chloride Salts)', i)\n i = add_things_to_frame(F2, 13, 'Bedrock Chemistry', i)\n i = add_things_to_frame(F2, 14, 'Bedrock Dissolution Conditions', i)\n i = add_things_to_frame(F2, 15, 'General', i)\n F2.pack(side='left', fill=Y, anchor='n', padx=px, pady=py)\n self.F2 = F2\n\n F3 = Frame(self.master)\n Label(F3, text='Model Scripting Options').grid(row=0,columnspan=3)\n i = 1\n i = add_things_to_frame(F3, 2, 'Scripting Options', i)\n i = add_things_to_frame(F3, 3, 'Additional PHREEQC output', i)\n i = add_things_to_frame(F3, 4, 'File IO Settings', i)\n\n # add run button\n RunButton = Button( F3, text=\"Run!\", command= lambda : \\\n self._run_models())\n RunButton.grid(row=i, columnspan=3, sticky=S, pady=20)\n i = i + 1\n\n # add link to output GUI\n LinkButton = Button( F3, text=\"Open Output GUI\", command= lambda : \\\n CCAnalyseGUI(Toplevel(self.master)) )\n LinkButton.grid(row=i, columnspan=3, sticky=S)\n \n F3.pack(side='left', anchor='n', padx=px, pady=py)\n self.F3 = F3", "title": "" }, { "docid": "b5cb40d0c1c2773578af1430e92d41d6", "score": "0.55725574", "text": "def _perform_layout(self):\n raise NotImplementedError", "title": "" }, { "docid": "01fe5947c1d934f35a381a60dda7e107", "score": "0.5549452", "text": "def _do_layout(self):\n left = self.left\n right = self.right\n top = self.top\n bottom = self.bottom\n center = self.center\n\n # Calculate the bounds of the resizable center container, then set\n # the bounds on all the containers. center_x,_y represent the (x,y)\n # coordinate of the lower-left corner of the center region;\n # center_x2 and center_y2 represent the upper-right corner of the\n # center region.\n\n if self.left.visible:\n center_x = self.left_width\n else:\n center_x = self.x\n if self.bottom.visible:\n center_y = self.bottom_height\n else:\n center_y = self.y\n if self.right.visible:\n center_x2 = self.width - self.right_width - 1\n else:\n center_x2 = self.width\n if self.top.visible:\n center_y2 = self.height - self.top_height - 1\n else:\n center_y2 = self.height\n\n left.outer_position = [0.0, center_y]\n left.outer_bounds = [self.left_width, center_y2 - center_y + 1]\n\n right.outer_position = [center_x2 + 1, center_y]\n right.outer_bounds = [self.right_width, left.height]\n\n bottom.outer_position = [center_x, 0.0]\n bottom.outer_bounds = [center_x2 - center_x + 1, self.bottom_height]\n\n top.outer_position = [center_x, center_y2 + 1]\n top.outer_bounds = [bottom.width, self.top_height]\n\n center.outer_position = [center_x, center_y]\n center.outer_bounds = [bottom.width, left.height]\n\n for slot in self._frame_slots.values():\n if slot.visible:\n preferred_size = slot.get_preferred_size()\n if \"h\" not in slot.resizable:\n slot.outer_width = preferred_size[0]\n if \"v\" not in slot.resizable:\n slot.outer_height = preferred_size[1]\n slot.do_layout()\n\n return", "title": "" }, { "docid": "63e910975aae507227ddd6233d6a4e58", "score": "0.55484825", "text": "def create_h_layouts(self, key):\n\n if not self.horizontal_layout_list:\n self.horizontal_layout_list.append(QtWidgets.QHBoxLayout())\n self.button_v_layout.addLayout(self.horizontal_layout_list[-1])\n\n if self.horizontal_layout_list[-1].count() <= 3:\n 
self.horizontal_layout_list[-1].addWidget(self.sound_buttons[key])\n else:\n self.horizontal_layout_list.append(QtWidgets.QHBoxLayout())\n self.button_v_layout.addLayout(self.horizontal_layout_list[-1])\n self.horizontal_layout_list[-1].addWidget(self.sound_buttons[key])", "title": "" }, { "docid": "3153f743803c997e5486edfc09883257", "score": "0.5515778", "text": "def ConstructLayout(self):\n self.implantLabels = []\n self.respecBar = []\n self.totalLabels = []\n iconsize = 32\n buttonSize = 24\n boxWidth = 6\n boxHeight = 12\n boxMargin = 1\n boxSpacing = 1\n numBoxes = const.respecMaximumAttributeValue - const.respecMinimumAttributeValue\n barWidth = numBoxes * boxSpacing + 2 * boxMargin + numBoxes * boxWidth - 1\n barHeight = boxHeight + 2 * boxMargin\n backgroundColor = (0.0, 0.0, 0.0, 0.0)\n colorDict = {uicls.ClickableBoxBar.COLOR_UNSELECTED: (0.2, 0.2, 0.2, 1.0),\n uicls.ClickableBoxBar.COLOR_SELECTED: (0.2, 0.8, 0.2, 1.0)}\n headerText = EveLabelMedium(parent=self.sr.main, text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterRespecMessage'), state=uiconst.UI_NORMAL, align=uiconst.TOTOP, padding=8)\n self.headerText = headerText\n if self.readOnly:\n columns = 7\n else:\n columns = 9\n mainGrid = LayoutGrid(parent=self.sr.main, columns=columns, cellPadding=4, left=6, top=6, OnGridSizeChanged=self.OnMainGridSizeChanged)\n self.mainGrid = mainGrid\n for labelPath, colSpan in (('UI/CharacterSheet/CharacterSheetWindow/NavScroll/Attributes', 2),\n ('UI/CharacterSheet/CharacterSheetWindow/Attributes/BaseStatPoints', 1),\n ('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterImplants', 2),\n ('UI/CharacterSheet/CharacterSheetWindow/Attributes/RemappableStat', 1 if self.readOnly else 3),\n ('UI/CharacterSheet/CharacterSheetWindow/Attributes/StatTotal', 1)):\n label = EveLabelMedium(text=localization.GetByLabel(labelPath), align=uiconst.CENTER)\n mainGrid.AddCell(cellObject=label, colSpan=colSpan, cellPadding=(10, 2, 10, 2))\n\n line = Line(align=uiconst.TOTOP)\n mainGrid.AddCell(cellObject=line, colSpan=mainGrid.columns)\n for x in xrange(5):\n uicontrols.Icon(parent=mainGrid, width=iconsize, height=iconsize, size=iconsize, icon=self.attributeIcons[x], align=uiconst.TOPLEFT)\n EveLabelMedium(text=self.attributeLabels[x], parent=mainGrid, state=uiconst.UI_DISABLED, align=uiconst.CENTERLEFT)\n minText = localization.formatters.FormatNumeric(const.respecMinimumAttributeValue, decimalPlaces=0)\n EveLabelMedium(text=minText, parent=mainGrid, state=uiconst.UI_DISABLED, align=uiconst.CENTER, bold=True)\n icon = uicontrols.Icon(parent=mainGrid, width=32, height=32, size=32, icon=util.IconFile(cfg.invtypes.Get(self.implantTypes[x]).iconID), align=uiconst.TOPLEFT, ignoreSize=True)\n implantLabel = EveLabelMedium(text='0', parent=mainGrid, align=uiconst.CENTERLEFT)\n self.implantLabels.append((implantLabel, icon))\n if not self.readOnly:\n minusText = localization.GetByLabel('UI/Common/Buttons/Minus')\n Button(parent=mainGrid, label=minusText, fixedwidth=buttonSize, func=self.DecreaseAttribute, args=(x,), align=uiconst.CENTERRIGHT)\n bar = Container(parent=mainGrid, align=uiconst.CENTER, width=barWidth, height=barHeight, state=uiconst.UI_PICKCHILDREN)\n bar = uicls.ClickableBoxBar(parent=bar, numBoxes=numBoxes, boxWidth=boxWidth, boxHeight=boxHeight, boxMargin=boxMargin, boxSpacing=boxSpacing, backgroundColor=backgroundColor, colorDict=colorDict)\n bar.OnValueChanged = self.OnMemberBoxClick\n bar.OnAttemptBoxClicked = self.ValidateBoxClick\n 
self.respecBar.append(bar)\n if not self.readOnly:\n plusText = localization.GetByLabel('UI/Common/Buttons/Plus')\n Button(parent=mainGrid, label=plusText, fixedwidth=buttonSize, func=self.IncreaseAttribute, args=(x,), align=uiconst.CENTERLEFT)\n totalLabel = EveLabelMedium(text='0', parent=mainGrid, left=8, align=uiconst.CENTERRIGHT, bold=True)\n self.totalLabels.append(totalLabel)\n\n if not self.readOnly:\n line = Line(align=uiconst.TOTOP)\n mainGrid.AddCell(cellObject=line, colSpan=mainGrid.columns)\n textObj = EveLabelMedium(text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/UnassignedAttributePoints'))\n mainGrid.AddCell(cellObject=textObj, colSpan=6)\n numBoxes = const.respecTotalRespecPoints - const.respecMinimumAttributeValue * 5\n barWidth = numBoxes * boxSpacing + 2 * boxMargin + numBoxes * boxWidth - 1\n unassignedBarParent = Container(align=uiconst.TOPLEFT, width=barWidth, height=barHeight, state=uiconst.UI_PICKCHILDREN)\n mainGrid.AddCell(cellObject=unassignedBarParent, colSpan=2)\n self.sr.unassignedBar = uicls.ClickableBoxBar(parent=unassignedBarParent, numBoxes=numBoxes, boxWidth=boxWidth, boxHeight=boxHeight, boxMargin=boxMargin, boxSpacing=boxSpacing, backgroundColor=backgroundColor, colorDict=colorDict, readonly=True, hintFormat='UI/CharacterSheet/CharacterSheetWindow/Attributes/UnassignedPointsHint')\n self.availableLabel = EveLabelMedium(parent=mainGrid, align=uiconst.CENTERRIGHT, left=8)\n mainGrid.FillRow()\n self.sr.saveWarningText = EveLabelMedium(text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CannotSaveUnassignedPoints'), color=(1.0, 0.0, 0.0, 0.9))\n mainGrid.AddCell(cellObject=self.sr.saveWarningText, colSpan=mainGrid.columns)\n if not self.readOnly:\n uicontrols.ButtonGroup(btns=[[localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/SaveStatChanges'),\n self.SaveChanges,\n (),\n None], [localization.GetByLabel('UI/Common/Buttons/Cancel'),\n self.CloseByUser,\n (),\n None]], parent=self.sr.main, idx=0)", "title": "" }, { "docid": "4d401b3ea1950422a8534719123cc7c8", "score": "0.54922456", "text": "def get_layouts():", "title": "" }, { "docid": "ffda937d0cdebc6b65141c747255ffbc", "score": "0.5483285", "text": "def _draw_layout_helper(self, **kwargs):\n\n lch = self.params['lch']\n wn = self.params['wn']\n wp = self.params['wp']\n nfn_inv = self.params['nfn_inv']\n nfp_inv = self.params['nfp_inv']\n nfn_tinv0 = self.params['nfn_tinv0']\n nfp_tinv0 = self.params['nfp_tinv0']\n nfn_tinv1 = self.params['nfn_tinv1']\n nfp_tinv1 = self.params['nfp_tinv1']\n ndum = self.params['ndum']\n intent = self.params['intent']\n ptap_w = self.params['ptap_w']\n ntap_w = self.params['ntap_w']\n g_width_ntr = self.params['g_width_ntr']\n ds_width_ntr = self.params['ds_width_ntr']\n show_pins = self.params['show_pins']\n\n # get resolution\n res = self.grid.resolution\n\n # make sure all fingers are even number\n if nfn_inv%2 != 0 or nfp_inv%2 != 0 or nfn_tinv0%2 != 0 or nfn_tinv1%2 != 0 or\\\n nfp_tinv0%2 != 0 or nfp_tinv1%2 != 0:\n raise ValueError(\"Need all finger number to be even!\")\n\n # get layer separation space\n layout_info = AnalogBaseInfo(self.grid.copy(), lch, 0)\n m4h_layer = layout_info.mconn_port_layer + 1\n m5v_layer = layout_info.mconn_port_layer + 2\n g_sp_ntr = self.grid.get_num_space_tracks(m4h_layer, g_width_ntr)\n ds_sp_ntr = self.grid.get_num_space_tracks(m4h_layer, ds_width_ntr)\n\n fg_tot = ndum*4 + max(nfn_inv, nfp_inv) + max(nfn_tinv0, nfp_tinv0) + max(nfn_tinv1, 
nfp_tinv1)\n\n # draw transistor rows\n nw_list = [wn]\n pw_list = [wp]\n nth_list = [intent]\n pth_list = [intent]\n ng_tracks = [g_width_ntr]\n pg_tracks = [g_width_ntr]\n nds_tracks = [ds_width_ntr*2]\n pds_tracks = [ds_width_ntr*2]\n n_orientation = ['R0']\n p_orientation = ['MX']\n\n self.draw_base(lch, fg_tot, ptap_w, ntap_w, nw_list,\n nth_list, pw_list, pth_list,\n ng_tracks=ng_tracks, nds_tracks=nds_tracks,\n pg_tracks=pg_tracks, pds_tracks=pds_tracks,\n n_orientations=n_orientation, p_orientations=p_orientation,\n )\n\n # get gate and drain index\n ngate_id = self.make_track_id('nch', 0, 'g', 0, width=g_width_ntr)\n pgate_id = self.make_track_id('pch', 0, 'g', 0, width=g_width_ntr)\n out_id = self.make_track_id('nch', 0, 'ds', 1, width=ds_width_ntr)\n ndrain_id = self.make_track_id('nch', 0, 'ds', 0, width=ds_width_ntr)\n pdrain_id = self.make_track_id('pch', 0, 'ds', 0, width=ds_width_ntr)\n\n # Step1: connect inverter\n # group transistors\n inv_n_ports = self.draw_mos_conn('nch', 0, ndum, nfn_inv, 1, 1)\n inv_p_ports = self.draw_mos_conn('pch', 0, ndum, nfp_inv, 1, 1)\n\n # connect gate\n ng_inv_warr = self.connect_to_tracks(inv_n_ports['g'], ngate_id)\n pg_inv_warr = self.connect_to_tracks(inv_p_ports['g'], pgate_id)\n # connect gate vertically\n vgate_idx = self.grid.coord_to_nearest_track(m5v_layer, ng_inv_warr.lower)\n vgate_tid = TrackID(m5v_layer, vgate_idx)\n inv_in_warr = self.connect_to_tracks([ng_inv_warr, pg_inv_warr], vgate_tid)\n # connect drain\n inv_d_warr = self.connect_to_tracks([inv_n_ports['d'], inv_p_ports['d']], out_id)\n # connect gate\n self.connect_to_substrate('ptap', inv_n_ports['s'])\n self.connect_to_substrate('ntap', inv_p_ports['s'])\n\n # Step2: connect tri-inverter\n nf_inv = max(nfn_inv, nfp_inv)\n nf_tinv0 = max(nfn_tinv0, nfp_tinv0)\n # group transistors\n tinv0_n_ports = self.draw_mos_conn('nch', 0, nf_inv+2*ndum, nfn_tinv0, 1, 1)\n tinv0_p_ports = self.draw_mos_conn('pch', 0, nf_inv+2*ndum, nfp_tinv0, 1, 1)\n tinv1_n_ports = self.draw_mos_conn('nch', 0, nf_tinv0+nf_inv+3*ndum, nfn_tinv1, 1, 1)\n tinv1_p_ports = self.draw_mos_conn('pch', 0, nf_tinv0+nf_inv+3*ndum, nfp_tinv1, 1, 1)\n\n # connect top/bottom MOSs\n # connect gate\n ng_tinv0_warr = self.connect_to_tracks(tinv0_n_ports['g'], ngate_id)\n pg_tinv0_warr = self.connect_to_tracks(tinv0_p_ports['g'], pgate_id)\n # connect gate vertically\n vgate_idx = self.grid.coord_to_nearest_track(m5v_layer, ng_tinv0_warr.lower)\n vgate_tid = TrackID(m5v_layer, vgate_idx)\n # also connect inverter drain\n tinv0_g_warr = self.connect_to_tracks([inv_d_warr, ng_tinv0_warr, pg_tinv0_warr], vgate_tid)\n\n # connect middle MOSs\n ng_tinv1_warr = self.connect_to_tracks(tinv1_n_ports['g'], ngate_id)\n pg_tinv1_warr = self.connect_to_tracks(tinv1_p_ports['g'], pgate_id)\n # connect source\n ns_tinv0_warr = self.connect_to_tracks([tinv0_n_ports['d'], tinv1_n_ports['s']], ndrain_id)\n ps_tinv0_warr = self.connect_to_tracks([tinv0_p_ports['d'], tinv1_p_ports['s']], pdrain_id)\n # connect drain\n tinv_out_warr = self.connect_to_tracks([tinv1_n_ports['d'], tinv1_p_ports['d']], out_id)\n # connect source\n self.connect_to_substrate('ptap', tinv0_n_ports['s'])\n self.connect_to_substrate('ntap', tinv0_p_ports['s'])\n\n # draw dummies\n ptap_wire_arrs, ntap_wire_arrs = self.fill_dummy()\n\n # add pins\n self.add_pin('clk_i', inv_in_warr)\n self.add_pin('clk_o', tinv_out_warr)\n self.add_pin('ctrl', ng_tinv1_warr)\n self.add_pin('ctrl_b', pg_tinv1_warr)\n\n # export supplies\n self.add_pin(self.get_pin_name('VSS'), 
ptap_wire_arrs, show=show_pins)\n self.add_pin(self.get_pin_name('VDD'), ntap_wire_arrs, show=show_pins)\n\n # get size\n self.size = self.set_size_from_array_box(m5v_layer)\n\n # get schematic parameters\n dum_info = self.get_sch_dummy_info()\n dum_nmos = dum_info[0][1]\n dum_pmos = dum_info[1][1]\n print(dum_info)\n self._sch_params = dict(\n lch=self.params['lch'],\n wn=self.params['wn'],\n wp=self.params['wp'],\n nf_inv=self.params['nfn_inv'],\n nfp_inv=self.params['nfp_inv'],\n nfn_tinv0=self.params['nfn_tinv0'],\n nfp_tinv0=self.params['nfp_tinv0'],\n nfn_tinv1=self.params['nfn_tinv1'],\n nfp_tinv1=self.params['nfp_tinv1'],\n intent=self.params['intent'],\n dum_n0=dum_nmos-2,\n dum_n1=2,\n dum_p0=dum_pmos-2,\n dum_p1=2,\n )", "title": "" }, { "docid": "131f8c694ba978113f105189554fae6c", "score": "0.54753476", "text": "def position_controls(nb,parent_frame, x,y,demo_btn,handType):\n #position_controls(0,0)\n #keeps track of the nuber of check buttons checked\n\n #Adding other Gui Elements as well as formatting their \n #position on window by putting them into frames\n poscon= LabelFrame(parent_frame,text=\"Position Control\")\n selected_frame=Frame(poscon,borderwidth=1)\n selected= Label(selected_frame, text=\"Selected\", fg= \"grey\")\n canvas= Canvas(poscon, width= 20, height =170)\n \n str_var1=StringVar()\n str_var2=StringVar()\n str_var3=StringVar()\n str_var_spread=StringVar()\n\n #Position Control Entry Boxes\n e1= IntegerEntry(poscon, width= 5,textvariable=str_var1)\n e2= IntegerEntry(poscon, width= 5,textvariable=str_var2)\n e3= IntegerEntry(poscon, width= 5,textvariable=str_var3)\n e_spread= IntegerEntry(poscon, width= 5,textvariable=str_var_spread)\n\n cur_symbol=StringVar()\n symbol_options=['%','Rad','Theta','Encoder']\n symbol_menu = OptionMenu(poscon, cur_symbol, *symbol_options)\n symbol_menu.configure(width=7)\n cur_symbol.set(symbol_options[0])\n \n e1.var=str_var1\n e2.var=str_var2\n e3.var=str_var3\n e_spread.var=str_var_spread\n\n e1.var.set(\"0\")\n e2.var.set(\"0\")\n e3.var.set(\"0\")\n e_spread.var.set(\"0\")\n\n if handType!=-1:\n set_modes_tstop()\n\n scale_var_1=IntVar()\n scale_var_2=IntVar()\n scale_var_3=IntVar()\n scale_var_spread=IntVar()\n\n encode_var_1=IntVar()\n encode_var_2=IntVar()\n encode_var_3=IntVar()\n encode_var_spread=IntVar()\n\n #Position Control Scales/sliders\n #resolution=.1\n scale1 = Scale( poscon, variable=scale_var_1, showvalue=0,resolution=1 ,orient=HORIZONTAL,length=250, command= lambda x: update_text(scale1.get(),e1))\n scale2 = Scale( poscon, variable=scale_var_2, showvalue=0,resolution=1,orient=HORIZONTAL,length=250, command= lambda x: update_text(scale2.get(),e2))\n scale3 = Scale( poscon, variable=scale_var_3, showvalue=0,resolution=1,orient=HORIZONTAL,length=250, command= lambda x: update_text(scale3.get(),e3)) \n scale_spread = Scale( poscon, variable=scale_var_spread, showvalue=0,resolution=1,orient=HORIZONTAL, \n command= lambda x:update_text(scale_spread.get(), e_spread), length= 250)\n\n \n chart={scale1:{scale2:0,scale3:0,scale_spread:0}, scale2:{scale1:0,scale3:0,scale_spread:0}, scale3:{scale1:0,scale2:0,scale_spread:0},scale_spread:{scale1:0,scale2:0,scale3:0},\"CURRENT_LINKS\":set()}\n\n #Variables needed for checkbox buttons\n v1= BooleanVar()\n v2= BooleanVar()\n v3= BooleanVar()\n v_spread=BooleanVar()\n\n #Position Control Check buttons. 
\n #0 is unchecked and 1 is checked\n finger1 = Checkbutton(poscon, text=\"Finger 1\", variable= v1) \n finger2 = Checkbutton(poscon, text=\"Finger 2\", variable= v2) \n finger3 = Checkbutton(poscon, text=\"Finger 3\", variable= v3) \n spread = Checkbutton(poscon, text=\"Spread\",variable=v_spread)\n\n finger1.number= (30,4)\n finger2.number= (30,58)\n finger3.number= (30, 113)\n spread.number= (30, 168)\n\n finger1.finger=FINGER1\n finger2.finger=FINGER2\n finger3.finger=FINGER3\n spread.finger=SPREAD\n\n scale1.encode_var=encode_var_1\n scale2.encode_var=encode_var_2\n scale2.encode_var=encode_var_3\n scale_spread.encode_var=encode_var_spread\n\n n={finger1:False,finger2:False,finger3:False,spread:False}\n\n finger1.slide=scale1\n finger2.slide=scale2\n finger3.slide=scale3\n spread.slide=scale_spread\n\n scale1.entry=e1\n scale2.entry=e2\n scale3.entry=e3\n scale_spread.entry=e_spread\n\n finger1.var = v1\n finger2.var = v2\n finger3.var = v3\n spread.var= v_spread\n \n if handType!=-1:\n update_positions(chart,nb,scale_var_1,scale_var_2,scale_var_3,scale_var_spread)\n\n finger1.config(command=lambda:check(True,n,chart,finger1,finger2,finger3,spread,canvas))\n finger2.config(command=lambda:check(True,n,chart,finger2,finger1,finger3,spread,canvas))\n finger3.config(command=lambda:check(True,n,chart,finger3,finger2,finger1,spread,canvas))\n spread.config(command=lambda:check(True,n,chart,spread,finger1,finger2,finger3,canvas))\n\n scale1.bind(\"<Button-1>\",lambda x:call(chart,scale1,scale2,scale3,scale_spread))\n scale2.bind(\"<Button-1>\",lambda x:call(chart,scale2,scale1,scale3,scale_spread))\n scale3.bind(\"<Button-1>\",lambda x:call(chart,scale3,scale1,scale2,scale_spread))\n scale_spread.bind(\"<Button-1>\",lambda x:call(chart,scale_spread,scale1,scale2,scale3))\n\n scale1.bind(\"<ButtonRelease-1>\",lambda x:update_all_delays(chart,scale1,scale2,scale3,scale_spread))\n scale2.bind(\"<ButtonRelease-1>\",lambda x:update_all_delays(chart,scale2,scale1,scale3,scale_spread))\n scale3.bind(\"<ButtonRelease-1>\",lambda x:update_all_delays(chart,scale3,scale1,scale2,scale_spread))\n scale_spread.bind(\"<ButtonRelease-1>\",lambda x:update_all_delays(chart,scale_spread,scale1,scale2,scale3))\n\n e1.bind('<Return>', lambda x: set_scale(chart,finger1,finger2,finger3,spread))\n e2.bind('<Return>', lambda x: set_scale(chart,finger2,finger1,finger3,spread))\n e3.bind('<Return>', lambda x: set_scale(chart,finger3,finger1,finger2,spread))\n e_spread.bind('<Return>',lambda x:set_scale(chart,spread,finger1,finger2,finger3))\n\n grasp_var=BooleanVar()\n grasp_var.set(0)\n\n #Allows user to select all three fingers with just one button\n\n selectb= Checkbutton(poscon, text= \"ALL FINGERS\", variable= grasp_var, command= lambda: toggle(n,chart, selectb, finger1, finger2, finger3, spread, canvas))\n\n selectb.var=grasp_var\n\n percent_1=Label(poscon,textvariable=cur_symbol)\n percent_2=Label(poscon,textvariable=cur_symbol)\n percent_3=Label(poscon,textvariable=cur_symbol)\n percent_spread=Label(poscon,textvariable=cur_symbol)\n\n nb.bind_all(\"<<NotebookTabChanged>>\",lambda x:tab_selected(nb,chart,scale1,scale2,scale3,scale_spread,handType))\n demo_btn.config(command = lambda: run_stop_toggle(demo_btn,parent_frame,chart,scale1,scale2,scale3,scale_spread))\n #Open and Close labels to give directionality to slider\n customFont = tkFont.Font(family=\"Helvetica\", size=12)\n canvas_open= Canvas(poscon, width=30, height=200)\n canvas_close= Canvas(poscon, width=35, height=200)\n\n canvas_open.create_text(17, 
100,text = \"\\n\".join(\"OPEN GRASP\"))\n canvas_close.create_text(17, 100,text = \"\\n\".join(\"CLOSE GRASP\"))\n\n canvas_open.create_line(29, 10,29, 192)\n canvas_open.create_line(5, 10,5, 192)\n canvas_close.create_line(29, 10,29, 192)\n canvas_close.create_line(5, 10,5, 192) \n \n canvas_open.grid(row=0,column=3, rowspan=4)\n canvas_close.grid(row=0,column=5,rowspan=4)\n \n #Formatting Spacing of all the above components\n finger1.grid(row= 0, column= 2,sticky=\"w\", pady=15)\n e1.grid(row= 0, column= 6,sticky=\"e\")\n percent_1.grid(row=0,column=7)\n scale1.grid(row= 0, column= 4, columnspan= 1,pady=15)\n\n finger2.grid(row=1 , column=2,sticky=\"w\", pady=15)\n e2.grid(row= 1, column= 6,sticky=\"e\", pady=15)\n percent_2.grid(row=1,column=7, pady=15)\n scale2.grid(row= 1, column= 4, columnspan= 1,pady=15)\n\n finger3.grid(row= 2, column=2,sticky=\"w\", pady=15)\n e3.grid(row= 2, column=6,sticky=\"e\", pady=15)\n percent_3.grid(row=2,column=7, pady=15)\n scale3.grid(row= 2, column= 4, columnspan=1,pady=15)\n\n selectb.grid(row= 1, column= 0,pady=10)\n\n canvas.grid(row=0, column=1, rowspan=4 , pady=(0,0))\n\n spread.grid(row= 3, column= 2,sticky=\"w\", pady=15)\n e_spread.grid(row= 3, column= 6,sticky=\"e\", pady=15)\n percent_spread.grid(row=3,column=7, pady=15)\n scale_spread.grid(row= 3, column= 4, columnspan=1, pady=15)\n \n poscon.grid(row= y, column= x,rowspan=3,pady=(14,0), sticky=\"ns\")\n create_bottom_pos_frame(poscon, 0, 5,[finger1,finger2,finger3,spread],demo_btn, chart, scale1, scale2, scale3, scale_spread)", "title": "" }, { "docid": "cab879e109ec7fe4330e9fa458b0cc62", "score": "0.5455166", "text": "def gui_layout_view(self) -> List[List[sg.Element]]:\n\n components = self.gui_layout_components()\n layout = [\n components[\"view_label\"],\n [sg.Multiline(default_text=\"gcode\",\n size=(60, 10),\n key=self.key_gen(\"gcode\"),\n autoscroll=True,\n disabled=True,\n enable_events=False,\n ),],\n components[\"view_connection\"],\n components[\"view_buttons\"],\n ]\n return layout", "title": "" }, { "docid": "a06ce5ee994f807795ed4491bf6e9f62", "score": "0.5445078", "text": "def createLayout(self):\n self.createPage()\n if self.drawSauce:\n setActiveLayer(self.layerImg)\n self.createImage()\n setActiveLayer(self.layerCal)", "title": "" }, { "docid": "66dec542315755ea0c4c719c571e569a", "score": "0.54437405", "text": "def __init__(self, parent, position):\n\n wx.SashLayoutWindow.__init__(self, parent, -1, wx.DefaultPosition,\n (200, 30), wx.NO_BORDER|wx.SW_3D)\n\n self.parent = parent\n self.position = position\n\n if position in ['top', 'bottom']:\n self.SetDefaultSize((1000, 0))\n else:\n self.SetDefaultSize((0, 1000))\n\n data = { \n 'left' : (wx.LAYOUT_VERTICAL, wx.LAYOUT_LEFT, wx.SASH_RIGHT,\n wx.VERTICAL, wx.HORIZONTAL, wx.TB_VERTICAL),\n 'right' : (wx.LAYOUT_VERTICAL, wx.LAYOUT_RIGHT, wx.SASH_LEFT, \n wx.VERTICAL, wx.HORIZONTAL, wx.TB_VERTICAL),\n 'top' : (wx.LAYOUT_HORIZONTAL, wx.LAYOUT_TOP, wx.SASH_BOTTOM, \n wx.HORIZONTAL, wx.VERTICAL, wx.TB_HORIZONTAL),\n 'bottom' : (wx.LAYOUT_HORIZONTAL, wx.LAYOUT_BOTTOM, wx.SASH_TOP, \n wx.HORIZONTAL, wx.VERTICAL, wx.TB_HORIZONTAL) }\n\n d_orientation, d_alignment, d_showsash, d_btnbox, d_mainbox, d_toolbar = data[position]\n\n self.SetOrientation(d_orientation)\n self.SetAlignment(d_alignment)\n self.SetSashVisible(d_showsash, True)\n\n self.panel = wx.Panel(self, -1)\n self.btnbox = wx.BoxSizer(d_btnbox)\n self.contentbox = wx.BoxSizer(d_mainbox)\n self.box = wx.BoxSizer(d_mainbox)\n if position in ['top', 'left']:\n 
self.box.Add(self.btnbox, 0, wx.EXPAND)\n self.box.Add(self.contentbox, 1, wx.EXPAND)\n else:\n self.box.Add(self.contentbox, 1, wx.EXPAND)\n self.box.Add(self.btnbox, 0, wx.EXPAND)\n\n self.toolbar = wx.ToolBar(self.panel, -1, \n style=d_toolbar|wx.SUNKEN_BORDER|wx.TB_3DBUTTONS)\n self.btnbox.Add(self.toolbar, 1)\n self.toolbar.Bind(wx.EVT_TOOL, self.on_toolbar)\n\n self.panel.SetSizer(self.box)\n self.panel.SetAutoLayout(True)\n\n self.contents = []\n self.buttons = []\n self.last_width = 180\n self.last_height = 120", "title": "" }, { "docid": "317e4042243c1b072554a34b8ea1aff2", "score": "0.54422", "text": "def __do_layout(self):\n sizer_1 = wx.BoxSizer(wx.VERTICAL)\n sizer_2 = wx.BoxSizer(wx.VERTICAL)\n sizer_4 = wx.BoxSizer(wx.HORIZONTAL)\n sizer_5 = wx.BoxSizer(wx.VERTICAL)\n sizer_3 = wx.BoxSizer(wx.VERTICAL)\n self.panel_3.SetSizer(sizer_3)\n self.panel_4.SetSizer(sizer_5)\n sizer_1.Add(self.panel_3, 0, 0, 0)\n sizer_2.Add(self.grid_1, 1, wx.ALIGN_BOTTOM | wx.ALL | wx.EXPAND, 1)\n sizer_4.Add(self.button_2, 1, wx.ALIGN_BOTTOM | wx.ALL, 1)\n sizer_4.Add(self.button_3, 1, wx.ALIGN_BOTTOM | wx.ALL, 1)\n sizer_5.Add(self.multiText, 1, wx.EXPAND, 0)\n sizer_2.Add(sizer_4, 0, wx.ALL, 0)\n self.panel_2.SetSizer(sizer_2)\n self.window_2.SplitHorizontally(self.panel_1, self.panel_2)\n self.window_1.SplitVertically(self.tree_ctrl_1, self.window_2)\n sizer_1.Add(self.window_1, 1, wx.EXPAND, 0)\n sizer_1.Add(self.panel_4, 0, wx.EXPAND, 1)\n self.SetSizer(sizer_1)\n self.Layout()\n self.panel_1.Refresh(False)", "title": "" }, { "docid": "979adff76664e81cc77c84c66bb7e301", "score": "0.54193056", "text": "def add_layout_pins(self):\n bitline_names = self.cell.get_all_bitline_names()\n for col in range(self.column_size):\n for port in self.all_ports:\n bl_pin = self.cell_inst[0, col].get_pin(bitline_names[2 * port])\n text = \"bl_{0}_{1}\".format(port, col)\n self.add_layout_pin(text=text,\n layer=bl_pin.layer,\n offset=bl_pin.ll().scale(1, 0),\n width=bl_pin.width(),\n height=self.height)\n br_pin = self.cell_inst[0, col].get_pin(bitline_names[2 * port + 1])\n text = \"br_{0}_{1}\".format(port, col)\n self.add_layout_pin(text=text,\n layer=br_pin.layer,\n offset=br_pin.ll().scale(1, 0),\n width=br_pin.width(),\n height=self.height)\n # self.add_rect(layer=bl_pin.layer,\n # offset=bl_pin.ll().scale(1, 0),\n # width=bl_pin.width(),\n # height=self.height)\n # self.add_rect(layer=br_pin.layer,\n # offset=br_pin.ll().scale(1, 0),\n # width=br_pin.width(),\n # height=self.height)\n\n wl_names = self.cell.get_all_wl_names()\n for row in range(self.row_size):\n for port in self.all_ports:\n wl_pin = self.cell_inst[row, 0].get_pin(wl_names[port])\n self.add_layout_pin(text=\"wl_{0}_{1}\".format(port, row),\n layer=wl_pin.layer,\n offset=wl_pin.ll().scale(0, 1),\n width=self.width,\n height=wl_pin.height())\n\n # Copy a vdd/gnd layout pin from every cell\n for row in range(self.row_size):\n for col in range(self.column_size):\n inst = self.cell_inst[row, col]\n for pin_name in [\"vdd\", \"gnd\"]:\n self.copy_layout_pin(inst, pin_name)", "title": "" }, { "docid": "25a163f3bff624c730ea5d6a7014060d", "score": "0.54133844", "text": "def generate(self):\n\n\t\tmodifiers = [[list(map(sum, zip(self.rm[row], self.cm[column], self.im[row][column]))) for column in range(self.columns)] for row in range(self.rows)]\n\t\tmodifiers = [[modifiers[row][column] + [self.ik[row][column]] for column in range(self.columns)] for row in range(self.rows)]\n\n\t\t#def __init__(self, transformations, ik=False, switch_type='alps', 
mount_length=DSA_KEY_WIDTH, mount_width=DSA_KEY_WIDTH, mx_notches=True):\n\t\tself.sm = self.switch_matrix = [[Keyswitch_mount([list(map(sum, zip(modifiers[row][column][:3], [column * (self.mount_width + self.column_spacing), row * (self.mount_length + self.row_spacing), 0]))) + modifiers[row][column][3:6], [self.origin[0], self.origin[1], self.origin[2], self.x_tent, self.y_tent, self.z_tent]], modifiers[row][column][6], self.switch_type, self.mount_length, self.mount_width, self.mx_notches) for column in range(self.columns)] for row in range(self.rows)]\n\n\t\tself.row_hulls = [[(self.sm[row][column].get_front(self.row_hull_thickness, self.row_hull_extrude) + self.sm[row+1][column].get_back(self.row_hull_thickness, self.row_hull_extrude)).hull() for column in range(self.columns)] for row in range(self.rows-1)] \n\n\t\tself.column_hulls = [[(self.sm[row][column].get_right(self.col_hull_thickness, self.col_hull_extrude) + self.sm[row][column+1].get_left(self.col_hull_thickness, self.col_hull_extrude)).hull() for column in range(self.columns - 1)] for row in range(self.rows)] \n\t\t\n\t\tself.corner_hulls = [[(self.sm[row][column].get_corner('fr', self.ch_thickness, self.ch_thickness) \n\t\t\t\t\t+ self.sm[row][column+1].get_corner('fl', self.ch_thickness, self.ch_thickness) \n\t\t\t\t\t+ self.sm[row+1][column].get_corner('br', self.ch_thickness, self.ch_thickness)\n\t\t\t\t\t+ self.sm[row+1][column+1].get_corner('bl', self.ch_thickness, self.ch_thickness)).hull() for column in range(self.columns-1)] for row in range(self.rows-1)] \n\n\t\tself.front_wall = [project(self.sm[self.rows-1][column].get_front(self.wall_thickness, self.wall_extrude)) for column in range(self.columns)]\n\t\tself.front_wall_hulls = [project((self.sm[self.rows-1][column].get_corner('fr', self.wall_x, self.wall_thickness, 0, self.wall_extrude) \n\t\t\t\t\t+ self.sm[self.rows-1][column+1].get_corner('fl', self.wall_x, self.wall_thickness, 0, self.wall_extrude))).hull() for column in range(self.columns - 1)]\n\n\t\tself.back_wall = [project(self.sm[0][column].get_back(self.wall_thickness, self.wall_extrude)) for column in range(self.columns)]\n\t\tself.back_wall_hulls = [project((self.sm[0][column].get_corner('br', self.wall_x, self.wall_thickness, 0, self.wall_extrude) \n\t\t\t\t\t+ self.sm[0][column+1].get_corner('bl', self.wall_x, self.wall_thickness, 0, self.wall_extrude))).hull() for column in range(self.columns - 1)]\n\n\t\tself.left_wall = [project(self.sm[row][0].get_left(self.side_wall_thickness, self.side_extrude)) for row in range(self.rows)]\n\t\tself.left_wall_hulls = [project((self.sm[row][0].get_corner('fl', self.side_wall_thickness, self.wall_y, self.side_extrude) \n\t\t\t\t\t+ self.sm[row+1][0].get_corner('bl', self.side_wall_thickness, self.wall_y, self.side_extrude)).hull()) for row in range(self.rows - 1)]\n\n\t\tself.right_wall = [project(self.sm[row][self.columns-1].get_right(self.side_wall_thickness, self.side_extrude)) for row in range(self.rows)]\n\t\tself.right_wall_hulls = [project((self.sm[row][self.columns-1].get_corner('fr', self.side_wall_thickness, self.wall_y, self.side_extrude) \n\t\t\t\t\t+ self.sm[row+1][self.columns-1].get_corner('br', self.side_wall_thickness, self.wall_y, self.side_extrude)).hull()) for row in range(self.rows - 1)]\n\n\t\tself.front_left_corner = project(self.sm[self.rows-1][0].get_corner('fl', self.side_extrude, self.wall_extrude, self.side_extrude, self.wall_extrude))\n\t\tself.front_right_corner = project(self.sm[self.rows-1][self.columns-1].get_corner('fr', 
self.side_extrude, self.wall_extrude, self.side_extrude, self.wall_extrude))\n\t\tself.back_left_corner = project(self.sm[0][0].get_corner('bl', self.side_extrude, self.wall_extrude, self.side_extrude, self.wall_extrude))\n\t\tself.back_right_corner = project(self.sm[0][self.columns-1].get_corner('br', self.side_extrude, self.wall_extrude, self.side_extrude, self.wall_extrude))", "title": "" }, { "docid": "bba733004957cf0555e2e9489af7e574", "score": "0.53933346", "text": "def add_layout_pins(self):\n # Add vdd/gnd via stacks\n for cols in range((self.column_size * 2) - 1):\n inst = self.cell_inst[cols]\n for pin_name in [\"vdd\", \"gnd\"]:\n for pin in inst.get_pins(pin_name):\n if inst.mod.cell_name == 'sky130_fd_bd_sram__sram_sp_colend' or 'sky130_fd_bd_sram__sram_sp_colenda':\n if inst.mirror == \"MY\":\n if pin_name == \"vdd\" and pin.layer == 'm1':\n self.add_layout_pin_rect_center(text=\"vdd\",\n layer=pin.layer,\n offset=inst.lr(),\n width=pin.width(),\n height=pin.height())\n elif pin_name == \"gnd\" and pin.layer == 'm1':\n self.add_layout_pin_rect_center(text=\"gnd\",\n layer=pin.layer,\n offset=inst.ll(),\n width=pin.width(),\n height=pin.height())\n else:\n if pin_name == \"vdd\" and pin.layer == 'm1':\n self.add_layout_pin_rect_center(text=\"vdd\",\n layer=pin.layer,\n offset=inst.ll(),\n width=pin.width(),\n height=pin.height())\n elif pin_name == \"gnd\" and pin.layer == 'm1':\n self.add_layout_pin_rect_center(text=\"gnd\",\n layer=pin.layer,\n offset=inst.lr(),\n width=pin.width(),\n height=pin.height())\n \n\n for col in range(len(self.insts)):\n\n inst = self.insts[col]\n if col % 4 == 0:\n pin = self.cell_inst[col].get_pin(\"bl\")\n text = \"fake_bl_{}\".format(int(col/2))\n self.add_layout_pin(text=text,\n layer=pin.layer,\n offset=pin.ll().scale(1, 0),\n width=pin.width(),\n height=pin.height())\n \n pin = self.cell_inst[col].get_pin(\"br\")\n text = \"fake_br_{}\".format(int(col/2))\n self.add_layout_pin(text=text,\n layer=pin.layer,\n offset=pin.ll().scale(1, 0),\n width=pin.width(),\n height=pin.height())\n\n elif col % 4 == 2:\n pin = self.cell_inst[col].get_pin(\"bl\")\n text = \"fake_bl_{}\".format(int(col/2))\n self.add_layout_pin(text=text,\n layer=pin.layer,\n offset=pin.ll().scale(1, 0),\n width=pin.width(),\n height=pin.height())\n \n pin = self.cell_inst[col].get_pin(\"br\")\n text = \"fake_br_{}\".format(int(col/2))\n self.add_layout_pin(text=text,\n layer=pin.layer,\n offset=pin.ll().scale(1, 0),\n width=pin.width(),\n height=pin.height())\n return", "title": "" }, { "docid": "3adf6d2f8d371529f64e0821d156d5e4", "score": "0.53863406", "text": "def define_layout(self):\n grid = QGridLayout()\n grid.setContentsMargins(11, 11, 0, 0)\n\n grid.addWidget(self._button_cabinet, 0, 0)\n grid.addWidget(self._button_search, 1, 0)\n \n spacer = QSpacerItem(20, 50, QSizePolicy.Minimum, QSizePolicy.Preferred)\n grid.addItem(spacer, 3, 0, -1, -1)\n grid.addWidget(self._button_view_data, 2, 0)\n\n grid.addWidget(self._app_stack, 0, 1, -1, -1)\n\n grid.setHorizontalSpacing(0)\n\n self.setLayout(grid)", "title": "" }, { "docid": "c45bc76fbe0c94c6851c64e7bb340f45", "score": "0.53779286", "text": "def create_widgets(self):\n self.create_upper_section()\n self.create_checkbox()\n self.create_password_lenght_section()\n self.create_lower_section()", "title": "" }, { "docid": "8290ac024294eff842f1a6c7070eabc5", "score": "0.5372493", "text": "def place_widgets(self):\n\n self.content.grid(column=0, row=0, sticky=(N,S,E,W))\n self.frame.grid(column=0, row=0, columnspan=4, 
rowspan=11,\\\n sticky=(N,S,E,W))\n self.info_label.grid(column=0, row=0, columnspan=4, rowspan=1,\\\n sticky=(W))\n \n self.col_label_1.grid(column=0, row=1, columnspan=2, sticky=(E))\n self.col_label_2.grid(column=0, row=2, columnspan=2, sticky=(E))\n self.col_label_3.grid(column=0, row=3, columnspan=2, sticky=(E))\n self.col_label_4.grid(column=0, row=4, columnspan=2, sticky=(E))\n self.col_label_5.grid(column=0, row=5, columnspan=2, sticky=(E))\n self.col_label_6.grid(column=0, row=6, columnspan=2, sticky=(E))\n self.col_label_7.grid(column=0, row=7, columnspan=2, sticky=(E))\n self.col_label_8.grid(column=0, row=8, columnspan=2, sticky=(E))\n self.col_label_9.grid(column=0, row=9, columnspan=2, sticky=(E))\n\n self.combobox_1.grid(column=2, row=1, columnspan=2, sticky=(W))\n self.combobox_2.grid(column=2, row=2, columnspan=2, sticky=(W))\n self.combobox_3.grid(column=2, row=3, columnspan=2, sticky=(W))\n self.combobox_4.grid(column=2, row=4, columnspan=2, sticky=(W))\n self.combobox_5.grid(column=2, row=5, columnspan=2, sticky=(W))\n self.combobox_6.grid(column=2, row=6, columnspan=2, sticky=(W))\n self.combobox_7.grid(column=2, row=7, columnspan=2, sticky=(W))\n self.combobox_8.grid(column=2, row=8, columnspan=2, sticky=(W))\n self.combobox_9.grid(column=2, row=9, columnspan=2, sticky=(W))\n\n self.preview_button.grid(column=0, row=10, sticky=(S,W))\n self.save_button.grid(column=2, row=10,sticky=(S,E))\n self.quit_button.grid(column=3, row=10, sticky=(S,E))", "title": "" }, { "docid": "8def33e56d9acd63e524df5f39b34e9a", "score": "0.5337791", "text": "def make_widgets(self):\r\n \r\n self.bgcolor = self.frame.cget('bg')\r\n self.instructions = Label(self.frame, text = ' Please provide the following sorption properties: ')\r\n\r\n self.leftcolumn = Label(self.frame, width = 2, text = '' )\r\n self.matrixcolumn = Label(self.frame, width = 10, text = '' )\r\n self.chemicalcolumn = Label(self.frame, width = 10, text = '' )\r\n self.isothermcolumn = Label(self.frame, width = 20, text = '' )\r\n self.kineticcolumn = Label(self.frame, width = 15, text = '' )\r\n self.equacolumn = Label(self.frame, width = 17, text = '' )\r\n self.coefcolumn1 = Label(self.frame, width = 6, text = '' )\r\n self.coefcolumn2 = Label(self.frame, width = 9, text = '' )\r\n self.coefcolumn3 = Label(self.frame, width = int(self.tkfont.measure(self.concunit[:-1]+'/kg/('+ self.concunit +')'+ u'\\u1d3a')/8)+1, text = '' )\r\n self.coefcolumn4 = Label(self.frame, width = 4, text = '' )\r\n self.coefcolumn5 = Label(self.frame, width = 9, text = '' )\r\n self.coefcolumn6 = Label(self.frame, width = int(self.tkfont.measure(self.concunit)/8)+1, text = '' )\r\n self.coefcolumn7 = Label(self.frame, width = 7, text = '' )\r\n self.coefcolumn8 = Label(self.frame, width = 9, text = '' )\r\n self.coefcolumn9 = Label(self.frame, width = 10, text = '' )\r\n self.rightcolumn = Label(self.frame, width = 2, text = '' )\r\n\r\n self.matrixlabel = Label(self.frame, text = 'Matrix')\r\n self.chemicallabel = Label(self.frame, text = 'Chemical')\r\n self.isothermlabel = Label(self.frame, text = 'Sorption Isotherm')\r\n self.kineticlabel = Label(self.frame, text = 'Kinetics')\r\n\r\n self.matrixwidget = Label(self.frame, text = self.sorption.matrix.name)\r\n self.chemicalwidget = Label(self.frame, text = self.sorption.chemical.name)\r\n self.isothermwidget = OptionMenu(self.frame, self.isotherm, *self.isotherms, command = self.click_isotherm)\r\n self.kineticwidget = OptionMenu(self.frame, self.kinetic, *self.kinetics, command = 
self.click_kinetic)\r\n \r\n self.okbutton = Button(self.frame, text = 'OK', width = 20, command = self.OK)\r\n self.cancelbutton = Button(self.frame, text = 'Cancel', width = 20, command = self.Cancel)\r\n self.blank1 = Label(self.frame, text = ' ')\r\n self.blank2 = Label(self.frame, text = ' ')\r\n \r\n #show the widgets on the grid\r\n self.instructions.grid(row = 0, column = 0, columnspan = 15, padx = 2, pady = 8, sticky = 'W')\r\n\r\n self.leftcolumn.grid( row = 1, column = 0, padx = 2, pady = 1, sticky = 'WE')\r\n self.matrixcolumn.grid( row = 1, column = 1, padx = 2, pady = 1, sticky = 'WE')\r\n self.chemicalcolumn.grid(row = 1, column = 2, padx = 2, pady = 1, sticky = 'WE')\r\n self.isothermcolumn.grid(row = 1, column = 3, padx = 2, pady = 1, sticky = 'WE')\r\n self.kineticcolumn.grid( row = 1, column = 4, padx = 2, pady = 1, sticky = 'WE')\r\n self.equacolumn.grid( row = 1, column = 5, padx = 2, pady = 1, sticky = 'WE')\r\n self.coefcolumn1.grid( row = 1, column = 6, padx = 0, pady = 1)\r\n self.coefcolumn2.grid( row = 1, column = 7, padx = 0, pady = 1)\r\n self.coefcolumn3.grid( row = 1, column = 8, padx = 0, pady = 1)\r\n self.coefcolumn4.grid( row = 1, column = 9, padx = 0, pady = 1)\r\n self.coefcolumn5.grid( row = 1, column = 10, padx = 0, pady = 1)\r\n self.coefcolumn6.grid( row = 1, column = 11, padx = 0, pady = 1)\r\n self.coefcolumn7.grid( row = 1, column = 12, padx = 0, pady = 1)\r\n self.coefcolumn8.grid( row = 1, column = 13, padx = 0, pady = 1)\r\n self.coefcolumn9.grid( row = 1, column = 14, padx = 0, pady = 1)\r\n self.rightcolumn.grid( row = 1, column = 15, padx = 0, pady = 1)\r\n\r\n self.matrixlabel.grid( row = 2, column = 1, padx = 2, pady = 4, sticky = 'WE')\r\n self.chemicallabel.grid( row = 2, column = 2, padx = 2, pady = 4, sticky = 'WE')\r\n self.isothermlabel.grid( row = 2, column = 3, padx = 2, pady = 4, sticky = 'WE')\r\n self.kineticlabel.grid( row = 2, column = 4, padx = 2, pady = 4, sticky = 'WE')\r\n\r\n self.matrixwidget.grid( row = 3, column = 1, padx = 2, pady = 1, sticky = 'WE')\r\n self.chemicalwidget.grid(row = 3, column = 2, padx = 2, pady = 1, sticky = 'WE')\r\n self.isothermwidget.grid(row = 3, column = 3, padx = 2, pady = 1, sticky = 'WE')\r\n self.kineticwidget.grid( row = 3, column = 4, padx = 2, pady = 1, sticky = 'WE')\r\n\r\n self.blank1.grid( row = 4)\r\n self.okbutton.grid( row = 5, columnspan = 15)\r\n self.cancelbutton.grid( row = 6, columnspan = 15)\r\n self.blank2.grid( row = 7)\r\n self.okbutton.bind('<Return>', self.OK)\r\n self.focusbutton = self.okbutton\r\n\r\n self.click_isotherm()\r\n self.click_kinetic()", "title": "" }, { "docid": "4044f559a59cbb1332bf202d6f3552ad", "score": "0.53200245", "text": "def addDockContent(self,inputDict):\n for key in inputDict:\n inputDict[key][1].addWidget(inputDict[key][0])", "title": "" }, { "docid": "6a1b81cf9ea188a66d8f4a3cb7a03939", "score": "0.5273105", "text": "def set_layout(self, layouts):\n llist = \",\".join([l[0] for l in layouts])\n vlist = \",\".join([l[1] for l in layouts])\n command = [\"setxkbmap\", \"-layout\", llist, \"-variant\", vlist]\n try:\n subprocess.check_call(command)\n except CalledProcessError as e:\n # invalid layout string\n pass\n except OSError as e:\n # setxkbmap not installed\n pass", "title": "" }, { "docid": "9762a0a50cf58515b5437ed4cd01c60e", "score": "0.52694845", "text": "def _descriptor_layout_(self):\n self.descriptor_lay = QtWidgets.QHBoxLayout()\n self.descriptor_lay.addWidget(self.descriptor_lbl)\n 
self.descriptor_lay.addWidget(self.descriptor_le)\n self.main_layout.addLayout(self.descriptor_lay)", "title": "" }, { "docid": "de01b1747f47b46b255f1c637480492a", "score": "0.52684486", "text": "def build_layout(self,side_number):\n print \"In build layout for side %d\" % (self.side_number,)\n print \"Building of layouts is implemented by another program,\"\n print \"which may be called here by exec or xmlrpc\"\n print self.image_filename\n print self.landmarks\n # python BallotTemplate.py dpi image_filename \n # can be set up as xmlrpc or just called\n # xmlrpc will send xml to stdout, so let's incorporate that as our\n # way of generating layouts; need to ensure it meets the LayoutSpec.txt\n # we can simplify its job by passing it landmarks, layout_id,\n # brand, and target sizes;\n # we need to rename it to something like GenericBuildLayout.py\n #proxy = xmlrpclib.ServerProxy(\"http://localhost:8000/\",allow_none=True)\n #layout = proxy.get_layout(self.image_filename,self.landmarks)\n\n src_image = self.image.convert(\"L\")\n #Image.open(self.image_filename).convert(\"L\")\n\n tb = TemplateBuilder(src_image,\n self.dpi,\n self.image_filename,\n ulc_x = self.landmarks.ulc.x,\n ulc_y = self.landmarks.ulc.y,\n urc_x = self.landmarks.urc.x,\n urc_y = self.landmarks.urc.y,\n llc_x = self.landmarks.llc.x,\n llc_y = self.landmarks.llc.y,\n lrc_x = self.landmarks.lrc.x,\n lrc_y = self.landmarks.lrc.y,\n layout_id = self.layout_id,\n contest_gap_inches = 0.7,\n min_target_width_inches = 0.16,\n max_target_width_inches = 0.19,\n target_width_inches = 0.25,\n target_height_inches = 0.17,\n check_for_horizontal = False,\n min_target_set_height_inches = 0.25,\n min_contest_height_inches = 0.6,\n ignore_height_inches = 0.8,\n ignore_width_inches = 0.9,\n ignore_right_inches = 0.5,\n diebold = False)\n layout = tb.__repr__() \n return layout, tb.out_image\n\n\n #layout = BallotTemplate(self.dpi,\n # self.image_filename,\n # landmarks = self.landmarks,\n # layout_id = self.layout_id,\n # precinct = 'yabba',\n # vendor = 'ess')\n # the alternative would be to use \n # an ESS specific layout development routine\n # as found in ess_ballot and ess1_ballot (old files)\n #return layout.__repr__()", "title": "" }, { "docid": "b0e79c10aa955abce4c2cb0932f703b3", "score": "0.52670956", "text": "def __layoutDisplayPage(self):\n \n settings = self.KaraokeMgr.SongDB.Settings\n\n panel = wx.Panel(self.notebook)\n dispsizer = wx.BoxSizer(wx.VERTICAL)\n\n self.FSCheckBox = wx.CheckBox(panel, -1, \"Full-screen mode\")\n self.FSCheckBox.SetValue(settings.FullScreen)\n dispsizer.Add(self.FSCheckBox, flag = wx.LEFT | wx.RIGHT | wx.TOP, border = 10)\n gsizer = wx.FlexGridSizer(0, 4, 2, 0)\n text = wx.StaticText(panel, -1, \"Win size:\")\n gsizer.Add(text, flag = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border = 5)\n self.WinSizeX = wx.TextCtrl(panel, -1, value = str(settings.WinSize[0]))\n gsizer.Add(self.WinSizeX, flag = wx.EXPAND | wx.RIGHT, border = 5)\n self.WinSizeY = wx.TextCtrl(panel, -1, value = str(settings.WinSize[1]))\n gsizer.Add(self.WinSizeY, flag = wx.EXPAND | wx.RIGHT, border = 10)\n gsizer.Add((0, 0))\n\n # Window placement only seems to work reliably on Linux. 
Only\n # offer it there.\n self.DefaultPosCheckBox = None\n if env == ENV_LINUX:\n text = wx.StaticText(panel, -1, \"Placement:\")\n gsizer.Add(text, flag = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border = 5)\n pos_x = pos_y = ''\n if settings.WinPos:\n pos_x, pos_y = settings.WinPos\n self.WinPosX = wx.TextCtrl(panel, -1, value = str(pos_x))\n gsizer.Add(self.WinPosX, flag = wx.EXPAND | wx.RIGHT, border = 5)\n self.WinPosY = wx.TextCtrl(panel, -1, value = str(pos_y))\n gsizer.Add(self.WinPosY, flag = wx.EXPAND | wx.RIGHT, border = 10)\n\n self.DefaultPosCheckBox = wx.CheckBox(panel, -1, \"Default placement\")\n self.Bind(wx.EVT_CHECKBOX, self.clickedDefaultPos, self.DefaultPosCheckBox)\n self.DefaultPosCheckBox.SetValue(settings.WinPos is None)\n self.clickedDefaultPos(None)\n \n gsizer.Add(self.DefaultPosCheckBox, flag = wx.EXPAND)\n dispsizer.Add(gsizer, flag = wx.EXPAND | wx.ALL, border = 10)\n\n self.SplitVerticallyCheckBox = wx.CheckBox(panel, -1, \"Split playlist window vertically\")\n self.SplitVerticallyCheckBox.SetValue(settings.SplitVertically)\n dispsizer.Add(self.SplitVerticallyCheckBox, flag = wx.LEFT | wx.RIGHT | wx.TOP, border = 10)\n\n panel.SetSizer(dispsizer)\n self.notebook.AddPage(panel, \"Display\")", "title": "" }, { "docid": "e935289aafe9d93242e465b5817df147", "score": "0.5261793", "text": "def get_layout(sl, sr):", "title": "" }, { "docid": "ecdd662062166e8843953de6c2dd5993", "score": "0.52560365", "text": "def init_ui_layout(self):\n\n # Calls parent init.\n super(SolutionRoot, self).init_ui_layout()\n\n # To override in each specific solution. Prepare layout items with right values.\n\n self.master_node = self.get_node(\"fit\", \"master\")\n self.master_node_shape = self.master_node.listRelatives(shapes=True)[0]\n self.master_node_shape_circle = self.master_node_shape.listConnections(source=True)[0]", "title": "" }, { "docid": "14e8aea87cf44c338a910ec63df1188c", "score": "0.52554375", "text": "def _compute_gridspec(self, layout):\n layout_items = layout.grid_items()\n layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None\n\n layouts = {}\n col_widthratios, row_heightratios = {}, {}\n for (r, c) in self.coords:\n # Get view at layout position and wrap in AdjointLayout\n _, view = layout_items.get((c, r) if self.transpose else (r, c), (None, None))\n if isinstance(view, NdLayout):\n raise SkipRendering(\"Cannot render NdLayout nested inside a Layout\")\n layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])\n layouts[(r, c)] = layout_view\n\n # Compute shape of AdjointLayout element\n layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}\n layout_type = layout_lens[len(layout_view)]\n\n # Get aspects\n main = layout_view.main\n main = main.last if isinstance(main, HoloMap) else main\n main_options = self.lookup_options(main, 'plot').options if main else {}\n if main and not isinstance(main_options.get('aspect', 1), str):\n main_aspect = np.nan if isinstance(main, Empty) else main_options.get('aspect', 1)\n main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight\n else:\n main_aspect = np.nan\n\n if layout_type in ['Dual', 'Triple']:\n el = layout_view.get('right', None)\n eltype = type(el)\n if el and eltype in MPLPlot.sideplots:\n plot_type = MPLPlot.sideplots[type(el)]\n ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)\n width_ratios = [4, 4*ratio]\n else:\n width_ratios = [4, 1]\n else:\n width_ratios = [4]\n\n inv_aspect = 1./main_aspect if main_aspect else np.NaN\n if layout_type in ['Embedded 
Dual', 'Triple']:\n el = layout_view.get('top', None)\n eltype = type(el)\n if el and eltype in MPLPlot.sideplots:\n plot_type = MPLPlot.sideplots[type(el)]\n ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)\n height_ratios = [4*ratio, 4]\n else:\n height_ratios = [1, 4]\n else:\n height_ratios = [4]\n\n if not isinstance(main_aspect, (str, type(None))):\n width_ratios = [wratio * main_aspect for wratio in width_ratios]\n height_ratios = [hratio * inv_aspect for hratio in height_ratios]\n layout_shape = (len(width_ratios), len(height_ratios))\n\n # For each row and column record the width and height ratios\n # of the LayoutPlot with the most horizontal or vertical splits\n # and largest aspect\n prev_heights = row_heightratios.get(r, (0, []))\n if layout_shape[1] > prev_heights[0]:\n row_heightratios[r] = [layout_shape[1], prev_heights[1]]\n row_heightratios[r][1].append(height_ratios)\n\n prev_widths = col_widthratios.get(c, (0, []))\n if layout_shape[0] > prev_widths[0]:\n col_widthratios[c] = (layout_shape[0], prev_widths[1])\n col_widthratios[c][1].append(width_ratios)\n\n\n col_splits = [v[0] for __, v in sorted(col_widthratios.items())]\n row_splits = [v[0] for ___, v in sorted(row_heightratios.items())]\n\n widths = np.array([r for col in col_widthratios.values()\n for ratios in col[1] for r in ratios])/4\n\n wr_unnormalized = compute_ratios(col_widthratios, False)\n hr_list = compute_ratios(row_heightratios)\n wr_list = compute_ratios(col_widthratios)\n\n # Compute the number of rows and cols\n cols, rows = len(wr_list), len(hr_list)\n\n\n wr_list = [r if np.isfinite(r) else 1 for r in wr_list]\n hr_list = [r if np.isfinite(r) else 1 for r in hr_list]\n\n width = sum([r if np.isfinite(r) else 1 for r in wr_list])\n yscale = width/sum([(1/v)*4 if np.isfinite(v) else 4 for v in wr_unnormalized])\n if self.absolute_scaling:\n width = width*np.nanmax(widths)\n\n xinches, yinches = None, None\n if not isinstance(self.fig_inches, (tuple, list)):\n xinches = self.fig_inches * width\n yinches = xinches/yscale\n elif self.fig_inches[0] is None:\n xinches = self.fig_inches[1] * yscale\n yinches = self.fig_inches[1]\n elif self.fig_inches[1] is None:\n xinches = self.fig_inches[0]\n yinches = self.fig_inches[0] / yscale\n if xinches and yinches:\n self.handles['fig'].set_size_inches([xinches, yinches])\n\n self.gs = gridspec.GridSpec(rows, cols,\n width_ratios=wr_list,\n height_ratios=hr_list,\n wspace=self.hspace,\n hspace=self.vspace)\n\n # Explicitly clear Matplotlib figures to avoid\n # \"Auto-removal of overlapping axes\" warning.\n self.handles['fig'].clf()\n\n # Situate all the Layouts in the grid and compute the gridspec\n # indices for all the axes required by each LayoutPlot.\n gidx = 0\n layout_count = 0\n tight = self.tight\n collapsed_layout = layout.clone(shared_data=False, id=layout.id)\n frame_ranges = self.compute_ranges(layout, None, None)\n keys = self.keys[:1] if self.dynamic else self.keys\n frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))\n for key in keys])\n layout_subplots, layout_axes = {}, {}\n for r, c in self.coords:\n # Compute the layout type from shape\n wsplits = col_splits[c]\n hsplits = row_splits[r]\n if (wsplits, hsplits) == (1,1):\n layout_type = 'Single'\n elif (wsplits, hsplits) == (2,1):\n layout_type = 'Dual'\n elif (wsplits, hsplits) == (1,2):\n layout_type = 'Embedded Dual'\n elif (wsplits, hsplits) == (2,2):\n layout_type = 'Triple'\n\n # Get the AdjoinLayout at the specified coordinate\n view = 
layouts[(r, c)]\n positions = AdjointLayoutPlot.layout_dict[layout_type]\n\n # Create temporary subplots to get projections types\n # to create the correct subaxes for all plots in the layout\n _, _, projs = self._create_subplots(layouts[(r, c)], positions,\n None, frame_ranges, create=False)\n gidx, gsinds = self.grid_situate(gidx, layout_type, cols)\n\n layout_key, _ = layout_items.get((r, c), (None, None))\n if isinstance(layout, NdLayout) and layout_key:\n layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))\n\n # Generate the axes and create the subplots with the appropriate\n # axis objects, handling any Empty objects.\n obj = layouts[(r, c)]\n empty = isinstance(obj.main, Empty)\n if view.main is None:\n continue\n elif empty:\n obj = AdjointLayout([])\n elif not view.traverse(lambda x: x, [Element]):\n self.param.warning(f'{obj.main} is empty, skipping subplot.')\n continue\n elif self.transpose:\n layout_count = (c*self.rows+(r+1))\n else:\n layout_count += 1\n subaxes = [plt.subplot(self.gs[ind], projection=proj)\n for ind, proj in zip(gsinds, projs)]\n subplot_data = self._create_subplots(obj, positions,\n layout_dimensions, frame_ranges,\n dict(zip(positions, subaxes)),\n num=0 if empty else layout_count)\n subplots, adjoint_layout, _ = subplot_data\n layout_axes[(r, c)] = subaxes\n\n # Generate the AdjointLayoutsPlot which will coordinate\n # plotting of AdjointLayouts in the larger grid\n plotopts = self.lookup_options(view, 'plot').options\n layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,\n fig=self.handles['fig'], **plotopts)\n layout_subplots[(r, c)] = layout_plot\n tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight\n if layout_key:\n collapsed_layout[layout_key] = adjoint_layout\n\n # Apply tight layout if enabled and incompatible\n # GridPlot isn't present.\n if tight:\n if isinstance(self.tight_padding, (tuple, list)):\n wpad, hpad = self.tight_padding\n padding = dict(w_pad=wpad, h_pad=hpad)\n else:\n padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)\n self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)\n\n return layout_subplots, layout_axes, collapsed_layout", "title": "" }, { "docid": "84004411e04498f594ad02f5187949d3", "score": "0.5247371", "text": "def create_buttons(self):\n\t\tself.buttons = {}\n\t\tbuttons_layout = qtw.QGridLayout()\n\t\tbuttons = {\n\t\t\t'^':(0,0),\n\t\t\t'(':(0,1),\n\t\t\t')':(0,2),\n\t\t\t'/':(0,3),\n\t\t\t'7':(1,0),\n\t\t\t'8':(1,1),\n\t\t\t'9':(1,2),\n\t\t\t'*':(1,3),\n\t\t\t'4':(2,0),\n\t\t\t'5':(2,1),\n\t\t\t'6':(2,2),\n\t\t\t'-':(2,3),\n\t\t\t'1':(3,0),\n\t\t\t'2':(3,1),\n\t\t\t'3':(3,2),\n\t\t\t'+':(3,3),\n\t\t\t'0':(4,0),\n\t\t\t'.':(4,1),\n\t\t\t'C':(4,2),\n\t\t\t'=':(4,3),\n\t\t\t'del':(4,4),\n\t\t}\n\t\tfor btntext, pos in buttons.items():\n\t\t\t\n\t\t\tself.buttons[btntext] = qtw.QPushButton(btntext)\n\t\t\t#Connecting signals to the appropriate slots\n\t\t\tif btntext in self.values or btntext in self.possible_operations:\n\t\t\t\t\"\"\"Here we use partial (from functools) to pass an argument to the slot. 
Notice that you cannot use a lambda function, as it will pass the current value of btntext, \n\t\t\t\twhich after the buttons have been setup is always '=' (ie the key of the last button defined)\"\"\"\n\t\t\t\tself.buttons[btntext].clicked.connect(partial(self.prep_operation, btntext))\n\n\t\t\telif btntext == 'C':\n\t\t\t\t self.buttons[btntext].clicked.connect(self.clear_screen)\n\n\t\t\telif btntext == '=':\n\t\t\t\tself.buttons[btntext].clicked.connect(self.do_operation)\n\n\t\t\telif btntext == 'del':\n\t\t\t\tself.buttons[btntext].clicked.connect(self.del_value)\n\n\t\t\t#sizing the various buttons\n\t\t\tself.buttons[btntext].setSizePolicy(qtw.QSizePolicy.Expanding, qtw.QSizePolicy.Expanding)\n\t\t\tbuttons_layout.addWidget(self.buttons[btntext], pos[0], pos[1])\n\n\t\tself.main_layout.addLayout(buttons_layout)", "title": "" }, { "docid": "d17892fad67721e6915b8506b9953dc7", "score": "0.524577", "text": "def create_layout(self):\n\n # grid is the layout of the app\n grid = QGridLayout()\n\n # results is the input/text/display box at the top of the app\n results = QLineEdit()\n\n # the buttons themselves\n button_labels = ['AC', 'DEL', 'โˆš', '/',\n 7, 8, 9, \"*\",\n 4, 5, 6, \"-\",\n 1, 2, 3, \"+\",\n 0, \".\", \"=\"]\n row = 1\n col = 0\n grid.addWidget(results, 0, 0, 1, 4)\n for button in button_labels:\n if col > 3:\n col = 0\n row += 1\n button_object = buttons.Button(button, results)\n if button == 0:\n grid.addWidget(button_object.b, row, col, 1, 2)\n col += 1\n else:\n grid.addWidget(button_object.b, row, col, 1, 1)\n col += 1\n\n self.grid = grid", "title": "" }, { "docid": "3eb511418967832ca91b33b930f88137", "score": "0.52220273", "text": "def init_ui_layout(self):\n\n # Calls parent init.\n super(SolutionJointChainFK, self).init_ui_layout()\n\n self.master_node = self.get_node(\"fit\", \"master\")\n self.master_node_shape = self.master_node.listRelatives(shapes=True)[0]\n self.master_node_shape_circle = self.master_node_shape.listConnections(source=True)[0]\n\n # To override in each specific solution. Prepare layout items with right values.\n self.dsp_len = ui.get_child(self.ui_widget, \"dsp_len\")\n self.sp_segments = ui.get_child(self.ui_widget, \"sp_segments\")\n self.pb_apply = ui.get_child(self.ui_widget, \"pb_apply\")\n\n # Update parameters.\n if self.is_goal_built(\"fit\"):\n last_core_node = self.get_node(\"fit\", \"core\", \"^last$\")\n last_core_zt_node = self.get_node(\"fit\", \"core\", \"^lastZT$\")\n if last_core_node and last_core_zt_node:\n self.dsp_len.setValue(last_core_node.attr(\"translateY\").get() + last_core_zt_node.attr(\"translateY\").get())\n\n segments = self.get_nodes(\"fit\", \"core\", \"segment[0-9]{3,3}$\")\n if segments:\n self.sp_segments.setValue(len(segments) - 1)", "title": "" }, { "docid": "9defe2b383cd2b05a120cfd11a85ae47", "score": "0.521316", "text": "def graph_layout(self, alg_name):\r\n # create the figure, axes etc... 
(set up the graph)\r\n fig = plt.Figure(figsize=(15, 8), dpi=100, facecolor=\"#8ecae6\")\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.tick_params(axis=\"x\", colors=\"white\") # change the tick colors to white on both the x and the y axis\r\n ax.tick_params(axis=\"y\", colors=\"white\")\r\n # create the embedded canvas object and an operation counter\r\n self.graph_canvas = FigureCanvasTkAgg(fig, master=self.root)\r\n self.operation_var = tkinter.StringVar()\r\n self.operation_var.set(f\"Operations: 0\")\r\n op_counter = tkinter.Label(self.root, textvariable=self.operation_var, font=(\"Aria;\", 18, \"bold\"), bg=\"#8ecae6\")\r\n # create a 'header' using a canvas for the round edges...\r\n header_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0, height=90)\r\n round_rectangle(0, 0, 1100, 60, header_canvas, fill=\"#a2d2ff\")\r\n header = tkinter.Label(self.root, text=f\"{alg_name.capitalize()} performed on an array of {self.array_size} elements\", font=(\"Aria;\", 28, \"bold\"), bg=\"#a2d2ff\", justify=\"center\", width=47)\r\n # use a canvas as a menu button\r\n menu_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0)\r\n round_rectangle(0, 0, 170, 50, menu_canvas, fill=\"#a2d2ff\")\r\n menu_canvas.create_text(85, 25, fill=\"black\", font=(\"Aria;\", 20, \"bold\"), text=\"Menu\")\r\n menu_canvas.bind(\"<ButtonRelease-1>\", lambda e: self.main_screen())\r\n # put everything on the screen\r\n header_canvas.pack(fill=tkinter.BOTH, padx=(150, 0), pady=(60, 0))\r\n header.place(x=155, y=68)\r\n menu_canvas.place(x=1200, y=825, w=200, h=50)\r\n self.graph_canvas.get_tk_widget().place(x=-67, y=60)\r\n op_counter.place(x=120, y=820)\r\n\r\n return fig, ax", "title": "" }, { "docid": "0d49fdb744f4190faa36792a99966da1", "score": "0.52028245", "text": "def keypadBinding(self):\n self.view.rb1.configure(command=lambda: self.buttonInput(self.view.ml4.cget(\"text\")))\n self.view.rb2.configure(command=lambda: self.buttonInput(self.view.ml8.cget(\"text\")))\n self.view.rb3.configure(command=lambda: self.buttonInput(self.view.ml12.cget(\"text\")))\n\n self.view.lb1.configure(command=lambda: self.buttonInput(self.view.ml1.cget(\"text\")))\n self.view.lb2.configure(command=lambda: self.buttonInput(self.view.ml5.cget(\"text\")))\n self.view.lb3.configure(command=self.cancelSession)\n\n self.view.npb1.configure(command=lambda: self.keypadEntry(1))\n self.view.npb2.configure(command=lambda: self.keypadEntry(2))\n self.view.npb3.configure(command=lambda: self.keypadEntry(3))\n self.view.npb4.configure(command=lambda: self.keypadEntry(4))\n self.view.npb5.configure(command=lambda: self.keypadEntry(5))\n self.view.npb6.configure(command=lambda: self.keypadEntry(6))\n self.view.npb7.configure(command=lambda: self.keypadEntry(7))\n self.view.npb8.configure(command=lambda: self.keypadEntry(8))\n self.view.npb9.configure(command=lambda: self.keypadEntry(9))\n self.view.npb10.configure(command=lambda: self.input_command(\"DEL\"))\n self.view.npb11.configure(command=lambda: self.keypadEntry(0))\n self.view.npb12.configure(command=lambda: self.input_command(\"OK\"))", "title": "" }, { "docid": "09f829f56806fb7b54ab5073c3ab31a6", "score": "0.51906365", "text": "def produce(self, layout, layers, parameters, cell):\n self._layers = layers\n self.cell = cell\n self._param_values = parameters\n self.layout = layout\n\n\n # cell: layout cell to place the layout\n # LayerSiN: which layer to use\n # r: radius\n # w: waveguide width\n # start_angle: starting angle of the arc\n # 
stop_agnle: stopping angle of the arc\n # length units in dbu\n import math\n from math import pi, cos, sin\n from SiEPIC.utils import arc_wg\n \n # fetch the parameters\n dbu = self.layout.dbu\n ly = self.layout\n \n LayerSi = self.silayer\n LayerSiN = self.silayer_layer\n# LayerSiN = ly.layer(LayerSi)\n LayerPinRecN = ly.layer(self.pinrec)\n LayerDevRecN = ly.layer(self.devrec)\n \n from SiEPIC.extend import to_itype\n w = to_itype(self.wg_width,dbu)\n r = to_itype(self.radius,dbu)\n start_angle = self.start_angle\n stop_angle = self.stop_angle\n \n if start_angle > stop_angle:\n start_angle = self.stop_angle\n stop_angle = self.start_angle\n \n deg_to_rad = math.pi / 180.0\n \n \n # draw the arc\n x = 0\n y = 0\n \n \n from SiEPIC._globals import PIN_LENGTH as pin_length\n \n self.cell.shapes(LayerSiN).insert(arc_wg(r, w, start_angle, stop_angle))\n # Create the pins, as short paths:\n\n # Pin on the right side:\n x = r*math.cos( start_angle * deg_to_rad ) \n y = r*math.sin( start_angle * deg_to_rad )\n \n x_pin = math.cos( (90 - start_angle)*deg_to_rad ) *pin_length/2\n y_pin = math.sin( (90 - start_angle)*deg_to_rad ) *pin_length/2\n \n p2 = [Point(x-x_pin, y+y_pin), Point(x+x_pin, y-y_pin)]\n p2c = Point(x, y)\n self.set_p2 = p2c\n self.p2 = p2c\n pin = Path(p2, w)\n self.cell.shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, x, y)\n text = Text (\"pin2\", t)\n shape = self.cell.shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n\n\n # Pin on the left side:\n x = round(r*math.cos( stop_angle * deg_to_rad ))\n y = round(r*math.sin( stop_angle * deg_to_rad ))\n \n x_pin = math.cos( (90.0 - stop_angle)*deg_to_rad ) *pin_length/2\n y_pin = math.sin( (90.0 - stop_angle)*deg_to_rad ) *pin_length/2\n \n p1 = [Point(x+x_pin, y-y_pin), Point(x-x_pin, y+y_pin)]\n p1c = Point(x,y)\n self.set_p1 = p1c\n self.p1 = p1c\n pin = Path(p1, w)\n self.cell.shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, x, y)\n text = Text (\"pin1\", t)\n shape = self.cell.shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n\n \n # Create the device recognition layer -- make it 1 * wg_width away from the waveguides.\n x = 0\n y = 0\n #layout_arc_wg_dbu(self.cell, LayerDevRecN, x, y, r, w*3, start_angle, stop_angle)\n self.cell.shapes(LayerDevRecN).insert(arc_wg(r, w*3, start_angle, stop_angle))", "title": "" }, { "docid": "0184458e7e71ed8eb7d929607741562f", "score": "0.5187572", "text": "def _populate_control(self):\n self.mainwidget = ControlPanel()\n self.mainwidget.setWindowTitle(\"Mosca Control\")\n\n # command widgets to be populated\n self.tools = QtGui.QHBoxLayout()\n self.viewbutton = QtGui.QPushButton(TOGGLE_ACQ_VIEW)\n self.recordbutton = QtGui.QPushButton(TOGGLE_ACQ_REC)\n self.oscillobutton = QtGui.QPushButton(TOGGLE_OSCILLO)\n self.viewbutton.clicked.connect(self.toggle_viewing)\n self.recordbutton.clicked.connect(self.toggle_recording)\n self.oscillobutton.setCheckable(True)\n self.oscillobutton.setEnabled(False)\n self.oscillobutton.toggled.connect(self.toggle_oscillo)\n\n self.tools.addWidget(self.oscillobutton)\n self.tools.addStretch(1)\n self.tools.addWidget(self.viewbutton)\n self.tools.addWidget(self.recordbutton)\n\n # layout components\n self._layout = QtGui.QGridLayout()\n self.mainwidget.setLayout(self._layout)\n self.device = DriverPanel(DeviceManager, \"Device\", \"DAQ selection\")\n self.storage = DriverPanel(StorageManager, \"Storage\", \"I/O selection\")\n self.AI = ChannelPanel(\"Analog Inputs\")\n 
self.AI.channelsLoaded.connect(self._update_with_channels)\n self._layout.addWidget(self.device, 0, 0, 1, 1) # row: 0, col: 0\n self._layout.addWidget(self.AI, 0, 1, 1, 1) # row: 0, col: 1\n self._layout.addWidget(self.storage, 0, 2, 1, 1) # row: 0, col: 2\n self._layout.addLayout(self.tools, 1, 0, 1, 3) # row: 3, col: 0-2\n self._layout.setColumnStretch(0, 1)\n self._layout.setColumnStretch(1, 3)\n self._layout.setColumnStretch(2, 1)\n self._layout.setRowStretch(0, 4)\n self._layout.setRowStretch(1, 3)\n self.mainwidget.resize(1250,250)\n self.mainwidget.move(40,40)", "title": "" }, { "docid": "3ebadc46cffa4867188a0ea9c6043653", "score": "0.5186459", "text": "def parameters_ui(layout, params):\n \n r = layout.row()\n r.prop(params, \"tweak_extra_layers\")\n r.active = params.tweak_extra_layers\n \n col = r.column(align=True)\n row = col.row(align=True)\n row.prop(params, \"tweak_layers\", index=0, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=1, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=2, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=3, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=4, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=5, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=6, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=7, toggle=True, text=\"\")\n row = col.row(align=True)\n row.prop(params, \"tweak_layers\", index=16, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=17, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=18, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=19, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=20, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=21, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=22, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=23, toggle=True, text=\"\")\n \n col = r.column(align=True)\n row = col.row(align=True)\n row.prop(params, \"tweak_layers\", index=8, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=9, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=10, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=11, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=12, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=13, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=14, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=15, toggle=True, text=\"\")\n row = col.row(align=True)\n row.prop(params, \"tweak_layers\", index=24, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=25, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=26, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=27, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=28, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=29, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=30, toggle=True, text=\"\")\n row.prop(params, \"tweak_layers\", index=31, toggle=True, text=\"\")", "title": "" }, { "docid": "58cdda090dc3705be5d6aae49e5b6f4a", "score": "0.51820946", "text": "def generate_base_layout_tests ():\n\n blacklist = [\n 'dz', # <SPCE> is of type EIGHT_LEVEL but the keymap never assigns ISO_Level5_Shift so levels 5,6,7,8 are broken.\n 'mv', # Maps 2 real modifiers to <MDSW>.\n 
'nec_vndr/jp' # Maps 2 real modifiers to <RALT>, also has 2 groups which we currently don't support.\n ]\n\n layout_names = ex (\"./bin/keyboard-layout-editor --list-default\", ret_stdout=True).split('\\n')\n for name in layout_names:\n if name in blacklist:\n continue\n\n target_fname = './tests/XKeyboardConfig/' + name + '.xkb'\n\n # Getting this information makes everything O(n^2) because the\n # implementation of --show-info looks up the information of all layouts\n # and from that list it linearly searches for the passed layout name.\n # We could implement this whole test layout generation in C the 'right\n # way' if we ever implement support for include statements in our\n # parser. Then we wouldn't need to use the get_xkb_str.sh script. For\n # now I don't care that much because this stuff won't be called at\n # runtime.\n info_lines = ex (\"./bin/keyboard-layout-editor --show-info \" + name, ret_stdout=True).split('\\n')\n for idx, line in enumerate(info_lines):\n if idx == 0:\n ex (\"echo '// \" + line + \"' > \" + target_fname)\n else:\n ex (\"echo '// \" + line + \"' >> \" + target_fname)\n\n ex (\"echo\" + \" >> \" + target_fname)\n ex ('./tests/get_xkb_str.sh ' + name + ' >> ' + target_fname)", "title": "" }, { "docid": "e14fb4ae20a9710edbb94bab11676273", "score": "0.51657337", "text": "def display_layout(self):\n for square in self.__slots__:\n if square[0] == 'H':\n print(self[square])\n else:\n print(self[square], end=\"\")\n print(Style.RESET_ALL)", "title": "" }, { "docid": "0c38f3224475f5a76355aed9afb33895", "score": "0.5153351", "text": "def add_layout_pins(self):\n\n # Add power and ground to all the cells except:\n # the fanout driver, the right-most load\n # The routing to connect the loads is over the first and last cells\n # We have an even number of drivers and must only do every other\n # supply rail\n for i in range(0,len(self.driver_inst_list),2):\n inv = self.driver_inst_list[i]\n for load in self.load_inst_map[inv]:\n if load==self.rightest_load_inst[inv]:\n continue\n for pin_name in [\"vdd\", \"gnd\"]:\n pin = load.get_pin(pin_name)\n self.add_power_pin(pin_name, pin.rc())\n else:\n # We have an even number of rows, so need to get the last gnd rail\n inv = self.driver_inst_list[-1]\n for load in self.load_inst_map[inv]:\n if load==self.rightest_load_inst[inv]:\n continue\n pin_name = \"gnd\"\n pin = load.get_pin(pin_name)\n self.add_power_pin(pin_name, pin.rc())\n\n\n # input is A pin of first inverter\n a_pin = self.driver_inst_list[0].get_pin(\"A\")\n self.add_via_center(layers=self.m1_stack,\n offset=a_pin.center())\n self.add_layout_pin(text=\"in\",\n layer=\"m2\",\n offset=a_pin.ll().scale(1,0),\n height=a_pin.cy())\n \n\n # output is A pin of last load inverter\n last_driver_inst = self.driver_inst_list[-1]\n a_pin = self.rightest_load_inst[last_driver_inst].get_pin(\"A\")\n self.add_via_center(layers=self.m1_stack,\n offset=a_pin.center())\n mid_point = vector(a_pin.cx()+3*self.m2_width,a_pin.cy())\n self.add_path(\"m2\",[a_pin.center(), mid_point, mid_point.scale(1,0)])\n self.add_layout_pin_segment_center(text=\"out\",\n layer=\"m2\",\n start=mid_point,\n end=mid_point.scale(1,0))", "title": "" }, { "docid": "753e98a8f03b8c037489c0210416a71a", "score": "0.5140268", "text": "def symbol_layout(symbol_name):\n # list to hold layouts for each section\n layouts = []\n\n symbol = Registry(\"symbols\")[symbol_name]\n\n main_name = symbol.display_names[0]\n\n layouts.append(html.H6('Graph'))\n # TODO: costly, should just construct subgraph directly?\n 
subgraph = nx.ego_graph(propnet_nx_graph, symbol, undirected=True, radius=2)\n subgraph_data = graph_conversion(subgraph,\n show_model_labels=True, show_symbol_labels=True)\n\n if len(subgraph_data) < 50:\n graph_config = GRAPH_LAYOUT_CONFIG.copy()\n graph_config['maxSimulationTime'] = 1500\n else:\n graph_config = GRAPH_LAYOUT_CONFIG\n\n layouts.append(html.Div(\n Cytoscape(\n id=\"model_graph\",\n elements=subgraph_data,\n stylesheet=GRAPH_STYLESHEET,\n layout=graph_config,\n **GRAPH_SETTINGS['model_symbol_view']\n )\n ))\n\n if len(symbol.display_names) > 1:\n display_names = \", \".join(symbol.display_names[1:])\n other_names = dcc.Markdown(\"Also known as: {}\".format(display_names))\n layouts.append(other_names)\n\n if len(symbol.display_symbols) > 1:\n symbols = \" \".join(symbol.display_symbols)\n symbols = dcc.Markdown(\"Common symbols: {}\".format(symbols))\n layouts.append(symbols)\n\n if symbol.category in ('property', 'condition'):\n units = dcc.Markdown(\"Canonical units: **{}**\".format(symbol.unit_as_string))\n dimension = dcc.Markdown(\"**{}**\".format(symbol.dimension_as_string))\n layouts.append(units)\n layouts.append(dimension)\n\n if symbol.comment:\n layouts.append(dcc.Markdown(symbol.comment))\n\n return html.Div([\n main_name,\n html.Br(),\n html.Div(layouts),\n html.Br(),\n #dcc.Link('< Back to Properties', href='/property'),\n #html.Br(),\n dcc.Link('< Back', href='/explore')\n ])", "title": "" }, { "docid": "f347faded92a000aa1ddca8d9161df40", "score": "0.5138637", "text": "def create_layout_stuff(self):\n if self.layout_parent is None or not self.layout_created:\n self.layout_parent = Qt.QWidget(self.wparent())\n self.layout = Qt.QGridLayout(self.layout_parent)\n self.set_widgets(self.layout_parent,self.dataitem.caption,icon=self.icon())\n self.layout_created = True\n self._wtop = self.layout_parent;", "title": "" }, { "docid": "9896ee2ea4a1ea648405044084177170", "score": "0.5116098", "text": "def layout_parts(self):\n grid = QGridLayout()\n grid.addWidget(self.from_label, 0, 0)\n grid.addWidget(self.from_currency, 0, 1)\n grid.addWidget(self.amount_label, 0, 2)\n grid.addWidget(self.amount, 0, 3)\n grid.addWidget(self.to_label, 1, 0)\n grid.addWidget(self.to_currency, 1, 1)\n grid.addWidget(self.converted_label, 1, 2)\n grid.addWidget(self.amount_converted, 1, 3)\n grid.addWidget(self.from_date, 3, 0, 1, 2)\n grid.addWidget(self.to_date, 3, 2, 1, 2)\n return grid", "title": "" }, { "docid": "3e4b5d06ebbcc2a92c8eaf25888c4040", "score": "0.5113629", "text": "def sectionSelect():\r\n self.MyInput = buttonValue.get() # gets the value of the radiobutton that the user selected.\r\n if self.MyInput == 1: # if it was the first radio button, add widgets for part (a)\r\n self.textLabel1.place(x=120, y=40) #placing the text label on the window at the specified x and y pixel coordinates\r\n self.textInput1.place(x=180, y=60)\r\n self.input1error.place(x=220, y=60)\r\n self.textLabel2.place(x=60, y=85)\r\n self.textInput2.place(x=80, y=140)\r\n self.input2error.place(x=325, y=140)\r\n self.button1.place(x=175, y=160) \r\n self.output1.place(x=200-self.output1.winfo_width()/2, y=190) # for the x coordinate we half the width of the label widget and half the width of the window and subtract the two so that the label is exacly central.\r\n \r\n self.textLabel3.place_forget() # incase these widgets are currently being displayed on the window we want to remove them\r\n self.textInput3.place_forget()\r\n self.button2.place_forget()\r\n self.output2.place_forget()\r\n 
self.openGraphButton.place_forget()\r\n \r\n self.textLabel4.place_forget()\r\n self.textInput4.place_forget()\r\n self.button3.place_forget()\r\n self.output3.place_forget()\r\n \r\n elif self.MyInput == 2: #if it was the second radio button, add widgets for part (d)\r\n self.textLabel1.place_forget()\r\n self.textInput1.place_forget()\r\n self.input1error.place_forget()\r\n self.textLabel2.place_forget()\r\n self.textInput2.place_forget()\r\n self.input2error.place_forget()\r\n self.button1.place_forget()\r\n self.output1.place_forget()\r\n \r\n self.textLabel3.place(x=120, y=40)\r\n self.textInput3.place(x=180, y=60)\r\n self.input3error.place(x=220, y=60)\r\n self.button2.place(x=175, y=85)\r\n self.output2.place(x=200-self.output2.winfo_width()/2, y=115)\r\n if self.graphGenerated == True: # if a graph has been generated before then the button to display the graph can be shown\r\n self.openGraphButton.place(x=180,y=160)\r\n else: # otherwise it can't be displayed\r\n self.openGraphButton.place_forget()\r\n \r\n self.textLabel4.place_forget()\r\n self.textInput4.place_forget()\r\n self.input4error.place_forget()\r\n self.button3.place_forget()\r\n self.output3.place_forget()\r\n elif self.MyInput == 3: # if it was the third radio button add widgets for part (f)\r\n self.textLabel1.place_forget()\r\n self.textInput1.place_forget()\r\n self.input1error.place_forget()\r\n self.textLabel2.place_forget()\r\n self.textInput2.place_forget()\r\n self.input2error.place_forget()\r\n self.button1.place_forget()\r\n self.output1.place_forget()\r\n \r\n self.textLabel3.place_forget()\r\n self.textInput3.place_forget()\r\n self.input3error.place_forget()\r\n self.button2.place_forget()\r\n self.output2.place_forget()\r\n self.openGraphButton.place_forget()\r\n \r\n self.textLabel4.place(x=120, y=40)\r\n self.textInput4.place(x=180, y=60)\r\n self.input4error.place(x=220, y=60)\r\n self.button3.place(x=175, y=85)\r\n self.output3.place(x=200-self.output3.winfo_width()/2, y=115)", "title": "" }, { "docid": "ec15ddc5a5a4ba8e04dfbd8bf66c67dc", "score": "0.51125133", "text": "def generateLayout(self):\n\n hbox = wx.BoxSizer(wx.HORIZONTAL)\n\n ########## Render the elements\n\n heading = wx.StaticText(self, label=\"Output directory\", size=(200, -1))\n\n self.OutputDirBtn = wx.Button(self, label='Browse...')\n self.OutputDirTxt = wx.TextCtrl(self, value='Select output directory',style=wx.TE_READONLY)\n self.generate = wx.Button(self, label='Generate Layout')\n\n self.OutputDirBtn.Bind(wx.EVT_BUTTON, self.selectOutputDirectory, id=self.OutputDirBtn.GetId())\n self.generate.Bind(wx.EVT_BUTTON, self.OnGenerate, id=self.generate.GetId())\n\n hbox.Add(self.OutputDirBtn, flag=wx.LEFT | wx.EXPAND, border=10)\n hbox.Add(self.OutputDirTxt, proportion=1,flag=wx.LEFT | wx.EXPAND, border=10)\n self.vbox.Add(heading,flag=wx.ALL | wx.EXPAND, border=10)\n self.vbox.Add(hbox,flag=wx.ALL | wx.EXPAND, border=10)\n\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\n hbox2.Add(self.generate,proportion=1,flag=wx.LEFT | wx.EXPAND, border=10)\n self.vbox.Add(hbox2,flag=wx.ALL | wx.EXPAND, border=10)\n\n self.vbox.Add((-1, 10))", "title": "" }, { "docid": "a47dc198a30c3372bab0399dcf99c96f", "score": "0.51088333", "text": "def route_control_lines(self):\n # 5 = clk, 4 = tri_en_bar, 3 = tri_en, 2 = clk_bar, 1 = w_en, 0 = s_en\n\n self.clk_position = [self.central_line_xoffset[5], 0]\n self.tri_en_bar_position = [self.central_line_xoffset[4], 0]\n self.tri_en_position = [self.central_line_xoffset[3], 0]\n self.clk_bar_position = 
[self.central_line_xoffset[2], 0]\n self.w_en_position = [self.central_line_xoffset[1], 0]\n self.s_en_position = [self.central_line_xoffset[0], 0]\n\n right_hand_mapping = [2, 4, 3, 2, 1, 0]\n\n right_side = []\n right_side.append(self.ms_flop_data_in_offset\n + self.msf_data_in.clk_positions[0]\n - vector(0, 0.5 * drc[\"minwidth_metal1\"]))\n right_side.append(self.tri_gate_array_offset\n + vector(1,-1).scale(self.tri_gate_chars[\"en_bar\"])\n - vector(0, 0.5 * drc[\"minwidth_metal1\"]))\n right_side.append(self.tri_gate_array_offset\n + vector(1,-1).scale(self.tri_gate_chars[\"en\"])\n - vector(0, 0.5 * drc[\"minwidth_metal1\"]))\n right_side.append(self.precharge_array_position\n + self.precharge_array.pclk_position)\n right_side.append(self.write_driver_array_position\n + self.write_driver_array.wen_positions[0])\n right_side.append(self.sens_amp_array_position \n + self.sens_amp_array.SCLK_positions[0])\n \n \"\"\" Routing control signals through the central bus.\n Connection of control signal input to the central bus is in metal1\n Connection from the central bus to the main control block crosses\n pre-decoder and this connections are in metal3\"\"\"\n\n control_line_offsets = []\n \"\"\" Connecting right hand side [sense amp. write_driver , tri state\n gates, ffs] to the central bus\"\"\" \n \n for i in range(len(right_side)):\n bus_line_index = right_hand_mapping[i]\n\n x_offset = self.central_line_xoffset[bus_line_index]\n y_offset = self.tri_gate_array_offset.y\n height = self.central_line_y_offset - y_offset\n right_side_connection_width = right_side[i].x - self.central_line_xoffset[bus_line_index]\n right_side_contact_offset = [self.central_line_xoffset[bus_line_index],\n right_side[i].y]\n self.add_rect(layer=\"metal1\", \n offset=right_side_contact_offset,\n width=right_side_connection_width, \n height=drc[\"minwidth_metal1\"])\n self.add_via(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=right_side_contact_offset)\n\n if(right_side[i].y > 0):\n self.add_rect(layer=\"metal2\", \n offset=[self.central_line_xoffset[bus_line_index], 0], \n width=drc[\"minwidth_metal2\"], \n height=right_side[i].y + 2*drc[\"minwidth_metal2\"])\n\n \"\"\" CLK connection from central bus to MSF address\n should we move this somewhere else hard to find when modify\"\"\" \n msf_address_clk_position = (self.msf_address_offset \n + self.msf_address.clk_positions[0].rotate_scale(1,-1) \n + vector(- 0.5 * drc[\"minwidth_metal1\"], \n 2 * drc[\"minwidth_metal2\"]))\n clk_connection_position = (self.msf_address_offset \n + vector(self.msf_address.clk_positions[0].y, \n 2 * drc[\"minwidth_metal3\"]))\n\n connection_width = self.central_line_xoffset[5] - clk_connection_position.x\n self.add_via(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=msf_address_clk_position, \n mirror=\"R90\")\n self.add_via(layers=(\"metal2\", \"via2\", \"metal3\"),\n offset=msf_address_clk_position, \n mirror=\"R90\")\n\n mid_base = vector(msf_address_clk_position.x, clk_connection_position.y)\n mid1 = mid_base + vector(0, 0.5 * drc[\"minwidth_metal3\"])\n mid2 = (mid_base + vector([0.5 * drc[\"minwidth_metal3\"]] * 2) \n + vector(connection_width, 0)) \n self.add_path(layer=\"metal3\",\n coordinates=[msf_address_clk_position,mid1,mid2], \n width=drc[\"minwidth_metal3\"])\n \n self.add_via(layers=(\"metal2\", \"via2\", \"metal3\"),\n offset=[self.central_line_xoffset[5], \n clk_connection_position.y])\n\n # Clk connection from central Bus to wordline_driver\n wl_clk_position = (self.wordline_driver_position \n + 
self.wordline_driver.clk_positions[0])\n connection_width = (self.central_line_xoffset[5] - wl_clk_position.x\n + drc[\"minwidth_metal1\"])\n y_off = self.max_point - 2.5 * drc[\"minwidth_metal1\"]\n start = wl_clk_position + vector(0.5 * drc[\"minwidth_metal1\"], 0)\n mid1 = [wl_clk_position.x, y_off]\n mid2 = mid1 + vector(connection_width, 0)\n self.add_path(layer=\"metal1\",\n coordinates=[wl_clk_position, mid1, mid2], \n width=drc[\"minwidth_metal1\"],\n offset=start)\n \n self.add_via(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=[self.central_line_xoffset[5], \n self.max_point - 3*drc[\"minwidth_metal1\"]])", "title": "" }, { "docid": "fa98c07160c1c3ae588c4bf636de6f4c", "score": "0.5106492", "text": "def __init__(self, layout=None):\n # Constants\n self._ROWS = (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\")\n self._COLUMNS = (\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\")\n self._BLUE_PALACE_ROW_MIN = 7\n self._BLUE_PALACE_ROW_MAX = 9\n self._BLUE_PALACE_COLUMN_MIN = 3\n self._BLUE_PALACE_COLUMN_MAX = 5\n self._RED_PALACE_ROW_MIN = 0\n self._RED_PALACE_ROW_MAX = 2\n self._RED_PALACE_COLUMN_MIN = 3\n self._RED_PALACE_COLUMN_MAX = 5\n self._ROWS_MIN = 0\n self._ROWS_MAX = len(self._ROWS)\n self._COLUMNS_MIN = 0\n self._COLUMNS_MAX = len(self._COLUMNS)\n self._BLUE_PIECES = {BlueGeneral, BlueGuard, BlueChariot, BlueElephant, BlueHorse, BlueCannon, BlueSoldier}\n self._RED_PIECES = {RedGeneral, RedGuard, RedChariot, RedElephant, RedHorse, RedCannon, RedSoldier}\n\n self._moves = list()\n self._layout_copies = list()\n self._layout = {\n (0, 0): RedChariot(),\n (0, 1): RedElephant(),\n (0, 2): RedHorse(),\n (0, 3): RedGuard(),\n (0, 4): None,\n (0, 5): RedGuard(),\n (0, 6): RedElephant(),\n (0, 7): RedHorse(),\n (0, 8): RedChariot(),\n (1, 0): None,\n (1, 1): None,\n (1, 2): None,\n (1, 3): None,\n (1, 4): RedGeneral(),\n (1, 5): None,\n (1, 6): None,\n (1, 7): None,\n (1, 8): None,\n (2, 0): None,\n (2, 1): RedCannon(),\n (2, 2): None,\n (2, 3): None,\n (2, 4): None,\n (2, 5): None,\n (2, 6): None,\n (2, 7): RedCannon(),\n (2, 8): None,\n (3, 0): RedSoldier(),\n (3, 1): None,\n (3, 2): RedSoldier(),\n (3, 3): None,\n (3, 4): RedSoldier(),\n (3, 5): None,\n (3, 6): RedSoldier(),\n (3, 7): None,\n (3, 8): RedSoldier(),\n (4, 0): None,\n (4, 1): None,\n (4, 2): None,\n (4, 3): None,\n (4, 4): None,\n (4, 5): None,\n (4, 6): None,\n (4, 7): None,\n (4, 8): None,\n (5, 0): None,\n (5, 1): None,\n (5, 2): None,\n (5, 3): None,\n (5, 4): None,\n (5, 5): None,\n (5, 6): None,\n (5, 7): None,\n (5, 8): None,\n (6, 0): BlueSoldier(),\n (6, 1): None,\n (6, 2): BlueSoldier(),\n (6, 3): None,\n (6, 4): BlueSoldier(),\n (6, 5): None,\n (6, 6): BlueSoldier(),\n (6, 7): None,\n (6, 8): BlueSoldier(),\n (7, 0): None,\n (7, 1): BlueCannon(),\n (7, 2): None,\n (7, 3): None,\n (7, 4): None,\n (7, 5): None,\n (7, 6): None,\n (7, 7): BlueCannon(),\n (7, 8): None,\n (8, 0): None,\n (8, 1): None,\n (8, 2): None,\n (8, 3): None,\n (8, 4): BlueGeneral(),\n (8, 5): None,\n (8, 6): None,\n (8, 7): None,\n (8, 8): None,\n (9, 0): BlueChariot(),\n (9, 1): BlueElephant(),\n (9, 2): BlueHorse(),\n (9, 3): BlueGuard(),\n (9, 4): None,\n (9, 5): BlueGuard(),\n (9, 6): BlueElephant(),\n (9, 7): BlueHorse(),\n (9, 8): BlueChariot()\n }\n\n if layout:\n for position in self._layout:\n if position in layout:\n self._layout[position] = layout[position]\n else:\n self._layout[position] = None", "title": "" }, { "docid": "178143b9bcdae1e38fb13e796977ea9a", "score": "0.508665", 
"text": "def assignmentsLayout(self):\n settings_menu_items = [\"Unused\", [\"&General Settings\", \"&Change Theme\", \"&Update Assignments\"]]\n navigate_menu_items = [\"Unused\", [\" &Main Menu\", \"!> & Assignments Menu\", \" &Reviews Menu\"]]\n help_menu_items = [\"Unused\", [\"&About\"]]\n\n self.layout = [\n [ sg.ButtonMenu(\"Settings\", settings_menu_items, key=\"-SETTINGS-MENU-\",\n size=(9,1), font=\"Calibri 9\"),\n sg.ButtonMenu(\"Navigate\", navigate_menu_items, key=\"-NAVIGATE-MENU-\",\n size=(9,1), font=\"Calibri 9\"),\n sg.ButtonMenu(\"Help\", help_menu_items, key=\"-HELP-MENU-\",\n size=(8,1), font=\"Calibri 9\"),\n sg.Text(\"\", pad=(305,0)),\n sg.Button(\"Exit\", pad=(0,0))\n ],\n [ sg.HSeparator() ],\n [ sg.Column(\n [ [ sg.Text(\"Modules\", font=\"Calibri 9\") ],\n [ sg.Column(\n [ [ sg.Listbox(\n values=self.listboxs.modulesList,\n size=(30,20), key=\"-MODULE-\", enable_events=True) ] ]\n ), ] ], element_justification='c' ),\n sg.Column(\n [ [ sg.Text(\"Topics\", font=\"Calibri 9\") ],\n [ sg.Column(\n [ [ sg.Listbox(values=[],\n size=(35,20), key=\"-TOPIC-\",enable_events=True,\n disabled=True) ] ] ),\n ] ], element_justification='c' ),\n sg.Column(\n [ [ sg.Text(\"Assignments\", font=\"Calibri 9\") ],\n [ sg.Column(\n [ [ sg.Listbox(values=[],\n size=(40,20), key=\"-PROBLEM-\",enable_events=True,\n disabled=True) ] ]\n ) ]\n ], element_justification='c' ),\n sg.Text(\"\", pad=(10,0)),\n sg.Column(\n [ [ sg.Text(\"Actions\", font=\"Calibri 9\") ],\n [ sg.Frame(title=\"Operations\",\n layout=self.actionButtons.buttonsFrame,\n pad=(0,82)) ],\n ], element_justification='c')\n ]\n ]", "title": "" }, { "docid": "0159e9ae30a1edd6de1487acc919ee0a", "score": "0.5085237", "text": "def configure(self):\n\n # Place all design variables on the Assembly boundary.\n self.add('S', Float(0.0, iotype='in', desc = 'Wing Area'))\n self.add('ac_w', Float(0.0, iotype='in',\n desc = 'Weight of aircraft + payload'))\n self.add('SFCSL', Float(0.0, iotype='in',\n desc = 'sea-level SFC value'))\n self.add('thrust_sl', Float(0.0, iotype='in',\n desc = 'Maximum sea-level thrust'))\n self.add('AR', Float(0.0, iotype='in',\n desc = 'Aspect Ratio'))\n self.add('oswald', Float(0.0, iotype='in',\n desc = \"Oswald's efficiency\"))\n\n\n # Splines\n self.add('SysXBspline', SysXBspline(num_elem=self.num_elem,\n num_pt=self.num_pt,\n x_init=self.x_pts,\n jac_h=self.jac_h))\n self.SysXBspline.x_pt = self.x_pts\n\n self.add('SysHBspline', SysHBspline(num_elem=self.num_elem,\n num_pt=self.num_pt,\n x_init=self.x_pts,\n jac_h=self.jac_h))\n\n self.add('SysMVBspline', SysMVBspline(num_elem=self.num_elem,\n num_pt=self.num_pt,\n x_init=self.x_pts,\n jac_h=self.jac_h))\n\n self.add('SysGammaBspline', SysGammaBspline(num_elem=self.num_elem,\n num_pt=self.num_pt,\n x_init=self.x_pts,\n jac_gamma=self.jac_gamma))\n\n\n\n # Atmospherics\n self.add('SysSFC', SysSFC(num_elem=self.num_elem))\n self.add('SysTemp', SysTemp(num_elem=self.num_elem))\n self.add('SysRho', SysRho(num_elem=self.num_elem))\n self.add('SysSpeed', SysSpeed(num_elem=self.num_elem))\n\n self.connect('SFCSL', 'SysSFC.SFCSL')\n self.connect('SysHBspline.h', 'SysSFC.h')\n self.connect('SysHBspline.h', 'SysTemp.h')\n self.connect('SysHBspline.h', 'SysRho.h')\n self.connect('SysTemp.temp', 'SysRho.temp')\n self.connect('SysTemp.temp', 'SysSpeed.temp')\n self.connect('SysMVBspline.M', 'SysSpeed.M')\n self.connect('SysMVBspline.v_spline', 'SysSpeed.v_spline')\n\n\n # -----------------------------------\n # Comps for Coupled System begin here\n # 
-----------------------------------\n\n # Vertical Equilibrium\n self.add('SysCLTar', SysCLTar(num_elem=self.num_elem))\n\n self.connect('S', 'SysCLTar.S')\n self.connect('ac_w', 'SysCLTar.ac_w')\n self.connect('SysRho.rho', 'SysCLTar.rho')\n self.connect('SysGammaBspline.Gamma', 'SysCLTar.Gamma')\n self.connect('SysSpeed.v', 'SysCLTar.v')\n\n\n # Drag\n self.add('SysAeroSurrogate', SysAeroSurrogate(num_elem=self.num_elem))\n\n self.connect('AR', 'SysAeroSurrogate.AR')\n self.connect('oswald', 'SysAeroSurrogate.oswald')\n\n\n # Horizontal Equilibrium\n self.add('SysCTTar', SysCTTar(num_elem=self.num_elem))\n\n self.connect('S', 'SysCTTar.S')\n self.connect('ac_w', 'SysCTTar.ac_w')\n self.connect('SysRho.rho', 'SysCTTar.rho')\n self.connect('SysGammaBspline.Gamma', 'SysCTTar.Gamma')\n self.connect('SysSpeed.v', 'SysCTTar.v')\n self.connect('SysAeroSurrogate.CD', 'SysCTTar.CD')\n self.connect('SysAeroSurrogate.alpha', 'SysCTTar.alpha')\n\n\n # Moment Equilibrium\n self.add('SysCM', SysCM(num_elem=self.num_elem))\n self.connect('SysAeroSurrogate.alpha', 'SysCM.alpha')\n self.SysCM.eval_only = True\n\n\n # Weight\n self.add('SysFuelWeight', SysFuelWeight(num_elem=self.num_elem))\n self.SysFuelWeight.fuel_w = np.linspace(1.0, 0.0, self.num_elem+1)\n\n self.connect('S', 'SysFuelWeight.S')\n self.connect('SysRho.rho', 'SysFuelWeight.rho')\n self.connect('SysXBspline.x', 'SysFuelWeight.x')\n self.connect('SysGammaBspline.Gamma', 'SysFuelWeight.Gamma')\n self.connect('SysSpeed.v', 'SysFuelWeight.v')\n self.connect('SysSFC.SFC', 'SysFuelWeight.SFC')\n self.connect('SysCTTar.CT_tar', 'SysFuelWeight.CT_tar')\n\n\n # ----------------------------------------\n # Drag subsystem - Newton for inner loop\n # ----------------------------------------\n\n self.add('drag_solver', NewtonSolver())\n self.drag_solver.add_parameter(('SysAeroSurrogate.alpha'))\n self.drag_solver.add_constraint('SysAeroSurrogate.CL = SysCLTar.CL')\n\n self.drag_solver.iprint = 1\n self.drag_solver.gradient_options.iprint = 1\n self.drag_solver.atol = 1e-9\n self.drag_solver.rtol = 1e-9\n self.drag_solver.max_iteration = 15\n self.drag_solver.gradient_options.atol = 1e-14\n self.drag_solver.gradient_options.rtol = 1e-14\n self.drag_solver.gradient_options.maxiter = 25\n #self.drag_solver.gradient_options.lin_solver = 'petsc_ksp'\n\n\n # ------------------------------------------------\n # Coupled Analysis - Newton for outer loop\n # TODO: replace with GS/Newton cascaded solvers when working\n # -----------------------------------------------\n\n self.add('coupled_solver', NewtonSolver())\n\n\n # Old way, using params and eq-constraints\n #self.coupled_solver.add_parameter('SysCLTar.CT_tar')\n #self.coupled_solver.add_parameter('SysCLTar.fuel_w')\n #self.coupled_solver.add_parameter('SysCLTar.alpha')\n #self.coupled_solver.add_parameter('SysAeroSurrogate.eta')\n #self.coupled_solver.add_parameter('SysCTTar.fuel_w')\n #self.coupled_solver.add_constraint('SysCLTar.CT_tar = SysCTTar.CT_tar')\n #self.coupled_solver.add_constraint('SysCLTar.fuel_w = SysFuelWeight.fuel_w')\n #self.coupled_solver.add_constraint('SysCLTar.alpha = SysAeroSurrogate.alpha')\n #self.coupled_solver.add_constraint('SysAeroSurrogate.eta = SysCM.eta')\n #self.coupled_solver.add_constraint('SysCTTar.fuel_w = SysFuelWeight.fuel_w')\n\n # Direct connections (cycles) are faster.\n self.connect('SysCTTar.CT_tar', 'SysCLTar.CT_tar')\n self.connect('SysFuelWeight.fuel_w', 'SysCLTar.fuel_w')\n self.connect('SysAeroSurrogate.alpha', 'SysCLTar.alpha')\n 
self.connect('SysCM.eta', 'SysAeroSurrogate.eta')\n self.connect('SysFuelWeight.fuel_w', 'SysCTTar.fuel_w')\n\n # (Only non-GS pair)\n self.coupled_solver.add_parameter('SysCM.eta')\n self.coupled_solver.add_constraint('SysCM.eta_res = 0')\n\n self.coupled_solver.atol = 1e-9\n self.coupled_solver.rtol = 1e-9\n self.coupled_solver.max_iteration = 15\n self.coupled_solver.gradient_options.atol = 1e-14\n self.coupled_solver.gradient_options.rtol = 1e-14\n self.coupled_solver.gradient_options.maxiter = 25\n #self.coupled_solver.gradient_options.lin_solver = 'petsc_ksp'\n\n self.driver.gradient_options.iprint = 1\n self.coupled_solver.gradient_options.iprint = 1\n self.coupled_solver.iprint = 1\n\n\n # --------------------\n # Downstream of solver\n # --------------------\n\n # Functionals (i.e., components downstream of the coupled system.)\n self.add('SysTau', SysTau(num_elem=self.num_elem))\n self.add('SysTmin', SysTmin(num_elem=self.num_elem))\n self.add('SysTmax', SysTmax(num_elem=self.num_elem))\n self.add('SysSlopeMin', SysSlopeMin(num_elem=self.num_elem))\n self.add('SysSlopeMax', SysSlopeMax(num_elem=self.num_elem))\n self.add('SysFuelObj', SysFuelObj(num_elem=self.num_elem))\n\n self.connect('S', 'SysTau.S')\n self.connect('thrust_sl', 'SysTau.thrust_sl')\n self.connect('SysRho.rho', 'SysTau.rho')\n self.connect('SysCTTar.CT_tar', 'SysTau.CT_tar')\n self.connect('SysHBspline.h', 'SysTau.h')\n self.connect('SysSpeed.v', 'SysTau.v')\n self.connect('SysTau.tau', 'SysTmin.tau')\n self.connect('SysTau.tau', 'SysTmax.tau')\n self.connect('SysGammaBspline.Gamma', 'SysSlopeMin.Gamma')\n self.connect('SysGammaBspline.Gamma', 'SysSlopeMax.Gamma')\n self.connect('SysFuelWeight.fuel_w', 'SysFuelObj.fuel_w')\n\n\n # Promote useful variables to the boundary.\n self.create_passthrough('SysHBspline.h_pt')\n self.connect('h_pt', 'SysGammaBspline.h_pt')\n self.create_passthrough('SysMVBspline.v_pt')\n self.create_passthrough('SysTmin.Tmin')\n self.create_passthrough('SysTmax.Tmax')\n self.create_passthrough('SysFuelObj.fuelburn')\n self.create_passthrough('SysHBspline.h')\n\n #-------------------------\n # Iteration Hieararchy\n #-------------------------\n self.driver.workflow.add(['SysXBspline', 'SysHBspline',\n 'SysMVBspline', 'SysGammaBspline',\n 'SysSFC', 'SysTemp', 'SysRho', 'SysSpeed',\n 'coupled_solver',\n 'SysTau', 'SysTmin', 'SysTmax', 'SysSlopeMin', 'SysSlopeMax',\n 'SysFuelObj'])\n self.coupled_solver.workflow.add(['SysCLTar', 'drag_solver', 'SysCTTar', 'SysCM', 'SysFuelWeight'])\n self.drag_solver.workflow.add(['SysAeroSurrogate'])\n\n # Change some scaling parameters so that we match what they were when\n # the pickle was created.\n self.SysTau.thrust_scale = 0.072\n self.SysCLTar.fuel_scale = 1e6\n self.SysFuelWeight.fuel_scale = 1e6", "title": "" }, { "docid": "986b6080d2ebfc55974806f05565a485", "score": "0.508458", "text": "def build_layout(self, pb_window: sg.Window):\n index = [0]\n tasks = [\"get data ...\", \"building search layout ...\", \"building search results layout ...\",\n \"building answer layout ...\",\n \"building master layout ...\"]\n\n @contextlib.contextmanager\n def write_case():\n \"\"\"write $start {task_name} update index and then write $pb {index} {len_of_tasks }\"\"\"\n pb_window.write_event_value(\"$start\", tasks[index[0]])\n yield\n index[0] += 1\n pb_window.write_event_value(\"$pb\", [index[0], len(tasks)])\n\n with write_case():\n self.data = datalib.get_data()\n self.choices = list(datalib.get_data().keys())\n\n with write_case():\n search_layout = [\n 
[sg.Input(size=(input_width, 1), enable_events=True, key='-inp-'),\n sg.B('search', key='-search-', bind_return_key=True), sg.B('create', key='-create-'),\n sg.B('Locals', key='-locals-')],\n [sg.pin(sg.Col(\n [[sg.Listbox(values=[], size=(input_width, num_items_to_show), enable_events=True, key='-box-',\n select_mode=sg.LISTBOX_SELECT_MODE_SINGLE, no_scrollbar=True)]],\n key='-box-visible-', pad=(0, 0), visible=False))],\n [sg.T(\"Number Answers :\"), sg.Spin(list(range(1, 11)), initial_value=3, key='-num_answers-')],\n [sg.Checkbox('All Answer', default=False, key='-all_answer-bool-')],\n [sg.Checkbox('Saved Answer', default=False, key='-save-answer-bool-')],\n [sg.T('Search Engine'),\n sg.Combo(SUPPORTED_SEARCH_ENGINES, SUPPORTED_SEARCH_ENGINES[1], key='-search_engine-')],\n ]\n with write_case():\n search_results_layout = [\n [sg.Listbox(['search something for results !' + ' ' * 20], size=(60, 6), key='-search-results-',\n enable_events=True)]\n ]\n with write_case():\n answer_layout = [\n [sg.B('open question', key='-open-question-'), sg.T('with link'), sg.In(key='-link-')],\n [sg.Multiline('', size=(70, 8), key='-answer-')],\n [sg.B('Save', key='-save-answer-'), sg.T('under the name:'), sg.Input(key='-answer-name-')]\n ]\n with write_case():\n layout = [[\n sg.TabGroup([\n [sg.Tab('search', search_layout, key='-search-master-')],\n [sg.Tab('search results', search_results_layout, key='-search-results-master-', visible=False)],\n [sg.Tab('answer', answer_layout, key='-answer-master-', visible=False)]\n ], key='-master-')\n ],\n [sg.T(\"found bug? you have idea to new feature? click here to open issue !\", enable_events=True,\n font=\"Courier-New 12 underline\", key=\"-open-issue-\"), ],\n\n [sg.T(\"if you enjoy from this free software,it would be great if you could buy me a coffee:\",\n font='Courier-New 12'),\n sg.B(image_data=sg.ICON_BUY_ME_A_COFFEE, key=\"-buy-me-coffee-\")]\n ]\n pb_window.write_event_value(\"$done\", layout)", "title": "" }, { "docid": "775616613dcac1a1db06e9162287ca98", "score": "0.5074555", "text": "def make_layout(self):\r\n\r\n layout = QG.QGridLayout() # create a grid for subWidgets\r\n layout.setSpacing(10)\r\n self.setLayout(layout)\r\n\r\n self.setup_font_styles()\r\n\r\n # -------- Define items ----------\r\n # import button\r\n self.importFileBtn = QW.QPushButton('Import File(s)', self)\r\n self.importFileBtn.clicked.connect(self.import_multiple_files)\r\n layout.addWidget(self.importFileBtn, 18, 1, 1, 7)\r\n\r\n # Save block\r\n self.saveasCSVBtn = QW.QPushButton('Save as CSV', self)\r\n self.saveasCSVBtn.clicked.connect(self.saveasCSV)\r\n layout.addWidget(self.saveasCSVBtn, 16, 24, 1, 5)\r\n self.saveFigure = QW.QPushButton('Save Figure', self)\r\n # self.saveFigure.resize(self.clrButton.sizeHint())\r\n self.saveFigure.clicked.connect(self.no_function_yet)\r\n layout.addWidget(self.saveFigure, 18, 24, 1, 5)\r\n\r\n # File name block\r\n self.nameTxtbox = QW.QLabel('Series Name:', self)\r\n self.nameTxtbox.setFont(self.title_font)\r\n layout.addWidget(self.nameTxtbox, 1, 1, 1, 7)\r\n self.nameTxtbox = QW.QLineEdit(self)\r\n self.nameTxtbox.setPlaceholderText('file name')\r\n self.nameTxtbox.editingFinished.connect(self.no_function_yet)\r\n self.nameTxtbox.returnPressed.connect(self.no_function_yet)\r\n layout.addWidget(self.nameTxtbox, 2, 1, 1, 7)\r\n\r\n # Metadata tree\r\n # self.metadataTree_name = qw.QLabel('Metadata:', self)\r\n # self.metadataTree_name.setFont(font)\r\n # layout.addWidget(self.metadataTree_name, 3, 1)\r\n # self.metadataTree = 
pg.DataTreeWidget()\r\n # self.metadataTree.setHeaderItem()\r\n # layout.addWidget(self.metadataTree, 4, 1, 13, 7)\r\n\r\n # Scan list tree\r\n self.scanListTree_label = QW.QLabel('Scans:', self)\r\n self.scanListTree_label.setFont(self.title_font)\r\n layout.addWidget(self.scanListTree_label, 3, 1)\r\n self.transientData_list = QW.QListWidget()\r\n # self.scanListTree = pg.parametertree.ParameterTree()\r\n # self.metadataTree.setHeaderItem()\r\n layout.addWidget(self.transientData_list, 4, 1, 13, 7)\r\n\r\n # Plot widget\r\n self.plotWidget_name = QW.QLabel('Plot', self)\r\n self.plotWidget_name.setFont(self.title_font)\r\n layout.addWidget(self.plotWidget_name, 1, 9)\r\n\r\n self.setup_plot_widget()\r\n layout.addWidget(self.plotWidget, 2, 9, 13, 20)\r\n\r\n # plot modification buttons\r\n # self.DataAnalysisBox_label = qw.QLabel('Modify', self)\r\n # self.DataAnalysisBox_label.setFont(font)\r\n # layout.addWidget(self.DataAnalysisBox_label, 15, 10)\r\n self.DataAnalysisBox = QW.QGroupBox()\r\n self.setup_data_analysis_box()\r\n layout.addWidget(self.DataAnalysisBox, 16, 10)", "title": "" }, { "docid": "cf3b4dd4b232ab081d4ff8fa92248cb6", "score": "0.50708556", "text": "def show_control_panel(self):\n root=Tk()\n s1 = Scale(root,length=600,label='Hmin', from_=0, to=360,orient=HORIZONTAL, command=lambda v: setattr(self, '_hmin', int(v)))\n s1.set(self._hmin)\n s4 = Scale(root,length=600,label='Hmax', from_=0, to=360,orient=HORIZONTAL, command=lambda v: setattr(self, '_hmax', int(v)))\n s4.set(self._hmax)\n s2 = Scale(root,length=600,label='Smin', from_=0, to=255, orient=HORIZONTAL, command=lambda v: setattr(self, '_smin', int(v)))\n s2.set(self._smin)\n s5 = Scale(root,length=600,label='Smax', from_=0, to=255, orient=HORIZONTAL, command=lambda v: setattr(self, '_smax', int(v)))\n s5.set(self._smax)\n s3 = Scale(root,length=600,label='Vmin', from_=0, to=255, orient=HORIZONTAL, command=lambda v: setattr(self, '_vmin', int(v)))\n s3.set(self._vmin)\n s6 = Scale(root,length=600,label='Vmax', from_=0, to=255, orient=HORIZONTAL, command=lambda v: setattr(self, '_vmax', int(v)))\n s6.set(self._vmax)\n s1.pack()\n s2.pack()\n s3.pack()\n s4.pack()\n s5.pack()\n s6.pack()\n root.mainloop()", "title": "" }, { "docid": "ac71ce5ccd27d9aae802bb51bae95886", "score": "0.5067484", "text": "def build(self):\n #list of operators\n self.operators = [\"+\", \"-\", \"*\", \"/\"]\n #last operator, at start will be none\n self.last_operators = None\n #last button that was pressed, at start will be none\n self.last_button = None\n\n #main layout for the app\n main_layout = BoxLayout(orientation = 'vertical')\n #text that would be displayed\n self.solution = TextInput(multiline = False, readonly = True,\n halign = 'right', font_size = 55)\n main_layout.add_widget(self.solution)\n\n # buttons that are to be displayed\n buttons = [\n ['7','8','9','/'],\n ['4','5','6','*'],\n ['1','2','3','-'],\n ['.','0','C','+'],\n ]\n\n # creating a layout for the button and binding a function to each\n for btns in buttons:\n horizontal_layout = BoxLayout()\n for btn in btns:\n button = Button(text=btn, pos_hint={'center_x':.5,\n 'center_y':.5})\n button.bind(on_press=self.button_on_press)\n horizontal_layout.add_widget(button)\n main_layout.add_widget(horizontal_layout)\n #equals button to calculate the end result\n equal_button = Button(text=\"=\", pos_hint={\"center_x\": 0.5,\n \"center_y\": 0.5})\n equal_button.bind(on_press=self.on_solution)\n main_layout.add_widget(equal_button)\n\n return main_layout", "title": "" }, { 
"docid": "f9ba69fc5e86bd17999a9b173d0e6ad6", "score": "0.5063112", "text": "def addComponents(self):\n\n # mainLayout\n self.mainLayout = QVBoxLayout()\n self.setLayout(self.mainLayout)\n # title\n self.lblTitle = QLabel(\"Simple Converter\")\n self.mainLayout.addWidget(self.lblTitle)\n\n # comboCategoryChooser\n self.lblCategoryChooser = QLabel(\"Choose A Category...\")\n self.comboCategoryChooser = QComboBox()\n self.comboCategoryChooser.addItems([\"Volumes\",\n \"Distances\",\n \"Weights\",\n \"Temperatures\",\n \"Areas\" ,\n \"Times\"\n ])\n\n self.mainLayout.addWidget(self.lblCategoryChooser)\n self.mainLayout.addWidget(self.comboCategoryChooser)\n\n\n # control\n self.controlWidget = QWidget()\n self.controlLayout = QHBoxLayout()\n self.controlWidget.setLayout(self.controlLayout)\n self.mainLayout.addWidget(self.controlWidget)\n\n # comboFrom\n self.widgetFrom = QWidget()\n self.layoutFrom = QFormLayout()\n self.widgetFrom.setLayout(self.layoutFrom)\n self.controlLayout.addWidget(self.widgetFrom)\n # label , combo and lineEdit\n self.lblFrom = QLabel(\"From\")\n self.comboFrom = QComboBox()\n self.editFrom = QLineEdit()\n self.layoutFrom.addRow(self.lblFrom, self.comboFrom)\n self.layoutFrom.addRow(QLabel(), self.editFrom)\n self.comboFrom.addItems(self.data[self.comboCategoryChooser.currentText()])\n\n # comboTo\n self.widgetTo = QWidget()\n self.layoutTo = QFormLayout()\n self.widgetTo.setLayout(self.layoutTo)\n self.controlLayout.addWidget(self.widgetTo)\n # label , combo and lineEdit\n self.lblTo = QLabel(\"To\")\n self.comboTo = QComboBox()\n self.editTo = QLineEdit()\n self.layoutTo.addRow(self.lblTo, self.comboTo)\n self.layoutTo.addRow(QLabel(), self.editTo)\n self.comboTo.addItems(self.data[self.comboCategoryChooser.currentText()])\n\n\n # buttons\n self.widgetButtons = QWidget()\n self.layoutButtons = QHBoxLayout()\n self.widgetButtons.setLayout(self.layoutButtons)\n self.mainLayout.addWidget(self.widgetButtons)\n # btnConvert, btnReset\n self.btnConvert = QPushButton(\"Convert\")\n self.btnReset = QPushButton(\"Reset\")\n self.layoutButtons.addWidget(self.btnConvert)\n self.layoutButtons.addWidget(self.btnReset)", "title": "" }, { "docid": "9b4abcf895edcfd9d78378ab6ca1ec07", "score": "0.5050799", "text": "def __init__(self, name, *to_place):\n\t\tself.name = name\n\t\tself.sublayouts = [] # list of (position, sub-layout)\n\t\tself.entities = [] # list of (position, entitiy)\n\t\tfor x, y, child in to_place:\n\t\t\tself.place(x, y, child)", "title": "" }, { "docid": "0ec0efe01e9a009c3483c6ca8a81035c", "score": "0.50356144", "text": "def __init__(self, transformations, ik=False, switch_type='alps', mount_length=DSA_KEY_WIDTH, mount_width=DSA_KEY_WIDTH, mx_notches=True):\n\t\tmx_length = 14.4\n\t\tmx_width = 14.4\n\t\talps_length = 12.8\n\t\talps_width = 15.5\n\t\tself.mount_length = mount_length \n\t\tself.mount_width = mount_width\n\t\tself.switch_type = switch_type\n\t\t\n\t\t\n\t\tmx_hole = Cube([mx_width, mx_length, self.thickness], center=True)\n\t\tnip_r = ((Cylinder(2.75,1,_fn=60).rotate([0,90,90]).translate([-7.2,-1.475,-0.5])))+\\\n\t\t (Cube([1,2.75,3]).rotate([0,-30,0]).translate([-7.2,-1.475,-0.5]))\n\t\tnip_l = nip_r.mirror(-1)\n\t\tif mx_notches == True:\n\t\t\tmx_hole = mx_hole - nip_l - nip_r\n\n\t\talps_hole = Cube([alps_width, alps_length, self.thickness], center=True)\n\t\n\t\tif switch_type == 'mx':\n\t\t\tself.switch_mount = Cube([self.mount_width, self.mount_length, self.thickness], center=True) - mx_hole\n\t\tif switch_type == 
'alps':\n\t\t\tself.switch_mount = Cube([self.mount_width, self.mount_length, self.thickness], center=True) - alps_hole \n\t\t\n\t\tself.ignore_key = ik\n\t\tself.transformations = transformations", "title": "" }, { "docid": "2640e7be2e564318422c877c37aca650", "score": "0.5028264", "text": "def initUI(self):\n\n self.hor_layout = QtWidgets.QHBoxLayout()\n self.base_path_edit = QtWidgets.QPlainTextEdit(self.path)\n self.base_path_edit.setMaximumHeight(50)\n self.base_path_browse_pb = QtWidgets.QPushButton()\n self.base_path_browse_pb.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icons/Icon_Library/Browse_Dir_Path.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.base_path_browse_pb.setIcon(icon3)\n self.hor_layout.addWidget(self.base_path_edit)\n\n verlayout = QtWidgets.QVBoxLayout()\n verlayout.addWidget(self.base_path_browse_pb)\n verlayout.addStretch()\n self.hor_layout.addLayout(verlayout)\n self.hor_layout.setSpacing(0)\n self.setLayout(self.hor_layout)", "title": "" }, { "docid": "23aabb304587a381e196c85e957208c0", "score": "0.50218856", "text": "def calculate_mapping(self):\n\t\t# Create a list of lists filled with None, then we can populate with\n\t\t# the serial location if present\n\t\tself.layout_mapping = [[None for y in range(0,self.size_y)] for x in range(0,self.size_x)]\n\t\tself.pixel_count = 0\n\t\t\n\t\tfor module in sorted(self.module_config.keys()):\n\t\t\tmodule_data = self.module_config[module]\n\t\t\tmodule_orientation = module_data[\"orientation\"]\n\t\t\tmodule_height = module_data[\"height\"]\n\t\t\tmodule_width = module_data[\"width\"]\n\t\n\t\t\tif module_orientation == 'N':\n\t\t\t\tself.add_north(module_data[\"height\"],\n\t\t\t\t module_data[\"width\"],\n\t\t\t\t module_data[\"x_position\"],\n\t\t\t\t module_data[\"y_position\"])\n\t\t\telif module_orientation == 'E':\n\t\t\t\tself.add_east(module_data[\"height\"],\n\t\t\t\t module_data[\"width\"],\n\t\t\t\t module_data[\"x_position\"],\n\t\t\t\t module_data[\"y_position\"])\n\t\t\telif module_orientation == 'S':\n\t\t\t\tself.add_south(module_data[\"height\"],\n\t\t\t\t module_data[\"width\"],\n\t\t\t\t module_data[\"x_position\"],\n\t\t\t\t module_data[\"y_position\"])\n\t\t\telif module_orientation == 'W':\n\t\t\t\tself.add_west(module_data[\"height\"],\n\t\t\t\t module_data[\"width\"],\n\t\t\t\t module_data[\"x_position\"],\n\t\t\t\t module_data[\"y_position\"])\n\t\t\telse:\n\t\t\t\tlogging.error(\"The orientation of a tile in the config was not recognised\")", "title": "" }, { "docid": "87ff50348194e8ad5796721cf3ac450a", "score": "0.50183487", "text": "def layout(self,obj):\n pass", "title": "" }, { "docid": "ed7a5cd1feef7a93e056b969b3f35e66", "score": "0.5013701", "text": "def layout(self,obj): \n if self._objects == []:\n # This is the first object to be packed. 
Align it with\n # this empty box.\n right = self.right()\n top = self.top()\n else:\n # Align the new object with the last object that was\n # packed.\n last = self._objects[-1]\n bottom_left,top_right = last.getTightBounds()\n right = top_right.getX()\n top = top_right.getZ()\n\n # Align the left of the new object with `right`.\n bottom_left,top_right = obj.getTightBounds() \n left = bottom_left.getX()\n distance = right - left\n obj.setPos(obj.getPos() + Point3(distance,0,0))\n \n # Align the top of the new object with `top`.\n t = top_right.getZ()\n distance = top - t\n obj.setPos(obj.getPos() + Point3(0,0,distance))\n \n obj.setPos(obj.getPos() + Point3(self.margin,0,0))", "title": "" }, { "docid": "ff391d2833ba6e641774c03dcaf524d7", "score": "0.50134426", "text": "def reset_starting_layout(self):", "title": "" }, { "docid": "64726fe555720ca61c1dfc886a4b2784", "score": "0.5009818", "text": "def _ui_layout(self):\n\n # layout the major elements of our widget\n layout = QtWidgets.QGridLayout()\n layout.setSpacing(get_dpi_scale()*5)\n layout.addWidget(self._table_view)\n layout.addWidget(self._toolbar)\n\n # apply the layout to the containing form\n self.widget.setLayout(layout)", "title": "" }, { "docid": "3f4e8af4afd2d474f9522918bb770c02", "score": "0.50015944", "text": "def create_lower_section(self):\n # frame to fit widgets in\n lower_frame = ttk.Frame(self.master, borderwidth=10)\n lower_frame.pack()\n\n # generate password button\n self.genbutton = ttk.Button(\n lower_frame, text=_(\"Generate\"),\n command=self.generate_password)\n self.genbutton.pack()\n \n # blank label to add space\n ttk.Label(lower_frame).pack()\n\n # generated passwords display\n ttk.Entry(lower_frame, width=52, font=(\"Consolas\", 10),\n state=\"readonly\", cursor=\"\", textvariable=self.password,\n justify=\"center\").pack(side='left')\n\n # copy to clipboard button\n self.copybutton = ttk.Button(\n lower_frame, text=_(\"Copy\"),\n state=[\"disabled\"], command=self.copy_to_clipboard\n )\n self.copybutton.pack(side='left')\n \n # blank label to add space\n ttk.Label(self.master, text=\" \", font=(\"\",5)).pack()\n\n # reset button\n ttk.Button(\n self.master, text=_(\"Reset\"), command=self.reset).pack()", "title": "" }, { "docid": "aed8a690aa7c28a72cd69132bdc04064", "score": "0.49964", "text": "def initLayout(self):\n self._icon = _icon = wx.EmptyIcon()\n _icon.LoadFile(\"./pic/pc.ico\", wx.BITMAP_TYPE_ICO)\n self.SetIcon(_icon)\n\n client_left_V_sizer = wx.BoxSizer(wx.VERTICAL)\n client_left_V_sizer.Add(self.clientCommandList, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 5)\n self.clientListPanel.SetSizer(client_left_V_sizer)\n\n eneityPanelSizer = wx.BoxSizer(wx.HORIZONTAL)\n eneityPanelSizer.Add((30, -1))\n eneityPanelSizer.Add(self.statictext_eneityinfo, 4, wx.ALIGN_CENTER, 50)\n eneityPanelSizer.Add(self.eneitysendButton, 1, wx.ALIGN_CENTER, 50)\n eneityPanelSizer.Add((30, -1))\n self.clientCommandEntityPanel.SetSizer(eneityPanelSizer)\n # self.clientCommandEntityPanel.Enable(False)\n\n runpanelsizer_V = wx.BoxSizer(wx.VERTICAL)\n\n runpanelSizer = wx.BoxSizer(wx.HORIZONTAL)\n runpanelSizer.Add((30, -1))\n runpanelSizer.Add(self.statictext_commandtorun, 2, wx.ALIGN_CENTER, 5)\n runpanelSizer.Add(self.EditText_commandtorun, 1, wx.ALIGN_CENTER, 50)\n runpanelSizer.Add((30, -1))\n\n # self.runpanelSizer.Add(self.editlistcrtl2,1,wx.EXPAND, 50)\n\n runpanelsizer_V.Add(runpanelSizer, 1, wx.EXPAND | wx.ALIGN_CENTER, 20)\n runpanelsizer_V.Add(self.editlistcrtl2, 6, wx.ALL | wx.EXPAND | 
wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 25)\n\n self.clientCommandRunPanel.SetSizer(runpanelsizer_V)\n\n contentPanelSizer = wx.BoxSizer(wx.HORIZONTAL)\n contentPanelSizer.Add((20, -1))\n # contentPanelSizer.Add(self.statictext_contentPara,4,wx.ALIGN_CENTER,50)\n contentPanelSizer.Add(self.runButton, 1, wx.ALIGN_CENTER, 0)\n contentPanelSizer.Add((20, -1))\n self.clientCommandContextPanel.SetSizer(contentPanelSizer)\n\n client_right_V_sizer = wx.BoxSizer(wx.VERTICAL)\n\n client_right_V_sizer.Add(self.clientCommandEntityPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n\n client_right_V_sizer.Add(self.clientCommandRunPanel, 7,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n\n client_right_V_sizer.Add(self.clientCommandContextPanel, 2,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n self.clientCommandRCPanel.SetSizer(client_right_V_sizer)\n\n client_all_H_sizer = wx.BoxSizer(wx.HORIZONTAL)\n # client_all_H_sizer.Add(self.clientCommandList)\n client_all_H_sizer.Add(self.clientListPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n client_all_H_sizer.Add(self.clientCommandRCPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n self.clientPanel.SetSizer(client_all_H_sizer)\n # ########## the upper is for the client panel\n\n\n server_left_V_sizer = wx.BoxSizer(wx.VERTICAL)\n server_left_V_sizer.Add(self.serverCommandList, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 5)\n self.serverListPanel.SetSizer(server_left_V_sizer)\n\n runpanelsizer_V2 = wx.BoxSizer(wx.VERTICAL)\n\n runpanelSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n runpanelSizer2.Add((30, -1))\n runpanelSizer2.Add(self.statictext_commandtorun_server, 2, wx.ALIGN_CENTER, 5)\n runpanelSizer2.Add(self.EditText_commandtorun_server, 1, wx.ALIGN_CENTER, 50)\n runpanelSizer2.Add((30, -1))\n\n runpanelsizer_V2.Add(runpanelSizer2, 1, wx.EXPAND | wx.ALIGN_CENTER, 20)\n runpanelsizer_V2.Add(self.editlistcrtl_server2, 8, wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM,\n 25)\n self.serverCommandRunPanel.SetSizer(runpanelsizer_V2)\n\n server_contentPanelSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n server_contentPanelSizer2.Add((20, -1))\n # contentPanelSizer.Add(self.statictext_contentPara,4,wx.ALIGN_CENTER,50)\n server_contentPanelSizer2.Add(self.runButton2, 1, wx.ALIGN_CENTER, 0)\n server_contentPanelSizer2.Add((20, -1))\n self.serverCommandContextPanel.SetSizer(server_contentPanelSizer2)\n\n server_right_V_sizer = wx.BoxSizer(wx.VERTICAL)\n\n server_right_V_sizer.Add(self.serverCommandRunPanel, 4,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n server_right_V_sizer.Add(self.serverCommandContextPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n self.serverCommandRCPanel.SetSizer(server_right_V_sizer)\n\n server_all_H_sizer = wx.BoxSizer(wx.HORIZONTAL)\n # server_all_H_sizer.Add(self.clientCommandList)\n server_all_H_sizer.Add(self.serverListPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n server_all_H_sizer.Add(self.serverCommandRCPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n self.serverPanel.SetSizer(server_all_H_sizer)\n\n testsettoolSizer = wx.BoxSizer(wx.HORIZONTAL)\n testsettoolSizer.Add((400, -1))\n testsettoolSizer.Add(self.addsetToolButton, 0, wx.RIGHT | wx.ALIGN_CENTER, 5)\n testsettoolSizer.Add(self.delsetToolButton, 0, wx.RIGHT | wx.ALIGN_CENTER, 5)\n 
self.testsetToolPanel.SetSizer(testsettoolSizer)\n\n testsubsettoolSizer = wx.BoxSizer(wx.HORIZONTAL)\n testsubsettoolSizer.Add((400, -1))\n testsubsettoolSizer.Add(self.addsubsetToolButton, 0, wx.RIGHT | wx.ALIGN_CENTER, 5)\n testsubsettoolSizer.Add(self.delsubsetToolButton, 0, wx.RIGHT | wx.ALIGN_CENTER, 5)\n self.testsetsubToolPanel.SetSizer(testsubsettoolSizer)\n\n testsetListSizer = wx.BoxSizer(wx.VERTICAL)\n testsetListSizer.Add(self.testsetToolPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 5)\n self.testsetListPanel.SetSizer(testsetListSizer)\n testsetListSizer.Add(self.testsetList, 10,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n\n testsubsetListSizer = wx.BoxSizer(wx.VERTICAL)\n testsubsetListSizer.Add(self.testsetsubToolPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 4)\n testsubsetListSizer.Add(self.testsetsubList, 7,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n\n testsubsetListSizer.Add(self.runsetButton,2,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 5)\n\n self.testsetsubListPanel.SetSizer(testsubsetListSizer)\n\n testAllSizer = wx.BoxSizer(wx.HORIZONTAL)\n testAllSizer.Add(self.testsetListPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 8)\n testAllSizer.Add(self.testsetsubListPanel, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 8)\n self.testsetPanel.SetSizer(testAllSizer)\n\n pageSizer = wx.BoxSizer(wx.VERTICAL)\n pageSizer.Add(self.logobmp, 1, wx.ALL | wx.EXPAND, 0)\n pageSizer.Add(self.bookPage, 8, wx.ALL | wx.EXPAND, 5)\n self.bookPanel.SetSizer(pageSizer)\n\n bsizer = wx.StaticBoxSizer(self.staticbox, wx.VERTICAL)\n bsizer.Add(self.msgTextCtrl, 1,\n wx.ALL | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 0)\n self.msgPanel.SetSizer(bsizer)\n\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.bookPanel, 9, wx.ALL | wx.EXPAND, 0)\n mainSizer.Add(self.msgPanel, 1, wx.ALL | wx.EXPAND, 0)\n\n self.SetSizer(mainSizer)\n\n pass", "title": "" }, { "docid": "52d112ad195dad4938f709cab0db621a", "score": "0.49864924", "text": "def draw_layout(self):\n\t\ts = \"\"\t\t\n\t\tfor y in range(0,self.size_y):\n\t\t\tfor x in range(0,self.size_x):\n\t\t\t\ts += \"%04s \" % self.layout_mapping[x][y]\n\t\t\ts += \"\\n\"\n\t\treturn s", "title": "" }, { "docid": "b2fa655ecb647ff15cc5e667a029b979", "score": "0.49848548", "text": "def gui_layout_edit(self) -> List[List[sg.Element]]:\n\n components = self.gui_layout_components()\n layout = [\n components[\"edit_label\"],\n components[\"edit_buttons\"],\n ]\n return layout", "title": "" }, { "docid": "1ec95680056ab201ad44c97af8efcec7", "score": "0.4979368", "text": "def reposition_panes() -> None:\n comp_win = CompilerPane.window\n opt_win = windows['opt']\n pal_win = windows['pal']\n # The x-pos of the right side of the main window\n xpos = min(\n TK_ROOT.winfo_screenwidth()\n - itemconfig.window.winfo_reqwidth(),\n\n TK_ROOT.winfo_rootx()\n + TK_ROOT.winfo_reqwidth()\n + 25\n )\n # The x-pos for the palette and compiler panes\n pal_x = TK_ROOT.winfo_rootx() - comp_win.winfo_reqwidth() - 25\n pal_win.move(\n x=pal_x,\n y=(TK_ROOT.winfo_rooty() - 50),\n height=max(\n TK_ROOT.winfo_reqheight() -\n comp_win.winfo_reqheight() -\n 25,\n 30,\n ),\n width=comp_win.winfo_reqwidth(),\n )\n comp_win.move(\n x=pal_x,\n y=pal_win.winfo_rooty() + pal_win.winfo_reqheight(),\n )\n opt_win.move(\n x=xpos,\n y=TK_ROOT.winfo_rooty()-40,\n width=itemconfig.window.winfo_reqwidth())\n 
itemconfig.window.move(\n x=xpos,\n y=TK_ROOT.winfo_rooty() + opt_win.winfo_reqheight() + 25)", "title": "" }, { "docid": "856b7f3942646ace008b989d50382828", "score": "0.49744037", "text": "def setup_ui(self):\n self.buttons_dict = {self.button_down: 'down',\n self.button_forwards: 'forward',\n self.button_up: 'up',\n self.button_left: 'left',\n self.button_backwards: 'backward',\n self.button_right: 'right',\n self.button_turn_left: 'turn_left',\n self.button_turn_right: 'turn_right'}\n\n # Strings for file paths to different images with matrix index as key.\n self.image_strings = {'10': ('../images/open2_1.jpg',\n '../images/square2_1.jpg'),\n '12': ('../images/open2_3.jpg',\n '../images/square2_3.jpg'),\n '01': ('../images/open1_2.jpg',\n '../images/square1_2.jpg'),\n '21': ('../images/open3_2.jpg',\n '../images/square3_2.jpg'),\n '11': ('../images/open2_2.jpg',\n '../images/square2_2.jpg')}\n\n # Creates array of labels to display images\n self.image_grid = ((self.image_11, self.image_12, self.image_13),\n (self.image_21, self.image_22, self.image_23),\n (self.image_31, self.image_32, self.image_33))", "title": "" }, { "docid": "129b806e6da7df0692f1f61df7152b8d", "score": "0.49649903", "text": "def setup(self):\n ScriptedLoadableModuleWidget.setup(self)\n\n # Load widget from .ui file (created by Qt Designer).\n # Additional widgets can be instantiated manually and added to self.layout.\n uiWidget = slicer.util.loadUI(self.resourcePath('UI/StereotacticPlan.ui'))\n self.layout.addWidget(uiWidget)\n self.ui = slicer.util.childWidgetVariables(uiWidget)\n\n # Custom Widgets \n self.trajectoryCoordinateWidgets = {}\n for name in ['Entry', 'Target']:\n self.trajectoryCoordinateWidgets[name] = TransformableCoordinatesWidget(name, self.setTransformableWidgetsState)\n self.trajectoryCoordinateWidgets[name].coordinatesChanged.connect(self.updateParameterNodeFromGUI)\n self.ui.trajectoriesCollapsibleButton.layout().insertRow(2,name + ':', self.trajectoryCoordinateWidgets[name])\n for widget in [self.trajectoryCoordinateWidgets['Entry'], self.ui.rollAngleSliderWidget]:\n widget.setVisible(False)\n self.ui.trajectoriesCollapsibleButton.layout().labelForField(widget).setVisible(False)\n self.ui.trajectoryModeComboBox.currentTextChanged.connect(lambda t,w=widget,target_t='Target Entry Roll': [w.setVisible(t==target_t), self.ui.trajectoriesCollapsibleButton.layout().labelForField(w).setVisible(t==target_t)])\n for widget in [self.ui.ringAngleSliderWidget, self.ui.arcAngleSliderWidget, self.ui.mountingComboBox]:\n self.ui.trajectoryModeComboBox.currentTextChanged.connect(lambda t,w=widget,target_t='Target Mounting Ring Arc': [w.setVisible(t==target_t), self.ui.trajectoriesCollapsibleButton.layout().labelForField(w).setVisible(t==target_t)])\n \n self.referenceToFrameCoordinateWidgets = {}\n for name in ['Reference MS', 'Reference PC', 'Reference AC']:\n self.referenceToFrameCoordinateWidgets[name] = TransformableCoordinatesWidget(name, self.setTransformableWidgetsState)\n self.referenceToFrameCoordinateWidgets[name].coordinatesChanged.connect(self.updateParameterNodeFromGUI)\n self.ui.referenceToFrameCollapsibleButton.layout().insertRow(1, name + ':', self.referenceToFrameCoordinateWidgets[name])\n for name in ['Frame MS', 'Frame PC', 'Frame AC']:\n self.referenceToFrameCoordinateWidgets[name] = CustomCoordinatesWidget(name)\n self.referenceToFrameCoordinateWidgets[name].coordinatesChanged.connect(self.updateParameterNodeFromGUI)\n self.referenceToFrameCoordinateWidgets[name].setVisible(False)\n 
self.ui.referenceToFrameCollapsibleButton.layout().insertRow(5, name + ':', self.referenceToFrameCoordinateWidgets[name])\n self.ui.referenceToFrameCollapsibleButton.layout().labelForField(self.referenceToFrameCoordinateWidgets[name]).setVisible(False)\n self.ui.referenceToFrameModeComboBox.currentTextChanged.connect(lambda t,w=self.referenceToFrameCoordinateWidgets[name]: [w.setVisible(t=='ACPC Register'), self.ui.referenceToFrameCollapsibleButton.layout().labelForField(w).setVisible(t=='ACPC Register')])\n\n buttonSize = self.trajectoryCoordinateWidgets['Entry'].transformButton.height\n transformReferenceVolumeAction = qt.QAction()\n transformReferenceVolumeAction.setIcon(qt.QIcon(\":/Icons/Transforms.png\"))\n transformReferenceVolumeAction.setCheckable(True)\n self.transformReferenceVolumeButton = qt.QToolButton()\n self.transformReferenceVolumeButton.setDefaultAction(transformReferenceVolumeAction)\n self.transformReferenceVolumeButton.setToolButtonStyle(qt.Qt.ToolButtonIconOnly)\n self.transformReferenceVolumeButton.setFixedSize(buttonSize, buttonSize)\n self.transformReferenceVolumeButton.toggled.connect(self.updateParameterNodeFromGUI)\n self.transformReferenceVolumeButton.toggled.connect(self.setTransformableWidgetsState)\n self.ui.referenceVolumeLayout.addWidget(self.transformReferenceVolumeButton)\n\n viewTrajectoryAction = qt.QAction()\n viewTrajectoryAction.setIcon(qt.QIcon(\":/Icons/Small/SlicerVisible.png\"))\n viewTrajectoryAction.setCheckable(True)\n self.ui.viewTrajectoryToolButton.setDefaultAction(viewTrajectoryAction)\n self.ui.viewTrajectoryToolButton.setFixedSize(buttonSize, buttonSize)\n self.ui.viewTrajectoryToolButton.connect(\"toggled(bool)\", self.onViewTrajectoryToggled)\n\n resliceDriverAction = qt.QAction()\n resliceDriverAction.setIcon(qt.QIcon(qt.QPixmap(self.resourcePath('Icons/VolumeResliceDriver.png'))))\n resliceDriverAction.setCheckable(True)\n self.ui.resliceDriverToolButton.setDefaultAction(resliceDriverAction)\n self.ui.resliceDriverToolButton.connect(\"toggled(bool)\", self.setDefaultResliceDriver)\n self.ui.resliceDriverToolButton.setFixedSize(buttonSize, buttonSize)\n\n importFromActionGroup = qt.QActionGroup(self.ui.importFromToolButton)\n importFromOptions = [os.path.basename(g).replace('.py','') for g in glob.glob(os.path.join(os.path.dirname(__file__), 'StereotacticPlanLib', 'ImportFrom', 'Import_From_*.py'))]\n for option in importFromOptions:\n importAction = qt.QAction(option.replace('_',' '), self.ui.importFromToolButton)\n importAction.triggered.connect(lambda b,o=option: self.importTrajectoryFrom(o))\n importFromActionGroup.addAction(importAction)\n importFromMenu = qt.QMenu(self.ui.importFromToolButton)\n importFromMenu.addActions(importFromActionGroup.actions())\n self.ui.importFromToolButton.setMenu(importFromMenu)\n self.ui.importFromToolButton.setFixedSize(buttonSize, buttonSize)\n\n # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's\n # \"mrmlSceneChanged(vtkMRMLScene*)\" signal in is connected to each MRML widget's.\n # \"setMRMLScene(vtkMRMLScene*)\" slot.\n uiWidget.setMRMLScene(slicer.mrmlScene)\n\n # Create logic class. 
Logic implements all computations that should be possible to run\n # in batch mode, without a graphical user interface.\n self.logic = StereotacticPlanLogic()\n\n # Connections\n\n # These connections ensure that we update parameter node when scene is closed\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)\n\n # These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene\n # (in the selected parameter node).\n self.ui.trajectoryTransformNodeComboBox.connect(\"currentNodeChanged(vtkMRMLNode*)\", lambda n,w=self.ui.resliceDriverToolButton: self.setDefaultResliceDriver(w.checked))\n self.ui.trajectoryTransformNodeComboBox.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updatePreviewLineTransform)\n self.ui.trajectoryTransformNodeComboBox.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateCurrentTrajectory)\n self.ui.referenceToFrameTransformNodeComboBox.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateParameterNodeFromGUI)\n self.ui.referenceVolumeNodeComboBox.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateParameterNodeFromGUI)\n self.ui.trajectoryModeComboBox.connect(\"currentTextChanged(QString)\", self.updateParameterNodeFromGUI)\n self.ui.arcAngleSliderWidget.connect(\"valueChanged(double)\", self.updateParameterNodeFromGUI)\n self.ui.ringAngleSliderWidget.connect(\"valueChanged(double)\", self.updateParameterNodeFromGUI)\n self.ui.rollAngleSliderWidget.connect(\"valueChanged(double)\", self.updateParameterNodeFromGUI)\n self.ui.mountingComboBox.currentIndexChanged.connect(self.updateParameterNodeFromGUI)\n self.ui.referenceToFrameModeComboBox.currentIndexChanged.connect(self.updateParameterNodeFromGUI)\n\n # Buttons\n self.ui.calculateReferenceToFramePushButton.connect('clicked(bool)', self.onCalculateReferenceToFrame)\n\n # Make sure parameter node is initialized (needed for module reload)\n self.initializeParameterNode()", "title": "" }, { "docid": "4279bdbd3e2473dd13b05eba1e43074a", "score": "0.49648798", "text": "def common_buttons(self):\r\n self.common_btn_size = (278,26)\r\n self.check_btn = mc.button(\r\n label = 'Check Selected',\r\n width = self.common_btn_size[0],\r\n height = self.common_btn_size[1], \r\n command = self.check_btn_cmd\r\n )\r\n self.move_btn = mc.button(\r\n label = 'Check And Move Selected',\r\n width = self.common_btn_size[0],\r\n height = self.common_btn_size[1],\r\n command = self.move_btn_cmd\r\n ) \r\n mc.formLayout(\r\n self.main_form,\r\n e= True,\r\n attachForm = (\r\n [self.check_btn,'left',5],\r\n [self.check_btn,'bottom',10],\r\n [self.move_btn,'bottom',40],\r\n [self.move_btn,'left',5],\r\n \r\n ),\r\n attachPosition = (\r\n [self.check_btn,'right',0,99], \r\n [self.move_btn,'right',0,99],\r\n )\r\n )", "title": "" }, { "docid": "0d50f10d3eb3c7285561a827beee1b5b", "score": "0.4958221", "text": "def set_up_panes(self):\n self.player_pane = BoxLayout(orientation='vertical', size_hint=(.7, 1))\n with self.player_pane.canvas.before:\n Color(0, 1, 0, 1)\n self.player_pane.rect = Rectangle(size=self.player_pane.size, pos=self.player_pane.pos)\n self.info_pane = BoxLayout(orientation='vertical', size_hint=(.3, 1))\n with self.info_pane.canvas.before:\n Color(0, 1, 1, 1)\n self.info_pane.rect = Rectangle(size=self.info_pane.size, pos=self.info_pane.pos)\n self.add_widget(self.player_pane)\n self.add_widget(self.info_pane)\n 
self.player_pane.bind(pos=InfoForm.update_rect, size=InfoForm.update_rect)\n self.info_pane.bind(pos=InfoForm.update_rect, size=InfoForm.update_rect)", "title": "" }, { "docid": "395f60e4eabebd7842d8ca1dc8b4d9a7", "score": "0.49571973", "text": "def updateLayout(self):\r\n# state = self.get_layout_state()\r\n# if self.__layout_state is not None and\\\r\n# state == self.__layout_state:\r\n# return\r\n# self.__layout_state = state\r\n\r\n self.__data.layout.activate(self, self.contentsRect())\r\n \r\n titleRect = self.__data.layout.titleRect().toRect()\r\n footerRect = self.__data.layout.footerRect().toRect()\r\n scaleRect = [self.__data.layout.scaleRect(axisId).toRect()\r\n for axisId in self.validAxes]\r\n legendRect = self.__data.layout.legendRect().toRect()\r\n canvasRect = self.__data.layout.canvasRect().toRect()\r\n \r\n if self.__data.titleLabel.text():\r\n self.__data.titleLabel.setGeometry(titleRect)\r\n if not self.__data.titleLabel.isVisibleTo(self):\r\n self.__data.titleLabel.show()\r\n else:\r\n self.__data.titleLabel.hide()\r\n\r\n if self.__data.footerLabel.text():\r\n self.__data.footerLabel.setGeometry(footerRect)\r\n if not self.__data.footerLabel.isVisibleTo(self):\r\n self.__data.footerLabel.show()\r\n else:\r\n self.__data.footerLabel.hide()\r\n \r\n for axisId in self.validAxes:\r\n if self.axisEnabled(axisId):\r\n self.axisWidget(axisId).setGeometry(scaleRect[axisId])\r\n \r\n if axisId in (self.xBottom, self.xTop):\r\n r = QRegion(scaleRect[axisId])\r\n if self.axisEnabled(self.yLeft):\r\n r = r.subtracted(QRegion(scaleRect[self.yLeft]))\r\n if self.axisEnabled(self.yRight):\r\n r = r.subtracted(QRegion(scaleRect[self.yRight]))\r\n r.translate(-scaleRect[axisId].x(), -scaleRect[axisId].y())\r\n \r\n self.axisWidget(axisId).setMask(r)\r\n \r\n if not self.axisWidget(axisId).isVisibleTo(self):\r\n self.axisWidget(axisId).show()\r\n \r\n else:\r\n self.axisWidget(axisId).hide()\r\n \r\n if self.__data.legend:\r\n if self.__data.legend.isEmpty():\r\n self.__data.legend.hide()\r\n else:\r\n self.__data.legend.setGeometry(legendRect)\r\n self.__data.legend.show()\r\n \r\n self.__data.canvas.setGeometry(canvasRect)", "title": "" }, { "docid": "d412db9c96683b2eb3aa3728c5decb0d", "score": "0.49532402", "text": "def produce(self, layout, layers, parameters, cell):\n self._layers = layers\n self.cell = cell\n self._param_values = parameters\n self.layout = layout\n shapes = self.cell.shapes\n \n \n # cell: layout cell to place the layout\n # LayerSiN: which layer to use\n # w: waveguide width\n # length units in dbu\n \n # fetch the parameters\n dbu = self.layout.dbu\n ly = self.layout\n \n LayerSi = ly.layer(self.silayer)\n LayerSip = ly.layer(self.siplayer)\n LayerPinRecN = ly.layer(self.pinrec)\n LayerDevRecN = ly.layer(self.devrec)\n \n # Top PSR Wdiths\n w1 = int(round(self.w_tin/dbu))\n w2 = int(round(self.w_twmid/dbu))\n w7 = int(round(self.w_tsmid/dbu))\n w3 = int(round(self.w_tc/dbu))\n w4 = int(round(self.w_out/dbu))\n \n # SWG Widths\n self.w_swg_mid = (self.w_tc+self.w_swg_in) - self.w_out\n \n w5 = int(round(self.w_swg_in/dbu))\n w6 = int(round(self.w_swg_mid/dbu))\n \n la = int(round(self.la/dbu))\n lb = int(round(self.lb/dbu))\n \n n_periods = int(self.lc/self.period)\n self.lc = self.period*n_periods\n lc = int(round(self.lc/dbu))\n ld = int(round(self.l_swg_out/dbu))\n \n g = int(round(self.g/dbu))\n period = int(round(self.period/dbu))\n ff = int(round(self.ff/dbu))\n \n ############# PSR TOP #############\n # Create a list of coordinates to draw for the top 
waveguide portion of PSR\n coords_PSR_top_wg = [[0,w1/2], [la, w2/2], [la+lb, w3/2], [la+lb+lc, w4/2], \n [la+lb+lc, -w4/2], [la+lb, -w3/2], [la, -w2/2], [0, -w1/2]]\n \n # Draw the top waveguide portion of the PSR\n pts = []\n for xy in coords_PSR_top_wg:\n pts.append(Point(xy[0], xy[1]))\n shapes(LayerSi).insert(Polygon(pts))\n \n # Create list of coordinates for slab waveguide\n coords_PSR_top_slab = [[0,w1/2], [la, w7/2], [la+lb, w3/2],\n [la+lb, -w3/2], [la, -w7/2], [0, -w1/2]]\n \n # Draw the slab waveguide portion of the PSR\n pts = []\n for xy in coords_PSR_top_slab:\n pts.append(Point(xy[0], xy[1]))\n shapes(LayerSip).insert(Polygon(pts))\n \n ############# PSR BOTTOM #############\n slope_coupling = (self.w_swg_mid/2 - self.w_swg_in/2)/self.lc\n \n # Create list of coordinates to draw for the SWG coupler\n coords_swg = []\n for i in range(0, n_periods+1):\n # Create top portion of SWG\n x = self.la + self.lb + i*self.period\n y1 = -self.w_tc/2 - self.g + i*self.period*slope_coupling\n y2 = -self.w_tc/2 - self.g\n coords_swg.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg.append([int(round(x/dbu)),int(round(y2/dbu))])\n \n if i < n_periods:\n x = self.la + self.lb + (i*self.period) + self.period*(1-self.ff)\n y1 = -self.w_tc/2 - self.g\n y2 = y1 + ((i*self.period) + self.period*(1-self.ff))*slope_coupling\n coords_swg.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg.append([int(round(x/dbu)),int(round(y2/dbu))])\n \n # Create sloping out SWG into single waveguide\n slope_swg_out = (self.w_swg_mid/2 - self.w_out/2) / self.l_swg_out\n slope_wg_out = (self.w_out/2 - self.w_swg_in/2) / self.l_swg_out\n i = 0\n while i < int(round(self.l_swg_out / self.period)):\n x = self.la + self.lb + self.lc + (i*self.period) + self.period*(1-self.ff)\n y1 = -self.w_tc/2 - self.g + ((i*self.period) + self.period*(1-self.ff))*slope_wg_out\n y2 = -self.w_out/2 - self.g - ((i*self.period) + self.period*(1-self.ff))*slope_swg_out\n coords_swg.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg.append([int(round(x/dbu)),int(round(y2/dbu))])\n \n x = self.la + self.lb + self.lc + (i+1)*self.period\n y1 = -self.w_out/2 - self.g - (i+1)*self.period*slope_swg_out\n y2 = -self.w_tc/2 - self.g + (i+1)*self.period*slope_wg_out\n coords_swg.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg.append([int(round(x/dbu)),int(round(y2/dbu))])\n\n i = i+1\n \n \n coords_swg_bot = []\n for i in range(0, n_periods+1):\n # Create bottom portion of SWG\n x = self.la + self.lb + i*self.period\n y1 = -self.w_tc/2 - self.g - self.w_swg_in - i*self.period*slope_coupling\n y2 = -self.w_tc/2 - self.g - self.w_swg_in\n coords_swg_bot.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg_bot.append([int(round(x/dbu)),int(round(y2/dbu))])\n \n if i < n_periods:\n x = self.la + self.lb + (i*self.period) + self.period*(1-self.ff)\n y1 = -self.w_tc/2 - self.g - self.w_swg_in\n y2 = y1 - ((i*self.period) + self.period*(1-self.ff))*slope_coupling\n coords_swg_bot.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg_bot.append([int(round(x/dbu)),int(round(y2/dbu))])\n \n i = 0\n while i < int(round(self.l_swg_out / self.period)):\n x = self.la + self.lb + self.lc + (i*self.period) + self.period*(1-self.ff)\n y1 = -self.w_tc/2 - self.g - self.w_swg_in - ((i*self.period) + self.period*(1-self.ff))*slope_wg_out\n y2 = -self.w_out/2 - self.g - self.w_swg_mid + ((i*self.period) + self.period*(1-self.ff))*slope_swg_out\n coords_swg_bot.append([int(round(x/dbu)),int(round(y1/dbu))])\n 
coords_swg_bot.append([int(round(x/dbu)),int(round(y2/dbu))])\n \n x = self.la + self.lb + self.lc + (i+1)*self.period\n y1 = -self.w_out/2 - self.g - self.w_swg_mid + (i+1)*self.period*slope_swg_out\n y2 = -self.w_tc/2 - self.g - self.w_swg_in - (i+1)*self.period*slope_wg_out\n coords_swg_bot.append([int(round(x/dbu)),int(round(y1/dbu))])\n coords_swg_bot.append([int(round(x/dbu)),int(round(y2/dbu))])\n\n i = i+1\n \n # Append bottom portion of SWG from end\n coords_swg_bot.reverse()\n for coord in coords_swg_bot:\n coords_swg.append(coord)\n \n # Draw bottom portion of PSR\n pts = []\n for xy in coords_swg:\n pts.append(Point(xy[0], xy[1]))\n shapes(LayerSi).insert(Polygon(pts))\n \n # Create the pins on the waveguides, as short paths:\n from SiEPIC._globals import PIN_LENGTH as pin_length\n \n # Pin on the left side:\n p1 = [Point(pin_length/2,0), Point(-pin_length/2,0)]\n p1c = Point(0,0)\n self.set_p1 = p1c\n self.p1 = p1c\n pin = Path(p1, w1)\n shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, 0, 0)\n text = Text (\"pin1\", t)\n shape = shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n \n # Pin on the top right side:\n p2 = [Point(la+lb+lc-pin_length/2, 0), Point(la+lb+lc+pin_length/2, 0)]\n p2c = Point(la+lb+lc, 0)\n self.set_p2 = p2c\n self.p2 = p2c\n pin = Path(p2, w4)\n shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, la+lb+lc, 0)\n text = Text (\"pin2\", t)\n shape = shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n shape.text_halign = 2\n \n # Pin on the bottom right side:\n p3 = [Point(la+lb+lc+ld-pin_length/2, -w4/2-g-w6/2), Point(la+lb+lc+ld+pin_length/2,-w4/2-g-w6/2)]\n p3c = Point(la+lb+lc+ld, -w4/2-g-w6/2)\n self.set_p3 = p3c\n self.p3 = p3c\n pin = Path(p3, w4)\n shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, la+lb+lc+ld, -w4/2-g-w6/2)\n text = Text (\"pin3\", t)\n shape = shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n shape.text_halign = 2\n \n # Create the device recognition layer\n # First find the bounds of the device\n upper_bound = coords_PSR_top_wg[0][1]\n for coord in coords_PSR_top_wg:\n if coord[1] > upper_bound:\n upper_bound = coord[1]\n for coord in coords_PSR_top_slab:\n if coord[1] > upper_bound:\n upper_bound = coord[1]\n \n lower_bound = coords_swg[0][1]\n for coord in coords_swg:\n if coord[1] < lower_bound:\n lower_bound = coord[1]\n right_bound = la+lb+lc+ld\n left_bound = 0\n \n devrec_box = Box(Point(left_bound, lower_bound), Point(right_bound, upper_bound))\n shapes(LayerDevRecN).insert(devrec_box)\n \n psr_params = ('%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f-%.3f' % \n (self.w_tin,self.w_twmid,self.w_tsmid, self.w_tc,\n self.la, self.lb, self.w_out, self.lc, self.g,\n self.w_swg_in, self.w_swg_mid, self.l_swg_out,\n self.period, self.ff) )\n \n return \"PSR_Slab(\" + psr_params + \")\"", "title": "" }, { "docid": "9fce8abf22b716f04c4c01c134f32dab", "score": "0.49502122", "text": "def draw_symops_param(cell_param_list,sg,radius=0.2,extension=0):\n radius=float(radius)\n extension=float(extension)\n\n U=uctbx.unit_cell((cell_param_list))\n\n#rotation axes\n# \"2\" \"yellow\",\n# \"3\" \"orange\",\n# \"4\" \"mauve\",\n# \"6\" \"purple\",\n\n#screw axes (all sub_1 axes are green)\n# \"21\" \"green\",\n# \"31\" \"green\",\n# \"32\" \"lime\",\n# \"41\" \"green\",\n# \"42\" \"cyan\",\n# \"43\" \"iceblue\",\n# \"61\" \"green\",\n# \"62\" \"silver\",\n# \"63\" \"cyan\",\n# \"64\" \"iceblue\",\n# \"65\" \"blue\",\n\n color = {\n \"2\" : [1.0, 1.0, 0.0],\n \"3\" : [1.0, 
0.5, 0.0],\n \"4\" : [1.0, 0.5, 1.0],\n \"6\" : [1.0, 0.0, 1.0],\n \"2^1\" : [0.0, 1.0, 0.0],\n \"3^1\" : [0.0, 1.0, 0.0],\n \"3^2\" : [0.5, 1.0, 0.5],\n \"4^1\" : [0.0, 1.0, 0.0],\n \"4^2\" : [0.0, 1.0, 1.0],\n \"4^3\" : [0.5, 0.5, 1.0],\n \"6^1\" : [0.0, 1.0, 0.0],\n \"6^2\" : [0.8, 0.8, 0.8],\n \"6^3\" : [0.0, 1.0, 1.0],\n \"6^4\" : [0.5, 0.5, 1.0],\n \"6^5\" : [0.0, 0.0, 1.0],\n }\n\n sg = sg.upper()\n symop_axes = get_all_axes(sg,extension=extension)\n\n #CYLINDER = 'CYLINDER'\n ax_obj = {}\n #vert_obj = []\n\n #debug_out = open('debug.log','w')\n\n if symop_axes:\n for ax in symop_axes:\n #print ax\n start = list(map(set_to_zero,U.orthogonalize(list(ax['start']))))\n end = list(map(set_to_zero,U.orthogonalize(list(ax['end']))))\n###############################################################################\n# Tried rounding off start and end values in order to understand why axes go\n# missing in the drawing, but seem to be present in the cgo. Doesn't help!\n# e.g. for space group 'p23' one of the 3-fold rotations is missing (0,0,0 -> x,-x,x)\n# changing one cell axis to something ever so slightly different recovers the axis\n# e.g. set cell to be (30.00001,30.,30.,90.,90.,90) and it works!\n# start = map(lambda x: round(x,3),U.orthogonalize(ax['start']))\n# end = map(lambda x: round(x,3),U.orthogonalize(ax['end']))\n###############################################################################\n symb_ax = ax['symb']\n color_ax = color[symb_ax]\n\n #print \"axis: \",symb_ax, start, end\n if symb_ax in ax_obj:\n ax_obj[symb_ax].append(CYLINDER)\n else:\n ax_obj[symb_ax] = [CYLINDER]\n\n ax_obj[symb_ax].extend(start + end + [radius])\n ax_obj[symb_ax].extend(color[symb_ax] + color[symb_ax])\n ax_obj[symb_ax].extend(draw_symbol(start,end,symb_ax,color[symb_ax],radius*6.))\n\n# #######################################################################################\n# # Debugging output to try to understand why some axes go missing in the drawing.\n# # They don't appear to be missing from the cgo object, though!\n# for xxx in ax_obj[symb_ax]:\n# if xxx == 9.0:\n# #print \"\\n\\n\",xxx\n# xxx = \"\\n\\n\" + str(xxx) + \" \"\n# debug_out.write(xxx)\n# else:\n# #print xxx\n# #xxx = \"\\n\" + str(xxx) + \" \"\n# xxx = str(xxx) + \" \"\n# debug_out.write(xxx)\n# #print ax_obj[symb_ax]\n# debug_out.write(\"\\n\\n\")\n# big_string = str(ax_obj)\n# debug_out.write(big_string)\n# # End of debugging output\n# #######################################################################################\n\n else:\n print(\"\\nNo symmetry axes found for this space group: %s\\n\" % sg)\n\n for key,val in ax_obj.items():\n name=sg + \"_\" + key\n cmd.load_cgo(val,name)\n #debug_out.write(\"\\n\\n\" + key + \"\\n\" + str(val))\n #return ax_obj", "title": "" }, { "docid": "3f6f54ffe55a8192c44aed66766424ec", "score": "0.49455887", "text": "def setup_controls(self):\n buttons = [[('Auto levels', self.do_autolevels),\n ('Levels1', self.do_levels_test),\n ('Oldie', self.do_levels_old),\n ('Clip', self.do_levels_clip)],\n [('Blur uniform', self.do_blur),\n ('Blur gaussian', self.do_blur_gaussian),\n ('Sharpen', self.do_sharpen),\n ('Unsharp', self.do_unsharp),\n ('Edge detection', self.do_edges)],\n [('Apply', self.apply),\n ('Reset', self.reset),\n ('Save', self.save),\n ('Quit', self.quit)]]\n max_cols = max(len(row) for row in buttons)\n padding = 0.01\n start, end = 0.03, 0.97\n hspace = (end - start) / max_cols\n row_height = 0.03\n vspace = row_height + 0.003\n for j, row in enumerate(buttons):\n for i, b in 
enumerate(row):\n box = [start + i*hspace, 0.01 + j*vspace,\n hspace - padding, row_height]\n print box\n axis = self.fig.add_axes(box)\n\n button = Button(axis, b[0])\n self.axes.append(axis)\n\n button.on_clicked(b[1])\n self.buttons.append(button)", "title": "" }, { "docid": "a38ed49cab554fbfb024de864bfde3de", "score": "0.4944384", "text": "def setupShipDesign(self, compDict, weapDict, name):\n self.clearMyText()\n self.componentdata = self.mode.game.componentdata\n self.weapondata = self.mode.game.weapondata\n self.myShipDesign = self.mode.game.getShipDesign('1', self.id, compDict, weapDict, name)\n self.mode.designName = self.getShipDesignName()\n self.createQuads()\n self.createWeaponList()\n self.createComponentList()\n self.createDesignInfo()\n self.createDesignNameEntry()\n self.createDesignSubmit()", "title": "" }, { "docid": "ec753f1e0881ce109c11d13466208d21", "score": "0.494378", "text": "def setup_containers(self):\n self.containers[\"main_frame\"].grid(row=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"MAIN_FRAME\"][\"row\"],\n column=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"MAIN_FRAME\"][\"column\"],\n sticky=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"MAIN_FRAME\"][\"sticky\"])\n self.containers[\"panel_frame\"].grid(row=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"PANEL_FRAME\"][\"row\"],\n column=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"PANEL_FRAME\"][\"column\"])\n self.containers[\"order_frame\"].grid(row=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_FRAME\"][\"row\"],\n column=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_FRAME\"][\"column\"],\n sticky=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_FRAME\"][\"sticky\"])\n self.containers[\"order_frame\"].grid_rowconfigure(0, weight=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_FRAME\"][\n \"weight\"])\n self.containers[\"order_frame\"].grid_columnconfigure(0, weight=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_FRAME\"][\n \"weight\"])\n self.containers[\"order_canvas\"].grid(row=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_CANVAS\"][\"row\"],\n column=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_CANVAS\"][\"column\"],\n sticky=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDER_CANVAS\"][\"sticky\"])\n self.containers[\"orders_container\"].grid(row=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDERS_CONTAINER\"][\"row\"],\n column=const.ARCHIVE_WIDGET_CONFIG_VALUES[\"ORDERS_CONTAINER\"][\n \"column\"])", "title": "" }, { "docid": "1a528d36b6836426c4be268ad5e8ac32", "score": "0.49401894", "text": "def divide_into_panels():\n pass", "title": "" }, { "docid": "9649e3a6ce6a8df136fb60c6adc49a5e", "score": "0.49373874", "text": "def add_control(self):\n start_button = tk.Button(self.control_frame, text = \"Start\")\n start_button.grid(row=1, column=1)\n pause_button = tk.Button(self.control_frame, text = \"Pause\")\n pause_button.grid(row=1, column=2)\n step_button = tk.Button(self.control_frame, text = \"Step\")\n step_button.grid(row=1, column=3)\n step_speed_slider = tk.Scale(self.control_frame, from_=1, to=10, \n label = \"Step Speed\", showvalue=0, orient=tk.HORIZONTAL)\n step_speed_slider.grid(row=1, column=4)\n reset_button = tk.Button(self.control_frame, text = \"Reset\")\n reset_button.grid(row=1, column=5)\n quit_button = tk.Button(self.control_frame, text = \"Quit\")\n quit_button.grid(row=1, column=6)\n \n # Checkbox variable\n self.chkbox = tk.BooleanVar()\n self.chkbox.set(False)\n wraparound_checkbox = tk.Checkbutton(self.control_frame, text = \"Wraparound\", var = self.chkbox)\n wraparound_checkbox.grid(row = 1, column = 7)\n\n\n # Vertically center the controls in the control 
frame\n self.control_frame.grid_rowconfigure(1, weight = 1) \n\n # Horizontally center the controls in the control frame\n self.control_frame.grid_columnconfigure(0, weight = 1)\n self.control_frame.grid_columnconfigure(1, weight = 1)\n self.control_frame.grid_columnconfigure(2, weight = 1)\n self.control_frame.grid_columnconfigure(3, weight = 1)\n self.control_frame.grid_columnconfigure(4, weight = 1)\n self.control_frame.grid_columnconfigure(5, weight = 1) \n self.control_frame.grid_columnconfigure(6, weight = 1)\n self.control_frame.grid_columnconfigure(7, weight = 1) \n \n return (start_button, pause_button, step_button, step_speed_slider, \n reset_button, quit_button, wraparound_checkbox)", "title": "" }, { "docid": "2f777e6339e7839563fafe6c96f242b7", "score": "0.4935982", "text": "def addObjects(self):\n\n if self.settings[\"neutralRotation\"]:\n t = transform.getTransformFromPos(self.guide.pos[\"root\"])\n else:\n t = self.guide.tra[\"root\"]\n if self.settings[\"mirrorBehaviour\"] and self.negate:\n scl = [1, 1, -1]\n else:\n scl = [1, 1, 1]\n t = transform.setMatrixScale(t, scl)\n\n self.ik_cns = primitive.addTransform(\n self.root, self.getName(\"ik_cns\"), t)\n\n self.SDKctl = self.addCtl(self.ik_cns,\n \"SDK_ctl\",\n t,\n self.color_ik,\n \"cube\",\n w=self.settings[\"ctlSize\"] * self.size * 1.35,\n h=self.settings[\"ctlSize\"] * self.size * 0.75,\n d=self.settings[\"ctlSize\"] * self.size * 0.75,\n tp=self.parentCtlTag)\n\n self.ctl = self.addCtl(self.SDKctl,\n \"ctl\",\n t,\n self.color_fk,\n \"sphere\",\n w=self.settings[\"ctlSize\"] * self.size,\n h=self.settings[\"ctlSize\"] * self.size,\n d=self.settings[\"ctlSize\"] * self.size,\n tp=self.parentCtlTag)\n\n # we need to set the rotation order before lock any rotation axis\n if self.settings[\"k_ro\"]:\n rotOderList = [\"XYZ\", \"YZX\", \"ZXY\", \"XZY\", \"YXZ\", \"ZYX\"]\n attribute.setRotOrder(\n self.ctl, rotOderList[self.settings[\"default_rotorder\"]])\n attribute.setRotOrder(\n self.SDKctl, rotOderList[self.settings[\"default_rotorder\"]])\n\n params = [s for s in\n [\"tx\", \"ty\", \"tz\", \"ro\", \"rx\", \"ry\", \"rz\", \"sx\", \"sy\", \"sz\"]\n if self.settings[\"k_\" + s]]\n attribute.setKeyableAttributes(self.ctl, params)\n attribute.setKeyableAttributes(self.SDKctl, params)\n\n if self.settings[\"joint\"]:\n self.jnt_pos.append([self.ctl, 0, None, self.settings[\"uniScale\"]])", "title": "" }, { "docid": "7fe60cc96d79b8501686521ad20f1d5c", "score": "0.49352285", "text": "def add_ui_components(self):\n\n\t# Creating the frames.\n self.sub_frame1 = Frame(self)\n self.sub_frame1.grid(column=0, row=0)\n sub_frame2 = Frame(self)\n sub_frame2.grid(column=0, row=1)\n sub_frame3 = Frame(self)\n sub_frame3.grid(column=0, row=2)\n sub_frame21 = Frame(sub_frame2)\n sub_frame21.grid(column=0, row=0)\n sub_frame22 = Frame(sub_frame2)\n sub_frame22.grid(padx=20, column=1, row=0) \n sub_frame221 = Frame(sub_frame22)\n sub_frame221.grid(row=1, column=0)\n\n\n\t# Creating the top-menu buttons.\n # Creating the preprocessing button.\n self.preprocess_button = Button(self.sub_frame1, text=\"Preprocess\", command=self.preprocess)\n self.preprocess_button.grid(row=0, column=1)\n\n # Creating the segmentation button.\n self.segment_button = Button(self.sub_frame1,\n text=\"Segment\",\n state=DISABLED,\n command=self.segment)\n self.segment_button.grid(row=0, column=2)\n\n # Creating the visualization button.\n self.visualise_button = Button(self.sub_frame1,\n text=\"Visualize\",\n command=self.start_visualisation)\n 
self.visualise_button.grid(row=0, column=3)\n\n # Creating the help button.\n self.help_button = Button(self.sub_frame1,\n text=\"Help\",\n command=self.open_help)\n self.help_button.grid(row=0, column=4)\n\n # Checks if we have modalities imported.\n if len(self.mod_paths) == 0:\n self.preprocess_button['state'] = 'disabled'\n self.visualise_button['state'] = 'disabled'\n\n\n\t# Creating the select modality path.\n self.modality_label = Label(sub_frame21,\n text=\"Modality Paths\",\n relief=FLAT)\n\n self.modality_label.grid(row=1, column=1)\n self.modality_path_entry = Entry(sub_frame21)\n self.modality_path_entry.grid(row=2, column=1)\n \n self.segmentation_label = Label(sub_frame21, text=\"Segmentation Path(Optional)\", relief=FLAT) \n self.segmentation_path_entry = Entry(sub_frame21)\n self.segmentation_path_button = Button(sub_frame21, text=\"Choose\", \n command= lambda: self.choose_path(self.segmentation_path_entry))\n\n self.supervoxel_label = Label(sub_frame21, text=\"Supervoxel Path(Optional)\", relief=FLAT)\n self.supervoxel_path_entry = Entry(sub_frame21)\n self.supervoxel_path_button = Button(sub_frame21, text=\"Choose\", \n command= lambda: self.choose_path(self.supervoxel_path_entry))\n\n self.segmentation_label.grid(row=6,column=1)\n self.segmentation_path_entry.grid(row=7,column=1)\n self.segmentation_path_button.grid(row=7,column=2)\n \n self.supervoxel_label.grid(row=8, column=1)\n self.supervoxel_path_entry.grid(row=9, column=1)\n self.supervoxel_path_button.grid(row=9, column=2)\n\n self.modality_path_button = Button(sub_frame21,\n text=\"Choose\",\n command=self.choose_directory_and_import)\n\n self.modality_path_button.grid(row=2, column=2)\n\n\n\t# Creating the patients listbox.\n self.label_patients = Label(sub_frame22, text=\"Patients\")\n self.label_patients.grid(row=0, column=0)\n\n self.listbox_patients = Listbox(sub_frame221,\n selectmode='multiple',\n width=35,\n height=10)\n\n self.listbox_patients.pack(side=LEFT, fill=Y)\n self.listbox_patients.bind(\"<Button-1>\", self.listbox_changed)\n\n self.scrollbar = Scrollbar(sub_frame221)\n self.scrollbar.pack(side=RIGHT, fill=Y)\n \n # Attach listbox to scrollbar.\n self.listbox_patients.config(yscrollcommand=self.scrollbar.set)\n self.scrollbar.config(command=self.listbox_patients.yview)\n\n # Creating the status console.\n self.status_text = Text(sub_frame3, height=5)\n self.status_text.grid(column=0, row=0)\n self.status_text.tag_configure('title', justify='center', font=\"Arial 10 bold\")\n self.status_text.tag_configure('entry', justify='left', font=\"Arial 9\")\n self.status_text.insert(END, 'Status Console', 'title')\n self.status_text_entry_number = 1\n self.status_text.configure(state='disabled')\n\n self.status_text.pack(side=LEFT, fill=Y)\n self.scrollbar_status = Scrollbar(sub_frame3)\n self.scrollbar_status.pack(side=RIGHT, fill=Y)\n \n # Attach status_text to scrollbar.\n self.status_text.config(yscrollcommand=self.scrollbar_status.set)\n self.scrollbar_status.config(command=self.status_text.yview)\n\n self.segmentation_label['state'] = 'disabled'\n self.segmentation_path_entry['state'] = 'disabled'\n self.segmentation_path_button['state'] = 'disabled'\n self.supervoxel_label['state'] = 'disabled'\n self.supervoxel_path_entry['state'] = 'disabled'\n self.supervoxel_path_button['state'] = 'disabled'", "title": "" }, { "docid": "dd2c1a9ff1663fe954279e0a966df2cf", "score": "0.4930684", "text": "def auto_fill_the_form(self):\n self.x1_form.insert(0, self.x1)\n self.y1_form.insert(0, self.y1)\n 
self.x2_form.insert(0, self.x2)\n self.y2_form.insert(0, self.y2)", "title": "" }, { "docid": "f34ed883a925a933d064842c9e144857", "score": "0.4923422", "text": "def initUI(self):\n grid = QGridLayout()\n grid.addWidget(QLabel('Both'), 0, 1)\n both = QCheckBox()\n grid.addWidget(both, 0, 0)\n grid.addWidget(QLabel('Horizontal'), 0, 3)\n horizontal = QCheckBox()\n grid.addWidget(horizontal, 0, 2)\n grid.addWidget(QLabel('Vertical'), 0, 5)\n vertical = QCheckBox()\n grid.addWidget(vertical, 0, 4)\n \"\"\"both.setChecked(True)\"\"\"\n self.direction = 'none'\n choosesobel = QButtonGroup(self)\n choosesobel.addButton(both)\n choosesobel.addButton(horizontal)\n choosesobel.addButton(vertical)\n both.stateChanged.connect(self.checked_both)\n horizontal.stateChanged.connect(self.checked_horizontal)\n vertical.stateChanged.connect(self.checked_vertical)\n self.show()\n box = QDialogButtonBox()\n box.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n box.accepted.connect(self.accept)\n box.rejected.connect(self.close)\n grid.addWidget(box, 10, 0, -1, -1)\n box.setWindowTitle(\"Sobel filter\")\n self.setLayout(grid)", "title": "" }, { "docid": "407397436445910cbc045e0febfb5699", "score": "0.49177462", "text": "def add_shape_row(self,i):\n layout = self.gridLayout_13\n \n nlabel = QtWidgets.QLabel('%i' % i)\n bg = QtWidgets.QButtonGroup(self)\n hexbox = QtWidgets.QRadioButton('')\n hexbox.setChecked(True)\n rodbox = QtWidgets.QRadioButton('')\n bg.addButton(hexbox)\n bg.addButton(rodbox)\n #self.b1.toggled.connect(lambda:self.btnstate(self.b1))\n #self.b2.toggled.connect(lambda:self.btnstate(self.b2))\n \n self.includebox = QtWidgets.QCheckBox('')\n self.includebox.setChecked(True)\n \n layout.addWidget(nlabel)\n layout.addWidget(hexbox)\n layout.addWidget(rodbox)\n layout.addWidget(self.includebox)", "title": "" }, { "docid": "50e873cd5a2a4b70c2238144054e31fc", "score": "0.4915381", "text": "def generate_rows_and_columns(self):\n\n print(\"Grouping keys in rows and columns ... \")\n\n # For each key in the board, determine the X,Y of the center of the key. This determines\n # the row/column a key is in\n keysInRow = [0] * MAX_ROWS\n for index, key in enumerate(self.keyboard.keys):\n centery = key.y_unit\n row = math.floor(centery)\n\n if row > MAX_ROWS-1:\n exit(\"ERROR: Key placement produced too many rows. klepcbgen currently cannot generate a valid KiCad project for this keyboard layout.\\nExiting ...\")\n\n self.keyboard.add_key_to_row(row, index)\n self.keyboard.keys[index].row = row\n\n col = keysInRow[row]\n keysInRow[row] += 1\n \n if col > MAX_COLS-1:\n exit(\"ERROR: Key placement produced too many columns. klepcbgen currently cannot generate a valid KiCad project for this keyboard layout.\\nExiting ...\")\n\n self.keyboard.add_key_to_col(col, index)\n self.keyboard.keys[index].col = col", "title": "" }, { "docid": "cac55d3e613a7b8101ae340f01a1ab66", "score": "0.4914786", "text": "def main_screen(self):\r\n self.operations = 0\r\n def on_start_click():\r\n # when the user clicks the start button activate the on_start function to continue\r\n self.on_start()\r\n\r\n def alg_selected(index, value, op):\r\n # show a brief description when a sorting alg has been selected from the combobox\r\n information = [sort for sort in sorts if self.combobox_v.get().lower() in sort.lower()][\r\n 0] # if eg 'Bubble Sort' is in the sort description then choose that one. 
There are no duplicates of let's say 2 'Bubble Sort' in 2 descriptions\r\n self.alg_information.config(text=information)\r\n\r\n clear_screen(self.root) # clean the screen from other layouts\r\n\r\n header = tkinter.Label(text=\"Sorting Algorithms Visualization\", font=(\"Aria;\", 48, \"bold\"), background=\"#023047\", fg=\"white\")\r\n alg_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0)\r\n round_rectangle(0, 0, 425, 120, alg_canvas, radius=25, fill=\"#a2d2ff\")\r\n algorithms = (\"Bubble Sort\", \"Selection Sort\", \"Insertion Sort\", \"Merge Sort\", \"Quick Sort\", \"Bogo Sort\", \"Heap Sort\")\r\n self.combobox_v = tkinter.StringVar()\r\n self.combobox_v.trace(\"w\", alg_selected)\r\n sorting_alg_label = tkinter.Label(self.root, text=\"Sorting algorithm:\", font=(\"Aria;\", 28), bg=\"#a2d2ff\")\r\n self.sorting_alg = ttk.Combobox(self.root, values=algorithms, textvar=self.combobox_v, font=(\"Aria;\", 24), state=\"readonly\", width=16)\r\n header.pack(fill=tkinter.X)\r\n alg_canvas.place(x=65, y=130, w=425)\r\n sorting_alg_label.place(x=126, y=140)\r\n self.sorting_alg.place(x=120, y=195)\r\n\r\n information_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0, width=2000, height=1000)\r\n round_rectangle(0, 0, 1255, 380, information_canvas, radius=25, fill=\"#a2d2ff\")\r\n self.alg_information = tkinter.Label(information_canvas, text=\"\", font=(\"Aria;\", 28), wraplength=1240, justify=\"left\", bg=\"#a2d2ff\")\r\n information_canvas.place(x=75, y=325)\r\n self.alg_information.place(x=15, y=15)\r\n self.sorting_alg.current(0) # set the algorithm combobox default to bubble sort. This will also trigger the alg_selected function since we are tracing the self.combobox_v StringVar() and will put the description of bubblesort on the screen\r\n\r\n size_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0, height=150)\r\n round_rectangle(0, 0, 325, 120, size_canvas, fill=\"#a2d2ff\")\r\n array_size_label = tkinter.Label(text=\"Array size:\", font=(\"Aria;\", 28), bg=\"#a2d2ff\")\r\n self.array_size = ttk.Combobox(values=(\"Miniature\", \"Small\", \"Average\", \"Big\"), state=\"readonly\", font=(\"Aria;\", 24), width=10)\r\n self.array_size.current(0) # set the current item to the first item\r\n size_canvas.place(x=540, y=130, w=425)\r\n array_size_label.place(x=615, y=140)\r\n self.array_size.place(x=600, y=195)\r\n\r\n speed_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0, height=180)\r\n round_rectangle(0, 0, 425, 120, speed_canvas, fill=\"#a2d2ff\")\r\n sorting_speed_label = tkinter.Label(text=\"Animation speed:\", font=(\"Aria;\", 28), bg=\"#a2d2ff\")\r\n self.sorting_speed = ttk.Combobox(values=(\"Study\", \"Slow\", \"Fast\"), state=\"readonly\", font=(\"Aria;\", 24), width=15)\r\n self.sorting_speed.current(0) # set the current item to the first item\r\n speed_canvas.place(x=915, y=130, w=425)\r\n sorting_speed_label.place(x=975, y=140)\r\n self.sorting_speed.place(x=970, y=195)\r\n\r\n # use a canvas as a 'button' and set up event handlers for click press and release\r\n start_canvas = tkinter.Canvas(self.root, bg=\"#8ecae6\", highlightthickness=0)\r\n round_rectangle(0, 0, 170, 50, start_canvas, fill=\"#a2d2ff\")\r\n start_canvas.create_text(85, 25, fill=\"black\", font=(\"Aria;\", 20, \"bold\"), text=\"Visualize\")\r\n start_canvas.bind(\"<ButtonRelease-1>\", lambda e: on_start_click())\r\n start_canvas.place(x=1200, y=825, w=200, h=50)\r\n\r\n self.root.mainloop()", "title": "" }, { "docid": 
"0cb74de145dc51bf8e64d4e01c72e89b", "score": "0.49095875", "text": "def layout(self,obj):\n if self._objects == []:\n # This is the first object to be packed. Align it with\n # this empty box.\n bottom = self.bottom()\n left = self.left()\n else:\n # Align the new object with the last object that was\n # packed.\n last = self._objects[-1]\n bottom_left,top_right = last.getTightBounds()\n bottom = bottom_left.getZ()\n left = bottom_left.getX()\n\n # Align the top of the new object with `bottom`.\n bottom_left,top_right = obj.getTightBounds() \n top = top_right.getZ()\n distance = bottom - top\n obj.setPos(obj.getPos() + Point3(0,0,distance))\n\n # Align the left of the new object with `left`.\n l = bottom_left.getX()\n distance = left - l\n obj.setPos(obj.getPos() + Point3(distance,0,0))\n \n obj.setPos(obj.getPos() + Point3(0,0,-self.margin))", "title": "" }, { "docid": "aedb348c07d681372b29edcdefefce86", "score": "0.49092022", "text": "def create_widgets(self):\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self.create_content_frame()\n self.content_frame.grid(row=0, column=0,\n sticky=(tk.W, tk.N, tk.E, tk.S))\n self.create_control_frame()\n self.control_frame.grid(row=1, column=0,\n sticky=(tk.W, tk.N, tk.E, tk.S))", "title": "" }, { "docid": "59760bf8b98c2cd39c5542b6dffc67a6", "score": "0.4909181", "text": "def __layoutKarPage(self):\n \n settings = self.KaraokeMgr.SongDB.Settings\n\n panel = wx.Panel(self.notebook)\n karsizer = wx.BoxSizer(wx.VERTICAL)\n \n hsizer = wx.BoxSizer(wx.HORIZONTAL)\n text = wx.StaticText(panel, -1, \"Encoding:\")\n hsizer.Add(text, flag = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border = 5)\n self.KarEncoding = wx.ComboBox(\n panel, -1, value = settings.KarEncoding,\n choices = settings.Encodings)\n hsizer.Add(self.KarEncoding, flag = wx.EXPAND, proportion = 1)\n karsizer.Add(hsizer, flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border = 10)\n\n hsizer = wx.BoxSizer(wx.HORIZONTAL)\n text = wx.StaticText(panel, -1, \"Font:\")\n hsizer.Add(text, flag = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border = 5)\n self.KarFont = copy.copy(settings.KarFont)\n self.KarFontLabel = wx.StaticText(panel, -1, self.KarFont.getDescription())\n # Make sure the label has enough space to include big font names.\n w, h = self.KarFontLabel.GetSize()\n self.KarFontLabel.SetMinSize((max(w, 100), h))\n hsizer.Add(self.KarFontLabel, flag = wx.ALIGN_CENTER_VERTICAL, proportion = 1)\n b = wx.Button(panel, -1, 'Select')\n self.Bind(wx.EVT_BUTTON, self.clickedFontSelect, b)\n hsizer.Add(b, flag = wx.EXPAND | wx.LEFT, border = 10)\n b = wx.Button(panel, -1, 'Browse')\n self.Bind(wx.EVT_BUTTON, self.clickedFontBrowse, b)\n hsizer.Add(b, flag = wx.EXPAND | wx.LEFT, border = 10)\n\n karsizer.Add(hsizer, flag = wx.EXPAND | wx.ALL, border = 10)\n\n gsizer = wx.FlexGridSizer(0, 2, 2, 0)\n gsizer.AddGrowableCol(1, 1)\n text = wx.StaticText(panel, -1, \"MIDI Sample rate:\")\n gsizer.Add(text, flag = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border = 5)\n self.MIDISampleRate = wx.ComboBox(\n panel, -1, value = str(settings.MIDISampleRate),\n choices = map(str, settings.SampleRates))\n gsizer.Add(self.MIDISampleRate, flag = wx.EXPAND)\n karsizer.Add(gsizer, flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 10)\n \n\n self.Colours = {}\n self.ColourSamples = {}\n gsizer = wx.FlexGridSizer(0, 3, 2, 0)\n gsizer.AddGrowableCol(1, 1)\n for attribName in ['Ready', 'Sweep', 'Info', 'Title', 'Background']:\n text = wx.StaticText(panel, -1, \"%s colour:\" % (attribName))\n gsizer.Add(text, flag = 
wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border = 5)\n colour = getattr(settings, 'Kar%sColour' % (attribName))\n sample = wx.Panel(panel)\n sample.SetSize((50, 10))\n sample.SetBackgroundColour(colour)\n gsizer.Add(sample, flag = wx.EXPAND, proportion = 1)\n b = wx.Button(panel, -1, 'Select')\n self.Bind(wx.EVT_BUTTON, lambda evt, attribName = attribName: self.clickedColourSelect(attribName), b)\n gsizer.Add(b, flag = wx.EXPAND | wx.LEFT, border = 10)\n\n self.Colours[attribName] = colour\n self.ColourSamples[attribName] = sample\n karsizer.Add(gsizer, flag = wx.EXPAND | wx.ALL, border = 10)\n\n panel.SetSizer(karsizer)\n self.notebook.AddPage(panel, 'Kar (MIDI)')", "title": "" }, { "docid": "e78fe4c23fbdf4cc4f8e97687eb06841", "score": "0.49032086", "text": "def _layout_slit_size(self):\n slit_size_txt = wx.StaticText(self, -1, 'Slit Size (FWHM/2): ')\n self.slit_size_tcl = InterActiveOutputTextCtrl(self, -1,\n size=(_BOX_WIDTH, -1))\n slit_size_hint = \" Estimated full slit size\"\n self.slit_size_tcl.SetToolTipString(slit_size_hint)\n slit_size_unit_txt = wx.StaticText(self, -1, 'Unit: ')\n self.slit_size_unit_tcl = OutputTextCtrl(self, -1,\n size=(_BOX_WIDTH, -1))\n slit_size_unit_hint = \"Full slit size's unit\"\n self.slit_size_unit_tcl.SetToolTipString(slit_size_unit_hint)\n self.slit_size_sizer.AddMany([(slit_size_txt, 0, wx.LEFT, 15),\n (self.slit_size_tcl, 0, wx.LEFT, 10),\n (slit_size_unit_txt, 0, wx.LEFT, 10),\n (self.slit_size_unit_tcl, 0, wx.LEFT, 10)])", "title": "" }, { "docid": "1b207bc70ea57af888899af7bd5d4bf4", "score": "0.48973712", "text": "def setupShipDesign(self, compDict, weapDict, name):\n self.clearMyText()\n self.componentdata = self.mode.game.componentdata\n self.weapondata = self.mode.game.weapondata\n self.myShipDesign = self.mode.game.getDroneDesign('1', self.id, compDict, weapDict, name)\n self.mode.designName = self.getShipDesignName()\n self.createQuads()\n self.createWeaponList()\n self.createComponentList()\n self.createDesignInfo()\n self.createDesignNameEntry()\n self.createDesignSubmit()", "title": "" }, { "docid": "0f286bcec6aa78a5c24e31cf030f1ad3", "score": "0.4896608", "text": "def create_widgets(self):\n\n # Frames for each supply\n self.com_frame = tk.LabelFrame(self.root, text='Com Port')\n self.left_frame = tk.LabelFrame(self.root, text='Left Supply', bg='green')\n self.right_frame = tk.LabelFrame(self.root, text='Right Supply', bg='red')\n self.both_frame = tk.LabelFrame(self.root, text='Synchronus On/Off')\n self.cycle_frame = tk.LabelFrame(self.root, text = 'Power Cycle')\n\n self.com_frame.pack(side='top')\n self.left_frame.pack(side='left', pady=5)\n self.right_frame.pack(side='right', pady=5)\n self.both_frame.pack(side='top')\n self.cycle_frame.pack(side='bottom')\n\n # Getting com ports to chose from\n self.com_menu = tk.OptionMenu(self.com_frame, self.ser_port, *psu_data.get_comports())\n self.com_menu.pack(side='top')\n\n # Voltage Reading and Setting\n self.left_voltframe = tk.LabelFrame(self.left_frame, text='Volt Amp')\n self.right_voltframe = tk.LabelFrame(self.right_frame, text='Volt Amp')\n self.left_voltframe.pack(side='top')\n self.right_voltframe.pack(side='top')\n\n # Left side voltage and amperage\n self.left_volt = tk.Entry(self.left_voltframe, width=4)\n self.left_amp = tk.Entry(self.left_voltframe, width=4)\n self.left_set = tk.Button(self.left_voltframe, text='SET', command=lambda: self.set_values('1'))\n tk.Label(self.left_voltframe, text='V\\nA').pack(side='left')\n self.left_volt.pack(side='top')\n 
self.left_set.pack(side='right')\n self.left_amp.pack()\n\n # Right side voltage and amperage\n self.right_volt = tk.Entry(self.right_voltframe, width=4)\n self.right_amp = tk.Entry(self.right_voltframe, width=4)\n self.right_set = tk.Button(self.right_voltframe, text='SET', command=lambda: self.set_values('2'))\n tk.Label(self.right_voltframe, text='V\\nA').pack(side='left')\n self.right_volt.pack(side='top')\n self.right_set.pack(side='right')\n self.right_amp.pack()\n\n # Buttons to turn on/off\n # lambda is needed to pass argument to function\n self.one_on_button = tk.Button(self.left_frame, text='On', command=lambda: self.turn_on('1'))\n self.one_on_button.pack()\n self.two_on_button = tk.Button(self.right_frame, text='On', command=lambda: self.turn_on('2'))\n self.two_on_button.pack()\n self.off_one = tk.Button(self.left_frame, text='Off', command=lambda: self.turn_off('1'))\n self.off_one.pack(side='bottom')\n self.off_two = tk.Button(self.right_frame, text='Off', command=lambda: self.turn_off('2'))\n self.off_two.pack(side='bottom')\n\n #Synch buttons\n self.dual_button_on = tk.Button(self.both_frame, text='Both On', command=lambda: self.dual_power('1'))\n self.dual_button_off = tk.Button(self.both_frame, text='Both Off', command=lambda: self.dual_power('0'))\n self.dual_button_off.pack(side='right')\n self.dual_button_on.pack(side='left')\n\n #Power cycling buttons\n self.cycle_time = tk.Entry(self.cycle_frame, width=4)\n tk.Label(self.cycle_frame, text = 'Time Off').pack(side='left')\n self.cycle_time.pack(side='right')\n self.cycle_button_l = tk.Button(self.cycle_frame, text='Left')\n self.cycle_button_r = tk.Button(self.cycle_frame, text='Right')\n\n self.cycle_button_l.anchor()\n self.cycle_button_r.anchor()", "title": "" }, { "docid": "559c5eb01a1dccf2016d98bdd100c884", "score": "0.48950267", "text": "def _define_structure(self):\n self.main_sizer = wx.BoxSizer(wx.VERTICAL)\n self.box_source = wx.StaticBox(self, -1, str(\"Slit Size Calculator\"))\n self.boxsizer_source = wx.StaticBoxSizer(self.box_source,\n wx.VERTICAL)\n self.data_name_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.slit_size_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.hint_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)", "title": "" }, { "docid": "de8cf28f56f469284ed165989b46183f", "score": "0.48879674", "text": "def create_keys(ap_settings, screen, stats):\r\n\t#create the group of keys\r\n\tkeys_to_make = ap_settings.key_map\r\n\tpiano_keys = []\r\n\ttotal_width = 0\r\n\tfor key, value in keys_to_make.items():\r\n\t\tpiano_key = PianoKey(ap_settings, screen, value, key)\r\n\t\tpiano_keys.append(piano_key)\r\n\t\ttotal_width = total_width + piano_key.width \r\n\t\t\r\n\t#find center of screen\r\n\tcenter_screen = ap_settings.screen_width / 2\r\n\t\t\r\n\t# display the keys like a keyboard\r\n\t#define the x starting value\r\n\tx = center_screen - (total_width / 2)\r\n\t#define top y value\r\n\ty = stats.button_height_max * 7\r\n\tfor piano_key in piano_keys:\r\n\t\tpiano_key.update_location(x, y)\r\n\t\tx = x + piano_key.width \r\n\t\t\r\n\treturn piano_keys", "title": "" } ]
2a7f0a4955e3eb8db210fc2e3e607aef
Load the latest model and run a test episode
[ { "docid": "6d34a9cac0e3ab9c1237a1141c59f02f", "score": "0.0", "text": "def eval(dqn):\n ckpt_file = os.path.join(os.path.dirname(__file__), 'models/checkpoint')\n with open(ckpt_file, 'r') as f:\n first_line = f.readline()\n model_name = first_line.split()[-1].strip(\"\\\"\")\n dqn.saver.restore(dqn.sess, os.path.join(os.path.dirname(__file__), 'models/'+model_name))\n dqn.eval(save_snapshot=False)", "title": "" } ]
[ { "docid": "212b86f1917cde8b4574004fc1c33626", "score": "0.649339", "text": "def run_model(self):\n self.set_up_run_model()", "title": "" }, { "docid": "4b489c45200b9cf8f036daec4304dea6", "score": "0.64562917", "text": "def run_load_model_single(ind):\n global BEST_LOSS,BEST_EPOCH_IND\n ep_hist_train = {}\n ep_hist_val = {}\n model,vars_dict = load_model(model_path)\n print(vars_dict)\n BEST_LOSS = vars_dict[\"loss\"]\n BEST_EPOCH_IND = vars_dict[\"ep_ind\"]\n start_ep = vars_dict[\"epoch\"] + 1\n opt = optimizers.Adam(lr=LEARNING_RATE)\n # model.compile(optimizer=rms, loss=[\"categorical_crossentropy\", \"categorical_crossentropy\",\"categorical_crossentropy\", \"categorical_crossentropy\",\"categorical_crossentropy\"], metrics=['accuracy'])\n model.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=['accuracy'])\n generator = DataGeneratorCelebA(resolution, bulk_size)\n\n print(\"Starting loaded model at epoch[\",str(start_ep),\"]\",\" with best loss: \", str(BEST_LOSS))\n for e in range(start_ep,n_epochs):\n print(\"epoch %d\" % e)\n train_epoch_single(model, generator, e, ep_hist_train, ind)\n # Validing epoch\n validate_epoch_single(model, generator, e, ep_hist_val, ind)", "title": "" }, { "docid": "93b325860eda32706d1d6b043ee70583", "score": "0.6422174", "text": "def run_load_model_virtual():\n global BEST_LOSS, BEST_EPOCH_IND\n ep_hist_train = {}\n ep_hist_val = {}\n model,vars_dict = load_model(model_path)\n print(vars_dict)\n BEST_LOSS = vars_dict[\"loss\"]\n BEST_EPOCH_IND = vars_dict[\"ep_ind\"]\n start_ep = vars_dict[\"epoch\"] + 1\n opt = optimizers.Adam(lr=LEARNING_RATE)\n model.compile(optimizer=opt,loss= \"categorical_crossentropy\", metrics=['accuracy'])\n generator = DataGeneratorCelebAVirtual(resolution, bulk_size)\n\n print(\"Starting loaded model at epoch[\",str(start_ep),\"]\",\" with best loss: \", str(BEST_LOSS))\n for e in range(start_ep,n_epochs):\n print(\"epoch %d\" % e)\n # Training\n train_epoch(model, generator, e, ep_hist_train)\n # Validating\n validate_epoch(model, generator, e, ep_hist_val)", "title": "" }, { "docid": "fa7294977160681ce64bc4ef981fa672", "score": "0.6357844", "text": "def train(self):\n if self.episode is not None:\n self._process_episode()", "title": "" }, { "docid": "238e8419447d0f0c741c9bd889f6994b", "score": "0.635024", "text": "def test_02_load(self):\n\n ## train the model\n all_data, all_models = model_load()\n model = all_models['united_kingdom']\n self.assertTrue('predict' in dir(model))\n self.assertTrue('fit' in dir(model))", "title": "" }, { "docid": "b91c27c9fc82de5524246b6632d02f00", "score": "0.63486576", "text": "def run_load_model():\n global BEST_LOSS,BEST_EPOCH_IND\n ep_hist_train = {}\n ep_hist_val = {}\n model,vars_dict = load_model(model_path)\n print(vars_dict)\n BEST_LOSS = vars_dict[\"loss\"]\n BEST_EPOCH_IND = vars_dict[\"ep_ind\"]\n start_ep = vars_dict[\"epoch\"] + 1\n opt = optimizers.Adam(lr=LEARNING_RATE)\n # model.compile(optimizer=rms, loss=[\"categorical_crossentropy\", \"categorical_crossentropy\",\"categorical_crossentropy\", \"categorical_crossentropy\",\"categorical_crossentropy\"], metrics=['accuracy'])\n # model.compile(optimizer=opt,loss= \"categorical_crossentropy\", metrics=['accuracy'])\n model.compile(optimizer=opt,loss=masked_loss_function, metrics=['accuracy'])\n generator = DataGeneratorCelebASparse(resolution, bulk_size)\n\n print(\"Starting loaded model at epoch[\",str(start_ep),\"]\",\" with best loss: \", str(BEST_LOSS))\n for e in range(start_ep,n_epochs):\n 
print(\"epoch %d\" % e)\n # Training\n train_epoch(model, generator, e, ep_hist_train)\n # Validating\n validate_epoch(model, generator, e, ep_hist_val)", "title": "" }, { "docid": "7f56c6a339dc13748e6434257124de29", "score": "0.6277077", "text": "def main():\r\n\r\n # print(\"AAAAAAA\")\r\n\r\n model, last_epoch = prepare_model(globals.model_save_folder)\r\n train(model, last_epoch, 1000)", "title": "" }, { "docid": "a3c6c72380cd95af043bf2523813d3a1", "score": "0.6251177", "text": "def test_3(self, tmpdir):\n self.setup_tmpdir(tmpdir)\n # command = \"edflow -b train.yaml -n test\"\n config = dict()\n config[\"model\"] = \"tmptest.\" + fullname(Model)\n config[\"iterator\"] = \"tmptest.\" + fullname(Iterator_checkpoint_latest)\n config[\"datasets\"] = {\n \"train\": \"tmptest.\" + fullname(Dataset),\n \"validation\": \"tmptest.\" + fullname(Dataset),\n }\n config[\"batch_size\"] = 16\n config[\"num_steps\"] = 100\n import yaml\n\n with open(os.path.join(tmpdir, \"config.yaml\"), \"w\") as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n import shutil\n\n shutil.copytree(os.path.split(__file__)[0], os.path.join(tmpdir, \"tmptest\"))\n command = [\n \"edflow\",\n \"-p\",\n os.path.join(\"logs\", \"trained_model\"),\n \"-b\",\n \"config.yaml\",\n \"-n\",\n \"test_inference\",\n ]\n command = \" \".join(command)\n run_edflow_cmdline(command, cwd=tmpdir)\n\n # check if correct folder was created\n eval_dirs = os.listdir(os.path.join(tmpdir, \"logs\", \"trained_model\", \"eval\"))\n assert any(list(filter(lambda x: \"test_inference\" in x, eval_dirs)))", "title": "" }, { "docid": "44f1748af38bef4bece0e332a64c0143", "score": "0.624881", "text": "def test_ae_jaguar():\n\n # Sample data\n df = jaguar()\n\n # Hyperparameters\n batch_size = 10\n num_past = 10\n num_future = 5\n # Prepare the dataloader\n data_loaders = dataset.MultiModalDataLoader(\n df,\n batch_size=batch_size,\n n_past=num_past,\n n_future=num_future,\n num_workers=1,\n train_split_ratio=0.5,\n validation_split_ratio=0.2,\n )\n\n model_save_path = \"./model.pt\"\n\n model = MultiModelAE(\n input_size=2,\n num_past=num_past,\n batch_size=batch_size,\n num_future=num_future,\n lstm_hidden_size=32,\n num_lstm_layers=2,\n output_size=2,\n latent_size=10,\n batch_first=True,\n dropout=0.1,\n reset_state=True,\n bidirectional=False,\n )\n\n # Model Trainer\n # Model types; \"ae\" or \"vae\"\n trainer = HybridTrainer(model=model, optimizer_type=\"Adam\", loss_type=\"huber\")\n\n # Train the model\n trainer.fit(data_loaders, model_save_path, epochs=1, training_mode=\"forecasting\", validate_every=2, test_every=5)\n trainer.fit(data_loaders, model_save_path, epochs=1, training_mode=\"forecasting\", validate_every=None, test_every=5)\n trainer.fit(data_loaders, model_save_path, epochs=1, training_mode=\"forecasting\", validate_every=2, test_every=None)\n\n trainer.validate(data_loaders[\"sequential_validation_loader\"])", "title": "" }, { "docid": "e52ed4db94c6eb33470643532d5836e8", "score": "0.62233424", "text": "def load_model(self):\n print(\"[*] Load models from {}...\".format(self.model_sub_path))\n\n paths = glob.glob(os.path.join(self.model_sub_path, 'Vrae_*_*.pth'))\n paths.sort()\n\n if len(paths) == 0:\n print(\"[!] 
No checkpoint found in {}...\".format(self.model_sub_path))\n return\n\n stepidxes = [int(os.path.basename(path.split('.')[-2].split('_')[-1])) for path in paths]\n\n step_index = stepidxes.index(max(stepidxes))\n\n self.start_step = stepidxes[step_index]\n\n self.start_epoch = int(os.path.basename(paths[step_index].split('.')[-2].split('_')[-2]))\n\n Vrae_filename = '{}/Vrae_{}_{}.pth'.format(self.model_sub_path, self.start_epoch, self.start_step)\n device = torch.device('cuda:0')\n self.load_state_dict(\n torch.load('{}/Vrae_{}_{}.pth'.format(self.model_sub_path, self.start_epoch, self.start_step),\n map_location=device))\n \"\"\"self.load_state_dict(\n torch.load('{}/Vrae_{}_{}.pth'.format(self.model_sub_path, self.start_epoch, self.start_step)))\n \"\"\"\n self.step._step = self.start_step\n self.step._epoch = self.start_epoch\n\n print(\"[*] Model loaded: {}\".format(Vrae_filename))", "title": "" }, { "docid": "88a65c97d664f60d0ea74249b2e31a35", "score": "0.6221978", "text": "def execute():\n # Load params from config\n with open(CONFIG_FILE, encoding='utf8') as yaml_file:\n config = yaml.load(yaml_file)\n init_experiment_folder(config)\n train(config)", "title": "" }, { "docid": "94fc49464e4c35dedf71e64771aa4249", "score": "0.6204537", "text": "def executeEpisode(iteration, load_model, checkpoint, checkpoint_filename, numMCTSSims, cpuct, dirichletAlpha, tempThreshold):\n game = SantoriniGame()\n\n nnet = nn(game)\n if load_model or iteration>1:\n nnet.load_checkpoint(folder=checkpoint, filename=checkpoint_filename)\n\n\n mcts_args = dotdict({'numMCTSSims': numMCTSSims, 'cpuct':cpuct, 'dirichletAlpha': dirichletAlpha})\n mcts = MCTS(game, nnet, mcts_args, dirichlet_noise=True)\n\n trainExamples = []\n board = game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n board = game.getRandomSymmetry(board)\n canonicalBoard = game.getCanonicalForm(board,curPlayer)\n temp = int(episodeStep < tempThreshold)\n\n pi = mcts.getActionProb(canonicalBoard, temp=temp)\n sym = game.getSymmetries(canonicalBoard, pi)\n for b,p in sym:\n trainExamples.append([b, curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, curPlayer = game.getNextState(board, curPlayer, action)\n\n r = game.getGameEnded(board, curPlayer)\n\n if r!=0:\n return [(x[0],x[2],r*((-1)**(x[1]!=curPlayer))) for x in trainExamples]", "title": "" }, { "docid": "918a9787f4f10253791eab84406b1d76", "score": "0.6194004", "text": "def on_episode_begin(self, env, observation, epoch):", "title": "" }, { "docid": "a7a1ec446d72e52949a2436fe136a097", "score": "0.6184231", "text": "def main():\n model = testModel()\n model.play_game()", "title": "" }, { "docid": "60e8d6c7544bde839428f180909186bd", "score": "0.61834264", "text": "def test_02_load(self):\n \n ## train the model\n all_data, all_models = model_load()\n data = all_data['all']\n target_date = \"{}-{}-{}\".format('2018','05','01')\n self.assertTrue('all' in all_models.keys())\n self.assertTrue(target_date in data['dates'])", "title": "" }, { "docid": "54185b1b87405b6b709732fdac745830", "score": "0.61826116", "text": "def run_episode(name=None, epochs=100, board_height=4, board_width=4,\n verbose=False):\n\n # Episode parameters\n batch_size = 50\n\n # Stats directory\n stats_directory = \"stats\"\n\n # Build the game\n the_game = gaming.SeekGame(board_height, board_width)\n the_game.reset()\n\n # Build the model\n print(\"Building the model...\")\n model = ModelBuilder(name, the_game.width, the_game.height)\n model.load_weights()\n 
print(\"Model ready.\")\n\n # Build the memory\n exp_replay = ExperienceReplay()\n\n # Reset the winning count\n win_cnt = 0\n\n # Start time\n start_time = time.time()\n\n # Train loop (over the epochs)\n for e in range(epochs):\n loss = 0.0\n the_game.reset()\n game_over = False\n\n if verbose:\n print(\"--- Board representation:\")\n x = the_game.get_board()\n print(f\"{x}\")\n\n # Get initial input\n input_t = the_game.get_board_vector()\n\n while not game_over:\n # print(\"--- Board ---\")\n # x = the_game.get_board()\n # print(\"{}\".format(x))\n\n input_tm1 = input_t\n\n # Get the next action\n action = model.next_action(input_tm1)\n\n # apply action, get rewards and new state\n # Move the player on the board\n the_game.move(action)\n\n # Get the resulting board, reward and if the game is over\n input_t = the_game.get_board_vector()\n reward = the_game.get_reward()\n game_over = the_game.is_over()\n\n # Have we won?\n if reward >= 1.0:\n win_cnt += 1\n\n # Remember this movement\n exp_replay.remember([input_tm1, action, reward, input_t], game_over)\n\n # Adapt model: train a memory batch\n inputs, targets = exp_replay.get_batch(model.get_real_model(),\n batch_size=batch_size)\n loss += model.train_on_batch(inputs, targets)\n\n # End time\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n has_won = the_game.get_reward() >= 1.0\n\n # Just for testing...\n if verbose:\n if has_won:\n print(f\" WIN ({the_game.get_reward()})\")\n else:\n print(f\" LOSE ({the_game.get_reward()})\")\n\n # Episode summary\n print(f\"Epoch {(e+1):03d}/{epochs:03d} | Loss {loss:.4f} |\"\n f\" win={has_won:d} | Win count {win_cnt}\")\n\n print(\"----\")\n print(f\"Elapsed time {elapsed_time:.3f}\")\n\n # Save the model\n model.save_weights()\n\n # Directory\n if not os.path.exists(stats_directory):\n os.mkdir(stats_directory)\n\n if name:\n suffix = f\"-{name}\"\n else:\n suffix = \"\"\n\n statistics_file = os.path.join(stats_directory, f\"stats{suffix}.csv\")\n\n # Load previous data (or create an empty dataset)\n if os.path.exists(statistics_file):\n with open(statistics_file) as s_f:\n data = s_f.readlines()\n else:\n data = [f\"Episode,Epochs,WinCount,WinPct,Loss,Time\\n\"]\n\n # Creates a new stat entry\n data.append(f\"{len(data):d},{epochs:d},{win_cnt:d},\"\n f\"{(win_cnt / epochs):.3f},{loss:.6f},{elapsed_time:.3f}\\n\")\n\n # Save the data\n with open(statistics_file, \"w\") as s_f:\n s_f.writelines(data)", "title": "" }, { "docid": "aef444b813a92dc15cc549b70a7e0ecf", "score": "0.61704814", "text": "def test_6(self, tmpdir):\n self.setup_tmpdir(tmpdir)\n # command = \"edflow -b train.yaml -n test\"\n config = dict()\n config[\"model\"] = \"tmptest.\" + fullname(Model)\n config[\"iterator\"] = \"tmptest.\" + fullname(Iterator_no_checkpoint)\n config[\"datasets\"] = {\n \"train\": \"tmptest.\" + fullname(Dataset),\n \"validation\": \"tmptest.\" + fullname(Dataset),\n }\n config[\"batch_size\"] = 16\n config[\"num_steps\"] = 100\n config[\"eval_all\"] = True\n config[\"eval_forever\"] = False\n import yaml\n\n with open(os.path.join(tmpdir, \"config.yaml\"), \"w\") as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n import shutil\n\n shutil.copytree(os.path.split(__file__)[0], os.path.join(tmpdir, \"tmptest\"))\n command = [\n \"edflow\",\n \"-p\",\n os.path.join(\"logs\", \"trained_model\"),\n \"-b\",\n \"config.yaml\",\n \"-n\",\n \"test_inference\",\n ]\n command = \" \".join(command)\n run_edflow_cmdline(command, cwd=tmpdir)\n\n # check if correct folder was 
created\n eval_dirs = os.listdir(os.path.join(tmpdir, \"logs\", \"trained_model\", \"eval\"))\n assert any(list(filter(lambda x: \"test_inference\" in x, eval_dirs)))", "title": "" }, { "docid": "809d6553015273cd065e562dee723ba2", "score": "0.6163192", "text": "def play(self) -> None:\n assert self.trained, 'Call model.fit() before model.play()'\n self.eval(episodes=5, render=True)", "title": "" }, { "docid": "a1c02ad036c6f38246fb2c7e49bb41df", "score": "0.6148606", "text": "def test_5(self, tmpdir):\n self.setup_tmpdir(tmpdir)\n # command = \"edflow -b train.yaml -n test\"\n config = dict()\n config[\"model\"] = \"tmptest.\" + fullname(Model)\n config[\"iterator\"] = \"tmptest.\" + fullname(Iterator_no_checkpoint)\n config[\"datasets\"] = {\n \"train\": \"tmptest.\" + fullname(Dataset),\n \"validation\": \"tmptest.\" + fullname(Dataset),\n }\n config[\"batch_size\"] = 16\n config[\"num_steps\"] = 100\n config[\"eval_all\"] = True\n config[\"eval_forever\"] = False\n import yaml\n\n with open(os.path.join(tmpdir, \"config.yaml\"), \"w\") as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n import shutil\n\n shutil.copytree(os.path.split(__file__)[0], os.path.join(tmpdir, \"tmptest\"))\n command = [\n \"edflow\",\n \"-p\",\n os.path.join(\"logs\", \"trained_model\"),\n \"-b\",\n \"config.yaml\",\n \"-n\",\n \"test_inference\",\n ]\n command = \" \".join(command)\n run_edflow_cmdline(command, cwd=tmpdir)\n\n # check if correct folder was created\n eval_dirs = os.listdir(os.path.join(tmpdir, \"logs\", \"trained_model\", \"eval\"))\n assert any(list(filter(lambda x: \"test_inference\" in x, eval_dirs)))", "title": "" }, { "docid": "8b71643c6415302a1809e2aeba306fab", "score": "0.61459017", "text": "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial()\r\n # Load standard (IK) model\r\n self.loadStandardModel()\r\n # Update model visualizations\r\n self.updateStandardModelVisuals()\r\n # Load IK motion to IK model\r\n self.loadIKMotion()\r\n # Add adjusted COM (RRA) model\r\n self.loadAdjustedModel()\r\n # Load RRA motion to RRA model\r\n self.loadRRAMotion()\r\n # Manually:\r\n # Reset model offset to zero\r\n # Associate GRF data with RRA data\r\n # Sync motions of IK and RRA data\r", "title": "" }, { "docid": "9740b325af3d499c74d69793330be0f7", "score": "0.6141965", "text": "def load_saved_model(self):\n rospack = rospkg.RosPack()\n dirc = os.path.join(rospack.get_path(\"aer1217_ardrone_simulator\"), \"DNN/model.json\")\n # load json and create model\n json_file = open(dirc, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n self.model = model_from_json(loaded_model_json)\n # load weights into new model\n dirc = os.path.join(rospack.get_path(\"aer1217_ardrone_simulator\"), \"DNN/model.h5\")\n self.model.load_weights(dirc)\n print(\"Loaded model from disk\")\n \n # evaluate loaded model on test data\n self.model.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics=['acc', 'mae'])", "title": "" }, { "docid": "f735ca4953b9f9e613a2d22ebb13ba23", "score": "0.6137001", "text": "def run_training():", "title": "" }, { "docid": "e2a9eadad3588778068ca5cb06dde2ed", "score": "0.61279404", "text": "def demonstrate(model, env, nb_steps, data_filepath, nb_max_start_steps=4, start_step_policy=None, verbose=1):\n if not model.compiled:\n raise RuntimeError('Your tried to test your agent but it hasn\\'t been compiled yet. 
Please call `compile()` before `test()`.')\n model.training = False\n model.step = 0\n\n #matrix of cumulative demo data.\n transitions = []\n\n #Start a new episode as long as we haven't hit the step limit.\n steps = 0\n while steps < nb_steps:\n model.reset_states()\n observation = deepcopy(env.reset())\n if model.processor is not None:\n observation = model.processor.process_observation(observation)\n assert observation is not None\n # Perform random starts at beginning of episode and do not record them into the demo.\n #This gives the set some variety, even in simple envs like cartpole.\n nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)\n for _ in range(nb_random_start_steps):\n if start_step_policy is None:\n action = env.action_space.sample()\n else:\n action = start_step_policy(observation)\n if model.processor is not None:\n action = model.processor.process_action(action)\n observation, r, done, info = env.step(action)\n observation = deepcopy(observation)\n if model.processor is not None:\n observation, r, done, info = model.processor.process_step(observation, r, done, info)\n if done:\n warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))\n observation = deepcopy(env.reset())\n if model.processor is not None:\n observation = model.processor.process_observation(observation)\n break\n # Run the episode until we're done.\n done = False\n while not done:\n transition = [observation]\n action = model.forward(observation)\n if model.processor is not None:\n action = model.processor.process_action(action)\n transition.append(action)\n reward = 0.\n accumulated_info = {}\n observation, r, d, info = env.step(action)\n #We add unprocessed reward to demoset, letting us use different reward clipping for the student model.\n transition.append(r)\n transition.append(d)\n if len(transitions) < nb_steps:\n transitions.append(transition)\n observation = deepcopy(observation)\n if model.processor is not None:\n #now we process the reward\n observation, r, d, info = model.processor.process_step(observation, r, d, info)\n if d:\n done = True\n model.backward(reward, terminal=done)\n steps += 1\n model.step += 1\n\n model.forward(observation)\n model.backward(0., terminal=False)\n\n model._on_test_end()\n\n data_matrix = np.array(transitions)\n np.save(data_filepath, data_matrix)", "title": "" }, { "docid": "e7614d10251cd1f8231dfdcd82eb801b", "score": "0.61098677", "text": "def train(self, load_ctrl=None, save_ctrl=None):\n config = self.config\n lr_ctrl = config.lr_ctrl\n lr_task = config.lr_task\n model_ctrl = self.model_ctrl\n model_task = self.model_task\n # The bigger the performance is, the better.\n best_performance = -1e10\n # Record the number of latest episodes without a better result\n endurance = 0\n\n # ----Initialize controllor.----\n model_ctrl.initialize_weights()\n if load_ctrl:\n model_ctrl.load_model(load_ctrl)\n\n # ----Start episodes.----\n for ep in range(config.total_episodes):\n logger.info('=================')\n logger.info('episodes: {}'.format(ep))\n\n # ----Initialize task model.----\n model_task.initialize_weights()\n model_task.reset()\n\n state = model_task.get_state()\n transitions = []\n\n step = -1\n # ----Running one episode.----\n logger.info('TRAINING TASK MODEL...')\n while True:\n step += 1\n action = model_ctrl.sample(state)\n state_new, reward, dead = model_task.response(action)\n extra_info = 
model_task.extra_info\n # ----Record training details.----\n transition = {'state': state,\n 'action': action,\n 'reward': reward,\n 'extra_info': extra_info}\n #print(transition)\n transitions.append(transition)\n\n state = state_new\n if dead:\n break\n\n # Reinitialize the controller if the output of the controller\n # collapse to one specific action.\n if model_task.collapse:\n model_ctrl.initialize_weights()\n\n # ----Use the best model to get inception score on a\n # larger number of samples to reduce the variance of reward----\n if args.task_name == 'gan' and model_task.checkpoint_dir:\n model_task.load_model(model_task.checkpoint_dir)\n # TODO use hyperparameter\n inps_test = model_task.get_inception_score(5000)\n logger.info('inps_test: {}'.format(inps_test))\n model_task.update_inception_score(inps_test[0])\n\n # ----Update the controller.----\n # We actually use the advantage as the final reward\n _, adv = model_task.get_final_reward()\n discount_rewards(transitions, adv)\n\n if ep % config.update_frequency_ctrl == (config.update_frequency_ctrl - 1):\n logger.info('UPDATING CONTROLLOR...')\n model_ctrl.train_one_step(transitions, lr_ctrl)\n logger.info('adv: {}'.format(adv))\n model_ctrl.print_weights()\n\n\n # Printing training results and saving model_ctrl\n save_model_flag = False\n if model_task.best_performance > best_performance:\n best_performance = model_task.best_performance\n save_model_flag = True\n endurance = 0\n else:\n endurance += 1\n logger.info('----LOG FOR EPISODE {}----'.format(ep))\n logger.info('Performance in this episode: {}'.\\\n format(model_task.best_performance))\n logger.info('Best performance till now : {}'.\\\n format(best_performance))\n\n if args.task_name == 'reg':\n loss_analyzer_reg(transitions)\n elif args.task_name == 'gan':\n loss_analyzer_gan(transitions)\n elif args.task_name == 'cls':\n loss_analyzer_reg(transitions)\n\n logger.info('--------------------------')\n\n if save_model_flag and save_ctrl:\n model_ctrl.save_model(ep)\n print(endurance)\n if endurance > config.max_endurance_ctrl:\n break", "title": "" }, { "docid": "9eefeaf9141745cbfc14fbe4e1d8eb91", "score": "0.610838", "text": "def train_test_model(model, env, seed, total_num_episodes, total_learning_timesteps=10_000):\n\n #reproduce training and test\n print ('-' * 80)\n obs = env.reset(seed=seed)\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n \n vec_env = None\n\n if model is not None:\n print(f'model {type(model)}')\n print(f'policy {type(model.policy)}')\n #print(f'model.learn(): {total_learning_timesteps} timesteps ...')\n\n #custom callback for 'progress_bar'\n model.learn(total_timesteps=total_learning_timesteps, callback=ProgressBarCallback(100))\n #model.learn(total_timesteps=total_learning_timesteps, progress_bar=True)\n # ImportError: You must install tqdm and rich in order to use the progress bar callback. 
\n # It is included if you install stable-baselines with the extra packages: `pip install stable-baselines3[extra]`\n\n vec_env = model.get_env()\n obs = vec_env.reset()\n else:\n print (\"RANDOM actions\")\n\n reward_over_episodes = []\n\n tbar = tqdm(range(total_num_episodes))\n\n for episode in tbar:\n \n if vec_env: \n obs = vec_env.reset()\n else:\n obs, info = env.reset()\n\n total_reward = 0\n done = False\n while not done:\n\n if model is not None:\n action, _states = model.predict(obs)\n obs, reward, done, info = vec_env.step(action)\n else: #random\n action = env.action_space.sample()\n obs, reward, terminated, truncated, info = env.step(action)\n done = terminated or truncated \n\n total_reward += reward\n if done: break\n \n reward_over_episodes.append(total_reward)\n\n if episode % 10 == 0:\n avg_reward = np.mean(reward_over_episodes)\n tbar.set_description(f'Episode: {episode}, Avg. Reward: {avg_reward:.3f}')\n tbar.update()\n\n tbar.close()\n avg_reward = np.mean(reward_over_episodes)\n \n return reward_over_episodes", "title": "" }, { "docid": "92e1e69386ae5207f5e565b59b3d108e", "score": "0.6066776", "text": "def train_episode(self):\n\n # Reset the environment\n state = self.env.reset()\n nA = self.env.action_space.n\n alpha = self.options.alpha\n gamma = self.options.gamma\n policy = self.make_epsilon_greedy_policy()\n replay_memory_size = self.options.replay_memory_size\n batch_size = self.options.batch_size\n epsilon = self.options.epsilon\n input_size = self.options.input_size\n\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n zerocount = 0\n for step in range(self.options.steps):\n #while True:\n self.steps += 1\n if self.steps % self.options.update_target_estimator_every == 0:\n self.update_model = True\n action_probs = policy(state)\n\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n # action = 1\n next_state, reward, done, _ = self.step(action)\n # ______________________________________________________________\n self.env.render()\n if reward == 0:\n zerocount += 1\n else:\n zerocount = 0\n if zerocount > self.options.skipzerocount:\n reward = self.options.skipzerocount - zerocount\n \n continue\n self.D = self.D[1:]\n self.D.append((state, action , reward, next_state, done))\n batch = random.sample(self.D, batch_size)\n random.shuffle(batch)\n batch = np.array(batch)\n next_state_arr = np.array([list(x) for x in batch[:, 3]])\n state_arr = np.array([list(x) for x in batch[:, 0]])\n action_arr = np.array([int(x) for x in batch[:, 1]])\n reward_arr = batch[:, 2]\n done_arr = batch[:, 4]\n y_arr = self.model.predict(state_arr)\n\n not_done_arr = 1 - done_arr\n next_action_values = self.target_model.predict(next_state_arr)\n next_action_arr = np.argmax(next_action_values, axis=1)\n \n # single line code.\n # reward_arr += done_arr * gamma * np.multiply(next_action_values, np.eye(batch_size)[next_action_arr])\n \n # multi line of the above single line. To test properly\n next_state_best_action_value = next_action_values[np.arange(len(next_action_arr)), next_action_arr]\n additional_reward = np.multiply(not_done_arr, gamma * next_state_best_action_value)\n reward_arr += additional_reward\n y_arr[np.arange(len(action_arr)), action_arr] = reward_arr\n self.model.fit(state_arr, y_arr, verbose=0)\n\n # same as above but iteratively. so slow. 
(very very slow)\n # for (statej, actionj , rewardj, next_statej, donej, yj) in batch:\n # if not donej:\n # next_statej = np.array([next_statej])\n # next_state_values = self.target_model.predict(next_statej)[0]\n # best_next_action = np.argmax(next_state_values)\n # best_next_action_value = next_state_values[best_next_action]\n # rewardj += gamma * best_next_action_value\n # # statej_arr = np.array([statej])\n # # yj = self.model.predict(statej_arr)[0]\n # yj[actionj] = rewardj\n # x.append(statej)\n # y.append(yj)\n # y = np.array(y)\n # x = np.array(x)\n # self.model.fit(x, y, verbose=0)\n\n if self.update_model:\n print(\"Updating Model\")\n self.update_target_model()\n self.update_model = False\n print(f\"Step {step} completed with reward {reward} with action {action}\")\n state = next_state\n if done:\n break\n #print(\"Total Steps:\", self.steps)\n #raise NotImplementedError", "title": "" }, { "docid": "5d966f3c105d68b7a9260cc17115e6dc", "score": "0.6062088", "text": "def _run_training_step(self):\n pass", "title": "" }, { "docid": "48411d096dd25ab2aa38730f0cd6fa2d", "score": "0.6057587", "text": "def test_02_load(self): \n ## train the model\n model = model_load(os.path.join(os.path.dirname(os.path.dirname(__file__)),\"cs-train\"))\n \n self.assertIsNotNone(model)", "title": "" }, { "docid": "973b0187d17dde51ef6353372294d125", "score": "0.6045856", "text": "def show_episode(model, env, save_video=False, video_path=\"movie.mp4\", images_path=\"./video_images\", fps=15):\n model.cpu()\n model.eval()\n\n is_done = False\n obs = env.reset()\n total_reward = 0\n frame = 0\n images = []\n print(\"starting episode\")\n while (not is_done):\n obs = torch.FloatTensor(obs).unsqueeze(0)\n if save_video:\n data = env.render(mode='rgb_array')\n img = Image.fromarray(data, 'RGB')\n images.append(img)\n else:\n env.render()\n obs, reward, is_done, _ = env.step(model(obs).max(1)[1].item())\n total_reward+=reward\n frame=+1\n print(f\"total reward:{total_reward}\")\n if save_video:\n for i, img in enumerate(images):\n img.save(images_path+\"/image\"+str(i)+\".png\")\n save_videofile(images_path, video_path, fps=fps)\n print(f\"saved video as {video_path}\")", "title": "" }, { "docid": "026f9b82fabfb901a2f0c68fd8b07630", "score": "0.604179", "text": "def test_run_basic(self):\n folders = DummyTrainFolder(10)\n script = TrainScript.create(model=folders.model, folder=folders.root)\n script.run()\n assert isfile(folders.model)", "title": "" }, { "docid": "28baa070d4e02663294a2cb9a3bc323e", "score": "0.6035133", "text": "def train_episode(self):\n # start the environment with a seed from a list of seeds\n self.env.seed(np.random.randint(0, self.config['num_train_seeds']))\n self.reset_game()\n loss = 0\n while not self.done:\n # print(f\"calculating episode {self.episode_number}, step:{self.episode_step_number}\")\n self.episode_step()\n # self.update_next_state_reward_done_and_score()\n # TODO need to update time_to_learn to be based on number of batches\n if self.time_to_learn():\n loss = self.learn()\n self.state = self.next_state # this is to set the state for the next iteration\n self.episode_step_number += 1\n self.episode_number += 1\n self.total_rewards_window.append(sum(self.episode_rewards))", "title": "" }, { "docid": "c44e217a0849ba1cdb31e90246b6a6cc", "score": "0.6033965", "text": "def load_model(self):\n self.actor.load_checkpoint()\n self.critic.load_checkpoint()\n self.target_actor.load_checkpoint()\n self.target_critic.load_checkpoint()", "title": "" }, { "docid": 
"a7a6ba76fbf4db40ad961bdeb46998f5", "score": "0.60229707", "text": "def test_run_model(self, *args, **kwargs): # pragma: no cover\n pass", "title": "" }, { "docid": "8b5c9a7a68086e73617de4e3dbb76dfd", "score": "0.6008221", "text": "def run_episode(self):\n steps_since_last_feedback_publish = 0\n with torch.no_grad():\n while not self.is_episode_finished():\n self.update_brain()\n behavior_id = self.get_behavior()\n self.execute_behavior(behavior_id)\n\n # Publish feedback\n steps_since_last_feedback_publish += 1\n if steps_since_last_feedback_publish > self.feedback_frequency_in_steps:\n self.action_server_publish_feedback(behavior_id)\n steps_since_last_feedback_publish = 0\n\n self.rate.sleep()\n if self.is_episode_successful() is True:\n result = EpisodeResult.SUCCESS\n elif self.is_deviated() is True:\n result = EpisodeResult.DEVIATED\n elif self.is_stuck() is True:\n result = EpisodeResult.STUCK\n else:\n raise ValueError\n print(result)\n return result", "title": "" }, { "docid": "ea659879cc2ea8aba27e06a19c861fcc", "score": "0.5987981", "text": "def test(path):\n model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))\n env.reset()\n state = env.observation\n\n env.render()\n\n while True:\n action = select_action(np.array(state.state))\n action = env.robot.actions[action]\n\n env.robotStep(action[0], action[1])\n env.render()", "title": "" }, { "docid": "1d7eb6d99a7b39abf9cc19e773231e2f", "score": "0.59805864", "text": "def train(self, episode):\n raise NotImplementedError()", "title": "" }, { "docid": "65f87179765fd5020e0fa61062dca392", "score": "0.597208", "text": "def test_resume_checkpoint(self):\n import parlai.scripts.train_model as tms\n\n def get_popt_and_tl(opt):\n parser = tms.setup_args()\n parser.set_params(**opt)\n popt = parser.parse_args([])\n for k, v in opt.items():\n popt[k] = v\n return popt, tms.TrainLoop(popt)\n\n def get_opt(init_mf, mf):\n return {\n 'task': 'integration_tests',\n 'init_model': init_mf,\n 'model': 'parlai.agents.test_agents.test_agents:MockTorchAgent',\n 'model_file': mf,\n 'num_epochs': 3,\n 'validation_every_n_epochs': 1,\n 'save_after_valid': True,\n 'log_every_n_secs': 10,\n }\n\n with tempdir() as tmpdir:\n # First train model with init_model path set\n mf = os.path.join(tmpdir, 'model')\n init_mf = os.path.join(tmpdir, 'init_model')\n with open(init_mf, 'w') as f:\n f.write(' ')\n opt = get_opt(init_mf, mf)\n popt, tl = get_popt_and_tl(opt)\n agent = tl.agent\n # init model file should be set appropriately\n init_model_file, is_finetune = agent._get_init_model(popt, None)\n self.assertEqual(init_model_file, init_mf)\n self.assertTrue(is_finetune)\n valid, test = tl.train()\n # now, train the model for another epoch\n opt = get_opt('{}.checkpoint'.format(mf), mf)\n opt['load_from_checkpoint'] = True\n popt, tl = get_popt_and_tl(opt)\n agent = tl.agent\n init_model_file, is_finetune = agent._get_init_model(popt, None)\n self.assertEqual(init_model_file, '{}.checkpoint'.format(mf))\n self.assertFalse(is_finetune)", "title": "" }, { "docid": "eea31b3f4f3184bba6ffadfa73c9e3af", "score": "0.5963719", "text": "def test_model_train_and_eval(self):\n train_input_fn = launcher.get_input_fn(FLAGS.training_data_filepattern)\n eval_input_fn = launcher.get_input_fn(FLAGS.testing_data_filepattern)\n\n model = launcher.create_keras_model()\n\n history = launcher.train_and_eval(\n model=model,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n steps_per_epoch=FLAGS.steps_per_epoch,\n epochs=FLAGS.num_epochs,\n 
eval_steps=FLAGS.num_eval_steps,\n callbacks=None)\n\n # Ensure the model has a valid loss after one epoch (not NaN).\n self.assertIn('loss', history.history)\n losses = history.history['loss']\n self.assertLen(losses, FLAGS.num_epochs)\n self.assertFalse(tf.math.is_nan(losses[0]))", "title": "" }, { "docid": "140ae2f016f7ae68b4eb62211f93f233", "score": "0.5947814", "text": "def main():\n # Create environment\n environment = gym.make(ENVIRONMENT)\n if RECORD:\n environment = Monitor(\n env=environment,\n directory=VIDEO_DIRECTORY,\n #video_callable=lambda episode_id: True,\n force=True\n )\n # Set random seeds\n environment.seed(0)\n np.random.seed(0)\n # Get action and state space sizes\n action_space = environment.action_space.n\n state_space = environment.observation_space.shape[0]\n # Instantiate agent\n agent = Agent(action_space, state_space)\n # Load model weights\n if path.exists(CHECKPOINT_DIRECTORY):\n agent.load(CHECKPOINT_DIRECTORY)\n # Initialise list of all rewards\n rewards = []\n for episode in range(EPISODES):\n # Get initial state\n state = environment.reset()\n state = np.reshape(state, (1, state_space))\n # Reset score for this episode\n score = 0\n for _ in range(STEPS):\n if RENDER:\n environment.render()\n # Agent selects action from state\n action = agent.act(state)\n # Agent performs action and makes an observation of the environment\n next_state, reward, done, _ = agent.observe(environment, action)\n next_state = np.reshape(next_state, (1, state_space))\n observation = (state, action, reward, next_state, done)\n # Agent remembers parameters of this time step\n agent.remember(observation)\n state = next_state\n # Agent retrains model\n agent.learn()\n score += reward\n if done:\n print(\"Episode: {}/{}. Reward: {:.2f}\".format(episode+1, EPISODES, score))\n break\n rewards.append(score)\n # Average reward over the last 100 episodes\n average_reward = np.mean(rewards[-100:])\n print(\"Average reward: {:.2f}\\n\".format(average_reward))\n # Terminate environment\n environment.close()\n # Save model\n agent.save(CHECKPOINT_DIRECTORY)", "title": "" }, { "docid": "6164a9a4396c7f9da72e89f94e99c3f3", "score": "0.5943742", "text": "def main():\n \n dir = './models/2018-08-14_07:10:32'\n model = MLP.load_model(dir)\n print(model)", "title": "" }, { "docid": "31043b962cab7b76ae81d0cd512d9c4e", "score": "0.59411836", "text": "def test_4(self, tmpdir):\n self.setup_tmpdir(tmpdir)\n # command = \"edflow -b eval.yaml train.yaml -n test\"\n config = dict()\n config[\"model\"] = \"tmptest.\" + fullname(Model)\n config[\"iterator\"] = \"tmptest.\" + fullname(Iterator4)\n config[\"datasets\"] = {\n \"train\": \"tmptest.\" + fullname(Dataset),\n \"validation\": \"tmptest.\" + fullname(Dataset),\n }\n config[\"batch_size\"] = 16\n config[\"num_steps\"] = 100\n config[\"eval_all\"] = True\n config[\"eval_forever\"] = True\n import yaml\n\n with open(os.path.join(tmpdir, \"config.yaml\"), \"w\") as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n import shutil\n\n shutil.copytree(os.path.split(__file__)[0], os.path.join(tmpdir, \"tmptest\"))\n command = [\n \"edflow\",\n \"-c\",\n os.path.join(\n \"logs\", \"trained_model\", \"train\", \"checkpoints\", \"model.ckpt-0\"\n ),\n \"-b\",\n \"config.yaml\",\n \"-p\",\n os.path.join(\"logs\", \"trained_model\"),\n \"-n\",\n \"test_inference\",\n ]\n command = \" \".join(command)\n run_edflow_cmdline(command, cwd=tmpdir)\n\n # check if correct folder was created\n eval_dirs = os.listdir(os.path.join(tmpdir, \"logs\", 
\"trained_model\", \"eval\"))\n assert any(list(filter(lambda x: \"test_inference\" in x, eval_dirs)))", "title": "" }, { "docid": "f5022de21b697fd68943b2917e61f305", "score": "0.59203714", "text": "def test_model_save_load(self):\n\n for i, (config, trainer, ds, change_net, fix_topology) in enumerate([\n (dna1_config, 'SimpleTrainer', 'Imagenet1000Data', False, True),\n (super1_fairnas, 'SimpleTrainer', 'Imagenet1000Data', False, True),\n (search_darts_config, 'SimpleTrainer', 'Cifar10Data', True, False),\n (retrain_darts_cifar_config, 'SimpleTrainer', 'Cifar10Data', True, False),\n\n # (search_darts_config, 'LightningTrainer'),\n # (retrain_darts_cifar_config, 'LightningTrainer'),\n ]):\n save_dir = \"{path_tmp}/tests/%d/\"\n arg_changes = {\n \"cls_data\": ds,\n \"{cls_data}.fake\": True,\n \"{cls_data}.batch_size_train\": 4,\n \"{cls_data}.batch_size_test\": 4,\n\n \"cls_trainer\": trainer,\n \"{cls_trainer}.max_epochs\": 2,\n\n \"{cls_task}.seed\": 0,\n \"{cls_task}.is_test_run\": True,\n \"{cls_task}.save_dir\": save_dir % 1,\n \"{cls_task}.save_del_old\": True,\n\n \"{cls_schedulers#0}.warmup_epochs\": 0,\n }\n if change_net:\n arg_changes.update({\n \"{cls_network_body}.features_first_cell\": 8,\n \"{cls_network_body}.cell_order\": \"n, r, n, r, n\",\n \"{cls_network_stem}.features\": 4,\n })\n\n print(config)\n exp1 = Main.new_task(config, args_changes=arg_changes).run()\n data = exp1.get_method().get_data_set().sample_random_data(batch_size=4).cuda()\n net = exp1.get_method().get_network()\n if fix_topology and isinstance(net, SearchUninasNetwork):\n net.set_forward_strategy(False)\n net.get_strategy_manager().forward_const(0)\n with torch.no_grad():\n outputs1 = exp1.get_method()(data)\n\n arg_changes[\"{cls_task}.save_dir\"] = save_dir % 2\n arg_changes[\"{cls_task}.seed\"] += 1\n exp2 = Main.new_task(config, args_changes=arg_changes).run().load(save_dir % 1)\n net = exp2.get_method().get_network()\n if fix_topology and isinstance(net, SearchUninasNetwork):\n net.set_forward_strategy(False)\n net.get_strategy_manager().forward_const(0)\n with torch.no_grad():\n outputs2 = exp2.get_method()(data)\n\n for o1, o2 in zip(outputs1, outputs2):\n self._assert_same_tensors('i=%d method=%s' % (i, exp1.get_method().__class__.__name__), o1, o2)", "title": "" }, { "docid": "8928857fb258e09a4a78aaf9eb63877b", "score": "0.5910664", "text": "def test_2(self, tmpdir):\n self.setup_tmpdir(tmpdir)\n config = dict()\n config[\"model\"] = \"tmptest.\" + fullname(Model)\n config[\"iterator\"] = \"tmptest.\" + fullname(Iterator_checkpoint)\n config[\"datasets\"] = {\n \"train\": \"tmptest.\" + fullname(Dataset),\n \"validation\": \"tmptest.\" + fullname(Dataset),\n }\n config[\"batch_size\"] = 16\n config[\"num_steps\"] = 100\n import yaml\n\n with open(os.path.join(tmpdir, \"config.yaml\"), \"w\") as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n import shutil\n\n shutil.copytree(os.path.split(__file__)[0], os.path.join(tmpdir, \"tmptest\"))\n command = [\n \"edflow\",\n \"-c\",\n os.path.join(\n \"logs\", \"trained_model\", \"train\", \"checkpoints\", \"model.ckpt-0\"\n ),\n \"-b\",\n \"config.yaml\",\n \"-p\",\n os.path.join(\"logs\", \"trained_model\"),\n \"-n\",\n \"test_inference\",\n ]\n command = \" \".join(command)\n run_edflow_cmdline(command, cwd=tmpdir)\n\n # check if correct folder was created\n eval_dirs = os.listdir(os.path.join(tmpdir, \"logs\", \"trained_model\", \"eval\"))\n assert any(list(filter(lambda x: \"test_inference\" in x, eval_dirs)))", "title": "" }, { 
"docid": "3965a610ffafc8d9c80b421c17b7f1f0", "score": "0.58793926", "text": "def run_models(self, event, timeframes):\n\n pass", "title": "" }, { "docid": "dd9e84334c2a6f2447f4b600641c191a", "score": "0.5873487", "text": "def train(self):\n models_name = []\n for item in self.test_models:\n models_name.append(item[\"name\"])\n self.output(\"Testing these models: \", models_name)\n if self.ensemble == True:\n self.output(\"Using ensemble techniques to predict the stock price movement.\")\n self.output('*'*90)\n if self.ensemble:\n self.ensemble_test()\n else:\n self.seperate_test()", "title": "" }, { "docid": "39a981b4a31a3bc22342e223df544c60", "score": "0.5872866", "text": "def main(hparams):\n\n # ------------------------\n # 1 INIT MODEL\n # ------------------------\n\n model = get_model(hparams)\n model.load_state_dict(torch.load(hparams.checkpoint_file)[\"state_dict\"])\n model.eval()\n\n name = \"-\".join([hparams.model, hparams.out, \"-test\"])\n\n # ------------------------\n # LOGGING SETUP\n # ------------------------\n\n tb_logger = TensorBoardLogger(save_dir=\"logs/tb_logs/\", name=name)\n tb_logger.experiment.add_graph(model, model.data[0][0].unsqueeze(0))\n wandb_logger = WandbLogger(\n name=hparams.comment if hparams.comment else time.ctime(),\n project=name,\n save_dir=\"logs\",\n )\n wandb_logger.watch(model, log=\"all\", log_freq=200)\n wandb_logger.log_hyperparams(model.hparams)\n for file in [\n i\n for s in [glob(x) for x in [\"*.py\", \"dataloader/*.py\", \"model/*.py\"]]\n for i in s\n ]:\n shutil.copy(file, wandb.run.dir)\n\n trainer = pl.Trainer(gpus=hparams.gpus, logger=[wandb_logger]) # , tb_logger],\n\n # ------------------------\n # 3 START TESTING\n # ------------------------\n\n trainer.test(model)", "title": "" }, { "docid": "5f752beba6bb881980a823eeca1e98e4", "score": "0.5853803", "text": "def test_model(self):\n\n test_input, test_target = self.load_dataset(self.__test_directory)\n model = create_model(self._input_window_length)\n model = load_model(model, self.__network_type, self.__algorithm,\n self.__appliance, self.__saved_model_dir)\n #model = self.model\n test_generator = TestSlidingWindowGenerator(number_of_windows=self.__number_of_windows, inputs=test_input, targets=test_target, offset=self.__window_offset)\n\n # Calculate the optimum steps per epoch.\n steps_per_test_epoch = np.round(int(test_generator.total_size / self.__batch_size), decimals=0)\n\n # Test the model.\n start_time = time.time()\n testing_history = model.predict(x=test_generator.load_dataset(), steps=steps_per_test_epoch, verbose=2)\n\n end_time = time.time()\n test_time = end_time - start_time\n\n evaluation_metrics = model.evaluate(x=test_generator.load_dataset(), steps=steps_per_test_epoch)\n\n\n #Denormalizing the mains and appliance data\n testing_history = ((testing_history * appliance_data[self.__appliance][\"std\"]) + appliance_data[self.__appliance][\"mean\"])\n test_target = ((test_target * appliance_data[self.__appliance][\"std\"]) + appliance_data[self.__appliance][\"mean\"])\n test_agg = (test_input.flatten() * mains_data[\"std\"]) + mains_data[\"mean\"]\n test_agg = test_agg[:testing_history.size]\n\n # Can't have negative energy readings - set any results below 0 to 0.\n test_target[test_target < 0] = 0\n testing_history[testing_history < 0] = 0\n test_input[test_input < 0] = 0\n\n\n\n return test_agg, testing_history\n\n # self.log_results(model, test_time, evaluation_metrics)\n #self.plot_results(testing_history, test_input, test_target)", "title": "" }, { "docid": 
"185835cce9d7cf4d9a23012a616619bf", "score": "0.5846532", "text": "def test_aevae_jaguar():\n\n # Sample data\n df = jaguar()\n\n # Hyperparameters\n batch_size = 10\n num_past = 10\n num_future = 5\n # Prepare the dataloader\n data_loaders = dataset.MultiModalDataLoader(\n df,\n batch_size=batch_size,\n n_past=num_past,\n n_future=num_future,\n train_split_ratio=0.5,\n num_workers=1,\n split_by_id=False,\n )\n\n model_save_path = \"./model.pt\"\n\n model = MultiModelVAE(\n input_size=2,\n output_size=2,\n lstm_hidden_size=32,\n num_lstm_layers=2,\n latent_size=10,\n dropout=0.1,\n batch_size=batch_size,\n num_future=num_future,\n num_past=num_past,\n bidirectional=False,\n batch_first=True,\n reset_state=True,\n )\n\n # Test that we can run functions on our network.\n model.disable_latent_output()\n model.enable_latent_output()\n\n # Model Trainer\n # Model types; \"ae\" or \"vae\"\n trainer = HybridTrainer(model=model, optimizer_type=\"Adam\", loss_type=\"huber\")\n\n # Train the model\n trainer.fit(data_loaders, model_save_path, epochs=1, training_mode=\"forecasting\", validate_every=5, test_every=10)\n\n scaler = data_loaders[\"train_loader\"].dataset.scaler\n\n # Load the trained model given the path\n model_path = \"./model.pt\"\n hyperparams = \"./hypers.json\"\n model_hyperparameters = traja.models.read_hyperparameters(hyperparams)\n\n # For prebuild traja generative models\n generator = traja.models.inference.Generator(\n model_type=\"vae\",\n model_hyperparameters=model_hyperparameters,\n model_path=model_path,\n model=None,\n )\n out = generator.generate(num_future, classify=False, scaler=scaler, plot_data=False)\n\n trainer.validate(data_loaders[\"validation_loader\"])", "title": "" }, { "docid": "95c21ac37df685566a97dd2575c2ffd9", "score": "0.5843821", "text": "def main_base(\n model: SSMILVAE.SSMILVAE,\n args: Namespace,\n optimizer: Adam,\n trainloader: DataLoader,\n validationloader: DataLoader,\n testloader: DataLoader,\n):\n # Declare training trackers\n losses_train = list()\n losses_val = list()\n args.prev_val = -1.0 if args.model == \"base_att\" else 1e9\n # Declare real image counter to save\n args.real_image_counter = 0\n # Trace args\n print(args)\n # Train\n for epoch in range(1, args.epochs + 1):\n # Trace\n print(f\"Epoch {epoch}\")\n # Set the epoch value to args\n args.epoch = epoch\n # Run training\n train_base(model, args, optimizer, trainloader)\n # Run validation [for training set]\n loss_train = validate_base(model, args, trainloader, loadername=\"-Train-\")\n # Run validation\n # Record validation results\n losses_train.append(loss_train)\n # Run validation [for validation set]\n loss_val = validate_base(\n model, args, validationloader, loadername=\"-Validation-\", train=False\n )\n # Record validation results\n losses_val.append(loss_val)\n\n # Declare a test accuracy or ELBO loss tracker\n args.max_test_acc = -1.0 if args.model == \"base_att\" else 1e9\n # Test each one of the saved model to find the best\n for modelname in os.listdir(f\"{args.MODELPATH}\"):\n # Load the best model before testing\n # Update the arguments\n args = update_args(args=args)\n # Create the model again\n model, _ = create_model_optimizer(args=args)\n # Load the state dict\n model.load_state_dict(load(f\"{args.MODELPATH}/{modelname}\"))\n # Run test\n loss_test = validate_base(\n model, args, testloader, loadername=f\"-Test[{modelname}]-\", train=True\n )\n\n # If the model is 'base_att'\n if args.model == \"base_att\":\n # Check if the model accuracy was higher than before\n 
if loss_test >= args.max_test_acc:\n # Keep the model name\n args.best_model = modelname\n # Update the maximum accuracy\n args.max_test_acc = loss_test\n # If the model is 'base'\n elif args.model == \"base\":\n # Check if the model accuracy was higher than before\n if loss_test <= args.max_test_acc:\n # Keep the model name\n args.best_model = modelname\n # Update the maximum accuracy\n args.max_test_acc = loss_test\n # Trace the best model\n print(f\"Picked {args.best_model}.\")\n\n # If the mode is not 'base_att'\n if args.model != \"base_att\":\n # Load the best model before testing\n # Update the arguments\n args = update_args(args=args)\n # Create the model again\n model, _ = create_model_optimizer(args=args)\n # Load the state dict for the last time for the best model\n model.load_state_dict(load(f\"{args.MODELPATH}/{args.best_model}\"))\n # Reconstruct images\n reconstruct_image(args, model, trainloader)\n # Sample images\n for i in range(1, args.numsamp + 1):\n sample_image(args=args, model=model, idx=i, epoch=None)\n\n # Return the results\n return losses_train, losses_val", "title": "" }, { "docid": "9a7c64795f169dffdbb253804bb7a67b", "score": "0.5843639", "text": "def run_from_beginning():\n X_train_i, Y_train_i, X_test_i, Y_test_i = load_data()\n # preprocess training and test labels \n Xtrain = preprocess_image_data(X_train_i)\n Xtest = preprocess_image_data(X_test_i)\n Ytrain = preprocess_label_data(Y_train_i)\n Ytest = preprocess_label_data(Y_test_i)\n \n # train and evaluate model using training and test data\n history, results = train_evaluate(Xtrain, Ytrain, Xtest, Ytest)\n \n return history, results", "title": "" }, { "docid": "79f21d99aa35d55adb5910a560b4d967", "score": "0.5843172", "text": "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, do_prefill=False,\n rendering=False, max_timesteps=1000, history_length=0, verbose=False):\n\n stats = EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n\n state = env.reset()\n #env._max_episode_steps = max_timesteps\n\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events()\n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * (history_length + 1))\n state = np.array(image_hist).reshape(96, 96, history_length + 1)\n \n while True:\n\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. 
\n # action_id = agent.act(...)\n # action = your_id_to_action_method(...)\n action_id = agent.act([state], deterministic)\n action = id_to_action(action_id)\n\n if verbose:\n print('\\tStep ', '{:7d}'.format(step), ' Action: ', ACTIONS[action_id]['log'])\n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal: \n break\n\n next_state = state_preprocessing(next_state)\n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist).reshape(96, 96, history_length + 1)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n if do_prefill:\n agent.replay_buffer.add_transition(state, action_id, next_state, reward, terminal)\n\n stats.step(reward, action_id)\n\n state = next_state\n \n if terminal or (step * (skip_frames + 1)) > max_timesteps :\n break\n\n if step % 100 == 0 and False:\n print('\\t\\tStep ', '{:4d}'.format(step), ' Reward: ', '{:4.4f}'.format(stats.episode_reward))\n\n step += 1\n\n return stats", "title": "" }, { "docid": "4696e2d4336abd7aab31f07e1a35f937", "score": "0.5838919", "text": "def train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.log = open('DQNLOG','w')\n self.replay = ReplayMemory(10000)\n self.state = self.env.reset()\n self.cur_step = 0\n self.num_episode = 0\n self.episode_reward = np.asarray([0])\n self.train_util()", "title": "" }, { "docid": "2e5622497bd15080205e31e04b9f7e22", "score": "0.5823736", "text": "def train(self):\n models_name = []\n for item in self.test_models:\n models_name.append(item[\"name\"])\n self.output(\"Testing these models: \", models_name)\n self.output(self.info_str)\n self.output('*'*80)\n\n if not self.ensemble:\n self.seperate_test()\n else:\n self.ensemble_test()", "title": "" }, { "docid": "95e4f5f2e1a895d7e0034c80a78ef7b6", "score": "0.5818758", "text": "def main(model_folder):\n model_description_file = os.path.join(model_folder, \"info.yml\")\n\n # Read the model description file\n with open(model_description_file, 'r') as ymlfile:\n model_description = yaml.load(ymlfile)\n\n # Analyze model\n logging.info(model_description['model'])\n data = {}\n data['training'] = os.path.join(model_folder, \"traindata.hdf5\")\n data['testing'] = os.path.join(model_folder, \"testdata.hdf5\")\n data['validating'] = os.path.join(model_folder, \"validdata.hdf5\")\n train_model(model_folder)", "title": "" }, { "docid": "c30039354bedf7504d7cc782654f7c9a", "score": "0.5816794", "text": "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board,curPlayer)\n # print(\"canonicalBoard\", canonicalBoard)\n temp = int(episodeStep < self.args[\"tempThreshold\"])\n\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n\n valids = self.game.getValidMoves(canonicalBoard, 1)\n if sum(valids) != 1:\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b,p in sym:\n trainExamples.append([b, curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, curPlayer = self.game.getNextState(board, curPlayer, action)\n # print(\"board\", board)\n\n r = self.game.getGameEnded(board, curPlayer)\n\n if r!=0:\n return [(x[0],x[2],r*((-1)**(x[1]!=curPlayer))) for x in trainExamples]", "title": "" }, { "docid": "827313d9de11c37d2e91cafb3486f8e0", 
"score": "0.5815451", "text": "def main(model=\"DATE_TIME\", new_model_name=\"DATE_TIME\", output_dir=\"./DATE_TIME\", n_iter=30):\n random.seed(0)\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank(\"en\") # create blank Language class\n print(\"Created blank 'en' model\")\n # Add entity recognizer to model if it's not in the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if \"ner\" not in nlp.pipe_names:\n ner = nlp.create_pipe(\"ner\")\n nlp.add_pipe(ner)\n # otherwise, get it, so we can add labels to it\n else:\n ner = nlp.get_pipe(\"ner\")\n\n ner.add_label(LABEL) # add new entity label to entity recognizer\n # Adding extraneous labels shouldn't mess anything up\n ner.add_label(\"VEGETABLE\")\n if model is None:\n optimizer = nlp.begin_training()\n else:\n optimizer = nlp.resume_training()\n move_names = list(ner.move_names)\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n other_pipes = [\n pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n with nlp.disable_pipes(*other_pipes): # only train NER\n sizes = compounding(1.0, 4.0, 1.001)\n # batch up the examples using spaCy's minibatch\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n batches = minibatch(TRAIN_DATA, size=sizes)\n losses = {}\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer,\n drop=0.35, losses=losses)\n print(\"Losses\", losses)\n\n # test the trained model\n test_text = \"Date of termination : 17/09/2019\"\n doc = nlp(test_text)\n print(\"Entities in '%s'\" % test_text)\n for ent in doc.ents:\n print(ent.label_, ent.text)\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.meta[\"name\"] = new_model_name # rename model\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n # Check the classes have loaded back consistently\n assert nlp2.get_pipe(\"ner\").move_names == move_names\n doc2 = nlp2(test_text)\n for ent in doc2.ents:\n print(ent.label_, ent.text)", "title": "" }, { "docid": "0d443cfb023091ddbe88737cfad5e3cf", "score": "0.5810159", "text": "def main():\n\n raw_data = pd.read_csv('../data/data.csv', sep=',')\n kickstarter_model = KickstarterModel()\n data_final = kickstarter_model.preprocess(raw_data)\n X_train, X_test, y_train, y_test = kickstarter_model.split_for_validation(data_final)\n final_model = kickstarter_model.train(X_train, X_test, y_train, y_test)\n with open('../api/models/' + kickstarter_model.filename, 'wb') as file:\n pickle.dump(final_model, file)", "title": "" }, { "docid": "6a1eaf4dcc58fd357e9c2d74583f1231", "score": "0.5809138", "text": "def test_save_and_load(self):\n with test_util.TempDirectory() as f:\n self.model.save(f)\n self.model = tc.load_model(f)\n\n try:\n self.test__list_fields()\n self.test_get()\n except:\n assert False, \"List fields or get failed after save and load.\"\n try:\n self.test_summaries()\n except:\n assert False, \"Model summaries failed after save and load.\"\n del self.model", "title": "" }, { "docid": "1022ee6a984a607fd35fb67a41599042", "score": "0.5804444", "text": "def record(model, env, num_episodes=1):\n env = VecVideoRecorder(env,\"./vid\", 
record_video_trigger=lambda x: x == 0, video_length=12000, name_prefix=\"tetris_ai_video\")\n obs = env.reset()\n i=0\n steps = 0\n while (i <= num_episodes):\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n steps+=1\n if dones[0]:\n obs = env.reset()\n print(\"=== EPISODE {} ===\".format(i+1))\n print(\"Num_lines: \" + str(info[0]['number_of_lines']))\n print(\"score: \" + str(info[0]['score']))\n print(\"Number of episodes: \", i)\n print(\"=== END ===\")\n i+=1\n \n return \"Done\"", "title": "" }, { "docid": "b7573535a816068fe441eb2b99148747", "score": "0.5802552", "text": "def test_run():\n window_size = 30\n\n train, test = next(get_holdouts(\n max_wiggle_size=2,\n batch_size=16,\n window_size=window_size,\n nrows=10000,\n verbose=True\n ))\n\n model = build_deep_enhancers(window_size=window_size)\n\n model.fit(\n train,\n steps_per_epoch=train.steps_per_epoch,\n validation_data=test,\n validation_steps=test.steps_per_epoch,\n verbose=True\n )\n\n evaluate_model(model, train, test, True)", "title": "" }, { "docid": "80dbcb02f463fa0574556b7aaf0e99e4", "score": "0.58006275", "text": "def _run_episode(self, max_steps=None):\n steps = 0\n done = False\n rewards = []\n envstate = self.env.reset()\n while not done and (steps < max_steps if max_steps is not None else True):\n prev_envstate = envstate #DQL variable\n \n action = self._predict(envstate)\n envstate, reward, done, _ = self.env.step(action)\n rewards.append(reward)\n steps+=1\n \n #Update Replay Buffer\n experience = [prev_envstate, action, reward, envstate, done]\n self.replay_buffer.remember(experience)\n self._learn() #Improve at each step\n \n return rewards, steps", "title": "" }, { "docid": "0e35409d76cf8031b4e1483de386adc5", "score": "0.57950425", "text": "def train():\r\n print(\"N_EQUIVARIANT is {}\".format(FLAGS.N_EQUIVARIANT))\r\n print(\"N_INVARIANT is {}\".format(FLAGS.N_INVARIANT))\r\n print(\"N_CHOOSE is {}\".format(FLAGS.N_CHOOSE))\r\n print('Number of training episodes is', FLAGS.TRAIN_EPISODE)\r\n print('Number of training stexfdsfe per each episode is', FLAGS.TRAIN_STEP)\r\n\r\n save_name, short_name = fname(save_time=True)\r\n\r\n # Create directory to save results\r\n train_result_pth = './{}'.format(FLAGS.ENVIRONMENT)\r\n if not os.path.exists(train_result_pth):\r\n os.makedirs(train_result_pth)\r\n train_result_pth = os.path.join(train_result_pth, 'train-result')\r\n if not os.path.exists(train_result_pth):\r\n os.makedirs(train_result_pth)\r\n train_result_pth = os.path.join(train_result_pth, save_name)\r\n if not os.path.exists(train_result_pth):\r\n os.makedirs(train_result_pth)\r\n\r\n now = time.localtime()\r\n s_time = \"%02d%02d-%02d%02d%02d\" % (now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\r\n train_result_pth += '/seed_' + str(FLAGS.SEED)\r\n print('Train result path is {}'.format(train_result_pth))\r\n\r\n # save params\r\n params_path = './{}/trained-params/'.format(FLAGS.ENVIRONMENT)\r\n params_path += save_name + '/seed_' + str(FLAGS.SEED) +'/'\r\n \r\n if not os.path.exists(params_path):\r\n os.makedirs(params_path)\r\n\r\n print('Trained parameters are saved in {}'.format(params_path))\r\n\r\n # Global TF session\r\n sess = tf.Session(config=gpu_config)\r\n\r\n # Creating workers and corresponding evaluators\r\n env = Environment(FLAGS.N_EQUIVARIANT,FLAGS.N_INVARIANT, train_mode=True)\r\n print(\"create env\")\r\n agent = Agent('global', sess)\r\n print(\"create agent\")\r\n evaluator = Evaluator(sess, 1, train_result_pth, short_name) # 
1 means evaluator for training\r\n print(\"create evaluator\")\r\n\r\n # generate saver for only main\r\n if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global_main'):\r\n saver = tf.train.Saver(\r\n tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global_main'))\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # copy the main params to targets\r\n agent.copy_parameters()\r\n\r\n start_time = time.time()\r\n \r\n # Learning\r\n for episode in range(FLAGS.RELOAD_EP*FLAGS.TRAIN_EPISODE,\r\n (FLAGS.RELOAD_EP+1)*FLAGS.TRAIN_EPISODE):\r\n \r\n # Reset environment\r\n env.reset()\r\n\r\n print(\"\\n-------------- EPISODE {} --------------\\n\".format(episode))\r\n\r\n # Disable test mode before training\r\n agent.disable_test_mode()\r\n\r\n # train mode\r\n for step in range(FLAGS.TRAIN_STEP):\r\n # Normal Process\r\n state = cp.deepcopy(env.get_state())\r\n action = cp.deepcopy(agent.act(state))\r\n reward = cp.deepcopy(env.step(action))\r\n next_state = cp.deepcopy(env.get_state())\r\n\r\n if FLAGS.SORTED==1:\r\n env._sort_state()\r\n if FLAGS.SORTED==2:\r\n env._shuffle_state()\r\n\r\n # Agent gets reward and next state\r\n agent.receive_reward(reward)\r\n\r\n # get the loss, q-values of the current agents\r\n losses = cp.deepcopy(agent.get_loss())\r\n q_values = cp.deepcopy(agent.get_q_value())\r\n\r\n evaluator.save_temp_list(reward,losses,q_values)\r\n\r\n # with some SAVE_PERIOD, evaluator update the long term logs and preserve the consecutive transitions with SAVE_REPEAT\r\n if (FLAGS.TRAIN_STEP * episode + step+1) % FLAGS.SAVE_PERIOD == 0:\r\n evaluator.average_status()\r\n evaluator.save_avg_to_tensorboard(episode,step)\r\n \r\n if episode % max(int(FLAGS.TRAIN_EPISODE * FLAGS.TOTAL_RELOAD/100.0), 1) ==0: # test 100 times\r\n # Enable test mode\r\n agent.enable_test_mode()\r\n\r\n reward_test = 0\r\n losses_test = 0\r\n q_values_test = np.zeros(FLAGS.N_CHOOSE)\r\n check_test_start = time.time()\r\n repeat_test = min(int(10000/FLAGS.TRAIN_STEP), 20)\r\n\r\n for _ in range(repeat_test):\r\n env.reset()\r\n for step in range(FLAGS.TRAIN_STEP): #test mode\r\n # Normal Process\r\n state = cp.deepcopy(env.get_state())\r\n action = cp.deepcopy(agent.act(state))\r\n reward= cp.deepcopy(env.step(action))\r\n next_state = cp.deepcopy(env.get_state())\r\n \r\n # get the loss, q-values of the current agents\r\n reward_test += reward\r\n losses_test += cp.deepcopy(agent.get_loss())\r\n q_values_test += cp.deepcopy(agent.get_q_value())\r\n \r\n # save test result in tb\r\n avg_reward_test = reward_test/(float(FLAGS.TRAIN_STEP)*repeat_test)\r\n avg_losses_test = losses_test/(float(FLAGS.TRAIN_STEP)*repeat_test)\r\n avg_q_values_test = q_values_test/(float(FLAGS.TRAIN_STEP)*repeat_test)\r\n check_test_end = time.time()\r\n spend_time_test = check_test_end-check_test_start\r\n \r\n print('time_test', spend_time_test/repeat_test)\r\n evaluator.save_test_tb(avg_reward_test,avg_losses_test, avg_q_values_test, spend_time_test, episode)\r\n evaluator._reset() # just for clean training time\r\n\r\n if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global_main'):\r\n saver.save(sess, params_path, global_step=(FLAGS.RELOAD_EP+1) * FLAGS.TRAIN_STEP * FLAGS.TRAIN_EPISODE)\r\n\r\n end_time = time.time()\r\n print('Time taken for training: {} seconds'.format(end_time - start_time))\r\n # TODO: save the time to csv\r\n\r\n time.sleep(5)", "title": "" }, { "docid": "7052fb98aa2f235a41744ff558cf2b5f", "score": "0.57908255", "text": "def run():\n features()\n deploy_model()\n predict_model()", 
"title": "" }, { "docid": "d9e656a5b0d66b845cb9b2ce5dbaf2c8", "score": "0.5779978", "text": "def run_model_imdb():\n model = define_network_multi([2, 6], [\"Gender\", \"Age\"], in_shape=in_shape)\n opt = optimizers.Adam(lr=LEARNING_RATE)\n model.compile(optimizer=opt, loss=\"sparse_categorical_crossentropy\", metrics=['accuracy'])\n\n ep_hist_train = {}\n ep_hist_val = {}\n generator = DataGeneratorIMDB(resolution, bulk_size)\n for e in range(n_epochs):\n print(\"epoch %d\" % e)\n train_epoch(model, generator, e, ep_hist_train)\n validate_epoch(model, generator, e, ep_hist_val)", "title": "" }, { "docid": "8835d3a61ff65f1126db48225723d970", "score": "0.57786834", "text": "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=False, max_timesteps=1000, history_length=0):\n\n stats = EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n state = env.reset()\n\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events() \n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * (history_length + 1))\n state = np.array(image_hist).reshape(96, 96, history_length + 1)\n \n while True:\n\n # TODO: get action_id from agent\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. \n # action_id = agent.act(...)\n # action = your_id_to_action_method(...)\n\n action_id = agent.act(state=state, deterministic=deterministic)\n action = id_to_action(action_id)\n\n \n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal: \n break\n\n next_state = state_preprocessing(next_state)\n print (\"next_state shape ::: \", next_state.shape)\n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist).reshape(96, 96, history_length + 1)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n stats.step(reward, action_id)\n\n state = next_state\n \n if terminal or (step * (skip_frames + 1)) > max_timesteps : \n break\n\n step += 1\n\n return stats", "title": "" }, { "docid": "29cad2a382e3c6c4833e5c6c3bb7ec0d", "score": "0.57750994", "text": "def run_episode(\n episode: core.Episode, mzconfig: core.MuZeroConfig,\n initial_inference_service: prediction_service_pb2_grpc.PredictionServiceStub,\n recurrent_inference_service: prediction_service_pb2_grpc.PredictionServiceStub,\n initial_inference_cache: List[core.NetworkOutput],\n counter: metrics.Metrics.DelegatingCounter) -> None:\n step_num = 0\n while not episode.terminal() and len(episode.history) < mzconfig.max_moves:\n # Get observation.\n current_observation = episode.make_image(state_index=-1)\n\n # Prepare MCTS.\n # We may already have played this, so avoid swamping the predict servers.\n if len(initial_inference_cache) >= step_num + 1:\n initial_inference_output = initial_inference_cache[step_num]\n counter.inc()\n else:\n initial_inference_output = send_initial_inference_request(\n predict_service=initial_inference_service, inputs=current_observation)\n initial_inference_cache.append(initial_inference_output)\n\n legal_actions = episode.legal_actions()\n root = core.prepare_root_node( # pytype: disable=wrong-arg-types # numpy-scalars\n config=mzconfig,\n legal_actions=legal_actions,\n initial_inference_output=initial_inference_output)\n\n # Run 
MCTS.\n core.run_mcts(\n config=mzconfig,\n root=root,\n action_history=episode.action_history(),\n legal_actions_fn=episode.legal_actions,\n recurrent_inference_fn=functools.partial(\n send_recurrent_inference_request,\n predict_service=recurrent_inference_service))\n\n # Pick action.\n action = core.select_action(\n config=mzconfig,\n num_moves=len(episode.history),\n node=root,\n train_step=0,\n use_softmax=mzconfig.use_softmax_for_action_selection)\n\n episode.apply(action)\n step_num += 1", "title": "" }, { "docid": "1dd55a02b43d5f67276af04ad58bfc30", "score": "0.5772318", "text": "def run_episode(env, agent, deterministic, do_training=True, rendering=True, max_timesteps=1000):\n\n stats = EpisodeStats() # save statistics like episode reward or action usage\n state = env.reset()\n\n step = 0\n while True:\n\n action_id = agent.act(state=state, race=False, deterministic=deterministic)\n next_state, reward, terminal, info = env.step(action_id)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n stats.step(reward, action_id)\n\n state = next_state\n\n if rendering:\n env.render()\n\n if terminal or step > max_timesteps:\n break\n\n step += 1\n\n return stats", "title": "" }, { "docid": "d4b3626cf3a02d6ab8a54fe7c5bd5efc", "score": "0.57688034", "text": "def run_from_saved_model_with_Test_Data():\n X_train_i, Y_train_i, X_test_i, Y_test_i = load_data()\n # preprocess training and test labels \n Xtest = preprocess_image_data(X_test_i)\n Ytest = preprocess_label_data(Y_test_i)\n # load pre-trained model\n cnn = load_model('cnn_model')\n results = cnn.evaluate(Xtest, Ytest, verbose=1)\n return results", "title": "" }, { "docid": "032cca979ed548f393c97592d860e889", "score": "0.57654774", "text": "def run(self, num_episodes=None):\n\n # This will cause a reset in the first iteration of the following loop.\n end_episode = True\n\n # Counter for the number of states we have processed.\n # This is stored in the TensorFlow graph so it can be\n # saved and reloaded along with the checkpoint.\n count_states = self.model.get_count_states()\n\n # Counter for the number of episodes we have processed.\n count_episodes = self.model.get_count_episodes()\n\n if num_episodes is None:\n # Loop forever by comparing the episode-counter to infinity.\n num_episodes = float('inf')\n else:\n # The episode-counter may not start at zero if training is\n # continued from a checkpoint. Take this into account\n # when determining the number of iterations to perform.\n num_episodes += count_episodes\n\n while count_episodes <= num_episodes:\n if end_episode:\n # Reset the game-environment and get the first image-frame.\n img = self.env.reset()\n\n # Create a new motion-tracer for processing images from the\n # game-environment. 
Initialize with the first image-frame.\n # This resets the motion-tracer so the trace starts again.\n # This could also be done if end_life==True.\n motion_tracer = MotionTracer(img)\n\n # Reset the reward for the entire episode to zero.\n # This is only used for printing statistics.\n reward_episode = 0.0\n\n # Increase the counter for the number of episodes.\n # This counter is stored inside the TensorFlow graph\n # so it can be saved and restored with the checkpoint.\n count_episodes = self.model.increase_count_episodes()\n\n # Get the number of lives that the agent has left in this episode.\n num_lives = self.get_lives()\n\n # Get the state of the game-environment from the motion-tracer.\n # The state has two images: (1) The last image-frame from the game\n # and (2) a motion-trace that shows movement trajectories.\n state = motion_tracer.get_state()\n\n # Use the Neural Network to estimate the Q-values for the state.\n # Note that the function assumes an array of states and returns\n # a 2-dim array of Q-values, but we just have a single state here.\n q_values = self.model.get_q_values(states=[state])[0]\n\n # Determine the action that the agent must take in the game-environment.\n # The epsilon is just used for printing further below.\n action, epsilon = self.epsilon_greedy.get_action(q_values=q_values,\n iteration=count_states,\n training=self.training)\n\n # Take a step in the game-environment using the given action.\n # Note that in OpenAI Gym, the step-function actually repeats the\n # action between 2 and 4 time-steps for Atari games, with the number\n # chosen at random.\n img, reward, end_episode, info = self.env.step(action=action)\n\n # Process the image from the game-environment in the motion-tracer.\n # This will first be used in the next iteration of the loop.\n motion_tracer.process(image=img)\n\n # Add the reward for the step to the reward for the entire episode.\n reward_episode += reward\n\n # Determine if a life was lost in this step.\n num_lives_new = self.get_lives()\n end_life = (num_lives_new < num_lives)\n num_lives = num_lives_new\n\n # Increase the counter for the number of states that have been processed.\n count_states = self.model.increase_count_states()\n\n if not self.training and self.render:\n # Render the game-environment to screen.\n self.env.render()\n\n # Insert a small pause to slow down the game,\n # making it easier to follow for human eyes.\n time.sleep(0.01)\n\n # If we want to train the Neural Network to better estimate Q-values.\n if self.training:\n # Add the state of the game-environment to the replay-memory.\n self.replay_memory.add(state=state,\n q_values=q_values,\n action=action,\n reward=reward,\n end_life=end_life,\n end_episode=end_episode)\n\n # How much of the replay-memory should be used.\n use_fraction = self.replay_fraction.get_value(iteration=count_states)\n\n # When the replay-memory is sufficiently full.\n if self.replay_memory.is_full() \\\n or self.replay_memory.used_fraction() > use_fraction:\n\n # Update all Q-values in the replay-memory through a backwards-sweep.\n self.replay_memory.update_all_q_values()\n\n # Log statistics for the Q-values to file.\n if self.use_logging:\n self.log_q_values.write(count_episodes=count_episodes,\n count_states=count_states,\n q_values=self.replay_memory.q_values)\n\n # Get the control parameters for optimization of the Neural Network.\n # These are changed linearly depending on the state-counter.\n learning_rate = self.learning_rate_control.get_value(iteration=count_states)\n loss_limit 
= self.loss_limit_control.get_value(iteration=count_states)\n max_epochs = self.max_epochs_control.get_value(iteration=count_states)\n\n # Perform an optimization run on the Neural Network so as to\n # improve the estimates for the Q-values.\n # This will sample random batches from the replay-memory.\n self.model.optimize(learning_rate=learning_rate,\n loss_limit=loss_limit,\n max_epochs=max_epochs)\n\n # Save a checkpoint of the Neural Network so we can reload it.\n self.model.save_checkpoint(count_states)\n\n # Reset the replay-memory. This throws away all the data we have\n # just gathered, so we will have to fill the replay-memory again.\n self.replay_memory.reset()\n\n if end_episode:\n # Add the episode's reward to a list for calculating statistics.\n self.episode_rewards.append(reward_episode)\n\n # Mean reward of the last 30 episodes.\n if len(self.episode_rewards) == 0:\n # The list of rewards is empty.\n reward_mean = 0.0\n else:\n reward_mean = np.mean(self.episode_rewards[-30:])\n\n if self.training and end_episode:\n # Log reward to file.\n if self.use_logging:\n self.log_reward.write(count_episodes=count_episodes,\n count_states=count_states,\n reward_episode=reward_episode,\n reward_mean=reward_mean)\n\n # Print reward to screen.\n msg = \"{0:4}:{1}\\t Epsilon: {2:4.2f}\\t Reward: {3:.1f}\\t Episode Mean: {4:.1f}\"\n print(msg.format(count_episodes, count_states, epsilon,\n reward_episode, reward_mean))\n elif not self.training and (reward != 0.0 or end_life or end_episode):\n # Print Q-values and reward to screen.\n msg = \"{0:4}:{1}\\tQ-min: {2:5.3f}\\tQ-max: {3:5.3f}\\tLives: {4}\\tReward: {5:.1f}\\tEpisode Mean: {6:.1f}\"\n print(msg.format(count_episodes, count_states, np.min(q_values),\n np.max(q_values), num_lives, reward_episode, reward_mean))", "title": "" }, { "docid": "f2af63fb5244168b5708e570509e507e", "score": "0.5765136", "text": "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "title": "" }, { "docid": "aae38e32497799eac6d811ff28c2b50e", "score": "0.5760729", "text": "def test_build_model(self):\n with fa.app.test_request_context():\n fa.app.preprocess_request()\n build_model_setup()\n conn = fa.g.rdb_conn\n rdb.table(\"projects\").insert({\"id\": \"test\",\n \"name\": \"test\"}).run(conn)\n rdb.table(\"features\").insert({\"id\": \"asas_training_subset\",\n \"projkey\": \"test\",\n \"name\": \"asas_training_subset\",\n \"created\": \"test\",\n \"headerfile_path\": \"HEADPATH.dat\",\n \"zipfile_path\": \"ZIPPATH.tar.gz\",\n \"featlist\": [\"a\", \"b\", \"c\"]}).run(conn)\n rv = fa.buildModel(model_name=\"NEW_MODEL_NAME\",\n project_name=\"test\",\n featureset_name=\"asas_training_subset\",\n model_type=\"RandomForestClassifier\",\n model_params={},\n params_to_optimize={\"n_estimators\":\n [10, 50, 100]})\n res_dict = json.loads(rv.data.decode())\n while \"currently running\" in fa.check_job_status(res_dict[\"PID\"]):\n time.sleep(1)\n new_model_key = res_dict[\"new_model_key\"]\n entry = rdb.table(\"models\").get(new_model_key).run(conn)\n assert \"results_msg\" in entry\n model = joblib.load(pjoin(cfg['paths']['models_folder'],\n \"{}.pkl\".format(new_model_key)))\n assert hasattr(model, \"predict_proba\")\n model_and_prediction_teardown()\n rdb.table(\"models\").get(new_model_key).delete().run(conn)", "title": "" }, { "docid": "46ad0abe066ee5d34c0ea5d0e07f2ace", "score": 
"0.57596564", "text": "def start_new_episode(self):\n pass", "title": "" }, { "docid": "2151dcc1514200765d16c4394c837691", "score": "0.5754689", "text": "def run_model(model, curr_memory, envs, device):\n\n # Fetch the action taken by the expert in the current observation (from database). For interactive Gym environments, no-op (0) is returned\n curr_memory['expert_ac'] = torch.from_numpy(envs.expert_ac())\n\n #forward()\n model_return = model(curr_memory=curr_memory)\n\n # Execute on environment. Environmental reward unavailable\n cpu_acs = model_return.action.detach().squeeze(1).cpu().numpy()\n obs, _, done, info = envs.step(cpu_acs)\n\n # Use dictionary created by bench.monitor to obtain true episodic returns (for printing and plotting only)\n if np.sum(done) > 0:\n for idx, done_ in enumerate(done.tolist()):\n if done_:\n # Expert envs. do not insert this info. at episode termination. Add a dummy (-1e3) value for them\n curr_memory['episode_reward_info'][idx] = info[idx].get('episode')['r'] if 'episode' in info[idx].keys() else -1e3\n\n # Get \"synthetic reward\" from the discriminator\n reward = - torch.log(1 - model_return.discriminator_out_d.detach() + 1e-3)\n\n # If trajectory ended, create mask to reset previous belief and previous action\n mask = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])\n\n # Update curr_memory\n curr_memory['prev_ob'] = curr_memory['curr_ob']\n curr_memory['curr_ob'] = torch.from_numpy(obs).float()\n\n # For start of a new episode, make the prev_ob same as the curr_ob\n curr_memory['prev_ob'] = mask * curr_memory['prev_ob'] + (1 - mask) * curr_memory['curr_ob']\n mask = mask.to(device)\n\n # Resets for new episodes\n curr_memory['prev_belief'] = model_return.belief_state * mask\n curr_memory['prev_ac'] = model_return.action * mask\n\n return model_return, curr_memory, mask, reward", "title": "" }, { "docid": "170630661db9a156facb994785b0f211", "score": "0.5752719", "text": "def test(self):\n self.eval_engine.test_eval(self.dataset.test, self.engine.model)", "title": "" }, { "docid": "68f30a37c99b261bb5da4f0074cd77e9", "score": "0.5745117", "text": "def test_1(self, tmpdir):\n\n self.setup_tmpdir(tmpdir)\n config = dict()\n config[\"model\"] = \"tmptest.\" + fullname(Model)\n config[\"iterator\"] = \"tmptest.\" + fullname(Iterator_checkpoint)\n config[\"datasets\"] = {\n \"train\": \"tmptest.\" + fullname(Dataset),\n \"validation\": \"tmptest.\" + fullname(Dataset),\n }\n config[\"batch_size\"] = 16\n config[\"num_steps\"] = 100\n config[\"n_processes\"] = 1\n import yaml\n\n with open(os.path.join(tmpdir, \"config.yaml\"), \"w\") as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n import shutil\n\n shutil.copytree(os.path.split(__file__)[0], os.path.join(tmpdir, \"tmptest\"))\n print(config)\n command = [\n \"edflow\",\n \"-c\",\n os.path.join(\n \"logs\", \"trained_model\", \"train\", \"checkpoints\", \"model.ckpt-0\"\n ),\n \"-b\",\n \"config.yaml\",\n \"-n\",\n \"test_inference\",\n ]\n command = \" \".join(command)\n run_edflow_cmdline(command, cwd=tmpdir)\n\n # check if correct folder was created\n log_dirs = os.listdir(os.path.join(tmpdir, \"logs\"))\n assert any(list(filter(lambda x: \"test_inference\" in x, log_dirs)))", "title": "" }, { "docid": "2bb973226f416f6cf80a194de82e792a", "score": "0.5741302", "text": "def run_exploration_episode(self):\n\n new_state, reward, episode_done = self.agent.random_step()\n self.gui.update(new_state)\n\n if episode_done or self.episode_steps >= self.env.MAX_EPISODE_STEPS:\n 
self.episode_counter += 1\n self.env.restart_environment()\n self.gui.restart_environment(self.env.start_state)\n self.gui.update_episode_labels(self.episode_counter)\n\n if self.episode_counter != self.no_of_exploration_episodes:\n if self.timescale == 0:\n self.gui.root.update_idletasks()\n self.canvas_after_variable = self.gui.animation.canvas.after(self.timescale, self.run_exploration_episode)\n else:\n self.episode_counter = 0\n self.agent.update_values()\n self.gui.draw_values_of_states(self.agent.values_of_state)\n self.gui.draw_policy(self.agent.policy)\n self.gui.add_to_listbox(\"Running greedy episodes.\")\n self.run_greedy_episode()", "title": "" }, { "docid": "75edd19f28b913477cd82dd65d46161b", "score": "0.5740036", "text": "def run_test(**kwargs):\n\n results = {}\n results[\"test_scores\"] = []\n results[\"test_scores_mean\"] = []\n results[\"params\"] = kwargs\n\n for k,v in kwargs.items():\n globals()[k] = v\n\n global model\n model = Net(len(actions))\n if device.lower() == \"cuda\":\n model.cuda()\n\n print(\"Actions:\",actions)\n\n global optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n\n print(\"-\"*60)\n print(\"Parameters \",kwargs)\n print(\"-\" * 60)\n\n time_start = time()\n\n global bad_q\n bad_q = False\n\n for epoch in range(epochs):\n print(\"\\nEpoch {} (eps={:.3f})\\n-------------\".format(epoch + 1, exploration_rate(epoch)))\n train_episodes_finished = 0\n train_scores = []\n\n print(\"Training...\")\n model.train()\n game.new_episode()\n for learning_step in trange(learning_steps_per_epoch, leave=False):\n if SHOW_MAXQ and learning_step % 1000 == 0:\n print(\"maxq: {:.2f} loss: {:.5f}\".format(prev_max_q, float(prev_loss)))\n perform_learning_step(epoch, learning_step)\n if game.is_episode_finished():\n score = game.get_total_reward()\n train_scores.append(score)\n game.new_episode()\n train_episodes_finished += 1\n\n print(\"\\n\\t%d training episodes played.\" % train_episodes_finished)\n\n train_scores = np.array(train_scores)\n\n print(\"\\tResults: mean: %.1f +/- %.1f,\" % (train_scores.mean(), train_scores.std()), \\\n \"min: %.1f,\" % train_scores.min(), \"max: %.1f,\" % train_scores.max())\n\n print(\"\\nTesting...\")\n model.eval()\n test_scores = []\n for test_episode in trange(test_episodes_per_epoch, leave=False):\n game.new_episode()\n step = 0\n while not game.is_episode_finished():\n step += 1\n state = preprocess(game.get_state().screen_buffer)\n state = state.reshape([1, 3, resolution[0], resolution[1]])\n if random() < 0:\n best_action_index = randint(0, len(actions) - 1)\n else:\n best_action_index = get_best_action(state)\n #print(\"step\", step, \"action\", best_action_index, \"state\",state.shape,\"max q\", torch.max(get_q_values(state)))\n reward = game.make_action(actions[best_action_index], frame_repeat)\n if SHOW_REWARDS and reward > 0:\n print(\"Reward! 
{} at step {}\".format(reward, step))\n\n r = game.get_total_reward()\n test_scores.append(r)\n\n if bad_q:\n print(\"******* Warning MaxQ was too high ***************\")\n\n test_scores = np.array(test_scores)\n print(\"\\n\\tResults: mean: %.1f +/- %.1f,\" % (\n test_scores.mean(), test_scores.std()), \"min: %.1f\" % test_scores.min(),\n \"max: %.1f\" % test_scores.max())\n\n results[\"test_scores_mean\"].append(test_scores.mean())\n results[\"test_scores\"].append(test_scores)\n\n if save_model:\n print(\"Saving the network weigths to:\", model_savefile)\n torch.save(model, model_savefile)\n\n print(\"\\tTotal elapsed time: %.2f minutes\" % ((time() - time_start) / 60.0))\n\n print(\"\\nScores:\", results[\"test_scores_mean\"],\"\\n\")\n\n results[\"elapsed_time\"] = ((time() - time_start) / 60.0)\n\n return results", "title": "" }, { "docid": "6c5a35fd41f32201743b356857bae4a8", "score": "0.57384396", "text": "def run(self, request=None):\n\t\treturn self.load_model(request);\n\t\t#DONE", "title": "" }, { "docid": "2b206335cc49d9454af0ada6e155329c", "score": "0.57380664", "text": "def load_model(self):\n if self.model is not None:\n return\n\n model_file = self.exec_dir / 'model.pkl'\n if model_file.exists():\n self.logger.debug(\"Load previous trained model...\")\n self.model = deserialize(model_file)\n else:\n self.logger.error(\"Cannot load model...\")\n raise Exception(\"Model doesn't exist..\")", "title": "" }, { "docid": "55322438119e7fd780d50f1ff2763e0b", "score": "0.5733764", "text": "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "title": "" }, { "docid": "bccce28c66ae5f056637b3fd255f7266", "score": "0.57328695", "text": "def run():\n # Create the evaluation directory if it doesn't exist.\n\n data_config = configuration.DataConfig().config\n data_gen = Data_Generator(data_config[\"processed_video_dir\"],\n data_config[\"caption_file\"],\n data_config[\"unique_frequency_cutoff\"],\n data_config[\"max_caption_length\"])\n\n data_gen.load_vocabulary(data_config[\"caption_data_dir\"])\n data_gen.load_dataset(data_config[\"caption_data_dir\"])\n\n FLAGS.checkpoint_dir = data_config[\"checkpoint_dir\"]\n\n eval_dir = data_config[\"val_log_dir\"]\n if not tf.gfile.IsDirectory(eval_dir):\n tf.logging.info(\"Creating eval directory: %s\", eval_dir)\n tf.gfile.MakeDirs(eval_dir)\n\n g = tf.Graph()\n with g.as_default():\n # Build the model for evaluation.\n model_config = configuration.ModelConfig(data_gen).config\n model = Model_S2VT(**model_config)\n model.build()\n\n # Create the Saver to restore model Variables.\n saver = tf.train.Saver()\n\n # Create the summary operation and the summary writer.\n val_writer = tf.summary.FileWriter(data_config[\"val_log_dir\"])\n\n g.finalize()\n\n if(FLAGS.eval_all_models):\n model_names = list(set([n.split(\".\")[0] for n in os.listdir(data_config[\"checkpoint_dir\"]) if \"model\" in n]))\n model_names.sort(key= lambda x: int(x[6:]) )\n for name in model_names:\n FLAGS.checkpoint_file = os.path.join(data_config[\"checkpoint_dir\"],name)\n tf.logging.info(\"Starting evaluation of %s at \" %(name) + time.strftime(\n \"%Y-%m-%d-%H:%M:%S\", time.localtime()))\n run_once(model, saver, val_writer,data_gen)\n else:\n # Run a new evaluation run every eval_interval_secs.\n while True:\n start = time.time()\n tf.logging.info(\"Starting evaluation at \" + time.strftime(\n 
\"%Y-%m-%d-%H:%M:%S\", time.localtime()))\n run_once(model, saver, val_writer,data_gen)\n time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()\n if time_to_next_eval > 0:\n time.sleep(time_to_next_eval)", "title": "" }, { "docid": "98f271fd9f1f90f85a8c163d6966bfbb", "score": "0.57219434", "text": "def predict_interactively():\n \n print('Loading best networks')\n env.guesser, agent.dqn = load_networks(i_episode='best')\n \n state = env.reset(mode='interactive', \n train_guesser=False)\n mask = env.reset_mask()\n \n # run episode\n for t in range(FLAGS.episode_length):\n\n # select action from policy\n action = agent.get_action(state, eps=0, mask=mask)\n mask[action] = 0\n print('Step {}: '.format(t+1))\n # take the action\n state, reward, done, guess = env.step(action, mode='interactive') \n \n if guess != -1:\n print('Ready to make a guess: Prob(y=1)={:1.3f}, Guess: y={}'.format(env.outcome_prob, guess))\n \n if done:\n print('Episode terminated\\n')\n break", "title": "" }, { "docid": "5f106a7b7b5591c3aeb992fe44bdc112", "score": "0.5720476", "text": "def train(self, episode, max_step, minibatch_size, initial_life=5, render=False, verbose=1, saving=False):\n episode_return = []\n verbose_return = []\n for i_episode in trange(episode):\n return_episode = 0\n observation = self.preprocess(self.env.reset())\n inputs = deque(maxlen=self.features)\n for _ in range(self.features):\n inputs.append(observation)\n\n life_now = initial_life\n\n for t in range(max_step):\n self.i_step += 1\n if render:\n self.env.render()\n\n # exploration\n if self.i_step < self.exploration_step:\n random_action = self.env.action_space.sample()\n observation, reward, done, info = self.env.step(random_action)\n return_episode += reward\n\n if info['ale.lives'] < life_now:\n reward -= 1\n life_now = info['ale.lives']\n\n X = np.transpose(np.array(inputs), [1, 2, 0])\n inputs.append(self.preprocess(observation))\n X_next = np.transpose(np.array(inputs), [1, 2, 0])\n self.dqn.replay_memory.append((X,\n random_action,\n reward,\n X_next,\n done\n ))\n # epsilon greedy\n else:\n X = np.transpose(np.array(inputs), [1, 2, 0])\n if random.random() < self.epsilon:\n action_now = self.env.action_space.sample()\n else:\n action_now = self.dqn.get_action(tf.convert_to_tensor(X))\n observation, reward, done, info = self.env.step(action_now)\n return_episode += reward\n\n if info['ale.lives'] < life_now:\n reward -= 1\n life_now = info['ale.lives']\n\n inputs.append(self.preprocess(observation))\n X_next = np.transpose(np.array(inputs), [1, 2, 0])\n self.dqn.replay_memory.append((X,\n action_now,\n reward,\n X_next,\n done\n ))\n # training step\n X_batch, action_batch, reward_batch, X_next_batch, done_batch = self.dqn.replay_memory.get_batch(\n minibatch_size)\n self.dqn.train(X_batch, action_batch, reward_batch, X_next_batch, done_batch)\n\n # epsilon decay\n if self.epsilon > self.epsilon_final:\n self.epsilon += self.epsilon_decay_rate\n\n if done:\n break\n\n episode_return.append(return_episode)\n verbose_return.append(return_episode)\n self.dqn.copy_active2target()\n\n if i_episode == 0 or ((i_episode + 1) % verbose == 0):\n if self.i_step < self.exploration_step:\n stage_tooltip = \"EXPLORATION\"\n elif self.epsilon > self.epsilon_final:\n stage_tooltip = \"EPSILON-GREEDY : %.4f\" % self.epsilon\n else:\n stage_tooltip = \"EPSILON-GREEDY[final] : %.4f\" % self.epsilon\n # print(Fore.BLACK + \"=\" * 50)\n print(Fore.RED + \"[EPISODE %3d / STEP %5d] - %s\" % (i_episode + 1, self.i_step, stage_tooltip))\n 
print(Fore.GREEN + \"Learned Step : %4d\" % (self.dqn.global_step))\n print(Fore.BLUE + \"AVG Return : %.4f\" % (sum(verbose_return) / len(verbose_return)))\n print(Fore.BLUE + \"MAX Return : %.4f\" % (max(verbose_return)))\n verbose_return = list()\n # print(Fore.BLACK + \"=\" * 50 + Style.RESET_ALL)\n\n if saving:\n self.save()\n return episode_return", "title": "" }, { "docid": "3a09b6192d25d754fa1bca02409c2327", "score": "0.5716023", "text": "def run_episode(self):\n # Clear buffers\n self.clear_episode()\n\n # Simulate environment while not terminal\n terminal = False\n t = 1\n obs_dict = self.env.reset()\n\n while not terminal and not rospy.is_shutdown():\n\n # Clear memories\n self.clear_memories()\n\n # N-step simulation\n t_last = t\n while t - t_last < self.n_step and not terminal and not rospy.is_shutdown():\n\n self.frames_buffer += 1\n\n print(\"Obs (ego): {} \".format(obs_dict[\"Agent_1\"][1]))\n print(\"Obs (other): {} \".format(obs_dict[\"Agent_1\"][2]))\n\n # Sample actions\n action_dict, log_prob_dict, entropy_dict, value_dict = self.sample_actions(\n obs_dict)\n\n # Calculate trajectories\n traj_dict = self.get_trajectory(action_dict)\n\n # Execute environment step\n next_obs_dict, reward_dict, done_dict, info_dict = self.env.step(\n traj_dict)\n\n print(\"Reward: {0:.2f} \".format(reward_dict[\"Agent_1\"]))\n\n # Store n-step history\n self.store_values(log_prob_dict, entropy_dict,\n value_dict, reward_dict)\n obs_dict = next_obs_dict\n\n # Check if episode is done\n if any(d != 0 for d in done_dict.values()):\n terminal = True\n\n t += 1\n print(\"-\" * 10)\n\n print(\"-\" * 20)\n\n # Accumulate gradients\n self.accumulate_gradients(obs_dict, done_dict)\n\n if self.n_step_update:\n # Prepare gradients req\n gradients_req = self.get_gradients()\n # Send gradients and receive weights\n weights_rsp = self.update_policy_service(gradients_req)\n # Update policies\n self.update_policies(weights_rsp.weights)\n # Update covariance\n self.cov = weights_rsp.covariance\n # Clear buffers\n self.clear_episode()\n\n if not self.n_step_update:\n gradients_req = self.get_gradients()\n else:\n gradients_req = None\n\n return gradients_req, info_dict", "title": "" }, { "docid": "6ca1c2bdcb75cbfd8df865bf9bbab88c", "score": "0.57119775", "text": "def run_task(data_dir, task_name, model_file):\n print(\"Train and test for task %s ...\" % task_name)\n\n\n train_files = glob.glob('%s_train.txt' % data_dir)\n test_files = glob.glob('%s_test.txt' % data_dir)\n\n dictionary = {\"nil\": 0, \"yes\": 1, \"no\": 2}\n\n lines_train_data, dictionary = load_all_data(train_files, dictionary)\n lines_test_data, dictionary = load_all_data(test_files, dictionary)\n train_start = 0\n run_number = 0\n first_train = True\n\n print(\"\\n#############################################################\\n\"\n \"##################### Training Started! 
#####################\"\n \"\\n#############################################################\\n\")\n\n while train_start != -1:\n # The model is trained story by story\n lines_train_data = lines_train_data[train_start:]\n train_gen = generate_next_story(lines_train_data, dictionary)\n train_story, train_questions, train_qstory, train_start = next(train_gen)\n run_number += 1\n\n # Very important to not make the model from scratch and remove all the trainings so far\n if first_train:\n first_train = False\n general_config = KgConfig(train_story, train_questions, dictionary)\n memory, model, loss = build_model(general_config)\n\n if general_config.linear_start:\n train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n else:\n train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n if run_number % 10 == 0:\n save_path = model_file+str(run_number)\n save_model(dictionary, memory, model, loss, general_config, save_path)\n\n\n\n # Testing\n test_wrapper(lines_test_data, dictionary, memory, model, loss, general_config)", "title": "" }, { "docid": "c73f22a5e6fe9256c50fc91f1e87adda", "score": "0.57045484", "text": "def run(self):\n self.init_raw_data()\n # self.init_ranked_data()\n\n adjusted_data = self.raw_data.copy()\n self.adjusted_data = adjusted_data.drop(self.to_drop, 1)\n self.adjusted_data = self.adjust_features(self.adjusted_data)\n\n #######################\n\n ''' Start Modeling Targets '''\n for target in self.targets:\n print(' ================================= ')\n print(\"Target :: {}\".format(target))\n method = getattr(self, \"target__%s\" % target)\n (target_y, target_X) = method()\n\n self.modeling(target_X, target_y, target)\n\n self.predictions()\n\n #######################\n\n print('Upcoming matches')\n\n \"\"\" Print a CSV for the previous weeks results \"\"\"\n # Will use this when we have previous predictions from the previous games\n #columns = ['team_name', 'opp_name', 'scheduled', 'is_home', 'match_id', 'points', 'goals']\n #self.predictions_compare()\n\n self.init_upcoming_data()\n #self.init_ranked_upcoming_matches_data()\n\n \"\"\" Makes sure there are games for the day \"\"\"\n if not self.upcoming_data.empty:\n\n \"\"\" Formatting data specific to the sport \"\"\"\n self.upcoming_formatted_data = self.upcoming_data.copy()\n self.upcoming_formatted_data = self.upcoming_formatted_data.drop(self.to_drop, 1)\n\n self.upcoming_formatted_data = self.adjust_features(self.upcoming_formatted_data)\n\n self.upcoming_formatted_data_X = self.upcoming_formatted_data.drop(['final_score', 'result'], 1)\n\n self.find_predictions()\n self.predictions_reorder(['team_name', 'opp_name', 'scheduled_pst', 'is_home', 'game_id', 'team_id', 'opp_id'])\n self.post_predictions()\n self.predictions_save()", "title": "" }, { "docid": "095880c71012dcac5a43b13a5e0ffc87", "score": "0.57034737", "text": "def run_train_models(args):\n try:\n with open(args.config, \"r\") as f:\n config = yaml.load(f,Loader=yaml.FullLoader)\n except IOError:\n logger.error(\"Could not read in the config file--verify correct filename/path.\")\n sys.exit(1)\n\n # get global data, trained global forecasting model, evaluate it, and save it\n global_data = read_data_from_db('global',args.engine_string)\n confirmed_series = reduce_and_reshape_data('global',global_data)\n arima_model = 
train_global_model(confirmed_series,config['train_models']['global_model_configs']['model_params'],config['train_models']['global_model_configs']['optional_fit_args'])\n eval_mape = forward_chaining_eval_global_model(confirmed_series,config['train_models']['global_model_configs']['model_params'],config['train_models']['global_model_configs']['optional_fit_args'],config['train_models']['global_model_configs']['nbr_days_forecast'])\n logger.info(\"Forward chaining MAPE for global forecasting model is: {}\".format(str(eval_mape)))\n #save_global_model(arima_model,args.config,args.s3_flag,**config['train_models']['global_model_configs']['save_model'])\n\n if args.s3_flag == True:\n save_global_model_s3(arima_model,args.config,**config['train_models']['global_model_configs']['save_model_to_s3'])\n else:\n save_global_model_local(arima_model,args.config,**config['train_models']['global_model_configs']['save_model_to_local'])\n\n # get country level data, trained country forecasting models, evaluate it, and save it\n country_data = read_data_from_db('country',args.engine_string)\n country_data = reduce_and_reshape_data('country',country_data)\n logger.info(\"Training models for each country, this will take a few moments.\")\n logger.warning(\"You may see some warnings issued from the ARIMA fit. Due to the nature of the data for some \"\n \"countries, the fit/optimization algorithm encounters issues.\")\n model_df = train_country_models(country_data,config['train_models']['country_model_configs']['model_params'],config['train_models']['country_model_configs']['optional_fit_args'])\n avg_country_model_mape = model_df.MAPE.mean()\n logger.info(\"Average MAPE across all country models: \"+str(avg_country_model_mape))\n #save_country_models(model_df,args.config,args.s3_flag,**config['train_models']['country_model_configs']['save_model'])\n\n if args.s3_flag == True:\n save_country_models_s3(model_df,args.config,**config['train_models']['country_model_configs']['save_model_to_s3'])\n else:\n save_country_models_local(model_df,args.config,**config['train_models']['country_model_configs']['save_model_to_local'])", "title": "" }, { "docid": "8ca199d1876f1e22140bc9413f7b46a1", "score": "0.5699874", "text": "def test_simple_model(self, embedding_name, emb_model, tokenizer, simple_model, device):\n\n total_correct_predictions = 0\n total_actions = 0\n\n dish_list = os.listdir(folder)\n\n test_result_df = pd.DataFrame(columns=[\"Dish\", \"Correct_Predictions\", \"Num_Actions\",\"Accuracy\"])\n\n dish_list = [dish for dish in dish_list if not dish.startswith(\".\")]\n dish_list.sort()\n\n saved_file_path = os.path.join(\n destination_folder3, \"model_result.tsv\"\n ) # Model saved path\n\n for dish in dish_list:\n\n correct_predictions, num_actions, results_df = self.run_model(\n dish,\n emb_model,\n tokenizer,\n simple_model,\n device,\n embedding_name,\n mode=\"Testing\",\n model_name=\"Simple Model\",\n )\n\n save_predictions(destination_folder3, results_df, dish)\n\n accuracy = correct_predictions * 100 / num_actions\n\n test_result = {\n \"Dish\": dish,\n \"Correct_Predictions\": correct_predictions,\n \"Num_Actions\": num_actions,\n \"Accuracy\": accuracy,\n }\n\n test_result_df = test_result_df.append(test_result, ignore_index=True)\n\n total_correct_predictions += correct_predictions\n total_actions += num_actions\n\n model_accuracy = total_correct_predictions * 100 / total_actions\n\n test_result = {\n \"Dish\": \"Overall\",\n \"Correct_Predictions\": total_correct_predictions,\n \"Num_Actions\": 
total_actions,\n \"Accuracy\": model_accuracy,\n }\n\n test_result_df = test_result_df.append(test_result, ignore_index=True)\n\n print(\"Model Accuracy: {:.2f}\".format(model_accuracy))\n\n test_result_df.to_csv(saved_file_path, sep=\"\\t\", index=False, encoding=\"utf-8\")\n\n print(\"Results saved in ==>\" + saved_file_path)", "title": "" }, { "docid": "07feab8ed8f2cc3336a3f6e7689907e9", "score": "0.5692425", "text": "def _maybe_load_session(self):\n try:\n # list available checkpoints and pick the one from the latest epoch\n _, dirs, _ = next(walk(self.experiment[\"CHECKPOINT_FOLDER\"]))\n dirs.sort()\n last_epoch = self.experiment[\"CHECKPOINT_FOLDER\"] + dirs[-1] + \"/\"\n checkpoint_model = last_epoch + self.experiment[\"CHECKPOINT_FILE\"]\n self.saver.restore(self.session, checkpoint_model)\n load_random_states(self.experiment, last_epoch)\n\n msg = \"Successfully loaded checkpoint file: {}\\n\"\n self.logger.info(msg.format(checkpoint_model))\n\n except (CheckpointNotFoundError, IndexError) as ex:\n if type(ex) == IndexError:\n self.logger.info(\"Unable to find any checkpoint\")\n else:\n self.logger.exception(\"Unable to load checkpoint correctly\")\n msg = \"Starting from scratch. Unable to load checkpoint from: {}\"\n self.logger.info(msg.format(self.experiment[\"CHECKPOINT_FOLDER\"]))\n # tensorboard experiment folder is cleaned iff there is no\n # checkpoint, because it is desirable to keep events from previous\n # runs to display more informative statistics\n remove_folder(self.experiment[\"TB_EXPERIMENT_FOLDER\"])\n makedirs(self.experiment[\"TB_EXPERIMENT_FOLDER\"], exist_ok=True)", "title": "" }, { "docid": "39c68085ecb9e36ee44343ed8ebd7398", "score": "0.5689822", "text": "def run(self):\n while True:\n cur_model = self.wait_for_model()\n fom = self.train_model(cur_model)\n self.send_result(fom)", "title": "" }, { "docid": "6951570e4e8decb0d59b7922ebb7dc97", "score": "0.56768477", "text": "def load(self):\n if not self.requires_learning:\n return\n\n path = join(AGENT_PATH, \"manifest.json\")\n agent_manifest = read_manifest_file(path)\n\n if self.id_str_with_hash not in agent_manifest:\n raise LookupError(\"Couldn't find the saved version.\")\n\n model_path = join(MODEL_PATH_TEMPLATE.format(self.id_str_with_hash))\n self.model.load(model_path)\n self.trained = True", "title": "" }, { "docid": "9a396cfd9c551c6ef54f3a2546f7e268", "score": "0.56765956", "text": "def run(self, max_episodes = 500, max_timesteps = 10000):\n unity_file = self.download_unity_env()\n env = self.get_gym_env(unity_file)\n # ========================================== #\n\n # RL stable baselines algorithms require a vectorized environment to run\n env = DummyVecEnv([lambda: env])\n\n model = PPO2(MlpPolicy, env, verbose=1)\n model.learn(total_timesteps=max_timesteps)\n\n obs = env.reset()\n for i in range(max_episodes):\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n env.render()\n\n sb_model_path = os.path.join('/tmp', 'ppo2_rldemo_sb')\n model.save(sb_model_path)\n\n # Note: the content of /opt/ml/model and /opt/ml/output is automatically uploaded\n # to previously selected bucket (by the estimator) at the end of the execution\n # os.environ['SM_MODEL_DIR'] correspongs to /opt/ml/model\n model_path = os.path.join(os.environ['SM_MODEL_DIR'], 'ppo2_rldemo')\n\n # Note: this model can not be directly employed in Unity ml-agents\n # it has to be converted into Barracuda format\n generate_checkpoint_from_model(sb_model_path, model_path)\n\n # 
========================================== #\n BaselinePPOTrainer.close_env(env)", "title": "" }, { "docid": "6fed27e2d924eeb1385dc21b3dd68b85", "score": "0.56732494", "text": "def run(self, model: ModelType) -> None:", "title": "" }, { "docid": "0d98df8bdaf3ecbe2adc36f2927d7773", "score": "0.56709677", "text": "def test(testFile, model):\n print(\"not implemented\")", "title": "" }, { "docid": "aac9ae1922611d66ae965847488217df", "score": "0.5670113", "text": "def run_model(config: Dict):\n print(\"Run configuration:\")\n print(config)\n seed(config['seed'])\n\n # read config\n graphs = graphs_from_args(config['graphs'])\n policy_name = config['policy']\n model_path = config['model_name']\n demands = demands_from_args(config, graphs)\n env_kwargs = env_kwargs_from_args(config)\n env_name = config['env_name']\n parallelism = config['parallelism']\n replay_steps = config['replay_steps']\n\n oblivious_routings = [routing_baselines.shortest_path_routing(graph) for\n graph in graphs]\n\n # make env\n env = lambda: gym.make(env_name,\n dm_sequence=demands,\n graphs=graphs,\n oblivious_routings=oblivious_routings,\n **env_kwargs)\n\n if policy_name == 'lstm':\n envs = DummyVecEnv([env] * parallelism)\n else:\n envs = DummyVecEnv([env])\n\n # load\n model = PPO2.load(model_path)\n\n # execute\n obs = envs.reset()\n state = None\n utilisations = []\n opt_utilisations = []\n oblivious_utilisations = []\n actions = []\n if env_name == 'ddr-iterative-v0':\n replay_steps = replay_steps * envs.envs[0].graphs[\n envs.envs[0].graph_index].number_of_edges()\n for i in range(replay_steps - 1):\n action, state = model.predict(obs, state=state, deterministic=True)\n obs, reward, done, info = envs.step(action)\n if info[0]['iter_idx'] == 0:\n utilisations.append(info[0]['utilisation'])\n opt_utilisations.append(info[0]['opt_utilisation'])\n oblivious_utilisations.append(info[0]['oblivious_utilisation'])\n else:\n for i in range(replay_steps - 1):\n action, state = model.predict(obs, state=state, deterministic=True)\n obs, reward, done, info = envs.step(action)\n utilisations.append(info[0]['utilisation'])\n opt_utilisations.append(info[0]['opt_utilisation'])\n oblivious_utilisations.append(info[0]['oblivious_utilisation'])\n actions.append(action)\n envs.close()\n\n # so that is is JSON serialisable\n actions = [action.tolist() for action in actions]\n\n # write the results to file\n result = {\"utilisations\": utilisations,\n \"opt_utilisations\": opt_utilisations,\n \"oblivious_utilisations\": oblivious_utilisations,\n \"actions\": actions}\n if 'output_path' in config:\n data = {**config, **result}\n with jsonlines.open(config['output_path'], 'a') as f:\n f.write(data)", "title": "" }, { "docid": "8da8349ad9ffb9bdc2919bcd6ef6679b", "score": "0.5667835", "text": "def evaluate_model(self):\n print(\"Evaluating model\")\n model = self.trainer.get_model()\n\n analyzer = ModelAnalysis(model, self.sim)\n\n analyzer.set_data(load.test_data, load.test_labels)\n analyzer.set_log_dir(self.run_path)\n analyzer.run()\n return None", "title": "" }, { "docid": "a1c524216c2ff2a6e23f2bf5e9d85325", "score": "0.56593686", "text": "def test_epoch(self, dl_test: DataLoader, **kw) -> EpochResult:\n self.model.train(False) # set evaluation (test) mode\n return self._foreach_batch(dl_test, self.test_batch, **kw)", "title": "" }, { "docid": "c1e0d9de4e058f7c009b0fba8b6fcc87", "score": "0.56511194", "text": "def load_last_saved_model(self, output_dir):\n model_found = False\n # If the output_dir contains a model, then it will be 
loaded\n # Pick the latest saved model\n try:\n # Get the list of checkpoint files and strip the full path\n model_list = [os.path.basename(x) for x in glob.glob(os.path.join(self.output_dir, 'model_*.chkpnt'))]\n # Sort the files based on the iteration number\n model_list_sorted = sorted(model_list, key=lambda f: int(re.match(r'model_(\\d+)itr.chkpnt', f).groups(0)[0]))\n except:\n print(\"*** NO SAVED MODEL FOUND in {}. LOADING FROM SCRATCH. ****\".format(self.output_dir))\n # Initilize the model\n self.model.init()\n else: # if no exceptions, then run the following\n # If there are saved models\n if len(model_list_sorted) > 0:\n # Pick the most recent model\n self.pretrained = os.path.join(self.output_dir, model_list_sorted[-1])\n # Construct the name of the optimizer file based on the pretrained model path\n opt_path = os.path.join(os.path.dirname(self.pretrained), os.path.basename(self.pretrained).replace('model','opt'))\n if os.path.exists(opt_path):\n self.model.load_state_dict(torch.load(self.pretrained))\n self.opt.load_state_dict(torch.load(opt_path))\n model_found = True\n\n try:\n # Load the best model\n best_model_path = os.path.join(self.output_dir, \"best_model.chkpnt\")\n self.best_model = self.get_model_from_name(self.model_name)\n self.best_model.load_state_dict(torch.load(best_model_path))\n print(\"Loading the model {} with best MRR {}.\".format(self.pretrained, self.best_model.best_mrr))\n except:\n print(\"*** NO BEST MODEL FOUND in {}. ****\".format(self.output_dir))\n # Set the best model to None\n self.best_model = None\n\n if not model_found:\n print(\"*** NO MODEL/OPTIMIZER FOUND in {}. LOADING FROM SCRATCH. ****\".format(self.output_dir))\n # Initilize the model\n self.model.init()", "title": "" }, { "docid": "b6ecaa8813c78a6c9982bf73b0fcc3bb", "score": "0.56452125", "text": "def _initialize_episode(self):\n initial_observation = self._environment.reset()\n return self._agent.begin_episode(initial_observation)", "title": "" }, { "docid": "2c12033a0d0ad4e8b538eb0daa343066", "score": "0.5639963", "text": "def __train_single_model(opt):\n # Create model and assign it to the specified task\n agent = create_agent(opt)\n world = create_task(opt, agent)\n print('[ training... 
]')\n\n train_dict = {'train_time': Timer(),\n 'validate_time': Timer(),\n 'log_time': Timer(),\n 'new_epoch': None,\n 'epochs_done': 0,\n 'max_exs': opt['num_epochs'] * len(world),\n 'total_exs': 0,\n 'parleys': 0,\n 'max_parleys': math.ceil(opt['num_epochs'] * len(world) / opt['batchsize']),\n 'best_metrics': opt['chosen_metrics'],\n 'best_metrics_value': 0,\n 'impatience': 0,\n 'lr_drop_impatience': 0,\n 'saved': False,\n 'train_report': None,\n 'train_report_agent': None,\n 'train_report_world': None,\n 'break': None}\n\n try:\n while True:\n world.parley()\n train_dict['parleys'] += 1\n train_dict['new_epoch'] = world.epoch_done()\n if train_dict['new_epoch']:\n world.reset()\n train_dict['epochs_done'] += 1\n world, agent, train_dict = __train_log(opt, world, agent, train_dict)\n if opt['num_epochs'] > 0 and train_dict['parleys'] >= train_dict['max_parleys']:\n print('[ num_epochs completed: {} ]'.format(opt['num_epochs']))\n break\n if 0 < opt['max_train_time'] < train_dict['train_time'].time():\n print('[ max_train_time elapsed: {} ]'.format(train_dict['train_time'].time()))\n break\n _, agent, train_dict = __intermediate_validation(opt, world, agent, train_dict)\n\n if train_dict['break']:\n break\n except KeyboardInterrupt:\n print('Stopped training, starting testing')\n\n if not train_dict['saved']:\n world.save_agents()\n\n world.shutdown()\n agent.shutdown()\n\n # reload best validation model\n vopt = copy.deepcopy(opt)\n if vopt.get('evaltask'):\n vopt['task'] = vopt['evaltask']\n vopt['datatype'] = 'valid'\n vopt['pretrained_model'] = vopt['model_file']\n agent = create_agent(vopt)\n valid_world = create_task(vopt, agent)\n metrics, _ = __evaluate_model(valid_world, vopt['batchsize'], 'valid',\n vopt['display_examples'], vopt['validation_max_exs'])\n valid_world.shutdown()\n agent.shutdown()\n return metrics", "title": "" }, { "docid": "7a2df42f28dd6985749add4012a56be4", "score": "0.56351304", "text": "def start_episode(self):\n self.algo.start_episode()", "title": "" } ]
0e104461cce6a58cb1527de242a9065c
Return a list of this compartments neighbors
[ { "docid": "4ec96ae6657a1638f62adf6d432fc87c", "score": "0.8468298", "text": "def neighbors(self):\n neighbors = [comp for comp in self.node0.compartments if comp != self]\n neighbors.extend(comp for comp in self.node1.compartments if \\\n comp != self and comp not in neighbors)\n return neighbors", "title": "" } ]
[ { "docid": "38fef1ab25c0cc71bec25ddb3225d301", "score": "0.84283286", "text": "def neighbors(self):\n return [comp for comp in self.node.compartments if comp != self]", "title": "" }, { "docid": "e48d37ca06666ef16c66fdb08074775f", "score": "0.7814085", "text": "def get_neighbors(self):\n return list(self.neighbors.values())", "title": "" }, { "docid": "291895262cb4ff01ebf013cf7aeec67b", "score": "0.7807219", "text": "def get_neighbors(self):\n return self.neighbors", "title": "" }, { "docid": "291895262cb4ff01ebf013cf7aeec67b", "score": "0.7807219", "text": "def get_neighbors(self):\n return self.neighbors", "title": "" }, { "docid": "dff512a8f1e4bd0813ed501fef17b70f", "score": "0.77412015", "text": "def getNeighbors(self):\n return self.neighbors", "title": "" }, { "docid": "8ccb49b4b9e92aa64cd4baa054361683", "score": "0.76023066", "text": "def getNeighbors(self):\n return list(self.neighbors.keys())", "title": "" }, { "docid": "e5492755fd6e68e49591aea6e0eb9866", "score": "0.7593142", "text": "def neighbors(self):\r\n nums = list(map(lambda x : Atom.atoms[int(x) - 1], self.bonds()))\r\n return nums", "title": "" }, { "docid": "3bc00daa3b38ec21e8a98cad0ec476aa", "score": "0.7560135", "text": "def get_neighbors(self):\n return list(self.__neighbors_dict.values())", "title": "" }, { "docid": "ebe2cf5182fa9f7cf6993132e9a69589", "score": "0.74886364", "text": "def get_neighbours(self):\n return self.adjacency_list", "title": "" }, { "docid": "53e6960a32e4ab2c97aad6a8454dfc12", "score": "0.7405935", "text": "def get_neighbor(self):\n return [self.env.get_neighbors(self.handles[i]) for i in range(self.n_group)]", "title": "" }, { "docid": "e5941ad0826065fd68354cd7c2870dc7", "score": "0.72763014", "text": "def get_neighbours(self):\n \n neighbor_nodes = []\n for edge in self.edges:\n neighbor_nodes.append(edge.nodeB)\n return neighbor_nodes", "title": "" }, { "docid": "1ebc9697d4984f538e429cc36e6bb11e", "score": "0.7269306", "text": "def neighbours(self):\n neighbours = []\n for offset in OFFSETS:\n row_off, col_off = offset\n neigh_pos = (self.row + row_off, self.col + col_off)\n if self.in_boundary(neigh_pos, self.matrix_size):\n neighbours.append(self.matrix.get_cell(neigh_pos))\n return neighbours", "title": "" }, { "docid": "385c6a59e573583e22cf89e74e618f6a", "score": "0.7238979", "text": "def neighbors(self):\n return scenario.at_points(\n self.point.vicinity(self.radius))", "title": "" }, { "docid": "219c0eed33434882ba85c0eff16758f0", "score": "0.7201328", "text": "def neighbours(self):\n return [n for n in (self.left_node, self.right_node,\n self.up_node, self.down_node) if n is not None]", "title": "" }, { "docid": "71336193dfda66f19843360be5775036", "score": "0.7196894", "text": "def get_neighbours():\n neighbours = THIS_NODE.get_neighbour_list()\n return neighbours", "title": "" }, { "docid": "1f5dd859d8ae0916b4e33b319b61760a", "score": "0.71836555", "text": "def neighbors(self):\n if getattr(self, \"_neighbors\", None) is None:\n self._neighbors = np.array(\n _build_adjacency(self._simplex_faces, self.n_faces)\n )\n return self._neighbors", "title": "" }, { "docid": "74c6a8ffbdb78638f2ea887566cfdea9", "score": "0.716458", "text": "def neighbours(self):\n\n cells = []\n\n if self.x != 0:\n cells.append((self.x - 1, self.y))\n if self.x != self.maze_size - 1:\n cells.append((self.x + 1, self.y))\n if self.y != 0:\n cells.append((self.x, self.y - 1))\n if self.y != self.maze_size - 1:\n cells.append((self.x, self.y + 1))\n\n return cells", "title": "" }, { "docid": 
"9e4ca10190dd52e583562854cd704b4a", "score": "0.7143296", "text": "def neighbors(self):\n return self._links.keys()", "title": "" }, { "docid": "749d569c21e354a527cc844501115010", "score": "0.71377504", "text": "def get_neighbours(self):\r\n return self.points_to.keys()", "title": "" }, { "docid": "4df5020e0dd058bd2278f62930bae085", "score": "0.7135996", "text": "def getNeighbors(self,snap):\n # build all-atom neighborlist with Voro++\n nl, area = _crayon.voropp(snap.xyz, snap.box, 'x' in snap.pbc, 'y' in snap.pbc, 'z' in snap.pbc)\n all_neighbors = []\n for idx in range(snap.N):\n if self.clustering:\n nn = self.filterNeighbors(idx,idx,nl,snap)\n else:\n nn = nl[idx]\n all_neighbors.append(np.array(nn,dtype=np.int))\n if self.enforce_symmetry:\n self.symmetrize(all_neighbors)\n if self.max_neighbors is not None:\n self.removeOverbonded(all_neighbors)\n return all_neighbors", "title": "" }, { "docid": "aed10e6b2b4cfd8e5301180d9e6e27ca", "score": "0.71342134", "text": "def neighbors(self, (i,j)):\n neighbors = []\n\n if i-1 >= 0: neighbors.append((i-1, j))\n if i+1 < self.GRID_I: neighbors.append((i+1, j))\n\n if j-1 >= 0: neighbors.append((i, j-1))\n if j+1 < self.GRID_J: neighbors.append((i, j+1))\n\n if i-1 >= 0 and j-1 >= 0: neighbors.append((i-1, j-1))\n if i-1 >= 0 and j+1 < self.GRID_J: neighbors.append((i-1, j+1))\n\n if i+1 < self.GRID_I and j-1 >= 0: neighbors.append((i+1, j-1))\n if i+1 < self.GRID_I and j+1 < self.GRID_J: neighbors.append((i+1, j+1))\n\n return neighbors", "title": "" }, { "docid": "b1bbcabfff2c724bcb504a00aedbcb21", "score": "0.7092296", "text": "def neighbors(self):\n raise RuntimeError('Compartment is a pure virtual class')", "title": "" }, { "docid": "8d27e1ea5ca4013c9f38b27e0b4c4bb3", "score": "0.7064458", "text": "def neighbors(self,pos):\r\n neighbors = ()\r\n for i in range(0,self.numNodes):\r\n if self.gArray[pos][i] != None:\r\n neighbors = neighbors + (i,)\r\n return neighbors", "title": "" }, { "docid": "7daa90c8f82f09bff54411b44ee6289d", "score": "0.70580554", "text": "def neighbors(self, u):\n return u.neighbors", "title": "" }, { "docid": "7964412c3db924453c7f51b9b22921e8", "score": "0.7049851", "text": "def get_neighbor_list( self, atoms ):\n cutoffs = [self.Rcut/2.0 for _ in range(len(atoms))]\n nlist = NeighborList(cutoffs,bothways=True,self_interaction=False,skin=0.0)\n nlist.update(atoms)\n return nlist", "title": "" }, { "docid": "822ae2af57c12ac369b6a68cd7384b1f", "score": "0.704949", "text": "def find_neighbors(self):\n k,i,j = self.global_index\n max_indx = 2**k\n max_indx_up = 2**(k-1)\n neighbors = []\n upper_neighbors = []\n\n\n neighbors = [ [None,None,None],[None,self.indx,None],[None,None,None]]\n upper_neighbors = [ [None,None,None],[None,None if self.parent is None else self.parent.indx,None],[None,None,None]]\n stencil = [(-1,0),(1,0),(0,-1),(0,1)]\n stencil += [(-1,1),(1,-1),(1,1),(-1,-1)]\n\n for di,dj in stencil:\n ii = i + di\n jj = j + dj\n if ii>=0 and jj>=0 and ii<max_indx and jj<max_indx:\n neighbors[1+di][1+dj] = self.name_from_index(k,ii,jj)\n iu = ii//2\n ju = jj//2\n ku = k-1\n if iu>=0 and ju>=0 and iu<max_indx_up and ju<max_indx_up:\n upper_neighbors[1+di][1+dj] = self.name_from_index(ku,iu,ju)\n return neighbors, upper_neighbors", "title": "" }, { "docid": "a383353085c713a12ae2941a6ed397b9", "score": "0.7032382", "text": "def neighbors(self):\n return iter(self.__edges.keys())", "title": "" }, { "docid": "ec7f1f8c8a80d90e183d718eb6e0c8d6", "score": "0.70323217", "text": "def get_neighbors(self, state: PuzzleState) 
-> List[Tuple['PuzzleState', str, float]]:\n return state.get_neighbors()", "title": "" }, { "docid": "6c512fbf124c8228e1c5875952995223", "score": "0.70249796", "text": "def get_neighbors(self, agent: Agent) -> list[Agent]:\n neighbors = list()\n for i in [-1, 0, 1]:\n for j in [-1, 0, 1]:\n if i == 0 and j == 0:\n continue\n cell = self.env[(agent.pos[0] + i) % self.grid_size,\n (agent.pos[1] + j) % self.grid_size]\n if cell:\n neighbors.append(cell)\n return neighbors", "title": "" }, { "docid": "cb5e50d6ead49a5e93894ec690db1e49", "score": "0.69782174", "text": "def get_neighbors(self):\n n = self.discovered_nodes.keys()", "title": "" }, { "docid": "a22de8ab25b223a3b7436b9d79a2f631", "score": "0.69661766", "text": "def neighbors(self, grid):\n neighbors = []\n north = self.row - 1, self.column\n if north[0] < 0:\n north = 0\n neighbors.append(0)\n if north:\n neighbors.append(grid[north[0]][north[1]])\n south = self.row + 1, self.column\n if south[0] >= self.rows:\n south = 0\n neighbors.append(0)\n if south:\n neighbors.append(grid[south[0]][south[1]])\n east = self.row, self.column + 1\n if east[1] >= self.columns:\n east = 0\n neighbors.append(0)\n if east:\n neighbors.append(grid[east[0]][east[1]])\n west = self.row, self.column - 1\n if west[1] < 0:\n west = 0\n neighbors.append(0)\n if west:\n neighbors.append(grid[west[0]][west[1]])\n return neighbors", "title": "" }, { "docid": "c1536df0e798c723ed2e856ea6c5a93b", "score": "0.69535315", "text": "def neighbors(self):\r\n dirt_pos = self.closest_dirt()\r\n row, col = self.bot_pos\r\n actions = [\r\n (\"CLEAN\", (row, col)),\r\n (\"UP\", (row - 1, col)),\r\n (\"DOWN\", (row + 1, col)),\r\n (\"LEFT\", (row, col - 1)),\r\n (\"RIGHT\", (row, col + 1))\r\n ]\r\n neighbors = []\r\n for action,(r,c) in actions:\r\n if 0<=r<=self.height and 0<=c<=self.width:\r\n neighbors.append((action, (r,c)))\r\n neighbors = sorted(neighbors,\r\n key=lambda action: abs(action[1][0]-dirt_pos[0])+abs(action[1][1]-dirt_pos[1])\r\n )\r\n #if dirt_pos==self.bot_pos:\r\n # neighbors.insert(0, (\"CLEAN\", (row, col)))\r\n return neighbors", "title": "" }, { "docid": "4fa98115d1a6aa019c4e6a034b1fffe5", "score": "0.69475746", "text": "def get_neighbours(self):\n # Append the lists of near and far neighbours.\n return self.near_neighbours + self.far_neighbours", "title": "" }, { "docid": "0aeeb7ac82daa43556b0fb4602b0738e", "score": "0.692971", "text": "def neighbors(board, i, j):\n return [board[i - 1][j], board[i + 1][j],\n board[i][j + 1], board[i][j - 1]]", "title": "" }, { "docid": "75da41ab3cd9a41a549e1290cfdc8d94", "score": "0.6929464", "text": "def get_neighbors(self, i):\n # Assume 1D grid of cells.\n neighbors = []\n graph = self.update_rule.graph\n for index in range(len(graph[0])): # Graph is an adj. 
matrix, so length will be same\n if graph[i][index] != 0: # is neighbour\n neighbors.append(index)\n return neighbors", "title": "" }, { "docid": "8f047b325de18590aaf161975088817b", "score": "0.69214743", "text": "def neighbors(self, node):", "title": "" }, { "docid": "25fd5dd811d00d044dcd705c681fe884", "score": "0.69015884", "text": "def compile_neighborlist(self):\n self.neighborlist = np.zeros( (self.nx, self.ny, 4, 2), dtype=int)\n # [i,j,:,:] = [[inorth, jnorth],\n # [isouth, jsouth],\n # [iwest, jwest],\n # [ieast, jeast]]\n for i in range(self.nx):\n for j in range(self.ny):\n self.neighborlist[i,j,0,:] = [i, (j-1)%(self.ny)]\n self.neighborlist[i,j,1,:] = [i, (j+1)%(self.ny)]\n self.neighborlist[i,j,2,:] = [(i-1)%(self.nx), j]\n self.neighborlist[i,j,3,:] = [(i+1)%(self.nx), j]", "title": "" }, { "docid": "1e65b85d41a8f3420b4984ecfa5c99bc", "score": "0.69008267", "text": "def neighbors(self, pos):\n\n i, j = pos\n result = []\n if i > 0:\n if not self.verticalWalls[i-1][j]: result.append((i-1, j))\n if j > 0:\n if not self.horizontalWalls[i][j-1]: result.append((i, j-1))\n if i < self.n-1:\n if not self.verticalWalls[i][j]: result.append((i+1, j))\n if j < self.m-1:\n if not self.horizontalWalls[i][j]: result.append((i, j+1))\n assert result, \"Empty neighbor list for position {p}\".format(self.currentPosition)\n return result", "title": "" }, { "docid": "378cc4982b390ddeaaf22e4b25c631ed", "score": "0.6887499", "text": "def neighbours(self):\n return [e.next(self) for e in self._edgelist]", "title": "" }, { "docid": "f4eddb75f764cda160d4e0cfe4c49090", "score": "0.6852747", "text": "def __neighbor_list(self, position):\n\n assert isinstance(position, tuple)\n x, y = position\n m = self.m\n n = self.n\n mode = self.boundary\n\n nxy = lambda x, y: [(x - 1, y - 1), (x - 1, y), (x - 1, y + 1),\n (x, y - 1), (x, y + 1), (x + 1, y - 1),\n (x + 1, y), (x + 1, y + 1)]\n if mode == 'periodic':\n return [(i % m, j % n) for i, j in nxy(x, y)]\n elif mode == 'fixed':\n valid_pix = lambda x, y, m, n: 0 <= x < m and 0 <= y < n\n return [(a, b) for a, b in nxy(x, y) if valid_pix(a, b, m, n)]\n else:\n return nxy(x, y)", "title": "" }, { "docid": "c2df8ceb8bfc450dade36920ce8b0a11", "score": "0.6847149", "text": "def neighbors(self) -> Neighbors:\r\n indptr, indices = self.delaunay.vertex_neighbor_vertices\r\n\r\n sizes = indptr[1:] - indptr[:-1]\r\n\r\n neighbors = -1 * np.ones(\r\n shape=(self.parameters, int(np.max(sizes))), dtype=\"int\"\r\n )\r\n\r\n for k in range(self.parameters):\r\n neighbors[k][0 : sizes[k]] = indices[indptr[k] : indptr[k + 1]]\r\n\r\n return Neighbors(arr=neighbors.astype(\"int\"), sizes=sizes.astype(\"int\"))", "title": "" }, { "docid": "57e4d79c1914a7f414c2f0b5710479b0", "score": "0.6829172", "text": "def neighbours(self, radius = 1):\n x, y = self.x, self.y\n results = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1), (x + 1, y + 1), (x + 1, y - 1), (x - 1, y + 1), (x - 1, y - 1)]\n if radius == 2:\n for r in results:\n a, b = r\n results += [(a + 1, b), (a - 1, b), (a, b + 1), (a, b - 1), (a + 1, b + 1), (a + 1, b - 1), (a - 1, b + 1), (a - 1, b - 1)]\n results = list(set(results))\n nbrs = [r for r in results if r in Cell.C.keys() and not Cell.C[r].is_barrier]\n return nbrs", "title": "" }, { "docid": "c87c15b4ec1e785f569955ad5d01d351", "score": "0.6824841", "text": "def cell_neighbors(self, cell):\n nbrs = []\n for face in self.cell_faces(cell):\n nbr = self.halfface_opposite_cell(face)\n if nbr is not None:\n nbrs.append(nbr)\n return nbrs", "title": "" }, { 
"docid": "4a43d8f519ce496f0dff867ea81fc281", "score": "0.68247163", "text": "def _neighbors(self, point):\n return [point - 1, point + 1, point - self.NS, point + self.NS]", "title": "" }, { "docid": "430202fbb90a816691e81b5a4d6ef88b", "score": "0.68173313", "text": "def get_neighbors(self, ecosystem, width, height, distance=1):\n r = range(0 - distance, 1 + distance)\n neighbors = []\n for i in r:\n for j in r:\n if not i == j == 0:\n point_x, point_y = self.__neighbor_position(i, j, width, height)\n neighbor = ecosystem[point_x][point_y]\n if neighbor.live is True:\n neighbors.append(neighbor)\n return neighbors", "title": "" }, { "docid": "f612b67874ad935c037d6903a1f11707", "score": "0.68157583", "text": "def neighbors(self,x,y):\n\n n = []\n\n if x > 0 and ( self.grid[y][x-1] & Prim.IN ) != 0:\n n.append((x-1,y)) \n if x+1 < self.width and ( self.grid[y][x+1] & Prim.IN ) != 0:\n n.append((x+1,y))\n if y > 0 and ( self.grid[y-1][x] & Prim.IN ) != 0:\n n.append((x,y-1))\n if y+1 < self.height and ( self.grid[y+1][x] & Prim.IN ) != 0:\n n.append((x,y+1))\n\n return n", "title": "" }, { "docid": "876801791c4b8edeb5c74badac1fe776", "score": "0.6798272", "text": "def neighbors(self, index):\n pass", "title": "" }, { "docid": "f2fd3ee6213679203878cfe91112e9b3", "score": "0.6798064", "text": "def get_neighbours(self, current_point):\n\n neighbours = []\n for point1, point2 in self.prm_points_to_connection:\n if current_point == point1:\n neighbours.append(point2)\n if current_point == point2:\n neighbours.append(point1)\n return neighbours", "title": "" }, { "docid": "662f0ccc87a44701bf0b31ac2393dc9c", "score": "0.6783106", "text": "def list_neighbors(self, person: SimPerson) -> List[SimPerson]:\n x, y = person.x, person.y\n neighbors = []\n for neighbor_x in range(x - 1, x + 2):\n for neighbor_y in range(y - 1, y + 2):\n if (\n (neighbor_x == x and neighbor_y == y)\n or not (0 <= neighbor_x < 1 + self.radius * 2)\n or not (0 <= neighbor_y < 1 + self.radius * 2)\n ):\n continue\n neighbors.append(self.people_matrix[neighbor_y][neighbor_x])\n return neighbors", "title": "" }, { "docid": "eb24350a4bf9320b5a6a644b97e2d461", "score": "0.67512184", "text": "def neighbors(self):\n cache = self.scheduler.zone_cache\n if not cache:\n die(\"update_zone() must have been called for zone caching.\")\n\n p = self.mobility.current\n i, j = self.zone()\n neighbors = []\n # check nine zones including/surrounding the current one\n for dj in [-1, 0, 1]:\n if j + dj < 0:\n continue\n for di in [-1, 0, 1]:\n if i + di < 0:\n continue\n if not cache.get(j + dj, None):\n continue\n if not cache[j + dj].get(i + di, None):\n continue\n for agent in self.scheduler.zone_cache[j + dj][i + di]:\n if agent == self:\n continue\n q = agent.mobility.current\n if abs(p[0] - q[0]) > self.range_:\n continue\n if abs(p[1] - q[1]) > self.range_:\n continue\n if math.sqrt((p[0] - q[0])**2 +\n (p[1] - q[1])**2) > self.range_:\n continue\n neighbors.append(agent)\n return neighbors", "title": "" }, { "docid": "2b9e751ee023b64b3729c0b8b8512346", "score": "0.673421", "text": "def _get_neighbours(self, cell):\n\n neighbours = cell.neighbours()\n return [self.maze[coord] for coord in neighbours] # Cell.neighbours() returns coordinates not Cell", "title": "" }, { "docid": "125ad0113c5c2e15debe49f481fe5114", "score": "0.67302096", "text": "def get_neighbors(self, r: int, c: int) -> list:\n\n tmp = [\n (r - 1, c - 1),\n (r - 1, c),\n (r, c - 1),\n (r, c + 1),\n (r + 1, c),\n (r + 1, c + 1),\n ]\n neighbors = []\n for cell in tmp:\n row = 
cell[0]\n col = cell[1]\n if self.is_legal_cell(row, col):\n neighbors.append(cell)\n return neighbors", "title": "" }, { "docid": "8a9d4586580bcf7c19556a4c91c54e31", "score": "0.67237586", "text": "def get_neighbors(\n self,\n pos: Coordinate,\n moore: bool,\n include_center: bool = False,\n radius: int = 1,\n ) -> List[GridContent]:\n return list(self.iter_neighbors(pos, moore, include_center, radius))", "title": "" }, { "docid": "8856cf1d138aac23a30c486507d966d4", "score": "0.6715011", "text": "def _getNeighbors(self, ind):\r\n \r\n search_array = copy.deepcopy(self._dist_matrix[ind,:])\r\n search_array[search_array > self._epsilon] = -1\r\n neighbor_list = [i for i, val in enumerate(search_array) if val >= 0]\r\n \r\n return neighbor_list", "title": "" }, { "docid": "b0b6eda8e8e0e25c2e88893a4af4145a", "score": "0.6702187", "text": "def get_neighbors(self, node):\n return sorted(super().get_neighbors(node))", "title": "" }, { "docid": "367a5772af72d24cb6af2c5396f39b3b", "score": "0.6693415", "text": "def neighbors(self, obj):\n neighbors = set([])\n \n for e in self.node_links[obj]:\n neighbors.update(set(self.edge_links[e]))\n \n return list(neighbors - set([obj]))", "title": "" }, { "docid": "a9e4f107ba0f330cdf1aa492b1eb3880", "score": "0.6656103", "text": "def get_neighbors(self, p):\n\t\tx, y = p\n\t\treturn [(x+i, y+j) for i in range(-1, 2) for j in range(-1, 2)]", "title": "" }, { "docid": "3a4684cd252d8ebda6f5e3c1badf0a82", "score": "0.6651221", "text": "def get_neighbors(self, x, y):\n\t\tif x == 0:\n\t\t\tleft = self.width - 1\n\t\telse:\n\t\t\tleft = x - 1\n\n\t\tif x == self.width - 1:\n\t\t\tright = 0\n\t\telse:\n\t\t\tright = x + 1\n\n\t\tif y == 0:\n\t\t\ttop = self.height - 1\n\t\telse:\n\t\t\ttop = y - 1\n\n\t\tif y == self.height - 1:\n\t\t\tbottom = 0\n\t\telse:\n\t\t\tbottom = y + 1\n\n\t\treturn [\n\t\t\tself.grid[left][top],\n\t\t\tself.grid[x][top],\n\t\t\tself.grid[right][top],\n\t\t\tself.grid[right][y],\n\t\t\tself.grid[right][bottom],\n\t\t\tself.grid[x][bottom],\n\t\t\tself.grid[left][bottom],\n\t\t\tself.grid[left][y]\n\t\t]", "title": "" }, { "docid": "6b8f3db04dc8c150e7459ba22164d758", "score": "0.6637629", "text": "def get_neighbors(self, r, c):\n\n tmp = [\n (r - 1, c),\n (r - 1, c + 1),\n (r, c - 1),\n (r, c + 1),\n (r + 1, c - 1),\n (r + 1, c),\n ]\n neighbors = []\n for cell in tmp:\n row = cell[0]\n col = cell[1]\n if self.is_legal_cell(row, col):\n neighbors.append(cell)\n return neighbors", "title": "" }, { "docid": "4cfbf73a25b3eef1c9b685a9ccc61f6c", "score": "0.66286135", "text": "def find_neigthbors(self):\n self.neighborhood = []\n\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n representation = np.copy(self.representation)\n representation[i][j] = np.abs(representation[i][j] - 1)\n neighbor = Student(self.state.student_id, representation, self.state.materials)\n self.neighborhood.append(neighbor)", "title": "" }, { "docid": "bf685f93e92d3db6cf78e3ff737ae9ea", "score": "0.66084766", "text": "def get_neighbors(self,node):\n return self.model.get_ant_neighbors(node)", "title": "" }, { "docid": "501023e2ecad741a0df170a1778b624b", "score": "0.6602331", "text": "def get_bb_neighbours(self,bb):\n #TODO: right now only neighbours of 1x1x1 buildingblocks are supported\n nbb = []\n for i in (0,1,2):\n for j in (-1,1):\n ind = np.array(bb.index)\n ind[i] += j \n ind= tuple(ind)\n if ind in self._grid:\n conn_vec = vec(*ind) - vec(*bb.index)\n nbb.append([self._grid[ind], conn_vec])\n \n return nbb", "title": "" }, { "docid": 
"a71a53403243a67a4fa07b3cded3d101", "score": "0.660053", "text": "def get_neighbour_cells(self, x, y):\n return [self[x + i, y] for i in (-1, 1)] + [self[x, y + j] for j in (-1, 1)]", "title": "" }, { "docid": "9a2112be791bde980cf974d0090ef64e", "score": "0.6596537", "text": "def get_neighbors(self):\n for i in range(self.m):\n for j in range(self.n):\n neighbors = np.zeros((self.DEGREE,2), dtype=int)\n if self.m == 1:\n # lattice is actually a ring\n neighbors[0] = (i, (j + 1) % self.n)\n neighbors[1] = (i, (j - 1) % self.n)\n elif self.n == 1:\n # lattice is actually a ring\n neighbors[0] = ((i + 1) % self.m, j)\n neighbors[1] = ((i - 1) % self.m, j)\n else:\n # lattice is not a ring\n neighbors[0] = (i, (j + 1) % self.n)\n neighbors[1] = (i, (j - 1) % self.n)\n neighbors[2] = ((i + 1) % self.m, j)\n neighbors[3] = ((i - 1) % self.m, j)\n self.neighbors[i][j] = neighbors", "title": "" }, { "docid": "b0269a2e8e467682dd2a605f18232d1f", "score": "0.6593525", "text": "def getNeighborCells(self, pos: tuple):\n return self.getCornerCells(pos) + self.getAdjacentCells(pos)", "title": "" }, { "docid": "6bf5528caef3884e9f4b40240dfabe7b", "score": "0.65589815", "text": "def get_neighborlist(atom, cutoff_dict):\n \n\n nl_i, nl_j = ase.neighborlist.neighbor_list('ij', atom, cutoff_dict)\n nl = [[] for i in range(atom.get_global_number_of_atoms())]\n for k in range(len(nl_i)):\n i, j = nl_i[k], nl_j[k]\n nl[i].append(j)\n return nl", "title": "" }, { "docid": "16e3ef4dfd5a20eeddc9cc693d231fa5", "score": "0.6553607", "text": "def get_neighbors(self) -> List[Tuple['PuzzleState', str, float]]:\n next_states = []\n i, j = self.empty_pos\n row = self.state[i]\n elem = row[j]\n if elem == 0:\n if i != self.height - 1:\n next_states.append((self.next_state('S'), 'S', 1))\n if i != 0:\n next_states.append((self.next_state('N'), 'N', 1))\n if j != self.width - 1:\n next_states.append((self.next_state('E'), 'E', 1))\n if j != 0:\n next_states.append((self.next_state('W'), 'W', 1))\n\n return next_states", "title": "" }, { "docid": "8fa04c08b1b606ef545998dff7f7528e", "score": "0.65397185", "text": "def get_neighbors(\n self, pos: Coordinate, include_center: bool = False, radius: int = 1\n ) -> list[Agent]:\n return list(self.iter_neighbors(pos, include_center, radius))", "title": "" }, { "docid": "1861ec0e84be09e6cd0c763e53fca54b", "score": "0.6530201", "text": "def revNeighbors(self,pos):\r\n neighbors = ()\r\n for i in range(0,self.numNodes):\r\n if self.gArray[i][pos] != None:\r\n neighbors = neighbors + (i,)\r\n return neighbors", "title": "" }, { "docid": "97faaa67944d3da8475fbf0e3820989f", "score": "0.6527126", "text": "def get_list_of_neigbors(self, n=1):\n \n Neighbors = []\n if n == 1:\n for neighbor in self.Neighbors:\n Neighbors.append([neighbor.x, neighbor.y])\n elif n == 2:\n for neighbor in self.Neighbors2:\n Neighbors.append([neighbor.x, neighbor.y])\n elif n == 3:\n for neighbor in self.Neighbors3:\n Neighbors.append([neighbor.x, neighbor.y])\n\n return Neighbors", "title": "" }, { "docid": "7509933387dd37c8ab60bc7b2d93568e", "score": "0.65141195", "text": "def neighbors(self, label):\n\n\t\tresults = []\n\n\t\tfor bond in self.bonds:\n\t\t\tif label == bond.atom1:\n\t\t\t\tresults.append(bond.atom2)\n\t\t\telif label == bond.atom2:\n\t\t\t\tresults.append(bond.atom1)\n\n\t\treturn results", "title": "" }, { "docid": "d1bf3acebea4ac082a11243e74b68b25", "score": "0.6506752", "text": "def get_neighbors(self, node):\n neighbors = []\n for edge in self.edges:\n if node == edge[0]:\n 
neighbors.append((edge[1], edge[2]))\n if node == edge[1]:\n neighbors.append((edge[0], edge[2]))\n return neighbors", "title": "" }, { "docid": "9dd90524d5ba2d6cf648e69ce5fffa12", "score": "0.6479905", "text": "def get_neighbors(\n self,\n pos: Coordinate,\n moore: bool,\n include_center: bool = False,\n radius: int = 1,\n ) -> list[Agent]:\n return list(self.iter_neighbors(pos, moore, include_center, radius))", "title": "" }, { "docid": "7d3d4262d17004c1f83f42017339c7a4", "score": "0.64783627", "text": "def get_neighbours_and_directions(self, from_position):\n \n # Transform index into board matrix into index into index into neighbour matrix\n from_row_index = self.board_to_connection_index(from_position)\n row = self.connection_matrix[from_row_index]\n \n neighbours = []\n for col_num in range(0, len(row)): \n if row[col_num]:\n # Transform index into board index\n board_index = self.connection_to_board_index(col_num)\n if self.board[board_index[0]][board_index[1]].state != PegState.REMOVED:\n neighbours.append((board_index, row[col_num])) # Store board index and direction in neighbours\n return neighbours", "title": "" }, { "docid": "ffa76a235bee69ed55b157907e4fe3fe", "score": "0.6476608", "text": "def getNeighbors(self, pq, pc):\n k = self._nneighbors\n neighbors = []\n for i in range(len(pc)):\n dist = np.linalg.norm(pq-pc[i])\n if dist <= self._radius: #0.005\n neighbors.append((dist, i))\n #print(\"Found {} neighbors\".format(len(neighbors)))\n neighbors.sort(key=lambda x:x[0])\n neighbors.pop(0)\n return neighbors[:k]", "title": "" }, { "docid": "9eea8330fcc1adade1a5f453405c0aef", "score": "0.6476282", "text": "def neighbors(self, node):\n x, y = node\n return [(ax, ay) for ax, ay in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)] if self.test_tile_normal(ax, ay)]", "title": "" }, { "docid": "7b0c09b53b7942d96c713eda1d554b8e", "score": "0.6455129", "text": "def Neighbors(room, Room_List):\n \n neighbors = []\n \n for ROOM in Room_List:\n \n if ROOM != room and Shared_Transition_Exists(room, ROOM) == True:\n \n neighbors.append(ROOM)\n \n return neighbors", "title": "" }, { "docid": "9fd8546bc0eeef016957af12b3094a2d", "score": "0.64326215", "text": "def _get_neighbor_ips(self):\n return self.__neighbor_ips", "title": "" }, { "docid": "c28386e60677dda61e9c2b76dbfeb7cf", "score": "0.6429979", "text": "def generate_neighbors_list(wheel_focus, radius):\n wheel_vector = wheel_focus[1]\n neighbors = []\n angle_increment = math.radians(360 / calculations.num_neighbors)\n fx, fy, _ = WHEEL_FOCUS\n vector_neighbors = calculations.calculate_neighbors(wheel_vector, radius)\n if len(vector_neighbors) < calculations.num_neighbors:\n return []\n for i in range(calculations.num_neighbors):\n angle = angle_increment * i\n x = int(WHEEL_RADIUS * math.cos(angle)) + fx\n y = int(WHEEL_RADIUS * math.sin(angle)) + fy\n neighbors.append([(x, y, NEIGHBOR_RADIUS), vector_neighbors[i]])\n return neighbors", "title": "" }, { "docid": "fd51d9ee5d0425be555021d79669c5b0", "score": "0.6426882", "text": "def neighbours(self, tile_loc):\n return [self[loc] for loc in self.neighbour_locs(tile_loc)]", "title": "" }, { "docid": "b651d8ca73ae1cc45049b3b3d016bad3", "score": "0.6418391", "text": "def get_neighbours(self, point):\n\n loc_x = point[0]\n loc_y = point[1]\n width, height = sw_helper.WIDTH,sw_helper.HEIGHT\n node_size = 1\n\n neighbors_in = [(loc_x - node_size, loc_y), (loc_x, loc_y + node_size), (loc_x + node_size, loc_y), (loc_x, loc_y - node_size), \\\n (loc_x - node_size, loc_y - node_size),(loc_x + 
node_size, loc_y + node_size),(loc_x + node_size, loc_y - node_size),\n (loc_x + node_size, loc_y - node_size),]\n\n neighbors_out = []\n\n for option in neighbors_in:\n\n if (option[0] >= 0 and option[0] < width) and (option[1] >= 0 and option[1] < height):\n\n self.temp.centerx = option[0]\n self.temp.centery = option[1]\n if self.temp.collidelist(self.obstacles) == -1:#\n neighbors_out.append(option)\n #print time.time()\n\n return neighbors_out", "title": "" }, { "docid": "b541b9059dfcac157753fafbb1d529bf", "score": "0.6403495", "text": "def connected_components(self) -> List[list]:\n if self.Graph is None:\n return []\n return self.Trajans()", "title": "" }, { "docid": "211533f47d26b3c38eee74280219dda6", "score": "0.63980794", "text": "def get_neighbors(self, ai):\n n_filter = [self.is_bonded(ai, n + 1) or self.is_bonded(n + 1, ai) for n in range(len(self.atoms))]\n neighbors_filterd = compress(self.atoms, n_filter)\n return [atom[\"nr\"] for atom in neighbors_filterd]", "title": "" }, { "docid": "13aac6de5290aa4d37b29d29f4ac5587", "score": "0.6384271", "text": "def get_focal_point_plasticity_neighbor_list(self, cell) -> []:\n if self.focal_point_plasticity_plugin is None:\n return []\n else:\n return [fppd.neighborAddress for fppd in self.get_focal_point_plasticity_data_list(cell)]", "title": "" }, { "docid": "12c3d4c05fc2c6070c8414e7fb9dadfe", "score": "0.6383509", "text": "def neighbors(a, radius, rowNumber, columnNumber, agent):\n house_neighbors = []\n\n # Add any neighbors in range thar are not the agent itself.\n for i in range(rowNumber - radius, rowNumber + radius + 1):\n for j in range(columnNumber - radius, columnNumber + radius + 1):\n if 0 <= i < len(a) and 0 <= j < len(a[0]):\n if not a[i][j].empty and a[i][j].occupant != agent:\n house_neighbors.append(a[i][j].occupant)\n return house_neighbors", "title": "" }, { "docid": "a6ab743222d73f3be83644336f6f4e90", "score": "0.6380879", "text": "def neighbor_addresses(row, col):\n return [[row-1, col-1], [row-1, col], [row-1, col+1], \\\n [row, col-1], [row, col+1], \\\n [row+1, col-1], [row+1, col], [row+1, col+1]]", "title": "" }, { "docid": "28eb3301d7e39723ee3b18ce1ba267c0", "score": "0.6351045", "text": "def get_neighbors(self, agent):\n if not self._neighborhood or self._neighborhood.agents != self.agents:\n self._create_neighborhood()\n\n idx = self._neighborhood.idx[agent]\n neighbors_idx = self._neighborhood.neighbors[idx]\n neighbors = [self.agents[i] for i in neighbors_idx]\n return neighbors", "title": "" }, { "docid": "aa39337b79865be136ee83c0b7481f6d", "score": "0.6350291", "text": "def neighbours_of(self, position):\n i = position[0] # x coordinate\n j = position[1] # y coordinate\n neighbours = list(itertools.product(range(i-1, i+2), range(j-1, j+2)))\n neighbours.remove(position)\n return neighbours", "title": "" }, { "docid": "629be3d038bc6b303c2acc2470091cd4", "score": "0.63446283", "text": "def get_neighbors(v, h):\n coord = canvas[v][h]\n neighbors = []\n off_grid = [-1, len(canvas) + 1]\n\n coords_to_check = [(v + 1, h),\n (v - 1, h),\n (v, h + 1),\n (v, h - 1)\n ]\n for coord in coords_to_check:\n if coord[0] in off_grid or coord[1] in off_grid:\n continue\n neighbors.append(coord)\n\n return neighbors", "title": "" }, { "docid": "875109e7e8985590e1ee022fb67c0e99", "score": "0.6342258", "text": "def get_neighbors(self, idx):\n return self.graph_rep.neighbors(int(idx))", "title": "" }, { "docid": "9244de1e2f88963cd1c67d306e60c271", "score": "0.63196963", "text": "def actuator_neighbors(self):\n 
dm_actuator_neighbors = [] # initialize the empty list of neighboring actuators\n\n for row_i in range(len(self.dm_array)):\n for col_j in range(len(self.dm_array[row_i])): \n if self.dm_array[row_i][col_j] != -1: # make sure the index at (i,j) is represents a real actuator\n start_actuator = self.dm_array[row_i][col_j] # this will be the actuator examined in the for loop\n # if j is not in the last column and the east neighbor isn't -1, add these neighbors to the list \n if col_j !=len(self.dm_array[row_i])-1:\n neighbor = self.dm_array[row_i][col_j+1]\n if neighbor != -1:\n dm_actuator_neighbors.append([start_actuator,neighbor])\n # if row_i is not the last row, the south/southeast/southwest neighbors may be valid\n if row_i!=len(self.dm_array)-1:\n # determine if the southern neighbor is valid\n neighbor = self.dm_array[row_i+1][col_j]\n if neighbor != -1: \n dm_actuator_neighbors.append([start_actuator,neighbor])\n # if col_j is not the last column, determine if the southeastern neighbor is valid\n if col_j != len(self.dm_array[row_i])-1:\n neighbor = self.dm_array[row_i+1][col_j+1]\n if neighbor != -1:\n dm_actuator_neighbors.append([start_actuator,neighbor])\n # if col_j is not the first column, determine if the southwestern neighbor is valid\n if col_j!=0:\n neighbor = self.dm_array[row_i+1][col_j-1]\n if neighbor != -1:\n dm_actuator_neighbors.append([start_actuator,neighbor])\n\n return dm_actuator_neighbors", "title": "" }, { "docid": "83b0d03785f1812d3c412e8c1c8b0991", "score": "0.6319559", "text": "def neighbors(self, u):\n return self.Adj[u]", "title": "" }, { "docid": "41f0a5bba1aab886718b1a8aa01794b2", "score": "0.6314305", "text": "def get_neighbors(r,c): \n\t#defines the neighbors of any interior r,c coordinate pair in terms of r,c\n\treturn [(r-1,c-1),(r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1,c-1), (r+1,c), (r+1, c+1)]", "title": "" }, { "docid": "6fbabfa67d2703ca0e174a192ad65053", "score": "0.6306301", "text": "def neighbors(self, node: Tuple[int, int], vertices: Set[Tuple[int, int]]\n ) -> List[Tuple[int, int]]:\n n = []\n x, y = node\n for nx, ny in NEIGHBORS:\n Nx = x + nx\n Ny = y + ny\n if Nx >= 0 and Nx < self.grid.width and\\\n Ny >= 0 and Ny < self.grid.height and\\\n (Nx, Ny) in vertices:\n n.append((Nx, Ny))\n return [i for i in n if self.grid[i].value == 0]", "title": "" }, { "docid": "17aa491e35c6b9a4ec1f9fbd88a7a116", "score": "0.62967974", "text": "def return_neighbors(self, point):\n \tx, y = point\t\n\t\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), \t\t (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "title": "" }, { "docid": "6581d6764599ee1c9737b4cbbdc70aa8", "score": "0.62901545", "text": "def neighbors(murs,courrant):\n liste_voisins = []\n (ligne,colonne) = courrant\n \n for direction in [(0,1),(0,-1),(1,0),(-1,0)]:\n prochaine_ligne = ligne + direction[0]\n prochaine_colonne = colonne + direction[1]\n prochain = (prochaine_ligne,prochaine_colonne)\n if ((prochain not in murs) and prochaine_ligne >= 0 and prochaine_colonne >= 0 and prochaine_ligne <= 19 and prochaine_colonne <= 19):\n liste_voisins.append(prochain)\n \n return liste_voisins", "title": "" }, { "docid": "cf9fb32f3226332d1053a7ae58d69265", "score": "0.62863743", "text": "def neighbors(self, d=0.001):\n return Building.objects.filter(\n latitude__range=(self.latitude - d, self.latitude + d),\n longitude__range=(self.longitude - d, self.longitude + d),\n )", "title": "" }, { "docid": "f056f6213ee2ba7dcaa312e8454e2a88", "score": "0.6284621", "text": 
"def get_neighboring_tiles(self, i: int, j: int) -> list:\n dirs = [(-1, -1), (-1, 0), (-1, 1), (0, -1),\n (0, 1), (1, -1), (1, 0), (1, 1)]\n neighbors = []\n for dx, dy in dirs:\n nx, ny = i + dx, j + dy\n # check bounds\n if 0 <= nx and nx < self._dim and 0 <= ny and ny < self._dim:\n neighbors.append(self._tiles[nx][ny])\n\n return neighbors", "title": "" }, { "docid": "bc23bb32d9230171559ce306b7532ed6", "score": "0.6281782", "text": "def nearest_neighbors(self):\n self.update_nearest_neighbors()\n nearest_neighbors = []\n for atom in self._atoms:\n nearest_neighbors.append(atom.NN)\n self._nearest_neighbors = nearest_neighbors[:]\n return np.asarray(self._nearest_neighbors)", "title": "" }, { "docid": "644dc3090d3d462cf53058a8d9b1b459", "score": "0.6276704", "text": "def neighbors(self, node):\r\n raise NotImplementedException", "title": "" } ]
8ce42f3ff08b29817d4f733e56678d1c
Get basic player info.
[ { "docid": "5ac8172ffbecca36926ad6f7d44dbd0a", "score": "0.0", "text": "def get_players(self):\n for i, player in enumerate(self._header.initial.players[1:]):\n achievements = self.get_achievements(player.attributes.player_name)\n if achievements:\n winner = achievements.victory\n else:\n winner = self.guess_winner(i + 1)\n yield {\n 'name': player.attributes.player_name,\n 'civilization': player.attributes.civilization,\n 'human': self._header.scenario.game_settings.player_info[i + 1].type == 'human',\n 'number': i + 1,\n 'color_id': player.attributes.player_color,\n 'winner': winner,\n 'mvp': achievements.mvp if achievements else None,\n 'score': achievements.total_score if achievements else None,\n 'position': (player.attributes.camera_x, player.attributes.camera_y)\n }", "title": "" } ]
[ { "docid": "8ece88e1fdff785b25835f2101797e0d", "score": "0.70133686", "text": "async def playerinfo(self, ctx):\n player = self.bot.wavelink.get_player(ctx.guild.id, cls=Player)\n node = player.node\n\n used = humanize.naturalsize(node.stats.memory_used)\n total = humanize.naturalsize(node.stats.memory_allocated)\n free = humanize.naturalsize(node.stats.memory_free)\n cpu = node.stats.cpu_cores\n\n embed = discord.Embed(color=self.bot.embed_color, title=f'WaveLink: {wavelink.__version__}')\n\n fmt = f'Connected to `{len(self.bot.wavelink.nodes)}` nodes.\\n' \\\n f'Best available Node `{self.bot.wavelink.get_best_node().__repr__()}`\\n' \\\n f'`{len(self.bot.wavelink.players)}` players are distributed on nodes.\\n' \\\n f'`{node.stats.players}` players are distributed on server.\\n' \\\n f'`{node.stats.playing_players}` players are playing on server.\\n\\n' \\\n f'Server Memory: `{used}/{total}` | `({free} free)`\\n' \\\n f'Server CPU: `{cpu}`\\n\\n' \\\n f'Server Uptime: `{datetime.timedelta(milliseconds=node.stats.uptime)}`'\n\n embed.description = fmt\n await ctx.send(embed=embed)", "title": "" }, { "docid": "f44e0f7b78749fe5235ddae86a19c028", "score": "0.68854386", "text": "def print_info():\n media=player.get_media()\n print \"State:\", player.get_state()\n print \"Media:\", media.get_mrl()\n try:\n print \"Current time:\", player.get_time(), \"/\", media.get_duration()\n print \"Position:\", player.get_position()\n print \"FPS:\", player.get_fps()\n print \"Rate:\", player.get_rate()\n print \"Video size: (%d, %d)\" % (player.video_get_width(), player.video_get_height())\n except Exception:\n pass", "title": "" }, { "docid": "4162bea77ae74403aea2c1f2fa342ab4", "score": "0.67473143", "text": "async def get_player_info(self, client, message):\n logging.info(\"lol_player requested by \" + message.author.name + \" on \" + message.channel.name)\n search_arg = message.content[12:]\n if len(search_arg) == 0:\n await client.send_message(message.channel, \"No user name is provided!\")\n return\n target_url = \"http://best.gg/player/\" + search_arg\n await client.send_typing(message.channel)\n raw_page = requests.get(target_url, headers={\"Accept-Language\": \"en-US\"})\n soup = BeautifulSoup(raw_page.text, \"lxml\")\n try:\n # Parse html elements and get needed values\n img_src = \"http:\" + soup.find(\"img\", {\"class\": \"player__profile-face-img\"})[\"src\"]\n info_div = soup.find(\"div\", {\"class\": \"player__profile-info\"})\n player_name = info_div.find(\"div\", {\"class\": \"player__profile-info-name\"}).text\n player_team = info_div.find(\"div\", {\"class\": \"player__profile-info-team-team\"}).text\n player_league = info_div.find(\"span\", {\"class\": \"player__profile-info-team-league\"}).text\n player_position = info_div.find(\"div\", {\"class\": \"player__profile-info-team-position\"}).text\n player_realname = info_div.find(\"span\", {\"class\": \"player__profile-info-full-name-name\"}).text\n player_birth_div = info_div.find(\"div\", {\"class\": \"player__profile-info-birth\"}).find(\"span\")\n # Birth date is not always provided\n if player_birth_div is None:\n player_birth = \" \"\n else:\n player_birth = player_birth_div.text\n # Create embed to send back\n result_embed = discord.Embed(title=player_realname, description=player_birth)\n result_embed.set_author(name=player_name, url=target_url)\n result_embed.set_thumbnail(url=img_src)\n result_embed.add_field(name=\"TEAM\", value=player_team, inline=True)\n result_embed.add_field(name=\"LEAGUE\", value=player_league, 
inline=True)\n result_embed.add_field(name=\"POSITION\", value=player_position, inline=True)\n except (AttributeError, TypeError):\n await client.send_message(message.channel, \"Player not found.\")\n return\n await client.send_message(message.channel, embed=result_embed)", "title": "" }, { "docid": "0df98c7efa396c25fec22173dfdaee2c", "score": "0.66520447", "text": "def get_player(self):\n return self.player", "title": "" }, { "docid": "95f197da17886fe93ddb8c7b726a4e9a", "score": "0.6547532", "text": "def get_stats(player):\n return full_stats[player]", "title": "" }, { "docid": "a672c775bd7db024a38f294633124bff", "score": "0.6517351", "text": "def get_players_info(self):\n return [{ \"pid\":str(pid),\n \"name\": player.get_name(),\n \"x0\": player.get_x(),\n \"hp\": player.get_hp(),\n \"color\": player.get_color()\n } for pid, player in self.__players.items()]", "title": "" }, { "docid": "5b4b290abb07763ebab05a4b39a557df", "score": "0.65104836", "text": "def getPlayer(self):\n return self.player", "title": "" }, { "docid": "5b4b290abb07763ebab05a4b39a557df", "score": "0.65104836", "text": "def getPlayer(self):\n return self.player", "title": "" }, { "docid": "2bac50fcc10b41ace8f4729842ef6cd6", "score": "0.6450842", "text": "async def player(self, uuid: str) -> dict:\n async with self.session.get('https://api.hypixel.net/player?key=' + API_KEY + '&uuid=' + uuid) as response:\n return await response.json()", "title": "" }, { "docid": "7e5710d08cea685d339ceb4fe39d358c", "score": "0.6425772", "text": "def getPlayerInfo(playerID):\n try:\n pageData = urllib2.urlopen('http://www.nfl.com/players/profile?id=' + playerID).read()\n \n heightTokens = reHeight.findall(pageData)[0].split('-')\n height = int(heightTokens[0]) * 12 + int(heightTokens[1])\n \n return {'name': reName.findall(pageData)[0], \n 'position': rePosition.findall(pageData)[0], \n 'height': height, \n 'weight': int(reWeight.findall(pageData)[0]), \n 'age': int(reAge.findall(pageData)[0]), \n 'college': reCollege.findall(pageData)[0], \n 'team': reTeam.findall(pageData)[0]}\n except:\n print 'Failed to load', playerID", "title": "" }, { "docid": "10806c672b4436085e7ff219700aebad", "score": "0.6418098", "text": "def get_player_sample():\n\n return player.Player.get_players()", "title": "" }, { "docid": "2dca5d11e7a39b6fc2d84421040731c2", "score": "0.64119065", "text": "def get_player_name(player):\r\n return player['name']", "title": "" }, { "docid": "07ea68c4ad03393e432a7321f764c8b4", "score": "0.6387765", "text": "def test_get_player(self):\n pass", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { 
"docid": "c6d3b012b441026cdcca1461a4873b66", "score": "0.63756984", "text": "def get_player(self):\n return self._player", "title": "" }, { "docid": "9cd7671248b825cfa99cedfcdcdff567", "score": "0.63511395", "text": "def get_player(self):\n return self.__player__", "title": "" }, { "docid": "4ea76b800e8a1b8f8f7eaa687165fd9c", "score": "0.6333152", "text": "def nflinfo(self, irc, msg, args, optplayer):\n\n # enforce +voice or above to use command?\n if self.registryValue('requireVoiceForCalls', msg.args[0]): # should we check?\n if ircutils.isChannel(msg.args[0]): # are we in a channel?\n if not irc.state.channels[msg.args[0]].isVoicePlus(msg.nick): # are they + or @?\n irc.error(\"ERROR: You have to have voice to use this command in {0}.\".format(msg.args[0]))\n return\n\n pf = self._pf(\"e\", optplayer)\n # did we find the player or get anything back?\n if not pf:\n irc.reply(\"ERROR: Sorry, I was unable to find any player matching '{0}'. Spell the player's name correctly?\".format(optplayer))\n # lets try to help them out with similar names.\n sp = self._similarPlayers(optplayer)\n if sp: # if we get something back, lets return the fullnames.\n irc.reply(\"Possible suggestions: {0}\".format(\" | \".join([i['fullname'].title() for i in sp])))\n # now exit regardless.\n return\n # we did get it. lets go http fetch the page.\n html = self._httpget(pf)\n if not html:\n irc.reply(\"ERROR: Failed to fetch {0}.\".format(url))\n self.log.error(\"ERROR opening {0}\".format(url))\n return\n # process html.\n soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, fromEncoding='utf-8')\n # find the main div\n div = soup.find('div', attrs={'class':'mod-container mod-no-header-footer mod-page-header'})\n if not div:\n irc.reply(\"ERROR: I could not find active information for player.\")\n return\n playerName = div.find('h1')\n # setup our output container with the name.\n out = []\n # basic stats.\n stats = div.find('ul', attrs={'class':'general-info'})\n for stat in stats: # iterate over and add the text.\n out.append(stat.getText())\n # find the rest of the bio\n bios = div.find('ul', attrs={'class':'player-metadata floatleft'}).findAll('li')\n for bios in bios:\n cat = bios.find('span') # span is the category.\n cat.extract() # now extract the span because bios = rest of text we want.\n out.append(\"{0}: {1}\".format(cat.getText(), bios.getText()))\n # prepare output.\n descstring = \" | \".join([item for item in out])\n output = \"{0} :: {1}\".format(self._red(playerName.getText()), descstring)\n irc.reply(output)", "title": "" }, { "docid": "b294b0c88371387465254078dc3582b7", "score": "0.63138014", "text": "def get_new_player_info(self):\r\n self.view.print_adding_new_player()\r\n # Collecting player info\r\n first_name = self.get_valid_first_name()\r\n last_name = self.get_valid_last_name()\r\n if first_name is None or last_name is None:\r\n self.view.cancelled()\r\n return None\r\n birth_date = self.get_valid_birth_date()\r\n gender = self.get_valid_gender()\r\n ranking = self.get_valid_ranking()\r\n return [first_name, last_name, str(birth_date), gender, ranking]", "title": "" }, { "docid": "2bfaff97c3764306ab7d324f0289c839", "score": "0.6267467", "text": "def printAllPlayerData(self):\n for player in self.players:\n print player.name\n print \"------------------------------\"\n for x in player.__dict__:\n print x, player.__dict__[x]\n \n print \"==============================\"", "title": "" }, { "docid": "20ed094b157d0aa1b8b3a3745240fae5", "score": "0.6238688", "text": "def 
get_player(self, player_name: str) -> dict:\n item = self.cursor.execute('SELECT * FROM players WHERE player = ?', [player_name]).fetchone()\n return self.dict_from_row(item)", "title": "" }, { "docid": "90884cbc0ab59c35c5ac015694ebeeea", "score": "0.62383777", "text": "def load_player(player_name):\n history = load_history()\n\n health = history[player_name]['health']\n wins = history[player_name]['wins']\n color = history[player_name]['color']\n\n return player_name, color, health, wins", "title": "" }, { "docid": "d5e7aaf4c6b790ad32418ed75655da3d", "score": "0.6215373", "text": "def getPlayer(playerId,fullname):\r\n url = \"http://data.nba.net/prod/v1/2018/players/{}_gamelog.json\".format(playerId)\r\n with urllib.request.urlopen(url) as url2:\r\n data = json.loads(url2.read().decode())\r\n pI=data['league']['standard'][0]['stats']\r\n gamedate = data['league']['standard'][0]['gameDateUTC']\r\n text = \"Game stats for {} ({}) \\n Pts {} \\n Ass {}\\n OfR {}\\n DefR {}\\n TotR {}\\n\".format(fullname \r\n,gamedate,pI['points'],pI['assists'],pI['offReb'],pI['defReb'],pI['totReb'])\r\n return text", "title": "" }, { "docid": "fe194aac0d973818ce4c5d20fb24add5", "score": "0.61839706", "text": "def get_current_player_name(self):\r\n return self.player", "title": "" }, { "docid": "469bf248f02cb0c2a0b08cc068449a56", "score": "0.6181708", "text": "def _get_player_name(self):\n\n self._player = self._gui.player.get().strip()", "title": "" }, { "docid": "644f6a2586edf06f2b68cca919955262", "score": "0.6157766", "text": "def get(self):\n args = GET_PARSER.parse_args()\n return Players().get_all(args[\"status\"])", "title": "" }, { "docid": "bd2edc9d1eb2aac4138b2a4673983a6f", "score": "0.613121", "text": "def listPlayers():\n print '----- Players -----'\n for i, player in players.items():\n print \"Name:\", player.name\n print \"Points:\", player.currentTotal()\n print \"Resource:\", player.resource\n print \"Objectives (index [objective name, objective value]):\", \"\\n\\t\", player.objectives, \"\\n\"", "title": "" }, { "docid": "c3c05e25c558294a1bcc9f415d59a66f", "score": "0.61087734", "text": "def access_player_url(self):\n for name in self.player_name:\n player_url = self.player_name[name]\n player = player_url.split(\"/\")[-1][:-5]\n self.__access_data(player_url, player)", "title": "" }, { "docid": "03d463bb8babbb0ccf5669496769b212", "score": "0.6090212", "text": "async def get(self, username_or_uuid: Union[str, CorkusUUID], timeout: Optional[int] = None) -> Player:\n if isinstance(username_or_uuid, CorkusUUID):\n username_or_uuid = username_or_uuid.string(dashed = True)\n\n response = await self._request.get(\n version = APIVersion.V2,\n parameters = f\"player/{username_or_uuid}/stats\",\n timeout = timeout\n )\n return Player(self._corkus, response.get(\"data\", {})[0])", "title": "" }, { "docid": "f1813956f34edcfbf6c5af99f5816c1c", "score": "0.60831815", "text": "def get_player_stats(self):\n return {'unit_hp': self.__player.unit_hp,\n 'unit_hp_lvl': self.__player.unit_hp_lvl,\n 'unit_dmg': self.__player.unit_dmg,\n 'unit_dmg_lvl': self.__player.unit_dmg_lvl,\n 'unit_as': self.__player.unit_attack_speed,\n 'unit_as_lvl': self.__player.unit_attack_speed_lvl,\n 'unit_regen': self.__player.unit_regen,\n 'unit_regen_lvl': self.__player.unit_regen_lvl,\n 'castle_income': self.__player.castle_income,\n 'castle_income_lvl': self.__player.castle_income_lvl,\n 'castle_dmg': self.__player.castle_dmg,\n 'castle_dmg_lvl': self.__player.castle_dmg_lvl,\n 'castle_regen': self.__player.castle_regen,\n 
'castle_regen_lvl': self.__player.castle_regen_lvl,\n 'castle_hp': self.__player.castle.max_hp,\n 'castle_hp_lvl': self.__player.castle_hp_lvl,\n 'spawns': self.__player.spawns}", "title": "" }, { "docid": "32ee8923008d8e17b0de30c3d6a21d87", "score": "0.6082775", "text": "def get_player(self):\n self._player = self._high_score_name.get()", "title": "" }, { "docid": "4956eadfea2bff5263526ad23c3766cc", "score": "0.6082652", "text": "def retrievePlayerList(self):\n self.verbose2('Retrieving Playerlist')\n self.write('RETRIEVE PLAYERLIST')", "title": "" }, { "docid": "81d108ea7d784b37758ca0efb6ad7937", "score": "0.6066058", "text": "async def fetch_player(self) -> Player:\n return await self._corkus.player.get(self.uuid)", "title": "" }, { "docid": "5a5d99ed01240c4aac1a4c4c0c4e26ea", "score": "0.60186034", "text": "def player_stats():\r\n # UNLOCK #\r\n def _unlock():\r\n \"\"\"Unlock dialogue.\"\"\"\r\n # Unlock dialogue\r\n dialogue.locked = False\r\n\r\n # Set text\r\n _text = ['I wish you luck then, young one...\\n'\r\n + 'Be safe on your Journey.',\r\n\r\n 'It is a dangerous land.']\r\n\r\n # Import text and reference next section\r\n dialogue.import_assets(_text, enter_world)\r\n dialogue.skip()\r\n\r\n\r\n # STAT SELECTOR #\r\n def stat_selecter():\r\n \"\"\"Allow player to allocate skill points.\"\"\"\r\n # Create stat frame\r\n _stat_frame = tkin.Frame(program.main_frame,\r\n bg='#2e2e2e')\r\n _stat_frame.place(anchor='center',\r\n relx=0.5, rely=0.5,\r\n relheight=0.5, relwidth=0.3)\r\n\r\n # Lock Dialogue\r\n dialogue.locked = True\r\n\r\n # Initialize stat selector\r\n _stat_selector = stat_selecter()", "title": "" }, { "docid": "977079ce4bde996de6f73e6ef92b4c3d", "score": "0.6008468", "text": "def __get_song_metadata(self) -> dict:\n\n endpoint = \"https://api.spotify.com/v1/me/player/currently-playing\"\n\n api_headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {self.access_token}\",\n }\n\n resp = requests.get(endpoint, headers=api_headers)\n return resp.json() if resp.text else None", "title": "" }, { "docid": "7a8a19c4cb4ccf60567242ea2ce1c6c0", "score": "0.59957075", "text": "def getPlayerList(self):\n return self.console.getPlayerList()", "title": "" }, { "docid": "debab3b071c35af52d055adb7bfa8978", "score": "0.5977806", "text": "def get_player_data(self, dictKey = None):\n # If no value is passed, return the entire playerData structure\n return self.playerData\n #if dictKey == None:\n #return self.playerData\n #else:\n #return self.playerData[dictKey]", "title": "" }, { "docid": "737f7d48511f8407f244fea45a472a9f", "score": "0.5958372", "text": "def getName(self):\n # Return player's name\n return self.name", "title": "" }, { "docid": "986f2a24d5c77abe3779e7099621e69e", "score": "0.5953833", "text": "def info():\n\t\treturn API.query('info')", "title": "" }, { "docid": "575dc284e30451380ea299cd4a745cc9", "score": "0.59512913", "text": "def player_general_info(url_endpoint):\n url = f'https://www.balldontlie.io/api/v1/players?search={url_endpoint}'\n api_request = requests.get(url)\n print(f\"Status Code: {api_request.status_code}\")\n return api_request", "title": "" }, { "docid": "3d18535e2c0bf990a3c94d349092e86b", "score": "0.5942893", "text": "def findPlayer(self):\n manager = dbus.Interface(self.bus.get_object(\"org.bluez\", \"/\"), \"org.freedesktop.DBus.ObjectManager\")\n objects = manager.GetManagedObjects()\n\n player_path = None\n for path, interfaces in objects.iteritems():\n if PLAYER_IFACE in 
interfaces:\n player_path = path\n break\n\n if player_path:\n self.connected = True\n self.getPlayer(player_path)\n player_properties = self.player.GetAll(PLAYER_IFACE, dbus_interface=\"org.freedesktop.DBus.Properties\")\n if \"Status\" in player_properties:\n self.status = player_properties[\"Status\"]\n if \"Track\" in player_properties:\n self.track = player_properties[\"Track\"]", "title": "" }, { "docid": "6d251db5998c5d7d59e592e2df76daa0", "score": "0.5938513", "text": "def extract_data():\n # Convert the api request to a json file containing player info\n player_data = accepted_api.json()\n\n # Get the part of the data containing relevant data\n player_specifics = player_data['data']\n\n # Convert the dictionary inside the list for easier extraction of data\n player_dictionary = {}\n for line in player_specifics:\n player_dictionary.update(line)\n return player_dictionary", "title": "" }, { "docid": "0478e844608d83fba29ed7b0a67bc704", "score": "0.59302855", "text": "def get_player_summary(steamid):\n # TODO: error checking here\n link = '%s%s' % (_get_player_summaries_link_base(), steamid)\n players = json.load(urllib2.urlopen(link))['response']['players']\n if len(players) != 1:\n raise LookupError('Error getting player summary from Steam API')\n return players[0]", "title": "" }, { "docid": "de51f95155002e3d81f4ba69395effdd", "score": "0.58987015", "text": "def get_player_info(player_id, include_futbin_price=True):\r\n player_id = str(player_id)\r\n try:\r\n if Global.fifa_players[player_id]['surname']:\r\n name = Global.fifa_players[player_id]['surname']\r\n else:\r\n name = '{} {}'.format(Global.fifa_players[player_id]['first_name'], Global.fifa_players[player_id]['last_name'])\r\n nation_name = Global.fifa_players[player_id]['nation_name']\r\n rating = int(Global.fifa_players[player_id]['rating'])\r\n except KeyError:\r\n try:\r\n temp_id = get_base_id(player_id)\r\n name = '{} {}'.format(Global.fifa_players[temp_id]['first_name'], Global.fifa_players[temp_id]['last_name'])\r\n nation_name = Global.fifa_players[temp_id]['nation_name']\r\n rating = int(Global.fifa_players[temp_id]['rating'])\r\n except KeyError:\r\n name = 'Unable to locate {} in Global.fifa_players'.format(player_id)\r\n rating = 0\r\n nation_name = ''\r\n link = 'https://www.easports.com/fifa/ultimate-team/fut/database/player/{}'.format(player_id)\r\n img = 'https://fifa17.content.easports.com/fifa/fltOnlineAssets/B1BA185F-AD7C-4128-8A64-746DE4EC5A82/2018/fut/items/images/players/html5/134x134/{}.png' \\\r\n .format(player_id)\r\n player = {\r\n 'name': name,\r\n 'asset_id': player_id,\r\n 'link': link,\r\n 'image': img\r\n }\r\n if include_futbin_price:\r\n futbin_price = get_price(player_id)\r\n tier = get_tier(futbin_price)\r\n player.update({'futbin_price': futbin_price, 'tier': tier})\r\n if nation_name:\r\n player.update({'nation_name': nation_name, 'rating': rating})\r\n return player", "title": "" }, { "docid": "8ef7b67f4ef6d9a2e4b481414880e1ba", "score": "0.5894837", "text": "def show_player_detail():\n for name in nicknames:\n player_id = members[name]\n point = player_point[name]\n deck = players_deck[name]\n player_id.send(\"\\n=============================================\\n\".encode(\"utf-8\"))\n player_id.send(f\"Your Cards: {deck}\\nYour points: {point}\\n\\n\".encode(\"utf-8\"))", "title": "" }, { "docid": "76c9466a3215890a11d1f717f53a9812", "score": "0.58919406", "text": "def requiered_info(self):\n return {\"players\":2}", "title": "" }, { "docid": "2452b67052ab4e915b34328281ecfc35", 
"score": "0.58911914", "text": "async def info(self, ctx):\n try:\n platform, name = await select(ctx.author.id)\n embed = profile_info(ctx, platform, name)\n await ctx.send(embed=embed)\n except ProfileNotLinked:\n await ctx.send(f\"Connect your profile by running `{ctx.prefix}profile link`\")\n except Exception as ex:\n embed = exception(ex)\n await ctx.send(embed=embed)", "title": "" }, { "docid": "5d13f2d1a42af9307cc13e6d518775d9", "score": "0.5882658", "text": "async def find_player_info(self):\n while True:\n await self.ensure_connected()\n messages = await self.read()\n\n if not messages:\n # XXX Not sure why this could happen. Websocket timeout?\n print(\"{}: No messages?!\".format(self.server_abbr))\n break\n\n for message in messages:\n if message['msg'] == 'player':\n return message\n await self.handle_message(message)", "title": "" }, { "docid": "e062e38758dedf1a6a864c657bc61451", "score": "0.58791274", "text": "def basic_song_info(self) -> \"tuple[str,str,str,str]\":\n\n trackInfo = self.spotipyObject.current_user_playing_track() #get track info for current track\n #future addition: error handling for errors with collecting data... ie: detect if user object needs re-authentication/if user not playing music \n\n artist = trackInfo[\"item\"][\"artists\"][0][\"name\"] #get first artist listed \n songName = trackInfo[\"item\"][\"name\"] #get song name\n albumName = trackInfo[\"item\"][\"album\"][\"name\"] #get album name\n \n songID = trackInfo['item']['id'] #get song's track id\n\n return ((artist, songName, albumName, songID))", "title": "" }, { "docid": "c444f78a6cf4a4d0af2d75b8fd5074c4", "score": "0.58699423", "text": "def get_current_track_info(self):\n action = '\"urn:schemas-upnp-org:service:AVTransport:1#GetPositionInfo\"'\n\n body = '<u:GetPositionInfo xmlns:u=\"urn:schemas-upnp-org:service:AVTransport:1\"><InstanceID>0</InstanceID><Channel>Master</Channel></u:GetPositionInfo>'\n\n response = self.__send_command(SoCo.TRANSPORT_ENDPOINT, action, body)\n\n dom = XML.fromstring(response)\n\n track = {}\n\n track['playlist_position'] = dom.findtext('.//Track')\n track['duration'] = dom.findtext('.//TrackDuration')\n track['uri'] = dom.findtext('.//TrackURI')\n\n d = dom.findtext('.//TrackMetaData')\n\n # If the speaker is playing from the line-in source, querying for track\n # metadata will return \"NOT_IMPLEMENTED\".\n if d is not '' or d is not 'NOT_IMPLEMENTED':\n # Track metadata is returned in DIDL-Lite format\n metadata = XML.fromstring(d.encode('utf-8'))\n\n track['title'] = metadata.findtext('.//{http://purl.org/dc/elements/1.1/}title')\n track['artist'] = metadata.findtext('.//{http://purl.org/dc/elements/1.1/}creator')\n track['album'] = metadata.findtext('.//{urn:schemas-upnp-org:metadata-1-0/upnp/}album')\n\n album_art = metadata.findtext('.//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')\n\n if album_art is not None:\n track['album_art'] = 'http://' + self.speaker_ip + ':1400' + metadata.findtext('.//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')\n else:\n track['album_art'] = ''\n else:\n track['title'] = ''\n track['artist'] = ''\n track['album'] = ''\n track['album_art'] = ''\n\n return track", "title": "" }, { "docid": "730519bb6d75ca51edc63b981dd709f8", "score": "0.5868764", "text": "def get(self, id):\n return self.player.get(id)", "title": "" }, { "docid": "24cc5040bbc0321ccb238d72e2c8a840", "score": "0.58639467", "text": "def get_player_summary_data(self):\n if self.current_player_data:\n return self.current_player_data\n 
self.current_player_data = {}\n all_data = self.get_current_summary_data()\n for player in all_data[\"elements\"]:\n self.current_player_data[player[\"id\"]] = player\n return self.current_player_data", "title": "" }, { "docid": "ef388d1930a2d616189c492543cbfa8f", "score": "0.58635813", "text": "def __getPlayerList(self):\r\n return self.players", "title": "" }, { "docid": "2c69450421d6be18993a311ad7408ca8", "score": "0.5862105", "text": "def getPlayers(self):\n\n pass", "title": "" }, { "docid": "bd936ac5e74b8066f1943f15390bef5b", "score": "0.58498144", "text": "def show_players(self):\n for player in self.players:\n print(f\"{player.name} has {player.current_hp} health left\")", "title": "" }, { "docid": "38137a60655945918cbf64e4ff3bab2c", "score": "0.5847797", "text": "def get_player(self, name):\n if not self._valid_setup:\n print('Invalid game setup')\n return\n try:\n return self._players[name]\n except KeyError:\n print('Name not found')", "title": "" }, { "docid": "dde35f6a8886dde6f2ca76c21566336f", "score": "0.5845374", "text": "def get(self, player_id):\n return Players().get_one_player_id(player_id)", "title": "" }, { "docid": "dde35f6a8886dde6f2ca76c21566336f", "score": "0.5845374", "text": "def get(self, player_id):\n return Players().get_one_player_id(player_id)", "title": "" }, { "docid": "f36a8153331c424523c79f1e0d5f7000", "score": "0.58361596", "text": "async def info(ctx: commands.Context):\n await ctx.send(embed=utilities.info_embed)", "title": "" }, { "docid": "030f5162b8ad672189f4eeb10fc0d263", "score": "0.58351296", "text": "def players(self):\n return self._request(1)", "title": "" }, { "docid": "15a86d9eb7c45ee497c60acb01e3dd1b", "score": "0.5816872", "text": "def test_get_player_by_id(self):\n\n resp = self.api_client.get(PLAYER_ID_API_URL + str(0), format=\"json\")\n self.assertHttpOK(resp)\n player = resp.data[\"player\"]\n self.assertEqual(player[\"name\"], \"Cristiano Ronaldo\", \"Name should be equal\")\n self.assertEqual(player[\"age\"], 38, \"Age should be equal\")\n self.assertEqual(player[\"position\"], \"Forward\", \"Position should be equal\")\n self.assertEqual(player[\"appearances\"], 450, \"Appearances should be equal\")", "title": "" }, { "docid": "288294e8e8b30e3b77329dc9c9ce35bf", "score": "0.5813371", "text": "def get_current_player(self):\n return self._current_player", "title": "" }, { "docid": "5a33cb44137ce7ee78364f7d360ef9c5", "score": "0.5805246", "text": "def get_player_names(self, gameno):\n return (\"Unknown\", \"Unknown\")", "title": "" }, { "docid": "b83909302b66df50cf3e71e0e635f01c", "score": "0.580452", "text": "def get_status(self):\n print( 'Player: {}'.format(self.name))\n print( '\\tbuttons: {}'.format(self.buttons))\n print( '\\tincome: {}'.format(self.income))\n print( '\\tempty spaces: {}'.format(self.empty_spaces))\n print( '\\tlocation: {}'.format(self.location))\n print( '\\ttime left: {}'.format(self.get_time_left()))", "title": "" }, { "docid": "bfedeee99e5f2934d60b6e1ac0ac94ff", "score": "0.58015966", "text": "def loaddata(self, *args, **kwargs):\r\n self.playerstatus = self.mprisprops.GetAll('org.mpris.MediaPlayer2.Player')", "title": "" }, { "docid": "513a49fb88581e2934e9a1bd0875e6d2", "score": "0.57887536", "text": "async def info(self, ctx: commands.Context):\n embed=discord.Embed(title='', color=discord.Color.from_rgb(randint(0, 255), randint(0, 255), randint(0, 255)))\n embed.set_author(name='Info & stats',\n icon_url='https://cdn.discordapp.com/app-icons/293879161062424578/bf951775718f2f2fc327d9d96b692f74.png')\n 
embed.set_thumbnail(url='http://0.media.dorkly.cvcdn.com/84/90/306a0bad001ff7bb9fe733ea479d6204.jpg')\n embed.set_footer(text='Made with the Discord Python API from Rapptz',\n icon_url='https://avatars0.githubusercontent.com/u/1695103?s=400&v=4')\n embed.add_field(name='Guilds', value=str(len(self.bot.guilds)))\n embed.add_field(name='Owner', value='JonSnowWhite')\n embed.add_field(name='Running on', value='Raspberry Pi 3')\n embed.add_field(name='Commands', value=str(len(self.bot.commands)) + '(!help)')\n embed.add_field(name='GitHub Repo',\n value='<https://www.github.com/JonSnowWhite/discord-soundboard-bot>',\n inline=False)\n await ctx.channel.send(embed=embed)", "title": "" }, { "docid": "ca6639f51eca24c4f7d210986f5fb995", "score": "0.5788312", "text": "def get_player(self, guild_id: ari.SnowflakeType) -> ari.PlayerABC:\n ...", "title": "" }, { "docid": "65c234230111c6c685ed946689f51fbc", "score": "0.57831013", "text": "def get_player_data_by_id(self, player_id):\n player = self.Players.find_one({'_id': player_id})\n return player", "title": "" }, { "docid": "d7da0f99968199461f777529ffa3374c", "score": "0.5778521", "text": "def get_info(self):\n return self.send_command(\"/info\", method=\"get\")", "title": "" }, { "docid": "4e8de0ab8f20e79f3aa49f31bae2ab94", "score": "0.57694757", "text": "def showInfo (player, arguments):\n player.showInfoScreen()", "title": "" }, { "docid": "d5bf9e069e7c2ce4c359a0043bd93053", "score": "0.57649004", "text": "def getPlayer(self):\n return Bukkit.getOfflinePlayer(self.entry)", "title": "" }, { "docid": "fb900bd1fa29ea92fa9b4f4d94dc9349", "score": "0.574308", "text": "def get_id():\n return player_information['id']", "title": "" }, { "docid": "732285fb615e1c21f1ea25a06604c3ee", "score": "0.57421327", "text": "def print_players(self):\n print(f'{self.player1} is playing {self.player2}')", "title": "" }, { "docid": "62565c3e2bd5880dadb702bfeccc5e05", "score": "0.5740129", "text": "def getPlayerName(self):\n return NotImplementedError", "title": "" }, { "docid": "fff27686b3f6be70b956a257f4768b2a", "score": "0.57215923", "text": "def get_current_player_name(self) -> str:\n return self.player", "title": "" }, { "docid": "fff27686b3f6be70b956a257f4768b2a", "score": "0.57215923", "text": "def get_current_player_name(self) -> str:\n return self.player", "title": "" }, { "docid": "2b16428c90842424c2b3a8033f79a3ce", "score": "0.5711936", "text": "def get_players(self):\n result = self.raw_send_udp(self.A2S_PLAYER + self.getChallenge())\n\n if result.startswith('\\xFF\\xFF\\xFF\\xFF') and result[4] == self.S2A_PLAYER:\n playercount = struct.unpack('<B', result[5])[0]\n\n index, x = 0, 6\n players = {}\n resultlen = len(result)\n while x < resultlen:\n index = struct.unpack('<B', result[x])[0]\n if index in players:\n x += 5\n continue\n\n currentplayer = players[index] = SourceServerPlayer()\n y = result.find('\\x00', x + 1)\n if y == -1: raise SourceServerError, 'Error parsing player information'\n\n currentplayer.name = result[x + 1:y]\n currentplayer.kills, currentplayer.time_connected = struct.unpack('<BB', result[y + 1:y + 3])\n x = y + 4\n\n return players\n\n raise SourceServerError, 'Unexpected server response \\'%s\\'' % result[4]", "title": "" }, { "docid": "7acbe4161deeea36806d29ef556db7a5", "score": "0.57076037", "text": "def get_player_data(self, gsis_id) -> dict:\n data = self.db.get_player(gsis_id=gsis_id)\n if data is not None:\n return data.asdict()\n data = download_player_data(gsis_id)\n if data is not None:\n player = 
self.db.create_player(data)\n self.db.add_player(player)\n else:\n raise ValueError(f\"For the player with id {gsis_id} no data available.\")\n return data", "title": "" }, { "docid": "e06b3390c60ce0638f7e542e8f087511", "score": "0.5703833", "text": "def get_players(self):\n return self._players", "title": "" }, { "docid": "5c87ada0b3ca6e7b482fa78a1a756cbd", "score": "0.5697751", "text": "def current_player(self):\n return self.players[self.current_player_index]", "title": "" }, { "docid": "2dc2b0029e4fa5961a62a8c90fd19b60", "score": "0.5696476", "text": "def info(self):\n return self.xbmc.Input.Info()", "title": "" }, { "docid": "e4e3241e517371712e28baf2798f9474", "score": "0.56949294", "text": "def info(self):\n with (self.drive_path / \"info.json\").open() as f:\n return json.load(f)", "title": "" }, { "docid": "00be6081d14de4e6ae3d58760d740aa0", "score": "0.56811154", "text": "def load_all_players():\n history = load_history();\n\n wins = history[player_name]['wins']\n health = history[player_name]['health']\n\n return wins, health", "title": "" }, { "docid": "7e60a584f4ff2fc9d990ec654a8de968", "score": "0.56600225", "text": "def fetch_player_stats(self):\n player_keys = \",\".join([p.player_key for p in self.players])\n keys = (\"season\", \"\")\n if self.week_num:\n keys = (str(self.week_num), f\"type=week;week={self.week_num}\")\n data = self.team.league.ctx._load_or_fetch(\n f\"roster.{self.team.id}.stats.{self.team.league.id}.{keys[0]}\",\n f\"league/{self.team.league.id}/players;player_keys={player_keys}/stats;{keys[1]}\",\n )\n # Populate the stats caches of the individual players too\n player_map = {p.player_key: p for p in self.players}\n player_refs = data[\"fantasy_content\"][\"league\"][\"players\"][\"player\"]\n for player_ref in player_refs:\n player_key = get_value(player_ref[\"player_key\"])\n player_obj = player_map.get(player_key)\n if not player_obj:\n logger.warn(\n f\"Player stats found for {player_key} but they are not on the roster\"\n )\n continue\n player_obj._stats_cache[self.week_num] = player_ref\n return data", "title": "" }, { "docid": "759d2f7e1d0f2b71ddc158a97afe3eaa", "score": "0.5654071", "text": "def debug_player_list(self):\n global CONNECTED_PLAYERS\n for p in CONNECTED_PLAYERS:\n say(\" id=\" + str(p.cid) + \" name='\" + str(p.name) + \"'\")", "title": "" }, { "docid": "f79a00321e999839e8c65546f257c005", "score": "0.56523645", "text": "def _get_player(player_id):\n try:\n return store.get_player(player_id)\n except KeyError as error:\n raise ClientError(error, status_code=404)", "title": "" }, { "docid": "cdc1a4d0b6de8a637e7008ca31e4b46a", "score": "0.56503737", "text": "async def profile(self, ctx):\n try:\n player = await self.bot.character.find_by_id(ctx.author.id)\n\n except:\n player = None\n if player is None:\n await ctx.send(\"You don't have a character profile yet. 
Create one with <prefix>create\")\n return\n else:\n player[\"_id\"] = ctx.author.display_name\n embed = discord.Embed(title=f'{player[\"_id\"]} Stats', description='\\uFEFF', color=ctx.author.colour, timestamp=ctx.message.created_at)\n try:\n embed.set_image(url=player[\"picture\"])\n except:\n player[\"picture\"] = \"https://cached.imagescaler.hbpl.co.uk/resize/scaleHeight/815/cached.offlinehbpl.hbpl.co.uk/news/SUC/MEMAYY-20200316081851159.jpg\"\n embed.set_image(url=player[\"picture\"])\n embed.add_field(name='Character Name:',value=player[\"name\"], inline=True)\n try:\n embed.add_field(name='Health: '+str(player['current_health'])+'/'+str(player['total_health']),value='\\uFEFF',inline=False)\n except:\n player[\"current_health\"] = 100\n player[\"total_health\"] = 100\n embed.add_field(name='Health: '+str(player['current_health'])+'/'+str(player['total_health']),value='\\uFEFF',inline=False)\n embed.add_field(name='Experience: '+str(player['current_xp'])+'/'+str(player['needed_xp']),value='\\uFEFF',inline=False)\n embed.add_field(name=f\"Level **{player['level']}**\",value='\\uFEFF')\n try:\n embed.add_field(name='Damage:',value=player['damage'],inline=False)\n except:\n player['damage'] = 5\n embed.add_field(name='Damage:',value=player['damage'],inline=False)\n try:\n embed.add_field(name='Armor:',value=player['armor'],inline=False)\n except:\n player['armor'] = 5\n embed.add_field(name='Armor:',value=player['armor'],inline=False)\n embed.add_field(name='Gold:',value=player['gold'])\n\n embed.set_footer(text=f\"Still under development? | {self.bot.user.name}\")\n embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n\n await ctx.send(embed=embed)", "title": "" }, { "docid": "5f9176e5806cfbfa707828a7237c7d67", "score": "0.5641578", "text": "def get_recent_players(self):\n res = self.request(\"https://xboxapi.com/v2/recent-players\")\n return res.json()", "title": "" }, { "docid": "454d68f28b28352ffd454e5d96ef4386", "score": "0.56363636", "text": "def get_player_state(self):\n return self._http_api_request('GetCurrentlyPlaying()')", "title": "" }, { "docid": "6c4c7bec0cf5404123a371c07a6d4133", "score": "0.56344795", "text": "def get_info(self, song):\n\t\t\n\t\ttry:\n\t\t\twith youtube_dl.YoutubeDL() as ydl:\n\t\t\t\tresult = ydl.extract_info(song, download=False)\n\t\t\t\n\t\texcept youtube_dl.utils.DownloadError as dlError:\n\t\t\tresult = None\n\t\t\t\t\t\t\n\t\treturn result", "title": "" }, { "docid": "25c8784adc386d0152f5124d9aabafdc", "score": "0.56274986", "text": "def request_base_information(self):\n built_uri = 'bootstrap-static/'\n total_result = {}\n\n result = self.perform_get(built_uri=self.uri+built_uri)\n if result:\n current_game_week = None\n for game_week in result['events']:\n if game_week['is_current']:\n current_game_week = int(game_week['id']) # Retrieve current game week from the id in events\n\n players = []\n teams = []\n game_weeks = []\n for player in result['elements']:\n player_dict = {\n Player.FANTASY_ID: player['id'],\n Player.FANTASY_PHOTO_URL: player['photo'],\n Player.FANTASY_TEAM_CODE: player['team_code'],\n Player.FANTASY_TEAM_ID: player['team'],\n Player.FANTASY_CODE: player['code'],\n Player.FIRST_NAME: player['first_name'],\n Player.LAST_NAME: player['second_name'],\n Player.FANTASY_WEB_NAME: player['web_name'],\n Player.NAME: f\"{player['first_name']} {player['second_name']}\",\n Player.SHIRT_NUMBER: player['squad_number'],\n Player.FANTASY_STATUS: st_mapper.get(player['status']),\n Player.FANTASY_NEWS: 
player['news'],\n Player.FANTASY_PRICE: player['now_cost'],\n Player.FANTASY_NEWS_TIMESTAMP: player['news_added'],\n Player.FANTASY_CHANCE_OF_PLAYING_THIS_WEEK: player['chance_of_playing_this_round'],\n Player.FANTASY_CHANCE_OF_PLAYING_NEXT_WEEK: player['chance_of_playing_next_round'],\n Player.FANTASY_SEASON_VALUE: float(player['value_season']),\n Player.FANTASY_OVERALL_PRICE_RISE: player['cost_change_start'],\n Player.FANTASY_WEEK_PRICE_RISE: player['cost_change_event'],\n Player.FANTASY_OVERALL_PRICE_FALL: player['cost_change_start_fall'],\n Player.FANTASY_WEEK_PRICE_FALL: player['cost_change_event_fall'],\n Player.FANTASY_DREAM_TEAM_MEMBER: player['in_dreamteam'],\n Player.FANTASY_DREAM_TEAM_COUNT: player['dreamteam_count'],\n Player.FANTASY_SELECTION_PERCENTAGE: float(player['selected_by_percent']),\n Player.FANTASY_FORM: float(player['form']),\n Player.FANTASY_OVERALL_TRANSFERS_IN: player['transfers_in'],\n Player.FANTASY_OVERALL_TRANSFERS_OUT: player['transfers_out'],\n Player.FANTASY_WEEK_TRANSFERS_IN: player['transfers_in_event'],\n Player.FANTASY_WEEK_TRANSFERS_OUT: player['transfers_out_event'],\n Player.FANTASY_OVERALL_POINTS: player['total_points'],\n Player.FANTASY_WEEK_POINTS: player['event_points'],\n Player.FANTASY_POINT_AVERAGE: float(player['points_per_game']),\n Player.FANTASY_SPECIAL: player['special'],\n Player.MINUTES_PLAYED: round(float(player['minutes'])),\n Player.NUMBER_OF_GOALS: player['goals_scored'],\n Player.ASSISTS: player['assists'],\n Player.CLEAN_SHEETS: player['clean_sheets'],\n Player.GOALS_CONCEDED: player['goals_conceded'],\n Player.OWN_GOALS: player['own_goals'],\n Player.PENALTIES_SAVED: player['penalties_saved'],\n Player.PENALTIES_MISSED: player['penalties_missed'],\n Player.YELLOW_CARDS: player['yellow_cards'],\n Player.RED_CARDS: player['red_cards'],\n Player.SAVES: player['saves'],\n Player.FANTASY_WEEK_BONUS: player['bonus'],\n Player.FANTASY_TOTAL_BONUS: player['bps'],\n Player.FANTASY_INFLUENCE: float(player['influence']),\n Player.FANTASY_CREATIVITY: float(player['creativity']),\n Player.FANTASY_THREAT: float(player['threat']),\n Player.FANTASY_ICT_INDEX: float(player['ict_index']),\n Player.FANTASY_WEEK: current_game_week\n }\n\n if 'ep_this' in player and player['ep_this']: # Is there a value for estimated points for next week\n player_dict[Player.FANTASY_ESTIMATED_WEEK_POINTS] = float(player['ep_this'])\n\n players.append(player_dict)\n\n if 'teams' in result:\n for team in result['teams']:\n teams.append({\n Team.FANTASY_CODE: team['code'],\n Team.FANTASY_ID: team['id'],\n Team.NAME: team['name'],\n Team.FANTASY_WEEK_STRENGTH: team['strength'],\n Team.FANTASY_OVERALL_HOME_STRENGTH: team['strength_overall_home'],\n Team.FANTASY_OVERALL_AWAY_STRENGTH: team['strength_overall_away'],\n Team.FANTASY_ATTACK_HOME_STRENGTH: team['strength_attack_home'],\n Team.FANTASY_ATTACK_AWAY_STRENGTH: team['strength_attack_away'],\n Team.FANTASY_DEFENCE_HOME_STRENGTH: team['strength_defence_home'],\n Team.FANTASY_DEFENCE_AWAY_STRENGTH: team['strength_defence_away']\n })\n\n if 'events' in result:\n for game_week in result['events']:\n game_weeks.append({\n FantasyGameWeek.FANTASY_ID: game_week['id'],\n FantasyGameWeek.NAME: game_week['name'],\n FantasyGameWeek.DEADLINE_TIME: game_week['deadline_time'],\n FantasyGameWeek.DEADLINE_TIME_EPOCH: game_week['deadline_time_epoch'],\n FantasyGameWeek.AVERAGE_SCORE: game_week['average_entry_score'],\n FantasyGameWeek.HIGHEST_SCORE: game_week['highest_score'],\n FantasyGameWeek.FINISHED: game_week['finished']\n 
})\n\n if players:\n total_result['players'] = players\n\n if teams:\n total_result['teams'] = teams\n\n if game_weeks:\n total_result['game_weeks'] = game_weeks\n\n return total_result", "title": "" }, { "docid": "9f4d46265b753f789a0b2afc7cedd824", "score": "0.56160164", "text": "def getPlayerData():\n\n\t## This sql query needs to obtain the player info from the Player table, joining\n\t## the overall rating and the preferred foot of a player from the Player_Attributes table\n\tplayer_data = c.execute(\"\"\"SELECT Player.id,Player.player_api_id,Player.player_name,ROUND(AVG(Player_Attributes.overall_rating)),Player_Attributes.preferred_foot\n\t\t\t\t\t\t\tFROM Player\n\t\t\t\t\t\t\tINNER JOIN Player_Attributes ON Player.player_api_id=Player_Attributes.player_api_id\n\t\t\t\t\t\t\tGROUP BY Player.id\"\"\") # there are multiple entries for each player from several fifa editions, so we use group by to just obtain one, and take the average overall rating for each player\n\n\t## Write to csv file\n\twith open('player-data/player_info3.csv', 'w') as f:\n\t\twriter = csv.writer(f)\n\t\twriter.writerow(['id','api_id','name','rating','foot'])\n\t\twriter.writerows(player_data)", "title": "" }, { "docid": "680fbf1d6a533a842f63ef0cbe1623a7", "score": "0.55925006", "text": "def get_info(self):\n result, _ = self.__request(\"getinfo\")\n return result", "title": "" }, { "docid": "b4ecddfba36f1a2c27b722c38c52eab5", "score": "0.55923337", "text": "def get_viewing_player(self):\n return self._data_holder.viewingPlayer", "title": "" }, { "docid": "1657aed6634e09795a8be89691887f8b", "score": "0.5591602", "text": "def display_player(self):\n conn = sqlite3.connect(os.path.join(path, \"Data/soccer_data.db\"))\n cur = conn.cursor()\n player_data = cur.execute(\"SELECT * FROM master_player WHERE name = '{:s}'\".format(self.lwPlayer.currentItem().text())).fetchone()\n conn.close()\n\n self.leName.setText(player_data[0])\n\n position = player_data[1]\n if position == \"Forward\":\n self.cbPosition.setCurrentIndex(1)\n elif position == \"Midfielder\":\n self.cbPosition.setCurrentIndex(2)\n elif position == \"Defender\":\n self.cbPosition.setCurrentIndex(3)\n\n age = player_data[2]\n if age == \"10+\":\n self.cbAge.setCurrentIndex(1)\n elif age == \"20+\":\n self.cbAge.setCurrentIndex(2)\n elif age == \"30+\":\n self.cbAge.setCurrentIndex(3)\n elif age == \"40+\":\n self.cbAge.setCurrentIndex(4)\n\n value = player_data[3]\n if value == \"N\":\n self.checkBox.setCheckState(False)\n elif value == \"C\":\n self.checkBox.setCheckState(True)\n\n self.pbAdd.setEnabled(False)\n self.pbUpdate.setEnabled(True)", "title": "" }, { "docid": "b456a62fbc11f520ba132774823ef118", "score": "0.5587513", "text": "def get_player_input():\n global player\n player_f_name = player_first_name.get()\n person_l_name = player_last_name.get()\n player_g_name = player_gamer_name.get()\n # create a new player\n try:\n player = Player(str(player_f_name), str(person_l_name), str(player_g_name))\n except ValueError:\n messagebox.showinfo(\"Error\")\n\n # put the player in a tuple for db\n player_tup = (player.get_first_name(), player.get_last_name(), player.get_gamer_name())\n get_player_data(player_tup)\n if new_player:\n create_new_player(player_tup)\n get_player_data(player_tup)\n return", "title": "" } ]
a5c78dc38b73bbb7aaddd29ec72038c3
Exports mdf class data structure into hdf5 file
[ { "docid": "13d95b2364eff06e374762e8853a6aae", "score": "0.0", "text": "def export_to_hdf5(self, file_name=None, sampling=None, compression=None, compression_opts=None):\n #\n try:\n import h5py\n import os\n except ImportError:\n warn('h5py not found')\n return\n\n def set_attribute(obj, name, value):\n if value is not None and len(value) > 0:\n try:\n if value is dict and 'name' in value:\n value = value['name']\n obj.attrs[name] = value\n except:\n pass\n else:\n pass\n if sampling is not None:\n self.resample(sampling)\n if file_name is None:\n file_name = splitext(self.fileName)[0]\n file_name = file_name + '.hdf'\n if compression is not None:\n compression = compression.lower()\n if compression not in ['gzip', 'lzf']:\n compression = None\n compression_opts = None\n elif compression == 'lzf':\n compression_opts = None\n if compression_opts not in range(10):\n compression_opts = None\n else:\n compression_opts = None\n\n f = h5py.File(file_name, 'w') # create hdf5 file\n # create group in root associated to file\n file_group = f.create_group(os.path.basename(file_name))\n file_group.attrs['Time'] = self.fileMetadata['time']\n set_attribute(file_group, 'Author', self.fileMetadata['author'])\n set_attribute(file_group, 'Organization', self.fileMetadata['organisation'])\n set_attribute(file_group, 'ProjectName', self.fileMetadata['project'])\n set_attribute(file_group, 'Subject', self.fileMetadata['subject'])\n set_attribute(file_group, 'Comment', self.fileMetadata['comment'])\n master_type_dict = {0: 'None', 1: 'Time', 2: 'Angle', 3: 'Distance', 4: 'Index', None: 'None'}\n if len(self.masterChannelList) > 1:\n # if several time groups of channels, not resampled\n groups = {}\n n_groups = 0\n grp = {}\n for channel in self:\n channel_data = self.get_channel_data(channel)\n master_name = self.get_channel_master(channel)\n if masterField in self[channel] and master_name not in groups:\n # create new data group\n n_groups += 1\n if master_name != '' \\\n and master_name is not None:\n group_name = master_name\n else:\n group_name = masterField+str(n_groups)\n groups[group_name] = n_groups\n grp[n_groups] = file_group.create_group(group_name)\n set_attribute(grp[n_groups], masterField, master_name)\n set_attribute(grp[n_groups], masterTypeField,\n master_type_dict[self.get_channel_master_type(channel)])\n elif masterField in self[channel] and master_name in groups:\n group_name = master_name\n if channel_data.dtype.kind not in ('U', 'O'): # not supported type\n channel_name = _convert_to_hdf5_name(channel)\n dset = grp[groups[group_name]].create_dataset(channel_name,\n data=channel_data,\n compression=compression,\n compression_opts=compression_opts,\n chunks=True)\n set_attribute(dset, unitField, self.get_channel_unit(channel))\n if descriptionField in self[channel]:\n set_attribute(dset, descriptionField, self.get_channel_desc(channel))\n else: # resampled or only one time for all channels : no groups\n master_name = list(self.masterChannelList.keys())[0]\n set_attribute(file_group, masterField, master_name)\n set_attribute(file_group, masterTypeField,\n master_type_dict[self.get_channel_master_type(master_name)])\n for channel in self:\n channel_data = self.get_channel_data(channel)\n if channel_data.dtype.kind not in ('U', 'O'): # not supported type\n channel_name = _convert_to_hdf5_name(channel)\n dset = file_group.create_dataset(channel_name, data=channel_data,\n compression=compression,\n compression_opts=compression_opts,\n chunks=True)\n set_attribute(dset, unitField, 
self.get_channel_unit(channel))\n if descriptionField in self[channel]:\n set_attribute(dset, descriptionField, self.get_channel_desc(channel))\n f.close()", "title": "" } ]
[ { "docid": "1547453acf20dcb55fbb46fddc5fc80d", "score": "0.72480386", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n thetas = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n thetas.append(element.theta)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('theta', data=thetas)", "title": "" }, { "docid": "1547453acf20dcb55fbb46fddc5fc80d", "score": "0.72480386", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n thetas = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n thetas.append(element.theta)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('theta', data=thetas)", "title": "" }, { "docid": "1547453acf20dcb55fbb46fddc5fc80d", "score": "0.72480386", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n thetas = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n thetas.append(element.theta)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('theta', data=thetas)", "title": "" }, { "docid": "1547453acf20dcb55fbb46fddc5fc80d", "score": "0.72480386", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n thetas = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n thetas.append(element.theta)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('theta', data=thetas)", "title": "" }, { "docid": "a918dd09d3fbb4674f33106fb95262e4", "score": "0.7231435", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n zoffsets = []\n #t123 = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n zoffsets.append(element.zoffset)\n #t123.append([element.T1, element.T2, element.T3])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)\n h5_file.create_dataset('zoffset', data=zoffsets)\n #self.tflag = tflag", "title": "" }, { "docid": "a918dd09d3fbb4674f33106fb95262e4", "score": 
"0.7231435", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n zoffsets = []\n #t123 = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n zoffsets.append(element.zoffset)\n #t123.append([element.T1, element.T2, element.T3])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)\n h5_file.create_dataset('zoffset', data=zoffsets)\n #self.tflag = tflag", "title": "" }, { "docid": "036d08203ba0f017100a1be653ed5e7f", "score": "0.72153616", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n zoffsets = []\n #t1234 = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n zoffsets.append(element.zoffset)\n #t1234.append([element.T1, element.T2, element.T3, element.T4])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)\n h5_file.create_dataset('zoffset', data=zoffsets)\n #self.tflag = tflag", "title": "" }, { "docid": "036d08203ba0f017100a1be653ed5e7f", "score": "0.72153616", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n zoffsets = []\n #t1234 = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n zoffsets.append(element.zoffset)\n #t1234.append([element.T1, element.T2, element.T3, element.T4])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)\n h5_file.create_dataset('zoffset', data=zoffsets)\n #self.tflag = tflag", "title": "" }, { "docid": "dba51acdc3c3d974caca3c974e6b0294", "score": "0.7094672", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n #h5_file.create_dataset('_comment', data=comments)\n 
h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)", "title": "" }, { "docid": "77cd87ae16adb4bb081331312b255287", "score": "0.7045532", "text": "def export_to_hdf5(cls, h5_file: Any, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n zoffsets = []\n #t1234 = []\n neids = len(eids)\n nodes = np.zeros((neids, 8), dtype='int32')\n for i, eid in enumerate(eids):\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes[i, :] = [nid if nid is not None else 0 for nid in element.nodes]\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n zoffsets.append(element.zoffset)\n #t1234.append([element.T1, element.T2, element.T3, element.T4])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)\n h5_file.create_dataset('zoffset', data=zoffsets)\n #self.tflag = tflag", "title": "" }, { "docid": "0fdfb4cb11687804b8fe9c1031af83f0", "score": "0.7040283", "text": "def to_hdf5(self, f):\n raise NotImplementedError()", "title": "" }, { "docid": "50942aeda413e6b0c81110dad0f0c74e", "score": "0.7027945", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n zoffsets = []\n #t123 = []\n\n #element0 = model.elements[eids[0]]\n neids = len(eids)\n nnodes = 6\n nodes = np.zeros((neids, nnodes), dtype='int32')\n\n for i, eid in enumerate(eids):\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes[i, :] = [nid if nid is not None else 0 for nid in element.nodes]\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n zoffsets.append(element.zoffset)\n #t123.append([element.T1, element.T2, element.T3])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)\n h5_file.create_dataset('zoffset', data=zoffsets)\n #self.tflag = tflag", "title": "" }, { "docid": "10bb71085170a3cc4c18d6e5f8a1e449", "score": "0.7017164", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n mcids = []\n thetas = []\n neids = len(eids)\n nodes = np.zeros((neids, 9), dtype='int32')\n for i, eid in enumerate(eids):\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes[i, :] = [nid if nid is not None else 0 for nid in element.nodes]\n if isinstance(element.theta_mcid, int):\n mcid = element.theta_mcid\n theta = 0.\n else:\n assert isinstance(element.theta_mcid, float), type(element.theta_mcid)\n mcid = -1\n theta = element.theta_mcid\n mcids.append(mcid)\n thetas.append(theta)\n #h5_file.create_dataset('_comment', data=comments)\n 
h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('mcid', data=mcids)\n h5_file.create_dataset('theta', data=thetas)", "title": "" }, { "docid": "8b9a53fd62bf444c90f223c8c49ca685", "score": "0.6871136", "text": "def hdf5_save_parms(self, filename):\n import h5py\n f = h5py.File(filename, 'w')\n f.create_dataset('flat', data=self.flat_model)\n f.create_dataset('bkgs', data=self.bkgs)\n f.create_dataset('fluxes', data=self.fluxes)\n f.close()", "title": "" }, { "docid": "433fb0d34d7542a391bf36e10d833843", "score": "0.682098", "text": "def writeH5(self, fname):\n of=h5py.File(fname, \"w\")\n if (self.meta):\n meta=of.create_dataset(\"meta\",data=[])\n for v in self.meta.keys():\n meta.attrs[v]=self.meta[v]\n meta.attrs['version']=self.version\n \n dset=of.create_dataset(\"objects\", data=self.data, chunks=True,\n shuffle=True,compression=\"gzip\", compression_opts=9)\n if type(self.dNdz)!=type(None):\n dset=of.create_dataset(\"dNdz\", data=self.dNdz)\n if type(self.bz)!=type(None):\n dset=of.create_dataset(\"bz\", data=self.bz)\n self.window.writeH5(of)\n pz=of.create_dataset(\"photoz\",data=[])\n self.photoz.writeH5(pz)", "title": "" }, { "docid": "b9383d1335fb67576725422774a2a52f", "score": "0.67417145", "text": "def _to_hdf5(self, filename):\n utc_now = datetime.datetime.utcnow().isoformat(timespec=\"seconds\")\n with h5py.File(filename, \"w\") as f:\n for attribute, value in [\n (\"ubermag-hdf5-file-version\", \"0.1\"),\n (\"discretisedfield.__version__\", df.__version__),\n (\"file-creation-time-UTC\", utc_now),\n (\"type\", \"discretisedfield.Field\"),\n ]:\n f.attrs[attribute] = value\n\n h5_field = f.create_group(\"field\")\n self._h5_save_structure(h5_field)\n h5_field_data = h5_field.create_dataset(\n \"array\", (*self.mesh.n, self.nvdim), dtype=self.array.dtype\n )\n self._h5_save_data(h5_field_data, slice(None))", "title": "" }, { "docid": "29ec283a0d0be8419b55414cdaa2f20e", "score": "0.6712382", "text": "def _write_hdf5(self, fname):\n print(f'Writing file {fname}')\n dpl_data = dict()\n dpl_data['object_type'] = \"Dipole\"\n dpl_data['times'] = self.times\n dpl_data['sfreq'] = self.sfreq\n dpl_data['nave'] = self.nave\n dpl_data['data'] = self.data\n dpl_data['scale_applied'] = self.scale_applied\n write_hdf5(fname, dpl_data, overwrite=True)", "title": "" }, { "docid": "fe5a77fbd63a876ccf4d935c8007d8af", "score": "0.655364", "text": "def write_as_hdf5(self, filename):\n # prevent overwriting the existing hdf5 files \n assert self._h5_filename != filename, \\\n \"The given hdf5 filename already exists. 
Please provide a different filename\"\n \n self._h5_filename = filename\n self._N_obs = self.data.shape[0]\n self._allocate_space( )\n \n with self._h5:\n self._h5[self._root + '/data/xy'][:,:] = self.xy\n self._h5[self._root + '/data/spectra'][:,:] = self.data\n \n if self._with_image_cube: #save image cube\n self._h5[self._root + '/data/image/image_cube'][:,:,:] = self.imageCube\n self._h5[self._root + '/data/image/image_mask'][:,:] = self.imageMask\n self._h5[self._root + '/data/image/ind_rc_map'][:,:] = self.ind_rc_map\n self._h5[self._root + '/data/image/image_grid_param'][:] = self.image_grid_param\n \n if self._with_factorization: #save factorization\n self._h5[self._root + '/data/factorization/' + self._factor_prefix +'component'][:,:] = self.component\n self._h5[self._root + '/data/factorization/' + self._factor_prefix +'component_coef'][:,:] = self.component_coef\n \n print(f'Data is saved as an HDF5 file. Filename : {filename}')", "title": "" }, { "docid": "7640c41dacc29fab83eb77fe0b121e47", "score": "0.6545143", "text": "def saveh(dset, path):\n\tf = h5py.File(path, 'w')\n\tf['dset'] = dset\n\tf.close()", "title": "" }, { "docid": "7ae745d9f5a73a69f9b0df5f1c6f10c7", "score": "0.6530162", "text": "def _export_to_h5(self, save_path: str) -> None:\n self._model.save(save_path, save_format=TFExportFormat.KERAS_H5)", "title": "" }, { "docid": "75b632a5137de1f2b7b71445edee216e", "score": "0.6509957", "text": "def to_hdf(self, hdf=None, group_name=None):\n super(SphinxBase, self).to_hdf(hdf=hdf, group_name=group_name)\n self._structure_to_hdf()\n self.input.to_hdf(self._hdf5)\n self._output_parser.to_hdf(self._hdf5)", "title": "" }, { "docid": "77a7429c0e3230530e5b3d2aa7afc86e", "score": "0.6457918", "text": "def to_h5(self, file_name):\n f = h5py.File('%s.h5' % file_name, 'w')\n f.attrs[' Manufacturer'] = np.string_('EDAX')\n f.attrs[' Version'] = np.string_('OIM Analysis 7.3.0 x64 [09-01-15]')\n # create the group containing the data\n data_container = f.create_group('DataContainer')\n ebsd = data_container.create_group('EBSD')\n ebsd_header = ebsd.create_group('Header')\n ebsd_header.create_dataset('Camera Azimuthal Angle', data=np.array([0.0], dtype=np.float32))\n ebsd_header.create_dataset('Camera Elevation Angle', data=np.array([self.workingDistance], dtype=np.float32))\n pattern_center = ebsd_header.create_group('Pattern Center Calibration')\n pattern_center.create_dataset('x-star', data=np.array(self.xstar, dtype=np.float32))\n pattern_center.create_dataset('y-star', data=np.array(self.ystar, dtype=np.float32))\n pattern_center.create_dataset('z-star', data=np.array(self.zstar, dtype=np.float32))\n ebsd_data = ebsd.create_group('Data')\n ci = ebsd_data.create_dataset('CI', data=self.ci)\n iq = ebsd_data.create_dataset('IQ', data=self.iq)\n phase = ebsd_data.create_dataset('Phase', data=self.phase)\n phi1 = ebsd_data.create_dataset('Phi1', data=self.euler[:, :, 0])\n phi = ebsd_data.create_dataset('Phi', data=self.euler[:, :, 1])\n phi2 = ebsd_data.create_dataset('Phi2', data=self.euler[:, :, 2])\n x = ebsd_data.create_dataset('X Position', data=self.x)\n y = ebsd_data.create_dataset('Y Position', data=self.y)\n f.close()", "title": "" }, { "docid": "9a42a5f2e63006b7ea48ed87ddeadf31", "score": "0.6443587", "text": "def _testfile(cls):\n\n # repeatable test\n # create hdf5 file\n cls.temp_file = tempfile.NamedTemporaryFile(suffix=\".hdf5\", prefix=cls._testfilename, delete=False)\n cls.temp_file.close()\n hfile = h5py.File(cls.temp_file.name, 'w')\n\n hfile.create_dataset(\n 
name='dset0',\n shape=(1,),\n data=[0],\n dtype=np.float,\n chunks=None,\n maxshape=None,\n track_times=None,\n track_order=None,\n fillvalue=1,\n )\n\n hfile.create_dataset(\n name='dsetscalar',\n shape=(),\n data=2,\n dtype=np.float,\n maxshape=None,\n track_times=None,\n track_order=None,\n fillvalue=1,\n )\n\n shape = (20, 3, 4, 13)\n data = np.array([cls.srand.choice([v for k, v in hfile.items()]).ref for _ in range(np.prod(shape)-1)] + [hfile.ref])\n hfile.create_dataset(\n name='dsetobjref',\n shape=shape,\n chunks=(10, 2, 4, 13),\n data=data,\n dtype=h5py.ref_dtype,\n maxshape=(72, 28, 16, 68),\n track_times=None,\n track_order=None,\n fillvalue=None,\n )\n\n return hfile", "title": "" }, { "docid": "22f5e627b5d4f361e8287fc9c8a9dbc5", "score": "0.6435875", "text": "def writeH5(s, filename):\n\n f = h5py.File(\"./parafiles/\" + filename,'w')\n\n # Store state and rank data into the state and rank group of h5 file\n state_group = f.create_group(\"state_group\")\n state = state_group.create_dataset(\"state\",data=s)\n\n #rank_group = f.create_group(\"rank_group\")\n #rank = rank_group.create_dataset(\"rank\",data=r)\n\n f.close()", "title": "" }, { "docid": "e04f3070dc3764ef29b6cbf6d183bc08", "score": "0.6385674", "text": "def _testfile(cls):\n\n # repeatable test\n # create hdf5 file\n cls.temp_file = tempfile.NamedTemporaryFile(suffix=\".hdf5\", prefix=cls._testfilename, delete=False)\n cls.temp_file.close()\n hfile = h5py.File(cls.temp_file.name, 'w')\n\n hfile.create_dataset(\n name='dset0',\n shape=(1,),\n data=[0],\n dtype=np.float,\n chunks=None,\n maxshape=None,\n track_times=None,\n track_order=None,\n fillvalue=1,\n )\n\n hfile.create_dataset(\n name='dsetscalar',\n shape=(),\n data=2,\n dtype=np.float,\n maxshape=None,\n track_times=None,\n track_order=None,\n fillvalue=1,\n )\n\n return hfile", "title": "" }, { "docid": "59226a6bc579bb2a2c6cbf2b63ae9dcc", "score": "0.6384085", "text": "def export_sim_results():\n with h5py.File(data_dir+rec_filename+'.hdf5', 'w') as f:\n sim.export_to_file(f)", "title": "" }, { "docid": "9f6df87db15fac6282971d0768dbb66b", "score": "0.6367657", "text": "def save_hdf5(self, path: str) -> None:\n with h5py.File(path, \"w\") as f:\n to_hdf5(self.__dict__, f)", "title": "" }, { "docid": "7ad31680158a4827ae2e97d0b1fa9ea7", "score": "0.6361016", "text": "def to_hdf5(self, hf, df, **kwargs):\n if self.dielectronic:\n grp_name = '/'.join([self.element, self.ion_name, 'dielectronic', self.filetype])\n else:\n grp_name = '/'.join([self.element, self.ion_name, self.filetype])\n if grp_name not in hf:\n grp = hf.create_group(grp_name)\n grp.attrs['chianti_version'] = df.meta['chianti_version']\n grp.attrs['footer'] = df.meta['footer']\n else:\n grp = hf[grp_name]\n hf['/'.join([self.element, self.ion_name])].attrs['element'] = self.element\n hf['/'.join([self.element, self.ion_name])].attrs['ion'] = self.ion_name\n for name in df.colnames:\n col = df[name]\n if type(col) == u.Quantity:\n data = col.value\n else:\n data = col.data\n if '<U' in data.dtype.str:\n numchar = data.dtype.str[2:]\n data = data.astype('|S{}'.format(numchar))\n if name in grp:\n ds = grp[name]\n else:\n if data.dtype == np.dtype('O'):\n ragged_dtype = h5py.special_dtype(vlen=np.dtype('float64'))\n ds = grp.create_dataset(name, data=data, dtype=ragged_dtype)\n else:\n ds = grp.create_dataset(name, data=data, dtype=data.dtype)\n if col.unit is None:\n ds.attrs['unit'] = 'SKIP'\n else:\n ds.attrs['unit'] = col.unit.to_string()\n ds.attrs['description'] = 
df.meta['descriptions'][name]", "title": "" }, { "docid": "f2ed8cb0d8711ab9a388beacfec206a9", "score": "0.63573384", "text": "def save_to_hdf5(self):\n\n # Create folder if needed\n if not os.path.exists(self.config[\"data_set_save_folder_path\"]):\n os.makedirs(self.config[\"data_set_save_folder_path\"])\n # Add 'train', 'test' or 'val' to filename.\n path = os.path.join(self.config[\"data_set_save_folder_path\"],\n os.path.basename(self.config[\"data_folder\"])) + '.h5'\n # Write features\n with h5py.File(path, 'w') as hf:\n hf.create_dataset('magnitudes', data=self.magnitudes)\n hf.create_dataset('phases', data=self.phases)\n hf.create_dataset('features', data=self.features.numpy())\n hf.create_dataset('labels', data=self.labels.numpy())\n hf.create_dataset('classes', data=np.array(self.classes, dtype='S'))\n hf.create_dataset('filenames', data=np.array(self.filenames, dtype='S'))", "title": "" }, { "docid": "66c26ee35fbe5944cc9d420065f8618d", "score": "0.6293123", "text": "def export(self, qa_fp, data, dset_name, dset_suffix=''):\n\n if not os.path.exists(qa_fp):\n logger.info('Initializing qa output file: \"{}\"'.format(qa_fp))\n with RexOutputs(qa_fp, mode='w') as f:\n f.meta = self.meta\n f.time_index = self.time_index\n\n shape = (len(self.time_index), len(self.meta))\n attrs = H5_ATTRS.get(Feature.get_basename(dset_name), {})\n\n # dont scale the re-coarsened data or diffs\n attrs['scale_factor'] = 1\n attrs['dtype'] = 'float32'\n\n if dset_suffix:\n dset_name = dset_name + '_' + dset_suffix\n\n logger.info('Adding dataset \"{}\" to output file.'.format(dset_name))\n\n # transpose and flatten to typical h5 (time, space) dimensions\n data = np.transpose(data, axes=(2, 0, 1)).reshape(shape)\n\n RexOutputs.add_dataset(qa_fp, dset_name, data,\n dtype=attrs['dtype'],\n chunks=attrs.get('chunks', None),\n attrs=attrs)", "title": "" }, { "docid": "3c1728599c7f689dc6aa4206ab6ebf4d", "score": "0.6266522", "text": "def to_hdf5(self, f):\n\n f = super().to_hdf5(f)\n\n # if self.potential is not None:\n # import yaml\n # from ..potential.potential.io import to_dict\n # f['potential'] = yaml.dump(to_dict(self.potential)).encode('utf-8')\n\n if self.release_time:\n quantity_to_hdf5(f, 'release_time', self.release_time)\n\n if self.lead_trail is not None:\n f['lead_trail'] = self.lead_trail.astype('S1') # TODO HACK\n return f", "title": "" }, { "docid": "7aa696d491a31e1c943913b7bfb5e9ab", "score": "0.6243459", "text": "def save(self, file_path: PathSpec):\n file_path = Path(file_path).with_suffix(\".h5\")\n if file_path.is_file():\n raise OSError(f\"File {file_path} already exists.\")\n\n # remove BLM names from columns due to hdf header size limitation.\n # save_data = self.df.copy()\n header = self.df.columns\n try:\n # replace real header with numbers to not have problem saving large\n # header to hdf\n self.df.columns = range(self.df.shape[1])\n self.df.to_hdf(file_path, key=\"data\", format=\"table\")\n self.meta.to_hdf(file_path, key=\"meta\", format=\"table\", append=True)\n pd.DataFrame(header).to_hdf(\n file_path, key=\"header\", format=\"table\", append=True\n )\n finally:\n # put real header back\n self.df.columns = header\n # # write columns real columns name in separate file.\n # with open(file_path.with_suffix('.csv'), 'w') as fp:\n # fp.write('\\n'.join(self.df.columns))", "title": "" }, { "docid": "89ea12670ae4dcefd77b601b83070882", "score": "0.6193304", "text": "def export_to_hdf5(self, group, log: SimpleLogger) -> None:\n export_to_hdf5(self, group, log)", "title": "" }, { 
"docid": "87dc102c9c723d5b8a83e9972c2832b6", "score": "0.61894226", "text": "def save_hdf5(item, path):\n h5f = h5py.File(\"data.h5\", 'w')\n h5f.create_dataset(\"dataset_1\", data=item)\n h5f.close()", "title": "" }, { "docid": "30e5648cdc1a9f87b1d16fe8d93efe6f", "score": "0.6173482", "text": "def write_gsh5(self, filename: str) -> GSData:\n with h5py.File(filename, \"w\") as fl:\n fl[\"data\"] = self.data\n fl[\"freq_array\"] = self.freq_array.to_value(\"MHz\")\n if self.in_lst:\n fl[\"time_array\"] = self.time_array.hour\n else:\n fl[\"time_array\"] = self.time_array.jd\n\n fl[\"telescope_location\"] = np.array(\n [\n self.telescope_location.lat.deg,\n self.telescope_location.lon.deg,\n self.telescope_location.height.to_value(\"m\"),\n ]\n )\n\n fl.attrs[\"loads\"] = \"|\".join(self.loads)\n fl[\"nsamples\"] = self.nsamples\n fl.attrs[\n \"effective_integration_time\"\n ] = self.effective_integration_time.to_value(\"s\")\n\n flg_grp = fl.create_group(\"flags\")\n if self.flags:\n flg_grp.attrs[\"names\"] = tuple(self.flags.keys())\n flg_grp.create_dataset(\n \"values\",\n data=np.array(list(self.flags.values())),\n maxshape=(None,) + self.data.shape,\n chunks=True,\n )\n\n fl.attrs[\"telescope_name\"] = self.telescope_name\n fl.attrs[\"data_unit\"] = self.data_unit\n\n # Now history\n fl.attrs[\"history\"] = repr(self.history)\n\n # Data model\n if self.data_model is not None:\n self.data_model.write(fl, \"data_model\")\n\n # Now aux measurements\n aux_grp = fl.create_group(\"auxiliary_measurements\")\n for name, meas in self.auxiliary_measurements.items():\n aux_grp[name] = meas\n\n return self.update(filename=filename)", "title": "" }, { "docid": "9a51f358f914b37928d18dd61908efb6", "score": "0.6158642", "text": "def write_hdf5( motif_file_name ): \n\n h5file = tables.open_file( motif_file_name, \"a\", driver=\"H5FD_CORE\")\n h5file.close()\n return", "title": "" }, { "docid": "272e53059d07a6a776e8f64eda97a297", "score": "0.6141958", "text": "def write_to_file(self, filename):\n # check to see if the map exists from instantiation\n if hasattr(self, 'map'):\n sunpy_meta = self.map.meta\n\n psihdf.wrh5_meta(filename, self.x, self.y, np.array([]),\n self.data, chd_meta=self.info, sunpy_meta=sunpy_meta)", "title": "" }, { "docid": "55fd093b67a03583ee701c89eabc54f4", "score": "0.6097498", "text": "def save_data(df, path):\n # Export to HDF5\n df.to_hdf(path, 'df', complevel=9)", "title": "" }, { "docid": "9e66dc3d74741eae77b62460ea289c1f", "score": "0.6089806", "text": "def tofile(self, filename):\n with util.atomic_write(filename) as tmpname:\n with util.open_h5py(tmpname, \"w\") as fp:\n fp.attrs[\"type\"] = \"Space\"\n self.config.tofile(fp)\n self.axes.tofile(fp)\n self.metadata.tofile(fp)\n fp.create_dataset(\n \"counts\",\n self.photons.shape,\n dtype=self.photons.dtype,\n compression=\"gzip\",\n ).write_direct(self.photons)\n fp.create_dataset(\n \"contributions\",\n self.contributions.shape,\n dtype=self.contributions.dtype,\n compression=\"gzip\",\n ).write_direct(self.contributions)", "title": "" }, { "docid": "46fc0b7e26e5b233429338039333afe1", "score": "0.60839766", "text": "def write_model_to_h5(self, output_filename, in_cgs=False, r_min=None,\n r_max=None, overwrite=False):\n if os.path.exists(output_filename) and not overwrite:\n raise IOError(f\"Cannot create {output_filename}. 
It exists and \"\n f\"overwrite=False.\")\n f = h5py.File(output_filename, \"w\")\n f.create_dataset(\"num_elements\", data=self.num_elements)\n f.attrs[\"unit_system\"] = \"cgs\" if in_cgs else \"galactic\"\n f.close()\n if r_min is None:\n r_min = 0.0\n if r_max is None:\n r_max = self.fields[\"radius\"][-1].d*2\n mask = np.logical_and(self.fields[\"radius\"].d >= r_min,\n self.fields[\"radius\"].d <= r_max)\n for k, v in self.fields.items():\n if in_cgs:\n if k == \"temperature\":\n fd = v[mask].to_equivalent(\"K\", \"thermal\")\n elif k not in self._keep_units:\n fd = v[mask].in_cgs()\n else:\n fd = v[mask]\n else:\n fd = v[mask]\n fd.write_hdf5(output_filename, dataset_name=k,\n group_name=\"fields\")\n if getattr(self, \"_dm_virial\", None):\n fd = self.dm_virial.df\n fd.write_hdf5(output_filename, dataset_name=\"dm_df\")\n if getattr(self, \"_star_virial\", None):\n fd = self.star_virial.df\n fd.write_hdf5(output_filename, dataset_name=\"star_df\")", "title": "" }, { "docid": "939293ef4da18e8ea947f5b3b58b89f0", "score": "0.6070948", "text": "def _write_spectrum_hdf5(self, filename):\n mylog.info(\"Writing spectrum to hdf5 file: %s.\", filename)\n output = h5py.File(filename, 'w')\n output.create_dataset('wavelength', data=self.lambda_field)\n output.create_dataset('tau', data=self.tau_field)\n output.create_dataset('flux', data=self.flux_field)\n output.close()", "title": "" }, { "docid": "824d39afecb2b260d8a65c661a46c30e", "score": "0.6069436", "text": "def _to_intermediate(self) -> None:\n h5f = h5py.File(self.intermediate_path, \"w\")\n h5f.create_dataset(\"X\", data=self.X)\n h5f.create_dataset(\"y\", data=self.y)\n json_metadata_str = json.dumps(\n {field: getattr(self, field) for field in self.metadata_fields}\n )\n h5f.attrs[\"metadata\"] = json_metadata_str\n h5f.close()", "title": "" }, { "docid": "79bcbc8ef3c3735b076ac2f55f463ea0", "score": "0.6066029", "text": "def save_hdf5(self, dictionary):\n h5file = h5py.File(self.weights_file, 'w')\n #print(dictionary)\n def recursive_save(dictionary, h5file_obj):\n for key, value in dictionary.items():\n type_name = type(value).__name__\n if not type_name in ['None', 'NoneType']:\n try:\n if type_name in ['ndarray']:\n h5file_obj.create_dataset(key, (value.shape), data=value)\n elif type_name in ['int', 'int32', 'int64', 'float', 'float32', 'float64', 'str', 'tuple', 'bool', 'list']:\n h5file_obj.create_dataset(key, data=value)\n elif type_name in ['dict']:\n dict_group = h5file_obj.create_group(key)\n recursive_save(value, dict_group)\n except:\n continue\n recursive_save(dictionary, h5file)", "title": "" }, { "docid": "652c8b0032b9689a26676daa033b100c", "score": "0.6065993", "text": "def save_hdf(filename, data, **kwargs):\n f = h5py.File(filename, 'w')\n f.create_dataset(name='data', data=np.swapaxes(np.transpose(data), 0, 1), **kwargs)", "title": "" }, { "docid": "ed02c102212aeb8374ebd338e227e3a3", "score": "0.60578245", "text": "def write_hdf5(data, labels, output_filename):\n\n x = data.astype(numpy.float32)\n y = labels.astype(numpy.float32)\n\n with h5py.File(output_filename, 'w') as h:\n h.create_dataset('data', data=x, shape=x.shape)\n h.create_dataset('label', data=y, shape=y.shape)\n # h.create_dataset()", "title": "" }, { "docid": "461d8ed9208a5d49474a2f033a5a4608", "score": "0.6047279", "text": "def save_init(self,filename_long):\n\n\n f=h5py.File(filename_long,'w')\n # try/except to avoid loosing the h5 file if\n # read/write error\n\n try:\n\n f.create_group('sig')\n f.create_group('ray2')\n f.create_group('ray')\n 
f.create_group('Ct')\n f.create_group('H')\n # mapping point a\n f.create_dataset('p_map',shape=(0,3), maxshape=(None,3),dtype='float64')\n # mapping cycles\n f.create_dataset('c_map',shape=(0,3), maxshape=(None,3),dtype='int')\n # mapping (fmin,fmax,fstep)\n f.create_dataset('f_map',shape=(0,3), maxshape=(None,3),dtype='float64')\n # mapping Antenna name\n f.create_dataset('A_map',shape=(0,1), maxshape=(None,1),dtype=\"S10\")\n # mapping rotation matrices Antenna\n f.create_dataset('T_map',shape=(0,3,3), maxshape=(None,3,3),dtype='float64')\n f.close()\n except:\n f.close()\n raise NameError('Links: issue when initializing h5py file')", "title": "" }, { "docid": "a7d3ee420a3007a2cddaa28f6e525a2a", "score": "0.6040589", "text": "def to_file(self, filename: str) -> None:\n import h5py\n\n with h5py.File(filename, \"w\") as fp:\n # write the grid data\n if self.grid is not None:\n fp.attrs[\"grid\"] = self.grid.state_serialized\n # write actual droplet data\n self._write_hdf_dataset(fp)", "title": "" }, { "docid": "f1bfcd64b8651542920c3a1a1093b580", "score": "0.6027489", "text": "def csv2h5(fname):\n data,area,ind = readcsv(fname)\n if fname.endswith('.csv'):\n fname=fname[:-4]\n data_dict = {'data':data,'area':area,'ind':ind}\n dd.io.save(fname,data_dict,compression='blosc')", "title": "" }, { "docid": "c385ccf46669308bed5f5a4485f19e55", "score": "0.60165584", "text": "def writeToHDF5(self, file_name: str = None) -> None:\n self.__dumpToHDF5(file_name, mode='w')", "title": "" }, { "docid": "ba66c5f039792a26444229da16adb4ba", "score": "0.6000323", "text": "def save_to_hdf5(self, path_to_hdf5):\n print(\"Saving data to {}...\".format(osp.basename(path_to_hdf5)),\n end=\" \")\n\n # Save the data.\n hdf5file = h5py.File(path_to_hdf5, mode='w')\n datagrp = hdf5file.create_group('data')\n for key in list(self.data.keys()):\n if key == 'cid':\n # This is required to avoid a \"TypeError: No conversion path\n # for dtype: dtype('<U5')\".\n # See https://github.com/h5py/h5py/issues/289\n datagrp.create_dataset(\n key, data=[np.string_(i) for i in self.data['cid']])\n else:\n datagrp.create_dataset(key, data=self.data[key])\n hdf5file.close()\n\n # Save the grid.\n self.grid.to_hdf(path_to_hdf5, key='grid', mode='a')\n\n print('done')", "title": "" }, { "docid": "4d84d5fff52396c0461691d5f80b57c2", "score": "0.59892416", "text": "def do_export(cls, cfmg):\n raise NotImplementedError()", "title": "" }, { "docid": "6709f1263e97d0cb01ceefb75e8fad2a", "score": "0.59872794", "text": "def save_5d_matrix_to_h5(directory, matrix):\n file_name = '%s.h5' % str(uuid.uuid4())\n file_path = os.path.join(directory, file_name)\n\n with h5py.File(file_path, 'w') as target:\n target.create_dataset(FULL_DISAGG_MATRIX, data=matrix)\n\n return file_path", "title": "" }, { "docid": "761698a43a7634693cef3db7f956cbe4", "score": "0.59860015", "text": "def to_file(self, io_data, file_handle=None):\n self.io_data = io_data\n if file_handle is None:\n h5file_group = h5py.File(self.filename, 'w')\n else:\n h5file_group = file_handle\n\n self.write_attributes(h5file_group)\n self.write_ndarrays(h5file_group)\n self.write_objects(h5file_group)", "title": "" }, { "docid": "84d42831d48daaef615e84cc602fe0d2", "score": "0.59773856", "text": "def to_hdf(self, hdf, group_name=\"volumetric_data\"):\n volumetric_data_dict_to_hdf(\n data_dict=self.to_dict(),\n hdf=hdf,\n group_name=group_name,\n )", "title": "" }, { "docid": "492338206ed6fe6c6ea836d60b83e141", "score": "0.5959477", "text": "def write(self, filename):\n print(\"Results: 
writing to {0}\".format(filename))\n with h5py.File(filename,'w') as f: \n for r in range(self.R):\n g = f.create_group('order{0}'.format(r))\n g.create_dataset('ys_predicted', data=self.ys_predicted[r])\n for n in self.component_names:\n g.create_dataset(n+'_ys_predicted', data=getattr(self, n+'_ys_predicted')[r])\n attrs = np.append(COMPONENT_NP_ATTRS, COMPONENT_TF_ATTRS)\n if np.any(np.ravel(getattr(self, n+'_K')) > 0):\n attrs = np.append(attrs, np.append(OPT_COMPONENT_NP_ATTRS, OPT_COMPONENT_TF_ATTRS))\n for attr in attrs:\n g.create_dataset(n+'_'+attr, data=getattr(self, n+'_'+attr)[r])\n for n in self.component_names:\n for attr in POST_COMPONENT_ATTRS:\n try:\n f.create_dataset(n+'_'+attr, data=getattr(self, n+'_'+attr))\n except:\n continue\n self.component_names = [a.encode('utf8') for a in self.component_names] # h5py workaround\n for attr in COMMON_ATTRS:\n f.create_dataset(attr, data=getattr(self, attr))", "title": "" }, { "docid": "baf53fdc0c8c0510960ffddb06e2850d", "score": "0.5925166", "text": "def writeExportCode(self, fileName):\r\n \r\n file = open(fileName, \"w+t\" )\r\n dir, fil = os.path.split(fileName)\r\n funcName = string.split (fil, \".\")\t# Compose class name\r\n\r\n # Header code GML \r\n file.write('Creator\t\\\"Atom3 GML Exporter\\\"\\n')\r\n file.write('Version\t\\\"0.2.2\\\"\\n')\r\n file.write('graph\\n')\r\n file.write('[\\n')\r\n file.write(' hierarchic\t1\\n')\r\n file.write(' label\t\\\"' + funcName[0] + '\\\"\\n')\r\n file.write(' directed\t1\\n')\r\n \r\n \r\n \r\n for nodetype in self.nodeTypes:\t\t# Iterate on all the node types...\r\n for node in self.listNodes[nodetype]:\t# Iterate on all the nodes of each type \r\n if( isConnectionLink( node.graphObject_ ) ): whiteLabel = True\r\n else: whiteLabel = False\r\n \r\n position = [node.graphObject_.x, node.graphObject_.y]\r\n\r\n # Label with no border\r\n if( whiteLabel ): \r\n centerObject = node.graphObject_.getCenterObject() \r\n if( centerObject ): \r\n x0,y0,x1,y1 = centerObject.getbbox()\r\n size = (x1-x0,y1-y0)\r\n else:\r\n size = None\r\n file.write( genGMLNodeCode(node.objectNumber,str(node.objectNumber),position, \\\r\n size=size, fill=\"#FFFFFF\", outline= \"#FFFFFF\" ) )\r\n else: \r\n x0,y0,x1,y1 = node.graphObject_.getbbox() \r\n size = (x1-x0,y1-y0)\r\n file.write( genGMLNodeCode(node.objectNumber,str(node.objectNumber),position,size=size ) )\r\n \r\n \r\n # Generate code for the connections...\r\n for nodetype in self.nodeTypes:\r\n # Are there any nodes of this type?\r\n if( self.listNodes[nodetype] ):\r\n # Is this a link nodeType?\r\n if( isConnectionLink( self.listNodes[nodetype][0].graphObject_ ) ):\r\n # Go over all the nodes\r\n for node in self.listNodes[nodetype]:\r\n # Go over all the outbound connections\r\n for obj in node.out_connections_:\r\n file.write( genGMLConnectionCode(node.objectNumber,\r\n obj.objectNumber,\r\n label = str(node.objectNumber))) \r\n # Go over all the inbound connections\r\n for obj in node.in_connections_:\r\n file.write( genGMLConnectionCode(obj.objectNumber,\r\n node.objectNumber,\r\n label = str(node.objectNumber))) \r\n \r\n # Footer code GML\r\n file.write(']\\n')\r\n \r\n file.close()\r\n return funcName[0] # This indicates that we've done something\r", "title": "" }, { "docid": "614f00786a003e84f1df4307a640d1a9", "score": "0.5917963", "text": "def write_hdf(self, group):\n _hdf_write_helper(group, self._dict)", "title": "" }, { "docid": "fbb4a0b9d31be49d9165dcc70764dabe", "score": "0.5915931", "text": "def to_file(self, filename: str, 
info: InfoDict = None) -> None:\n import h5py\n\n with h5py.File(filename, \"w\") as fp:\n # write the actual emulsion data\n for i, (time, emulsion) in enumerate(self.items()):\n dataset = emulsion._write_hdf_dataset(fp, f\"time_{i:06d}\")\n dataset.attrs[\"time\"] = time\n\n # write additional information\n if info:\n for k, v in info.items():\n fp.attrs[k] = json.dumps(v)\n\n # write the grid data -> this might overwrite grid data that is\n # present in the info dictionary, but in normal cases these grids\n # should be identical, so we don't handle this case explicitly\n if self.grid is not None:\n fp.attrs[\"grid\"] = self.grid.state_serialized", "title": "" }, { "docid": "314e9d29ed910dba1962d65bfa6eae9d", "score": "0.58946985", "text": "def h5_writer(data_freq_time, data_dm_time,\n dm0, t0, snr, beamno='', basedir='./',\n time_res=''):\n fnout = '%s/CB%s_snr%d_dm%d_t0%d.hdf5'\\\n % (basedir, beamno, snr, dm0, t0)\n\n f = h5py.File(fnout, 'w')\n f.create_dataset('data_freq_time', data=data_freq_time)\n\n if data_dm_time is not None:\n f.create_dataset('data_dm_time', data=data_dm_time)\n ndm = data_dm_time.shape[0]\n else:\n ndm = 0\n\n nfreq, ntime = data_freq_time.shape\n\n f.attrs.create('snr', data=snr)\n f.attrs.create('dm0', data=dm0)\n f.attrs.create('ndm', data=ndm)\n f.attrs.create('nfreq', data=nfreq)\n f.attrs.create('ntime', data=ntime)\n f.attrs.create('time_res', data=time_res)\n f.attrs.create('t0', data=t0)\n f.attrs.create('beamno', data=beamno)\n f.close()\n\n logging.info(\"Wrote to file %s\" % fnout)", "title": "" }, { "docid": "203efa9a882fb8ba97b6265fce85d698", "score": "0.58834803", "text": "def dumpDict(d, h5, where=None, filters=None, matlab=False):\n \n if where is None or where=='':\n (h5, grp, bClose) = openFile(h5, 'w', where, filters)\n else:\n (h5, grp, bClose) = openFile(h5, 'a', where, filters)\n\n import pickle as p\n\n keys = d.keys()\n names = []\n ext_names = []\n non_hdf_names = []\n\n # handle non-hdf5 names\n for key in keys:\n\n if matlab and type(key)==tuple:\n key = reduce(lambda x,y: x + '__' +y, key);\n\n \n if not type(key)==str or not key[0].isalpha() or (len(key)>=12 and key[0:12]=='non_hdf_name'):\n # pickle key\n names+= ['non_hdf_name%d' % len(non_hdf_names)]\n non_hdf_names+=[names[-1]]\n ext_names+=['#'+p.dumps(key)]\n else:\n # key is ok as hdf5 node name\n names+=[key]\n ext_names+=['#']\n\n for i in range(len(keys)):\n key = keys[i]\n name = names[i]\n ext_name = ext_names[i]\n\n if type(d[key])==type(numpy.array([])):\n put_array(h5,grp,name,d[key],ext_name)\n\n elif (matlab and (numpy.isscalar(d[key]) and not type(d[key])==str)):\n h5.createArray(grp,name,numpy.array(d[key]).astype(numpy.float64), ext_name )\n\n elif (matlab) and d[key]==None:\n #what to do with none? 
just 0 \n h5.createArray(grp,name,numpy.array(0).astype(numpy.float64), ext_name )\n\n elif (not matlab) and (d[key] in (int,float)):\n h5.createArray(grp,name,numpy.array(d[key]), ext_name )\n\n# elif type(d[key]) in (str,):\n# #h5.createArray(grp,name,d[key], ext_name )\n# put_string(h5,grp,name,d[key],ext_name)\n elif type(d[key])==dict:\n # recurse with group named key\n g = h5.createGroup(grp,name, title=ext_name)\n dumpDict(d[key],h5,g,filters,matlab=matlab)\n else:\n try:\n put_python(h5,grp,name,d[key],ext_name)\n except Exception, x:\n if matlab:\n # recurse with group named key (for matlab) CAUTION: cannot be loaded in python\n if type(d[key]) in (list,tuple):\n g = h5.createGroup(grp,name, title=ext_name)\n ii = 0\n for dk in d[key]:\n if type(dk)<>dict:\n n = name + '____' + str(ii)\n tmpdk = {n : dk}\n dumpDict(tmpdk,h5,g,filters,matlab=True)\n else:\n g2 = h5.createGroup(g,name + '____' + str(ii), title=ext_name+ '__' + str(ii))\n dumpDict(dk,h5,g2,filters,matlab=True)\n ii+=1\n \n elif type(d[key]) in (str,):\n put_string(h5,grp,name,d[key],ext_name)\n else:\n g = h5.createGroup(grp,name, title='OBJECT: ' + ext_name)\n try:\n dumpDict(d[key].__dict__,h5,g,filters,matlab=True)\n except:\n pdb.set_trace()\n else:\n #print \"put_python Exception: %s\" % str(x)\n #print \"Pickling unsupported hdf5 type at key '%s' title '%s' for storage.\" % (name,ext_name)\n put_string(h5,grp,name,p.dumps(d[key]),ext_name+p.dumps('Pickled'))\n \n if bClose:\n h5.close()", "title": "" }, { "docid": "42eadc7ec49b271616c4a4c4afeefe9f", "score": "0.5870811", "text": "def inspect_h5py_file(self, f):\n print(f'Inspecting h5py file...')\n for k, v in f.items():\n print(f'\\tFound object {v.name} at key {k}')\n if isinstance(v, Dataset):\n print(f'\\t - Object type: dataset')\n print(f'\\t - Physical chunks shape: {v.chunks}')\n print(f'\\t - Compression: {v.compression}')\n print(f'\\t - Shape: {v.shape}')\n print(f'\\t - Size: {v.size}')\n print(f'\\t - Dtype: {v.dtype}')\n else:\n print(f'\\t - Object type: group')\n pass", "title": "" }, { "docid": "c400a5bc7fa1d9ab73a5aa8c310e117b", "score": "0.58678657", "text": "def write_csv(path_h5file: str, base_path_out: str) -> None:\n file = h5py.File(path_h5file, 'r')\n groups = file.keys()\n for group in groups:\n data = {key: file[group][key] for key in file[group].keys()}\n df = pd.DataFrame(data)\n path_out = os.path.join(base_path_out, group)\n df.to_csv(path_out + \".csv\", sep=',')", "title": "" }, { "docid": "e136ec49bd4de6dfb153840a48091941", "score": "0.58400154", "text": "def from_hdf(self, hdf):", "title": "" }, { "docid": "9cbc05f251c03704649b06fd4b629124", "score": "0.5815771", "text": "def save_mdh(filename):\n csv_filename = 'mdhs/{}_mdh.csv'.format(filename.split('/')[-1][:-4])\n twix = twixreader.read_twix(filename)\n for i in range(twix.num_meas):\n meas = twix.read_measurement(i)\n mdh_dim_order = meas.get_meas_buffer(0).mdh_dim_order\n mdh_all = meas._all_mdh\n print(\"[status]protocol name : {}\".format(Image(meas.hdr, 0).header['measurement']['protocol_name']))\n print(\"[status]mdhs number : {}\".format(len(mdh_all)))\n # write csv\n with open(csv_filename, 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames = mdh_dim_order)\n writer.writeheader()\n for i, mdh in enumerate(mdh_all):\n row = {}\n for k in mdh_dim_order:\n row[k] = mdh[k]\n writer.writerow(row)\n print(\"Done\")", "title": "" }, { "docid": "7d171f249124bd5a063a430368a0f216", "score": "0.5813618", "text": "def test_save_to_open_h5_file():\n\n data = 
np.random.randn(20,20)\n filename = 'temp_create2.h5'\n dset_name = '/Group1/Dset'\n \n pth = './temp_test'\n os.mkdir(pth)\n assert os.path.isdir(pth)\n\n fp = os.path.join(pth, filename)\n with h5py.File(fp, 'w') as fid:\n save(fid, dset_name, data, pth=pth, mode='w')\n\n assert os.path.isfile(fp)\n assert os.path.getsize(fp) >= data.nbytes\n\n os.remove(fp)\n os.rmdir(pth)", "title": "" }, { "docid": "140cba2955cd87e5ef0de559c462506a", "score": "0.5795861", "text": "def to_asdf(self):\n self_dict = dataclasses.asdict(self)\n del self_dict['override'] # Do not serialize the override transforms\n asdf_file = asdf.AsdfFile({'transforms': self_dict})\n return asdf_file", "title": "" }, { "docid": "4a5a610fa9480b0b444a55475227dd13", "score": "0.57773197", "text": "def saveElement(elem, output, h5group = \"/\"):\n import h5py\n h5f = h5py.File(output)\n grp = h5f.require_group(h5group)\n for fld in elem.fields():\n try:\n val = elem.get(fld, handle=\"setpoint\", unitsys=None)\n grp[fld + \".sp\"] = val\n except:\n pass\n try:\n val = elem.get(fld, handle=\"readback\", unitsys=None)\n grp[fld + \".rb\"] = val\n except:\n pass\n grp.attrs[\"name\"] = elem.name\n grp.attrs[\"family\"] = elem.family\n grp.attrs[\"cell\"] = elem.cell\n grp.attrs[\"girder\"] = elem.girder\n h5f.close()", "title": "" }, { "docid": "c2d77d6b080862c06f3189859cc2fbd9", "score": "0.5766412", "text": "def tofile(self, filename):\n with util.atomic_write(filename) as tmpname:\n with util.open_h5py(tmpname, \"w\") as fp:\n fp.attrs[\"type\"] = \"Empty\"", "title": "" }, { "docid": "24a12b387f8c01f7a88bb1555da8e888", "score": "0.57648134", "text": "def write_to_hdf(self):\n\n if self.kline_df is None or self.kline_df.empty:\n log.notice(\"Not writing to .h5 since no data was received from API\")\n return\n\n db.to_hdf(self.kline_df, self.symbol, self.interval)", "title": "" }, { "docid": "e1d745136f0e5d00cd0008541c153896", "score": "0.576038", "text": "def to_hdf5(self, group):\n group.attrs['type'] = np.string_('kalbach-mann')\n\n dset = group.create_dataset('energy', data=self.energy)\n dset.attrs['interpolation'] = np.vstack((self.breakpoints,\n self.interpolation))\n\n # Determine total number of (E,p,r,a) tuples and create array\n n_tuple = sum(len(d) for d in self.energy_out)\n distribution = np.empty((5, n_tuple))\n\n # Create array for offsets\n offsets = np.empty(len(self.energy_out), dtype=int)\n interpolation = np.empty(len(self.energy_out), dtype=int)\n n_discrete_lines = np.empty(len(self.energy_out), dtype=int)\n j = 0\n\n # Populate offsets and distribution array\n for i, (eout, km_r, km_a) in enumerate(zip(\n self.energy_out, self.precompound, self.slope)):\n n = len(eout)\n offsets[i] = j\n\n if isinstance(eout, Mixture):\n discrete, continuous = eout.distribution\n n_discrete_lines[i] = m = len(discrete)\n interpolation[i] = 1 if continuous.interpolation == 'histogram' else 2\n distribution[0, j:j+m] = discrete.x\n distribution[1, j:j+m] = discrete.p\n distribution[2, j:j+m] = discrete.c\n distribution[0, j+m:j+n] = continuous.x\n distribution[1, j+m:j+n] = continuous.p\n distribution[2, j+m:j+n] = continuous.c\n else:\n if isinstance(eout, Tabular):\n n_discrete_lines[i] = 0\n interpolation[i] = 1 if eout.interpolation == 'histogram' else 2\n elif isinstance(eout, Discrete):\n n_discrete_lines[i] = n\n interpolation[i] = 1\n distribution[0, j:j+n] = eout.x\n distribution[1, j:j+n] = eout.p\n distribution[2, j:j+n] = eout.c\n\n distribution[3, j:j+n] = km_r.y\n distribution[4, j:j+n] = km_a.y\n j += n\n\n 
# Create dataset for distributions\n dset = group.create_dataset('distribution', data=distribution)\n\n # Write interpolation as attribute\n dset.attrs['offsets'] = offsets\n dset.attrs['interpolation'] = interpolation\n dset.attrs['n_discrete_lines'] = n_discrete_lines", "title": "" }, { "docid": "cddee6809ef889563e3d28479ceecc91", "score": "0.57567674", "text": "def save_hdf(data,times,cnames,fs,base_file_name):\n \n (x,y)= data.shape\n #Store in hd5(pytables) format\n logger.info( \"Converting to pytables\")\n #signals = pd.HDFStore(base_file_name+'.hd5','w',complevel=9)\n signals = pd.HDFStore(base_file_name+'.hd5','w')\n #\n logger.debug( '\\tSaving timing info')\n signals['times'] = pd.Series(times,dtype='float64')\n #\n logger.debug( '\\tSaving data')\n signals['data']=pd.DataFrame(data.T,columns=cnames,index=times) #Ideally this would be tables=True\n # \n logger.debug( \"\\tSaving meta data\")\n signals['channels'] = pd.Series(cnames)\n signals['fs'] = pd.Series(fs)\n #signals['data_dimensions'] = pd.Series(['channels', 'samples'])\n signals.close()\n logger.info( 'Conversion complete')", "title": "" }, { "docid": "244590ca6772961828932e9c5bd60f39", "score": "0.57541543", "text": "def write_attributes(self, h5file_group):\n h5file_group.attrs.create(\"__type\", self.io_data.typename) # Record the type of the current class instance\n attributes = self.io_data.attributes\n for attr_name, attr_value in attributes.items():\n if isinstance(attr_value, dict): # h5py does not serialize dicts automatically, so have to do it manually\n group_name = \"__dicts/\" + attr_name\n h5file_group.create_group(group_name)\n io.write(attr_value, self.filename, file_handle=h5file_group[group_name])\n elif isinstance(attr_value, (list, tuple)):\n group_name = \"__lists/\" + attr_name\n h5file_group.create_group(group_name)\n io.write(attr_value, self.filename, file_handle=h5file_group[group_name])\n else:\n h5file_group.attrs[attr_name] = attr_value", "title": "" }, { "docid": "c55e2726f29ded68c5738b83d88f7077", "score": "0.5744065", "text": "def _h5_save_structure(self, h5_field: h5py.Group):\n h5_mesh = h5_field.create_group(\"mesh\")\n self.mesh._h5_save(h5_mesh)\n\n h5_field.attrs[\"nvdim\"] = self.nvdim\n h5_field.attrs[\"vdims\"] = self.vdims if self.vdims is not None else \"None\"\n h5_field.attrs[\"unit\"] = str(self.unit)", "title": "" }, { "docid": "37b52ad3add9e013f1993acabed66968", "score": "0.5733749", "text": "def _create_file(cls, name):\n\n srand = cls.srand\n\n # create hdf5 file\n cls.temp_file = tempfile.NamedTemporaryFile(suffix=\".hdf5\", prefix=name, delete=False)\n cls.temp_file.close()\n hfile = h5py.File(cls.temp_file.name, 'w')\n\n # create nested groups\n groupnames_prefix = [chr(65+i)for i in range(cls.n_groups)] # e.g. 
['A', 'B', 'C']\n group_list = [hfile] # list containing all groups\n\n def _create_groups(obj, d):\n nonlocal group_list\n\n for c in groupnames_prefix:\n g_name = c + str(cls.depth - d)\n g = obj.create_group(g_name)\n group_list.append(g)\n if d > 0:\n _create_groups(obj[g_name], d-1)\n\n _create_groups(hfile, cls.depth)\n\n # create softlinks to groups\n for g in group_list:\n for i in range(cls.n_groupsoftlink):\n # do not use rand_rng.choice\n target_str = srand.choice(group_list).name\n g[f\"SoftLg{i}\"] = h5py.SoftLink(target_str)\n\n # create datasets\n # TO DO, external dsets\n # TO DO, compression\n srand.shuffle(cls.dset_dtypes)\n iter_dtypes = itertools.cycle(cls.dset_dtypes) # shuffle dtypes to cycle over when creating dsets\n iter_chunks = itertools.cycle([True, None]) # True or False cycle for auto chunking\n iter_track_times = itertools.cycle([False, True]) # True or False cycle for track_times\n iter_track_order = itertools.cycle([False, False, True, True]) # True or False cycle for track_order\n iter_fillvalue = itertools.cycle([None, True, True, None]) # True or False cycle for track_order\n rand_rng = np.random.default_rng()\n dset_list = []\n for g in group_list:\n # TO DO, add test with datasets with zero in dimensions\n for i in range(cls.n_dsets):\n shape = srand.choices(range(1, 90//(i or 1)), k=i) # dseti has i dimensions\n size = np.prod(shape)\n dtype = next(iter_dtypes)\n if dtype == np.bool_:\n data = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0\n elif dtype == np.datetime64:\n data = np.datetime64('1970-01-01T00:00:00', 'ns') + np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)\n dtype = h5py.opaque_dtype(data.dtype)\n data = data.astype(dtype)\n else:\n data = np.frombuffer(rand_rng.bytes(size*np.dtype(dtype).itemsize), dtype=dtype)\n\n # create_dataset options comptability\n if len(shape) > 0:\n chunks = next(iter_chunks)\n else:\n chunks = None\n # compression = None\n # compression_opts = None\n # shuffle = None\n # fletcher32 = None\n # scaleoffset = None\n fillvalue = None if (next(iter_fillvalue) is None or\n data.dtype.char == 'M') else data.reshape(size)[rand_rng.integers(0, size)]\n\n dset = g.create_dataset(\n name='dset'+str(i),\n shape=shape,\n data=data,\n dtype=dtype,\n chunks=chunks,\n maxshape=None if chunks is None else tuple(\n (np.array(shape) + rand_rng.integers(0, 5))*rand_rng.integers(1, 5, size=len(shape))),\n track_times=next(iter_track_times),\n track_order=next(iter_track_order),\n fillvalue=fillvalue\n )\n\n dset_list.append(dset)\n\n # create softlinks to datasets\n for g in group_list:\n for i in range(cls.n_dsetsoftlink):\n # do not use rand_rng.choice\n target_str = srand.choice(dset_list).name\n g[f\"SoftLd{i}\"] = h5py.SoftLink(target_str)\n\n # add attributes\n srand.shuffle(cls.dset_dtypes)\n iter_dtypes = itertools.cycle(cls.dset_dtypes) # shuffle dtypes to cycle over when creating attributes\n for obj in itertools.chain(group_list, dset_list):\n for i in range(rand_rng.integers(cls.n_attributes_min, 26, endpoint=True)):\n dtype = next(iter_dtypes)\n attr_name = chr(97+i)\n if dtype == np.bool_:\n attr = np.frombuffer(rand_rng.bytes(8), dtype=np.int64) > 0\n elif dtype == np.datetime64:\n continue\n else:\n attr = np.frombuffer(rand_rng.bytes(np.dtype(dtype).itemsize), dtype=dtype)\n obj.attrs[attr_name] = attr[0]\n\n # add array attributes\n for i in range(rand_rng.integers(cls.n_attributes_min, 26, endpoint=True)):\n shape = srand.choices(range(1, 10//(i//5 or 1)), k=i//5) # attributes has i//5 
dimensions\n size = np.prod(shape)\n dtype = next(iter_dtypes)\n attr_name = chr(65+i) + '_array_attr'\n if dtype == np.bool_:\n attr = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0\n elif dtype == np.datetime64:\n attr = np.datetime64('1970-01-01T00:00:00', 'ns') + np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)\n attr = attr.astype(h5py.opaque_dtype(attr.dtype))\n else:\n attr = np.frombuffer(rand_rng.bytes(size*np.dtype(dtype).itemsize), dtype=dtype)\n obj.attrs[attr_name] = attr\n\n return hfile", "title": "" }, { "docid": "1dded455ecef2708827c2cc0fa0f736e", "score": "0.5733749", "text": "def _create_file(cls, name):\n\n srand = cls.srand\n\n # create hdf5 file\n cls.temp_file = tempfile.NamedTemporaryFile(suffix=\".hdf5\", prefix=name, delete=False)\n cls.temp_file.close()\n hfile = h5py.File(cls.temp_file.name, 'w')\n\n # create nested groups\n groupnames_prefix = [chr(65+i)for i in range(cls.n_groups)] # e.g. ['A', 'B', 'C']\n group_list = [hfile] # list containing all groups\n\n def _create_groups(obj, d):\n nonlocal group_list\n\n for c in groupnames_prefix:\n g_name = c + str(cls.depth - d)\n g = obj.create_group(g_name)\n group_list.append(g)\n if d > 0:\n _create_groups(obj[g_name], d-1)\n\n _create_groups(hfile, cls.depth)\n\n # create softlinks to groups\n for g in group_list:\n for i in range(cls.n_groupsoftlink):\n # do not use rand_rng.choice\n target_str = srand.choice(group_list).name\n g[f\"SoftLg{i}\"] = h5py.SoftLink(target_str)\n\n # create datasets\n # TO DO, external dsets\n # TO DO, compression\n srand.shuffle(cls.dset_dtypes)\n iter_dtypes = itertools.cycle(cls.dset_dtypes) # shuffle dtypes to cycle over when creating dsets\n iter_chunks = itertools.cycle([True, None]) # True or False cycle for auto chunking\n iter_track_times = itertools.cycle([False, True]) # True or False cycle for track_times\n iter_track_order = itertools.cycle([False, False, True, True]) # True or False cycle for track_order\n iter_fillvalue = itertools.cycle([None, True, True, None]) # True or False cycle for track_order\n rand_rng = np.random.default_rng()\n dset_list = []\n for g in group_list:\n # TO DO, add test with datasets with zero in dimensions\n for i in range(cls.n_dsets):\n shape = srand.choices(range(1, 90//(i or 1)), k=i) # dseti has i dimensions\n size = np.prod(shape)\n dtype = next(iter_dtypes)\n if dtype == np.bool_:\n data = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0\n elif dtype == np.datetime64:\n data = np.datetime64('1970-01-01T00:00:00', 'ns') + np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)\n dtype = h5py.opaque_dtype(data.dtype)\n data = data.astype(dtype)\n else:\n data = np.frombuffer(rand_rng.bytes(size*np.dtype(dtype).itemsize), dtype=dtype)\n\n # create_dataset options comptability\n if len(shape) > 0:\n chunks = next(iter_chunks)\n else:\n chunks = None\n # compression = None\n # compression_opts = None\n # shuffle = None\n # fletcher32 = None\n # scaleoffset = None\n fillvalue = None if (next(iter_fillvalue) is None or\n data.dtype.char == 'M') else data.reshape(size)[rand_rng.integers(0, size)]\n\n dset = g.create_dataset(\n name='dset'+str(i),\n shape=shape,\n data=data,\n dtype=dtype,\n chunks=chunks,\n maxshape=None if chunks is None else tuple(\n (np.array(shape) + rand_rng.integers(0, 5))*rand_rng.integers(1, 5, size=len(shape))),\n track_times=next(iter_track_times),\n track_order=next(iter_track_order),\n fillvalue=fillvalue\n )\n\n dset_list.append(dset)\n\n # create variable length string datasets\n for g 
in group_list:\n for i in range(cls.n_vlenstringdset):\n k = i + srand.randint(0, cls.n_dsetmaxdim)\n shape = srand.choices(range(1, int(60**(1/(k or 1)))+1), k=k) # dseti has k dimensions\n size = int(np.prod(shape))\n dtype = h5py.string_dtype(encoding='utf-8')\n str_len = np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64) % cls.n_vlenstringdsetmaxlen\n data = np.array([''.join([chr(rand_rng.integers(0x0020, 0x03ff)) for _ in range(i)])\n for i in str_len], dtype=dtype)\n\n # create_dataset options comptability\n if len(shape) > 0:\n chunks = next(iter_chunks)\n else:\n chunks = None\n # compression = None\n # compression_opts = None\n # shuffle = None\n # fletcher32 = None\n # scaleoffset = None\n fillvalue = None\n\n dset = g.create_dataset(\n name='dsetvlenstring'+str(i),\n shape=shape,\n data=data,\n dtype=dtype,\n chunks=chunks,\n maxshape=None if chunks is None else tuple(\n (np.array(shape) + rand_rng.integers(0, 5))*rand_rng.integers(1, 5, size=len(shape))),\n track_times=next(iter_track_times),\n track_order=next(iter_track_order),\n fillvalue=fillvalue\n )\n\n dset_list.append(dset)\n\n # create struct array datasets\n for g in group_list:\n # TO DO, add test with datasets with zero in dimensions\n for i in range(cls.n_structarrayregulardset):\n k = i + srand.randint(0, cls.n_dsetmaxdim)\n shape = srand.choices(range(1, 90//(k or 1)), k=k) # dseti has k dimensions\n size = int(np.prod(shape))\n dtype = [(chr(97+j), next(iter_dtypes)) for j in range(cls.n_structarraydtypelen)]\n data = np.empty(shape=shape, dtype=dtype)\n for j in range(len(dtype)):\n dt_name, dt = dtype[j]\n if dt == np.bool_:\n data_ = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0\n elif dt == np.datetime64:\n data_ = np.datetime64('1970-01-01T00:00:00', 'ns')+np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)\n dtype[j] = (dt_name, h5py.opaque_dtype(data_.dtype))\n data_ = data_.astype(dtype[j][1])\n else:\n data_ = np.frombuffer(rand_rng.bytes(size*np.dtype(dt).itemsize), dtype=dt)\n\n data[dt_name] = data_.reshape(shape)\n\n # create_dataset options comptability\n if len(shape) > 0:\n chunks = next(iter_chunks)\n else:\n chunks = None\n # compression = None\n # compression_opts = None\n # shuffle = None\n # fletcher32 = None\n # scaleoffset = None\n fillvalue = None if (next(iter_fillvalue) is None or\n data.dtype.char == 'M') else data.reshape(size)[rand_rng.integers(0, size)]\n\n dset = g.create_dataset(\n name='dsetstructarray'+str(i),\n shape=shape,\n data=data,\n dtype=dtype,\n chunks=chunks,\n maxshape=None if chunks is None else tuple(\n (np.array(shape) + rand_rng.integers(0, 5))*rand_rng.integers(1, 5, size=len(shape))),\n track_times=next(iter_track_times),\n track_order=next(iter_track_order),\n fillvalue=fillvalue\n )\n\n dset_list.append(dset)\n\n # create object reference datasets\n for g in group_list:\n for i in range(cls.n_objectrefdset):\n k = i + srand.randint(0, cls.n_objectrefdsetmaxdim)\n shape = srand.choices(range(1, int(60**(1/(k or 1)))+1), k=k) # dseti has k dimensions\n size = int(np.prod(shape))\n dtype = h5py.ref_dtype\n\n obj_list = dset_list + group_list\n data = np.array([srand.choice(obj_list).ref for _ in range(size)])\n\n # create_dataset options comptability\n if len(shape) > 0:\n chunks = next(iter_chunks)\n else:\n chunks = None\n # compression = None\n # compression_opts = None\n # shuffle = None\n # fletcher32 = None\n # scaleoffset = None\n\n fillvalue = None\n\n dset = g.create_dataset(\n name='dsetobjref'+str(i),\n shape=shape,\n data=data,\n 
dtype=dtype,\n chunks=chunks,\n maxshape=None if chunks is None else tuple(\n (np.array(shape) + rand_rng.integers(0, 5))*rand_rng.integers(1, 5, size=len(shape))),\n track_times=next(iter_track_times),\n track_order=next(iter_track_order),\n fillvalue=fillvalue\n )\n\n dset_list.append(dset)\n\n # create struct array datasets with object reference\n for g in group_list:\n # TO DO, add test with datasets with zero in dimensions\n for i in range(cls.n_structarraywithobjrefdset):\n k = i + srand.randint(0, cls.n_objectrefdsetmaxdim)\n shape = srand.choices(range(1, int(60**(1/(k or 1)))+1), k=k) # dseti has i dimensions\n size = int(np.prod(shape))\n dtypeobjind = rand_rng.choice(range(cls.n_structarraydtypelen), size=cls.n_structarrayobjrefdtype, replace=False)\n dtype = [(chr(97+j), h5py.ref_dtype if j in dtypeobjind else next(iter_dtypes))\n for j in range(cls.n_structarraydtypelen)]\n\n data = np.empty(shape=shape, dtype=dtype)\n for j in range(len(dtype)):\n dt_name, dt = dtype[j]\n if dt == h5py.ref_dtype:\n obj_list = dset_list + group_list\n data_ = np.array([srand.choice(obj_list).ref for _ in range(size)])\n elif dt == np.bool_:\n data_ = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0\n elif dt == np.datetime64:\n data_ = np.datetime64('1970-01-01T00:00:00', 'ns')+np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)\n dtype[j] = (dt_name, h5py.opaque_dtype(data_.dtype))\n data_ = data_.astype(dtype[j][1])\n else:\n data_ = np.frombuffer(rand_rng.bytes(size*np.dtype(dt).itemsize), dtype=dt)\n\n data[dt_name] = data_.reshape(shape)\n\n # create_dataset options comptability\n if len(shape) > 0:\n chunks = next(iter_chunks)\n else:\n chunks = None\n # compression = None\n # compression_opts = None\n # shuffle = None\n # fletcher32 = None\n # scaleoffset = None\n fillvalue = None if (next(iter_fillvalue) is None or\n data.dtype.char == 'M') else data.reshape(size)[rand_rng.integers(0, size)]\n\n dset = g.create_dataset(\n name='dsetstructarraywobjref'+str(i),\n shape=shape,\n data=data,\n dtype=dtype,\n chunks=chunks,\n maxshape=None if chunks is None else tuple(\n (np.array(shape) + rand_rng.integers(0, 5))*rand_rng.integers(1, 5, size=len(shape))),\n track_times=next(iter_track_times),\n track_order=next(iter_track_order),\n fillvalue=fillvalue\n )\n\n dset_list.append(dset)\n\n # create softlinks to datasets\n for g in group_list:\n for i in range(cls.n_dsetsoftlink):\n # do not use rand_rng.choice\n target_str = srand.choice(dset_list).name\n g[f\"SoftLd{i}\"] = h5py.SoftLink(target_str)\n\n # add attributes\n srand.shuffle(cls.dset_dtypes)\n iter_dtypes = itertools.cycle(cls.dset_dtypes) # shuffle dtypes to cycle over when creating attributes\n for obj in itertools.chain(group_list, dset_list):\n for i in range(rand_rng.integers(cls.n_attributes_min, 26, endpoint=True)):\n dtype = next(iter_dtypes)\n attr_name = chr(97+i)\n if dtype == np.bool_:\n attr = np.frombuffer(rand_rng.bytes(8), dtype=np.int64) > 0\n elif dtype == np.datetime64:\n continue\n else:\n attr = np.frombuffer(rand_rng.bytes(np.dtype(dtype).itemsize), dtype=dtype)\n obj.attrs[attr_name] = attr[0]\n\n # add array attributes\n for i in range(rand_rng.integers(cls.n_attributes_min, 26, endpoint=True)):\n shape = srand.choices(range(1, 10//(i//5 or 1)), k=i//5) # attributes has i//5 dimensions\n size = np.prod(shape)\n dtype = next(iter_dtypes)\n attr_name = chr(65+i) + '_array_attr'\n if dtype == np.bool_:\n attr = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0\n elif dtype == np.datetime64:\n 
attr = np.datetime64('1970-01-01T00:00:00', 'ns') + np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)\n attr = attr.astype(h5py.opaque_dtype(attr.dtype))\n else:\n attr = np.frombuffer(rand_rng.bytes(size*np.dtype(dtype).itemsize), dtype=dtype)\n obj.attrs[attr_name] = attr\n\n return hfile", "title": "" }, { "docid": "9ce6768dd96d004bbeaa47c8c3364f95", "score": "0.57286024", "text": "def h5fields2d(folder, h5path=None, fld_ids = ['Ex','Ey','Ez','Bx','By','Bz'], pool = None):\n if not h5path:\n h5path = os.path.join(folder, 'fields2d.hdf5')\n fns = ls.listp4(folder)\n nfiles = len(fns)\n print('Total number of files: ' + str(len(fns)))\n\n print(\"Opening the HDF5 file\")\n with h5py.File(h5path,'w') as f:\n # Read all the files into RAM\n print(\"Reading files into NumPy arrays in RAM\")\n data = fields2d(fns, fld_ids=fld_ids, pool=pool)\n # Build the HDF5 file, assuming every element in \"data\" is a NumPy array\n print(\"Saving arrays in RAM to HDF5\")\n for k in data:\n f.create_dataset(k, data = data[k], compression='gzip', compression_opts=4)\n print(\"All done!\")\n return h5path", "title": "" }, { "docid": "7ffe75a9b57de50d035ea275566d5820", "score": "0.57218087", "text": "def export(self):\n pass", "title": "" }, { "docid": "e7a2d2837377e5fffa5435621c5e2a73", "score": "0.56739837", "text": "def __dumpToHDF5(self, file_name: str = None, mode: str ='a') -> None:\n if file_name is None:\n file_name = os.path.join(self.file_path, self.file_name)\n else:\n file_name = os.path.join(self.file_path, file_name)\n\n with h5py.File(file_name, \"w\") as f:\n for attr, value in self.__dict__.items():\n if isinstance(value, str): # Skip file_name and file_path attrib\n continue\n # Check if data-set is already in h5file\n if f.__contains__(attr):\n if mode == 'a': # Append mode\n data0 = list(f[attr])\n del f[attr]\n value = data0 + value\n else: # Write\n del f[attr]\n\n f.create_dataset(attr, data=np.array(value))", "title": "" }, { "docid": "62038e7a99f95b84fc5a730490c6c2b5", "score": "0.5671383", "text": "def export(self):\n\n #data_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '/data/Similarity/'\n data_path = slash.join(vand_folder_path + ['sim_matrix.txt'])\n with open(data_path, 'w') as file:\n json.dump(self.similarity_dict, file)", "title": "" }, { "docid": "0856828480b43b95c3ccc9aa99e30a8d", "score": "0.5667656", "text": "def export_df( df, path, name = 'df' ):\r\n path = os.path.join( path, name )\r\n df.to_csv( path + '.csv' )\r\n df.to_pickle( path + '.pkl' )", "title": "" }, { "docid": "8dfa2652b073819e8f0a21b3a1a5784b", "score": "0.5666998", "text": "def pandas2hdf(df, out_file, key=\"data\", format=\"fixed\"):\n pandas2file(partial(df.to_hdf, key=key, format=format, mode='w'), out_file)", "title": "" }, { "docid": "40b54caef6505729ad867b4a221eefc8", "score": "0.5665254", "text": "def toHDF(self, directory: str, name: str, overwrite: bool = False, compression: str = 'gzip'):\n super().toHDF(directory, name, overwrite=overwrite, compression=compression)", "title": "" }, { "docid": "6bdcb027f7765371292fa16fadf1a50a", "score": "0.56621575", "text": "def build_hdf5_store(self, filename='mgxs.h5', directory='mgxs',\n subdomains='all', nuclides='all', xs_type='macro'):\n\n if self.sp_filename is None:\n msg = 'Unable to export multi-group cross section library ' \\\n 'since a statepoint has not yet been loaded'\n raise ValueError(msg)\n\n cv.check_type('filename', filename, basestring)\n cv.check_type('directory', directory, basestring)\n\n import 
h5py\n\n # Make directory if it does not exist\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Add an attribute for the number of energy groups to the HDF5 file\n full_filename = os.path.join(directory, filename)\n full_filename = full_filename.replace(' ', '-')\n f = h5py.File(full_filename, 'w')\n f.attrs[\"# groups\"] = self.num_groups\n f.close()\n\n # Export MGXS for each domain and mgxs type to an HDF5 file\n for domain in self.domains:\n for mgxs_type in self.mgxs_types:\n mgxs = self.all_mgxs[domain.id][mgxs_type]\n\n if subdomains == 'avg':\n mgxs = mgxs.get_subdomain_avg_xs()\n\n mgxs.build_hdf5_store(filename, directory,\n xs_type=xs_type, nuclides=nuclides)", "title": "" }, { "docid": "27b41d2cd4a2e83e81d3403f839c51c5", "score": "0.5661093", "text": "def saveH5pyData(data_mappings, target_file_path, chunk_size):\n gen = generatorForH5py(data_mappings,chunk_size)\n \n lidar_data_chunk, distance_chunk, labels_chunk = next(gen)\n row_count = lidar_data_chunk.shape[0]\n\n checkAndCreateDir(target_file_path)\n with h5py.File(target_file_path, 'w') as f:\n\n # Initialize a resizable dataset to hold the output\n lidar_data_chunk_maxshape = (None,) + lidar_data_chunk.shape[1:]\n distance_chunk_maxshape = (None,) + distance_chunk.shape[1:]\n labels_chunk_maxshape = (None,) + labels_chunk.shape[1:]\n\n dset_lidar_data = f.create_dataset('lidar_data', shape=lidar_data_chunk.shape, maxshape=lidar_data_chunk_maxshape,\n chunks=lidar_data_chunk.shape, dtype=lidar_data_chunk.dtype)\n\n dset_distances = f.create_dataset('distance', shape=distance_chunk.shape, maxshape=distance_chunk_maxshape,\n chunks=distance_chunk.shape, dtype=distance_chunk.dtype)\n \n dset_labels = f.create_dataset('label', shape=labels_chunk.shape, maxshape=labels_chunk_maxshape,\n chunks=labels_chunk.shape, dtype=labels_chunk.dtype)\n \n dset_lidar_data[:] = lidar_data_chunk\n dset_distances[:] = distance_chunk\n dset_labels[:] = labels_chunk\n\n for lidar_data_chunk, distance_chunk, labels_chunk in gen:\n \n # Resize the dataset to accommodate the next chunk of rows\n dset_lidar_data.resize(row_count + lidar_data_chunk.shape[0], axis=0)\n dset_distances.resize(row_count + distance_chunk.shape[0], axis=0)\n dset_labels.resize(row_count + labels_chunk.shape[0], axis=0)\n # Create the next chunk\n dset_lidar_data[row_count:] = lidar_data_chunk\n dset_distances[row_count:] = distance_chunk\n dset_labels[row_count:] = labels_chunk\n\n # Increment the row count\n row_count += lidar_data_chunk.shape[0]", "title": "" }, { "docid": "799a0daf08464a2dab8fb012a348bc2b", "score": "0.56556773", "text": "def dataset_to_hdf(self, flname='default', key='default', format='table', verbose=True):\n if flname == 'default':\n flname = self._name+'_%s-%s'%(self.years[0], self.years[-1])+'_SITC-L%s'%(self.level)+'_dataset.h5'\n if key == 'default':\n key = self._name\n if verbose: print \"[INFO] Saving dataset to: %s\" % flname\n self.dataset.to_hdf(flname, key='dataset', mode='w', complevel=9, complib='zlib', format=format)\n gc.collect()", "title": "" }, { "docid": "97299529b0a89162502d93d100bc1280", "score": "0.56482804", "text": "def to_hdf(self, hdf, force_update=False):\n\n if len(self._parse_dict[\"scf_energy_zero\"]) == 0:\n self._parse_dict[\"scf_energy_zero\"] = [\n (0.5 * (np.array(fr) + np.array(en))).tolist()\n for fr, en in zip(\n self._parse_dict[\"scf_energy_free\"],\n self._parse_dict[\"scf_energy_int\"],\n )\n ]\n with hdf.open(\"input\") as hdf5_input:\n with hdf5_input.open(\"generic\") as hdf5_generic:\n 
if \"dft\" not in hdf5_generic.list_groups():\n hdf5_generic.create_group(\"dft\")\n with hdf5_generic.open(\"dft\") as hdf5_dft:\n if (\n len(self._parse_dict[\"atom_spin_constrains\"]) > 0\n and \"atom_spin_constraints\" not in hdf5_dft.list_nodes()\n ):\n hdf5_dft[\"atom_spin_constraints\"] = [\n self._parse_dict[\"atom_spin_constrains\"]\n ]\n\n with hdf.open(\"output\") as hdf5_output:\n if \"sphinx\" not in hdf5_output.list_groups():\n hdf5_output.create_group(\"sphinx\")\n with hdf5_output.open(\"sphinx\") as hdf5_sphinx:\n hdf5_sphinx[\"bands_occ_initial\"] = self._parse_dict[\"bands_occ_initial\"]\n hdf5_sphinx[\"bands_eigen_values_initial\"] = self._parse_dict[\n \"bands_eigen_values_initial\"\n ]\n with hdf5_output.open(\"generic\") as hdf5_generic:\n if \"dft\" not in hdf5_generic.list_groups():\n hdf5_generic.create_group(\"dft\")\n with hdf5_generic.open(\"dft\") as hdf5_dft:\n hdf5_dft[\"scf_convergence\"] = self._parse_dict[\"scf_convergence\"]\n for k in [\n \"scf_residue\",\n \"scf_energy_free\",\n \"scf_energy_zero\",\n \"scf_energy_int\",\n \"scf_electronic_entropy\",\n \"scf_energy_band\",\n \"scf_magnetic_forces\",\n \"scf_computation_time\",\n \"bands_occ\",\n \"bands_e_fermi\",\n \"bands_k_weights\",\n \"bands_eigen_values\",\n \"atom_scf_spins\",\n \"n_valence\",\n ]:\n if len(self._parse_dict[k]) > 0:\n hdf5_dft[k] = self._parse_dict[k]\n if \"scf_\" in k:\n hdf5_dft[k.replace(\"scf_\", \"\")] = np.array(\n [vv[-1] for vv in self._parse_dict[k]]\n )\n if len(self._parse_dict[\"scf_computation_time\"]) > 0:\n hdf5_generic[\"computation_time\"] = np.array(\n [tt[-1] for tt in self._parse_dict[\"scf_computation_time\"]]\n )\n if len([en[-1] for en in self._parse_dict[\"scf_energy_free\"]]) > 0:\n hdf5_generic[\"energy_tot\"] = np.array(\n [en[-1] for en in self._parse_dict[\"scf_energy_free\"]]\n )\n hdf5_generic[\"energy_pot\"] = np.array(\n [en[-1] for en in self._parse_dict[\"scf_energy_free\"]]\n )\n hdf5_generic[\"volume\"] = self._parse_dict[\"volume\"]\n if \"positions\" not in hdf5_generic.list_nodes() or force_update:\n if len(self._parse_dict[\"positions\"]) > 0:\n hdf5_generic[\"positions\"] = np.array(\n self._parse_dict[\"positions\"]\n )\n elif len(self._parse_dict[\"scf_convergence\"]) == 1:\n hdf5_generic[\"positions\"] = np.array(\n [self._job.structure.positions]\n )\n if (\"forces\" not in hdf5_generic.list_nodes() or force_update) and len(\n self._parse_dict[\"forces\"]\n ) > 0:\n hdf5_generic[\"forces\"] = np.array(self._parse_dict[\"forces\"])\n if \"cells\" not in hdf5_generic.list_nodes() or force_update:\n if len(self._parse_dict[\"cell\"]) > 0:\n hdf5_generic[\"cells\"] = np.array(self._parse_dict[\"cell\"])\n elif len(self._parse_dict[\"scf_convergence\"]) == 1:\n hdf5_generic[\"cells\"] = np.array([self._job.structure.cell])", "title": "" }, { "docid": "d89bc216fc8d3fe034cfa71ce3d86eb1", "score": "0.56447655", "text": "def export(self, filename, **attrs):", "title": "" }, { "docid": "ee0481fc50ba1dbfe2f8fd71bd91ba49", "score": "0.5642443", "text": "def save_model_to_h5(model_obj, model_file_path):\n if not model_file_path.endswith('.h5'):\n model_file_path = f'{model_file_path}.h5'\n model_obj.save(model_file_path)\n logging.info(f'Model saved to {model_file_path}')", "title": "" }, { "docid": "1bab22a2f6c7babc6572816d204269aa", "score": "0.56364113", "text": "def raw_export_data(self):\n filename = 'data_export_%s.csv' % self.name\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f, delimiter=',', quotechar='\"', 
quoting=csv.QUOTE_MINIMAL)\n base_header = ColumnData.generate_header_row()\n header = []\n # Generate the Header Row\n for j in self.joints.keys():\n for h in base_header:\n header.append('%s %s' % (j, h))\n csv_writer.writerow(['framenumber'] + header)\n # Write the Calibration zeros to the file\n output_list = ['calibration']\n # The Header also contains the information to export in each column\n for v in header:\n tmp = v.split(' ')\n if len(tmp) == 2:\n # Header is <joint> angle\n output_list.append(self.calibration_zeros[tmp[0]].angle)\n else:\n # Header is <joint> <plane> <data type>\n output_list.append(self.calibration_zeros[tmp[0]].data[tmp[2]][tmp[1]])\n\n csv_writer.writerow(output_list)\n for i in range(1, self.frame_count + 1):\n output_list = [i]\n for v in header:\n tmp = v.split(' ')\n if len(tmp) == 2:\n # Header is <joint> angle\n output_list.append(self.joints[tmp[0]].joint_data[i].angle)\n else:\n # Header is <joint> <plane> <data type>\n output_list.append(self.joints[tmp[0]].joint_data[i].data[tmp[2]][tmp[1]])\n\n csv_writer.writerow(output_list)", "title": "" }, { "docid": "6d55252bc78acbc41f34cb3a454d37f8", "score": "0.5626459", "text": "def write_hdf5(self, file_name = None, mode = 'a'):\n\n if not file_name: file_name = self.model.h5_file_name()\n if self.uuid is None: self.uuid = bu.new_uuid()\n # NB: patch arrays must all have been set up prior to calling this function\n h5_reg = rwh5.H5Register(self.model)\n for patch_index in range(self.patch_count):\n h5_reg.register_dataset(self.uuid, 'points_{}'.format(patch_index), self.patch_array_list[patch_index])\n h5_reg.write(file_name, mode = mode)", "title": "" }, { "docid": "fd7833311a1f133dbcc3e1d180b5699e", "score": "0.5623412", "text": "def generateData(self, dmd):", "title": "" }, { "docid": "ed5f284e37af2c4ebc87cb926b1a9d3d", "score": "0.5622219", "text": "def saveReconstructedH5(savePath, result, metadata=None, xAxis='X'):\n if metadata is None:\n metadata = []\n makeSaveDirs(os.path.dirname(savePath))\n with h5py.File(savePath, 'w') as h5Out:\n dsetResult = h5Out.create_dataset('entry_0000/data', result.shape, dtype='f')\n dsetResult[...] = result\n dsetMetadata = h5Out.create_dataset('entry_0000/%s' % xAxis, [len(metadata)], dtype='f')\n dsetMetadata[...] = metadata\n print('[INFO] %s saved!' 
% savePath)", "title": "" }, { "docid": "f29aa8ea2fdb0bf1cac011190a6a4850", "score": "0.5616931", "text": "def get_model_to_export(self, model, dataset):", "title": "" }, { "docid": "14f1ff6cbdb8bf76d8480bdd04bd3936", "score": "0.56163716", "text": "def to_file(self, filename):\n mdf_file = self.export()\n with open(filename, 'w') as f:\n f.writelines(mdf_file)", "title": "" }, { "docid": "4cae60f7fe71896c7e1e808d5430c9fe", "score": "0.55939394", "text": "def dump(self, model_name, **kwargs):\n num_add = 1\n file_saved = self.filename\n lock = Lock()\n lock.acquire()\n \n if not self.overwrite:\n while os.path.exists(file_saved):\n file_saved = self._modify_filename(self.filename, model_name, num_add)\n num_add += 1\n print('HDF5 FILE NAME %s' %(file_saved))\n \n with h5py.File(self.filename, 'w') as h5f:\n for key, item in kwargs.items():\n try:\n h5f.create_dataset(key, data=np.asarray(item))\n except TypeError:\n dt = h5py.special_dtype(vlen=bytes)\n h5f.create_dataset(key, data=item, dtype=dt)\n lock.release()\n return", "title": "" }, { "docid": "d5d03a747d48469589f5792b6b96e275", "score": "0.55919236", "text": "def writeXdmf(dims,dx,filename,h5_file):\n\n f = open(\"./parafiles/\" + filename,'w')\n f.write('<?xml version=\"1.0\" ?>\\n')\n f.write('<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\\n')\n f.write('<Xdmf xmlns:xi=\"http://www.w3.org/2003/XInclude\" Version=\"2.1\">\\n')\n f.write('<Domain>\\n')\n\n f.write('<Grid Name=\"my_Grid\" GridType=\"Uniform\">\\n')\n f.write('<Topology TopologyType=\"3DCoRectMesh\" Dimensions=\"%i %i %i\">\\n'%(dims[0],dims[1],dims[2]))\n f.write('</Topology>\\n')\n\n f.write('<Geometry GeometryType=\"Origin_DxDyDz\">\\n')\n f.write('<DataItem Dimensions=\"3\" NumberType=\"Integer\" Format=\"XML\">\\n')\n f.write('0 0 0\\n') \n f.write('</DataItem>\\n')\n f.write('<DataItem Dimensions=\"3\" NumberType=\"Integer\" Format=\"XML\">\\n')\n f.write('%g %g %g\\n'%(dx,dx,dx))\n f.write('</DataItem>\\n')\n f.write('</Geometry>\\n')\n\n f.write('<Attribute Name=\"state\" AttributeType=\"Scalar\" Center=\"Node\">\\n')\n f.write('<DataItem Dimensions=\"%i %i %i\" NumberType=\"Integer\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n f.write('%s:/state_group/state\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('</Attribute>\\n')\n\n #f.write('<Attribute Name=\"rank\" AttributeType=\"Scalar\" Center=\"Node\">\\n')\n #f.write('<DataItem Dimensions=\"%i %i %i\" NumberType=\"Integer\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n #f.write('%s:/rank_group/rank\\n'%h5_file)\n #f.write('</DataItem>\\n')\n #f.write('</Attribute>\\n')\n\n f.write('</Grid>\\n')\n f.write('</Domain>\\n')\n f.write('</Xdmf>\\n')\n\n f.close()", "title": "" }, { "docid": "f00523196ede0bea08556f24c0638cf6", "score": "0.5574306", "text": "def export(self):\n self.create_data()\n if self.median_line:\n self.create_median()\n if self.figs:\n self.data += self.figs\n return self.data", "title": "" }, { "docid": "0d6bf3602d6996987d0349603d7ccd2e", "score": "0.55690783", "text": "def build_hdf5_database(self):\n # TODO: Make sure we've processed a configuration\n self.parse_nodes()\n self.parse_relationships()", "title": "" }, { "docid": "05439144702fc7658400a9932b4c8fbe", "score": "0.5563964", "text": "def _write_hdf_dataset(self, hdf_path, key: str = \"emulsion\"):\n if self:\n # emulsion contains at least one droplet\n dataset = hdf_path.create_dataset(key, data=self.data)\n # self.data ensures that there is only one droplet class\n dataset.attrs[\"droplet_class\"] = 
self[0].__class__.__name__\n\n else:\n # create empty dataset to indicate empty emulsion\n dataset = hdf_path.create_dataset(key, shape=tuple())\n dataset.attrs[\"droplet_class\"] = \"None\"\n\n return dataset", "title": "" }, { "docid": "3fef05124bed2a1595210229adafb840", "score": "0.5562115", "text": "def export(self, *args, **kwargs):", "title": "" }, { "docid": "3fef05124bed2a1595210229adafb840", "score": "0.5562115", "text": "def export(self, *args, **kwargs):", "title": "" } ]
22ee0c05930fea1d2164d13242a82399
GIVEN a source path to images files, extensions and a valid camera model WHEN the method is called THEN return a list of images files names that has such model in the exif metadata
[ { "docid": "4f012ed8064f0314e4995ff96e56b3aa", "score": "0.705926", "text": "def test_fetch_exif_images_by_model():\n files = c.fetch_images_by_camera_model(\"/home/urra/projects/pto/tests/data/dated_images/\", [\".jpg\", \".JPG\"],\"iPhone 4\")\n for file in files:\n assert c.is_exif_model(file,\"iPhone 4\") == True", "title": "" } ]
[ { "docid": "69e1f335d6291ec8d45ac999c6f8da78", "score": "0.74459547", "text": "def test_fetch_exif_models():\n cameras = []\n files = c.read_src_files(\"/home/urra/projects/pto/tests/data/dated_images/\")\n for file in files:\n model = c.get_exif_attribute(file,\"model\")\n if not model in cameras:\n cameras.append(model)\n print(cameras)\n return", "title": "" }, { "docid": "78dd3997306f9089dbc2e535d2ce3684", "score": "0.6277022", "text": "def get_imlist(path):\n return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.jpeg')]", "title": "" }, { "docid": "fb9eca0a2502dde808c8e59b3197e766", "score": "0.61666477", "text": "def extract_multiple_exif(paths: List[Union[Path, str]]) -> list:\n with ExifTool() as exiftool:\n return exiftool.get_metadata_batch([str(path) for path in paths])", "title": "" }, { "docid": "eead8c3233092dbebda866302c37b423", "score": "0.616533", "text": "def _extract_filenames(self, pathname):\n if not os.path.exists(pathname):\n raise IOError('invalid path : %s' % pathname)\n elif not os.path.isfile(pathname):\n raise IOError('%s is not a file' % pathname)\n lines = open(pathname, 'r').readlines()\n return [line.split('\\n')[0] + '.jpg' for line in lines]", "title": "" }, { "docid": "b10a9dceae4b27b814d66697b68fdd57", "score": "0.6157443", "text": "def get_image_filenames(imagepath):\n image_files = []\n valid_extensions = ['jpg','jpeg','png']\n for fn in os.listdir(imagepath):\n if fn[0]=='.':\n # skip Mac thumbs\n continue\n parts = fn.split('.')\n for ext in valid_extensions:\n if parts[-1].lower() == ext:\n image_files.append(fn)\n return image_files", "title": "" }, { "docid": "61c34f35e49f50e347e114434524f943", "score": "0.5997755", "text": "def get_names_and_extensions(image_dir):\n\tfor root, _, files in os.walk(image_dir, topdown = True):\n\t\treturn [os.path.splitext(x) for x in files]", "title": "" }, { "docid": "421e9c849432518c801672a94c8fd08b", "score": "0.598656", "text": "def _get_model_filenames(model_dir):\n\n files = os.listdir(model_dir)\n meta_files = [s for s in files if s.endswith('.meta')]\n if len(meta_files) == 0:\n raise ValueError('No meta file found in the model directory (%s)' % model_dir)\n elif len(meta_files) > 1:\n raise ValueError('There should not be more than one meta file in\\\n the model directory (%s)'\n % model_dir)\n meta_file = meta_files[0]\n ckpt = tf.train.get_checkpoint_state(model_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_file = os.path.basename(ckpt.model_checkpoint_path)\n return meta_file, ckpt_file\n\n meta_files = [s for s in files if '.ckpt' in s]\n max_step = -1\n for f in files:\n step_str = re.match(r'(^model-[\\w\\- ]+.ckpt-(\\d+))', f)\n if step_str is not None and len(step_str.groups()) >= 2:\n step = int(step_str.groups()[1])\n if step > max_step:\n max_step = step\n ckpt_file = step_str.groups()[0]\n return meta_file, ckpt_file", "title": "" }, { "docid": "b9984f1e152c96b0bc84b84d4b1016f1", "score": "0.5980995", "text": "def list_input_files(self, job_spec=None):\n trace_log = logging.getLogger(\"trace\")\n trace_log.info(\"enter\")\n log = logging.getLogger(\"base\")\n src_paths = []\n for in_dir in self.conf.get_dir(\"im_sources\"):\n for root, dirs, files in os.walk(in_dir):\n for file_name in files:\n file_path = os.path.join(root, file_name)\n if self.check_file_type(foo, file_path):\n src_paths.append(file_path)\n log.warn(\"prints to follow\")\n print (\"listed {} files; starting with: \\n {}\"\n \"\".format(len(src_paths), src_paths[0]))\n trace_log.error(\"early 
return\")\n return\n\n base_dir = \".\"\n in_media = \"raw-media\"\n in_dir = os.path.join(base_dir, in_media)\n out_dir = os.path.join(base_dir, \"preview\")\n out_dir = out_dir\n image_paths = []\n for root, dirs, files in os.walk(in_dir):\n for file_name in files:\n file_path = os.path.join(root, file_name)\n if self.check_file_type(foo, file_path):\n image_paths.append(os.path.join(root, file_name))\n #ext = os.path.splitext(file_name.lower())[-1][1:]\n #if \"jpg\" == ext:\n\n print \"file extensions\"\n \"jpg\"\n #image_tool = \"gm convert\"\n #image_params = \"-resize\", \"600x600\"\n #image_output = \"preview\"\n trace_log.info(\"exit\")\n pass", "title": "" }, { "docid": "84c03c9cdc41f816cec612ca02e3f956", "score": "0.59300333", "text": "def get_file_names():\n # The names are needed to display messages in consoles\n video_names = []\n comics_names = []\n for dirPath, dirNames, fileNames in os.walk(src):\n for file in fileNames:\n if file.endswith(\".mkv\") or file.endswith(\".mp4\"):\n video_names.append(file)\n if file.endswith(\".cbr\") or file.endswith(\".cbz\"):\n comics_names.append(file)\n return video_names, comics_names", "title": "" }, { "docid": "1737c9bd2acb09f88a337b651be6dfd2", "score": "0.58922255", "text": "def get_all_input_files(source_dir, input_files_types=['.JPG', '.jpg', '.png']):\n return find_files_with_ext(source_dir, input_files_types)", "title": "" }, { "docid": "16b35c30a6915ff538aa345587dc35c7", "score": "0.58922106", "text": "def get_file_names_by_ext(path, extension='pkl'):\n files_grabbed = []\n files_grabbed.extend(glob.glob(path + os.sep + \"*.\" + extension))\n return files_grabbed", "title": "" }, { "docid": "58c855e93a19074b9e49d0912ca287ae", "score": "0.5874856", "text": "def fileSearch():\r\n\tfileEntry=[]\r\n\tfor file in os.listdir(\"SampleGestures\"):\r\n\t if file.endswith(\".png\"):\r\n\t \tfileEntry.append(file)\r\n\treturn fileEntry", "title": "" }, { "docid": "7e1fa07653ba5fd80b63214b521ebc20", "score": "0.5872979", "text": "def create_image_list(target_path: str, ext: str = 'jpg') -> List[Path]:\n image_paths = sorted(list(Path(target_path).glob(f'*.{ext}')))\n logger.info(f'{len(image_paths)} {ext} images were found in {target_path}')\n return image_paths", "title": "" }, { "docid": "22378e4d7431e91e15df5ad1942769a0", "score": "0.5871151", "text": "def get_file_names(image_folder_path_):\n global file_names\n\n all_file_names = os.listdir(image_folder_path_)\n file_names = []\n for file in all_file_names:\n if file.endswith(\".jpeg\") or file.endswith(\".jpg\") or file.endswith(\".png\"):\n file_names.append(file)", "title": "" }, { "docid": "004e84c44cbf4fc40bf3a8ce4287699e", "score": "0.58688885", "text": "def getImList(path):\n return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "title": "" }, { "docid": "55b84133789d7a4dfb356a2ef1ae4e7c", "score": "0.58631116", "text": "def layer_file_names(self):\n\n vv = gxvv.GXvv(dtype='U1024')\n self.gxagg.list_img(vv.gxvv)\n return list(vv.np)", "title": "" }, { "docid": "b8953d232c3b3e49d6c9f9af6ebb583a", "score": "0.5858773", "text": "def img_paths(src: Path) -> list:\n return [pt for pt in src.iterdir() if pt.suffix in IMG_EXTS]", "title": "" }, { "docid": "27118828be3f3b987ba711e6d71f81a9", "score": "0.58374494", "text": "def list_files(inputfolder, type='.jpg'):\n files = os.listdir(inputfolder)\n names = []\n for fname in files:\n name, ext = splitext(fname)\n if type == '.jpg' and ext == '.jpg':\n names.append(int(name))\n elif type == '.avi' and ext == 
'.avi':\n names.append(join(dirname(inputfolder), fname))\n return names", "title": "" }, { "docid": "08b3f751191111b46f9ea4b9199d06f8", "score": "0.58136696", "text": "def get_im_names(im_dir, pattern='*.jpg', return_np=True, return_path=False):\n im_paths = glob.glob(osp.join(im_dir, pattern))\n im_names = [osp.basename(path) for path in im_paths]\n ret = im_paths if return_path else im_names\n if return_np:\n ret = np.array(ret)\n return ret", "title": "" }, { "docid": "17b7fe4a849909a757c347c6eaa296e0", "score": "0.5798092", "text": "def LoadImageFilenames(orientation) :\n image_file_extensions = [\"png\", \"tif\", \"tiff\", \"bmp\", \"jpg\", \"jpeg\", \"tga\"] # can add others as needed\n if(orientation == 'Z'):\n imagefilepaths = bpy.context.scene.imagefilepaths_z\n file_min = bpy.context.scene.file_min_Z\n path = bpy.context.scene.image_path_Z\n elif (orientation == 'X'):\n imagefilepaths = bpy.context.scene.imagefilepaths_x\n file_min = bpy.context.scene.file_min_X\n path = bpy.context.scene.image_path_X\n elif (orientation == 'Y'):\n imagefilepaths = bpy.context.scene.imagefilepaths_y\n file_min = bpy.context.scene.file_min_Y\n path = bpy.context.scene.image_path_Y\n\n # get filenames at image_path, extract filenames of type (image extension with most elements) in correct order\n filenames = [f for f in listdir(path) if os.path.isfile(os.path.join(path, f))]\n grouped = {extension:[f for f in filenames\n if os.path.splitext(f)[1].lower() == os.path.extsep+extension]\n for extension in image_file_extensions}\n largest_group = max(grouped.values(), key=len)\n\n # sort only the filenames, then add the full path\n sorted_filenames = sort_nicely([f for f in largest_group])\n the_filepaths = [os.path.join(path, f) for f in sorted_filenames]\n\n imagefilepaths.clear()\n\n # insert into CollectionProperty\n for f in the_filepaths:\n imagefilepaths.add().name = f\n\n # store minimum image index\n min_im_name = sorted_filenames[0]\n id_string = re.search('([0-9]+)', min_im_name) # only searches filename, not full path\n file_min = int(id_string.group())\n\n if(orientation == 'Z'):\n bpy.context.scene.file_min_Z = file_min\n elif (orientation == 'X'):\n bpy.context.scene.file_min_X = file_min\n elif (orientation == 'Y'):\n bpy.context.scene.file_min_Y = file_min", "title": "" }, { "docid": "d9c1be24dec46d9a5781467dc5115812", "score": "0.57841146", "text": "def parse_image_upload(filename, image_types):\n is_image = False\n for img_type in image_types:\n if img_type in filename:\n is_image = True\n break\n\n if is_image:\n path_options = find_image_dir_on_system(filename)\n if len(path_options) > 0:\n return [{'label': path, 'value': i} for i, path in enumerate(path_options[::-1])]\n else:\n return []\n else:\n return []", "title": "" }, { "docid": "183983995d2f4d6f7d3374da255e434d", "score": "0.57836366", "text": "def get_jpeg_data_files_paths():\n\n data_root_folder = os.path.abspath(\"../data/\")\n train_jpeg_dir = os.path.join(data_root_folder, 'train_img/')\n test_jpeg_dir = os.path.join(data_root_folder, 'test_img/')\n train_csv_file = os.path.join(data_root_folder, 'meta-data', 'train.csv')\n test_csv_file = os.path.join(data_root_folder, 'meta-data', 'test.csv')\n return [train_jpeg_dir, test_jpeg_dir, train_csv_file, test_csv_file]", "title": "" }, { "docid": "10add19b98a6c249ecb4daf5bd3b7c91", "score": "0.57682216", "text": "def get_image_file_names(self, files):\n platform = platform_helper.get_platform()\n if platform == platform_helper.Platforms.windows:\n # Windows returns a 
single string for the file list.\n return self.window.tk.splitlist(files)\n else:\n # Mac returns a tuple of files.\n # Also use this in the default case.\n return files", "title": "" }, { "docid": "b60c4ba8e7e26af2877bc4ad1a8252be", "score": "0.575964", "text": "def model_list():\n return [os.path.basename(p) for p in glob.glob(os.path.join(SICONC_PATH, '*'))]", "title": "" }, { "docid": "f6924fc98b727976635f5093b02b0106", "score": "0.5758042", "text": "def list_examples():\n for __, __, file_names in os.walk(example_file()):\n return [os.path.splitext(f)[0] for f in file_names]", "title": "" }, { "docid": "68fedae38be09bd8e53b6a7b03f7847e", "score": "0.5750143", "text": "def fileNames(self):\n for name in self.fnames:\n curImg = cv2.imread('{}/{}'.format(self.path, name))\n self.images.append(curImg)\n self.classNames.append(os.path.splitext(name)[0])\n print(self.classNames)", "title": "" }, { "docid": "8be1337d617774856a62ad3ac9d3a6dc", "score": "0.5746192", "text": "def find_images(input_dir):\n result = []\n extensions = [\".jpg\", \".png\", \".jpeg\"]\n for root, _, files in os.walk(input_dir):\n for file in files:\n if os.path.splitext(file)[1].lower() in extensions:\n result.append(os.path.join(root, file))\n return result", "title": "" }, { "docid": "8f1585f76413b90dca2f4f626b0bfa9f", "score": "0.5740115", "text": "def get_images():\n images = []\n # get the actual path\n for image in bpy.data.images:\n if image.source == 'FILE':\n if not image.library:\n images.append(image)\n return images", "title": "" }, { "docid": "3120e22d303f8639f0368c49fe5661bc", "score": "0.5701862", "text": "def filter_file_extension(self,exts):\n filelist = []\n for zi in self.filelist:\n name, ext = os.path.splitext(zi.filename)\n if ext.lower() in exts:\n filelist.append(zi)\n \n return filelist", "title": "" }, { "docid": "234d8aad03d08a4fedbdb6fcf1100924", "score": "0.5701799", "text": "def file_name():\n L=[]\n lc_file_dir = os.path.join(os.getcwd())\n for root, dirs, files in os.walk(lc_file_dir): \n for file in files:\n try:\n if file.split(os.extsep,2)[1] == 'fits': \n L.append(os.path.join(root, file))\n \n except IndexError:\n continue\n return L", "title": "" }, { "docid": "5a26c69fa502670bc6f28b9f908349a7", "score": "0.56881887", "text": "def get_jpeg_data_files_paths(overfit=False):\n\n data_root_folder = os.path.abspath(\"../input/\")\n train_jpeg_dir = os.path.join(data_root_folder, 'train-jpg')\n test_jpeg_dir = os.path.join(data_root_folder, 'test-jpg')\n if overfit:\n train_csv_file = os.path.join(data_root_folder, 'train_v3_overfit.csv')\n test_csv_file = os.path.join(data_root_folder, 'test_v3_overfit.csv')\n else:\n train_csv_file = os.path.join(data_root_folder, 'train_v3_resample.csv')\n #train_csv_file = os.path.join(data_root_folder, 'train_v3.csv')\n test_csv_file = os.path.join(data_root_folder, 'test_v3.csv')\n print(\"train_jpeg_dir, test_jpeg_dir, train_csv_file, test_csv_file=\", \\\n \"\\n\", train_jpeg_dir, \"\\n\", test_jpeg_dir, \"\\n\", train_csv_file, \"\\n\", test_csv_file)\n return [train_jpeg_dir, test_jpeg_dir, train_csv_file, test_csv_file]", "title": "" }, { "docid": "52dff38cc0049842f473f522642e56bf", "score": "0.5659648", "text": "def get_imlist(path, suffix):\n imlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(suffix)]\n return imlist", "title": "" }, { "docid": "9f53292730c7446b44f7aa5564a13f9f", "score": "0.5638702", "text": "def test_read_src_files():\n files = 
c.read_src_files(\"/home/urra/projects/pto/tests/data/dated_images/2017\", [\".jpg\", \".JPG\"],\"201710\")\n for file in files:\n assert \"201710\" in file.stem and file.suffix == \".jpg\"", "title": "" }, { "docid": "abf4330f668d24523ec1af2ba11df879", "score": "0.5596138", "text": "def _find_image_files(data_dir, labels_file):#\n print('Determining list of input files and labels from %s.' % data_dir)\n unique_labels = [l.strip() for l in tf.gfile.FastGFile(\n labels_file, 'r').readlines()]\n\n labels = []\n filenames = []\n texts = []\n\n # Leave label index 0 empty as a background class.\n label_index = 1\n\n # Construct the list of JPEG files and labels.\n for text in unique_labels:\n \n jpeg_file_path = '%s/%s/*' % (data_dir, text)\n matching_files = tf.gfile.Glob(jpeg_file_path)#A list of strings containing filenames that match the given pattern\n\n labels.extend([label_index] * len(matching_files))\n texts.extend([text] * len(matching_files))\n filenames.extend(matching_files)\n\n if not label_index % 100:\n print('Finished finding files in %d of %d classes.' % (\n label_index, len(labels)))\n label_index += 1\n\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n shuffled_index = list(range(len(filenames)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n filenames = [filenames[i] for i in shuffled_index]\n texts = [texts[i] for i in shuffled_index]\n labels = [labels[i] for i in shuffled_index]\n\n print('Found %d JPEG files across %d labels inside %s.' %\n (len(filenames), len(unique_labels), data_dir))\n return filenames, texts, labels# ", "title": "" }, { "docid": "bd10d51e4f5e46b35f0e29d85d987599", "score": "0.55727816", "text": "def model_sources(model_info):\n search_path = [dirname(model_info['filename']), MODEL_PATH]\n return [_search(search_path, f) for f in model_info['source']]", "title": "" }, { "docid": "aed7356b763afaaa3e5c330436fc55a5", "score": "0.55664766", "text": "def find_images(current_dir: str=\"./\", exts=(\"jpg\", \"png\", \"jpeg\", \"gif\")):\n for root, _, files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield path_join(root, file_name)", "title": "" }, { "docid": "952bf2bb784e8797c79e370ab50486c2", "score": "0.5554656", "text": "def _GenerateAffectedFileExtList(input_api, source_file_filter):\n for f in input_api.AffectedFiles(\n include_deletes=False, file_filter=source_file_filter):\n extension = str(f.LocalPath()).rsplit('.', 1)[-1]\n yield (f, extension)", "title": "" }, { "docid": "6b1465d5e2540c6c5963fbdfb6649b44", "score": "0.55400735", "text": "def extract_paths_and_extensions(image_path):\n file_name, extension = os.path.splitext(image_path)\n root_path = os.path.dirname(image_path)\n\n return file_name, extension, root_path", "title": "" }, { "docid": "c654239d825fb29954947a1546589009", "score": "0.55233943", "text": "def get_image_paths(directory, ext):\n image_files = []\n for (dirpath, _, filenames) in os.walk(directory):\n for filename in filenames:\n if filename.lower().endswith(ext):\n image_files.append(os.sep.join([dirpath, filename]))\n return image_files", "title": "" }, { "docid": "2f24585f958232f319b10c0eee4e0ccc", "score": "0.5522414", "text": "def filename_parser(args):\n # Metadata data structure\n meta = {}\n \n # Compile regular expression to remove image file extensions\n pattern = '\\.' 
+ args.type + '$'\n ext = re.compile(pattern, re.IGNORECASE)\n \n # Walk through the input directory and find images that match input criteria\n for (dirpath, dirnames, filenames) in os.walk(args.dir):\n for filename in filenames:\n # Is filename and image?\n is_img = ext.search(filename)\n # If filename is an image, parse the metadata\n if is_img is not None:\n # Remove the file extension\n prefix = ext.sub('', filename)\n metadata = prefix.split(args.deliminator)\n \n # Image metadata\n img_meta = {}\n img_meta['path'] = dirpath\n img_pass = 1\n # For each of the type of metadata PlantCV keeps track of\n for field in args.valid_meta:\n # If the same metadata is found in the image filename, store the value\n if field in args.fields:\n meta_value = metadata[args.fields[field]]\n # If the metadata type has a user-provided restriction\n if field in args.imgtype:\n # If the input value does not match the image value, fail the image\n if meta_value != args.imgtype[field]:\n img_pass = 0\n img_meta[field] = meta_value\n # Or use the default value\n else:\n img_meta[field] = args.valid_meta[field]\n \n # If the image meets the user's criteria, store the metadata\n if img_pass == 1:\n meta[filename] = img_meta\n \n return(meta)", "title": "" }, { "docid": "7f9f86183de390f7f09dc2031d540f64", "score": "0.5497812", "text": "def get_file_names(source='', extensions=''):\n\n file_names = set()\n for dirpath, _, files in os.walk(source):\n if os.path.basename(dirpath) != 'patches':\n for ext in extensions:\n for f in fnmatch.filter(files, ext):\n if f != 'quilt.html':\n file_names.update([os.path.join(dirpath, f)])\n\n return file_names", "title": "" }, { "docid": "26ace505146a3420556bae6c09f92fad", "score": "0.5492464", "text": "def collectImages(self,source):\n if (source == calib_image_dir):\n img_mask = source +'calibration*.jpg' # create a mask using glob\n \n elif (source == test_image_dir):\n img_mask = source +'*.jpg' # create a mask using glob\n \n images = self.collectCalibImages(img_mask)\n \n return images", "title": "" }, { "docid": "91e2caff6ef44a5bbe3eb8376e92c6a0", "score": "0.5484064", "text": "def raw_file_list(src='train'):\n src_dir = os.path.join('raw-data', src, '*', 'images')\n return file_list(src_dir)", "title": "" }, { "docid": "86553670df636406257ccb01ac0ffbfc", "score": "0.54795015", "text": "def get_images_list(path_to_folder: str) -> list:\n image_names_list = [\n x for x in os.listdir(path_to_folder) if x[-3:] in [\"jpg\", \"peg\", \"png\"]\n ]\n return image_names_list", "title": "" }, { "docid": "1b4eda34bcb2248377a1d3766760837f", "score": "0.5476582", "text": "def get_jpg_input_extensions():\n return __CONF['jpg_input_extensions']", "title": "" }, { "docid": "6a3d83307f98fc65d72afdfe434276fb", "score": "0.54735845", "text": "def _get_camera_model(raw_metadata: dict) -> str:\n value = raw_metadata.get(\n 'Exif.Image.Model',\n raw_metadata.get('Exif.Photo.Model', '')\n )\n return value", "title": "" }, { "docid": "43d063e8ac9bcf2a74daf11cdd6b1863", "score": "0.54696643", "text": "def _load_image_names(self):\n image_set_file = self.image_set_path\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_list = [x.strip() for x in f.readlines()]\n return image_list", "title": "" }, { "docid": "080e9ab7b3235b2478994926689179c5", "score": "0.54572886", "text": "def _filter_file_type(self, repository_name, file_name_list):\n ext_list = self._configure.get_valid_extensions(repository_name)\n result 
= []\n for file_name in file_name_list:\n _, ext = splitext(file_name)\n if ext in ext_list:\n result.append(file_name)\n return result", "title": "" }, { "docid": "7e64834b181105cf42ac9947487fb5fa", "score": "0.545282", "text": "def frame_names(path_frames: str, camera_name: str) -> List[str]:\n # Path with frames for this camera\n path: str = f'{path_frames}/{camera_name}'\n # Files in this path; frames are named e.g. 'Camera1_Frame01234.png'\n fnames: List[str] = os.listdir(path)\n # Filter the list of frames to only those matching the pattern\n pattern: re.Pattern = re.compile(f'^{camera_name}_SyncFrame'+'(\\d{5}).png$')\n # Return list of all file names matching this pattern\n return [fname for fname in fnames if pattern.match(fname) is not None]", "title": "" }, { "docid": "ce5b443c04a9ceccc09dce6f6db71853", "score": "0.54419", "text": "def getsupportedimagesextension() :\n\t\n\ttry :\n\t\tk = request.args.get('k', type = int)\n\t\temobj = get_model(k)\n\t\treturn jsonify(emobj.IMAGE_EXT_SUPPORTED)\n\texcept Exception as e:\n\t\tLOGGER.LOG(e) # Log Exception\n\t\treturn {}", "title": "" }, { "docid": "7f5242f7799ec14141ea5c1ddba2490f", "score": "0.54304093", "text": "def get_list_of_input_file_names() :\n\n list_of_files = [\n #'spv2-cxi49812-r0200-ev-000000-042962.np',\n #'spv2-cxi49812-r0201-ev-000000-015990.np',\n 'spv2-cxi49812-r0202-ev-000000-040336.np',\n 'spv2-cxi49812-r0203-ev-000000-010197.np',\n 'spv2-cxi49812-r0204-ev-000000-066368.np',\n 'spv2-cxi49812-r0205-ev-000000-032111.np',\n 'spv2-cxi49812-r0206-ev-000000-073412.np',\n 'spv2-cxi49812-r0207-ev-000000-029735.np']\n #'spv2-cxi49812-r0208-ev-000000-035901.np',\n #'spv2-cxi49812-r0209-ev-000000-027595.np',\n #'spv2-cxi49812-r0210-ev-000000-034038.np',\n #'spv2-cxi49812-r0211-ev-000000-015550.np',\n #'spv2-cxi49812-r0212-ev-000000-047923.np',\n #'spv2-cxi49812-r0213-ev-000000-022799.np',\n #'spv2-cxi49812-r0214-ev-000000-025638.np']\n\n print('list_of_files = ', list_of_files)\n return list_of_files", "title": "" }, { "docid": "1d76271acbdff4528b42288df7864dcd", "score": "0.5430268", "text": "def _load_image_path_list(image_data_path):\n examples = []\n for dirpath, dirnames, filenames in os.walk(image_data_path):\n for filename in filenames:\n image_id = int(filename.split('.')[0])\n filename = os.path.join(dirpath, filename)\n examples.append((image_id, filename))\n return examples", "title": "" }, { "docid": "51f1b9cbff2103c11b890b8abee9e411", "score": "0.54184383", "text": "def get_file_path_meta():\n\n pattern = re.compile(r'model\\.ckpt\\.meta$')\n for file_name in os.listdir(FLAGS.model_dir):\n if re.search(pattern, file_name) is not None:\n file_path = os.path.join(FLAGS.model_dir, file_name)\n break\n\n return file_path", "title": "" }, { "docid": "1ed59487ac8936eee2e44e8d83ef228e", "score": "0.5412717", "text": "def find_images(path_folder, name_file):\n assert os.path.isdir(path_folder), 'missing folder: %s' % path_folder\n name_file = os.path.splitext(name_file)[0]\n paths_img = [p for p in glob.glob(os.path.join(path_folder, name_file + '.*'))\n if get_file_ext(p) in IMAGE_EXT]\n return sorted(paths_img)", "title": "" }, { "docid": "3ee1f90110e710269906f94fe4985d89", "score": "0.54058295", "text": "def _find_image_files(data_dir, classes_file):\n print('Determining list of input files and labels from %s.' 
% data_dir)\n unique_labels = [l.split()[1] for l in tf.gfile.FastGFile(\n classes_file, 'r').readlines()]\n\n labels = []\n filenames = []\n texts = []\n\n # Leave label index 0 empty as a background class.\n label_index = 1\n\n # Construct the list of JPEG files and labels.\n for text in unique_labels:\n jpeg_file_path = '%s/%s/*' % (data_dir, text)\n matching_files = tf.gfile.Glob(jpeg_file_path)\n\n labels.extend([label_index] * len(matching_files))\n texts.extend([text] * len(matching_files))\n filenames.extend(matching_files)\n\n if not label_index % 100:\n print('Finished finding files in %d of %d classes.' % (\n label_index, len(labels)))\n label_index += 1\n\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n shuffled_index = list(range(len(filenames)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n filenames = [filenames[i] for i in shuffled_index]\n texts = [texts[i] for i in shuffled_index]\n labels = [labels[i] for i in shuffled_index]\n\n print('Found %d JPEG files across %d labels inside %s.' %\n (len(filenames), len(unique_labels), data_dir))\n return filenames, texts, labels", "title": "" }, { "docid": "ca07882da61b08f8700aab366441268e", "score": "0.54034716", "text": "def load_img_name_list(dataset_path):\n img_gt_name_list = open(dataset_path).read().splitlines()\n img_name_list = [\n img_gt_name.split(' ')[0][-15:-4]\n for img_gt_name in img_gt_name_list\n ]\n \"\"\" /JPEGImages/2007_000121.jpg -> [-15:-4] = 2007_000121 \"\"\"\n return img_name_list", "title": "" }, { "docid": "1f303903b9038074e530a0dbe25bd1d5", "score": "0.54026985", "text": "def makefly_metafiles(self, pathname, extension):\n files = []\n for listed_file in listdir(pathname):\n if listed_file.endswith(extension):\n files.append(listed_file)\n if not self.DBFILES and files:\n self.DBFILES = files\n return files", "title": "" }, { "docid": "1a7bb219b0f0e84bb5e8a55d65d31d35", "score": "0.5396332", "text": "def searchImages(self):\n valid_extensions = ['jpg', 'jpeg', 'png',\n 'gif', 'tiff', 'bmp']\n return self.search(valid_extensions=valid_extensions)", "title": "" }, { "docid": "4308eea491e30d148246cab2593918df", "score": "0.53944874", "text": "def analyse(src_path, files, title, description, author):\n src_dir = task.get_task_path(task.get_actual()['task']) + '/' + DIR_FINAL\n # files = list_jpg(src_dir)\n exifs = images_get_exifs(src_dir, files, report=False)\n\n ret = []\n for each in exifs:\n ret.append(dict(name=each['name'], title=_analyze_tag(title, each['title']),\n description=_analyze_tag(description, each['description']),\n author=_analyze_tag(author, each['author'])))\n\n # print('analyse() ret=' + str(ret))\n\n return ret", "title": "" }, { "docid": "f90dbbf6329e568839c08b6b41dc286c", "score": "0.53906775", "text": "def getImageFilenames(self):\n return self.filenames", "title": "" }, { "docid": "0798386ab11c0157a07fef4039e369b2", "score": "0.53899467", "text": "def get_image_paths(self, image_type, cropped=True, get_date=False):\n image_path = self.get_images_folder_path()\n all_dates = os.listdir(image_path)\n all_dates.sort()\n print('Select one of the dates to view the Images:')\n\n for num, date in enumerate(all_dates):\n print('({}): {}'.format(num, date))\n\n selected = input()\n path = image_path + all_dates[int(selected)]\n\n if cropped:\n path = path + os.sep + 'cropped'\n\n final_path = ''\n for files in os.listdir(path):\n if 
image_type in files.lower() and 'xml' not in files:\n final_path = path + os.sep + files\n\n if get_date and final_path != '':\n return final_path, all_dates[int(selected)]\n else:\n return final_path\n\n print('No Imagery subjects found')\n return None", "title": "" }, { "docid": "d80cb189fc8a63789f535e65884386f9", "score": "0.5388186", "text": "def find_images():\n pass", "title": "" }, { "docid": "6466a8f7e718c1527e25fe91bb33e3d1", "score": "0.5386157", "text": "def file_list(directory, extension='jpg'):\n if not os.path.isdir(directory):\n return []\n return [os.path.join(directory,f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory,f)) and f.endswith(\".{}\".format(extension))]", "title": "" }, { "docid": "ec0fdc25f3f32d8676f319a020580595", "score": "0.53857946", "text": "def get_images(base_dir: str) -> List[str]:\n return glob.glob(os.path.join('data', base_dir, 'inputs', '*'))", "title": "" }, { "docid": "424feae6373d74d7f487529700d934fc", "score": "0.5380695", "text": "def get_image_files():\n return [\n f for f in listdir(get_images_directory())\n if isfile(join(get_images_directory(), f))\n ]", "title": "" }, { "docid": "9ea9ae503b81d1b99fdef5c18f5a145a", "score": "0.5377136", "text": "def _get_cim_files(institute, source_id):\n folder = io_mgr.get_model_folder(institute, source_id, 'cim')\n\n return [os.path.join(folder, i) for i in os.listdir(folder)]", "title": "" }, { "docid": "0bb01fcf3b5188f5c0313d713c473436", "score": "0.53695655", "text": "def load_filenames_labels(mode):\n label_dict, class_description = build_label_dicts()\n filenames_labels = []\n if mode == 'train':\n filenames = glob.glob('data/tiny-imagenet-200/train/*/images/*.JPEG')\n for filename in filenames:\n match = re.search(r'n\\d+', filename)\n label = str(label_dict[match.group()])\n filenames_labels.append((filename, label))\n elif mode == 'val':\n with open('data/tiny-imagenet-200/val/val_annotations.txt', 'r') as f:\n for line in f.readlines():\n split_line = line.split('\\t')\n filename = 'data/tiny-imagenet-200/val/images/' + split_line[0]\n label = str(label_dict[split_line[1]])\n filenames_labels.append((filename, label))\n\n return filenames_labels", "title": "" }, { "docid": "bba1db715d269170ae0d2b49e37bb4db", "score": "0.5351091", "text": "def get_files_list_from_filemanager(self):\n files = self.filemanager.get_files()\n logging.debug(files)\n return [ \"{}.mkv\".format(os.path.splitext(x.split(\"/\")[-1])[0]) for x in files if self.prefix in x ]", "title": "" }, { "docid": "ecf8a8003275ab22626c4bc6e015ab3e", "score": "0.53505623", "text": "def file_list(src_dir, fname_wildcard='*'):\n result = []\n search_path = os.path.join(src_dir, f\"{fname_wildcard}.{IMG_EXT}\")\n for img_path in gfile.Glob(search_path):\n result.append(img_path)\n\n return result", "title": "" }, { "docid": "cccd517c82743ebacf15e1b04df021f7", "score": "0.5350321", "text": "def GetFilenames(path, ext):\n files = []\n for file in os.listdir(path):\n if file.endswith(ext):\n files.append(os.path.join(path, file))\n\n return files", "title": "" }, { "docid": "a9d9d048f11bdf2c34e44a54110d418a", "score": "0.53443915", "text": "def get_image_filenames(data_path):\n return listdir(data_path)", "title": "" }, { "docid": "6ebace5430d4c97200694c0e141e2e22", "score": "0.5342931", "text": "def get_file_list_ext(dir_path, file_ext_list=[None]):\n\n file_list = [f for f in os.listdir(dir_path) if os.path.splitext(f)[1] in file_ext_list]\n\n return file_list", "title": "" }, { "docid": 
"41a9319fa8de0d01c4923abbfb9b36f1", "score": "0.5342027", "text": "def input_image_names(self):\n return list(self._input_file_names.items())", "title": "" }, { "docid": "933d5e2371c735313f4e8d58b24111c2", "score": "0.5340839", "text": "def image_glob(image_directory, ext):\n return glob.glob(image_glob_pattern(image_directory, ext))", "title": "" }, { "docid": "0c8cebd7a6382bf795af6379043845b9", "score": "0.53407", "text": "def get_files(ftype='activity'):\n s = 'activity_0' if ftype == 'activity' else ftype\n file_dict = {\n 'SL': [\n 'alexnetRepEI_128FF_nt16_dt2.0_pwr1.9_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Abs_0009v3_{}.pkl'.format(s)\n ],\n 'L': [\n 'alexnetRepEI_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Abs_0011v3_{}.pkl'.format(s)\n ],\n 'l10_L': [\n 'alexnetRepEIdd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd10_0002v4_{}.pkl'.format(s)\n ],\n 'l40_SL': [\n 'alexnetRepEIdd_128FF_nt16_dt2.0_pwr1.9_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd40_0011v4_{}.pkl'.format(s)\n ],\n 'l40_SL_VC': [\n 'alexnetRepEIdd_128FF_nt16_dt2.0_pwr1.8_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd40VC_0013v4_{}.pkl'.format(s)\n ],\n 'l20_L_PI': [\n 'alexnetRepEIdd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20PO_0009v4_{}.pkl'.format(s)\n ],\n 'l10_L_VC': [\n 'alexnetRepEIdd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd10VC_0007v4_{}.pkl'.format(s)\n ],\n 'l25_SL_VC': [\n 'alexnetRepEIdd_128FF_nt16_dt2.0_pwr1.8_k1.0_EIstd0.04tnorm_FF0.02Mom0.9LR0.01drp30Absdd25VC_0021v4_{}.pkl'.format(s)\n ],\n 'l20_L_VC_PI_EO': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20VC2POEO_0019v5_{}.pkl'.format(s)\n ],\n 'l20_SL_VC_PI': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.8_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20VC2PO_0010v5_{}.pkl'.format(s)\n ],\n 'l20_L_VC_PI': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20VC2PO_0013v5_{}.pkl'.format(s)\n ],\n 'l20_SL_PI': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.8_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20PO_0011v5_{}.pkl'.format(s)\n ],\n 'l20_L_PI': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20PO_0012v5_{}.pkl'.format(s)\n ],\n 'l20_SL_VC': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.8_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20VC2_0016v5_{}.pkl'.format(s)\n ],\n 'l20_L_VC': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.0_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20VC2_0017v5_{}.pkl'.format(s)\n ],\n 'l20_SL_VC_PI_EO': [\n 'alexnetRepEI5dd_128FF_nt16_dt2.0_pwr1.8_k1.0_EIstd0.02tnorm_FF0.02Mom0.9LR0.01drp30Absdd20VC2POEO_0020v5_{}.pkl'.format(s)\n ]\n \n } \n \n if ftype == 'weights':\n del file_dict['SL']\n del file_dict['L']\n return file_dict", "title": "" }, { "docid": "a7978b949c8519fea157c9687b87dee3", "score": "0.534022", "text": "def _detect_file(path, verbose=False, **configs):\n # If the user sets a start frame, only filter\n # from that point onward\n if 'start_frame' in configs.keys():\n t0 = int(configs['start_frame'])\n else:\n t0 = 0 \n\n # If the user sets a stop frame, only filter\n # up to that frame\n if 'stop_frame' in configs.keys():\n t1 = int(configs['stop_frame'])\n else:\n t1 = None \n\n # Create image file reader \n reader = qio.ImageFileReader(path)\n\n # Create an image filterer\n filterer = image_filter.SubregionFilterer(\n reader, None, start_iter=t0, 
stop_iter=t1,\n **configs['filtering'])\n\n # Set up filtering and detection \n detections = (detect.detect(img, **configs['detection']) \\\n for img in filterer)\n\n # Run filtering and detection \n if verbose:\n locs = pd.concat(\n [d.assign(frame_idx=i) for i, d in tqdm(enumerate(detections))],\n ignore_index=True, sort=False\n )\n else:\n locs = pd.concat(\n [d.assign(frame_idx=i) for i, d in enumerate(detections)],\n ignore_index=True, sort=False\n )\n\n # Adjust frame indices to account for start\n # frame\n locs['frame_idx'] += t0 \n\n return locs", "title": "" }, { "docid": "c3bf568a2b6403227731cd71625496e3", "score": "0.53381336", "text": "def lens_point_sources(x, y, lens_model, mass_scale, model_param_8, model_param_9, model_param_10, galaxy_position=(0,0), e_L=0, theta_L=0, shear=0, theta_shear=0, gravlens_params={}, gravlens_input_file='findimg_input.txt', gravlens_output_file='findimg_out.txt', keep_files=False):\n\n inputlens, setlens, gravlens_params_updated = lens_parameters(lens_model, mass_scale, model_param_8, model_param_9, model_param_10, galaxy_position, e_L, theta_L, shear, theta_shear, gravlens_params )\n\n # run gravlens to get the images ========================================================\n f = open(gravlens_input_file, 'w')\n f.write(inputlens)\n out_file = []\n for i in range(len(x)):\n out_file.append(gravlens_output_file[:-4] + '_' + str(i) + gravlens_output_file[-4:])\n f.write('findimg %0.6f %0.6f %s \\n' % (x[i], y[i], out_file[-1] ))\n f.close()\n os.system('gravlens %s > /dev/null' % (gravlens_input_file) )\n # now we have 'len(x)' files with the images information (pos, magnification, time delay)\n #========================================================================================\n\n # we now read the files ======================================================== \n x_img, y_img, magnification, time_delay = [], [], [], []\n for i in range (len(x)): # len(x) = len(out_file)\n img_properties = np.loadtxt(out_file[i], comments='#', skiprows=1, unpack=True) # , ndmin=4\n if len(img_properties) == 0: # in the case no images were found (means that the grid is not big enough)\n x_img.append([]); y_img.append([]); magnification.append([]); time_delay.append([])\n else:\n x_img.append( list(np.array(img_properties[0], ndmin=1)) ) # I transform to list so sum(x_img, []) will flatten this list\n y_img.append( list(np.array(img_properties[1], ndmin=1)) ) # I transform to list so sum(y_img, []) will flatten this list\n magnification.append(img_properties[2])\n time_delay.append(img_properties[3])\n # ==============================================================================\n\n if keep_files == False:\n os.system('rm -f %s %s' % (gravlens_input_file, gravlens_output_file[:-4]+ '*.' 
+ gravlens_output_file[-3:]) )\n\n\n return x_img, y_img, magnification, time_delay, out_file", "title": "" }, { "docid": "5104c02e1694639582b50350b40a4f3b", "score": "0.5337294", "text": "def find_paths(metadata, base_folder, file_extension):\n paths = []\n for i in range(len(metadata)):\n relative_path = relative_file_path(\n metadata.show_filename_prefix.iloc[i],\n metadata.episode_filename_prefix.iloc[i],\n )\n path = os.path.join(base_folder, relative_path + file_extension)\n paths.append(path)\n return paths", "title": "" }, { "docid": "49058def9145de00ea91b3a8f24cebc8", "score": "0.53308284", "text": "def load_images(self, files_path, ext):\n # data = load_files(files_path, shuffle = False) #load files\n # images = np.array(data['filenames']) #load images\n data = sorted(glob.glob(osp.join(files_path, ext)))\n images = np.array(data) # load images\n return images", "title": "" }, { "docid": "2d3663e845352b83e46df8324290f198", "score": "0.53263974", "text": "def get_metadata_files():\n rel_path = '/'.join([BQ_PARAMS['BQ_REPO'], BQ_PARAMS['TABLE_METADATA_DIR'], get_rel_prefix(BQ_PARAMS)])\n metadata_fp = get_filepath(rel_path)\n\n return [f for f in os.listdir(metadata_fp) if os.path.isfile(os.path.join(metadata_fp, f))]", "title": "" }, { "docid": "b3f0e15816aa7c848f83cde71619f521", "score": "0.531588", "text": "def get_all_files(base_path, filetypes=['jpeg', 'JPG', 'jpg', 'gif', 'GIF', 'png', 'PNG', 'mov', 'MOV', 'mp4']):\r\n all_images = []\r\n for ftype in filetypes:\r\n all_images += glob.glob(os.path.join(base_path, f'*.{ftype}'))\r\n return all_images", "title": "" }, { "docid": "d619498213ba6296cea38b36e8ab7e33", "score": "0.5312877", "text": "def images_ext(self):\n return self.ext + '?extended=images'", "title": "" }, { "docid": "81ee50f5189e5b26900ba29311322025", "score": "0.5310522", "text": "def getFiles(self):\n files = os.listdir(self.dir)\n filtr = lambda s: s[0]!='.' 
and s[0]!='_'\n files = filter(filtr,files)\n if self.ext:\n filtr = lambda s: s.endswith(self.ext)\n files = filter(filtr,files)\n n = len(self.ext)\n files = [ f[:-n] for f in files ]\n\n files = self.filterFiles(files)\n ## filtr = lambda s:utils.is_pyFormex(self.fileName(s))\n ## files = filter(filtr,files)\n\n ## if self.max > 0 and len(files) > self.max:\n ## files = files[:self.max]\n\n files.sort()\n return files", "title": "" }, { "docid": "a52e3e3f71bad012dd160386ec41f73d", "score": "0.5307245", "text": "def get_files(extension,path=\"./\"):\n if not isinstance(path, str):\n raise TypeError(\"The given path is not a string!\")\n temp = glob.glob(path + \"*.\" + extension)\n if len(temp) == 0:\n raise TypeError(\"Found no files with path \" + path + \" and extension .\" + extension)\n body_names = []\n for item in temp:\n body_names.append(os.path.splitext(os.path.split(item)[-1])[0])\n return(temp,body_names)", "title": "" }, { "docid": "1c66f830cbe29972ce5feb69f4c0ad9d", "score": "0.5307185", "text": "def get_model_paths(model_dir):\n all_models = gfile.Glob(os.path.join(model_dir, '*.meta'))\n model_filenames = [os.path.basename(m) for m in all_models]\n model_numbers_names = [\n (shipname.detect_model_num(m), shipname.detect_model_name(m))\n for m in model_filenames]\n model_names = sorted(model_numbers_names)\n return [os.path.join(model_dir, name[1]) for name in model_names]", "title": "" }, { "docid": "905545f61183ec444dfe432d900cb2b9", "score": "0.5303663", "text": "def findImageFile( fileName ):\n \n filePath = os.path.normpath( os.path.join( META_MODEL_PATH, \n os.path.normpath( fileName ) ) )\n #print 'filePath', filePath\n if( os.path.exists( filePath ) ): return filePath\n \n filePath = os.path.normpath( os.path.join( USER_MMODEL_PATH, \n os.path.normpath( fileName ) ) )\n if( os.path.exists( filePath ) ): return filePath\n \n filePath = os.path.normpath( os.path.join( SOURCE_CODE_PATH, \n os.path.normpath( fileName ) ) )\n if( os.path.exists( filePath ) ): return filePath\n \n return None", "title": "" }, { "docid": "f19ea1ced1397c43e920276b34d2012c", "score": "0.53008777", "text": "def metadata_listdir(name):", "title": "" }, { "docid": "dd6c42b993a522f42e4c6a6d9e2215aa", "score": "0.52986646", "text": "def match_mimetypes(path, mimetypes):\n filenames = get_extensions(path)\n\n for k,v in mimetypes.items():\n for filename in filenames:\n if filename[1] == k:\n filename.append(v)\n full_name = filename[0] + filename[1]\n filename.append(full_name)\n\n return filenames", "title": "" }, { "docid": "32e641ce99d1710145bf27d3b756f8d5", "score": "0.52962226", "text": "def extract_multiple_exif_fast(paths: List[Union[Path, str]], batch_size=100) -> list:\n if len(paths) <= batch_size:\n batch_size = len(paths)\n\n paths = [str(path) for path in paths]\n batched_paths = list(zip(*[iter(paths)] * batch_size))\n\n exif_data = process_map(\n extract_multiple_exif, batched_paths, desc=\"Extracting EXIF data\", chunksize=3\n )\n\n # Unbatch\n return [item for sublist in exif_data for item in sublist]", "title": "" }, { "docid": "44b49af0be906f36d80f8de3fef7942f", "score": "0.5295923", "text": "def get_files_of_a_type(root_path, file_extension='.mat'):\n if file_extension[0] != '.':\n file_extension = '.' 
+ file_extension\n\n assert(len(file_extension) == 4)\n\n filepaths_found = []\n for root, dirs, files in os.walk(root_path):\n current_files = [f for f in files if f[-4:] == file_extension]\n for f in current_files:\n filepaths_found.append(os.path.join(root, f))\n\n return filepaths_found", "title": "" }, { "docid": "3c3fec22e66c2121b7037059c3e1f97a", "score": "0.52948225", "text": "def autodetect_cameras(dirname):\n \n counter = 5\n while counter > 0 and not os.path.exists(os.path.join(dirname, 'models', 'nip')):\n dirname = os.path.split(dirname)[0]\n counter -= 1\n\n if counter == 0:\n raise ValueError('The {} directory does not seem to be a valid results directory'.format(dirname))\n\n return fsutil.listdir(os.path.join(dirname, 'models', 'nip'), '.*', dirs_only=True)", "title": "" }, { "docid": "e4f9bb2be2f100e3e4da13a7afd20e78", "score": "0.5293242", "text": "def filter_files(file_list: List[str], extensions: Set[str]) -> List[str]:\n return [\n current_file\n for current_file in file_list\n if Path(current_file).suffix in extensions\n ]", "title": "" }, { "docid": "cdf68cb2b4a6f901d6773a767e1c108e", "score": "0.52859765", "text": "def _find_image_ext(path):\n path = os.path.splitext(path)[0]\n for ext in _KNOWN_IMG_EXTS:\n this_path = f\"{path}.{ext}\"\n if os.path.isfile(this_path):\n break\n else:\n ext = \"png\"\n return (f\"{path}.{ext}\", ext)", "title": "" }, { "docid": "caf74b4d2e7c075c5658f01ee733a710", "score": "0.5281779", "text": "def __loadSynthImages(self):\n \n files = []\n for root,dirs,file in os.walk('SampleSynth',topdown=True):\n for i in file:\n extn = i.split('.')[-1]\n if extn != 'DS_Store' and extn!= 'mat':\n path = root+os.sep+i\n path = '/'.join(path.split('\\\\'))\n files.append(path)\n return files", "title": "" }, { "docid": "9c4069d514fca631ac52dcf37bcffd7e", "score": "0.5280908", "text": "def getFiles(path):\r\n \r\n imlist = {}\r\n count = 0\r\n for each in os.listdir(path):\r\n\r\n print (\" #### Reading image category \", each, \" ##### \")\r\n #imlist[each] = []\r\n path1 = path+'\\\\'+each\r\n \r\n for imagefile in os.listdir(path1):\r\n\r\n print (\"Reading file \", imagefile)\r\n if imagefile[:4] not in imlist.keys():\r\n imlist[imagefile[:4]] = []\r\n\r\n im = cv2.imread(path1+'\\\\'+imagefile)\r\n #im1 = cv2.resize(im,(227, 227), interpolation = cv2.INTER_CUBIC)\r\n imlist[imagefile[:4]].append(im)\r\n count +=1\r\n\r\n return [imlist, count]", "title": "" }, { "docid": "c12829dcb986371ebb57b7fe5f9e7ee3", "score": "0.5266165", "text": "def _get_media_files_extensions(self) -> tuple:\n os.chdir(self._program_dir)\n\n with open(\"resources\\\\file-extensions.txt\", \"r\") as f:\n movie_extensions = re.findall(r\"(\\.\\w*)\", f.read())\n return tuple(movie_extensions)", "title": "" }, { "docid": "5f9b14e9a655a9d645523f07fd30f89f", "score": "0.52562284", "text": "def _get_image_files(self, character):\r\n p = os.path.join(DEFAULT_PLAYERS, character)\r\n files = os.listdir(p)\r\n\r\n def filter_files(pattern):\r\n filter_function = lambda file:re.match(pattern, file)\r\n filtered_files = [os.path.join(p, i) for i in filter(filter_function, files)]\r\n return filtered_files\r\n\r\n walk_files = filter_files(WALK_IMAGE_FILE_PATTERN)\r\n jump_files = filter_files(JUMP_IMAGE_FILE_PATTERN)\r\n weapon_file = filter_files(WEAPON_FILE_PATTERN)[0]\r\n try:\r\n still_file = filter_files(STILL_FILE_PATTERN)[0]\r\n except IndexError:\r\n still_file = walk_files[0]\r\n\r\n return walk_files, jump_files, weapon_file, still_file", "title": "" }, { "docid": 
"a5b5242ab5a791889c71b31678ee9774", "score": "0.5252204", "text": "def create_image_lists(image_dir):\n \n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n \n # Find image list if its common in annotation\n image_pattern = os.path.join(image_dir, 'JPEGImages', '*.' + 'jpg')\n image_lst = glob(image_pattern) \n data = []\n if not image_lst:\n print('No files found') \n else:\n for image_file in image_lst:\n filename = image_file.split(\"/\")[-1].split('.')[0]\n annotation_file = os.path.join(image_dir, 'SegmentationClass', filename + '.png')\n if os.path.exists(annotation_file):\n record = {'image': image_file, 'annotation': annotation_file, 'filename': filename}\n data.append(record)\n else:\n print('Annotation file not found for %s - Skipping' % filename)\n print('Pattern %s' % annotation_file)\n \n print ('Nunmber of files: %d' %len(data))\n return data", "title": "" }, { "docid": "627dad05ceec5af4415930c519954334", "score": "0.52506876", "text": "def image_read(path, form):\n files = sorted(glob.glob(path + form))\n image_list = []\n [image_list.append(tiff.imread(image)) for image in files]\n return image_list", "title": "" }, { "docid": "e94fd6aa2284a387d11a81de953493fb", "score": "0.5247347", "text": "def get_file_list(directory):\n file_list = []\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"):\n filepath = os.path.join(directory, filename)\n file_list.append(filepath)\n return file_list", "title": "" } ]
0ae60727fde0f48a9395014a6e53f368
Get most recent data object from local store (resultsdir) or database depending on use_local flag.
[ { "docid": "6c515790931a0e7851bab6f6a6949aeb", "score": "0.6872474", "text": "def get_latest_data(self, use_local: bool = None):\n if use_local is None:\n use_local = self.config.flags.use_local\n if use_local:\n return self.find_latest_data()\n else:\n return [result.data for result in self.all_latest_results()]", "title": "" } ]
[ { "docid": "9bc5fc8af3a9c1739be2bb32accd70b3", "score": "0.6305982", "text": "def find_latest_data(self):\n jsonname = \"{}_data.json\".format(self.test_name.replace(\".\", \"_\"))\n resultsdir = os.path.expandvars(self.config.resultsdir)\n latest = None\n latest_mtime = 0.0\n for dirpath, dirnames, filenames in os.walk(resultsdir):\n for fname in filenames:\n if jsonname in fname:\n fpath = os.path.join(dirpath, fname)\n st = os.stat(fpath)\n if st.st_mtime > latest_mtime:\n latest_mtime = st.st_mtime\n latest = fpath\n if latest is not None:\n self.resultslocation = os.path.dirname(latest)\n return json.from_file(latest)\n else:\n return None", "title": "" }, { "docid": "ce4f02c4882eb5abbd49b8ff1d5a0fdc", "score": "0.5784426", "text": "def local_latest(results):\n\n if len(results.index) <= 1:\n return results\n # separate all the attributes which could be different between two versions\n separate = ['path', 'version', 'time_complete', 'filename','fdate', 'tdate', 'periods']\n cols = [k for k in results.columns if k not in separate]\n results = results.sort_values('version').drop_duplicates(subset=cols, keep='last')\n return results", "title": "" }, { "docid": "22a37f33876d97efeb7cc1b6fd365da7", "score": "0.55801266", "text": "def fetch(self) -> pb.LocalPath:", "title": "" }, { "docid": "c79a694f60bb25bf4859c2b4df65ef13", "score": "0.55601543", "text": "def get_local_pickled_results(file_location):\n with open(file_location, 'rb') as file:\n results = pickle.load(file)\n return results", "title": "" }, { "docid": "a2f78b7f7ddb6b68c00f593f63c35bdd", "score": "0.5521102", "text": "def __get_db(self, db_folder, db_name, cache_size=30):\n\n if self.is_for_scenario:\n return DBFetcher(db_folder, db_name, \"scenarios\", cache_size).fetch()\n else:\n return DBFetcher(db_folder, db_name, \"positions\", cache_size).fetch()", "title": "" }, { "docid": "aade73ca6f826320d70a38693f6dabf0", "score": "0.54962987", "text": "def get_latest(file_id, family):\n latest = Metadata.query.filter_by(id_file=file_id, family=family).first()\n # There is the only possible result (tested by test_update_metadata_db_records)\n if latest is not None:\n logger.info('Latest is from this workspace: %s', latest)\n return latest\n\n # Nothing in the workspace was found, try the previous global metadata\n # Important: this function only looks on the global workspace until\n # a certain metadata id reference. 
It will not find metadata that has\n # been added after this reference because this would be new metadata\n # that the workspace should not be able to access\n workspace = family.workspace\n reference = 0\n if workspace.fk_last_metadata_id is not None:\n reference = workspace.fk_last_metadata_id\n\n latest_global = (\n Metadata\n .query\n .filter(Metadata.id_file == file_id,\n Family.fk_workspace_id.is_(None),\n Metadata.id <= reference)\n .join(Family)\n .filter(Family.name == family.name)\n .order_by(Metadata.id.desc())\n .first()\n )\n if latest_global is not None:\n logger.info('Latest is from previous workspace: %s', latest_global)\n return latest_global\n\n return None", "title": "" }, { "docid": "65c971f408b97fc29c032044c8f500bd", "score": "0.5435981", "text": "def fetch_latest_resource():", "title": "" }, { "docid": "61ebe3506d71aad8295dd9f54f1a393b", "score": "0.54037464", "text": "def get_lager_current_data():\n return Lager.query.order_by(Lager.id.desc()).first()", "title": "" }, { "docid": "6dc82731e170fbbc2fb30ab35bd7af8f", "score": "0.53958315", "text": "def get_data(results_path, max_real_time=None):\n if isinstance(results_path, list): # list of paths\n data_dct = {}\n for path in results_path:\n data_dct[path] = get_data(path, max_real_time)\n return data_dct\n elif results_path.endswith(\".db\"): # single data file\n return get_data_from_single_file(results_path, max_real_time)\n else: # path to folder\n if results_path.endswith('/'):\n results_path = results_path[:-1]\n if not os.path.isdir(results_path):\n raise ValueError(\"'results_path={} is not in correct format\".format(results_path))\n data_dct = {}\n for entry in os.listdir(results_path):\n if entry.endswith(\".db\"):\n data_dct[entry] = get_data(os.path.join(results_path, entry), max_real_time)\n return data_dct", "title": "" }, { "docid": "c55e5eb2a4ca6ab89e390178c5492fad", "score": "0.5361852", "text": "def get_latest_file(filename_str_contains, datapath='../data/', filetype='parquet'):\r\n\r\n # Get list with files\r\n onlyfiles = sorted([f for f in listdir(datapath) if isfile(join(datapath, f))])\r\n # Get last file\r\n filename = [s for s in onlyfiles if filename_str_contains in s][-1]\r\n if filetype == 'parquet':\r\n df = pd.read_parquet(datapath + filename)\r\n return df\r\n if filetype == 'pickle':\r\n model = pickle.load(open(datapath + filename, 'rb'))\r\n return model", "title": "" }, { "docid": "b245b52ab64a4635d4af19cc937a6df9", "score": "0.5346024", "text": "def fetch(self):\n self._validate_params()\n if self.islast:\n data = super(DailySummaryReader, self).fetch()\n else:\n data = self._fetch_dates()\n if self.output_format == 'pandas':\n data.set_index('date', inplace=True)\n return data\n else:\n return data", "title": "" }, { "docid": "3c9f5d00992860e4ce3a449650209373", "score": "0.52948964", "text": "def _readself(self):\n return Database.__read(self.name)", "title": "" }, { "docid": "911225fb119dfc0fe8fcb5e39f5b43f7", "score": "0.5285356", "text": "def get_last_data():\n db = connect_db(run_info['db_name'])\n last = last_run_number()\n try:\n return iter(db.view('_design/all/_view/all', descending=True)[last]).next()['value']\n except (KeyError, StopIteration):\n return {}", "title": "" }, { "docid": "375e08072d62ba52140a945a1175dd92", "score": "0.52399296", "text": "def fetch(self) -> pb.LocalPath:\n raise NotImplementedError()", "title": "" }, { "docid": "caef6156964c991748135ce2089746cd", "score": "0.52326256", "text": "def get_puffer_current_data():\n return 
Puffer.query.order_by(Puffer.id.desc()).first()", "title": "" }, { "docid": "5a472b26021a6a131d81ede96490db70", "score": "0.5194381", "text": "def get_latest_train_data():\n\n data_file = os.path.join(\"models\",'latest-train.pickle')\n\n \n models = [f for f in os.listdir(os.path.join(\".\",\"models\")) if re.search(\"test\",f)]\n\n if len(models) == 0:\n raise Exception(\"Models with prefix '{}' cannot be found did you train?\".format(\"test\"))\n\n\n with open(data_file,'rb') as tmp:\n data = pickle.load(tmp)\n\n\n print('data is:', data)\n\n\n return(data)", "title": "" }, { "docid": "1a77b5926fddf38d02c5a4a9b8642bb9", "score": "0.5169384", "text": "def get_storage(self, name):\n cache_result = cache.get(self.get_cache_key(name))\n if cache_result:\n return self.remote\n elif cache_result is None and self.remote.exists(name):\n cache.set(self.get_cache_key(name), True)\n return self.remote\n else:\n return self.local", "title": "" }, { "docid": "76e486c2aaf91313c2416e9532417ed2", "score": "0.51233995", "text": "def __get_db(db_folder, db_name, cache_size=None):\n\n return DBFetcher(db_folder, db_name, \"scenarios\", cache_size).fetch()", "title": "" }, { "docid": "e9ade7162567727239ba7784d30539ee", "score": "0.51099885", "text": "async def getRecord(self, name):\n record = self.records.get(name)\n while record[\"pickling\"]:\n pass\n if record[\"pickled\"]:\n record[\"pickling\"] = True\n record[\"pickled\"] = False\n filehandler = open(os.getcwd() + \"/cache/\"+ str(record[\"ID\"]) + \".cache\", \"rb\")\n\n cache = pickle.load(filehandler)\n fileType = record.get(\"fileType\")\n fileClass = create_parser_object(fileType, name)\n fileObj = fileClass(name)\n record[\"fileObj\"] = fileObj\n await fileObj.set_cache(cache)\n\n record[\"pickling\"] = False\n filehandler.close()\n os.remove(os.getcwd() + \"/cache/\"+ str(record[\"ID\"]) + \".cache\")\n record[\"time\"] = datetime.now()\n return record.get(\"fileObj\")", "title": "" }, { "docid": "939055d783c44e132e63d456cd68b69c", "score": "0.51094985", "text": "def get_experiment_data(experiment_names,\n main_experiment_name,\n from_cached_data,\n data_path,\n main_experiment_benchmarks=None):\n if from_cached_data and os.path.exists(data_path):\n logger.info('Reading experiment data from %s.', data_path)\n experiment_df = pd.read_csv(data_path)\n logger.info('Done reading data from %s.', data_path)\n return experiment_df, 'from cached data'\n logger.info('Reading experiment data from db.')\n experiment_df = queries.get_experiment_data(experiment_names,\n main_experiment_benchmarks)\n logger.info('Done reading experiment data from db.')\n description = queries.get_experiment_description(main_experiment_name)\n return experiment_df, description", "title": "" }, { "docid": "9ff91daab695a87a3e67eaec32b12f77", "score": "0.51086473", "text": "def get_snapshot(self):\n snapshot_path = f\"{self.experiment_path}/latest.pkl\"\n if os.path.exists(snapshot_path):\n self.logger.warning(f\"Snapshot is available, loading from {snapshot_path}.\")\n snapshot = torch.load(snapshot_path)\n else:\n snapshot = None\n return snapshot", "title": "" }, { "docid": "84014a174886f7eff7b55ae53c956cbc", "score": "0.5108492", "text": "def get_latest_version_from_disk() -> Optional[LooseVersion]:\n version_cache = Path(click.get_app_dir(\"senza\")) / \"pypi_version\"\n now = time.time()\n latest_version = None\n if version_cache.exists() and now - version_cache.stat().st_mtime < ONE_DAY:\n with version_cache.open() as version_cache_file:\n str_version = 
version_cache_file.read()\n if str_version:\n latest_version = LooseVersion(str_version)\n return latest_version", "title": "" }, { "docid": "b80b0cc46f6ee2d3897ab11baa692d26", "score": "0.5107387", "text": "def latest_data(self):\n if self._data:\n return self._data\n return None", "title": "" }, { "docid": "b80b0cc46f6ee2d3897ab11baa692d26", "score": "0.5107387", "text": "def latest_data(self):\n if self._data:\n return self._data\n return None", "title": "" }, { "docid": "f13608fb749a43478435875b97ba9859", "score": "0.5095489", "text": "def getResultsDb(self):\n return os.path.join(self.outDir, 'resultsDb_sqlite.db')", "title": "" }, { "docid": "22f7a7f63321498e4a5930c2beb74da2", "score": "0.50889915", "text": "def test_access_run_result_files_local(database, tmpdir):\n # -- Setup ----------------------------------------------------------------\n env = Config().basedir(tmpdir).auth()\n fs = FS(env=env)\n workflow_id, group_id, run_id, user_id = success_run(database, fs, tmpdir)\n local_service = LocalAPIFactory(env=env, db=database, engine=StateEngine())\n # -- Read result files ----------------------------------------------------\n with local_service(user_id=user_id) as api:\n # Map file names to file handles.\n r = api.runs().get_run(run_id=run_id)\n files = dict()\n for fh in r['files']:\n files[fh['name']] = fh['id']\n # Read content of result files.\n fh = api.runs().get_result_file(\n run_id=run_id,\n file_id=files['run/results/B.json']\n )\n results = util.read_object(fh.open())\n assert results == {'B': 1}\n # -- Error when user 2 attempts to read file ------------------------------\n with database.session() as session:\n user_2 = create_user(session, active=True)\n with local_service(user_id=user_2) as api:\n with pytest.raises(err.UnauthorizedAccessError):\n api.runs().get_result_file(\n run_id=run_id,\n file_id=files['run/results/B.json']\n )\n # -- With an open access policy user 2 can read the data file -------------\n env = Config().basedir(tmpdir).open_access()\n local_service = LocalAPIFactory(env=env, db=database, engine=StateEngine())\n with local_service(user_id=user_2) as api:\n api.runs().get_result_file(\n run_id=run_id,\n file_id=files['run/results/B.json']\n )", "title": "" }, { "docid": "214cdf2cbd569812f96159db366923c0", "score": "0.50827193", "text": "def get_dataset_state_dict_latest_run(\n path: Union[str, Path], name: str, index: int = -1\n) -> Union[str, None]:\n latest = get_hydra_latest_run(path, index=index)\n\n if latest is not None:\n dataset_state_dict = latest.joinpath(name)\n if dataset_state_dict.exists():\n return dataset_state_dict.as_posix()\n\n return None", "title": "" }, { "docid": "8f93839e6b1c7bdaca95f213cc31b624", "score": "0.5081968", "text": "def retrieve_saved_dataset(\n self, config: RepoConfig, dataset: SavedDataset\n ) -> RetrievalJob:\n pass", "title": "" }, { "docid": "7284fc63dbd09c6ca6de5ecf8d1c558e", "score": "0.50815266", "text": "def local_query(session, project='CMIP5', latest=True, **kwargs): \n\n # make sure project is upper case \n project = project.upper()\n r = build_query(session, project, **kwargs)\n\n # run the sql using pandas read_sql,index data using path, returns a dataframe\n df = pd.read_sql(r.selectable, con=session.connection())\n df = df.rename(columns={'path': 'opath'})\n\n # fix path by substituing output1/2 with combined, separate path from filenames\n fix_paths = df['opath'].apply(fix_path, latest=latest)\n df['path'] = fix_paths.map(os.path.dirname)\n # added to eliminate wrong paths for mk3.6.0 once 
that is fixed might be removed\n df = df[df.path != '/path/todelete']\n df['filename'] = df['opath'].map(os.path.basename)\n\n # group by path\n mcols = ['filename','period']\n agg_dict = {k: ('first' if k not in mcols else set) for k in list(df)}\n res = df.groupby(['path']).agg(agg_dict)\n\n # apply postprocessing function to each row\n res = res.apply(post_local, axis=1)\n # remove unuseful columns\n todel = ['opath','r','i','p','f','period']\n cols = [c for c in todel if c in res.columns]\n res = res.drop(columns=cols)\n return res", "title": "" }, { "docid": "5fb48e2047669317912fadb023d92aef", "score": "0.5079554", "text": "def _load_one(self, id, doc, use_cache=True):\n if use_cache:\n cache = self._cache\n obj = cache.get(self._cache_get_key(id, doc))\n if obj is not None:\n return obj\n # Create a DBRef object and then load the full state of the object.\n dbref = serialize.DBRef(self._pj_table, id, self._pj_jar.database)\n # Stick the doc into the _latest_states:\n self._pj_jar._latest_states[dbref] = doc\n obj = self._pj_jar.load(dbref)\n self._locate(obj, id, doc)\n # Add the object into the local container cache.\n if use_cache:\n cache[obj.__name__] = obj\n return obj", "title": "" }, { "docid": "e4a91d21a342754903c229fbd1ef0034", "score": "0.5074083", "text": "def load_last_used() -> dict:\n if not os.path.isfile('last_used.cache'):\n return None\n try:\n with open('last_used.cache', mode='r') as f:\n return json.loads(f.read())\n except Exception as e:\n print(e, file=sys.stderr)\n return None", "title": "" }, { "docid": "e4196721a55a13911714551331af3946", "score": "0.50714445", "text": "def _local_restore(db_name_or_dump_key, *, conn_db, temp_db, curr_db, prev_db):\n if db_name_or_dump_key == ':current':\n local_restore_db = curr_db\n elif db_name_or_dump_key == ':previous':\n local_restore_db = prev_db\n else:\n local_restore_db = database.make_config(db_name_or_dump_key[1:])\n\n if not _db_exists(local_restore_db):\n raise RuntimeError(\n f'local database {local_restore_db[\"NAME\"]} does not exist.'\n ' Use \"pgclone ls --local\" to see local database keys.'\n )\n\n # Perform the local restore process. 
It is completely valid to\n # try to restore the temp_db, so do nothing if this database\n # is provided by the user\n if local_restore_db != temp_db: # pragma: no branch\n _drop_db(temp_db)\n create_temp_sql = f'''\n CREATE DATABASE {temp_db[\"NAME\"]}\n TEMPLATE {local_restore_db[\"NAME\"]}\n '''\n command.run_psql(create_temp_sql, db=conn_db)\n\n _set_search_path(temp_db, conn_db)\n\n return db_name_or_dump_key", "title": "" }, { "docid": "7b15065c304dde34ad49532f8e38f031", "score": "0.50708765", "text": "def test_result_archive_local(database, tmpdir):\n # -- Setup ----------------------------------------------------------------\n env = Config().basedir(tmpdir).auth()\n fs = FS(env=env)\n workflow_id, group_id, run_id, user_id = success_run(database, fs, tmpdir)\n local_service = LocalAPIFactory(env=env, db=database, engine=StateEngine())\n # -- Get result archive ---------------------------------------------------\n with local_service(user_id=user_id) as api:\n archive = api.runs().get_result_archive(run_id=run_id)\n tar = tarfile.open(fileobj=archive.open(), mode='r:gz')\n members = [t.name for t in tar.getmembers()]\n assert len(members) == 2\n assert 'A.json' in members\n assert 'run/results/B.json' in members", "title": "" }, { "docid": "1c8bc3edc631eeb60438072287778829", "score": "0.5051284", "text": "def _FetchFromCache(self):\n\n filename = f'{self.set_code}-{self.cycle}'\n try:\n mtime = datetime.datetime.fromtimestamp(os.stat(filename).st_mtime)\n age = datetime.datetime.now() - mtime\n if age < datetime.timedelta(minutes=20) and not flags.FLAGS.fetch:\n player_list = pickle.load(open(filename, 'rb'))\n print('Loaded previous results from cache')\n return player_list\n except (IOError, EOFError, FileNotFoundError):\n pass\n player_list = self._FetchFromSheet()\n pickle.dump(player_list, open(filename, 'wb'))\n return player_list", "title": "" }, { "docid": "9c0dda613b7f044aeccdb4481d2468ff", "score": "0.5043707", "text": "def last_data_file(self):\n return os.path.join(self.storage_dir(False), \"%s%s\" % (self.storage_key, self.ext))", "title": "" }, { "docid": "42733387f5ac6103f5b8086ff1fd435e", "score": "0.5043544", "text": "def load(self, name: str, task_name: str, task_unique_config: Dict) -> Optional[Any]:\n task_result_cls = self._get_task_result_class()\n # try to get query run document based on task name and task unique config\n try:\n task_result = task_result_cls.objects(name=task_name).get(unique_config__lte=task_unique_config)\n except me.DoesNotExist:\n return None\n # try to query obj from results DictField\n try:\n result_obj = task_result.results[name]\n obj = pickle.loads(result_obj.obj.read())\n except KeyError:\n logger.warning(f'\"{name}\" could not be found in store.')\n return None\n return obj", "title": "" }, { "docid": "80ee952c4422cd3fc6b4461b5c7334cc", "score": "0.5040105", "text": "def _gen_false_local_retrieve(self):\n # Create the temp dir\n temp_dir = tempfile.mkdtemp()\n # Create the temp task\n name = os.path.split(self.solver_override)[1]\n id = str(uuid.uuid1())\n logging.debug('Creating a task for {} named {}, id {}'.format(self.solver_override, name, id))\n\n temp_task = os.path.join(temp_dir, 'task.json')\n with open(temp_task, 'w') as f:\n train_directive = [{'type': 'train', 'parameters': {'solver': self.solver_override}}]\n task_file = {'directives': train_directive, 'title': name, 'id': id}\n json.dump(task_file, f)\n # Create the temp list containing one task\n temp_task_list = tempfile.NamedTemporaryFile()\n print(temp_task, 
file=temp_task_list)\n logging.debug('Created task list with single task at {}'.format(temp_task_list.name))\n return LocalRetrieve(temp_task_list.name)", "title": "" }, { "docid": "1079b8e8704b65b7bf2c053fe4ae49c8", "score": "0.50376886", "text": "def _getLocalMetadata(self):\n log.debug(\"LOC - Getting local metadata for %s\" % self.name)\n ret = None\n if os.path.exists(self.localRepomdXml):\n ftmp = open(self.localRepomdXml, 'r')\n data = ftmp.read()\n if data != None:\n ret = self._checkRepoMD(data)\n ftmp.close()\n return ret", "title": "" }, { "docid": "d6924738a1547fe710676bf07f31dc24", "score": "0.5024995", "text": "def get(self, synset, name):\n # Flush any pending writes to the file before we try to read.\n self.__data_file.flush()\n\n if not self.is_in_cache(synset, name):\n return None\n return self.__do_get(synset, name)", "title": "" }, { "docid": "6773f0aa531d0fc379e80fe4aa0b3474", "score": "0.5013541", "text": "def get_data(self): # refactor get?\n # if self._cache is not None:\n # return self._cache\n\n df = self.source.get()\n\n return df", "title": "" }, { "docid": "78a201a5d027fa3efb5d67688fc00079", "score": "0.5013078", "text": "def latest_result(self):\n controllers.connect()\n result = controllers.TestResultsController.latest_result_for(self.test_name)\n self.set_resultslocation_from_testresult(result)\n return result", "title": "" }, { "docid": "d6514c7348a30be9eeee4750c1d14fa4", "score": "0.5010318", "text": "def getOtherdata(self, location, checksum):\n return self.open_database(_sqlitecache.update_other(location,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchecksum,\n self.callback,\n self.repoid))", "title": "" }, { "docid": "82929e9e42dc50cc6fc36ad91ac4f5f9", "score": "0.49958733", "text": "def get_latest_version(self):\n if self.mode == \"Not Found\":\n return None\n return self.latest_version", "title": "" }, { "docid": "3612b0779d1f5426b152b102e42ca4eb", "score": "0.49910244", "text": "def get_object_for_this_type(self, **kwargs):\n return self.model_class()._default_manager.using(self._state.db).get(**kwargs)", "title": "" }, { "docid": "89fbfa109abac29d55a6933231206027", "score": "0.49891764", "text": "def get_latest_checkpoint(train_logdir_local: str) -> str:\n ckpts = glob.glob(join(train_logdir_local, 'model.ckpt-*.meta'))\n times = map(os.path.getmtime, ckpts)\n latest = sorted(zip(times, ckpts))[-1][1]\n return latest[:len(latest) - len('.meta')]", "title": "" }, { "docid": "b9ea0ef1494d3368fc09e30dcf79aad8", "score": "0.49861476", "text": "def fetch(self, filename: str, default: Optional[str] = None) -> Optional[str]:\n if os.path.exists(os.path.join(self.work_dir, filename)):\n with open(os.path.join(self.work_dir, filename), \"r\") as f:\n return f.read()\n return default", "title": "" }, { "docid": "da12b24e549c47b1562841bfe698dbef", "score": "0.4983532", "text": "def get_pickled_ontology(filename):\n\tpickledfile = ONTOSPY_LOCAL_CACHE + \"/\" + filename + \".pickle\"\n\tif os.path.isfile(pickledfile):\n\t\ttry:\n\t\t\treturn cPickle.load(open(pickledfile, \"rb\"))\n\t\texcept:\n\t\t\tprint Style.DIM + \"** WARNING: Cache is out of date ** ...recreating it... 
\" + Style.RESET_ALL\n\t\t\treturn None\n\telse:\n\t\treturn None", "title": "" }, { "docid": "fec41988da3489ca4db15721f41c846b", "score": "0.49826935", "text": "def load(self,):\n try:\n m = self.db.fast.find_one({'resource.commit':False})\n m['resource'] = {'online':[], 'netdisk':[], 'episode':[],\n 'complete':[resource for resource in m['resource'] if resource['commit'] == False]\n }\n m['patch'] = False\n return m\n except:\n return None", "title": "" }, { "docid": "72818f0367fd807edec69f2ed361aa98", "score": "0.49766222", "text": "def get_last_fetched(self, size=20):\r\n return self.get_best_in_zset('last_fetched', size)", "title": "" }, { "docid": "7dbdfa5023f2fb50b561fe8145ea8e60", "score": "0.49760035", "text": "def get(\n self,\n *,\n db_model: data_algebra.db_model.DBModel,\n sql: str,\n data_map: Dict[str, Any],\n ):\n k = make_cache_key(db_model=db_model, sql=sql, data_map=data_map)\n res = self.result_cache[k]\n assert data_algebra.data_model.default_data_model().is_appropriate_data_instance(res)\n return res.copy()", "title": "" }, { "docid": "8b2f167f186ad7265d5d5a01663269c8", "score": "0.49704647", "text": "def _get_local_dataset(dataset_location=None): \n if dataset_location is None: #if S3 not provided, downloading data locally on Sagemaker nodes.\n dataset = get_dataset(\"electricity\", regenerate=False)\n \n else: #if S3 bucket is specified in os.environ['SM_CHANNEL_TRAINING'], that means that Sagemaker automatically download dataset locally. So no need to download it again.\n logger.info(\"Attempting to get GluonTS dataset from {}\".format(dataset_location))\n root_dir = os.path.join(dataset_location, \"data\")\n train_dir = os.path.join(root_dir, \"train\")\n test_dir = os.path.join(root_dir, \"test\")\n\n dataset = common.load_datasets(\n metadata=root_dir,\n train=train_dir,\n test=test_dir)\n \n logger.info(\"GluonTS dataset retrieved successfully...\")\n \n return dataset", "title": "" }, { "docid": "479962a5ed279e677f0e422fc063f518", "score": "0.49420264", "text": "def load_tdb(self):\r\n with open(self.data_path,\"rb\") as tdb:\r\n return pickle.load(tdb)", "title": "" }, { "docid": "50704446bab3d34ac05bc8788e378f98", "score": "0.49377665", "text": "def find_latest_report(self):\n\n local_path = self.information.get_framework_local_path()\n report_path = os.path.join(local_path, 'report')\n files, name = self.find(report_path, '\\\\d+-\\\\d+')\n files.sort()\n latest_report = files[-1]\n\n return latest_report", "title": "" }, { "docid": "a528ebb01a41c2b935a3e32a7b2a5a05", "score": "0.4931873", "text": "def get_last(self):\n if self._results:\n return self._results[0]\n else:\n return 0", "title": "" }, { "docid": "78805c765a9ec5658d1d06eb19edc70a", "score": "0.49286348", "text": "def load_data(self, opt: Opt, filename: str) -> Optional[List[List[Message]]]:\n # first check for the most recent date\n save_dir = self._get_save_path(opt['datapath'], '*')\n all_dates = []\n for fname in glob.glob(os.path.join(save_dir, filename)):\n date = os.path.split(fname)[0].split('_')[-1]\n all_dates.append(date)\n\n if len(all_dates) > 0:\n most_recent = os.path.join(\n self._get_save_path(opt['datapath'], sorted(all_dates)[-1]), filename\n )\n else:\n # data has not been built yet\n return None\n\n if opt['invalidate_cache']:\n # invalidate the cache and remove the existing data\n logging.warning(\n f' [ WARNING: invalidating cache at {self.save_path} and rebuilding the data. 
]'\n )\n if self.save_path == most_recent:\n os.remove(self.save_path)\n return None\n\n # Loading from most recent date\n self.save_path = most_recent\n logging.info(f' [ Data already exists. Loading from: {self.save_path} ]')\n with PathManager.open(self.save_path, 'rb') as f:\n data = json.load(f)\n\n return data", "title": "" }, { "docid": "63419482d074fa1e064012157b8d9a64", "score": "0.49239388", "text": "def __get_latest_file(self):\n fn = self.FILENAME[\"PREFIX\"] + \"*\" + self.FILENAME[\"SUFFIX\"]\n archives = sorted(glob.glob(fn), key=os.path.getmtime)\n if len(archives) > 0:\n return archives[-1] # last one on list is latest\n return None", "title": "" }, { "docid": "65784a1759a4eb95179fc02237c026fd", "score": "0.49186122", "text": "def get_data_file(localfile, mode='r'):\n with open(localfile, mode=mode) as datafile:\n contents = datafile.read()\n\n return contents", "title": "" }, { "docid": "a8661e8e5715a63637c928e6329b623a", "score": "0.49121428", "text": "def get_best_model_file(modeldb, T_EQU=1100, AB=-4, R_PL=1.25, M_PL=1.81, species='all') :\n\n # load json database file containig all models in the library\n try :\n with open(modeldb, 'r') as f:\n datastore = json.load(f)\n except :\n print(\"ERROR: could not open models database file \",modeldb)\n exit()\n\n mind_T_EQU, mind_ab = 1.e20, 1.e20\n mind_R_PL, mind_M_PL = 1.e20, 1.e20\n\n minkey = None\n\n # loop over all entried in the database to get best matched model file\n for key in datastore.keys() :\n if species == 'all' :\n ab_key = 'AB_{0}'.format(datastore[key]['SELECSPC'])\n else :\n ab_key = 'AB_{0}'.format(species)\n \n if ab_key in datastore[key].keys() :\n\n d_T_EQU = np.abs(datastore[key]['TEQ'] - T_EQU)\n d_ab = np.abs(datastore[key][ab_key] - AB)\n d_R_PL = np.abs(datastore[key]['RPJUP'] - R_PL)\n d_M_PL = np.abs(datastore[key]['MPJUP'] - M_PL)\n \n if d_T_EQU <= mind_T_EQU and d_ab <= mind_ab and d_R_PL <= mind_R_PL and d_M_PL <= mind_M_PL and (species in datastore[key]['filename']):\n mind_T_EQU = d_T_EQU\n mind_ab = d_ab\n mind_R_PL, mind_M_PL = d_R_PL, d_M_PL\n minkey = key\n\n # return best model file path\n return datastore[minkey]['filepath']", "title": "" }, { "docid": "d53c368efc4d11a460c51b07d4e0db85", "score": "0.49118307", "text": "def latest(self):\n dirs = sorted(self.dirs(), key=getmtime, reverse=True)\n return dirs[0] if dirs else None", "title": "" }, { "docid": "ac59a8dcb2182a2501548e1328d9c04a", "score": "0.49065137", "text": "def get_cached_source( url, refresh=False, uselocal=False, debug=None ):\n c_source, sock, c_filename = \"\", None, None\n try:\n # set cached filename\n c_filename = get_cached_filename( url )\n # if cached file exists read this, only is expired\n if uselocal: refresh = False\n if not refresh and os.path.exists( c_filename ):\n if uselocal or not is_expired( os.path.getmtime( c_filename ) ):\n #print ( \"Reading local source: %r\" % c_filename )\n sock = open( c_filename )\n c_source = sock.read()\n except:\n print_exc()\n return c_source, sock, c_filename", "title": "" }, { "docid": "0a188500151601191f131dce161a42e0", "score": "0.48975754", "text": "def _gen_retrieve(self):\n # Retriever gets new Caffe tasks\n # If we don't want to override via solver get the specified retriever\n if not self.solver_override:\n logging.debug(\"Generating retriever specified in config file\")\n retrieve_type = self._get_config('mri-client', 'retrieve').lower()\n if retrieve_type == 'local-retrieve':\n return LocalRetrieve(self._get_config('local-retrieve', 'task_list'))\n # 
Otherwise we'll override by creating a false task list and false task (local)\n else:\n logging.debug(\"Overriding retriever via command line\")\n return self._gen_false_local_retrieve()", "title": "" }, { "docid": "87e8945036b0c3c4e82309d5c177c9b4", "score": "0.4893085", "text": "def get_cached_data(self, filename, default=None):\n return self._cache.get(filename, default)", "title": "" }, { "docid": "18ac2e3ff0eca2f16e981b9b4a8bddf4", "score": "0.4883984", "text": "def load_data(config: object) -> object:\n if os.path.isfile(config.pickle_path):\n print(f'Loading existing data for {config.mode} model')\n with open(config.pickle_path, 'rb') as pfile:\n tmp = pickle.load(pfile)\n return tmp\n else:\n raise RuntimeError('No pickle file exist')", "title": "" }, { "docid": "0f3e0ad14b546f1bc6d501596f180dc1", "score": "0.48826542", "text": "def getDatabase(self):\r\n try:\r\n return self.modDict['database'][0]\r\n except KeyError:\r\n return None", "title": "" }, { "docid": "2b920094fa3b3939db24c53abdec8668", "score": "0.4876163", "text": "def get_stored(self, uuid):\n return self.delayed_query(uuid, args={'mode': 'results'})", "title": "" }, { "docid": "d8e81feeb99c76c70a8717694a01fc7b", "score": "0.48720574", "text": "def _get_current_dataset(self) -> DatasetData:\n return self.get_dataset_by_id(dataset_id=self.dataset_id)", "title": "" }, { "docid": "06811993e112bdb83f65d6ede683bcfb", "score": "0.48701692", "text": "def get_data(filename, field, path='./results/'):\n\n result = FCResult(filename, path=path)\n returnvalue = result.cache[0][field]\n\n return returnvalue", "title": "" }, { "docid": "15823575a213efad1a64cfc2688d5ea4", "score": "0.4867898", "text": "def get_data_from_url(filename):\n for folder in PREDEFINED_DATA_FOLDERS:\n full_path = os.path.join(folder, filename)\n if os.path.exists(full_path):\n return full_path\n with data.conf.set_temp(\"dataurl\", DATAURL), data.conf.set_temp(\n \"remote_timeout\", 30\n ):\n map_out = data.get_pkg_data_filename(filename, show_progress=True)\n return map_out", "title": "" }, { "docid": "c665465af05c8563dd471e2a6aef55c2", "score": "0.48666346", "text": "def load_results(PATH_RESULTS, chosen_params):\n# -----------------------------------------------------------------------------\n file_name = get_chosen_file_name(PATH_RESULTS, chosen_params)\n return load_pickle(PATH_RESULTS+\"/\"+file_name)", "title": "" }, { "docid": "82138d1a5f744e36a519133b1d4b3eea", "score": "0.48651052", "text": "def load_data(state):\n\tsubprocess.call([\"mongorestore\", \"--maintainInsertionOrder\", state])", "title": "" }, { "docid": "2c5390b7b936e40339232d48134d0a39", "score": "0.48642182", "text": "def pickle_as_single_data_frame(max_year=settings.MAX_YEAR):\n results = []\n path = Path(settings.PICKLE_SRC)\n for file in [str(x) for x in path.glob('**/*.pickle')]:\n results.append(pd.read_pickle(file))\n\n # merge the files\n df = pd.concat(results, sort=False)\n\n # return data within the max year value\n return df[df[YEAR] <= max_year]", "title": "" }, { "docid": "2c91a4cba2eea760d5b7080f76a592cf", "score": "0.48628324", "text": "def initial_load():\n local('./manage.py loaddata data/initial.json')\n if os.path.exists('data/initial_local.json'):\n local('./manage.py loaddata data/initial_local.json')", "title": "" }, { "docid": "3d56bb4d6c2a612adf898c17498431d4", "score": "0.48606214", "text": "def call_local_query(s, project, latest, **kwargs):\n\n ds_list = [] \n paths = []\n combs = [dict(zip(kwargs, x)) for x in itertools.product(*kwargs.values())]\n for c 
in combs:\n ds_list.append(local_query(s,project=project, latest=latest, **c))\n datasets = pd.concat(ds_list, ignore_index=True)\n \n paths = datasets['path'].tolist()\n return datasets, paths", "title": "" }, { "docid": "add4f098b957b3dfe0a8bad1edb33757", "score": "0.4854056", "text": "def get_study_database():\n if os.path.isfile(processed_filepath):\n return processed_filepath\n else:\n print('Error: study database does not exist.')\n quit()", "title": "" }, { "docid": "7a4a9cdd80fb8aba6b6198fede126bd7", "score": "0.4852507", "text": "def _get_database(self):\n if not os.path.exists(self.host):\n return EphemeralDB()\n\n with open(self.host, \"rb\") as f:\n data = f.read()\n if not data:\n database = EphemeralDB()\n else:\n # TODO: This call seems to block sometimes on Github CI\n # when running dashboard tests\n database = pickle.loads(data)\n\n return database", "title": "" }, { "docid": "3e0d048ee047bb53fc88b3f978c04c25", "score": "0.48418108", "text": "def get_database():\n return _database.get(_thread_locals, get_default_database())", "title": "" }, { "docid": "33c0cabe0645c074f397afffe903e342", "score": "0.48407808", "text": "def _get(self, name, local=False, refresh=False):\n a_name = f'_{name}'\n if local:\n setattr(self, a_name, self._load(name))\n elif refresh or getattr(self, a_name) is None:\n response = requests.get(f'https://jsonplaceholder.typicode.com/{name}', headers=self.headers)\n json_ = response.json()\n setattr(self, a_name, json_)\n self._save(name, json_)\n\n return deepcopy(getattr(self, a_name))", "title": "" }, { "docid": "5721b3b2b184a1aea1d124cd4d5008ff", "score": "0.48318335", "text": "def _attempt_to_retrieve_result(self):\n\n def copy_from_docker(container_id, src):\n archived_result, stat = self.cli.get_archive(container_id, src)\n if stat[\"size\"] == 0:\n # 0 byte file, it can't be anything else than None\n return None\n # no need to port to a file since we intend to deserialize\n file_standin = BytesIO(b\"\".join(archived_result))\n tar = tarfile.open(fileobj=file_standin)\n file = tar.extractfile(stat[\"name\"])\n lib = getattr(self, \"pickling_library\", pickle)\n return lib.loads(file.read())\n\n try:\n return copy_from_docker(self.container[\"Id\"], self.retrieve_output_path)\n except APIError:\n return None", "title": "" }, { "docid": "135a43b56e5af7fcf3cce650b59ef280", "score": "0.4818903", "text": "def db():\n return the_db", "title": "" }, { "docid": "cf82a015d07c603e4efc7813cf6214a2", "score": "0.48177016", "text": "def get(self, data):\n return db", "title": "" }, { "docid": "cf82a015d07c603e4efc7813cf6214a2", "score": "0.48177016", "text": "def get(self, data):\n return db", "title": "" }, { "docid": "7b4576d7631f9e04f32e1761da73a740", "score": "0.4813267", "text": "async def get(self, oid):\n obj = self.modified.get(oid, None)\n if obj is not None:\n return obj\n\n obj = self._cache.get(oid, None)\n if obj is not None:\n return obj\n\n result = HARD_CACHE.get(oid, None)\n if result is not None:\n obj = reader(result)\n obj._p_jar = self\n return obj\n\n result = await self._manager._storage.load(self, oid)\n obj = reader(result)\n obj._p_jar = self\n\n if obj.__cache__ == 0:\n HARD_CACHE[oid] = result\n\n return obj", "title": "" }, { "docid": "0d2b58bbb814e98f3758141953ad77b6", "score": "0.48126087", "text": "def get_latest_local_version(self, strict=False, loose=False):\n latest_version = '0.0'\n # check if it already exists and its version\n files = os.listdir(self.outputdir) if os.path.isdir(self.outputdir) else []\n webdriver_files = 
[x for x in files if x.startswith(self.base_filename)]\n if webdriver_files:\n sorted_files = sorted(webdriver_files, reverse=True)\n latest_version_filename = sorted_files[0]\n extracted_version = helpers.extract_version_from_filename(latest_version_filename)\n if extracted_version:\n latest_version = extracted_version\n if strict:\n latest_version = helpers.strict_version(latest_version)\n elif loose:\n latest_version = helpers.loose_version(latest_version)\n return latest_version", "title": "" }, { "docid": "63afe62f66e2df3edee1145a9a895690", "score": "0.48066273", "text": "def get_store(reuse=True, *, in_process=False):\n stores = _active_stores()\n if reuse and stores:\n return stores[-1]\n elif in_process:\n return NoopModelStore()\n elif SHMModelStore.ENABLED:\n return SHMModelStore()\n else:\n return FileModelStore()", "title": "" }, { "docid": "b810bf3ca6f58db8e97f2aac7427d2cf", "score": "0.47975758", "text": "def get(self, path):\n if self._thread and self._thread.is_alive():\n raise error.ProgrammingError(\n b'cannot obtain cached repo while loader is active'\n )\n return self._cache.peek(path, None)", "title": "" }, { "docid": "4b8357b611b0ab741ed1861c3d6830dc", "score": "0.4796209", "text": "def db_for_read(self, model, **hints):\n\n return self.database_for_app(model._meta.app_label)", "title": "" }, { "docid": "5a1334c687aa78c5d6fa01c21298cff6", "score": "0.47923633", "text": "def FetchFrom(self, db):\n return db.db.Fetch(self, result=True)", "title": "" }, { "docid": "efa33c4d6e91c5890974215f58f58120", "score": "0.4785722", "text": "def load_experiment(\n tuner_name: str,\n download_if_not_found: bool = True,\n load_tuner: bool = False,\n local_path: Optional[str] = None,\n experiment_name: Optional[str] = None,\n) -> ExperimentResult:\n path = experiment_path(tuner_name, local_path)\n metadata_path = path / ST_METADATA_FILENAME\n if not (metadata_path.exists()) and download_if_not_found:\n logger.info(\n f\"experiment {tuner_name} not found locally, trying to get it from s3.\"\n )\n download_single_experiment(\n tuner_name=tuner_name, experiment_name=experiment_name\n )\n try:\n with open(metadata_path, \"r\") as f:\n metadata = json.load(f)\n except FileNotFoundError:\n metadata = None\n try:\n results_fname = ST_RESULTS_DATAFRAME_FILENAME\n if (path / results_fname).exists():\n results = pd.read_csv(path / results_fname)\n else:\n results = pd.read_csv(path / results_fname[:-4])\n except Exception:\n results = None\n if load_tuner:\n try:\n tuner = Tuner.load(str(path))\n except FileNotFoundError:\n tuner = None\n except Exception:\n tuner = None\n else:\n tuner = None\n return ExperimentResult(\n name=tuner.name if tuner is not None else path.stem,\n results=results,\n tuner=tuner,\n metadata=metadata,\n path=path,\n )", "title": "" }, { "docid": "94422a22c2de1167531c4ff7fe14419b", "score": "0.47785616", "text": "def getPrimary(self, location, checksum):\n return self.open_database(_sqlitecache.update_primary(location,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t checksum,\n self.callback,\n self.repoid))", "title": "" }, { "docid": "54ff67f4f1c8dbcdc8accfaad48d5b14", "score": "0.47684452", "text": "def _get_local(local_dir, path):\n\n local_path = local_dir + path\n\n if not os.path.exists(local_path):\n raise FileNotFoundError(local_path + ' doesn\\'t exist.')\n \n reader = open(local_path, 'rb')\n file_size = os.path.getsize(local_path)\n\n # (reader, writer, file_size)\n return (reader, None, file_size)", "title": "" }, { "docid": "d74334730cc051b3ab0008d7527435bd", "score": 
"0.47607294", "text": "def _get_ldp_database(self):\n return self.__ldp_database", "title": "" }, { "docid": "2d3531603b21bb03ff16e8945eec496b", "score": "0.47603187", "text": "def objPull(self, *args, **kwargs):\n b_status = True\n l_localfile = [] # Name on the local file system\n l_objectfile = [] # Name in the object storage\n str_swiftLocation = ''\n str_mapLocationOver = ''\n str_localfilename = ''\n str_storagefilename = ''\n str_prependBucketPath = ''\n d_ret = {\n 'status': b_status,\n 'localFileList': [],\n 'objectFileList': [],\n 'localpath': ''\n }\n\n d_conn = self.connect(*args, **kwargs)\n if d_conn['status']:\n str_prependBucketPath = d_conn['prependBucketPath']\n\n str_swiftLocation = str_prependBucketPath\n\n for k,v in kwargs.items():\n if k == 'fromLocation': str_swiftLocation = '%s%s' % (str_prependBucketPath, v)\n if k == 'mapLocationOver': str_mapLocationOver = v\n\n # Get dictionary of objects in storage\n d_ls = self.ls(*args, **kwargs)\n\n # List of objects in storage\n l_objectfile = [x['name'] for x in d_ls['objectDict']]\n\n if len(str_mapLocationOver):\n # replace the local file path with object store path\n l_localfile = [w.replace(str_swiftLocation, str_mapLocationOver) \\\n for w in l_objectfile]\n else:\n # Prepend a '/' to each element in the l_objectfile:\n l_localfile = ['/' + '{0}'.format(i) for i in l_objectfile]\n str_mapLocationOver = '/' + str_swiftLocation\n\n d_ret['localpath'] = str_mapLocationOver\n d_ret['currentWorkingDir'] = os.getcwd()\n\n if d_conn['status']:\n for str_localfilename, str_storagefilename in zip(l_localfile, l_objectfile):\n try:\n d_ret['status'] = True and d_ret['status']\n obj_tuple = d_conn['conn'].get_object(\n d_conn['container_name'],\n str_storagefilename\n )\n str_parentDir = os.path.dirname(str_localfilename)\n os.makedirs(str_parentDir, exist_ok = True)\n with open(str_localfilename, 'wb') as fp:\n # fp.write(str(obj_tuple[1], 'utf-8'))\n fp.write(obj_tuple[1])\n except Exception as e:\n d_ret['error'] = str(e)\n d_ret['status'] = False\n d_ret['localFileList'].append(str_localfilename)\n d_ret['objectFileList'].append(str_storagefilename)\n return d_ret", "title": "" }, { "docid": "684dc973c14265eb776b318b1614bd2d", "score": "0.47527802", "text": "def get_latest(self, default=None):\n if len(self):\n return self[0]\n else:\n return default", "title": "" }, { "docid": "c936a824d6035dd9f228deffbcab2a52", "score": "0.47526893", "text": "def get_cached_data(self, url):\n key, build = self.get_key_from_url(url)\n self.cursor.execute(\"SELECT * FROM ensembl WHERE key=? 
AND genome_build=?\",\n (key, build))\n row = self.cursor.fetchone()\n \n # if the data has been cached, check that it is not out of date\n if row is not None:\n data = zlib.decompress(row[\"data\"])\n diff = self.today - datetime.strptime(row[\"cache_date\"], \"%Y-%m-%d\")\n if diff.days < 180:\n return data\n \n return None", "title": "" }, { "docid": "6ae4e25d741a92a7912a5018685015e7", "score": "0.4752327", "text": "def data_file(self, read=True):\n if read:\n if os.path.exists(self.this_data_file()):\n return self.this_data_file()\n elif os.path.exists(self.last_data_file()):\n return self.last_data_file()\n else:\n return self.this_data_file()\n else:\n return self.this_data_file()", "title": "" }, { "docid": "1d543074572435161fdfa8daad6be092", "score": "0.47514248", "text": "def get_last_date_found():\n def gldf_inner(hot100):\n return list(hot100.keys())[-1]\n return gldf_inner(read_json(os.sep.join(['data', 'hot100.json'])))", "title": "" }, { "docid": "025dd6c548f1691ca1c45105511011d4", "score": "0.47474656", "text": "def get_local_data_store() -> str:\n _global_conf = read_global_fdpconfig()\n\n try:\n return _global_conf[\"registries\"][\"local\"][\"data_store\"]\n except KeyError as e:\n raise fdp_exc.CLIConfigurationError(\n \"Expected key 'registries:local:data_store' in global CLI configuration\"\n ) from e", "title": "" }, { "docid": "7193b1fd4d0d051071f92b4cfff59ab8", "score": "0.47452772", "text": "def get_kessel_current_data():\n return Kessel.query.order_by(Kessel.id.desc()).first()", "title": "" }, { "docid": "59222de29d12b0cd83ef7b5a2c096cb8", "score": "0.47317797", "text": "def load_data():\n pkg_dir = Path(__file__).parent\n hitran = pkg_dir / \"Hitran\"\n if not (hitran.exists() and hitran.is_dir()):\n download_data(cwd=str(pkg_dir))\n return str(hitran.absolute())", "title": "" }, { "docid": "fe34efa7538f5a335a5a355d3b259417", "score": "0.4726304", "text": "def get_database_reader(data_path):\n if data_path.endswith('.npz'):\n print('Numpy database detected.')\n return NumpyData(data_path, 'r')\n elif data_path.endswith('.h5') or data_path.endswith('.hdf5'):\n print('HDF5 database detected.')\n return H5pyData(data_path, 'r')\n else:\n raise TypeError(\"Error! Database must have extension %s\" % supported_database_extensions)", "title": "" } ]
60adf11f2a9751f14a8ef65231d4c05d
r""" Return the FindStat identifier of the statistic.
[ { "docid": "a4d686e2f37ebac8c56bb38feb5e58fc", "score": "0.0", "text": "def id(self):\n return self._id", "title": "" } ]
[ { "docid": "43b0dde7ac6d869f6563a21cd3d75741", "score": "0.7452811", "text": "def id(self):\n return self._map[FINDSTAT_MAP_IDENTIFIER]", "title": "" }, { "docid": "a088cfd9a870d83188b34abc3a381360", "score": "0.6871175", "text": "def name(self):\n return self._map[FINDSTAT_MAP_NAME]", "title": "" }, { "docid": "64e6c772b64e739a176ff7ee3b3a3a52", "score": "0.6669847", "text": "def stat_id(self):\n return self._stat_id", "title": "" }, { "docid": "37c8d34dce93bd45ae62a1a13100740b", "score": "0.65966517", "text": "def name(self):\n # this needs to be decided how to do properly\n if hasattr(self,\"_name\"):\n return self._name\n else:\n return self._description.partition(FINDSTAT_SEPARATOR_NAME)[0]", "title": "" }, { "docid": "842cac82c712a21bf43ef8ce84ea0bd8", "score": "0.6175179", "text": "def _find_by_id(self):\n self._query = \"ID\"\n\n # get the database entry from FindStat\n url = FINDSTAT_URL_DOWNLOADS_STATISTICS %self.id_str()\n _ = verbose(\"Fetching URL %s ...\" %url, caller_name='FindStat')\n try:\n self._raw = json.load(urlopen(url), object_pairs_hook=OrderedDict)\n except HTTPError as error:\n if error.code == 404:\n raise ValueError(\"%s is not a FindStat statistic identifier.\" %self.id_str())\n else:\n raise\n\n self._description = self._raw[FINDSTAT_STATISTIC_DESCRIPTION].encode(\"utf-8\")\n self._name = self._raw[FINDSTAT_STATISTIC_NAME].encode(\"utf-8\")\n self._references = self._raw[FINDSTAT_STATISTIC_REFERENCES].encode(\"utf-8\")\n self._collection = FindStatCollection(self._raw[FINDSTAT_STATISTIC_COLLECTION])\n self._code = self._raw[FINDSTAT_STATISTIC_CODE]\n\n from ast import literal_eval\n gf = self._raw[FINDSTAT_STATISTIC_GENERATING_FUNCTION]\n self._generating_functions_dict = { literal_eval(key):\n { literal_eval(inner_key): inner_value\n for inner_key, inner_value in value.iteritems() }\n for key, value in gf.iteritems() }\n\n from_str = self._collection.from_string()\n # we want to keep FindStat's ordering here!\n self._first_terms = [(from_str(obj), Integer(val)) for (obj, val) in self._raw[FINDSTAT_STATISTIC_DATA].iteritems()]\n return self", "title": "" }, { "docid": "eb994227d440251d03341f4adb96fd85", "score": "0.61253005", "text": "def code(self):\n return self._map[FINDSTAT_MAP_CODE]", "title": "" }, { "docid": "d80989a3c3ead258b54a4faf55933532", "score": "0.59436893", "text": "def FindStat(name):\n for a in ALL_STATS:\n if a.Name()==name or a.Abbreviation() == name:\n return a\n return None", "title": "" }, { "docid": "11f4c67d1112f622e55ad8e50f91139c", "score": "0.5928136", "text": "def stat(self, stat_identifier):\r\n if isinstance(stat_identifier, Stat):\r\n stat_identifier = stat_identifier.identifier\r\n\r\n for pokemon_stat in self.stats:\r\n if pokemon_stat.stat.identifier == stat_identifier:\r\n return pokemon_stat\r\n\r\n raise KeyError(u'No stat named %s' % stat_identifier)", "title": "" }, { "docid": "987f1005f9fe5fa1e86c056e4e7795c0", "score": "0.58277476", "text": "def get_counter_name(self, object_stat_map, counter_stat):\n\n lookup_table = self.get_reverse_stat_lookup(object_stat_map)\n\n if not lookup_table:\n return None\n\n return lookup_table.get(counter_stat, None)", "title": "" }, { "docid": "c72307b7a6646c445c19708dc106d5a7", "score": "0.5819848", "text": "def GetId(self):\n return ed_glob.ID_FIND_RESULTS", "title": "" }, { "docid": "a2288b4be6d18839c503bd42e2d0ad0f", "score": "0.5739933", "text": "def _getIdentifier(self):\n return self._point.getIdentifier()", "title": "" }, { "docid": "fb194ccef238bf31734b583ea623a2aa", "score": 
"0.5593681", "text": "def get_stat(self,stat_name):\n return self.stat[stat_name]", "title": "" }, { "docid": "ea9f7219cf3f80442a651a4d6fddb128", "score": "0.55460054", "text": "def _get_identifier(self):\n return self._point.identifier", "title": "" }, { "docid": "0a3cba49bfc9bb3900a86c095343251a", "score": "0.55375445", "text": "def get_stid(st_code):\n try:\n stid = allradars[st_code]\n except KeyError:\n logging.error(\"No radar by that code\")\n stid = -1\n return stid", "title": "" }, { "docid": "a4f14eace2234fc0941da21c9b163a63", "score": "0.55331355", "text": "def unique_id(self):\n return \"NeoStat %s\" % (self.name)", "title": "" }, { "docid": "66ff03223c702a9a819de1278efbb9b6", "score": "0.55309063", "text": "def _findid(self):\n id = None\n aspects = self._connection.getUserInfo()['aspects']\n for a in aspects:\n if a['name'] == self.name:\n id = a['id']\n break\n return id", "title": "" }, { "docid": "5625d5f157888bec3bc0b8afef103a11", "score": "0.54615897", "text": "def get_identifier(self):\n return 'SMNIST'", "title": "" }, { "docid": "883a42bef04ecf5bc2f83fab2bd44616", "score": "0.54495245", "text": "def findR200profileIndex(self):\n\t\tself.readData(datasets=['Radius', 'R200'])\n\t\treturn np.abs(np.subtract.outer(self.hp['Radius'], self.hp['R200'])).argmin(0)", "title": "" }, { "docid": "a21cf925e69aedb259277e9156dc500f", "score": "0.54369164", "text": "def statistic(self) -> str:\n return pulumi.get(self, \"statistic\")", "title": "" }, { "docid": "a21cf925e69aedb259277e9156dc500f", "score": "0.54369164", "text": "def statistic(self) -> str:\n return pulumi.get(self, \"statistic\")", "title": "" }, { "docid": "26f87dcf8a4518e5c43bbe66b8abad30", "score": "0.5430929", "text": "def get_id_location(self, name):\n if self.is_global(name):\n return 'global'\n elif self.is_param(name):\n return 'param'\n\n return 'local'", "title": "" }, { "docid": "a2cefacff263883426a4fdce563ce890", "score": "0.5422593", "text": "def GetFindString(self):", "title": "" }, { "docid": "a2cefacff263883426a4fdce563ce890", "score": "0.5422593", "text": "def GetFindString(self):", "title": "" }, { "docid": "8657038d396607908c2fa6e4debf0e96", "score": "0.54182786", "text": "def local_identifier(soup):\n return generic_find(soup, 'identifier', {'type': 'local'})", "title": "" }, { "docid": "5f17fabe1ec1cdfa75276517cefd598a", "score": "0.5390623", "text": "def _get_var_id(self, ds):\r\n try:\r\n variable = nc_util.get_main_variable(ds)\r\n except:\r\n return None\r\n\r\n return variable.name", "title": "" }, { "docid": "669fadaf2cfb20083629993878620767", "score": "0.53904325", "text": "def get_attribute_idx(data, stimulus_idx, attribute):\n attribute_names = data['stimuli'][stimulus_idx]['dimnames']\n for attribute_idx, attribute_str in enumerate(attribute_names):\n if attribute_str == attribute:\n return attribute_idx\n\n raise KeyError('Attribute {} for stimulus_ids {} not found!'.format(\n attribute, stimulus_idx\n ))", "title": "" }, { "docid": "0a3b4f3c9bfd0a28ffbc19c51d95b636", "score": "0.5376887", "text": "def id(self):\n\n string = ''\n if self.file:\n string += self.file\n if self.scene_label:\n string += self.scene_label\n if self.event_label:\n string += self.event_label\n if self.tags:\n string += ','.join(self.tags)\n if self.onset:\n string += '{:8.4f}'.format(self.onset)\n if self.offset:\n string += '{:8.4f}'.format(self.offset)\n\n return get_parameter_hash(string)", "title": "" }, { "docid": "bd6ffdbedb879d96a5a6c13c4ae37d14", "score": "0.53761125", "text": "def get_idx_w_id(self, 
id_to_find):\n\n return int(self.__df[self.__df['LCBO_id']==id_to_find].index[0])", "title": "" }, { "docid": "0b96e34d07dc18746eaf4e4d85947478", "score": "0.5374062", "text": "def find(self, idx) -> int:\n for clust_id, clust in enumerate(self.clust):\n if idx in clust:\n return clust_id\n return -1", "title": "" }, { "docid": "cf6a8fbeefa9eb59467f97b5922f2f84", "score": "0.53722537", "text": "def _repr_(self):\n return \"%s: %s\" %(self.id_str(), self._map[FINDSTAT_MAP_NAME])", "title": "" }, { "docid": "c67faa5a6e6300510faa30a8995ecc59", "score": "0.53594244", "text": "def detector_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detector_id\")", "title": "" }, { "docid": "c67faa5a6e6300510faa30a8995ecc59", "score": "0.53594244", "text": "def detector_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detector_id\")", "title": "" }, { "docid": "8a11e90e998f152de0e8f326e344ac37", "score": "0.5318753", "text": "def base_stat(self, stat_identifier, default=0):\r\n\r\n if isinstance(stat_identifier, Stat):\r\n stat_identifier = stat_identifier.identifier\r\n\r\n for pokemon_stat in self.stats:\r\n if pokemon_stat.stat.identifier == stat_identifier:\r\n return pokemon_stat.base_stat\r\n\r\n return default", "title": "" }, { "docid": "63c4bd21cf8c7aa76acb6ce76a0a855b", "score": "0.5313134", "text": "def id(self):\n\n string = ''\n if self.file:\n string += self.file\n if self.timestamp:\n string += '{:8.4f}'.format(self.timestamp)\n if self.label:\n string += self.label\n if self.probability:\n string += '{:8.4f}'.format(self.probability)\n\n return get_parameter_hash(string)", "title": "" }, { "docid": "9c814e088777e41b155a8848d53ec690", "score": "0.5311934", "text": "def get_stat(fmt, if_name):\n statfile = fmt % if_name\n return int(open(statfile).readline().rstrip('\\n'))", "title": "" }, { "docid": "9593e0e3849daae506d8d383abfe90a4", "score": "0.5266892", "text": "def idx(self): # type: () -> int\n return self._split_pt_name()[1]", "title": "" }, { "docid": "44e4739b1ebc3ee15f9946af921b084e", "score": "0.5245109", "text": "def Found(self) -> int:", "title": "" }, { "docid": "23b7fa08e43be25f9d8f32fbeedf6e61", "score": "0.5206311", "text": "def get_fits_id(cls):\n return \"SFL\"", "title": "" }, { "docid": "17d629f5eceb8376cff76e0dc443395a", "score": "0.5203445", "text": "def QueryIdentification(self):\n try:\n return self.instr.query(\"*IDN?\")\n except pyvisa.errors.VisaIOError:\n return np.nan", "title": "" }, { "docid": "fca6ac5cd7d6fb9d7c6b596a02f7c166", "score": "0.5197821", "text": "def getName( comment ):\n pat = re.compile(\"^- (\\d+) \\((.+)\\).*\")\n m = pat.match( comment )\n if m:\n i,name = m.groups()\n i = int(i)\n return i,name\n return -1, \"\"", "title": "" }, { "docid": "7f41caa8e2d38ca0b4af509505541fba", "score": "0.5196059", "text": "def Stat23(self):\n if self.force_auto_sync:\n self.get('Stat23')\n return self._Stat23", "title": "" }, { "docid": "dce771479879672355379504b115ae7b", "score": "0.5192336", "text": "def description(self):\n return self._map[FINDSTAT_MAP_DESCRIPTION]", "title": "" }, { "docid": "e2a5b81d96574ef5e3bb11c11adea5d4", "score": "0.5180247", "text": "def findID(datasetName, catalogTable, server = 'Rainier'):\n conn = dc.dbConnect(server)\n cursor = conn.cursor()\n cur_str = \"\"\"select [ID] FROM [Opedia].[dbo].[\"\"\" + catalogTable + \"\"\"] WHERE [Dataset_Name] = '\"\"\" + datasetName + \"\"\"'\"\"\"\n cursor.execute(cur_str)\n IDvar = (cursor.fetchone()[0])\n return IDvar", "title": "" }, { "docid": 
"b043fedbc9c676db5a82a8e76f671735", "score": "0.51706696", "text": "def get_stimulus_index(data, stim_name):\n for i_stim, stim_data in enumerate(data['stimuli']):\n if stim_name in stim_data['stim_path']:\n return i_stim\n\n raise KeyError('Stimulus with stim_name={} not found!'.format(stim_name))", "title": "" }, { "docid": "44627e4a3c8e6593aecbe130276d812a", "score": "0.5142912", "text": "def Stat22(self):\n if self.force_auto_sync:\n self.get('Stat22')\n return self._Stat22", "title": "" }, { "docid": "7791916ca706ef31b535410d755c7402", "score": "0.51351166", "text": "def sfs_pnfsid(self):\r\n return self._sfs.get_id(self.path)", "title": "" }, { "docid": "3a23c5f0b094bc27af998f4c5cdfceff", "score": "0.5124626", "text": "def game_finder(self):\n score_board = scoreboardv2.ScoreboardV2(game_date=self.__game_date__)\n game_id = pd.DataFrame(score_board.get_data_frames()[0]).loc[:, \"GAME_ID\"]\n return game_id", "title": "" }, { "docid": "53e4bc7f0b7348c3db4f81ebac3d5f53", "score": "0.51162845", "text": "def get_index(self, name: str) -> int:\n curr_scope = self.__subroutine_scope.get(name, None)\n\n if not curr_scope:\n curr_scope = self.__class_scope.get(name, None)\n\n if not curr_scope:\n raise Exception(f\"no identifier '{name}' found in current scope.\")\n\n return curr_scope[\"index\"]", "title": "" }, { "docid": "b9d98d186394e219574f6fef80f8848b", "score": "0.51050633", "text": "def _net_wol_get_id(self, name):\n if not self._wolinfo:\n self.load_net_wol()\n\n # name = str(name).upper()\n if name in self._wolinfo:\n return name\n\n if name in self.name2wol:\n return self.name2wol[name]\n\n return None", "title": "" }, { "docid": "cdd610fc48478d5bfeb6a9842dcdc447", "score": "0.51013386", "text": "def find(self, namespace, identifier):", "title": "" }, { "docid": "edc804edce6d17a6a057dfdaf82cc4d0", "score": "0.50949335", "text": "def informational_entity_identifier(cls):\n return cls._namespace_SIO('SIO_000731')", "title": "" }, { "docid": "33567f88490ab55f2afc28a885f471eb", "score": "0.50894904", "text": "def __repr__(self):\n return \"The Combinatorial Statistic Finder (%s)\" % FINDSTAT_URL", "title": "" }, { "docid": "31e37e94b2edc8188e1ffd71d46e8af2", "score": "0.5083517", "text": "def getID(self):\n fileContents = self.__getEventPathItem(os.path.join(self.getPathToTraceEvent(), \"id\"))\n if (len(fileContents) > 0):\n return fileContents[0].rstrip()\n else:\n return \"\"", "title": "" }, { "docid": "8755bfe996e3ab88c10241a9362cdb7d", "score": "0.50666404", "text": "def instrumentLookup(instrument_df,symbol):\r\n try:\r\n return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]\r\n except:\r\n return -1", "title": "" }, { "docid": "efdcc19d72dd544b976aff3f2a91f830", "score": "0.5064413", "text": "def studentID(self):\r\n return \"20815074\"\r\n #raise NotImplementedError(\"studentID not implemented\")\r", "title": "" }, { "docid": "95ac0549abe6f5677c8522e3ba4d6c91", "score": "0.50549465", "text": "def entity_id(self):\n return 'sensor.{}_{}'.format(self._name, self.var_id)", "title": "" }, { "docid": "97b542c20b3f449b8ca241ae63140437", "score": "0.50468534", "text": "def _FindKind():\n return _FindExecutable('kind')", "title": "" }, { "docid": "7adc3cd47237716896063c24b16d3768", "score": "0.50384456", "text": "def instrumentLookup(instrument_df,symbol):\n try:\n return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]\n except:\n return -1", "title": "" }, { "docid": "fe5d41e51023a0c5db68787a0020a3ef", "score": 
"0.50373375", "text": "def fsidname(self):\r\n return os.path.join(self.dirname, '.(id)({0})'.format(self.basename))", "title": "" }, { "docid": "3a4ab86d7def7d49b00841fabaeb5f9a", "score": "0.50289077", "text": "def get_id(self):\n return self.query('*IDN?')", "title": "" }, { "docid": "84203a4cb5fa8d0d10ae56a8fb5dd7a9", "score": "0.5016681", "text": "def _get_id_name(self):\n\n result = None\n id_name = {'discoveryrule': 'item',\n 'hostgroup': 'group',\n 'graphptototype': 'graph',\n 'itemprototype': 'item',\n 'triggerprototype': 'trigger',\n }.get(self.obj_type, self.obj_type)\n result = '{0}id'.format(id_name)\n return result", "title": "" }, { "docid": "4e68ff6c32d426fba330326e4afdee47", "score": "0.5007686", "text": "def get_dd_stat(self):\n return self.execute_command('getDDStat')", "title": "" }, { "docid": "dbccbeec44a844abe148e58ac686bdf9", "score": "0.50038475", "text": "def _token_id(self, token, data=None):\n\n if token.type == rlisTokens.RlisEntry.HEADER or \\\n token.type == rlisTokens.RlisEntry.FOOTER:\n id = \"%s_%d\" % (token.function_name, self._next_tree_counter())\n\n elif token.type == rlisTokens.RlisEntry.CALL:\n id = \"%s_%d\" % (token.target, self._next_tree_counter())\n\n elif token.type == rlisTokens.RlisEntry.CONDITIONAL:\n id = \"%s_%d_branch_%d\" % (token.function_name,\n self._next_tree_counter(), token.id)\n\n elif token.type == rlisTokens.RlisEntry.WATCH:\n assert data, \"Data must not be None for watch points\"\n id = \"%s_%d_watch_%d_val_%d\" % (token.function_name,\n self._next_tree_counter(), token.id, data)\n\n else:\n assert False, \"Unexpected token type:\\n%s\" % str(token)\n\n return id", "title": "" }, { "docid": "8b959c4a06dbff9205ac596443d9c40e", "score": "0.49970138", "text": "def studentID(self):\n\t\treturn \"20652186\"", "title": "" }, { "docid": "a0949f2b4c06385ffcf7f173ce6523d5", "score": "0.49888673", "text": "def _find_stat_path(self):\n # Block device trees vary a little. If the newer path doesn't work, try the older\n # one (2.6.18 era). 
If that doesn't work, open() will raise IOError.\n stat_path = '/sys/block/{}/stat'.format(self._block_device)\n if not os.path.exists(stat_path):\n stat_path = '/sys/block/{}/{}/stat'.format(self._block_device.rstrip(string.digits),\n self._block_device)\n return stat_path", "title": "" }, { "docid": "0527c7352f7976745f6bd60d97095e52", "score": "0.49834833", "text": "def access_identifier(soup):\n return generic_find(soup, 'identifier', {'type': 'access'})", "title": "" }, { "docid": "eb475afc90ab130dfaec71b746666ad2", "score": "0.49728695", "text": "def _extractDetectorName(self, dataId):\n return \"0\"", "title": "" }, { "docid": "5c573ed6d1ebf103b726764f6fa7d7b2", "score": "0.49726778", "text": "def identifier(self):\n\t\treturn self.nick", "title": "" }, { "docid": "25324b1620263dd2ce8e7a09c26c4f3a", "score": "0.49651563", "text": "def get_id(self):\n return self.query('*IDN?').strip()", "title": "" }, { "docid": "17213e5ae773bd8d1105c9c8a20f4f21", "score": "0.4963703", "text": "def _find_model(self) -> int:\n logger.debug(\n \"Searching model based on revision=%s...\", self.settings.modelrevision\n )\n threedimodels_result = self.api.threedimodels_list(\n slug__contains=self.settings.modelrevision\n )\n results = threedimodels_result.results\n if not results:\n raise NotFoundError(\n f\"Model with revision={self.settings.modelrevision} not found\"\n )\n id = results[0].id\n url = results[0].url\n logger.info(\"Simulation uses model %s\", url)\n return id", "title": "" }, { "docid": "a9c6c6f3f8f45045088634fa6241f69a", "score": "0.49589717", "text": "def Find(self, colourName):", "title": "" }, { "docid": "338ee081e11d9ed69be97c1ca5237b46", "score": "0.495205", "text": "def _get_identifier(self):\n return getattr(self.klass, self._get_identifier_column_name())", "title": "" }, { "docid": "81d5da04dea5fe1ad8605a9120861221", "score": "0.49415505", "text": "def symbol(cls):\n return idaapi.get_highlighted_identifier()", "title": "" }, { "docid": "4976ed7328789b7697e22331699f5bf1", "score": "0.49378702", "text": "def get_const_name(species_id):\n return DfbaSubmodel.gen_neg_species_pop_constraint_id(species_id)", "title": "" }, { "docid": "a45bb4186385cb7ef537d1f8e1a61058", "score": "0.4935459", "text": "def find_species(conn, args, speciesShortname):\n logging.debug(f'Finding species: {speciesShortname}')\n cur = conn.cursor()\n cur.execute(\"SELECT species_id FROM species WHERE shortname = %s;\", [speciesShortname])\n row = cur.fetchone()\n if row is not None:\n speciesID = row[0] \n cur.close()\n return speciesID\n else:\n return None", "title": "" }, { "docid": "0100188c80e138810baed8376bf94de9", "score": "0.49315405", "text": "def Identifier(self) -> str:", "title": "" }, { "docid": "0100188c80e138810baed8376bf94de9", "score": "0.49315405", "text": "def Identifier(self) -> str:", "title": "" }, { "docid": "372d0349640672d019db1c8c9dd90f9f", "score": "0.4927292", "text": "def get_id(self):\n outArray = [self.SPI_Address, self.SPI_MESSAGE_TYPE.GET_ID,\\\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n reply = self.spi_transfer_array(outArray)\n if(reply[3] == 0xA5):\n return (\"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\" % \\\n (reply[4], reply[5], reply[6], reply[7], reply[8], reply[9], reply[10], reply[11], \\\n reply[12], reply[13], reply[14], reply[15], reply[16], reply[17], reply[18], reply[19]))\n raise IOError(\"No SPI response\")\n return \"00000000000000000000000000000000\"", "title": "" }, { "docid": "4c53d536b3eb52cb32a27722c19b79f5", "score": 
"0.4922124", "text": "def getVolleyballEventTeamStatsID(self, eID):\r\n cursor = self.conn.cursor()\r\n query = \"\"\"\r\n SELECT id\r\n FROM volleyball_event_team_stats\r\n WHERE event_id = %s and (is_invalid = false or is_invalid is Null);\r\n \"\"\"\r\n cursor.execute(query, (int(eID),))\r\n result = cursor.fetchone()\r\n # print(result)\r\n return result", "title": "" }, { "docid": "9fcd215f47aaa2970ca105be9c81c132", "score": "0.49201566", "text": "def find(self, name):\n if name in self[-1]:\n identifier = self[-1][name]\n elif name in self[0]:\n identifier = self[0][name]\n else:\n raise ParserNameError()\n\n return identifier", "title": "" }, { "docid": "5a6e7128ae2ddb6c485d0c683ebe4a90", "score": "0.49193022", "text": "def get_series_uid(dirname):\n rootfile = [f for f in os.listdir(dirname) if f.endswith(\".xml\")][0]\n root = ET.parse(os.path.join(dirname, rootfile))\n header = root.find('{http://www.nih.gov}ResponseHeader')\n series_uid = header.find('{http://www.nih.gov}SeriesInstanceUid')\n return series_uid.text", "title": "" }, { "docid": "02a94a5161aa92439738faefbdc89d18", "score": "0.49114412", "text": "def get_stat_lookup(self, object_stat_map):\n\n if not self.stat_lookup.get(object_stat_map, None):\n stats_map = self.db.get_all(self.db.COUNTERS_DB, object_stat_map)\n if stats_map:\n self.stat_lookup[object_stat_map] = stats_map\n else:\n self.stat_lookup[object_stat_map] = None\n\n return self.stat_lookup[object_stat_map]", "title": "" }, { "docid": "a9accc64eb9adc02f072994c796b8bc8", "score": "0.49109367", "text": "def find_marker(detections, marker_num):\n for i in range(0, len(detections)):\n if detections[i].id == marker_num:\n return i\n return -1", "title": "" }, { "docid": "ab14b23377a981ff044c39b8804e82ec", "score": "0.49073076", "text": "def _find_song_id(self, artist_name: str, song_title: str) -> str:\n search_url = self.base_url + '/search'\n params = {'q': song_title + ' ' + artist_name}\n response = requests.get(search_url, params=params, headers=self.headers)\n json_response = response.json()\n for hit in json_response[\"response\"][\"hits\"]:\n if artist_name.lower() in hit[\"result\"][\"primary_artist\"][\"name\"].lower():\n return hit[\"result\"][\"api_path\"]\n self.song_not_found_count += 1\n return \"\"", "title": "" }, { "docid": "21367bfac7afc9923356cf9878c801ae", "score": "0.49068585", "text": "def index_of(self, name):\n indices = [i for i, n in enumerate(self.names) if n == name]\n if len(indices) == 0:\n return -1\n else:\n return indices[0]", "title": "" }, { "docid": "8835368ebf3476b3f02e3af2e88218e7", "score": "0.4901919", "text": "def stat_mode(stat):\n return stat[0]", "title": "" }, { "docid": "bf1e5d24668633bc410fb8bd2f04c796", "score": "0.48993918", "text": "def DPxGetID():\n return getID()", "title": "" }, { "docid": "90d97375cc91f76a15a5223159879434", "score": "0.4897854", "text": "def get_id_by_name(service_type, service_name, run_on_host):\n cmd = \"{} openstack {} show \\'{}\\' -f json'\".format(utility_container, service_type, service_name)\n output = run_on_host.run(cmd)\n try:\n result = json.loads(output.stdout)\n except ValueError:\n return\n\n if 'id' in result:\n return result['id']\n else:\n return", "title": "" }, { "docid": "83b2c5fb27c31971c85f21d93678405c", "score": "0.48933938", "text": "def getIdentifier(self):\n return self._config['identifier']", "title": "" }, { "docid": "ed475008df08966c2a066dfcb59ef786", "score": "0.48927376", "text": "def var_name(search, str_value):\n for stack in inspect.stack():\n # Don't 
care about variable names within tpm2_pytss or contextlib\n # (ExitStack)\n if stack.frame.f_code.co_filename.startswith(\n os.path.dirname(sys.modules[MODULE_NAME].__file__)\n ) or stack.frame.f_code.co_filename.startswith(\n os.path.dirname(sys.modules[\"contextlib\"].__file__)\n ):\n continue\n for variable_name, variable in stack.frame.f_locals.items():\n if variable is search:\n return variable_name\n return str_value", "title": "" }, { "docid": "5d9511a71c96aff0ba0f435967bbeb80", "score": "0.48895848", "text": "def fid(self):\n return self.type[0] + str(self.id)", "title": "" }, { "docid": "1fa9bd0bc3f59ff5a5cfc9a3a735f3fb", "score": "0.48847604", "text": "def call_name_for_compare(n):\n if isinstance(n, ast.Name):\n return n.id\n elif isinstance(n, ast.Attribute):\n if isinstance(n.value, ast.Name):\n return '.'.join([n.value.id, n.attr])\n else:\n # could be an attribute of a call, but for those, we don't\n # much care\n return None\n else:\n return None", "title": "" }, { "docid": "4b9ac0773687ef23d9570fc96a502a33", "score": "0.48797303", "text": "def sid(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sid\")", "title": "" }, { "docid": "c724bfc2b97904c07d85fca475c81171", "score": "0.48758775", "text": "def get_identifier(self):\n return self.__identifier", "title": "" }, { "docid": "1ea2a283ef90d8db6825c21310b2591a", "score": "0.486962", "text": "def get_id(self):\n return self.name", "title": "" }, { "docid": "8337562ac29b1946ede099b388ad86ee", "score": "0.48691374", "text": "def find_trait(conn, args, trait_name):\n logging.debug(f'Finding trait: {trait_name}')\n cur = conn.cursor()\n cur.execute(\"SELECT trait_id FROM trait WHERE trait_name = %s;\", [trait_name])\n row = cur.fetchone()\n if row is not None:\n trait_ID = row[0]\n cur.close()\n return trait_ID\n else:\n return None", "title": "" }, { "docid": "beaa5df03329f73054b8de8668d637ed", "score": "0.48680145", "text": "def get_acc_status_info_key(tid):\n return str(\"acc_status_info:%s\" % tid)", "title": "" }, { "docid": "18fd45798386f5ac7694904b198303f2", "score": "0.4866052", "text": "def get_info(cls, setid):\n for name, info in cls.desc.items():\n if info[0] == setid:\n return name\n return None", "title": "" }, { "docid": "977102cb9075e8cbfeabf4817b7e5417", "score": "0.4862976", "text": "def get_regenStat(self,stat_name):\n return self.stat_regen[stat_name]", "title": "" }, { "docid": "62e3914aed5b3e8dfdc44fd05397b230", "score": "0.48614144", "text": "def get_identifier(self):\n from wutu.util import get_identity\n return get_identity(self)", "title": "" }, { "docid": "0f140017a15f443d84ffefe5a427e62d", "score": "0.4861399", "text": "def get_id(sw_spec_details):\n\n return SwSpec.get_uid(sw_spec_details)", "title": "" } ]
9eafee5ebddd5186dd57c2b654c81526
Returns a redirect url.
[ { "docid": "9cfd5aa97d497b11446daebd68bceb36", "score": "0.6828274", "text": "def redirect_url(default='index'):\n return request.referrer or url_for(default)", "title": "" } ]
[ { "docid": "2c096b0d65d755bd5a9a865cd350ebd4", "score": "0.8545995", "text": "def redirecturl(self) :\n\t\ttry :\n\t\t\treturn self._redirecturl\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "bb3925ef9109540b34a175d0301cdb67", "score": "0.82717377", "text": "def redirect_url(self):\n return self._redirect_url", "title": "" }, { "docid": "d4329f47c6205e5a653d7b0ea62cb555", "score": "0.8040624", "text": "def get_normal_url(self, redirect_url: str):\n\n return self._make_request(BASE_URL + redirect_url).url", "title": "" }, { "docid": "e8dd50e93c0d521e982c2199cf09d485", "score": "0.7881414", "text": "def get_absolute_url(self):\n return self.redirect", "title": "" }, { "docid": "deaf7c68bcfdbca7804e85f590f17ef2", "score": "0.7837098", "text": "def redirect_uri(self) -> str:\n return f\"{get_url(self.hass, allow_internal=False, allow_ip=False)}{AUTH_CALLBACK_PATH}\"", "title": "" }, { "docid": "48f3d72145c1db2695d72d75bb81935e", "score": "0.7812046", "text": "def redirect_url(self):\n if self.alias:\n return self.alias.get_full_path()\n return self.alias_url", "title": "" }, { "docid": "7dc6bc6d5fed980fcf25affae8beadde", "score": "0.7800122", "text": "def get_redirect_url(url):\n try:\n return reverse(url)\n except:\n return url", "title": "" }, { "docid": "241cdfbace1d039db992c72e4e8390d0", "score": "0.76508045", "text": "def get_redirect_url(self):\n redirect_to = None\n\n if self.user is not None:\n redirect_to = check_user_state(self.user)\n\n if redirect_to is None:\n redirect_to = self.request.POST.get(\n self.redirect_field_name,\n self.request.GET.get(self.redirect_field_name, '')\n )\n url_is_safe = is_safe_url(\n url=redirect_to,\n allowed_hosts=self.get_success_url_allowed_hosts(),\n require_https=self.request.is_secure(),\n )\n return redirect_to if url_is_safe else ''", "title": "" }, { "docid": "caac83b871d89c006ffaf0ef19f9b6e2", "score": "0.76366216", "text": "def redirect_uri(self):\n\n return self.__redirect_uri", "title": "" }, { "docid": "ff7ab63c3e1da1c95ec7fc9671cc20d4", "score": "0.7526949", "text": "def get_redirect_url(self):\n redirect_flow = self.client.redirect_flows.create(\n params={\n \"description\": settings.GO_CARDLESS_PAYMENT_DESCRIPTION,\n \"session_token\": self.request.session.session_key,\n \"success_redirect_url\": settings.GOCARDLESS_REDIRECT_URL\n }\n )\n\n # Save the flow ID on the session\n self.request.session['GC_REDIRECT_FLOW_ID'] = redirect_flow.id\n self.request.session.save()\n return redirect_flow.redirect_url", "title": "" }, { "docid": "4c77016f9ac6697bd820d7f7482ea206", "score": "0.7522097", "text": "def redirect_url(self, state=None):\n raise NotImplementedError()", "title": "" }, { "docid": "4ac5bf5eedf75c14904fa915305c7a8b", "score": "0.74684316", "text": "def redirect_uri(self) -> Optional[str]:\n return pulumi.get(self, \"redirect_uri\")", "title": "" }, { "docid": "db9b4018258d1a179fc8ee1c3a609ae6", "score": "0.7443538", "text": "def get_redirect_url(self, request, **kwargs):\n if self.url:\n url = self.url % kwargs\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args and self.query_string:\n url = \"%s?%s\" % (url, args)\n return url\n else:\n return None", "title": "" }, { "docid": "0b415fa43a654bb6841a6e1e1a58f224", "score": "0.74099374", "text": "def get_redirect_url(self, callback):\n # The callback parameter is added by get_payment_menu_url()\n if callback == 'SUCCESS':\n return self.success_url\n elif callback in ('PENDING', ''): # treat missing value as pending\n return self.pending_url\n elif 
callback == 'CANCELLED':\n return self.cancelled_url\n else:\n return self.error_url", "title": "" }, { "docid": "9aef56e9b1028b37e59fcca30b07cc49", "score": "0.7377832", "text": "def redirection_uri(self):\n\n return self._redirection_uri", "title": "" }, { "docid": "deae9852f22b1e0a5353a7cbd4d6b339", "score": "0.73444855", "text": "def redirect(url):", "title": "" }, { "docid": "3b6f782ec2232ec9656610989a3b40db", "score": "0.7324925", "text": "def redirect_uri(_request):\n return {}", "title": "" }, { "docid": "0d25247d6cbe417d73f2d9e7871b2973", "score": "0.7317064", "text": "def success_url(self):\n # URL to parts.\n scheme, netloc, path, params, query, fragment = parse.urlparse(self.redirect_to)\n # Add (extra) query string parameter.\n data = parse.parse_qs(query)\n data[\"access_token\"] = self.token\n query = parse.urlencode(data, True)\n # Parts to URL.\n url = parse.urlunparse((scheme, netloc, path, params, query, fragment))\n return str(url)", "title": "" }, { "docid": "93d163422eb8e1114d3d6f94dfba9b89", "score": "0.727064", "text": "def get_redirect_url(self):\n return reverse('media_redirect', kwargs={'recording_id': self.id})", "title": "" }, { "docid": "3b4fc14b7727a7158d896a428fa38beb", "score": "0.7244872", "text": "def redirection_uri(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "197e575c1e6e453d2323be4cb35ff235", "score": "0.72248995", "text": "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "title": "" }, { "docid": "ff98aedd22e222371d8d6faa51d4c700", "score": "0.72165716", "text": "def get_url_with_redirect(url, redirect_url):\n if redirect_url:\n url = url + '?' + urlencode(\n {settings.REDIRECT_FIELD_NAME: redirect_url}\n )\n\n return url", "title": "" }, { "docid": "2b22a25b03322545aaa8f78d7b6ef05d", "score": "0.72159535", "text": "def redirect_uri(self):\n return self.oauth2session.redirect_uri", "title": "" }, { "docid": "299b54dcd5d5abeab215a6e0db0d9e8a", "score": "0.71344453", "text": "def get_success_url(self):\n if not self.success_url:\n raise ImproperlyConfigured(\n \"No URL to redirect to. 
Provide a success_url.\")\n return str(self.success_url) # success_url may be lazy", "title": "" }, { "docid": "addae8661aa3a125bd4616fccde358bc", "score": "0.7037512", "text": "def get_success_url(self):\n if self.success_url:\n redirect_to = self.success_url\n else:\n redirect_to = self.request.REQUEST.get(\n self.redirect_field_name, 'hits/')\n\n netloc = urlparse(redirect_to)[1]\n\n if not redirect_to:\n redirect_to = settings.LOGIN_REDIRECT_URL\n # Security check -- don't allow redirection to a different host.\n elif netloc and netloc != self.request.get_host():\n redirect_to = settings.LOGIN_REDIRECT_URL\n return redirect_to", "title": "" }, { "docid": "da27d7333bf8ac2e2c9dc4876ea02094", "score": "0.701062", "text": "def get_redirect_url(self, state=''):\n authorization_url = self.oauth2_handler().authorize_url(self.scope, response_type='code')\n return authorization_url", "title": "" }, { "docid": "a0a087f6cf92bb4116ef2336df11cb49", "score": "0.6986974", "text": "def redirect(url: str) -> str:\r\n parsed_url = urlparse(url)\r\n if SITE != f'{parsed_url.scheme}://{parsed_url.netloc}':\r\n return INVALID\r\n entry = decode(parsed_url.path[1:])\r\n if entry not in LINKS.keys():\r\n return NO_RECORD\r\n \r\n return LINKS[entry]", "title": "" }, { "docid": "2decace6d167429841560bd059391e48", "score": "0.6960805", "text": "def get_redirect_target():\n if \"_redirectlink\" in request.values:\n link = \"/website/{}\".format(request.values[\"_redirectlink\"].lstrip(\"/\").replace(\"website\",\"\").lstrip(\"/\"))\n if is_safe_url(link):\n return link\n\n for target in request.values.get('next'), request.referrer:\n if not target:\n continue\n if is_safe_url(target):\n return target", "title": "" }, { "docid": "c757b894db0f68eb30f21c91c6e0154c", "score": "0.69463074", "text": "def redirect(self):\n return self._redirect", "title": "" }, { "docid": "18ff564c13ddda9083140d39a93cb797", "score": "0.6907534", "text": "def redirect_url():\n return request.args.get('next') or \\\n request.referrer or \\\n url_for('renderHomePage')", "title": "" }, { "docid": "b7b78313581b542ff42848ed08df3807", "score": "0.6879828", "text": "def get_success_url(self):\n redirect_to = self.request._post['next']\n return redirect_to", "title": "" }, { "docid": "b7b78313581b542ff42848ed08df3807", "score": "0.6879828", "text": "def get_success_url(self):\n redirect_to = self.request._post['next']\n return redirect_to", "title": "" }, { "docid": "b7b78313581b542ff42848ed08df3807", "score": "0.6879828", "text": "def get_success_url(self):\n redirect_to = self.request._post['next']\n return redirect_to", "title": "" }, { "docid": "35365787a75db39904b9e9863f50b982", "score": "0.6869607", "text": "def get_success_url(self):\n if not self.success_url:\n raise AttributeError(\"No URL to redirect to. 
Provide a success_url.\")\n return self.success_url", "title": "" }, { "docid": "75303b1eee919a91bc33725fc89f8d1a", "score": "0.6826224", "text": "def _get_redirect_url(self, webhdfs_ex):\n try:\n # The actual HttpError (307) is wrapped inside\n http_error = webhdfs_ex.get_parent_ex()\n if http_error is None:\n raise webhdfs_ex\n\n if http_error.response.status_code not in (301, 302, 303, 307):\n LOG.error(\"Response is not a redirect: %s\" % webhdfs_ex)\n raise webhdfs_ex\n return http_error.response.headers['location']\n except Exception as ex:\n LOG.exception(\"Failed to read redirect from response: %s (%s)\" % (webhdfs_ex, ex))\n raise webhdfs_ex", "title": "" }, { "docid": "9ec20b3a942cbea4ae9fcd92b42af5bf", "score": "0.6821965", "text": "def get_redirect_url(self, payment):\n\n data = {\n 'return_url_success': self._make_status_url('success'),\n 'return_url_canceled': self._make_status_url('canceled'),\n 'return_url_pending': self._make_status_url('pending'),\n 'return_url_error': self._make_status_url('error'),\n }\n return payment.payment_url(**data)", "title": "" }, { "docid": "fc4641bc7d7f825e256e3129259321a0", "score": "0.6816715", "text": "def get_success_url(self):\n if self.success_url:\n url = self.success_url.format(**self.object.__dict__)\n else:\n try:\n url = self.object.get_absolute_url()\n except AttributeError:\n raise ImproperlyConfigured(\n \"No URL to redirect to. Either provide an url or define \"\n \"a get_absolute_url method on the Model.\")\n return url", "title": "" }, { "docid": "dba2f805edefbefeec09bf614605f867", "score": "0.6798818", "text": "def redirect_url(default='show_home_page'):\n\n return request.args.get('next') or \\\n request.referrer or \\\n url_for(default)", "title": "" }, { "docid": "68ccf259a0570987a2444fee5b0a51d8", "score": "0.67915237", "text": "def _redirect_url(default='login.login'):\n return request.args.get('next') or request.referrer or url_for(default)", "title": "" }, { "docid": "3fc6e7eb6879ddb55be1ff3cee173a27", "score": "0.6762598", "text": "def redirect_url(self, request: AsyncRequest, response: AsyncResponse) -> URL:\n location = response.headers[\"Location\"]\n\n url = URL(location, allow_relative=True)\n\n # Facilitate relative 'Location' headers, as allowed by RFC 7231.\n # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')\n if url.is_relative_url:\n url = url.resolve_with(request.url)\n\n # Attach previous fragment if needed (RFC 7231 7.1.2)\n if request.url.fragment and not url.fragment:\n url = url.copy_with(fragment=request.url.fragment)\n\n return url", "title": "" }, { "docid": "13b2299d8c9327e7bfa5d1b68d081035", "score": "0.67507327", "text": "def _get_target_url(self, url_str):\n if '/aclk?' in url_str or '/search?' in url_str:\n __r = requests.get('https://www.google.com{0}'.format(url_str), headers=self._get_user_agent())\n if len(__r.history) == 0:\n return None\n\n last_redirect_url = __r.history[-1].url\n parsed = parse.parse_qs(parse.urlparse(last_redirect_url).query)\n\n if parsed.get('adurl'):\n return parsed['adurl'][0]\n\n if 'https://ad.doubleclick.net' in last_redirect_url:\n url2 = urllib.parse.unquote(last_redirect_url)\n return url2.split(';')[-1].split('?')[1]\n\n if '/aclk?' 
in last_redirect_url:\n return None\n\n return last_redirect_url\n\n return url_str", "title": "" }, { "docid": "e8e1acb22ba867d074a4c12211445a3c", "score": "0.674968", "text": "def get_auth_url(self, redirect_uri=None):\n return self._client.get_auth_token_url(redirect_uri)", "title": "" }, { "docid": "c5647f45cca79722169d29ffc1a792da", "score": "0.6713249", "text": "def authorize_url(self):\r\n url = self.flow.step1_get_authorize_url()\r\n return str(url)", "title": "" }, { "docid": "ba062185e1b151101c2705bd01587251", "score": "0.6710619", "text": "def get_auth_url():\n client = Client()\n auth_url = client.authorization_url(\n client_id=STRAVA_CLIENT_ID,\n redirect_uri='http://localhost:5000/authorization')\n return auth_url", "title": "" }, { "docid": "348a7b56ab519d6805ea695c94f8444a", "score": "0.6705712", "text": "def geturl(self) -> str:\n return self.response.geturl()", "title": "" }, { "docid": "3d32b70e9bbeb7f1e2e5d055abe5a7fd", "score": "0.66860145", "text": "def redirect_url(self, redirect_url):\n if (self.local_vars_configuration.client_side_validation and\n redirect_url is not None and len(redirect_url) > 2500):\n raise ValueError(\"Invalid value for `redirect_url`, length must be less than or equal to `2500`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n redirect_url is not None and not re.search(r'^(?:[a-z0-9\\.\\-\\+]*):\\/\\/(?:[^\\s:@\\/]+(?::[^\\s:@\\/]*)?@)?(?:(?:25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}|\\[[0-9a-f:\\.]+\\]|([a-zยก-๏ฟฟ0-9](?:[a-zยก-๏ฟฟ0-9-]{0,61}[a-zยก-๏ฟฟ0-9])?(?:\\.(?!-)[a-zยก-๏ฟฟ0-9-]{1,63}(?<!-))*\\.(?!-)(?:[a-zยก-๏ฟฟ-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\\.?|localhost))(?::\\d{2,5})?(?:[\\/?#][^\\s]*)?\\Z', redirect_url)): # noqa: E501\n raise ValueError(r\"Invalid value for `redirect_url`, must be a follow pattern or equal to `/^(?:[a-z0-9\\.\\-\\+]*):\\/\\/(?:[^\\s:@\\/]+(?::[^\\s:@\\/]*)?@)?(?:(?:25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}|\\[[0-9a-f:\\.]+\\]|([a-zยก-๏ฟฟ0-9](?:[a-zยก-๏ฟฟ0-9-]{0,61}[a-zยก-๏ฟฟ0-9])?(?:\\.(?!-)[a-zยก-๏ฟฟ0-9-]{1,63}(?<!-))*\\.(?!-)(?:[a-zยก-๏ฟฟ-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\\.?|localhost))(?::\\d{2,5})?(?:[\\/?#][^\\s]*)?\\Z/`\") # noqa: E501\n\n self._redirect_url = redirect_url", "title": "" }, { "docid": "f5cf9cc56da43f499735f3b87fcab526", "score": "0.6676317", "text": "def geturl(self):\n return self.response.geturl()", "title": "" }, { "docid": "1492f3bf4023ff78edb10b4866d00f08", "score": "0.666664", "text": "def accessibility_login_redirect_url(self) -> str:\n return pulumi.get(self, \"accessibility_login_redirect_url\")", "title": "" }, { "docid": "345dcb7e561c38d8a62ba481da98728c", "score": "0.66627264", "text": "def build_redirect_url(hostname, path):\n redirect_base = hostname\n # BASE_URL may be empty or a bare hostname or a hostname with a protocol\n if bool(redirect_base) and not redirect_base.startswith(\"http\"):\n redirect_base = \"https://\" + redirect_base\n return redirect_base + path", "title": "" }, { "docid": "234178795111801634994962790bd4cb", "score": "0.6659015", "text": "def get_url(self, *args, **kwargs):\n return self.application.reverse_url(*args, **kwargs)", "title": "" }, { "docid": "b09989a5ef294f680bb78bc951450ab1", "score": "0.66060895", "text": "def get_absolute_url(self):\n return self.request.get_full_path().partition('?')[0]", "title": "" }, { "docid": "4e33f35a872042bdcb210b0bb3b1e551", "score": "0.66054964", "text": "def get_success_url(self):\r\n return 
redirect(reverse_lazy('login'))", "title": "" }, { "docid": "4e33f35a872042bdcb210b0bb3b1e551", "score": "0.66054964", "text": "def get_success_url(self):\r\n return redirect(reverse_lazy('login'))", "title": "" }, { "docid": "d3057fe92b3c3d44bb95d561a30a4a60", "score": "0.659652", "text": "def google_redirect(settings, request):\n try:\n state = GoogleOAuth.create_random()\n # Kind of CSRF token for Google OAuth\n request.session['google_state'] = state\n url = \"{token_request_uri}?response_type={response_type}&client_id={client_id}&redirect_uri={redirect_uri}&scope={scope}&nonce={nonce}&state={state}\".format(\n token_request_uri=settings.TOKEN_REQUEST_URI,\n response_type=settings.GOOGLE_RESPONSE_TYPE,\n client_id=settings.SOCIAL_AUTH_GOOGLE_OAUTH2_KEY,\n redirect_uri=settings.REDIRECT_URI,\n scope=settings.GOOGLE_SCOPE,\n nonce=state,\n state=state\n )\n return url\n except Exception as e:\n return False", "title": "" }, { "docid": "130db5298f6f7dd0f931e6d74e6ad5a2", "score": "0.6595629", "text": "def get_redirect(request):\n params = request.GET.copy()\n params.pop(settings.TOKEN_NAME)\n url = request.path\n if params:\n url += \"?\" + urlencode(params)\n return redirect(url)", "title": "" }, { "docid": "c63d02c714c4db59a0f35751837320a7", "score": "0.65858984", "text": "def get_full_url(self):\n return self.request.get_full_path()", "title": "" }, { "docid": "a437bb3a0e304ebaed4aa1cc3dfe85d0", "score": "0.65604264", "text": "def site_url():", "title": "" }, { "docid": "dcf4b4339ae9e44fce581044b247d08b", "score": "0.6543082", "text": "def url(self):\r\n return self.urlparts.geturl()", "title": "" }, { "docid": "dcf4b4339ae9e44fce581044b247d08b", "score": "0.6543082", "text": "def url(self):\r\n return self.urlparts.geturl()", "title": "" }, { "docid": "dcf4b4339ae9e44fce581044b247d08b", "score": "0.6543082", "text": "def url(self):\r\n return self.urlparts.geturl()", "title": "" }, { "docid": "dcf4b4339ae9e44fce581044b247d08b", "score": "0.6543082", "text": "def url(self):\r\n return self.urlparts.geturl()", "title": "" }, { "docid": "1fbbccf7ebd7cb30bf7f66b931611f2a", "score": "0.65330935", "text": "def get_absolute_url(self):\n return reverse('wwwhisper_location', kwargs={'uuid' : self.uuid})", "title": "" }, { "docid": "bd4b251db53d026c3c61d62fb4bcb606", "score": "0.65182626", "text": "def url(self) -> str:\n location = self.__attribute_finders.get(\"callback_url_header\")\n return self.__headers.get(location, \"\") if location is not None else \"\"", "title": "" }, { "docid": "674c63e73d5cce7a52ba42f5e52d1464", "score": "0.6517108", "text": "def get_absolute_url(self):\n return self.url", "title": "" }, { "docid": "9774c902c2c63bd2828c741d503eb1e1", "score": "0.6512983", "text": "def accessibility_error_redirect_url(self) -> str:\n return pulumi.get(self, \"accessibility_error_redirect_url\")", "title": "" }, { "docid": "1250d8e50b60178158c2b1db137534e7", "score": "0.65024674", "text": "def redirect_external(url):\n return render_template(\"redirect.html\", url=url)", "title": "" }, { "docid": "98e8fb430c9ff3ef86be3a4b56459114", "score": "0.6493568", "text": "def get_auth_url(self):\n auth_params = {\n \"client_id\": self.client_pub_key,\n \"response_type\": \"code\",\n \"scope\": \"playback-control-all\",\n \"state\": 1000000,\n \"redirect_uri\": self.redirect_uri\n }\n r = requests.get(self.base_url, params=auth_params)\n return r.url", "title": "" }, { "docid": "b49c2b87fb8087dfe7104f593c4f2ed8", "score": "0.648579", "text": "def get_redirect_url(self, *args, **kwargs):\n 
return reverse('groups:single', kwargs={ 'slug': self.kwargs.get('slug') })", "title": "" }, { "docid": "b49c2b87fb8087dfe7104f593c4f2ed8", "score": "0.648579", "text": "def get_redirect_url(self, *args, **kwargs):\n return reverse('groups:single', kwargs={ 'slug': self.kwargs.get('slug') })", "title": "" }, { "docid": "00ba73323172f4df82554cf8caf65890", "score": "0.64823097", "text": "def url(self):\n return self.header.get('WARC-Target-URI')", "title": "" }, { "docid": "afd19a9d923a858df8c68ef02a87b878", "score": "0.6479514", "text": "def url(self):\n return geturl(self.__environ)", "title": "" }, { "docid": "c6866f4b71389b4a93f75b5174db30ca", "score": "0.64332324", "text": "def webservice_url(cls):\n return redirect(url_for('nereid.website.home'))", "title": "" }, { "docid": "d56f7965aecb0213fc5065c9b0a8bfec", "score": "0.6430443", "text": "def get_url(self):\r\n return self._url", "title": "" }, { "docid": "57517f626731d320f85400d7a2d76347", "score": "0.6415598", "text": "def get_success_url(self):\n\n return reverse('publisher_sites')", "title": "" }, { "docid": "8b57052a26ae3da248c2fa644fc9137a", "score": "0.63977295", "text": "def get_redirect(self):\n index_url = self.appbuilder.get_url_for_index\n page_history = Stack(session.get(\"page_history\", []))\n\n if page_history.pop() is None:\n return index_url\n session[\"page_history\"] = page_history.to_json()\n url = page_history.pop() or index_url\n return url", "title": "" }, { "docid": "503ffcbf2d88ef56ba168701f9a94004", "score": "0.63841414", "text": "def url(self):\n return self.short_url() if (r := self.long_url()) is None else r", "title": "" }, { "docid": "02905d59ce39565ed50f8566c5441137", "score": "0.63800526", "text": "def get_absolute_url(self):\n return ('')", "title": "" }, { "docid": "02905d59ce39565ed50f8566c5441137", "score": "0.63800526", "text": "def get_absolute_url(self):\n return ('')", "title": "" }, { "docid": "b89244e465e296723e5bc12e1d1d3eb1", "score": "0.6359124", "text": "def get_redirect_target(self, resp):\n # Due to the nature of how requests processes redirects this method will\n # be called at least once upon the original response and at least twice\n # on each subsequent redirect response (if any).\n # If a custom mixin is used to handle this logic, it may be advantageous\n # to cache the redirect location onto the response object as a private\n # attribute.\n if resp.is_redirect:\n location = resp.headers['location']\n # Currently the underlying http module on py3 decode headers\n # in latin1, but empirical evidence suggests that latin1 is very\n # rarely used with non-ASCII characters in HTTP headers.\n # It is more likely to get UTF8 header rather than latin1.\n # This causes incorrect handling of UTF8 encoded location headers.\n # To solve this, we re-encode the location in latin1.\n if is_py3:\n location = location.encode('latin1')\n return to_native_string(location, 'utf8')\n return None", "title": "" }, { "docid": "843ebc6f6392ac0d02b99528803d10ab", "score": "0.6358438", "text": "def get_redirect_url(project, path):\n for project_redirect in project.redirects.all():\n if project_redirect.redirect_type == 'prefix':\n if path.startswith(project_redirect.from_url):\n log.debug('Redirecting %s' % project_redirect)\n cut_path = re.sub('^%s' % project_redirect.from_url, '', path)\n to = redirect_filename(project=project, filename=cut_path)\n return to\n elif project_redirect.redirect_type == 'page':\n if path == project_redirect.from_url:\n log.debug('Redirecting %s' % project_redirect)\n to = 
redirect_filename(\n project=project,\n filename=project_redirect.to_url.lstrip('/'))\n return to\n elif project_redirect.redirect_type == 'exact':\n if path == project_redirect.from_url:\n log.debug('Redirecting %s' % project_redirect)\n return project_redirect.to_url\n # Handle full sub-level redirects\n if '$rest' in project_redirect.from_url:\n match = project_redirect.from_url.split('$rest')[0]\n if path.startswith(match):\n cut_path = re.sub('^%s' % match, project_redirect.to_url, path)\n return cut_path\n elif project_redirect.redirect_type == 'sphinx_html':\n if path.endswith('/'):\n log.debug('Redirecting %s' % project_redirect)\n to = re.sub('/$', '.html', path)\n return to\n elif project_redirect.redirect_type == 'sphinx_htmldir':\n if path.endswith('.html'):\n log.debug('Redirecting %s' % project_redirect)\n to = re.sub('.html$', '/', path)\n return to", "title": "" }, { "docid": "44ab8f8a6b7670219ebea65beb55c46a", "score": "0.6355777", "text": "def sso_url(self) -> str:\n return pulumi.get(self, \"sso_url\")", "title": "" }, { "docid": "44ab8f8a6b7670219ebea65beb55c46a", "score": "0.6355777", "text": "def sso_url(self) -> str:\n return pulumi.get(self, \"sso_url\")", "title": "" }, { "docid": "9d6e48827dfb6ed9ac660da9d3108203", "score": "0.6352964", "text": "def get_url(self):\n return self._url", "title": "" }, { "docid": "094d7172e9c1f5816c684a9068c05090", "score": "0.6334891", "text": "def to_url(self):\r\n return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())", "title": "" }, { "docid": "0ee7180707e448aac0e3dc946e899116", "score": "0.6332552", "text": "def actual_url(self) -> str:\n return pulumi.get(self, \"actual_url\")", "title": "" }, { "docid": "118b8cab14b8a297cd54d02e3cae2dec", "score": "0.6329005", "text": "def get_success_url(self):\n\n return self.request.POST.get('next') or '/'", "title": "" }, { "docid": "4686596442ce6581fb83442a2b61c91b", "score": "0.63285595", "text": "def base_url(self):\n return self.context.absolute_url()", "title": "" }, { "docid": "76e14b9bfc370d60105cce09e942fb59", "score": "0.63109094", "text": "def current_url(self):\n return self.last_response.request.url", "title": "" }, { "docid": "9bf76983702b6aa1bee245a8c30381c6", "score": "0.6309031", "text": "def action(self):\n return braintree.TransparentRedirect.url()", "title": "" }, { "docid": "6073f3421642d50e89f18cbb1e9c6fb9", "score": "0.6308189", "text": "def get_success_url(self):\n return str(self.request.cradmin_app.reverse_appindexurl())", "title": "" }, { "docid": "b65187a48b73dbdc47533369880aa74a", "score": "0.62940603", "text": "def get_url(self) -> str:\n return self.__url", "title": "" }, { "docid": "296b0e1ae7908a871649ba45072f6b5c", "score": "0.6290705", "text": "def geturl(self):\n return self.url", "title": "" }, { "docid": "ee0a9fbea28444fc6ae760282e6dcdbd", "score": "0.62718487", "text": "def get_success_url(self):\n messages.add_message(self.request, messages.INFO, _(u'Votre annonce a bien รฉtรฉ supprimรฉe.'), fail_silently=True)\n return settings.LOGIN_REDIRECT_URL", "title": "" }, { "docid": "293da25285a37420c67d3e01d4562421", "score": "0.62712926", "text": "def get_url(self):\n return self._get_url_()", "title": "" }, { "docid": "7ddedbd588b06b188dd0b6461027d128", "score": "0.62640357", "text": "def to_url(self):\n return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())", "title": "" }, { "docid": "f2b29e27a3b11f177ede1a3f834f212b", "score": "0.6244803", "text": "def get_url(self, item_dict):\n final_url = ''\n url = item_dict.get('url', 
'')\n try:\n final_url = reverse(url)\n except NoReverseMatch:\n final_url = url\n return final_url", "title": "" }, { "docid": "55f8e2fc34516ed394a2127102bbb750", "score": "0.62343776", "text": "def generate_auth_url(self):\n url = \"{0}?{1}\".format(\n self.oauth_params.get(\"authorization_endpoint\"),\n urlencode(\n dict(\n client_id=self.client_id,\n scope=self.scopes,\n redirect_uri=\"urn:ietf:wg:oauth:2.0:oob\",\n response_type=\"code\",\n access_type=\"offline\",\n )\n ),\n )\n\n # 'urn:ietf:wg:oauth:2.0:oob' signals to the Google Authorization\n # Server that the authorization code should be returned in the\n # title bar of the browser, with the page text prompting the user\n # to copy the code and paste it in the application.\n\n return url", "title": "" }, { "docid": "60c552030058580db08f1fbadbb6cc38", "score": "0.6225769", "text": "def test_redirect_url(self):\n org = Organization.objects.create()\n consent = Consent.objects.create(org=org, redirect_url=\"http://example.org\")\n contact_uuid = uuid4()\n code = consent.generate_code(contact_uuid)\n\n url = reverse(\"provide-consent\", args=[code])\n response = self.client.get(url)\n self.assertRedirects(\n response, \"http://example.org\", fetch_redirect_response=False\n )", "title": "" }, { "docid": "4f07452ac6a95fd8b28a3d62ffaacc33", "score": "0.62227917", "text": "def get_success_url(self, **kwargs):\n if not self.success_url:\n raise Exception('Must specify a success url')\n\n return url_for(self.success_url, **kwargs)", "title": "" }, { "docid": "2c329cf330059385bb9489e56412a566", "score": "0.6209633", "text": "def get_url(self, routename, **kargs):\r\n scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'\r\n location = self.router.build(routename, **kargs).lstrip('/')\r\n return urljoin(urljoin('/', scriptname), location)", "title": "" } ]
d872102cb6d6374073611e7e8fecba67
runs through the code and executes it
[ { "docid": "f757c21e8c83cb674b551802f565d782", "score": "0.0", "text": "def execute(self,repl=True,to_use=''):\n \n if not to_use:\n code = self.code\n else:\n code = to_use\n code = self.updatecode(code)\n codepos = self.codepos\n cells = self.cells\n pos = self.pos\n bmap = self.makemap()\n funcs = self.mapping\n print code\n while codepos < len(code):\n token = code[codepos]\n \n if token not in funcs: print \"TOKEN IS NOT IN FUNCS. WAT\" #just in case\n funcs[token]()\n \n codepos += 1\n \n print\n return", "title": "" } ]
[ { "docid": "1efae473535689ade6bba4a0066de701", "score": "0.7500659", "text": "def do_run(self):", "title": "" }, { "docid": "6b853038144e84eca87301e1d74ad67a", "score": "0.74891174", "text": "def run():", "title": "" }, { "docid": "6b853038144e84eca87301e1d74ad67a", "score": "0.74891174", "text": "def run():", "title": "" }, { "docid": "6b853038144e84eca87301e1d74ad67a", "score": "0.74891174", "text": "def run():", "title": "" }, { "docid": "6b853038144e84eca87301e1d74ad67a", "score": "0.74891174", "text": "def run():", "title": "" }, { "docid": "844cd88c58008a5e3d3ddd8744f795b4", "score": "0.7416469", "text": "def run():\n pass", "title": "" }, { "docid": "b45b0411595b9d2f9ddecbad2c1c7eb8", "score": "0.7403389", "text": "def exec(self):\n pass", "title": "" }, { "docid": "530950521852b50ff8a8c00c639e4c47", "score": "0.7341587", "text": "def _execute( self ):\n pass", "title": "" }, { "docid": "731a6e3c9d33db6ed938d61636962607", "score": "0.7296771", "text": "def _run(self):\n pass", "title": "" }, { "docid": "731a6e3c9d33db6ed938d61636962607", "score": "0.7296771", "text": "def _run(self):\n pass", "title": "" }, { "docid": "0009cf3576641f6ef2cace1f6bde3d54", "score": "0.7245134", "text": "def run():\n pass", "title": "" }, { "docid": "0009cf3576641f6ef2cace1f6bde3d54", "score": "0.7245134", "text": "def run():\n pass", "title": "" }, { "docid": "1cc8c223e3575003258e89c09179cebc", "score": "0.7238029", "text": "def _execute(self):\n pass", "title": "" }, { "docid": "0c3c31220e9e5ff55c02641b671c21e1", "score": "0.7217492", "text": "def run_logic(self):\n pass", "title": "" }, { "docid": "dae69a5ed52f7cd55ca86cfb5385b2f0", "score": "0.71713215", "text": "def __call__(self):\n self.run()", "title": "" }, { "docid": "bc238b3094f6829212db48a11a30bdc1", "score": "0.70764166", "text": "def _exec(self):\n raise NotImplementedError", "title": "" }, { "docid": "10721487f7749335472ffb3071fd0bdb", "score": "0.7029038", "text": "def _run(self) -> Any:\n pass", "title": "" }, { "docid": "3ffa3d98481498f751adb6e971111a97", "score": "0.6987815", "text": "def execute(self):\n\t\tpass", "title": "" }, { "docid": "6361f9574d368e22ec4505fb8427e48b", "score": "0.6970949", "text": "def exec_(self, code, **vars): # reliably restored by inspect\n pass", "title": "" }, { "docid": "cc2e615c3320e57832249c8b0fd42871", "score": "0.69340235", "text": "def run(self):\n\t\tpass", "title": "" }, { "docid": "cc2e615c3320e57832249c8b0fd42871", "score": "0.69340235", "text": "def run(self):\n\t\tpass", "title": "" }, { "docid": "ab3e4804d38658580de7852fb5be4470", "score": "0.6915937", "text": "def run(self):\r\n pass", "title": "" }, { "docid": "ab3e4804d38658580de7852fb5be4470", "score": "0.6915937", "text": "def run(self):\r\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": 
"0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6906917", "text": "def execute(self):\n pass", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.68684494", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.68684494", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.68684494", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.68684494", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.68684494", "text": "def execute(self):", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.68590724", "text": "def run(self):\n pass", "title": "" }, { "docid": "979f41bb8b7fe36537f3e78182afe38e", "score": "0.68418485", "text": "def run(self): # -> None:\n ...", "title": "" }, { "docid": "09035a3a567af18c57a26415fdd236d2", "score": "0.683398", "text": "def _run(self):\n raise NotImplementedError()", "title": "" }, { "docid": "80fc2efafec55e9f0d9924d6cae2290e", "score": "0.6830483", "text": "def execute(self):\n self.myfast.execute()", "title": "" }, { "docid": "a768819fe988c045959d11f433bf8aa6", "score": "0.6826694", "text": "def _run_iteration(self) :\n pass", "title": "" }, { "docid": "f1a7c41cf07998e8d7308a26294bd9d2", "score": "0.682072", "text": "def run(self):\n\t\treturn self.function()", "title": "" }, { "docid": "6f0c1b1b2afb977194e05c8287b17270", "score": "0.6816075", "text": "def runner():", "title": "" }, { "docid": "e4a3c8685d3d51b3e77e9e983ed32f0f", "score": "0.67881393", "text": "def run(self):\n\n pass", "title": "" }, { "docid": "bf83919dd9c83e80326a7db2950ffbfb", "score": "0.67685467", "text": "def run(self):\n ...", "title": "" }, { "docid": "6f1f5650a54c7a2e6c3721682da5ab45", "score": "0.67673254", "text": "def execute(self):\n\n pass", "title": "" }, { "docid": "4cf82bea0b68e66070934587037577f5", "score": "0.67650044", "text": "def run_fn(self):\n pass", "title": 
"" }, { "docid": "a3a42fc3ba909c8942299f1b7db149c5", "score": "0.67566514", "text": "def run(self):\n raise NotImplementedError('run is not implemented')", "title": "" }, { "docid": "72cae301baa8b8b6946d0470d583438d", "score": "0.6736884", "text": "def run(self):\n\n for code in self.programming:\n #Energy = new Energy.Netrual\n #Energy.setProgram(code)\n #Energy.ExecuteAndBind()\n print code", "title": "" }, { "docid": "83eeb6063de88dd9fd160832cd6bb3b5", "score": "0.6703368", "text": "def exec(self):\n raise NotImplemented(\"this functionality has not yet been written.\")", "title": "" }, { "docid": "c0007e479abd1251cccedec52125683b", "score": "0.66900027", "text": "def _run(self):\n pass # abstract method does nothing", "title": "" }, { "docid": "f0eb9245f3a5d830087098fc017be200", "score": "0.6676129", "text": "def eval_code(self):\n\n while 0 <= self.pc < len(self.source):\n self.step()", "title": "" }, { "docid": "0d1716c437f8e2399775022f5286b4a7", "score": "0.6655767", "text": "def Run(self):", "title": "" }, { "docid": "49b5c75315d1c05dcbc41a0e540a27ed", "score": "0.6627721", "text": "def __call__(self):\n self.benchmark()\n self.eval()", "title": "" }, { "docid": "dc4d5ecf891bbc2ad2b727722356e038", "score": "0.6621629", "text": "def _run(self):\n self.result = self.check()", "title": "" }, { "docid": "bc29d81df803f66d92434adc30752d10", "score": "0.66203886", "text": "def run_instruction(self) -> None:\n self.fetch() # Fetching instruction\n self.decode() # Decoding instruction\n self.update_control() # Updating control\n self.read_registers() # Accessing register file\n self.execute() # Executing the instruction in the alu\n self.access_memory() # Accessing memory\n self.write_back() # Write back\n self.print_changes() # Printing details of the instruction execution\n self.fix_pc() # Fix program counter\n self.check_done() # Checking if the program counter is out of range", "title": "" }, { "docid": "3fbb580dcf99a9dd9f2f68dec5c79b5e", "score": "0.66202885", "text": "def run(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "3a7c99a58e41686aff839ba71d8abc65", "score": "0.66182774", "text": "def run(self):\n return", "title": "" }, { "docid": "3a7c99a58e41686aff839ba71d8abc65", "score": "0.66182774", "text": "def run(self):\n return", "title": "" }, { "docid": "f4e74228ff6b18b42cbeb8c7d56bd3ee", "score": "0.66176236", "text": "def run(self, xs):\n \"*** YOUR CODE HERE ***\"", "title": "" }, { "docid": "b66576e01530e8544cd1e23930f8d468", "score": "0.65772486", "text": "def _execute(self):\n\n raise NotImplemented", "title": "" }, { "docid": "867b6425a853e1edc8be826ca37507ce", "score": "0.6575159", "text": "def nbexec(_nbclient):\n # Adapted/simplified from nbclient/client.py (BSD-3-Clause)\n _nbclient._cleanup_kernel()\n\n def execute(code, reset=False):\n _nbclient.reset_execution_trackers()\n with _nbclient.setup_kernel():\n assert _nbclient.kc is not None\n cell = Bunch(cell_type='code', metadata={}, source=dedent(code))\n _nbclient.execute_cell(cell, 0, execution_count=0)\n _nbclient.set_widgets_metadata()\n\n yield execute", "title": "" }, { "docid": "55079ce82ede5adc6d0ace1ee1891354", "score": "0.65630585", "text": "async def _eval(self, ctx, *, code: commands.clean_content):\n r = requests.post(\n \"http://coliru.stacked-crooked.com/compile\",\n data=json.dumps(\n {\"cmd\": \"python3 main.cpp\", \"src\": self.cleanup_code(code)}\n ),\n )\n emoji = self.bot.get_emoji(508_388_437_661_843_483)\n await ctx.message.add_reaction(emoji)\n await ctx.send(\n 
f\"```py\\n{r.text}\\n```\\n(This is **not** an open eval, everything is sandboxed)\"\n )", "title": "" }, { "docid": "c3036114735aeec14fdb7a668433ac3d", "score": "0.65503436", "text": "def get_code_from_file(self, data):\n return super().run()", "title": "" }, { "docid": "b36a086b0e5c43582bb245262dda4c9b", "score": "0.6529328", "text": "def run(ctx):", "title": "" }, { "docid": "4ee31f4d1fb9eada5f8c4453cf1c6740", "score": "0.6507501", "text": "def execute(self):\n return", "title": "" }, { "docid": "be7c9b325fe8970d31a946e685b98085", "score": "0.6488941", "text": "def do_runsta(self, runsta):", "title": "" }, { "docid": "f0779c7ef36f68f9c5c750314286dec7", "score": "0.64870816", "text": "def __call__(self):\n self.do()", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.6481783", "text": "def run(self):", "title": "" }, { "docid": "6756ec547be081ddd463698d88ef7759", "score": "0.6470163", "text": "def single_run(self) -> None:", "title": "" }, { "docid": "9ded3791ab95abd049c7a8355f1cab2a", "score": "0.6467167", "text": "def processIt(self):\n self.main.run()", "title": "" } ]
ea6f05dcbdaa7b397d23a33d382cf3ea
Create a simple naive multinomial model where input X is a 1D array of floats in [0, 1.] and output Y is a one-hot encoding of three classes, A, B, C. [1,0,0] -> class A [0,1,0] -> class B [0,0,1] -> class C Any x_i in [0, 0.3) belongs to class A. Any x_i in [0.3, 0.6) belongs to class B. Any x_i in [0.6, 1) belongs to class C.
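Illustrative sketch (not part of the dataset records): one possible implementation of the task described in the query above. The class thresholds 0.3 and 0.6 come from the query itself; the network architecture, optimizer, and training settings are assumptions chosen only for this example.

```python
import numpy as np
import tensorflow as tf

def make_one_hot(x):
    """Map a float in [0, 1) to a one-hot vector over classes A, B, C."""
    if x < 0.3:
        return [1, 0, 0]   # class A
    elif x < 0.6:
        return [0, 1, 0]   # class B
    return [0, 0, 1]       # class C

# Build a toy training set: 1000 floats in [0, 1) and their one-hot labels.
x_train = np.random.rand(1000).astype(np.float32).reshape(-1, 1)
y_train = np.array([make_one_hot(v) for v in x_train.ravel()], dtype=np.float32)

# A minimal softmax classifier over the single scalar input.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(1,)),
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(3, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, epochs=50, batch_size=32, verbose=0)

# Predictions should peak on class A, B, C respectively.
print(model.predict(np.array([[0.1], [0.45], [0.9]], dtype=np.float32)))
```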
[ { "docid": "15bcca248399937da0adcd09acc877c2", "score": "0.6226215", "text": "def test_naive_multinomial():\n def make_y(x):\n if x <= 0.33:\n return [1,0,0]\n elif x > 0.33 and x <= 0.67:\n return [0,1,0]\n else:\n return [0,0,1]\n\n out_dir = op.join(op.dirname(op.dirname(__file__)), 'out', 'naive_multinomial')\n x_train = np.fromiter(range(1000), dtype=np.float32) / 1000. # 0, 0.001, ..., 0.999\n np.random.shuffle(x_train)\n\n # WARNING: batch_size matters, and randomization matters!\n # If input x_train is not randomized, and batch_size is large (e.g., 512), then\n # trained model has poor performance (accuray 0.33).\n # If batch size is small (e.g., 32), the trained model has good accuracy.\n # If x_train is randomly shuffled, the trained model has good accuracy even when batch size is large.\n\n y_train = [make_y(x) for x in x_train]\n y_train = np.asarray(y_train).reshape(1000, 3)\n\n model, evl = train(x_train=x_train, y_train=y_train,\n out_dir=out_dir, name='naive_multinomial',\n epochs=200, batch_size=32,\n create_and_compile_model_func=multinomial_model_0)\n pred = model.evaluate([0.02, 0.5, 0.8], [[1,0,0], [0, 1, 0], [0, 0, 1]])\n assert pred[1] == 1.0\n # save_dir = '/pbi/dept/secondary/siv/yli/jira/tak-97/naive-multinomial/'\n # new_model = tf.keras.models.load_model(save_dir)\n # new_pred = new_model.evaluate([0.02, 0.5, 0.8], [[1,0,0], [0, 1, 0], [0, 0, 1]])\n # assert pred == new_pred", "title": "" } ]
[ { "docid": "10065240f9d781740b94508886ad9703", "score": "0.6501281", "text": "def multiclass_noisify(y, P, random_state=0):\n print (np.max(y), P.shape[0])\n assert P.shape[0] == P.shape[1]\n assert np.max(y) < P.shape[0]\n\n # row stochastic matrix\n assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))\n assert (P >= 0.0).all()\n\n m = y.shape[0]\n print (m)\n new_y = y.copy()\n flipper = np.random.RandomState(random_state)\n\n for idx in np.arange(m):\n i = y[idx]\n # draw a vector with only an 1\n flipped = flipper.multinomial(1, P[i, :][0], 1)[0]\n new_y[idx] = np.where(flipped == 1)[0]\n\n return new_y", "title": "" }, { "docid": "cba533e1aedd75d9ee05c18719f7839f", "score": "0.63740146", "text": "def one_hot(Y, num_classes):\n batch_size = len(Y)\n Y_tilde = torch.zeros((batch_size, num_classes), device=Y.device)\n Y_tilde[torch.arange(batch_size), Y] = 1\n return Y_tilde.long()", "title": "" }, { "docid": "27096abb4fe5623c76881bae59184fa3", "score": "0.6356957", "text": "def one_hot(n_class,Y):\n\tone_hot_targets = np.eye(n_class)[:,Y]\n\treturn one_hot_targets", "title": "" }, { "docid": "c8cfbc788dcb76c007627b67a606376c", "score": "0.6293823", "text": "def train_X(self):\n X, y = self.preprocess()\n\n if self.library == 'mxnet':\n data = mx.sym.Variable('data')\n\n fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=self.num_labels * 10)\n act1 = mx.sym.Activation(data=fc1, name='relu1', act_type=\"relu\")\n\n # The second fully-connected layer and the according activation function\n fc2 = mx.sym.FullyConnected(data=act1, name='fc2', num_hidden=self.num_labels * 5)\n act2 = mx.sym.Activation(data=fc2, name='relu2', act_type=\"relu\")\n\n # The thrid fully-connected layer, note that the hidden size should be 10, which is the number of unique digits\n fc4 = mx.sym.FullyConnected(data=act2, name='fc4', num_hidden=self.num_labels)\n # The softmax and loss layer\n mlp = mx.sym.SoftmaxOutput(data=fc4, name='softmax')\n # create a model\n # mx.viz.plot_network(symbol=mlp, shape={\"data\": (28, 22)}).render(\"NaiveNet\", view=True)\n examples = mx.io.NDArrayIter(X, y)\n\n import logging\n logging.basicConfig(level=logging.INFO)\n self.model = mx.model.FeedForward(symbol=mlp,\n num_epoch=350,\n learning_rate=0.001,\n wd=0.00001,\n momentum=0.9)\n\n self.model.fit(X=examples)\n if self.library == 'lasagne':\n if self.data_model == 'linear':\n input_var = T.matrix('inputs')\n elif self.data_model == 'matrix':\n input_var = T.tensor3('inputs')\n target_var = T.ivector('targets')\n\n shape = (None, self.sequence_length)\n if self.data_model == 'matrix':\n shape = (None, self.sequence_length, self.sequence_length)\n\n l_in = lasagne.layers.InputLayer(shape=shape,\n input_var=input_var)\n l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)\n l_hid1 = lasagne.layers.DenseLayer(\n l_in_drop, num_units=800,\n nonlinearity=lasagne.nonlinearities.rectify,\n W=lasagne.init.GlorotUniform())\n l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)\n\n l_hid2 = lasagne.layers.DenseLayer(\n l_hid1_drop, num_units=800,\n nonlinearity=lasagne.nonlinearities.rectify)\n\n l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)\n l_out = lasagne.layers.DenseLayer(\n l_hid2_drop, num_units=self.num_labels,\n nonlinearity=lasagne.nonlinearities.softmax)\n\n prediction = lasagne.layers.get_output(l_out)\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()\n params = lasagne.layers.get_all_params(l_out, trainable=True)\n updates = lasagne.updates.sgd(loss, 
params, learning_rate=0.01)\n\n f_learn = theano.function([input_var, target_var], loss, updates=updates, allow_input_downcast=True)\n self.model = theano.function([input_var], prediction, allow_input_downcast=True)\n\n # Training\n it = 5000\n for i in range(it):\n l = f_learn(X, y)", "title": "" }, { "docid": "03ccf5001d69bbc2524fdaeaff9fa99d", "score": "0.6222285", "text": "def multiclass_noisify(self, y, P, random_state=0):\n\n assert P.shape[0] == P.shape[1]\n assert np.max(y) < P.shape[0]\n\n # row stochastic matrix\n assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))\n assert (P >= 0.0).all()\n\n m = y.shape[0]\n new_y = y.copy()\n flipper = np.random.RandomState(random_state)\n\n for idx in np.arange(m):\n i = y[idx]\n # draw a vector with only an 1\n flipped = flipper.multinomial(1, P[i, :], 1)[0]\n new_y[idx] = np.where(flipped == 1)[0]\n\n return new_y", "title": "" }, { "docid": "439bc233c657e2d0652d45bfa212c76a", "score": "0.61863184", "text": "def one_hot_encode(Y, classes):\n onehot = np.zeros((classes, Y.shape[0]))\n for ex, label in enumerate(Y):\n onehot[label][ex] = 1\n return onehot", "title": "" }, { "docid": "d249d5b0e5f6c58190b1bf27f71d6a50", "score": "0.61737025", "text": "def get_one_hot(X, num_classes):\n X = X.astype(int)\n N = reduce(mul, X.shape)\n X_reshaped = np.reshape(X, (-1, ))\n X_one_hot = np.zeros((N, num_classes), dtype=np.float32)\n X_one_hot[np.arange(N), X_reshaped] = 1\n return X_one_hot.reshape((*X.shape, num_classes))", "title": "" }, { "docid": "2cf9cdd474948f23af77e6f702bfd1e0", "score": "0.6132776", "text": "def one_hot_encode(Y, classes):\n if len(Y) == 0 or type(Y) is not np.ndarray:\n return None\n elif type(classes) is not int or classes < np.amax(Y):\n return None\n\n one_hot = np.zeros((classes, Y.shape[0]))\n one_hot[Y, np.arange(Y.shape[0])] = 1\n\n return one_hot", "title": "" }, { "docid": "a7edcba30902329dcc9ab298c7eeed36", "score": "0.6125757", "text": "def one_hot(a, nclass):\n b = np.zeros((a.size, nclass))\n b[np.arange(a.size),a] = 1\n return b", "title": "" }, { "docid": "d3e4aa446fc0ae13fc3abc29bacceb17", "score": "0.61133194", "text": "def one_hot(Y, n_classes):\n\n matrix = np.zeros((n_classes, Y.shape[1]))\n for i in range(n_classes):\n matrix[i, np.argwhere(Y == i)[:, 1]] = 1\n\n return matrix", "title": "" }, { "docid": "7af269aed76f5bf50083791e2ff15442", "score": "0.61124545", "text": "def toy_linear_1d_classification(seed=default_seed):\n def sample_class(f):\n p = 1. / (1. 
+ np.exp(-f))\n c = np.random.binomial(1, p)\n c = np.where(c, 1, -1)\n return c\n\n np.random.seed(seed=seed)\n x1 = np.random.normal(-3, 5, 20)\n x2 = np.random.normal(3, 5, 20)\n X = (np.r_[x1, x2])[:, None]\n return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'covariates' : ['X'], 'response': [discrete({'positive': 1, 'negative': -1})],'seed' : seed}", "title": "" }, { "docid": "5a13976605beab49a1cb1f18ae3f13dc", "score": "0.60361516", "text": "def onehot(labels, n_classes):\n values = np.zeros((len(labels), n_classes))\n values[np.arange(len(values)), labels] = 1\n return values", "title": "" }, { "docid": "587da93cc7a1484e685a8c107f59ea86", "score": "0.6022485", "text": "def __init__(self, input, n_in, n_out):\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True)\n # initialize the biases b as a vector of n_out 0s\n self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True)\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyperplane for\n # class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of\n # hyperplane-k\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n # keep track of model input\n self.input = input", "title": "" }, { "docid": "fb9bcfea960eadeb0d243e33ee7357d8", "score": "0.60156095", "text": "def __init__(self, input):\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # input is a matrix where row-j represents input training sample-j\n self.p_y_given_x = T.nnet.softmax(input)\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)", "title": "" }, { "docid": "a8bd32ac0711a235f7d21c6411c9983a", "score": "0.6012591", "text": "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "title": "" }, { "docid": "6838c8998945ca1113a908a95448a2d6", "score": "0.6000705", "text": "def make_labels(training_set):\r\n \r\n one_hot = np.zeros((len(training_set), num_classes), dtype=np.uint8)\r\n \r\n j = 0\r\n for i in range(len(training_set)):\r\n if j == 100:\r\n j = 0\r\n \r\n temp = np.zeros((num_classes), dtype=np.uint8)\r\n temp[j] = 1\r\n j += 1\r\n one_hot[i] = temp\r\n\r\n return one_hot", "title": "" }, { "docid": "775bf78eaa9f41a4241f3c1e90020aec", "score": "0.599795", "text": "def build_model(n_class):\n\n model = Sequential()\n\n model.add(Conv2D(64, (3, 3), padding='same', \n input_shape=SHAPE,\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.3))\n\n model.add(Conv2D(64, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n 
model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(128, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(512,kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization(name='features_layer'))\n\n model.add(Dropout(0.5))\n model.add(Dense(n_class, name='ll_dense'))\n model.add(Activation('softmax'))\n return model", "title": "" }, { "docid": "0ea1c61fcc8088fba08f5a897d58f05a", "score": "0.5990482", "text": "def forward(self, x):\n mask = (x != 1) # [B,T]\n z = self.latent_model(x, mask)\n y = self.classifier(x, mask, z)\n\n return y", "title": "" }, { "docid": "94b6ad93b0a83334c403ae55afae415f", "score": "0.59673697", "text": "def convert_to_one_hot(Y, number_classes):\n Y = np.eye(number_classes)[Y.reshape(-1)]\n return Y", "title": "" }, { "docid": "fdbf4500163fb02d75dddef9ff1cb3a7", "score": "0.5963015", "text": "def __init__(self, input, n_in, n_out):\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n\n self.W = theano.shared(\n value=np.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='lgReg_W',\n borrow=True\n )\n # initialize the baises b as a vector of n_out 0s\n self.b = theano.shared(\n value=np.zeros(\n (n_out,),\n dtype=theano.config.floatX\n ),\n name='lgReg_b',\n borrow=True\n )\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyper plain for\n 
# class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of hyper\n # plain-k\n\n softmax_input = T.dot(input, self.W) + self.b\n self.p_y_given_x = T.nnet.softmax(softmax_input)\n #self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W))\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]", "title": "" }, { "docid": "c481c75546574ae59bfa0cd5b90e2f4e", "score": "0.59495986", "text": "def fcnn_classifier_tox21_ml(n_x, n_y):\n classifier = keras.Sequential([\n keras.layers.InputLayer(input_shape=(n_x,)),\n keras.layers.Dense(n_x, activation=tf.nn.relu),\n keras.layers.Dense(int(n_x / 2), activation=tf.nn.relu),\n keras.layers.Dense(n_y, activation=tf.nn.sigmoid)\n\n ])\n\n classifier.compile(optimizer=tf.train.AdamOptimizer(),\n loss=masked_loss_function,\n metrics=[f1_score])\n\n return classifier", "title": "" }, { "docid": "b474f22c77a9b5d6328478587166c6a3", "score": "0.5947831", "text": "def train_nb(X, y):\n # use list comprehension\n\n # Separate training points by class\n ClassLabels, count_Class = np.unique (y, return_counts=True)\n DataInClass = [[x for x, t in zip (X, y) if t == c] for c in ClassLabels] # categorize for each class\n\n #########################################################################\n # TODO: #\n # compute class prior #\n #########################################################################\n\n Number_of_InputData = y.size\n prior = count_Class / Number_of_InputData\n \n #########################################################################\n # Detect type of our data #\n # Categorical or continuous #\n #########################################################################\n\n Col_Continue = np.array([np.all([isinstance(_xj, (int, float)) for _xj in cols])\n for cols in X.T])\n IScontinuous = np.all(Col_Continue) \n \n #########################################################################\n # TODO: #\n # Estimate mean and std for each class / feature #\n #########################################################################\n if IScontinuous:\n # mean and std for continuous data\n \n mean = np.array([np.mean(x, axis=0) for x in DataInClass])\n std = np.array([np.std(x, axis=0) for x in DataInClass])\n \n # mean is a c x d array : c=number of classes and d is dimension of X\n # std is a c x d array : c=number of classes and d is dimension of X\n\n result = prior, mean, std\n else:\n # frequancy for categorical data\n frequancy = []\n for i, x in enumerate(DataInClass):\n x1 = np.array(x)\n frequancy.append([{feature: count / count_Class[i] for feature, count in zip(*np.unique(cols, return_counts=True))}\n for cols in x1.T])\n result = prior, frequancy, \n return result", "title": "" }, { "docid": "a7794c9032710eb65c550630a8187b75", "score": "0.5944941", "text": "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "title": "" }, { "docid": "05f2874601a78aa47e019ef59dfa716c", "score": "0.59346116", "text": "def MLP_classifier(n_input, n_output, n_hidden_neurons = 500):\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n \n print('... 
building the model')\n \n x = T.matrix('x')\n \n # construct the MLP class\n classifier = MLP.MLP(\n input=x,\n n_in= n_input,\n n_hidden=n_hidden_neurons, #default\n n_out=n_output\n )\n \n return classifier", "title": "" }, { "docid": "1e885ffcee7a1a6197fc78664e545a1c", "score": "0.59339994", "text": "def confusion_multi_class(prediction, truth, labels):\n nclasses = labels.max() + 1\n cf2 = torch.zeros(nclasses, nclasses, dtype=torch.float, device=prediction.device)\n prediction = prediction.view(-1).long()\n truth = truth.view(-1)\n to_one_hot = torch.eye(int(nclasses), dtype=cf2.dtype, device=prediction.device)\n for c in range(nclasses):\n true_mask = (truth == c)\n pred_one_hot = to_one_hot[prediction[true_mask]].sum(0)\n cf2[:, c] = pred_one_hot\n\n return cf2.cpu().numpy()", "title": "" }, { "docid": "34e34ff79ca49c12396fbbdd602e4283", "score": "0.5930569", "text": "def create_onehot(label, num_classes):\n onehot = torch.zeros(label.shape[0], num_classes)\n onehot = onehot.scatter(1, label.unsqueeze(1).data.cpu(), 1)\n onehot = onehot.to(label.device)\n return onehot", "title": "" }, { "docid": "c463d23bc3b32e7eedd3e4d70990e858", "score": "0.59215015", "text": "def to_one_hot(y, num_classes=10):\n if isinstance(y, torch.Tensor):\n one_hot = torch.zeros((y.shape[0], num_classes), dtype=torch.float32)\n one_hot[torch.arange(y.shape[0]), y] = 1.0\n else:\n one_hot = np.zeros((y.shape[0], num_classes), dtype=np.float)\n one_hot[np.arange(y.shape[0]), y] = 1.0\n\n return one_hot", "title": "" }, { "docid": "68927a575e8e5d8b7016f9df3feaaf69", "score": "0.591835", "text": "def encoding(labels):\r\n\r\n if type(labels) in [tuple , list]:\r\n labels = numpy.array(labels)\r\n\r\n # num of classes\r\n num_unique_class = len( numpy.unique(labels) )\r\n # initialize the output after one hot encoding\r\n solution = numpy.zeros(( max(labels) + 1 , labels.size))\r\n\r\n # loop through the target array to encode every label\r\n for index in range(labels.shape[0] ):\r\n\r\n solution[: , index ][labels[index]] = 1\r\n\r\n return solution", "title": "" }, { "docid": "d100448a5327c9bcd5e9b129e06099d2", "score": "0.59124905", "text": "def pointnet_cls(NUM_POINTS,NUM_CLASSES):\n \n input=layers.Input(shape=(NUM_POINTS,3))\n \n # input transform\n x=Tnet(input)\n x=tf.matmul(input,x)\n \n # shared mlp\n x=conv_bn(x,64,1,'relu')\n temp=conv_bn(x,64,1,'relu')\n \n # feature transform\n x=Tnet(temp)\n x=tf.matmul(temp,x)\n \n #shared mlp \n for filter in [64,128,1024]:\n x = conv_bn(x, filter, 1,'relu')\n \n # Global Feature\n x = layers.GlobalMaxPool1D()(x)\n \n # Classifier\n x = dense_bn(x, 512, 'relu')\n x = layers.Dropout(0.7)(x)\n x = dense_bn(x, 256, 'relu')\n x = layers.Dropout(0.7)(x)\n x = layers.Dense(NUM_CLASSES,activation='softmax')(x)\n \n return models.Model(input,x)", "title": "" }, { "docid": "bdff2d4fb9b07ed0b3238eef0e2792da", "score": "0.59117895", "text": "def __customML_tensorflow_defaultpredict_classification(self, features, model, commands, ML_cmnd):\n\n #need to validate\n\n # #user can conduct this import externally to speed up this function\n # module = 'tensorflow'\n # if module not in sys.modules:\n import tensorflow as tf\n \n features = tf.convert_to_tensor(features.to_numpy())\n \n infill = model.predict(features)\n \n if len(infill.shape) == 0:\n pass\n \n elif len(infill.shape) == 1 or len(infill.shape) > 1 and infill.shape[1] == 1:\n #this works for binary classificaiton\n infill = np.rint(infill)\n \n else:\n #for multi label classificiton, assumes infill.shape[1] > 1 and 
len(infill.shape) == 2:\n \n #convert max entry in row to 1, other entries to 0\n infill2 = np.where(infill == np.max(infill, axis=1).reshape(-1,1), 1, 0)\n\n #for rows where the original derivation had all zeros recover that form\n infill2 = np.where(infill.sum(axis=1).reshape(-1,1) == 0, 0, infill2)\n \n #this translates to a single column with entries correpsonding to column index number of activation +1\n #as str(int) type and entry and '0' used as register for rows without activation\n #e.g. first column activations populated as '1', second as '2', etc. No activations populated as '0'\n #for rows where 1 is populated in every column (as would be case when all entries in row are nonzero and equal), \n #the rightmost column is treated as the activation\n \n infill, ML_cmnd = self.__convert_onehot_to_singlecolumn(pd.DataFrame(infill2), ML_cmnd)\n \n return infill", "title": "" }, { "docid": "d6dcdbfeb971a16961e0770ae69bbbcb", "score": "0.5910432", "text": "def one_hot(labels, num_classes):\n y = torch.eye(num_classes)\n return y[labels]", "title": "" }, { "docid": "2c7a1f324efd828716e96ba140570836", "score": "0.5906346", "text": "def train_classifier(self,X,y,a0=None,lmbd=None,sigma0=None):\n self.a0=a0 if a0!=None else self.a0\n self.sigma0=sigma0 if sigma0!=None else self.sigma0\n self.lmbd=lmbd if lmbd!=None else self.lmbd\n self.train_data = X\n self.n_classes = len(set(y.tolist()))\n imp_mat_shape = tuple([self.n_classes]+list(self.shape)[0:3])\n self.importance_matrix = cp.zeros(shape=imp_mat_shape)\n for t in tqdm(range(self.train_data.shape[0])):\n label = y[t].tolist()\n sample=self.train_data[t]\n sample=helper.create_fields(sample,self.receptive_field_size,self.step)\n bmu_upt = self.identify_bmu(sample)\n self.model_update(bmu_upt,sample,t)\n pred = [[label for i in range(self.neurons)],[i for i in range(self.neurons)],[bmu_upt[i][0] for i in range(self.neurons)],[bmu_upt[i][1] for i in range(self.neurons)]]\n self.importance_matrix[pred]=self.importance_matrix[pred]+1\n self.mode ='classif'", "title": "" }, { "docid": "3eadbf26c3d664c7fa77e3793942fd17", "score": "0.5900069", "text": "def make_classification(\n n_samples=100,\n n_features=20,\n n_informative=2,\n n_redundant=2,\n n_repeated=0,\n n_classes=2,\n n_clusters_per_class=2,\n weights=None,\n flip_y=0.01,\n class_sep=1.0,\n hypercube=True,\n shift=0.0,\n scale=1.0,\n shuffle=True,\n useful_indices=False,\n random_state=None,\n):\n generator = check_random_state(random_state)\n\n # Count features, clusters and samples\n if n_informative + n_redundant + n_repeated > n_features:\n raise ValueError(\n \"Number of informative, redundant and repeated \"\n \"features must sum to less than the number of total\"\n \" features\"\n )\n if 2 ** n_informative < n_classes * n_clusters_per_class:\n raise ValueError(\n \"n_classes * n_clusters_per_class must\"\n \" be smaller or equal 2 ** n_informative\"\n )\n if weights and len(weights) not in [n_classes, n_classes - 1]:\n raise ValueError(\n \"Weights specified but incompatible with number \" \"of classes.\"\n )\n\n n_useless = n_features - n_informative - n_redundant - n_repeated\n n_clusters = n_classes * n_clusters_per_class\n\n if weights and len(weights) == (n_classes - 1):\n weights = weights + [1.0 - sum(weights)]\n\n if weights is None:\n weights = [1.0 / n_classes] * n_classes\n weights[-1] = 1.0 - sum(weights[:-1])\n\n # Distribute samples among clusters by weight\n n_samples_per_cluster = []\n for k in range(n_clusters):\n n_samples_per_cluster.append(\n int(n_samples 
* weights[k % n_classes] / n_clusters_per_class)\n )\n for i in range(n_samples - sum(n_samples_per_cluster)):\n n_samples_per_cluster[i % n_clusters] += 1\n\n # Initialize X and y\n X = np.zeros((n_samples, n_features))\n y = np.zeros(n_samples, dtype=np.int)\n\n # Build the polytope whose vertices become cluster centroids\n centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(float)\n centroids *= 2 * class_sep\n centroids -= class_sep\n if not hypercube:\n centroids *= generator.rand(n_clusters, 1)\n centroids *= generator.rand(1, n_informative)\n\n # Initially draw informative features from the standard normal\n X[:, :n_informative] = generator.randn(n_samples, n_informative)\n\n # Create each cluster; a variant of make_blobs\n stop = 0\n for k, centroid in enumerate(centroids):\n start, stop = stop, stop + n_samples_per_cluster[k]\n y[start:stop] = k % n_classes # assign labels\n X_k = X[start:stop, :n_informative] # slice a view of the cluster\n\n A = 2 * generator.rand(n_informative, n_informative) - 1\n X_k[...] = np.dot(X_k, A) # introduce random covariance\n\n X_k += centroid # shift the cluster to a vertex\n\n # Create redundant features\n if n_redundant > 0:\n B = 2 * generator.rand(n_informative, n_redundant) - 1\n X[:, n_informative : n_informative + n_redundant] = np.dot(\n X[:, :n_informative], B\n )\n\n # Repeat some features\n if n_repeated > 0:\n n = n_informative + n_redundant\n indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)\n X[:, n : n + n_repeated] = X[:, indices]\n\n # Fill useless features\n if n_useless > 0:\n X[:, -n_useless:] = generator.randn(n_samples, n_useless)\n\n # Randomly replace labels\n if flip_y >= 0.0:\n flip_mask = generator.rand(n_samples) < flip_y\n y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())\n\n # Randomly shift and scale\n if shift is None:\n shift = (2 * generator.rand(n_features) - 1) * class_sep\n X += shift\n\n if scale is None:\n scale = 1 + 100 * generator.rand(n_features)\n X *= scale\n\n indices = np.arange(n_features)\n if shuffle:\n # Randomly permute samples\n X, y = util_shuffle(X, y, random_state=generator)\n\n # Randomly permute features\n generator.shuffle(indices)\n X[:, :] = X[:, indices]\n\n if useful_indices:\n n_useful = n_informative + n_redundant + n_repeated\n return X, y, indices < n_useful\n else:\n return X, y", "title": "" }, { "docid": "b32cebe6fbd6961e11273e84e0aa9f85", "score": "0.5892194", "text": "def multinomialNB(X_train_dtm, y_train, X_test_dtm, y_test):\n # Create ML-model (NB Multinomial) and fit to training-set\n #(calculate pobabilities)\n mnb = MultinomialNB()\n mnb.fit(X_train_dtm, y_train)\n\n y_pred_class = mnb.predict(X_test_dtm)\n y_true_class = y_test\n return y_pred_class, y_true_class, train_time, test_time", "title": "" }, { "docid": "36208400ba3cdc1bf136012d7e61cd22", "score": "0.58539903", "text": "def _categorical_to_onehot(self, X):\n n_instances, n_features = X.shape\n categorical_features = [self.metadata[\"features\"][i][0] for i in range(n_features) if self.metadata[\"features\"][i][1]!= \"numeric\"]\n X_out = np.ones(n_instances) # start with column of 1's\n\n for i in range(n_features):\n feature = X.columns[i]\n if feature in categorical_features:\n new_cols = label_binarize(X[feature], classes_=self.metadata[\"features\"][i][1])\n else:\n new_cols = np.array(X[feature]) \n\n X_out = np.column_stack((X_out,new_cols))\n \n return X_out", "title": "" }, { "docid": "ba2957b61c0dbf587e9cf153004093d8", "score": 
"0.5849645", "text": "def one_hot(self, arr, classes):\n if not type(arr) is np.ndarray:\n raise Exception(\"array must be numpy array!\")\n return np.stack([(arr==i)*1.0 for i in range(classes)], axis=-1)", "title": "" }, { "docid": "e0ae924634c54ebe31404bec80e7d051", "score": "0.5834155", "text": "def one_hot(src, num_classes=None, dtype=None):\n\n src = src.to(torch.long)\n src = src.unsqueeze(-1) if src.dim() == 1 else src\n assert src.dim() == 2\n\n if num_classes is None:\n num_classes = src.max(dim=0)[0] + 1\n else:\n if torch.is_tensor(num_classes):\n num_classes = num_classes.tolist()\n\n num_classes = torch.tensor(\n repeat(num_classes, length=src.size(1)),\n dtype=torch.long,\n device=src.device)\n\n if src.size(1) > 1:\n zero = torch.tensor([0], device=src.device)\n src = src + torch.cat([zero, torch.cumsum(num_classes, 0)[:-1]])\n\n size = src.size(0), num_classes.sum()\n out = torch.zeros(size, dtype=dtype, device=src.device)\n out.scatter_(1, src, 1)\n return out", "title": "" }, { "docid": "7f5e8324967e606f509f78297ca07c9b", "score": "0.5830481", "text": "def one_hot(labels, classes=None):\n encoded = k.utils.to_categorical(labels, classes)\n return encoded", "title": "" }, { "docid": "ab013bde3e69d37a88a702b9c1df63da", "score": "0.5818954", "text": "def one_hot(labels, num_classes: int, dtype: torch.dtype = torch.float):\n assert labels.dim() > 0, \"labels should have dim of 1 or more.\"\n\n # if 1D, add singelton dim at the end\n if labels.dim() == 1:\n labels = labels.view(-1, 1)\n\n sh = list(labels.shape)\n\n assert sh[1] == 1, \"labels should have a channel with length equals to one.\"\n sh[1] = num_classes\n\n o = torch.zeros(size=sh, dtype=dtype, device=labels.device)\n labels = o.scatter_(dim=1, index=labels.long(), value=1)\n\n return labels", "title": "" }, { "docid": "be54c37c118e374f48e90b0c0aaf780b", "score": "0.58166665", "text": "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad = True)", "title": "" }, { "docid": "0da2339aeeeb0909ba55ad5547114f3b", "score": "0.58153254", "text": "def onehot(index, tot_classes):\r\n '''new_idx = 0\r\n if index >= tot_classes-1:\r\n new_idx = tot_classes-1\r\n else:\r\n new_idx = index'''\r\n onehot = np.zeros(tot_classes)\r\n onehot[index] = 1.0\r\n # print(onehot)\r\n return onehot", "title": "" }, { "docid": "de1bb709203837b2d5068c4e881da79e", "score": "0.57861924", "text": "def predict(X, parameters, problem_type): \n m = X.shape[1]\n yhat,_ = forwardprop(X, parameters, problem_type)\n preds = np.zeros(yhat.shape)\n if problem_type == 'regression':\n preds = yhat\n elif problem_type == 'binary':\n for i in range(0, yhat.shape[1]):\n if yhat[0,i] > 0.5:\n preds[0,i] = 1\n else:\n preds[0,i] = 0 \n elif problem_type == 'multiclass':\n max_idxs = np.argmax(yhat, axis=0)\n for i in range(m):\n imax = max_idxs[i]\n preds[imax,i] = 1\n return preds", "title": "" }, { "docid": "bfd1ee683ad7bbe1a1d20e3f65a8e169", "score": "0.5779352", "text": "def classifier(self, xs):\n # use the trained model q(y|x) = categorical(alpha(x))\n # compute all class probabilities for the image(s)\n alpha = self.encoder_y.forward(xs)\n\n # get the index (digit) that corresponds to\n # the maximum predicted class probability\n res, ind = torch.topk(alpha, 1)\n\n # convert the digit(s) to one-hot tensor(s)\n ys = xs.new_zeros(alpha.size())\n ys = ys.scatter_(1, ind, 1.0)\n return ys", "title": "" }, { "docid": 
"b406f16ac8c6a3598961b832672b35a6", "score": "0.5777754", "text": "def create_model(X):\n\n # Building a simple stack of 2 convolution layers with a ReLU activation\n model = Sequential()\n\n model.add(Conv2D(32, (3, 3), input_shape=X.shape[1:]))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n # On top adding two fully-connected layers. We end the model with a single\n # unit and a sigmoid function, which is good for binary classification\n model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\n model.add(Dense(64))\n model.add(Activation('relu'))\n\n model.add(Dropout(0.5)) # using dropout to decrease over-fitting\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n adam = optimizers.Adam(learning_rate=0.0005)\n\n model.compile(loss='binary_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n\n return model", "title": "" }, { "docid": "9a1ca06ccac294102c9dbaead2625a51", "score": "0.57767123", "text": "def classify_multi(classifiers, X_train, y_train, X_test, y_test=None, groups=None):\n \n num_samples = X_test.shape[0]\n num_classes = 9\n num_classifiers = len(classifiers)\n print(num_samples, num_classes, num_classifiers)\n probabilities = np.zeros((num_samples, num_classes, num_classifiers))\n predictions = np.zeros((num_samples, num_classifiers))\n scores = np.zeros(num_classifiers)\n \n for i in range(num_classifiers):\n pred, score, clf, proba = classify(classifiers[i], X_train, y_train, X_test, y_test, groups)\n predictions[:, i] = pred\n scores[i] = score\n probabilities[:, :, i] = proba\n \n return predictions, scores, classifiers, probabilities", "title": "" }, { "docid": "e00f0b224145e21a7dc05dfafedc0cd0", "score": "0.5774927", "text": "def build_nn():\n nn = Sequential()\n nn.add(Dense(500, activation='relu', input_shape=(N_FEATURES,)))\n nn.add(Dense(150, activation='relu'))\n nn.add(Dense(N_CLASSES, activation='softmax'))\n nn.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n return nn", "title": "" }, { "docid": "3fef506af3cb04c7dfafda0b607467b1", "score": "0.57730526", "text": "def predict(self, X):\n if len(self.classes_) == 2:\n y = self.predict_proba(X)\n res = np.empty(y.shape[0], dtype=self.classes_.dtype)\n res[y[:, 1] <= 0.5] = self.classes_[0]\n res[y[:, 1] >= 0.5] = self.classes_[1]\n return res\n else:\n return self.multi_.predict(X)", "title": "" }, { "docid": "fc5d85bd622f6004d1f2e92d8f606a66", "score": "0.57614595", "text": "def make_classification(n_samples=100, n_features=20, n_informative=2,\n n_classes=2, shared_support=False, random_state=None,\n w_scale=1., include_intercept=False):\n if isinstance(random_state, int):\n rng = np.random.RandomState(random_state)\n else:\n rng = random_state\n n_not_informative = n_features - n_informative\n\n X = rng.randn(n_samples, n_features)\n X -= X.mean(axis=-1, keepdims=True)\n X /= X.std(axis=-1, keepdims=True)\n\n if n_classes > 2:\n w = rng.randn(n_features, n_classes)\n if include_intercept:\n intercept = rng.randn(1, n_classes)\n intercept -= intercept.max()\n else:\n intercept = np.zeros((1, n_classes))\n if n_not_informative > 0:\n if shared_support:\n idxs = rng.permutation(n_features)[:n_not_informative]\n w[idxs] = 0.\n else:\n for ii in range(n_classes):\n idxs = rng.permutation(n_features)[:n_not_informative]\n w[idxs, ii * np.ones_like(idxs, dtype=int)] = 0.\n else:\n w = 
rng.randn(n_features, 1)\n if include_intercept:\n intercept = rng.randn(1, 1)\n else:\n intercept = np.zeros((1, 1))\n if n_not_informative > 0:\n idxs = rng.permutation(n_features)[:n_not_informative]\n w[idxs] = 0.\n w *= w_scale\n intercept *= w_scale * 2.\n\n log_p = X.dot(w)\n if include_intercept:\n log_p += intercept\n if n_classes > 2:\n p = softmax(log_p)\n y = np.array([rng.multinomial(1, pi) for pi in p])\n y = y.argmax(axis=-1)\n else:\n p = sigmoid(np.squeeze(log_p))\n y = np.array([rng.binomial(1, pi) for pi in p])\n\n return X, y, w.T, intercept", "title": "" }, { "docid": "fb1abbedde03f2929c12fe2acd586670", "score": "0.5759762", "text": "def one_hot(self, label):\n x = np.zeros(self.num_classes)\n x[label] = 1\n return x", "title": "" }, { "docid": "540e9ae5dcb597fd4df0346ddce3caf6", "score": "0.57585615", "text": "def _one_hot(self, labels, classes, value=1):\n\n one_hot = torch.zeros(labels.size(0), classes)\n #labels and value_added size must match\n labels = labels.view(labels.size(0), -1)\n value_added = torch.Tensor(labels.size(0), 1).fill_(value)\n value_added = value_added.to(labels.device)\n one_hot = one_hot.to(labels.device)\n\n one_hot.scatter_add_(1, labels, value_added)\n return one_hot", "title": "" }, { "docid": "207fcde1c9d116085473f917ebdbe427", "score": "0.57509923", "text": "def one_hot_encoding(labels, num_classes):\n y = torch.eye(num_classes) # [D,D]\n return y[labels] # [N,D]", "title": "" }, { "docid": "14b695d10e8f059bef9d18166de6dad2", "score": "0.5747169", "text": "def _predict(self, X):\n rng = check_random_state(self.random_state)\n return np.array(\n [\n self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]\n for prob in self.predict_proba(X)\n ]\n )", "title": "" }, { "docid": "2824a0a410cae4c51e27e166bd89b73a", "score": "0.5743141", "text": "def one_hot_representation(y_data, number_classes, class_weights=None):\n if isinstance(y_data,list):\n y_data = np.asarray(y_data)\n shape = y_data.shape\n\n if class_weights is not None:\n result = np.zeros((shape[0],) + shape[1:]+(2*number_classes,))\n for i in range(number_classes):\n index = np.where(y_data == i)\n one_hot_index = (index[0], ) + index[1:] + (i,)\n one_hot_index_w = (index[0], ) + index[1:] + (i+number_classes,)\n result[one_hot_index] = 1\n result[one_hot_index_w] = class_weights[i]\n\n else:\n result = np.zeros((shape[0],) + shape[1:]+(number_classes,))\n for i in range(number_classes):\n index = np.where(y_data == i)\n one_hot_index = (index[0], ) + index[1:] + (i,)\n result[one_hot_index] = 1\n return np.asarray(result, dtype=np.float64)", "title": "" }, { "docid": "f320a3a3b4a2914ebcb4cc374280dfa9", "score": "0.57384735", "text": "def predict(W, X):\n return nn.sigmoid(np.dot(X, W))", "title": "" }, { "docid": "621cdf00784b1a1c01789f6ee5f51c38", "score": "0.57375467", "text": "def train(self):\n\n print('pre train')\n print(' training data shape: ', self.X_train.shape)\n print(' training labels shape: ', self.y_train.shape)\n print(' unlabeled data shape: ', self.X_nolabel.shape)\n\n # fit model with labeled data\n self.model.fit(self.X_train, self.y_train)\n\n # predict proba with data with no label\n probs = self.model.predict_proba(self.X_nolabel)\n\n # pull out prob of class 1\n probs = [prob[1] for prob in probs]\n\n # sort by entropy and pull out uncertain pairs\n if self.n_uncertain:\n ent_sorted = sorted([(self._entropy(prob), ind) for prob, ind in zip(probs, self.X_nolabel.index)])\n uncertain = [ind for _, ind in ent_sorted[-self.n_uncertain:]]\n\n # sort 
by prob and pull out certain pairs such that the class balance of the\n # initial training data is preserved\n prob_sorted = sorted([(prob, ind) for prob, ind in zip(probs, self.X_nolabel.index)])\n\n #n_certain_false low probability pairs and n_certain_true high prob pairs are pulled out\n certain = [ind for _, ind in prob_sorted[:self.n_certain_false]] + [ind for _, ind in prob_sorted[-self.n_certain_true:]]\n\n # create df_uncertain pairs\n if self.n_uncertain:\n self.df_uncertain = self.X_nolabel.loc[uncertain, : ]\n\n # check if certain and uncertain intersect\n # this happens in the case where our most confident positive cases still have very low prob\n # in this situation we don't add the most certain cases - instead we rely on the clerical_review function\n cardinality = len([i for i in uncertain if i in set(certain)])\n if cardinality == 0:\n # add certain pairs to training data\n df_certain = self.X_nolabel.loc[certain, : ]\n self.X_train = self.X_train.append(df_certain)\n y_certain = pd.Series(data=self.model.predict(df_certain), index=df_certain.index)\n self.y_train = self.y_train.append(y_certain)\n\n # remove certain pairs from nolabel data\n self.X_nolabel.drop(df_certain.index, inplace=True)\n\n # if prop of most confident pair is very low instruct user to perform clerical review\n if cardinality > 0:\n print('prob of most confident examples was very low!')\n print('no pairs were added to traning data')\n print('use clerical_review function')\n\n print('post train')\n print(' training data shape: ', self.X_train.shape)\n print(' training labels shape: ', self.y_train.shape)\n print(' unlabeled data shape: ', self.X_nolabel.shape)\n\n # notify user if active learning is set to false\n if not self.n_uncertain:\n print('n_uncertain == False')\n print('if clerical review is required set n_uncertain to number of uncertain pairs to label (int)')", "title": "" }, { "docid": "75cd1b36c22f6781699edff0aaaa7f1d", "score": "0.57188284", "text": "def labels_to_one_hots(batch, num_classes=10):\r\n one_hot_batch = torch.zeros(batch.size(0), num_classes).to(batch.device)\r\n for i in range(batch.size(0)):\r\n one_hot_batch[i, int(batch[i].data.cpu().item())] = 1\r\n return one_hot_batch", "title": "" }, { "docid": "e1e0115ecf23b9d62cacc7cb70016b49", "score": "0.57184744", "text": "def nn(data_c, data_n, algorithm='auto'):\n\n # One-hot enconding\n one_hot = pd.get_dummies(pd.DataFrame(data_n))\n data = np.hstack((data_c, one_hot.values))\n nbrs = NearestNeighbors(n_neighbors=2, algorithm=algorithm).fit(data)\n return nbrs.kneighbors(data)[1]", "title": "" }, { "docid": "267504588b38f5ecf8d68ea9b87648da", "score": "0.5716522", "text": "def indices_to_one_hot(data, nb_classes):\r\n targets = np.array(data).reshape(-1)\r\n\r\n y = np.eye(nb_classes)[targets]\r\n\r\n return y", "title": "" }, { "docid": "b39921464b40e4fe361e0f08a307160b", "score": "0.5715809", "text": "def convert_to_one_hot_labels(input_, target):\n tmp = input_.new(target.size(0), max(0, target.max()) + 1).fill_(0)\n tmp.scatter_(1, target.view(-1, 1), 1.0)\n return tmp.long()", "title": "" }, { "docid": "b8ff041a34aacf758d0b690f358c96f4", "score": "0.570764", "text": "def test_mnnb():\n\n for X in [X2, scipy.sparse.csr_matrix(X2)]:\n # Check the ability to predict the learning set.\n clf = MultinomialNB()\n y_pred = clf.fit(X, y2).predict(X)\n\n assert_array_equal(y_pred, y2)\n\n # Verify that np.log(clf.predict_proba(X)) gives the same results as\n # clf.predict_log_proba(X)\n y_pred_proba = clf.predict_proba(X)\n 
y_pred_log_proba = clf.predict_log_proba(X)\n assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)", "title": "" }, { "docid": "0f57eb624979d8af3367e9ab29bf4255", "score": "0.5706311", "text": "def classify(X, parameters):\n ### CODE HERE\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n x, _ = layer_forward(X, W1, b1, activation=\"tanh\")\n # assert x.shape == (500,1189)\n YPred, _ = layer_forward(x, W2, b2, activation=\"sigmoid\")\n YPred = np.round(YPred)\n return YPred", "title": "" }, { "docid": "0df7a850aae00ea2f158feaeb0b03b6c", "score": "0.57023484", "text": "def cls_predictor(num_anchors, num_classes):\n return nn.Conv2D(num_anchors*(num_classes+1), kernel_size=3, padding=1)", "title": "" }, { "docid": "939dd799b9fba91d5874dda9c4998e86", "score": "0.5700346", "text": "def one_hot_embedding(labels, num_classes):\n if isinstance(num_classes, int):\n num_classes = [num_classes]\n\n one_hots = []\n for i in range(len(num_classes)):\n num_class = num_classes[i]\n\n if num_class not in one_hot_embedding.Ys:\n one_hot_embedding.Ys[num_class] = cuda(torch.eye(num_class))\n\n y = one_hot_embedding.Ys[num_class]\n one_hots.append(y[labels[:, i]])\n\n return torch.cat(one_hots, dim=1)", "title": "" }, { "docid": "99c0116bf4ac9a6e0f79533cee96564a", "score": "0.57002056", "text": "def predict(self, X, **kwargs):\n C = kwargs.pop('C', None)\n\n if C is None:\n C = np.ones((self.n_classes, self.n_classes))\n np.fill_diagonal(C, 0)\n\n P = self.predict_proba(X, normalize=True)\n return self._rand_arg_min(np.dot(P, C), axis=1)", "title": "" }, { "docid": "35e688b7ea6de148642f2fb2c6c0fd47", "score": "0.5695077", "text": "def one_hot_(y, num_classes=None):\r\n\r\n y = np.array(y, dtype='int')\r\n\r\n input_shape = y.shape\r\n\r\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\r\n\r\n input_shape = tuple(input_shape[:-1])\r\n\r\n y = y.ravel()\r\n\r\n if not num_classes:\r\n\r\n num_classes = np.max(y) + 1\r\n\r\n n = y.shape[0]\r\n\r\n categorical = np.zeros((n, num_classes), dtype=np.float32)\r\n\r\n categorical[np.arange(n), y] = 1\r\n\r\n output_shape = input_shape + (num_classes,)\r\n\r\n categorical = np.reshape(categorical, output_shape)\r\n\r\n return categorical", "title": "" }, { "docid": "a0db8181196ca60152f82417b414fe27", "score": "0.5691201", "text": "def to_onehot(label: torch.tensor, num_classes: int) -> torch.tensor:\n return torch.eye(num_classes)[label].to(device)", "title": "" }, { "docid": "69e3e4dabc6cd9f97f951cf48d6c4784", "score": "0.56874675", "text": "def one_hot(n, y):\n if isinstance(y, int):\n ret = zeros(n)\n ret[y] = 1.0\n return ret\n elif isinstance(y, list):\n ret = zeros((len(y), n))\n ret[np.arange(len(y)),y] = 1.0\n return ret\n else:\n raise ValueError(\"Expected an int or list got: \" + y)", "title": "" }, { "docid": "1a3e9272fc0e6965a43dd5077069e652", "score": "0.56783366", "text": "def encode_onehot(self):\n self.encoding = np.array([ \n 1. if self.name == _c else 0. 
\\\n for _c in self.OBJ_CLASSES \n ])", "title": "" }, { "docid": "151758e59f43f678c9a13be3f03b9bf6", "score": "0.5678076", "text": "def one_hot_encode(x):\n #global once\n # TODO: Implement Function\n #if (once == False):\n #encoder.fit(x)\n #once = True\n\n #one_hor_labels = encoder.transform(x)\n #num_labels = np.max(x) + 1\n return np.eye(10)[x]", "title": "" }, { "docid": "d5e726c5f1efc9ac13585eef63ad2967", "score": "0.5677843", "text": "def classify(self,X):\n # TODO implement", "title": "" }, { "docid": "356e1fc5ef55e0d30e7287ac3850ebe9", "score": "0.56740624", "text": "def _one_hot(self, labels, classes, value=1):\r\n\r\n one_hot = torch.zeros(labels.size(0), classes)\r\n\r\n # labels and value_added size must match\r\n labels = labels.view(labels.size(0), -1)\r\n value_added = torch.Tensor(labels.size(0), 1).fill_(value)\r\n\r\n value_added = value_added.to(labels.device)\r\n one_hot = one_hot.to(labels.device)\r\n\r\n one_hot.scatter_add_(1, labels, value_added)\r\n\r\n return one_hot", "title": "" }, { "docid": "2ec5c9ea1d26658bb04b6fe8ad6afac7", "score": "0.5664691", "text": "def train_multinomial_nb(self):\n self.get_uniq_vocab_from_training_vocab()\n docs_count = self.get_training_docs_count()\n N = 0\n for k in docs_count:\n N += docs_count[k]\n\n for clas in self.training_classes:\n self.prior[clas] = (docs_count[clas] * 1.0)/N\n Tct = self.get_text_count_for_class(clas)\n\n sum_tct = 0\n for k, v in Tct.iteritems():\n sum_tct += v\n denominator = sum_tct + len(self.Vocab)\n\n for t in self.Vocab:\n if t not in self.condProb:\n self.condProb[t] = {}\n if t not in Tct:\n Tct[t] = 0\n self.condProb[t][clas] = (Tct[t] + 1.0)/denominator", "title": "" }, { "docid": "8ed90d96d1cf1815d7bbb612aa1e185e", "score": "0.56646264", "text": "def test_confusion_matrix_multiclass():\n y_true, y_pred, _ = make_prediction(binary=False)\n\n # compute confusion matrix with default labels introspection\n cm = confusion_matrix(y_true, y_pred)\n assert_array_equal(cm, [[23, 2, 0],\n [5, 5, 20],\n [0, 2, 18]])\n\n # compute confusion matrix with explicit label ordering\n cm = confusion_matrix(y_true, y_pred, labels=[0, 2, 1])\n assert_array_equal(cm, [[23, 0, 2],\n [0, 18, 2],\n [5, 20, 5]])", "title": "" }, { "docid": "90f087c5360a6582ac439e1041d5aec1", "score": "0.5659591", "text": "def __init__(self, num_classes=1000):\n super(AlexNet, self).__init__()\n # input size should be : (b x 3 x 227 x 227)\n # The image in the original paper states that width and height are 224 pixels, but\n # the dimensions after first convolution layer do not lead to 55 x 55.\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), # (b x 64 x 55 x 55)\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(kernel_size=3, stride=2), # (b x 64 x 27 x 27)\n nn.Conv2d(64, 192, kernel_size=5, padding=2), # (b x 192 x 27 x 27)\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(192),\n nn.MaxPool2d(kernel_size=3, stride=2), # (b x 192 x 13 x 13)\n nn.Conv2d(192, 384, kernel_size=3, padding=1), # (b x 384 x 13 x 13)\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), # (b x 256 x 13 x 13)\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1), # (b x 256 x 13 x 13)\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # (b x 256 x 6 x 6)\n )\n # classifier is just a name for linear layers\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n 
nn.ReLU(inplace=True),\n nn.Linear(4096, num_classes),\n )", "title": "" }, { "docid": "752fe8fe0227dded638f5814be572265", "score": "0.56571627", "text": "def get_one_hot(targets, nb_classes):\n res = np.eye(nb_classes)[np.array(targets).reshape(-1)]\n return res.reshape(list(targets.shape)+[nb_classes])", "title": "" }, { "docid": "d21d517385cbb7aac567aa7d7e8edb57", "score": "0.56425714", "text": "def make_basic_picklable_cnn(nb_filters=64, nb_classes=10,\n input_shape=(None, 28, 28, 1)):\n layers = [Conv2D(nb_filters, (8, 8), (2, 2), \"SAME\"),\n ReLU(),\n Conv2D(nb_filters * 2, (6, 6), (2, 2), \"VALID\"),\n ReLU(),\n Conv2D(nb_filters * 2, (5, 5), (1, 1), \"VALID\"),\n ReLU(),\n Flatten(),\n Linear(nb_classes),\n Softmax()]\n model = MLP(layers, input_shape)\n return model", "title": "" }, { "docid": "d643b3276171127ef85c88fb878825dd", "score": "0.5641369", "text": "def create_model_C():\n\n model = Sequential()\n model.add(Convolution2D(32, (3,3), activation='relu', input_shape=(128,128,3), padding='same'))\n model.add(MaxPooling2D((2,2)))\n model.add(Convolution2D(64, (3,3), activation='relu'))\n model.add(Convolution2D(64, (3,3), activation='relu'))\n model.add(MaxPooling2D((2,2)))\n model.add(Convolution2D(64, (3,3), activation='relu'))\n model.add(Convolution2D(64, (3,3), activation='relu'))\n model.add(MaxPooling2D((2,2)))\n model.add(Convolution2D(128, (3,3), activation='relu'))\n model.add(Convolution2D(128, (3,3), activation='relu'))\n model.add(MaxPooling2D((2,2)))\n model.add(Convolution2D(256, (3,3), activation='relu'))\n model.add(Flatten())\n model.add(Dense(1024, activation='relu'))\n model.add(Dense(42))\n \n return model", "title": "" }, { "docid": "a73bf8d5fa97b4fe494e67aafa4eadd3", "score": "0.56381464", "text": "def create_model(X, args):\n hiddens = [X]\n for i in xrange(args.layers - 1):\n hiddens.append(tf.contrib.layers.fully_connected(hiddens[-1], args.layer_size, activation_fn=tf.nn.relu))\n logits = tf.contrib.layers.fully_connected(hiddens[-1], N_BITS, activation_fn=tf.nn.sigmoid)\n return logits", "title": "" }, { "docid": "b1a2697c22df6b2160041dca8f96258a", "score": "0.5633272", "text": "def one_hot_encoded(class_numbers, num_classes=None):\n\n # Find the number of classes if None is provided.\n # Assumes the lowest class-number is zero.\n if num_classes is None:\n num_classes = np.max(class_numbers) + 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]", "title": "" }, { "docid": "b1a2697c22df6b2160041dca8f96258a", "score": "0.5633272", "text": "def one_hot_encoded(class_numbers, num_classes=None):\n\n # Find the number of classes if None is provided.\n # Assumes the lowest class-number is zero.\n if num_classes is None:\n num_classes = np.max(class_numbers) + 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]", "title": "" }, { "docid": "85216c3a59f0b204bc28ef764d21203e", "score": "0.56312424", "text": "def convert_onehot(label, num_classes):\n one_hot = torch.zeros(label.shape[0], num_classes).scatter_(1, label, 1)\n return one_hot", "title": "" }, { "docid": "d1ded9b159363e988daf6b90bc77c950", "score": "0.56294477", "text": "def smote_nc(X, y):\r\n sample = SMOTENC(categorical_features=[0, 1], random_state=42)\r\n X, y = sample.fit_resample(X, y)\r\n print('after balancing:', X.shape)\r\n return X, y", "title": "" }, { "docid": "79a57c80f24ca8aed7fdf62a68ec90e0", "score": "0.5629099", "text": "def fcnn_classifier_tox21(n_x, n_y):\n classifier = keras.Sequential([\n keras.layers.InputLayer(input_shape=(n_x,)),\n keras.layers.Dense(n_x, 
activation=tf.nn.relu),\n keras.layers.Dropout(0.25),\n keras.layers.Dense(n_y, activation=tf.nn.sigmoid)\n ])\n\n classifier.compile(optimizer=tf.train.AdamOptimizer(),\n loss='binary_crossentropy',\n metrics=[f1_score])\n\n return classifier", "title": "" }, { "docid": "a8c11a3343f30c6720c5a71d73e2fdc1", "score": "0.56229454", "text": "def label_to_one_hot(targets: torch.Tensor, n_class):\n # batch_size, _, h, w = targets.size()\n # res = torch.zeros([batch_size, nlabels, h, w])\n targets = targets.squeeze(dim=1)\n zeros = torch.zeros(targets.shape).long().to(targets.device)\n\n # del 255.\n targets_ignore = targets > 20\n # print(targets_ignore)\n targets = torch.where(targets <= 20, targets, zeros)\n\n one_hot = torch.nn.functional.one_hot(targets, num_classes=n_class)\n one_hot[targets_ignore] = 0\n # print(one_hot[targets_ignore])\n one_hot = one_hot.transpose(3, 2)\n one_hot = one_hot.transpose(2, 1)\n # print(one_hot.size())\n return one_hot.float()", "title": "" }, { "docid": "28e26ed0205ad641249fae19049f9628", "score": "0.5614009", "text": "def predict(self, X):\r\n predictions = []\r\n if(len(X) == 1):\r\n predictions.append(np.argmax(self.predict_proba(X)))\r\n elif(len(X) > 1):\r\n # Add many predictions\r\n for i in range(len(X)):\r\n relevance_scores = self.predict_proba(X[i])\r\n predictions.append(np.argmax(relevance_scores))\r\n # print(np.argmax(relevance_scores))\r\n print('Ensemble Prediction:')\r\n print(np.array(predictions))\r\n return np.array(predictions) #, one_hot\r", "title": "" }, { "docid": "3238c34a3ea1e57acbdec14a88038d88", "score": "0.56099325", "text": "def binary_classification_model(features, labels, mode, params: utils.Bunch):\n params = utils.Bunch(**params)\n optimizer = params.get('optimizer', 'ftrl')\n l1_reg = params.get('l1_reg', 0.0)\n l2_reg = params.get('l2_reg', 0.0)\n tf.set_random_seed(123)\n labels = tf.cast(labels, tf.float32)\n epsilon = params.perturb_norm_bound\n norm_order = params.get('perturb_norm_order', 2)\n train_perturb_frac = params.train_perturb_frac\n test_perturb_frac = params.test_perturb_frac\n # do the various feature transforms according to the\n # 'feature_column' param, so now we have the feature-vector\n # that we will do computations on.\n x = tf.feature_column.input_layer(features, params.feature_columns)\n # for units in params['hidden_units']:\n # net = tf.layers.dense(net, units=units, activation=tf.nn.relu)\n\n # Compute logits (1 per class).\n #logits = tf.layers.dense(net, params['n_classes'], activation=None)\n #logits = tf.layers.dense(net, 1, activation=None, name='dense')\n dense = tf.layers.Dense(1, activation=None,\n kernel_initializer=\\\n tf.keras.initializers.zeros(),\n #tf.keras.initializers.RandomNormal(seed=123),\n bias_initializer= \\\n tf.keras.initializers.zeros())\n #tf.keras.initializers.RandomNormal(seed=123))\n\n if len(dense.trainable_variables) == 0:\n dense(x) # to force the kernel initialization\n # this is the \"kernel\" i.e. 
weights, does not include bias\n coefs = dense.trainable_variables[0]\n bias = dense.trainable_variables[1][0]\n perturb_frac = train_perturb_frac if mode == tf.estimator.ModeKeys.TRAIN \\\n else test_perturb_frac\n x_perturbed, _ = RobustLogisticModel.perturb_continuous(\n x, labels, coefs,\n norm_bound=epsilon,\n norm_order=norm_order,\n perturb_frac=perturb_frac,\n seed=123)\n logits = dense(x_perturbed)\n if params.activation == 'sigmoid':\n predictions = tf.sigmoid(logits)\n elif params.activation == 'sign':\n predictions = tf.maximum(0.0, tf.sign(logits))\n else: # assume relu\n predictions = tf.nn.relu(logits)\n labels = tf.reshape(labels, [-1,1])\n # Compute predictions.\n predicted_classes = tf.maximum(tf.sign(predictions - 0.5), 0)\n\n # if mode == tf.estimator.ModeKeys.PREDICT:\n # predictions = {\n # 'class_ids': predicted_classes[:, tf.newaxis],\n # 'probabilities': tf.nn.softmax(logits), # not really used\n # 'logits': logits,\n # }\n # return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Compute loss.\n if params.activation == 'sigmoid':\n loss = tf.reduce_mean(\n tf.keras.backend.binary_crossentropy(target=labels,\n output=logits,\n from_logits=True))\n elif params.activation == 'sign':\n loss = tf.reduce_mean(- (2*labels - 1) * logits )\n else:\n raise Exception(f'loss not known for activation {params.activation}')\n\n if l1_reg > 0 and optimizer != 'ftrl':\n loss = loss + l1_reg * tf.norm(coefs, ord=1)\n if l2_reg > 0 and optimizer != 'ftrl':\n loss = loss + l2_reg * tf.sqrt(tf.maximum(0.0, tf.nn.l2_loss(coefs)))\n\n adv_reg_lambda = params.get('adv_reg_lambda', 0.0)\n if adv_reg_lambda and perturb_frac > 0.0:\n clean_logits = dense(x)\n clean_loss = tf.reduce_mean(\n tf.keras.backend.binary_crossentropy(target=labels,\n output=clean_logits,\n from_logits=True))\n loss = clean_loss + adv_reg_lambda * loss\n\n# Compute evaluation metrics.\n accuracy = tf.metrics.accuracy(labels=labels,\n predictions=predicted_classes,\n name='acc_op')\n\n auc = tf.metrics.auc(labels=labels, predictions=predictions, name = 'auc-op')\n\n\n\n # add metrics etc for tensorboard\n tf.summary.scalar('accuracy', accuracy[1])\n tf.summary.scalar('auc', auc[1])\n tf.summary.scalar('loss', loss)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n # axiomatic attribution (Integrated Grads)\n feat_val_attribs = attribution.logistic_attribution(x, coefs, bias)\n feat_val_corr_stats = attribution.label_corr_stats(x, labels)\n av_attribs = tf.reduce_mean(feat_val_attribs, axis=0)\n attrib_entropy = tf_entropy(feat_val_attribs)\n num_high_attribs = num_above_relative_threshold(\n feat_val_attribs, thresh=params.get('thresh', 0.1))\n av_attrib_entropy = tf.metrics.mean(attrib_entropy)\n av_high_attribs = tf.metrics.mean(num_high_attribs)\n mean_attribs = tf.metrics.mean_tensor(av_attribs, name='attrib')\n xy_av = tf.metrics.mean_tensor(feat_val_corr_stats.xy, name='xy_av')\n x_av = tf.metrics.mean_tensor(feat_val_corr_stats.x, name='x_av')\n y_av = tf.metrics.mean_tensor(feat_val_corr_stats.y, name='y_av')\n xsq_av = tf.metrics.mean_tensor(feat_val_corr_stats.xsq, name='xsq_av')\n ysq_av = tf.metrics.mean_tensor(feat_val_corr_stats.ysq, name='ysq_av')\n\n # ad-hoc attribution (AFVI)\n afvi = attribution.logistic_afvi(x, coefs, bias)\n mean_afvi = tf.metrics.mean_tensor(afvi, name='afvi')\n\n metrics = dict(accuracy=accuracy,\n auc=auc,\n attrib_ent=av_attrib_entropy,\n high_attribs=av_high_attribs,\n attrib=mean_attribs,\n afvi=mean_afvi,\n xy_av=xy_av,\n x_av=x_av,\n y_av=y_av,\n 
xsq_av=xsq_av,\n ysq_av=ysq_av)\n\n # the histograms don't work in eval mode??\n tf.summary.histogram('attrib', mean_attribs[1])\n tf.summary.histogram('afvi', mean_afvi[1])\n\n return tf.estimator.EstimatorSpec(\n mode, loss=loss, eval_metric_ops=metrics)\n\n # Create training op.\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n #\n if optimizer == 'adam':\n loss_optimizer = tf.train.AdamOptimizer(learning_rate=params.lr)\n elif optimizer == 'ftrl':\n loss_optimizer = tf.train.FtrlOptimizer(learning_rate=params.lr,\n l1_regularization_strength=l1_reg,\n l2_regularization_strength=l2_reg)\n elif optimizer == 'adagrad':\n loss_optimizer = tf.train.AdagradOptimizer(learning_rate=params.lr)\n elif optimizer == 'sgd':\n loss_optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=params.lr)\n else:\n raise Exception(f\"Unknown optimizer: {optimizer}\")\n\n train_op = loss_optimizer.minimize(loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss,\n train_op=train_op)", "title": "" }, { "docid": "6d8325270f0df1dd07835e183875efaf", "score": "0.5609694", "text": "def model_binary():\n\n \n classifier = Sequential()\n classifier.add(Conv2D(32, (3, 3), input_shape = (120, 120, 3), activation = 'relu'))\n classifier.add(MaxPooling2D(pool_size = (2, 2), data_format=\"channels_last\"))\n classifier.add(BatchNormalization(axis = -1))\n classifier.add(Dropout(0.2))\n classifier.add(Conv2D(32, (3, 3), activation = 'relu'))\n classifier.add(MaxPooling2D(pool_size = (2, 2), data_format=\"channels_last\"))\n classifier.add(BatchNormalization(axis = -1))\n classifier.add(Dropout(0.2))\n classifier.add(Flatten())\n classifier.add(Dense(activation = 'relu', units=512))\n classifier.add(BatchNormalization(axis = -1))\n classifier.add(Dropout(0.2))\n classifier.add(Dense(activation = 'relu', units=256))\n classifier.add(BatchNormalization(axis = -1))\n classifier.add(Dropout(0.2))\n classifier.add(Dense(activation = 'sigmoid', units=2))\n classifier.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])\n classifier.summary()\n \n return classifier", "title": "" }, { "docid": "e39aba792bdbc61d67ad34335d56873e", "score": "0.56095546", "text": "def one_hot_embeding(labels, num_classes):\n\n y = torch.eye(num_classes) # [D, D]\n return y[labels] # [N, D]", "title": "" }, { "docid": "9f27790f7dd8e61a4160f7fd16d0a169", "score": "0.56018335", "text": "def one_hot(labels):\n\n\t# Find amount of classes, length of one-hot vector\n\tn_classes = len(set(labels))\n \n\tout = []\n\n\tfor label in labels:\n\t\tone_hot = np.zeros(n_classes)\n\t\t# This works because input labels are integers from 0 to 9\n\t\tone_hot[label] = 1\n\t\tout.append(one_hot)\n\n\treturn out", "title": "" }, { "docid": "1e5ba491fc25ef15d20e755678603d6c", "score": "0.5596548", "text": "def predict(self, X):\n p = self.predict_proba(X)\n idx = np.argmax(p, axis = 1)\n res = [self.classes[i] for i in idx]\n return np.array(res)", "title": "" }, { "docid": "62220b93cd2af32be328e465148f0410", "score": "0.55944264", "text": "def define_mlp_model(n_input):\n model = Sequential()\n num_neurons = 256\n\n # hidden layer\n model.add(Dense(units=num_neurons,\n input_dim=n_input,\n kernel_initializer='he_uniform',\n activation = 'relu'))\n model.add(Dense(units=num_neurons,\n activation = 'relu')) \n model.add(Dropout(0.3))\n model.add(Dense(units=num_neurons,\n activation = 'relu')) \n model.add(Dropout(0.3))\n \n # output layer\n model.add(Dense(units=1,\n activation = 
'sigmoid'))\n model.summary()\n\n metrics = [FalseNegatives(name='fn'),\n FalsePositives(name='fp'),\n TrueNegatives(name='tn'),\n TruePositives(name='tp'),\n Precision(name='precision'),\n Recall(name='recall'),\n ]\n #sgd = SGD(lr=0.001, decay=1e-7, momentum=.9)\n adam = Adam(1e-2)\n model.compile(loss='binary_crossentropy', optimizer=adam, metrics=metrics)\n return model", "title": "" }, { "docid": "8b22fd2d536fb3644a8ceba9fb8dad0c", "score": "0.55871993", "text": "def predict(self, X):\n\n # Check is fit had been called\n check_is_fitted(self)\n\n y = []\n for x in X:\n corr_f = {}\n for f in self.classes_:\n S_x, S_y = self.cca.fit_transform(x.T, self.Ym[f].T)\n corr_f[f] = np.corrcoef(S_x.T, S_y.T)[0, 1]\n y.append(self.one_hot[max(corr_f, key=corr_f.get)])\n return y", "title": "" }, { "docid": "099d459fa09a8588ab5dc70b717abe85", "score": "0.55855733", "text": "def predict(self, X):\n # This could be done in parallel\n encoded_classes = np.argmax(self.predict_proba(X), axis=1)\n return self.classes_[encoded_classes]", "title": "" }, { "docid": "0ce5302b933a12ef9f766133439b20b8", "score": "0.55815625", "text": "def novel_cnn(input_shape, num_classes, optimizer):\n model = Sequential()\n\n # CNN - 1 - Conv\n model.add(Conv2D(16, 3, strides=3,\n padding='same',\n kernel_regularizer=l2(0.001),\n input_shape=input_shape\n ))\n model.add(LeakyReLU())\n\n # CNN - 2 - Conv\n model.add(Conv2D(32, 3, strides=3,\n kernel_regularizer=l2(0.001),\n padding='same'))\n model.add(LeakyReLU())\n\n # CNN - 3 - Conv\n model.add(Conv2D(64, 2, strides=2,\n kernel_regularizer=l2(0.001),\n padding='same', ))\n model.add(LeakyReLU())\n\n # CNN - 4 - Conv\n model.add(Conv2D(128, 1, strides=1,\n kernel_regularizer=l2(0.001),\n padding='same', ))\n model.add(LeakyReLU())\n # model.add(MaxPool2D(2, 2))\n # model.add(Dropout(0.1))\n\n\n # CNN - 5 - FCC\n model.add(Flatten())\n model.add(Dense(64))\n model.add(LeakyReLU())\n\n # CNN - 5 Output\n model.add(Dense(num_classes, activation='softmax'))\n\n # Print summary and compile model\n model.summary()\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['acc'])\n\n return model", "title": "" }, { "docid": "317d02db396b998a9f2cbecd4dae47bf", "score": "0.5580828", "text": "def predict_proba(self, X):\n \n p = []\n for h in range(len(X)): # Number of loops = number of samples\n max = 0\n Z = 0\n p.append([])\n for i in range(len(self.__classes)):\n Py = self.__p_y[i] # Product of the different probabilities for one class\n for j in range(len(X[0])): # Number of loops = number of features\n temp = X[h][j]-self.__moy[i][j]\n exp_num = math.pow(temp, 2)\n exp_den = 2*self.__var[i][j]\n exp = math.exp(-(exp_num/exp_den))\n temp = 2*math.pi*self.__var[i][j]\n factor_den = math.pow(temp, 1/2)\n Py *= (1/factor_den)*exp\n \n p[h].append(Py)\n Z += Py\n \n for i in range(len(p[h])):\n p[h][i] /= Z\n \n p = np.matrix(p)\n \n return p", "title": "" }, { "docid": "5624347d0fa80552dde51f1617b2beed", "score": "0.5574669", "text": "def forward(ctx, X: Tensor) -> Tensor:\n return one_hot(X.argmax(dim=-1), num_classes=X.shape[-1]).to(X)", "title": "" }, { "docid": "52d66d69610846793257aac35790da29", "score": "0.55699706", "text": "def predict(self, X):\n\n def get_correct_class(y_pred):\n answers = np.zeros(len(y_pred), dtype=np.int)\n for idx, result in enumerate(y_pred):\n objectclass = np.argmax(result)\n print(objectclass)\n answers[idx] = objectclass\n return answers\n\n y_probs = self.model.predict(X)\n y_classes = get_correct_class(y_probs)\n 
return y_classes", "title": "" }, { "docid": "2119b57d5752054390bb90c344aaabdb", "score": "0.55696607", "text": "def conv_net(nbr_classes, img_size = 28):\n net = nn.Sequential(\n nn.Conv2d(3, 64, 3),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 64, 3),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 64, 3),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, 2),\n Flatten(),\n nn.Linear(64, nbr_classes))\n\n return net", "title": "" }, { "docid": "6372ec9f5398874d34c7c8675263f445", "score": "0.55668086", "text": "def generate_one_hot_vectors(new_training_labels, num_classes, use_most_common_label):\n #print(\"prev max: \" + str(np.max(new_training_labels)))\n new_training_labels = relabel_superfluous_labels(new_training_labels)\n #print(\"new max: \" +str(np.max(new_training_labels)))\n if use_most_common_label:\n training_labels = []\n for i in range(len(new_training_labels)):\n zeros = np.zeros((num_classes), dtype=np.int)\n #print int(new_training_labels[i])\n zeros[int(new_training_labels[i]) - 1] = 1\n training_labels.append(zeros)\n training_labels = np.asarray(training_labels)\n else:\n training_labels=np.zeros(shape=[new_training_labels.shape[0], new_training_labels.shape[1], num_classes], dtype=np.int)\n for i in range(len(new_training_labels)):\n for j in range(new_training_labels.shape[1]):\n training_labels[i, j, [int(new_training_labels[i,j])-1]] = 1\n #print int(new_training_labels[i,j])\n #print zeros[i, j, :]\n\n\n return training_labels", "title": "" }, { "docid": "511bf352664cbbe6daa3bcf949aa57f7", "score": "0.5563969", "text": "def classify_ex(self,ex):\n return softmax({klass: np.dot(self.W[klass], ex[FEATURES]) for klass in self.Y})", "title": "" } ]
30230a45bc399c7ea7f30a6a1461c888
Retrieve genotypes by sample name
[ { "docid": "10e55688a93184e74c2c36105c726f2a", "score": "0.7850626", "text": "def genotypes_of(self, sample):\n\t\treturn self.genotypes[self._sample_to_index[sample]]", "title": "" } ]
[ { "docid": "c7c15478556ae0a2a23c4ca0fadc4713", "score": "0.7029988", "text": "def get_snp_sample_genotype(current_pos, sample_name):\n record = next((record for record in call_base.snp_record_dict[methyl_record.CHROM]\n if record.POS == current_pos), None)\n sample = next((sample for sample in record.samples if sample.sample == sample_name), None)\n if not sample.called:\n return None\n try:\n return sample.gt_bases\n except IndexError:\n #TODO: break here and see what causes this!\n return None", "title": "" }, { "docid": "ef694f8b8ae3d392f191bff20fccfde0", "score": "0.6749506", "text": "def get_sample(self, name):\n\n return self._samples[name]", "title": "" }, { "docid": "c762968609a3b73e5a3ccb720f2ab49a", "score": "0.66660273", "text": "def get_genotypes(conn, metadata, args):\n idx_to_sample = util.map_indices_to_samples(metadata)\n\n query = \"SELECT v.chrom, v.start, v.end, \\\n v.ref, v.alt, \\\n v.type, v.sub_type, \\\n v.aaf, v.in_dbsnp, v.gene, \\\n v.gts \\\n FROM variants v \\\n ORDER BY chrom, start\"\n res = conn.execute(sql.text(query))\n\n # build a list of all the column indices that are NOT\n # gt_* columns. These will be the columns reported\n (col_names, non_gt_idxs) = \\\n util.get_col_names_and_indices(metadata.tables[\"variants\"], ignore_gt_cols=True)\n col_names.append('sample')\n col_names.append('genotype')\n\n if args.use_header:\n print(args.separator.join(col for col in col_names))\n\n unpack = Z.unpack_genotype_blob\n import zlib\n\n\n for row in res:\n try:\n gts = unpack(row['gts'])\n except zlib.error:\n unpack = Z.snappy_unpack_blob\n gts = unpack(row['gts'])\n\n for idx, gt in enumerate(gts):\n # range(len(row)-1) to avoid printing v.gts\n a = args.separator.join(str(row[i]) for i in range(len(row)-1))\n b = args.separator.join([idx_to_sample[idx], gt])\n print(args.separator.join((a, b)))", "title": "" }, { "docid": "6819e9db1946e808222d9a9d14173fa2", "score": "0.648918", "text": "def get_sample(self, sample_name):\n return", "title": "" }, { "docid": "bf9cc7f658d16c9edb40fdf6ad850a9a", "score": "0.6199335", "text": "def oxstats_genotypes(sample_file, gen_file):\n with open(sample_file) as f:\n data = (line.split() for line in f)\n h1 = next(data)\n h2 = next(data)\n samples = list(data)\n with gzip.open(gen_file, 'rt') as f:\n yield h1, h2, samples, parse_oxstats(f)", "title": "" }, { "docid": "49c2f6624558d707c7a2e84707980582", "score": "0.6167919", "text": "def genotype_likelihoods_of(self, sample):\n\t\treturn self.genotype_likelihoods[self._sample_to_index[sample]]", "title": "" }, { "docid": "b5df72d9e21b172b7a3ae30b382f8990", "score": "0.60577434", "text": "def pop_samples(pop, pop_params):\n names = generate_names(pop_params)\n return [name for name in names if name.startswith(pop)]", "title": "" }, { "docid": "a25c24dd6b966ce0c3c3b4494dec9136", "score": "0.605545", "text": "def sample(request):\n name = request.getfixturevalue(\"name\") \\\n if \"name\" in request.fixturenames else \"testsample\"\n build = request.getfixturevalue(TYPE_PARAM_NAME) \\\n if TYPE_PARAM_NAME in request.fixturenames else LSample\n return build({SAMPLE_NAME_COLNAME: name})", "title": "" }, { "docid": "5f4a64a181d1b78a1ace506f534516f7", "score": "0.59923667", "text": "def fixture_genotypes(genotype_obj) -> List[Genotype]:\n return [genotype_obj]", "title": "" }, { "docid": "5ff2809fc7e8ef700473150737604d21", "score": "0.595378", "text": "def get_sample_type(sample_type, json_file, metagroup):\n index_samples = {}\n metadata = {}\n list_samples = []\n\n for sample in 
json_file[\"columns\"]:\n if type(sample[\"metadata\"]) == type(None):\n print(\"The metadata in the BIOM file is missing. \\\nPlease check the input metadata and filenames for BiG-MAP.map and the resulting BIOM file.\")\n sys.exit()\n else:\n if sample[\"metadata\"] == {}:\n pass\n elif sample_type == (sample[\"metadata\"][\"SampleType\"]).upper():\n list_samples.append(sample[\"id\"])\n index = json_file[\"columns\"].index(sample)\n index_samples[index] = sample\n metadata[sample[\"id\"]] = sample[\"metadata\"][metagroup]\n return(index_samples, list_samples, metadata)", "title": "" }, { "docid": "49d2169de4763a6716af25f7bfbd744b", "score": "0.5948013", "text": "def get_sample(project_name, sample_name):\r\n samples = bs_project.get_samples(project_name)\r\n for sample in samples:\r\n if sample.get(\"Name\") == sample_name:\r\n return sample\r\n\r\n return None", "title": "" }, { "docid": "864251de14508ee4c127c49d1a8944db", "score": "0.5933641", "text": "def sample_names(self):\n assert set([PARAMS_Columns.SAMPLE_NAMES]).issubset(self.in_df.columns), \\\n \"self.in_df must contain a column 'samples' which lists the name of the samples the variants were called from.\"\n\n return self.in_df[PARAMS_Columns.SAMPLE_NAMES].unique()", "title": "" }, { "docid": "106fbf60b767cd19ab003a02a8d07eb2", "score": "0.5888906", "text": "def simulate_genotypes(minor_allele_frequency, n_samples, sample_list=sample_id_list):\n n_samples = int(n_samples)\n frequencies = hardy_weinberg_principle(minor_allele_frequency[0])\n calls = [[0,0], [0,1], [1,1]]\n if len(minor_allele_frequency) > 1:\n genotype_list = generate_multiallelic_frequencies(minor_allele_frequency, n_samples)\n else:\n genotype_list = random.choices(calls, k=n_samples, weights=frequencies)\n new_lst = [list(x) for x in zip(sample_id_list, genotype_list)]\n genotypes = [{\"sampleId\":x, \"calls\": y} for x, y in new_lst]\n return genotypes", "title": "" }, { "docid": "3eb0cd4bc7ad96ca62e6a9dc5699b19e", "score": "0.5764888", "text": "def return_phenotypes(samples, metadata, phenotype_id):\n labels = []\n for sample in samples: \n labels.append(metadata[sample][phenotype_id])\n return numpy.array(labels)", "title": "" }, { "docid": "141ea51caa7999275bcb73adb14d4219", "score": "0.57552993", "text": "def set_genotypes_of(self, sample, genotypes):\n\t\tassert len(genotypes) == len(self.variants)\n\t\tself.genotypes[self._sample_to_index[sample]] = genotypes", "title": "" }, { "docid": "8c6dec2bc49ecdfbf7ea375ccbbaea59", "score": "0.5749674", "text": "def fixture_genotype_obj() -> Genotype:\n return Genotype(rsnumber=\"rs12\", allele_1=\"A\", allele_2=\"T\")", "title": "" }, { "docid": "eabb0bfc3e8c770591294fe841b820dc", "score": "0.5702735", "text": "def define_samples(pop_params):\n sample_names = []\n for i, pop in enumerate(pop_params):\n times = [years_to_gen(t) for t in pop_params[pop][\"t_sample\"]]\n sample_names.extend([msp.Sample(population=i, time=t) for t in times])\n return sample_names", "title": "" }, { "docid": "c601be282913903825a8a020d602aa59", "score": "0.56982803", "text": "def generate_names(pop_params):\n sample_names = []\n for p in pop_params:\n n_pop = len(pop_params[p][\"t_sample\"])\n sample_names.extend([f\"{p}{i}\" for i in range(n_pop)])\n return sample_names", "title": "" }, { "docid": "4f1e1a1a51613e8fc14c2ab7ea68a2df", "score": "0.56960845", "text": "def get_individual_sample(self, project, sample):\n samples = self.meta.tables['samples']\n stmt = select([samples]).where(samples.c.sample_id == 
sample).where(samples.c.project == project)\n result = self.conn.execute(stmt)\n\n for send in result:\n return send", "title": "" }, { "docid": "1f71e1592f126562f67e9c45f95e2838", "score": "0.5687256", "text": "def get_samples(dataset):\n return list({ record['SampleName'] for record in helpers.read_sample_sheet(dataset) })", "title": "" }, { "docid": "b2065db563c6be50bb54f4e731985649", "score": "0.56840014", "text": "def get_samples():\n return list(samples[\"sample\"].unique())", "title": "" }, { "docid": "784e2b7c0daa8e91fe80f1e598a9a2c8", "score": "0.56410736", "text": "def fixture_new_genotypes(new_genotype_obj) -> List[Genotype]:\n return [new_genotype_obj]", "title": "" }, { "docid": "74d3f5b145f75ee36aec50c4b878d828", "score": "0.5637434", "text": "def lims_samples_info(sample_name):\n if sample_name not in _lims_samples_info:\n _lims_samples_info[sample_name] = rest_communication.get_document(\n 'lims/sample_info', match={'sample_id': sample_name}\n )\n return _lims_samples_info.get(sample_name)", "title": "" }, { "docid": "e3c387ad61a12c39c49c00d7e29684ae", "score": "0.5558283", "text": "def make_sample(sample, genotype):\n # TODO: CG context\n return vcf.model._Call(sample.site,\n sample.sample,\n (genotype,sample.data.DP, sample.data.AD, sample.data.RO,\n sample.data.AO))", "title": "" }, { "docid": "67c413037b13e2fdcfec7f5f6bc7ff52", "score": "0.55488133", "text": "def filter_user_genotypes(userdf, aisnps_1kg):\n user_record = pd.DataFrame(index=[\"your_sample\"], columns=aisnps_1kg.columns)\n for snp in user_record.columns:\n try:\n user_record[snp] = userdf.loc[snp][\"genotype\"]\n except KeyError:\n continue\n aisnps_1kg = aisnps_1kg.append(user_record)\n return user_record, aisnps_1kg", "title": "" }, { "docid": "e877d44c12bfdfa8b6026755c95030dd", "score": "0.54991627", "text": "def samples( sample ):\r\n\tstmt = db.session.query( Samples ).statement\r\n\tdf = pd.read_sql_query( stmt, db.session.bind )\r\n\t\r\n\t# Filter the data based on the sample number and\r\n\t# only keep rows with values above 1\r\n\tsample_data = df.loc[ df[ sample ] > 1, [\"otu_id\", \"otu_label\", sample] ]\r\n\t\r\n\t# Sort by sample\r\n\tsample_data.sort_values( by=sample, ascending=False, inplace=True )\r\n\t\r\n\t# Format the data to send as json\r\n\tdata = {\r\n\t\t\"otu_ids\": sample_data.otu_id.values.tolist(),\r\n\t\t\"sample_values\": sample_data[ sample ].values.tolist(),\r\n\t\t\"otu_labels\": sample_data.otu_label.tolist(),\r\n\t}\r\n\treturn jsonify( data )", "title": "" }, { "docid": "ea742fee08dea099d6bc44dc59ae9a9a", "score": "0.54704374", "text": "def samples_in_project(self,project_name):\n project = self.__projects[self.__project_dir(project_name)]\n samples = []\n for sample_name in project:\n if sample_name.startswith('Sample_'):\n samples.append(sample_name.split('_')[1])\n samples.sort()\n return samples", "title": "" }, { "docid": "91c31b3f8fbdaa583f30eefb8b805a80", "score": "0.5467141", "text": "def call_genotypes(self):\n # determine which alt records to keep for watson\n # only alt records that are present with more than 5% in any sample are preserved\n keep_nt = list()\n for pos, nt in enumerate(self.watson_record.ALT[:-1]):\n #TODO: evaluate if expensive calculation is required.\n max_pct = max([sample.data.AD[pos+1] / float(sample.data.DP) for sample in self.watson_record.samples if\n sample.data.DP != 0])\n if max_pct > 0.05:\n keep_nt.append(nt)\n if not keep_nt:\n keep_nt = [None]\n self.watson_record.ALT = keep_nt\n self.watson_record.alleles = 
[self.watson_record.REF] + keep_nt\n #TODO: give call_samples keep_nt\n self.watson_record = self.call_samples(self.watson_record)\n\n keep_nt = list()\n for pos, nt in enumerate(self.crick_record.ALT[:-1]):\n max_pct = max([sample.data.AD[pos+1] / float(sample.data.DP) for sample in self.crick_record.samples if\n sample.data.DP != 0])\n if max_pct > 0.05:\n keep_nt.append(nt)\n if not keep_nt:\n keep_nt = [None]\n self.crick_record.ALT = keep_nt\n self.crick_record.alleles = [self.crick_record.REF] + keep_nt\n\n self.crick_record = self.call_samples(self.crick_record)", "title": "" }, { "docid": "46350a1bd002146ea027999b93ee90e5", "score": "0.54659986", "text": "def samples_in_project(self,project_name):\n project = self.__projects[self.__project_dir(project_name)]\n samples = []\n for sample_name in project:\n if sample_name.startswith('Sample_'):\n samples.append(sample_name.split('_')[1])\n else:\n samples.append(sample_name)\n samples.sort()\n return samples", "title": "" }, { "docid": "857549357db930a5132c87abe8537ede", "score": "0.546375", "text": "def populateSamples(self):\n\n\t\tsamples = self.project.eg.samples\n\t\tself.sampleName = samples[0]\n\n\t\tself.sampleList.clear()\n\t\tself.sampleList.addItem(\"ALL\")\n\t\tfor sample in samples:\n\t\t\tself.sampleList.addItem(sample)", "title": "" }, { "docid": "12ad77c2c1d5f81a5182afd177b71782", "score": "0.5458962", "text": "def get_1kg_samples():\n onekg_samples = \"data/integrated_call_samples_v3.20130502.ALL.panel\"\n dfsamples = pd.read_csv(onekg_samples, sep=\"\\t\")\n dfsamples.set_index(\"sample\", inplace=True)\n dfsamples.drop(columns=[\"Unnamed: 4\", \"Unnamed: 5\"], inplace=True)\n dfsamples.columns = [\"population\", \"super population\", \"gender\"]\n return dfsamples", "title": "" }, { "docid": "39083385d9caad6a7964d0e8a0593774", "score": "0.5457392", "text": "def fixture_analysis_obj(sample_id: str, genotypes: List[Genotype]) -> Analysis:\n return Analysis(type=\"genotype\", sample_id=sample_id, genotypes=genotypes)", "title": "" }, { "docid": "f74ac88bbef45a7e7876c35fc26c3595", "score": "0.5453278", "text": "def test_get_relation(self, session):\n\n browser = Browser(session)\n assert browser.get(\"Sample\", \"sample_type\") == []\n\n samples = browser.last(NUM_MODELS, \"Sample\")\n sample_types = browser.get(\"Sample\", \"sample_type\")\n\n assert sample_types\n for st in sample_types:\n assert st.__class__.__name__ == \"SampleType\"", "title": "" }, { "docid": "286db9236af87c909e381db4fff3465c", "score": "0.53720105", "text": "def fixture_new_genotype_obj() -> Genotype:\n return Genotype(rsnumber=\"rs12\", allele_1=\"A\", allele_2=\"T\")", "title": "" }, { "docid": "bee5f0192058acb96b3b1b0a464008ae", "score": "0.53705007", "text": "def get_genres(self):\n genres = self.execute(db_queries.get_genres).fetchall()\n\n genres = [g[\"genre\"] for g in genres]\n return [g[0].upper() + g[1:] for g in genres]", "title": "" }, { "docid": "c72f8a63e7298148adbeae964bbd4a24", "score": "0.5366758", "text": "def getSamples(mets_or_mags, sample_dir, nt_ext, pep_ext):\n\n if mets_or_mags == \"mets\":\n samples_nt = [\".\".join(curr.split(\".\")[0:-1]) for \\\n curr in os.listdir(sample_dir) if \\\n curr.split(\".\")[-1] == nt_ext]\n samples_pep = [\".\".join(curr.split(\".\")[0:-1]) for \\\n curr in os.listdir(sample_dir) if \\\n curr.split(\".\")[-1] == pep_ext]\n samples = list(set(samples_nt + samples_pep))\n print(samples)\n if len(samples) == 0:\n print(\"No samples found in sample directory with\",\n \"specified nucleotide or 
peptide extension.\")\n sys.exit(1)\n else:\n samples = [\".\".join(curr.split(\".\")[0:-1]) for curr \\\n in os.listdir(sample_dir) if curr.split(\".\")[-1] == pep_ext]\n if len(samples) == 0:\n print(\"No samples found in sample directory with\",\n \"specified peptide extension.\")\n sys.exit(1)\n\n return samples", "title": "" }, { "docid": "c29d68e002475964874bab0b113a723b", "score": "0.5344533", "text": "def get_samples(panel):\n\n qSamples = DBSession.query(Sample).join(SamplePanel,SamplePanel.sample_id==Sample.id)\\\n .filter(SamplePanel.panel_name == panel).order_by(Sample.name)\n\n samples = []\n for entry in qSamples:\n samples.append(entry.name)\n\n return samples", "title": "" }, { "docid": "ad1af5f4956283498a15d4acf4a555a7", "score": "0.5344508", "text": "def getSamplePileupData(self, samplename):\n \n #return [ tple for tple in self.pileupList if tple[0]== samplename ]\n return [ tple for tple in self.pileupList if tple.sample== samplename ]", "title": "" }, { "docid": "a47b8492ec784a3fa751ab55ebe63ace", "score": "0.5330151", "text": "def get_genotype_by_rsid(self, rsid):\n if type(rsid) == int:\n rsid = [rsid]\n\n genotypes = {rs: '' for rs in rsid}\n found = self.genome.find({'rs': {'$in': list(set(rsid))}})\n\n if found.count() > 0:\n for record in found:\n genotypes.update({record['rs']: record['genotype']})\n\n return genotypes", "title": "" }, { "docid": "d3cea9e5ce1096e1a130f578129d48b2", "score": "0.5329218", "text": "def populateSamples(self):\n\n\t\tsamples = self.project.eg.samples\n\t\tself.sampleName = samples[0]\n\n\t\tself.sampleList.clear()\n\t\tself.sampleList.addItem(\"NONE\")\n\t\tfor sample in samples:\n\t\t\tself.sampleList.addItem(sample)", "title": "" }, { "docid": "7958faf246c133b7ff0a54bd71f1f365", "score": "0.53176403", "text": "def get_genre(array,index):\n\treturn array[index]", "title": "" }, { "docid": "e5f7f2ce12aa04ed489cb2274c189042", "score": "0.52878475", "text": "def insert_sample_data(self, record):\n for j in range(len(record.samples)):\n self.ids['sample_id'] = self.sample_mapping[record.samples[j].sample.split('.')[0]]\n # Add universally query-able genotype data to the genotype table.\n if self.hasGT:\n insert_sql('genotype', ['sample_id', 'record_id', 'GT'],\n [self.ids['sample_id'], self.ids['record_id'],\n self.genotype_mapping[self.get_field(record, \"GT @samples %d\" % j)]], self.db, self.cursor)\n else:\n insert_sql('genotype', ['sample_id', 'record_id'],\n [self.ids['sample_id'], self.ids['record_id']], self.db, self.cursor)\n\n cols = ['sample_id', 'record_id']\n vals = [self.ids['sample_id'], self.ids['record_id']]\n for i in range(len(self.toolSamples)):\n field = self.get_field(record, \"%s @samples %d\" % (self.toolSamples[i].split('_L')[0], j))\n if field is not None:\n if '_L' in self.toolSamples[i]:\n for k in range(len(field)):\n cols.append('%s_%d' % (self.toolSamples[i].split('_L')[0], k))\n vals.append(field[k])\n elif isinstance(field, list):\n cols.append(self.toolSamples[i])\n vals.append(', '.join(str(v) for v in field))\n else:\n cols.append(self.toolSamples[i])\n vals.append(field)\n insert_sql(\"%s_samples\" % self.ids['tool_id'], cols, vals, self.db, self.cursor)", "title": "" }, { "docid": "15051fdd5cd13ae57a0884afda3c353d", "score": "0.52749085", "text": "def populateSamples(self):\n\n\t\tsamples = self.project.eg.samples\n\t\tself.sampleName = samples[0]\n\n\t\tself.sampleList.clear()\n\t\tfor sample in samples:\n\t\t\tself.sampleList.addItem(sample)", "title": "" }, { "docid": 
"c33243d0781da8737f7bf0a158d9dc15", "score": "0.5247271", "text": "def create_random_genotype():\n genotype = []\n for chromosome_nr in range(nr_chromosomes):\n chromosome = \"\"\n for gene_nr in range(length_chromosome):\n chromosome += str(hex(randint(0, 15)))[2::].upper()\n genotype.append(chromosome)\n\n return genotype", "title": "" }, { "docid": "2d888e8d085f5aa22a74886a62566b99", "score": "0.5245733", "text": "def populate(self, sample_name=''):\n tx = Populate(sample_name)\n self.execute(tx)", "title": "" }, { "docid": "c6cae4a518a7669459960f78beb40190", "score": "0.5229588", "text": "def retrieve_one_sample(self, sample_name: str) -> dict:\n return self._asynchronous_data_last_samples[sample_name]", "title": "" }, { "docid": "fb8b5d3cdf3088c83b11bfc5df0bfad0", "score": "0.52203035", "text": "def getFieldSamples(self, field_name):", "title": "" }, { "docid": "0351fea66c85946294600b14236c7099", "score": "0.5215154", "text": "def sample_list(self, sample_type_id=None):\n path = \"sample_list\"\n if sample_type_id is not None:\n path += \"/\" + str(sample_type_id)\n return self.session.utils.aqhttp.get(path)", "title": "" }, { "docid": "0d3c6e9f98234a2043b89b4b929d802f", "score": "0.51987773", "text": "def get_samples(api_key):\n url = OCX_API + \"samples\"\n r = _get_ocx_url(url, api_key)\n return {s['filename']:s['id'] for s in r.json()}", "title": "" }, { "docid": "500620caf38da47ea3cfbd3f302d1692", "score": "0.51974195", "text": "def get_gts_based_on_difficulty(dataset, sample_name):\n # Get all ground truth labels and filter to dataset classes\n all_gt_objs = obj_utils.read_labels(dataset.kitti_label_dir, sample_name)\n gt_objs, _ = obj_utils.filter_labels_by_class(all_gt_objs, dataset.classes)\n\n # Filter objects to desired difficulty\n easy_gt_objs, _ = obj_utils.filter_labels_by_difficulty(\n copy.deepcopy(gt_objs), difficulty=Difficulty.EASY)\n medium_gt_objs, _ = obj_utils.filter_labels_by_difficulty(\n copy.deepcopy(gt_objs), difficulty=Difficulty.MODERATE)\n hard_gt_objs, _ = obj_utils.filter_labels_by_difficulty(\n copy.deepcopy(gt_objs), difficulty=Difficulty.HARD)\n\n for gt_obj in easy_gt_objs:\n gt_obj.type = 'Easy GT'\n for gt_obj in medium_gt_objs:\n gt_obj.type = 'Medium GT'\n for gt_obj in hard_gt_objs:\n gt_obj.type = 'Hard GT'\n\n return easy_gt_objs, medium_gt_objs, hard_gt_objs, all_gt_objs", "title": "" }, { "docid": "6abb9d9f65f5ae0f79d90382068032f4", "score": "0.5183113", "text": "def sample(self,\n table: str,\n data_dtypes,\n name: Optional[str] = None) -> replay_sample.ReplaySample:\n with tf.name_scope(name, f'{self._name}_sample', ['sample']) as scope:\n key, probability, table_size, priority, data = gen_reverb_ops.reverb_client_sample(\n self._handle, table, tree.flatten(data_dtypes), name=scope)\n return replay_sample.ReplaySample(\n replay_sample.SampleInfo(\n key=key,\n probability=probability,\n table_size=table_size,\n priority=priority), tree.unflatten_as(data_dtypes, data))", "title": "" }, { "docid": "d026499d7965220000df9efa0f98985c", "score": "0.5177205", "text": "def get_samp_names(samples):\n bcs = []\n sample_names = []\n samp_bad_name_dict = {}\n\n # Get list of barcodes and generate sample inventory file\n for samp in samples:\n bcs.append(samp.bc)\n FNULL = open(os.devnull, 'w')\n subprocess.run(['limfo', 'bar', '-bc', ','.join(bcs), '--report', 'sample_inventory', '--format', 'tsv'], stdout=FNULL, stderr=subprocess.STDOUT)\n\n # open inventory file and create list of good names\n with open('sample_inventory.tsv') as inv_file:\n 
next(inv_file)\n\n reader = csv.DictReader(inv_file, delimiter='\\t')\n next(reader)\n\n for line in reader:\n sample_names.append(line['DNA'])\n\n # open dilution drop-off and create dictionary associate less than great name with sample barcode\n with open('dilution_drop_off.tsv', 'r')as fin:\n next(fin)\n\n reader = csv.DictReader(fin, delimiter='\\t')\n next(reader)\n\n for line in reader:\n for samp in samples:\n if samp.bc == line['Barcode']:\n samp_bad_name_dict[samp.bc] = line['Content_Desc'].split(' ')\n\n # Use check imperfect names for good sample names and assign name if a match is found\n for name in sample_names:\n for samp in samples:\n if name in samp_bad_name_dict[samp.bc]:\n samp.name = name\n break", "title": "" }, { "docid": "fc7eb808ac062e0912a83d3cfef9e39c", "score": "0.517685", "text": "def sample_names(self, vcf_file):\n vcf_reader = vcf.Reader(open(vcf_file, 'r'))\n return vcf_reader.samples", "title": "" }, { "docid": "8105d631865f1d8bc1030a17a087aa2f", "score": "0.5165929", "text": "def get_sample(self):\n pass", "title": "" }, { "docid": "8105d631865f1d8bc1030a17a087aa2f", "score": "0.5165929", "text": "def get_sample(self):\n pass", "title": "" }, { "docid": "6cea789180f7192d771b2961801cd8ff", "score": "0.5156472", "text": "def extract_trees_from_sample(sample, sample_name):\n trees = {}\n for sentId, users in sample.items():\n for user_id, conll in users.items():\n tree = conll2tree(conll)\n if sentId not in trees:\n trees[sentId] = {\n \"sample_name\": sample_name,\n \"sentence\": tree.sentence(),\n \"conlls\": {},\n \"matches\": {},\n }\n trees[sentId][\"conlls\"][user_id] = conll\n return trees", "title": "" }, { "docid": "26d90800e9b07b8e65387b67b65be0e4", "score": "0.51356876", "text": "def create_samples(self):\n list_length = len(self.data[TYPE_KEY])\n list_of_samples = []\n for i in range(list_length):\n sample_label = self.data[TYPE_KEY][i]\n sample_id = self.data[SAMPLES_KEY][i]\n gene_list = []\n for key in self.data:\n if (key != TYPE_KEY) and (key != SAMPLES_KEY):\n gene_list.append(self.data[key][i])\n list_of_samples.append(Sample(sample_id, gene_list, sample_label))\n return list_of_samples", "title": "" }, { "docid": "6e236515a4e6fbed001ced57cdb61f40", "score": "0.5134659", "text": "def print_samples(data, n_samples=5):\n samples = random.sample(list(data.values()), n_samples)\n for sample in samples:\n print(f\"Source: {sample['src']}\")\n sample[\"gen\"].sort(key=lambda x: x[1], reverse=True)\n for hyp in sample[\"gen\"]:\n print(f\"\\tHyp: {hyp[0]}\")", "title": "" }, { "docid": "f4f911fca83fb95d5907db15860c34e7", "score": "0.5131994", "text": "def get_genres_from_cmdc(self, program_service_id, event_time, lang, region):\n genre_list = self.retrieve_parameter_value_from_cmdc(program_service_id, event_time,\n ['services', 0, 'contents', 0, 'genres'],\n lang, region)\n if genre_list:\n return genre_list\n else:\n return []", "title": "" }, { "docid": "7866ad74e28a0ccf52af96157b149965", "score": "0.51268774", "text": "def getOneIndividualGenotypeList(self, individual_id=None):\n\t\twhere_condition_ls = [\"individual_id==%s\"%(individual_id)]\n\t\tquery = self.polymorphismTable.query(\"\"\" %s \"\"\"%(\" & \".join(where_condition_ls)))\n\t\tgenotypeList = ['']*self.locusTable.nrows\n\t\tfor row in query:\n\t\t\tlocus_index =row['locus_id']-1\n\t\t\tgenotypeList[locus_index] = genotypeList[locus_index] + row['allele_sequence']\n\t\treturn genotypeList", "title": "" }, { "docid": "49539232dc6e86b275a2def446f336a4", "score": "0.5125656", "text": 
"def _get_exp_sample_subtype_filename(n, t):\n return n + \"_\" + t.__name__ + SAMPLE_YAML_EXT", "title": "" }, { "docid": "8b7749e927af09b2ae779ed7f1a00866", "score": "0.5078752", "text": "def sample(self, nsamples=1):\n samples = []\n for i in range(nsamples):\n samples.append(np.random.gamma(self.vi_shape, 1/self.vi_rate).T)\n return np.stack(samples).T", "title": "" }, { "docid": "f5e5bce38f0ebc112599f6b36a56df11", "score": "0.50776786", "text": "def get_trspname(genename):\n\n\tUTRfile = pd.read_csv(UTRfilestring, sep=',')\n\n\ttrsp = UTRfile.loc[UTRfile['gene_name'] == genename]['#transcript'].item()\n\treturn trsp", "title": "" }, { "docid": "07b94a61dbfd938d62bdc4b1ae1eb0bf", "score": "0.50732875", "text": "def sample(self, shape, name=None):\n name = name if name is not None else (self.name + '_sample')\n with paddle.no_grad():\n return self.rsample(shape, name)", "title": "" }, { "docid": "0259d5eff8e7eb7dbfe3c2340e61df56", "score": "0.50578517", "text": "def give_sample_name(sample='test', user_name='DR'):\n\n RE.md['sample'] = sample\n\n e = energy.position.energy / 1000\n sdd = pil1m_pos.z.position / 1000\n wa = waxs.arc.position + 0.001\n wa = str(np.round(float(wa), 1)).zfill(4)\n\n # Sample name\n name_fmt = (\"{sample}_{energy}keV_wa{wax}_sdd{sdd}m\")\n sample_name = name_fmt.format(\n sample=sample,\n energy=\"%.2f\" % e,\n wax=wa,\n sdd=\"%.1f\" % sdd,\n )\n sample_id(user_name=user_name, sample_name=sample_name)\n print(f\"\\n\\n\\n\\t=== Sample: {sample_name} ===\")", "title": "" }, { "docid": "c755c5141fd2a4f2dd557ab42bcc13b0", "score": "0.5053218", "text": "def upload_genotypes(context: CGConfig, re_upload: bool, family_id: Optional[str]):\n\n status_db: Store = context.status_db\n housekeeper_api: HousekeeperAPI = context.housekeeper_api\n genotype_api: GenotypeAPI = context.genotype_api\n\n click.echo(click.style(\"----------------- GENOTYPES -------------------\"))\n\n if not family_id:\n suggest_cases_to_upload(status_db=status_db)\n raise click.Abort\n case_obj: Family = status_db.get_case_by_internal_id(internal_id=family_id)\n upload_genotypes_api = UploadGenotypesAPI(hk_api=housekeeper_api, gt_api=genotype_api)\n results: dict = upload_genotypes_api.data(case_obj.analyses[0])\n\n if not results:\n LOG.warning(\"Could not find any results to upload\")\n return\n upload_genotypes_api.upload(results, replace=re_upload)", "title": "" }, { "docid": "2c30eeda371f8d49d2d5a79f78286f5b", "score": "0.50460654", "text": "def getSampleData(taxonids = \"\", taxonname = \"\", ageold= \"\", ageyoung = \"\", loc= (), gpid = \"\", altmin = \"\", altmax = \"\", **kwargs):\n endpoint = \"http://api.neotomadb.org/v1/data/sampledata\"\n url = endpoint + \"?\"\n if taxonids != \"\":\n idList = \"\"\n if isinstance(taxonids, list):\n for i in taxonids:\n idList += i.replace(\" \", \"%20\") + \",\"\n elif isinstance(taxonids, str):\n idList = taxonids.replace(\" \", \"%20\")\n url += \"taxonids=\" + idList + \"&\"\n if taxonname != \"\":\n nameList = \"\"\n if isinstance(taxonname, list):\n for i in taxonname:\n nameList += i + \",\"\n elif isinstance(taxonname, str):\n nameList = taxonname.replace(\" \", \"%20\")\n url += \"taxonname=\" + nameList + \"&\"\n if ageold != \"\":\n url += \"ageold=\" + ageold + \"&\"\n if ageyoung != \"\":\n url += \"ageyoung=\" + ageyoung + \"&\"\n if loc != ():\n locString = \"\"\n if not isinstance(loc, tuple) or not isinstance(loc, list):\n print \"Location bounding box must be a list or a tuple.\"\n return False\n for i in loc:\n locString += 
str(i) + \",\"\n url += \"loc=\" + locString + \"&\"\n if gpid != \"\":\n gpid = str(gpid)\n if gpid.isdigit():\n gpid = int(gpid)\n else:\n gpid = int(lookupGPID(keyword=gpid))\n if altmin!= \"\":\n url += \"altmin=\" + altmin\n if altmax != \"\":\n url += \"altmax=\" + altmax\n\n SC = SampleDataCollection()\n if url[-1] == \"&\":\n url = url[:-1]\n response = urllib2.urlopen(url)\n data = json.load(response)\n success = data['success']\n if success != 1:\n print \"Failed to obtain sample data.\"\n print data['message']\n return False\n else:\n responseData = data['data']\n i = 0\n while i < len(responseData):\n d = responseData[i]\n dTaxaGroup = d['TaxaGroup']\n dValue = d['Value']\n dUnits = d['VariableUnits']\n dTaxonName = d['TaxonName']\n dElement = d['VaraibleElement']\n dContext = d['VariableContext']\n dSampleAge = d['SampleAge']\n dAgeYounger = d['SampleAgeYounger']\n dAgeOlder = d['SampleAgeOlder']\n dDatasetID = d['DatasetID']\n dSiteAltitude = d['SiteAltitude']\n dLatN = d['SiteLatitudeNorth']\n dLatS = d['SiteLatitudeSouth']\n dLongE = d['SiteLongitudeEast']\n dLongW = d['SiteLongitudeWest']\n SD = SampleData(taxagroup=dTaxaGroup, value=dValue, variableUnits=dUnits, taxonName=dTaxonName, variableElement=dElement,\n variableContext=dContext, sampleAge=dSampleAge, sampleAgeYounger=dAgeYounger, sampleAgeOlder=dAgeOlder,\n datasetID=dDatasetID, siteAltitude=dSiteAltitude, siteLatitudeN=dLatN, siteLatitudeS=dLatS,\n siteLongitudeE=dLongE, siteLongitudeW=dLongW)\n SC.addSample(SD)\n i += 1\n print \"Found \" + str(len(SC.items)) + \" sites and returned them as a SampleCollection.\"\n return SC", "title": "" }, { "docid": "9b1f6cc51b7293e39518c0bbb6cd8522", "score": "0.50446767", "text": "def biosample(self, biosample):\n\n for item in biosample:\n literal = Literal(item, datatype=XSD.string)\n self.graph.add((n[self.name], n.has_biosample, literal))", "title": "" }, { "docid": "3d83cf2a951676085ff2f72ec20ffbc0", "score": "0.50415474", "text": "def extra_features(wavs=wavs):\n for file in wavs:\n ob = sf.SoundFile(file)\n subtype.append(ob.subtype)\n #return ob", "title": "" }, { "docid": "296bdc7c7f2dc42c05df6ee2ad7023fb", "score": "0.50351715", "text": "def _get_input_files_merge_genotypes(self, wildcards):\n infix = self.dir_infixes[\"genotype\"]\n tpl = os.path.join(\"work\", infix, \"out\", infix + \".bcf\")\n # return BCF for one tumor\n yield tpl.format(library_name=wildcards.cancer_library.name, **wildcards)\n\n # also create tsv with description of all the samples in this bcf\n infix = self.dir_infixes[\"merge_genotypes\"]\n samples_file_path = os.path.join(\"work\", infix, \"out\", infix + \"samples.tsv\")\n with open(samples_file_path, \"w\") as samples_file:\n # write tumor sample description\n samples_file.write(\"{cancer_library}\\ttumor\\n\".format(**wildcards))\n\n # for all normals\n for pair in self._get_primary_pairs():\n # write sample description\n samples_file.write(\n \"{library_name}\\tcontrol\\n\".format(\n library_name=pair.normal_sample.dna_ngs_library.name\n )\n )\n # return BCF\n yield tpl.format(library_name=pair.normal_sample.dna_ngs_library.name, **wildcards)", "title": "" }, { "docid": "b1872b1945c8421e1bf017e4e29b0cc5", "score": "0.50278866", "text": "def get_all_samples(self, project, depth='ids'):\n samples = self.meta.tables['samples']\n status_stmt = select([samples.c.sample_id, samples.c.mosaic_sample_id]).where(samples.c.project == project)\n result = self.conn.execute(status_stmt)\n \n samples = []\n for data in result:\n if depth == 'ids':\n 
samples.append(data[0])\n elif depth == 'all':\n samples.append({data[0]:data[1]})\n\n return samples", "title": "" }, { "docid": "5d1c1a21f0641d4d4338704b31b4d8a3", "score": "0.5023804", "text": "def samples(self):\n\n return self._samples.keys()", "title": "" }, { "docid": "774ea74926c50ebf3d9cebdef2f6228b", "score": "0.5022128", "text": "def get_genotypes(glstring):\n parsed = re.split(r'[|^]', glstring)\n genotypes = []\n for genotype in parsed:\n if \"+\" in genotype:\n genotypes.append(genotype)\n return genotypes", "title": "" }, { "docid": "a5ce545750e51f9d36cc7054a5262b1e", "score": "0.5012741", "text": "def genotype(self, conifer_pipeline, samples=None, func=np.median):\n if samples is None:\n samples = set(conifer_pipeline.samples)\n else:\n samples = set(samples)\n gt_matrix = np.zeros([len(self.calls),len(samples)])\n ix = 0\n for _, call in self.calls.iterrows():\n d = conifer_pipeline.r.getExonValuesByRegion(chromosome=call[\"chromosome\"],start=call[\"start\"],stop=call[\"stop\"],sampleList=samples,genotype=False,overlap=True).rpkm\n gt_matrix[ix,:] = func(d, axis=0)\n ix += 1\n for s_ix, s in enumerate(list(samples)):\n self.calls[str(s)] = gt_matrix[:,s_ix]\n return CallTable(self.calls)", "title": "" }, { "docid": "30ab2dd6b75732d097b4067066e786cd", "score": "0.50122994", "text": "def get_phenotype_info(self, phen_name=None):\n dict_list = []\n #self.h5file = self._open(mode=\"r\")\n try:\n if \"phenotypes\" not in self.h5file.root:\n return dict_list\n if not phen_name:\n for phenotype_table in self.h5file.iterNodes(\"/phenotypes\", 'Group'):\n d = {'id':phenotype_table._v_attrs.name, 'name': phenotype_table._v_attrs.name, 'num_values': phenotype_table._v_attrs.num_vals, 'std_dev': phenotype_table._v_attrs.std_dev, 'growth_conditions': phenotype_table._v_attrs.growth_conditions,\n 'phenotype_scoring': phenotype_table._v_attrs.phenotype_scoring, 'method_description': phenotype_table._v_attrs.method_description, 'measurement_scale': phenotype_table._v_attrs.measurement_scale,\n 'is_binary': False}\n d['datasets'] = self._get_phenotype_datasets_(d['name'])\n dict_list.append(d)\n else:\n x = self.h5file.getNode(\"/phenotypes/%s\" % phen_name)\n dict_list = [{'id':x._v_attrs.name, 'name': x._v_attrs.name, 'num_values': x._v_attrs.num_vals, 'std_dev': x._v_attrs.std_dev, 'growth_conditions': x._v_attrs.growth_conditions,\n 'phenotype_scoring': x._v_attrs.phenotype_scoring, 'method_description': x._v_attrs.method_description, 'measurement_scale': x._v_attrs.measurement_scale,\n 'is_binary': False, 'datasets':self._get_phenotype_datasets_(x._v_attrs.name)}]\n except Exception, err:\n raise(err)\n finally:\n test = ''\n #self._close()\n return dict_list", "title": "" }, { "docid": "f4f53cc692d9813573f09b4ce09fb503", "score": "0.5010726", "text": "def get_genres(self) -> List[Genre]:\n raise NotImplementedError", "title": "" }, { "docid": "e82339f0beb369bd4e46dc5b337455df", "score": "0.50089604", "text": "def get_files_by_name(project_name, sample_name):\r\n sample = get_sample(project_name, sample_name)\r\n if sample:\r\n href = sample.get(\"Href\")\r\n api_url = href + \"/files\"\r\n return api_collection(api_url)\r\n return None", "title": "" }, { "docid": "e63623b40984492a6d4695ab4273258c", "score": "0.50037885", "text": "def genotype_distribtion(self, genotype):\n self.types_of_gt += genotype\n self.types_of_gt = np.unique(self.types_of_gt).tolist()", "title": "" }, { "docid": "cb4250c20df1683b3d86568afa90bf7d", "score": "0.5001513", "text": "def get_genotypes_region(vcf, 
region, field='GT'):\n print(f'Getting {field} for region {region}')\n cmd = 'tabix '+vcf+' '+region\n s = subprocess.check_output(cmd, shell=True, executable='/bin/bash')\n s = s.decode().strip()\n if len(s) == 0:\n raise ValueError(f'No variants in region {region}')\n s = s .split('\\n')\n variant_ids = [si.split('\\t', 3)[-2] for si in s]\n field_ix = s[0].split('\\t')[8].split(':').index(field)\n\n if field == 'GT':\n gt_map = {'0/0':0, '0/1':1, '1/1':2, './.':np.NaN,\n '0|0':0, '0|1':1, '1|0':1, '1|1':2, '.|.':np.NaN}\n s = [[gt_map[i.split(':', field_ix+1)[field_ix]] for i in si.split('\\t')[9:]] for si in s]\n else:\n s = [[i.split(':', field_ix+1)[field_ix] for i in si.split('\\t')[9:]] for si in s]\n\n return pd.DataFrame(data=s, index=variant_ids, columns=get_sample_ids(vcf), dtype=np.float32)", "title": "" }, { "docid": "56d84087bddd3557a9b629a32978d0a9", "score": "0.4999527", "text": "def get(name):\n assert isinstance(name, basestring)\n return __all_features[name]", "title": "" }, { "docid": "1ac84c2fd4423ec5a39a5ea0d70f7fcf", "score": "0.49988523", "text": "def rg_from_sample(wildcards):\n # key thing here is the set ID to {sample} and SM to NMFS_DNA_ID\n return r\"@RG\\tID:{sample}\\tSM:{nmfs}\\tPL:ILLUMINA\".format(\n sample = wildcards.sample,\n nmfs = samples.loc[wildcards.sample, \"NMFS_DNA_ID\"]\n )", "title": "" }, { "docid": "77d4ad206902252047eff2f02bab3e30", "score": "0.49979007", "text": "def get_artist_genres(artist_name):\n genres = artist_details['genres'][artist_name_to_index[artist_name]]\n return genres.translate(str.maketrans('','','[]\\'')).split(', ')", "title": "" }, { "docid": "787d59ef7eadea4d563d7e244a0692fb", "score": "0.49954823", "text": "def get_random_samples_in_tag(request, tag_pk):\n tag = Tag.objects.get(pk=tag_pk)\n perm = SamplePermission()\n samples = [\n sample_rel.sample\n for sample_rel in tag.tagged_samples.all()\n ]\n shuffle(samples)\n n_samples = int(request.GET.get('n', 100))\n samples = samples[:n_samples]\n perm = SamplePermission()\n samples = [\n sample for sample in samples\n if perm.has_object_permission(request, None, sample)\n ]\n samples = [SampleSerializer(sample).data for sample in samples]\n return Response({'results': samples})", "title": "" }, { "docid": "17b8cb63d50642740ea087a3afdf0165", "score": "0.49954665", "text": "def get_sample_identifier_param(\n sample_identifier: HHandle,\n gen_param_name: str\n) -> Union[str, float, int]:\n with HalconOperator(905) as proc:\n proc.set_input_tuple(0, sample_identifier)\n proc.set_input_tuple(1, gen_param_name)\n proc.init_oct(0)\n proc.execute()\n gen_param_value = proc.get_output_tuple_s(0)\n return gen_param_value # type: ignore", "title": "" }, { "docid": "03268e4fef12441079167d6937056e28", "score": "0.498271", "text": "def getsamples(era,channel=\"\",tag=\"\",dtype=[],filter=[],veto=[],moddict={},verb=0):\n import TauFW.PicoProducer.tools.config as GLOB\n CONFIG = GLOB.getconfig(verb=verb)\n filters = filter if not filter or isinstance(filter,list) else [filter]\n vetoes = veto if not veto or isinstance(veto,list) else [veto]\n dtypes = dtype if not dtype or isinstance(dtype,list) else [dtype]\n sampfile = ensurefile(\"samples\",repkey(CONFIG.eras[era],ERA=era,CHANNEL=channel,TAG=tag))\n samppath = sampfile.replace('.py','').replace('/','.')\n if samppath not in moddict:\n moddict[samppath] = importlib.import_module(samppath) # save time by loading once\n if not hasattr(moddict[samppath],'samples'):\n LOG.throw(IOError,\"Module '%s' must have a list of Sample objects 
called 'samples'!\"%(samppath))\n samplelist = moddict[samppath].samples\n samples = [ ]\n sampledict = { } # ensure for unique names\n LOG.verb(\"getsamples: samplelist=%r\"%(samplelist),verb,3)\n for sample in samplelist:\n if filters and not sample.match(filters,verb): continue\n if vetoes and sample.match(vetoes,verb): continue\n if dtypes and sample.dtype not in dtypes: continue\n if channel and sample.channels and not any(fnmatch(channel,c) for c in sample.channels): continue\n if sample.name in sampledict:\n LOG.throw(IOError,\"Sample short names should be unique. Found two samples '%s'!\\n\\t%s\\n\\t%s\"%(\n sample.name,','.join(sampledict[sample.name].paths),','.join(sample.paths)))\n if 'skim' in channel and sample.dosplit: # split samples with multiple DAS dataset paths, and submit as separate jobs\n for subsample in sample.split():\n samples.append(subsample) # keep correspondence sample to one sample in DAS\n else:\n samples.append(sample)\n sampledict[sample.name] = sample\n return samples", "title": "" }, { "docid": "a44816291525b59ec105b3c4aacfc173", "score": "0.4979587", "text": "def _sample(self, bqm, **kwargs):\n with bqm_to_file(bqm, version=2) as fv:\n sapi_problem_id = self.solver.upload_bqm(fv).result()\n return self.solver.sample_bqm(sapi_problem_id, **kwargs).sampleset", "title": "" }, { "docid": "af4206937487a610396a4ab85dab2928", "score": "0.4972633", "text": "def samples_sample(sample):\n samples_df = pd.read_csv('DataSets/belly_button_biodiversity_samples.csv')\n sample_col = str(sample)\n samples = samples_df[sample_col].to_dict\n otu_ids = []\n sample_values = []\n\n for s in range(0, len(samples)):\n if (samples.values(s) != 0):\n otu_ids.append(s+1)\n sample_values.append(samples.values(s))\n\n\n samples_trace = {\n \"OTU_IDS\": otu_ids,\n \"SAMPLE_VALUES\": sample_values\n } \n\n return jsonify(samples_trace)", "title": "" }, { "docid": "0414a4bf6d2a3dd81cf7a9b89b3a6725", "score": "0.49667192", "text": "def add_sample(self,project_name,sample_name):\n project = self.add_project(project_name)\n sample_dir = self.__sample_dir(sample_name)\n if sample_dir not in project:\n project[sample_dir] = []\n return project[sample_dir]", "title": "" }, { "docid": "0414a4bf6d2a3dd81cf7a9b89b3a6725", "score": "0.49667192", "text": "def add_sample(self,project_name,sample_name):\n project = self.add_project(project_name)\n sample_dir = self.__sample_dir(sample_name)\n if sample_dir not in project:\n project[sample_dir] = []\n return project[sample_dir]", "title": "" }, { "docid": "34e04eef14e14cbcb4422f2d3dd8962f", "score": "0.4962461", "text": "def genotypeparser(self, genotype):\n genotype_cvrt = [[self.genotype_converter[x]] for x in genotype]\n return genotype_cvrt", "title": "" }, { "docid": "6d92eaf9e4d0cb983474fba252ad966e", "score": "0.49604422", "text": "def sample_mass_spectra_review(file_path, input_type, sample_name):\n with open(os.path.join(file_path, input_type, sample_name + \"_replicate_analyte_mass_spectra.pickle\"), \"rb\") as f:\n input_data = pickle.load(f)\n\n for sample in input_data:\n print(sample.replicate_analyte_id)\n print(sample.max_intensity)\n print(sample.replicate_analyte_rt)", "title": "" }, { "docid": "454cf37d39921c71e68d5e97436d1e57", "score": "0.49600962", "text": "def sampleAnnotation(self, sample):\n\t\t# no unit tests\n\t\tannotations={}\n\n\t\tsampleObj=self.q.get('sample',filterop=\"id={0}\".format(str(sample))) # get the sample 
object\n\t\tsampleObj=sampleObj.fetch()[0]\n\t\tannotations['csSample']=self._annotationDictionary(sampleObj)\n\t\t\n\t\tplates=sampleObj.is_related_with_plate\n\t\tplates=plates.fetch()\n\t\tfor plate in plates:\n\t\t\tannotations['csPlate']=self._annotationDictionary(plate)\n\t\n\t\t# add the study\n\t\tstudies=sampleObj.is_related_with_study\n\t\tstudies=studies.fetch()\n\t\tfor study in studies:\n\t\t\tannotations['study']=self._annotationDictionary(study)\n\t\t\t\n\t\t# recover mapcall information if a reference has been specified\n\t\t# get the mapcall objects; there may be multiple references, so we just select the one we are using\t\t\n\t\tfor id in sampleObj.has_mapcall:\t\n\t\t\tmapcallObj=self.q.get('mapcall',filterop=\"id={0}\".format(id))\t\t\t\n\t\t\tmapcallObj=mapcallObj.fetch()[0]\n\t\t\tannotDict=self._annotationDictionary(mapcallObj)\n\t\t\tif 'contig_id' in annotDict:\n\t\t\t\tif annotDict['contig_id']==self._reference_name:\n\t\t\t\t\tannotations['mapcall']=annotDict\n\t\t\n\t\tx=sampleObj.is_related_with_study.fetch()[0]\t\t# this adds one and only one study\n\t\tannotations['study']=self._annotationDictionary(x)\t\n\t\t\t\t\t\n\t\treturn(annotations)", "title": "" }, { "docid": "679cbea959dec1a518fe4c05a8c04b3d", "score": "0.49587226", "text": "def genres(self) -> Optional[List[str]]:\n\n return list(\n [g[\"genre_title\"] for g in self._track_metadata.get(\"track_genres\")]\n )", "title": "" }, { "docid": "a10b608c0777b9fd4cfcf687c79446f8", "score": "0.49558014", "text": "def add_sample(self, sample):\n \n self._samples[sample.name] = sample", "title": "" }, { "docid": "8dda0591816955035a0ec1f637f4def0", "score": "0.49482775", "text": "def create_samples(self, context, form, num_samples):\n sample_type = get_first_sampletype(context)\n uc = getToolByName(context, 'uid_catalog')\n\n project_uid = form.get('Project_uid', '')\n project = uc(UID=project_uid)[0].getObject()\n\n samples_gen = SampleGeneration(form, project)\n\n samples = []\n for i in range(num_samples):\n sample = samples_gen.create_sample(None, sample_type, context)\n samples.append(sample)\n\n location_uid = form.get('StorageLocation_uid', '')\n storage = []\n if location_uid:\n location = uc(UID=location_uid)[0].getObject()\n if len(location.get_free_positions()) > 0:\n storage.append(location)\n\n if storage:\n samples_gen.store_samples(samples, storage)\n\n return samples", "title": "" }, { "docid": "439f228ec1dda0d58994b989ae76fb0d", "score": "0.49448124", "text": "def _get_samples_by_identifiers(identifiers: click.Tuple([str, str]), store: Store) -> List[Sample]:\n identifier_args = dict(identifiers)\n return list(store.get_samples_by_any_id(**identifier_args))", "title": "" }, { "docid": "b9c4532e73d6d3a12a0b3a3cc4584d82", "score": "0.4929241", "text": "def test_get_single_sample(self):\n library = add_sample_group(is_library=True)\n sample = library.sample('SMPL_01 HHHGJGH')\n with self.client:\n response = self.client.get(\n f'/api/v1/samples/{sample.uuid}',\n content_type='application/json',\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertIn('success', data['status'])\n sample = data['data']['sample']\n self.assertIn('SMPL_01', sample['name'])\n self.assertIn('analysis_result_uuids', sample)\n self.assertIn('created_at', sample)", "title": "" }, { "docid": "9a4830772618a53cc56550ea9af69c22", "score": "0.49174476", "text": "def get_taxa_in_sample(sample_name, out_fp, api_key):\n taxa = get_ocx_analysis_table_for_sample(sample_name, 
api_key)\n tax_ids = [_['tax_id'] for _ in taxa]\n # Post list to NCBI\n tax_info = list(itertools.chain.from_iterable(_ncbi_get_many_taxa(tax_ids)))\n lineages = []\n for t in tax_info:\n LineageEx = t.get('LineageEx', ())\n _lineage = {r['Rank']: r['ScientificName'] for r in LineageEx}\n _lineage['baltimore'] = get_baltimore_group(LineageEx)\n lineages.append(_lineage)\n # tax_info = [\n # {r['Rank']: r['ScientificName'] for r in t.get('LineageEx', ())}\n # for t in tax_info\n # ]\n assert(len(lineages) == len(taxa))\n out_filename = os.path.join(out_fp, sample_name + \".taxa.tsv\")\n with open(out_filename, 'w') as out:\n writer = csv.DictWriter(\n out,\n fieldnames=['tax_id', 'name', 'readcount'] + TaxRanks,\n delimiter='\\t',\n quoting=csv.QUOTE_MINIMAL,\n extrasaction='ignore',\n restval='NA'\n )\n writer.writeheader()\n rows = ({**x, **y} for x, y in zip(taxa, lineages))\n writer.writerows(rows)\n return out_filename", "title": "" }, { "docid": "c9bc88e0878fde742ca34c8c15014954", "score": "0.49165934", "text": "def sample(self,size):\n raw_samples = pd.DataFrame(columns = self.input_names)\n for var in self.input_names:\n raw_samples[var] = self.greenbox[var].sample(size)\n return raw_samples", "title": "" }, { "docid": "b12a5b89f65f9437b3f82b2020e2f984", "score": "0.49149248", "text": "def get_genomes_sample(self):\n if self.ploidy > 1:\n # The chromosomes get aligned\n # 3D (individuals, loci, bits): [1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4] ->\n #\n # 2D (everything, 2): [[1, 1, 1, 1, 3, 3, 3, 3],\n # [2, 2, 2, 2, 4, 4, 4, 4]] ->\n #\n # 3D (chromosomes, loci, bits // 2): [[1, 1, 1, 1],\n # (individuals * 2, ...) [2, 2, 2, 2],\n # [3, 3, 3, 3],\n # [4, 4, 4, 4]]\n genomes = (\n self.genomes.reshape(-1, 2)\n .transpose()\n .reshape(self.genomes.shape[0] * 2, self.genomes.shape[1], -1)\n )\n else:\n genomes = self.genomes\n\n # TODO check if change in ploidy has implications for popgen stats\n\n # Check if there are enough genomes to sample\n if len(genomes) < 2: # NOTE tajimas_d requires minimum 2\n return None\n\n # Sample genomes\n if 0 < pan.POPGENSTATS_SAMPLE_SIZE_ <= genomes.shape[0]:\n indices = np.random.choice(\n range(genomes.shape[0]),\n pan.POPGENSTATS_SAMPLE_SIZE_,\n replace=False,\n )\n return genomes[indices]\n else:\n return genomes", "title": "" } ]
884cec929793e46311860e5e14300fdd
Frequency Shift This function does not perform a Hilbert transform when data is complex, NMRPipe seems to. As such the results of the imaginary channel differ from NMRPipe. In addition MAX/MIN values are slightly different than those from NMRPipe.
[ { "docid": "cb26b9cf473d587800e69fd53cbada94", "score": "0.0", "text": "def fsh(dic,data,dir,pts,sw=True):\n if dir not in [\"ls\",\"rs\"]:\n raise ValueError(\"dir must be ls or rs\")\n\n if np.iscomplexobj(data) == False: # real data\n null,data = _ht(dict(dic),data,zf=True)\n del_imag = True\n else: # imaginary data\n del_imag = False\n # NMRPipe always performs a hilbert transform\n # uncommenting the next two lines will match NMRPipe's fsh real\n # channel results, the imaginary channel is a mystery.\n #null,data = _ht(dict(dic),data,zf=True)\n #data = np.array(data,dtype=\"complex64\")\n\n if dir == \"ls\":\n pts = -pts\n\n data = p.fsh(data,pts)\n\n dic = update_minmax(dic,data)\n\n fn = \"FDF\"+str(int(dic[\"FDDIMORDER\"][0])) # F1, F2, etc\n if dic[fn+\"FTFLAG\"] == 1 and sw: # freq domain\n dic[fn+\"CENTER\"] = dic[fn+\"CENTER\"] + pts\n dic = recalc_orig(dic,data,fn)\n if del_imag == False:\n return dic,data\n else:\n return dic,data.real", "title": "" } ]
[ { "docid": "e34f45944a36c1470aefbbbac1be3568", "score": "0.6343489", "text": "def freq_shift(x, freq, fs):\n x = cp.asarray(x)\n return _freq_shift_kernel(x, freq, fs)", "title": "" }, { "docid": "1fc5636acb473f7957d08fa865e58c51", "score": "0.62186146", "text": "def amplificationHarmonic(data):\n shift = 50\n numberOfPoints = shift*2\n gaussian = signal.gaussian(numberOfPoints, std=7)\n dataFreq = fftBlack(data)\n dataFreq = dataFreq[0]\n print(\"lenght of data\")\n print(len(dataFreq))\n for i in range(numberOfPoints):\n try:\n print(\"index\")\n print(indiceOfFundamental * 2 - shift + i)\n print(\"before\")\n\n print(dataFreq[indiceOfFundamental*2-shift+i])\n dataFreq[indiceOfFundamental*2-shift+i] += dataFreq[indiceOfFundamental*2-shift+i]*10**(5*gaussian[i])\n print(\"after\")\n print(dataFreq[indiceOfFundamental * 2 - shift + i])\n print(\"\")\n except IndexError:\n print(\"index error\")\n #print(np.real(ifft(dataFreq)))\n result = np.real(ifft(dataFreq))\n return result", "title": "" }, { "docid": "f0eab382c33bd94046316c443618f451", "score": "0.60313743", "text": "def amplificationHarmonic(data):\n shift = 50\n numberOfPoints = shift*2\n gaussian = signal.gaussian(numberOfPoints, std=7)\n dataFreq = fftBlack(data)\n dataFreq = dataFreq[0]\n print(\"lenght of data in amp harmo\", len(data))\n for i in range(numberOfPoints):\n try:\n #print(\"index\")\n #print(indiceOfFundamental * 2 - shift + i)\n #print(\"before\")\n\n #print(dataFreq[indiceOfFundamental*2-shift+i])\n dataFreq[indiceOfFundamental*2-shift+i] += dataFreq[indiceOfFundamental*2-shift+i]*10**(5*gaussian[i])\n #print(\"after\")\n #print(dataFreq[indiceOfFundamental * 2 - shift + i])\n #print(\"\")\n except IndexError:\n print(\"index error in amp harmo\")\n #print(np.real(ifft(dataFreq)))\n result = np.real(ifft(dataFreq))\n return result", "title": "" }, { "docid": "9b6ffe20a37c522881c907130637e7a0", "score": "0.59643465", "text": "def calc_freq_shift(fq, ft, Np, f0, epsinf, W, Dvv):\n X=Np*pi*(fq-f0)/f0\n Ga0=Ga0_mult[ft]*2*pi*f0*epsinf*W*Dvv*(Np**2)\n C=Ct_mult[ft]*Np*W*epsinf\n Ba=Ga0*(sin(2.0*X)-2.0*X)/(2.0*X**2.0)\n return Ba/C/(2.0*pi)", "title": "" }, { "docid": "7bc076be073971d070fb5f87352e0821", "score": "0.5891276", "text": "def imfft(X):\r\n return fftshift(fft2(X))", "title": "" }, { "docid": "3fe4844d16e4b9e172796bd125b613ea", "score": "0.5711905", "text": "def pitchshift(snd_array, n, window_size=2**13, h=2**11):#ๆ•ดๅˆไปฅไธŠไธคๅŠŸ่ƒฝๅฎž็Žฐๅ˜้ข‘็އ็š„ๅŒๆ—ถไฟๆŒ้•ฟๅบฆ\n factor = 2**(1.0 * n / 12.0) #่ฆๅฐ†้Ÿณ้ซ˜ๆ้ซ˜nไธชๅŠ้Ÿณ็š„่ฏ๏ผŒๆˆ‘ไปฌ้œ€่ฆๅฐ†้ข‘็އไน˜ไธŠ็ณปๆ•ฐ2^(n/12) **ๅ’Œpowไธ€ไธชๆ•ˆๆžœ\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor) #ๅๆญฃๆœ€ๅŽ่Žทๅพ—็š„ๆ˜ฏไธ€ไธฒ็ญ‰้•ฟ็š„numpyๆ•ฐๅˆ—", "title": "" }, { "docid": "b177d989638f0cb525dd4fdeba417110", "score": "0.5694967", "text": "def fourierTransform(ByVals,zVals,freq):\r\n \r\n #Remove all nan values from ByVals\r\n #Locate all the nan values\r\n nanIndArr=np.argwhere(np.isnan(ByVals))\r\n #Remove the values at those indices\r\n for ind in nanIndArr[::-1]:\r\n #print('data len'+str(len(data)))\r\n ByVals=np.delete(ByVals,ind[0])\r\n zVals=np.delete(zVals,ind[0])\r\n \r\n #Length of the simulation space\r\n simLen=np.max(zVals)-np.min(zVals)\r\n \r\n #Sampling rate (number of samples per meter)\r\n sRate=len(zVals)/simLen\r\n \r\n #Take the fourier transform\r\n ByF=np.abs(fftpack.fft(ByVals))\r\n #Set k=0 amplitude to 0 to get rid of any DC effects\r\n ByF[0]=0\r\n #Create the 
wavenumber array\r\n # kArr=fftpack.fftfreq(len(ByVals))*sRate\r\n kArr=fftpack.fftfreq(len(ByVals),d=zVals[1]-zVals[0])\r\n \r\n #Only return the first half of the array since ByVals is real\r\n ByF=ByF[:int(len(ByF)/2)]\r\n kArr=kArr[:int(len(kArr)/2)]\r\n \r\n plt.plot(2*np.pi*kArr,ByF)\r\n plt.grid()\r\n plt.xlabel(r'$k_{||}$')\r\n plt.xlim(0,6)\r\n plt.ylabel('Amplitude')\r\n plt.title(str(freq)+'KHz')\r\n plt.show()\r\n plt.close()\r\n \r\n return ByF,kArr", "title": "" }, { "docid": "0d9001389a3c26d8f461c28f83601dff", "score": "0.56399316", "text": "def fft2(data):\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.fft(data, 2, normalized=True)\n data = fftshift(data, dim=(-3, -2))\n return data", "title": "" }, { "docid": "3716f00e4c1acb5f449e7982b24bfad4", "score": "0.55876046", "text": "def unpad_frequency(x, n1_max, n1_stride):\n n1_unpadded = 1 + (n1_max // n1_stride)\n return x[:, :, :n1_unpadded, :]", "title": "" }, { "docid": "8b152d2f4a8513f72652d49876e6c574", "score": "0.5561119", "text": "def fourier_trans(time_series, data, N):\n\n spacing = time_series.max()/N\n #create freq_dom from timestep\n freq_dom = np.linspace(0, 1/(2*spacing), N)\n #perform fft\n power_spectra = fft(data)\n #abs(fft) cut in half\n plottable_spectra = (2/N * np.abs(power_spectra))[:N//2]\n return (freq_dom,power_spectra,plottable_spectra)", "title": "" }, { "docid": "ac01132828d66347cc81e7cb8e9a014f", "score": "0.554546", "text": "def to_freq_space_2d(img):\n \n img_f = np.fft.fft2(img) # FFT\n #img_fshift = np.fft.fftshift(img_f) # FFT shift\n img_f_flat = np.reshape(img_f, (np.product(img_f.shape),))\n idx = sample(range(np.product(img_f.shape)), int(0.3 * np.product(img_f.shape)))\n img_f_flat[idx] = 0\n img_f= np.reshape(img_f_flat, img_f.shape)\n #img_real = img_f.real # Real part: (im_size1, im_size2)\n #img_imag = img_f.imag # Imaginary part: (im_size1, im_size2)\n #img_real_imag = np.dstack((img_real, img_imag)) # (im_size1, im_size2, 2)\n\n return img_f", "title": "" }, { "docid": "1a6c4ac4f934d6369de0539992b04915", "score": "0.54951936", "text": "def freq_filter(signal_filt):\r\n signal_freq=np.fft.rfft(signal_filt)\r\n signal_freq[3000:]=0\r\n\r\n fftinverse=np.fft.irfft(signal_freq,len(signal_filt))\r\n fftinverse=np.array(fftinverse,dtype='int16')\r\n return(fftinverse)", "title": "" }, { "docid": "b4b0a3be3fab6a7236cae41475f9568c", "score": "0.5490435", "text": "def discrete_fourier_transform(self) -> np.ndarray:\n window = np.hanning(len(self.data))\n flat_data = self.data.flatten()\n dft = abs(scipy.fftpack.fft(flat_data * window))\n\n for i in range(LOW_FREQUENCY):\n dft[i] = 0\n\n return dft[:min(len(dft) // 2, HIGH_FREQUENCY)]", "title": "" }, { "docid": "342f314e66e50733c1cf3474506e512e", "score": "0.54828477", "text": "def ifft2(data):\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.ifft(data, 2, normalized=True)\n data = fftshift(data, dim=(-3, -2))\n return data", "title": "" }, { "docid": "f15d63257249d6c0f3579d6b9fb2ea04", "score": "0.5479808", "text": "def fftshift(x, dim=None):\n if dim is None:\n dim = tuple(range(x.dim()))\n shift = [dim // 2 for dim in x.shape]\n elif isinstance(dim, int):\n shift = x.shape[dim] // 2\n else:\n shift = [x.shape[i] // 2 for i in dim]\n return roll(x, shift, dim)", "title": "" }, { "docid": "3c092d9745d4ba259f9a7e4b33981f1d", "score": "0.54727125", "text": "def _createFrequency(self, numin=700., numax=1800., nchan=100.):\n\n\t\t# 
======================================\n\t\t#\tConvert MHz to Hz\n\t\t# ======================================\n\t\tnumax = numax * self.__mhz\n\t\tnumin = numin * self.__mhz\n\n\t\t# ======================================\n\t\t#\tGenerate an evenly spaced grid\n\t\t#\tof frequencies and store the array\n\t\t# ======================================\n\t\treturn np.arange(nchan)*(numax-numin)/(nchan-1) + numin", "title": "" }, { "docid": "b5871e9d297008cc97ee8b36c99b2a4b", "score": "0.5463345", "text": "def mel_frequencies(n_mels=128, fmin=0.0, fmax=11025.0, htk=False,\n extra=False):\n\n # 'Center freqs' of mel bands - uniformly spaced between limits\n minmel = hz_to_mel(fmin, htk=htk)\n maxmel = hz_to_mel(fmax, htk=htk)\n\n mels = np.arange(minmel, maxmel + 1, (maxmel - minmel) / (n_mels + 1.0))\n\n if not extra:\n mels = mels[:n_mels]\n\n return mel_to_hz(mels, htk=htk)", "title": "" }, { "docid": "edefeb1ce52fab1769d37b722c30ff14", "score": "0.5444584", "text": "def FourierTransformOfSignal(data):\n AudioInFrequencyDomain = np.fft.rfft(data)\n return AudioInFrequencyDomain", "title": "" }, { "docid": "cd5b784912bcacbcec87ede1366fc30f", "score": "0.54261726", "text": "def calc_fft(self):\n X = scipy.fftpack.fft(self.data)\n amplitudeSpectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in X]\n \n #ๆญฃใฎๅ‘จๆณขๆ•ฐๅธฏๅŸŸใฎใฟๅˆ‡ใ‚Šๅ‡บใ™\n n = int(self.length * self.framerate)\n if n % 2 == 0:\n cut_num = int(n/2)\n else:\n cut_num = int((n-1) / 2 + 1)\n \n plus_ampSpec = amplitudeSpectrum[0 : cut_num]\n return plus_ampSpec", "title": "" }, { "docid": "6aa4d381df8f7fe804b514eab8471488", "score": "0.5422646", "text": "def pitchshift(sound_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretchFunc(sound_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "title": "" }, { "docid": "1b9b0b1c7395d31f28870bfcee4547e4", "score": "0.54068995", "text": "def ifft(kdata):\n tmp = np.fft.ifftshift(kdata)\n tmp = np.fft.ifft2(tmp)\n return np.fft.fftshift(tmp)", "title": "" }, { "docid": "2532a072ff99fe0aeac3e180b8061ef2", "score": "0.53875774", "text": "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "title": "" }, { "docid": "5ffbfdfbe2886c5f8aff6636da440f21", "score": "0.5385482", "text": "def hilbert(uin, nfft=None, axes=-1):\n if nfft is None:\n uin = _np.atleast_1d(uin)\n nfft = _np.shape(uin)[axes]\n # end if\n\n nyq = nfft//2 # Even\n if nfft % 2: # Odd\n# nyq = nfft//2 +1\n nyq = (nfft+1)//2\n # end if\n\n # Forward fourier transform:\n Ufft = _np.fft.fft(uin, n=nfft, axis=axes) # defaults to last axis\n # mfft = nfft - nfft//2 - 1\n\n # zero out the negative frequency components and double\n # the power in the positive frequency components\n# # this is what we are doing:\n# Ufft[_ut.fast_slice(Ufft, axis=axes, start=nfft//2+1, end=None, step=1)] = 0.0\n# Ufft[_ut.fast_slice(Ufft, axis=axes, start=1, end=nfft//2+1, step=1)] *= 2.0\n # this is much faster in general for large arrays:\n Ufft[(slice(None),) * (axes % Ufft.ndim) + (slice(nyq+1, None),)] = 0.0\n Ufft[(slice(None),) * (axes % Ufft.ndim) + (slice(1, nyq),)] *= 2.0\n\n # Inverse Fourier transform is the analytic signal\n return _np.fft.ifft(Ufft, n=nfft, axis=axes).squeeze()", "title": "" }, { "docid": "290afb30c600c0261031493f67adab7e", "score": "0.53825784", "text": "def fft2melmx(n_fft,\n fs=8000,\n nfilts=0,\n 
width=1.,\n minfreq=0,\n maxfreq=4000,\n htkmel=False,\n constamp=False):\n maxfreq = min(maxfreq, fs / 2.)\n\n if nfilts == 0:\n nfilts = numpy.ceil(hz2mel(maxfreq, htkmel) / 2.)\n\n wts = numpy.zeros((nfilts, n_fft))\n\n # Center freqs of each FFT bin\n fftfrqs = numpy.arange(n_fft / 2 + 1) / n_fft * fs\n\n # 'Center freqs' of mel bands - uniformly spaced between limits\n minmel = hz2mel(minfreq, htkmel)\n maxmel = hz2mel(maxfreq, htkmel)\n binfrqs = mel2hz(minmel + numpy.arange(nfilts + 2) / (nfilts + 1) * (maxmel - minmel), htkmel)\n\n for i in range(nfilts):\n _fs = binfrqs[i + numpy.arange(3, dtype=int)]\n # scale by width\n _fs = _fs[1] + width * (_fs - _fs[1])\n # lower and upper slopes for all bins\n loslope = (fftfrqs - _fs[0]) / (_fs[1] - __fs[0])\n hislope = (_fs[2] - fftfrqs)/(_fs[2] - _fs[1])\n\n wts[i, 1 + numpy.arange(n_fft//2 + 1)] =numpy.maximum(numpy.zeros_like(loslope),numpy.minimum(loslope, hislope))\n\n if not constamp:\n # Slaney-style mel is scaled to be approx constant E per channel\n wts = numpy.dot(numpy.diag(2. / (binfrqs[2 + numpy.arange(nfilts)] - binfrqs[numpy.arange(nfilts)])) , wts)\n\n # Make sure 2nd half of FFT is zero\n wts[:, n_fft // 2 + 1: n_fft] = 0\n\n return wts, binfrqs", "title": "" }, { "docid": "f7df9f1313beb54ec7b8f48ae81a0569", "score": "0.53756493", "text": "def hilbert_1d(uin, nfft=None):\n if nfft is None:\n uin = _np.atleast_1d(uin)\n nfft = len(uin)\n # end if\n\n nyq = nfft//2 # Even\n if nfft % 2: # Odd\n# nyq = nfft//2 +1\n nyq = (nfft+1)//2\n # end if\n\n # Forward fourier transform:\n Ufft = _np.fft.fft(uin, n=nfft, axis=-1) # defaults to last axis\n\n # Create a mask to zero out the negative frequency components and double\n # the power in the positive frequency components\n h = _np.zeros(nfft)\n h[0] = 1.0 # don't change the DC value\n h[1:nyq] = 2.0*_np.ones(nyq-1) # double positive frequencies\n# h[1:nfft//2] = 2.0*_np.ones(nfft//2-1) # double positive frequencies\n h[nyq] = 1.0 # don't forget about the last point in the spectrum\n\n # Inverse Fourier transform is the analytic signal\n return _np.fft.ifft(Ufft*h, n=nfft, axis=-1)", "title": "" }, { "docid": "33f55075f5e1e9a08bbf682a0cab1e82", "score": "0.53619605", "text": "def phase_corrected_operator(spectrum_shape: np.ndarray, sr: int,\n hop_length: int,\n frequency_shift: float) -> np.ndarray:\n\n freq_idx, time_idx = [np.arange(n_idx) for n_idx in spectrum_shape]\n exp_term = np.kron(freq_idx, time_idx) * hop_length * frequency_shift / sr\n exp_term = -2j * np.pi * exp_term\n return np.exp(exp_term).reshape(spectrum_shape)", "title": "" }, { "docid": "04b6417f964b993e948352e4a41ff91d", "score": "0.53579754", "text": "def hz2mel(freq):\n return 2595. 
* np.log10(1+freq/700.0)", "title": "" }, { "docid": "faaeeb69659ec7d38e864b18b7ec2479", "score": "0.5357381", "text": "def trfbank(fs, nfft, lowfreq, maxfreq, nlinfilt, nlogfilt, midfreq=1000):\n # Total number of filters\n nfilt = nlinfilt + nlogfilt\n\n # ------------------------\n # Compute the filter bank\n # ------------------------\n # Compute start/middle/end points of the triangular filters in spectral\n # domain\n frequences = numpy.zeros(nfilt + 2, dtype=PARAM_TYPE)\n if nlogfilt == 0:\n linsc = (maxfreq - lowfreq) / (nlinfilt + 1)\n frequences[:nlinfilt + 2] = lowfreq + numpy.arange(nlinfilt + 2) * linsc\n elif nlinfilt == 0:\n low_mel = hz2mel(lowfreq)\n max_mel = hz2mel(maxfreq)\n mels = numpy.zeros(nlogfilt + 2)\n # mels[nlinfilt:]\n melsc = (max_mel - low_mel) / (nfilt + 1)\n mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc\n # Back to the frequency domain\n frequences = mel2hz(mels)\n else:\n # Compute linear filters on [0;1000Hz]\n linsc = (min([midfreq, maxfreq]) - lowfreq) / (nlinfilt + 1)\n frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc\n # Compute log-linear filters on [1000;maxfreq]\n low_mel = hz2mel(min([1000, maxfreq]))\n max_mel = hz2mel(maxfreq)\n mels = numpy.zeros(nlogfilt + 2, dtype=PARAM_TYPE)\n melsc = (max_mel - low_mel) / (nlogfilt + 1)\n\n # Verify that mel2hz(melsc)>linsc\n while mel2hz(melsc) < linsc:\n # in this case, we add a linear filter\n nlinfilt += 1\n nlogfilt -= 1\n frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc\n low_mel = hz2mel(frequences[nlinfilt - 1] + 2 * linsc)\n max_mel = hz2mel(maxfreq)\n mels = numpy.zeros(nlogfilt + 2, dtype=PARAM_TYPE)\n melsc = (max_mel - low_mel) / (nlogfilt + 1)\n\n mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc\n # Back to the frequency domain\n frequences[nlinfilt:] = mel2hz(mels)\n\n heights = 2. / (frequences[2:] - frequences[0:-2])\n\n # Compute filterbank coeff (in fft domain, in bins)\n fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=PARAM_TYPE)\n # FFT bins (in Hz)\n n_frequences = numpy.arange(nfft) / (1. 
* nfft) * fs\n\n for i in range(nfilt):\n low = frequences[i]\n cen = frequences[i + 1]\n hi = frequences[i + 2]\n\n lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=numpy.int)\n left_slope = heights[i] / (cen - low)\n rid = numpy.arange(numpy.floor(cen * nfft / fs) + 1,\n min(numpy.floor(hi * nfft / fs) + 1, nfft), dtype=numpy.int)\n right_slope = heights[i] / (hi - cen)\n fbank[i][lid] = left_slope * (n_frequences[lid] - low)\n fbank[i][rid[:-1]] = right_slope * (hi - n_frequences[rid[:-1]])\n\n return fbank, frequences", "title": "" }, { "docid": "56200b44bff498cc747e0cd8a23b7c5c", "score": "0.5349722", "text": "def fftshift_image(im_in, dy=0.0, dx=0.0, isPeriodic=True, useLog=False):\n\n #\n # scale image to numbers and amplitudes around 1\n #\n if(useLog):\n im = np.log(np.ascontiguousarray(im_in, dtype='float64'))\n else:\n im = np.ascontiguousarray(im_in, dtype='float64')\n \n ny, nx = im.shape\n me = im.mean()\n st = np.std(im)\n \n im = (im-me)/st \n \n #\n # FFT of the input image, check for periodicity\n #\n if(isPeriodic):\n ny1, nx1 = im.shape\n ft = np.fft.rfft2(im)\n else:\n ny, nx = im.shape\n ft = np.zeros((2*ny, 2*nx), dtype='float64', order='c')\n\n ft[0:ny,0:nx] = im\n ft[0:ny,nx::] = im[:,::-1]\n ft[ny::,0:nx] = im[::-1,:]\n ft[ny::,nx::] = im[::-1,::-1]\n\n ny1, nx1 = ft.shape \n ft = np.fft.rfft2(ft)\n\n \n #\n # get spatial frequency mesh, the x-axis has only the positive frequencies\n # because the input data were non-complex numbers, so the negative part is\n # redundant.\n #\n fx, fy = np.meshgrid(np.fft.rfftfreq(nx1), np.fft.fftfreq(ny1))\n\n \n #\n # Multiply by exponential phase factor and return to image space\n #\n\n if(useLog):\n return np.exp((np.real((np.fft.irfft2(ft * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))[0:ny,0:nx])*st)+me)\n else:\n return (np.real((np.fft.irfft2(ft * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))[0:ny,0:nx])*st)+me", "title": "" }, { "docid": "44b2a9ea69dc719711abb56319137b35", "score": "0.5345683", "text": "def fft2barkmx(n_fft, fs, nfilts=0, width=1., minfreq=0., maxfreq=8000):\n maxfreq = min(maxfreq, fs / 2.)\n\n min_bark = hz2bark(minfreq)\n nyqbark = hz2bark(maxfreq) - min_bark\n\n if nfilts == 0:\n nfilts = numpy.ceil(nyqbark) + 1\n\n wts = numpy.zeros((nfilts, n_fft))\n\n # bark per filt\n step_barks = nyqbark / (nfilts - 1)\n\n # Frequency of each FFT bin in Bark\n binbarks = hz2bark(numpy.arange(n_fft / 2 + 1) * fs / n_fft)\n\n for i in range(nfilts):\n f_bark_mid = min_bark + i * step_barks\n # Linear slopes in log-space (i.e. 
dB) intersect to trapezoidal window\n lof = (binbarks - f_bark_mid - 0.5)\n hif = (binbarks - f_bark_mid + 0.5)\n wts[i, :n_fft // 2 + 1] = 10 ** (numpy.minimum(numpy.zeros_like(hif), numpy.minimum(hif, -2.5 * lof) / width))\n\n return wts", "title": "" }, { "docid": "27e23af8f8f312b67127191afa6b2a63", "score": "0.5343945", "text": "def freq_from_HPS(sig, fs):\n windowed = sig * blackmanharris(len(sig))\n\n from pylab import subplot, plot, log, copy, show\n\n # harmonic product spectrum:\n c = abs(rfft(windowed))\n maxharms = 8\n subplot(maxharms, 1, 1)\n plot(log(c))\n for x in range(2, maxharms):\n a = copy(c[::x]) # Should average or maximum instead of decimating\n # max(c[::x],c[1::x],c[2::x],...)\n c = c[:len(a)]\n i = argmax(abs(c))\n true_i = parabolic(abs(c), i)[0]\n print('Pass %d: %f Hz' % (x, fs * true_i / len(windowed)))\n c *= a\n subplot(maxharms, 1, x)\n plot(log(c))\n show()", "title": "" }, { "docid": "c35cc2a85fe4b48be860a955f32e5929", "score": "0.532987", "text": "def Chirplet_Transform(sig, alpha, fLevel=512, WinLen=64, SampFreq=1000):\n\n # data preparation\n SigLen = sig.size\n t = np.arange(0, SigLen) / SampFreq\n sig = hilbert(sig) # calculate the analytical signal\n\n # frequency axis and its half-axis points\n fLevel = np.ceil(fLevel / 2) * 2 + 1\n # round the frequency axis length value fLevel in one direction to the nearest odd number\n Lf = (fLevel - 1) / 2\n # half frequency axis data points (fLevel has been rounded to an odd number)\n # generate gauss window functions\n WinLen = np.ceil(WinLen / 2) * 2 + 1\n # round the length of windows to a nearest odd\n WinFun = np.exp(-6 * np.linspace(-1, 1, WinLen) ** 2)\n # gauss window function, divided into WinLen modulation data points\n Lw = (WinLen - 1) / 2 # half window data points\n # CT spectrum result array ( initialized to 0 )\n spec = np.zeros([int(fLevel), int(SigLen)])\n\n # sliding window signal, data segmentation preparation\n for iLoop in range(1, SigLen + 1):\n # determine the upper and lower limits of the left and right signal index subscripts\n # note that to prevent the edge width from exceeding the time domain, the retrieval error!\n iLeft = min([iLoop - 1, Lw, Lf])\n iRight = min([SigLen - iLoop, Lw, Lf])\n iIndex = np.arange(-iLeft, iRight + 1, dtype='int')\n\n iIndex1 = iIndex + iLoop # subscript index of the orignal signal\n\n iIndex2 = iIndex + int(Lw) + 1 # subscript index of window function vector\n Index = iIndex + int(\n Lf) + 1 # subscript index of the frequency axis (row number) in the spec two-dimensional array\n\n R_operator = np.exp(\n -1j * 0.5 * 2 * np.pi * alpha * t[iIndex1 - 1] ** 2) # frequency rotation operator (shear frequency)\n S_operator = np.exp(\n 1j * 2 * np.pi * alpha * t[iIndex1 - 1] * t[iLoop - 1]) # frequency shift operator (shift frequency)\n\n Sig_Section = sig[iIndex1 - 1] * R_operator * S_operator\n # signal segment after frequency rotation and frequency shift\n spec[iIndex - 1, iLoop - 1] = Sig_Section * np.conj(WinFun[iIndex2 - 1]) # fill the two-dimensional array\n\n # STFT\n spec = np.fft.fft(spec, axis=0)\n # spec = np.transpose(spec)\n spec = spec * 2 / fLevel # restores the true magnitude of FT\n spec = spec[0:int(fLevel) // 2, :] # till the Nyquist frequency\n\n freq = np.linspace(0, 0.5 * SampFreq, fLevel // 2) # Output frequency axis for plotting\n\n return spec, freq, t", "title": "" }, { "docid": "f34d0f12bea81d2373097b56651ccd26", "score": "0.53253925", "text": "def ifft_helper(signal):\n\n signal_copy = signal.copy()\n\n while not 
is_power_of_2(len(signal_copy)):\n signal_copy.append(0)\n\n N = len(signal_copy)\n\n if not isinstance(signal_copy[0], hpc):\n signal_copy = [hpc(item) for item in signal_copy]\n\n # base cases\n if N == 2:\n return [(signal_copy[0] + signal_copy[1]) / 2, (signal_copy[0] - signal_copy[1]) / 2]\n if N == 1:\n return [signal_copy[0]]\n\n # split X into quadrants\n X0 = signal_copy[:N // 4]\n X1 = signal_copy[N // 4:N // 2]\n X2 = signal_copy[N // 2:N * 3 // 4]\n X3 = signal_copy[N * 3 // 4:N]\n\n # compute for sum_odd and diff_odd used\n sum_odd = [(X0[index] - X2[index]) / 2 for index in range(N // 4)]\n diff_odd = [(X3[index] - X1[index]) / 2j for index in range(N // 4)]\n\n # compute for the fourier-transformed even elements of x\n x_even = []\n x_even.append([X0[index] - sum_odd[index] for index in range(N // 4)]) \n x_even.append([X1[index] + diff_odd[index] * 1j for index in range(N // 4)])\n x_even = flatten_list(x_even)\n\n # compute for the twiddle factors used to compute for sum and diff odd\n w1k = [w(1, k, N) for k in range(N)]\n w3k = [w(1, 3 * k, N) for k in range(N)]\n\n # compute for the fourier-transformed odd1 and odd3 elements of x\n x_odd1 = [(sum_odd[index] + diff_odd[index]) / (w1k[index] * 2) for index in range(N // 4)]\n x_odd3 = [(sum_odd[index] - diff_odd[index]) / (w3k[index] * 2) for index in range(N // 4)]\n\n # compute for the time-domain elements of x by recursively calling ifft_helper()\n x_even = ifft_helper(x_even)\n x_odd1 = ifft_helper(x_odd1)\n x_odd3 = ifft_helper(x_odd3)\n\n # reorder the subsignals into a single list of time-domain elements of x \n x = [None] * N\n\n for index in range(N // 2):\n x[index * 2] = x_even[index]\n\n for index in range(N // 4):\n x[4 * index + 1] = x_odd1[index]\n x[4 * index + 3] = x_odd3[index]\n\n return x", "title": "" }, { "docid": "a7e4a00e2db64fc91a99d70513aedfb7", "score": "0.5320053", "text": "def interp1D_f(data, fat):\n sig= data.copy()\n \n #Remove DC\n m_sig= np.mean(sig)\n sig -= m_sig\n \n #Pad zeros to power of 2\n N= sig.size\n Nfft= np.int32( 2**(np.ceil( np.log2(N) ) ) )\n \n #Create interpolated frequency axis and calculates FFT(Nfft)\n Sfat= np.zeros( fat*Nfft , dtype= np.complex128)\n \n #Transform\n S= np.fft.fft(sig,n=Nfft)\n \n #Fill the interpolated spectrum\n Sfat[:Nfft/2]= S[:Nfft/2]\n \n #Share the Nyquist\n Sfat[Nfft/2+1]= S[Nfft/2+1]/2.\n Sfat[fat*Nfft- (Nfft/2+1)]= S[Nfft/2+1]/2.\n \n #Assign the negative frequencies part\n Sfat[-Nfft/2+2:]= S[-Nfft/2+2:]\n \n #Shift, apply window in frequency domain, deshift\n# Sfat= np.fft.fftshift(Sfat)\n# Sfat = Sfat* np.hamming( (fat*Nfft) )\n# Sfat= np.fft.ifftshift(Sfat)\n \n #Return back in time domain\n xint= np.real( np.fft.ifft(Sfat) )*fat +m_sig\n \n \n return xint[:fat*N]", "title": "" }, { "docid": "a1aabcd950588fb438fde438f1448c2d", "score": "0.53062063", "text": "def fft_1d(gt):\n return np.fft.fftshift(np.fft.fft(gt))", "title": "" }, { "docid": "d153acb4117e396b6e0fb91d5492157f", "score": "0.52924746", "text": "def fft2mel(freq, fs, nfilts, minfreq, maxfreq):\n minmel = hz2mel(minfreq)\n maxmel = hz2mel(maxfreq) \n binfrqs = mel2hz(minmel+sp.arange(nfilts+2)/(float(nfilts)+1)*(maxmel-minmel))\n wts = sp.zeros((nfilts, (freq.size)))\n for i in range(nfilts):\n slp = binfrqs[i + sp.arange(3)]\n loslope = (freq - slp[0])/(slp[1] - slp[0])\n hislope = (slp[2] - freq)/(slp[2] - slp[1])\n wts[i,:] = sp.maximum(0.0, sp.minimum(loslope, hislope));\n wts[:, freq < 0] = 0\n wts = 
sp.dot(sp.diag(2./(binfrqs[2+sp.arange(nfilts)]-binfrqs[sp.arange(nfilts)])), wts);\n binfrqs = binfrqs[1:nfilts+1]\n return wts, binfrqs", "title": "" }, { "docid": "e72d3ecfc83d80d245847a9e92c76c39", "score": "0.5276877", "text": "def vocode(data, f, n, window_size, h):\n\tfor i, wav in enumerate(data):\n\t\tout = stretchFunc(wav, f, window_size, h)\n\t\tout = pitchshift(out, n, window_size, h)\n\t\tdata[i] = out\n\treturn data", "title": "" }, { "docid": "0db2a5b2bbf537716eb769dc807118d3", "score": "0.5258295", "text": "def notch_all_harmonics(signal, base_freq, sampling_rate):\n if base_freq > sampling_rate/2.0:\n print('Invalid frequency to notch')\n return\n curr_freq = base_freq\n while curr_freq < sampling_rate/2.0:\n b, a = scipy.signal.iirnotch(curr_freq/(sampling_rate/2.0), 30)\n signal = scipy.signal.lfilter(b, a, signal)\n curr_freq = curr_freq + base_freq\n \n return signal", "title": "" }, { "docid": "9ac61dc76d8370607271b49aca7bd1f5", "score": "0.5255312", "text": "def _powerspectrum(inarray, axis=-1):\n rfftd = np.fft.rfft(inarray, axis=axis)\n # Want P(k)= F(k).re*F(k).re+F(k).im*F(k).im\n power = np.abs(rfftd)**2\n #Normalise the FFT so it is independent of input size.\n power /= np.shape(inarray)[axis]**2\n return power", "title": "" }, { "docid": "7a32d252833cb73bb4fb59833c0345a1", "score": "0.5249845", "text": "def time_stretch(signalin, tscale, wsize=512, tstep=128):\n \n # read input and get the timescale factor \n L = signalin.shape[0] \n \n # signal blocks for processing and output\n phi = np.zeros(wsize)\n out = np.zeros(wsize, dtype=complex)\n sigout = np.zeros(L/tscale+wsize)\n \n # max input amp, window\n amp = signalin.max()\n win = np.hanning(wsize)\n p = 0 # position in the original (increment by tstep*tscale)\n pp = 0 # position in the target (increment by tstep)\n \n # TODO change this: the algorithm is stopping too soon: many zeroes on the edges\n while p < L-(wsize+tstep): \n # take the spectra of two consecutive windows\n p1 = int(p)\n spec1 = np.fft.fft(win*signalin[p1:p1+wsize])\n spec2 = np.fft.fft(win*signalin[p1+tstep:p1+wsize+tstep])\n # take their phase difference and integrate\n phi += (np.angle(spec2) - np.angle(spec1))\n # bring the phase back to between pi and -pi\n for phiI in range(len(phi)):\n while phi[phiI] < -pi: phi[phiI] += 2*pi\n while phi[phiI] >= pi: phi[phiI] -= 2*pi\n out.real, out.imag = np.cos(phi), np.sin(phi)\n # inverse FFT and overlap-add\n sigout[pp:pp+wsize] += win*np.fft.ifft(np.abs(spec2)*out)\n pp += tstep\n p += tstep*tscale\n \n return sigout", "title": "" }, { "docid": "efe9493d436ec1846d49cfa0e5742148", "score": "0.52423453", "text": "def calc_pre_scale(self, frequency):\n return int(round(self.__oscillator_clock / (4096.0 * frequency)) - 1)", "title": "" }, { "docid": "9fa2fb2e89b009c261988dcc2c69dc36", "score": "0.5236282", "text": "def _biorthogonal_window(analysis_window, shift):\n fft_size = len(analysis_window)\n assert np.mod(fft_size, shift) == 0\n number_of_shifts = len(analysis_window) // shift\n\n sum_of_squares = np.zeros(shift)\n for synthesis_index in range(0, shift):\n sample_index = np.arange(0, number_of_shifts+1)\n analysis_index = synthesis_index + sample_index * shift\n analysis_index = analysis_index[analysis_index + 1 < fft_size]\n sum_of_squares[synthesis_index] \\\n = np.sum(analysis_window[analysis_index] ** 2)\n sum_of_squares = np.kron(np.ones(number_of_shifts), sum_of_squares)\n synthesis_window = analysis_window / sum_of_squares / fft_size\n\n # Why? 
Line created by Hai, Lukas does not know, why it exists.\n synthesis_window *= fft_size\n\n return synthesis_window", "title": "" }, { "docid": "5c18964ee7652055bb9a8b55ed16be31", "score": "0.5231546", "text": "def shift_t_for_nfft(t, ofac):\n\n r = ofac * (max(t) - min(t))\n eps = 1E-5\n a = 0.5 - eps\n\n return 2 * a * (t - min(t)) / r - a", "title": "" }, { "docid": "81cd128bb8bdf877b3ef99aa6a4e26c2", "score": "0.5214643", "text": "def stretch(snd_array, factor, window_size, h):#ๅ˜้€Ÿ ๆ”นๅ˜้€Ÿๅบฆ็š„ๅŒๆ—ถไฟๆŒ้Ÿณ้ข‘ ่ฟ™ไธชๅฅฝๅƒ็”จ่ตทๆฅไธๅคชๅฏนๅŠฒ\n phase = np.zeros(window_size)\n hanning_window = np.hanning(window_size)\n result = np.zeros(int(len(snd_array)/ factor) + window_size)\n\n for i in np.arange(0, len(snd_array) - (window_size + h), h*factor):\n # Two potentially overlapping subarrays\n i=int(i)\n a1 = snd_array[i: i + window_size]\n a2 = snd_array[i + h: i + window_size + h]\n\n # The spectra of these arrays\n s1 = np.fft.fft(hanning_window * a1)\n s2 = np.fft.fft(hanning_window * a2)\n\n # Rephase all frequencies\n phase = (phase + np.angle(s2/s1)) % 2*np.pi\n\n a2_rephased = np.fft.ifft(np.abs(s2)*np.exp(1j*phase))\n i2 = int(i/factor)\n result[i2: i2 + window_size] += hanning_window*a2_rephased.real\n\n # normalize (16bit)\n result = ((2**(16-4)) * result/result.max())\n\n return result.astype('int16')", "title": "" }, { "docid": "173cf99bb936c48154a121f144464165", "score": "0.5213703", "text": "def meiksin(wavelength, redshift, unit=\"nm\", lylim=True, lls_fact=False, Xcut=True, Xlim=10):\n if unit == \"angstroms\":\n wavelength = wavelength /10.\n\n # Check if the wavelength is a scalar or an array\n # Otherwise can not iterate over 0-d array\n wavelength = np.atleast_1d(wavelength)\n n_transitions_low = 10\n n_transitions_max = 31\n gamma = 0.2788 # Gamma(0.5,1) i.e., Gamma(2-beta,1) with beta = 1.5\n n0 = 0.25\n lambda_limit = 91.2 # Lyman limit in nm\n\n lambda_n = np.empty(n_transitions_max)\n z_n = np.empty((n_transitions_max, len(wavelength)))\n for n in range(2, n_transitions_max):\n lambda_n[n] = lambda_limit / (1.0 - 1.0 / float(n * n))\n z_n[n, :] = (wavelength / lambda_n[n]) - 1.0\n\n # From Table 1 in Meiksin (2006), only n >= 3 are relevant.\n # fact has a length equal to n_transitions_low.\n fact = np.array(\n [1.0, 1.0, 1.0, 0.348, 0.179, 0.109, 0.0722, 0.0508, 0.0373, 0.0283]\n )\n # ------------- Attenuation due to Lyman series lines -----------------\n\n # First, tau_alpha is the mean Lyman alpha transmitted flux,\n # Here n = 2 => tau_2 = tau_alpha\n tau_n = np.zeros((n_transitions_max, len(wavelength)))\n if redshift <= 4:\n tau_a = 0.00211 * np.power(1.0 + redshift, 3.7)\n tau_n[2, :] = 0.00211 * np.power(1.0 + z_n[2, :], 3.7)\n elif redshift > 4:\n tau_a = 0.00058 * np.power(1.0 + redshift, 4.5)\n tau_n[2, :] = 0.00058 * np.power(1.0 + z_n[2, :], 4.5)\n\n # Then, tau_n is the mean optical depth value for transitions\n # n = 3 - 9 -> 1\n for n in range(3, n_transitions_max):\n if n <= 5:\n w = np.where(z_n[n, :] < 3)\n tau_n[n, w] = (\n tau_a * fact[n] * np.power(0.25 * (1.0 + z_n[n, w]), (1.0 / 3.0))\n )\n w = np.where(z_n[n, :] >= 3)\n tau_n[n, w] = (\n tau_a * fact[n] * np.power(0.25 * (1.0 + z_n[n, w]), (1.0 / 6.0))\n )\n elif 5 < n <= 9:\n tau_n[n, :] = (\n tau_a * fact[n] * np.power(0.25 * (1.0 + z_n[n, :]), (1.0 / 3.0))\n )\n else:\n tau_n[n, :] = tau_n[9, :] * 720.0 / (float(n) * (float(n * n - 1.0)))\n for n in range(2, n_transitions_max):\n w = np.where(z_n[n, :] >= redshift)\n tau_n[n, w] = 0.0\n\n # ----------Contribution due to 
photoelectric absorption: IGM + LLS ------\n tau_l_igm = np.zeros_like(wavelength)\n tau_l_lls = np.zeros_like(wavelength)\n if lylim:\n # IGM Meiksin eq.5\n z_l = wavelength / lambda_limit - 1.0\n w = np.where(z_l < redshift)\n\n tau_l_igm[w] = (\n 0.805\n * np.power(1.0 + z_l[w], 3)\n * (1.0 / (1.0 + z_l[w]) - 1.0 / (1.0 + redshift))\n )\n\n # LLS Meiksin eq.7\n term1 = gamma - np.exp(-1.0)\n\n n = np.arange(n_transitions_low - 1)\n term2 = np.sum(np.power(-1.0, n) / (factorial(n) * (2 * n - 1)))\n\n term3 = (1.0 + redshift) * np.power(\n wavelength[w] / lambda_limit, 1.5\n ) - np.power(wavelength[w] / lambda_limit, 2.5)\n\n term4 = np.sum(\n np.array(\n [\n (\n (\n 2.0\n * np.power(-1.0, n)\n / (factorial(n) * ((6 * n - 5) * (2 * n - 1)))\n )\n * (\n (1.0 + redshift) ** (2.5 - (3 * n))\n * (wavelength[w] / lambda_limit) ** (3 * n)\n - (wavelength[w] / lambda_limit) ** 2.5\n )\n )\n for n in np.arange(1, n_transitions_low)\n ]\n ),\n axis=0,\n )\n\n tau_l_lls[w] = n0 * ((term1 - term2) * term3 - term4)\n\n tau_taun = np.sum(tau_n[2:n_transitions_max, :], axis=0)\n\n lambda_min_igm = (1 + redshift) * 70.0\n w = np.where(wavelength < lambda_min_igm)\n\n weight = np.ones_like(wavelength)\n\n if not lls_fact:\n # Below lambda_min_igm, transmission set to 0\n weight[w] = 0\n else:\n weight[w] = np.power(wavelength[w] / lambda_min_igm, 2.0)\n # Another weight using erf function can be used.\n # However, you would need to add: from scipy.special import erf\n # weight[w] = 0.5*(1.+erf(0.05*(wavelength[w]-lambda_min_igm)))\n\n igm_transmission = np.exp(-tau_taun - tau_l_igm - tau_l_lls) * weight\n\n # In case of using this module with data containing XRay data\n # The transmisson has to be set to 1 below an arbitrary wavelength\n # Otherwise there will be no flux in Xray. 
We chose 10 nm\n if Xcut:\n w2 = np.where(wavelength < Xlim * (1 + redshift))\n igm_transmission[w2] = 1\n\n # Check for acceptable values\n w = np.where(igm_transmission < 0)\n igm_transmission[w] = 0\n w = np.where(igm_transmission > 1)\n igm_transmission[w] = 1\n\n return igm_transmission", "title": "" }, { "docid": "322973b98b784da193b9cf7b3937b5b6", "score": "0.52103126", "text": "def __init__(self, originalFrameSizeMs, frameShiftMs, sampleRate, melCoeffCount, numReconstructionIterations = 5, extraContext = 0, cutoff = 7900, normFactor = 1.0, useLogMels = True, name='GriffinLim'):\n super(GriffinLimSynthesis, self).__init__(name=name)\n self.useLogMels = useLogMels\n\n # Make sure no integer accidents happen\n frameSizeMs = float(originalFrameSizeMs)\n frameShiftMs = float(frameShiftMs)\n sampleRate = float(sampleRate)\n\n # Frame size and shift\n self.frameShiftMs = frameShiftMs\n self.sampleRate = sampleRate\n self.fftSize = int((frameSizeMs / 1000.0) * self.sampleRate)\n self.frameShift = int((frameShiftMs / 1000.0) * self.sampleRate)\n\n # Set block length accounting for overlap\n self.contextWidth = int(frameSizeMs / frameShiftMs)\n self.blockLen = self.contextWidth * 2 + 1 + extraContext\n\n # Length for ring buffers\n self.inputBufferLength = int(self.blockLen * 2.5)\n self.outputBufferLength = int(self.fftSize + self.frameShift * self.blockLen * 2.5)\n\n # Buffers and positions\n self.inputBuffer = []\n self.outputBuffer = []\n self.windowBuffer = []\n self.inputBufferPos = 0\n self.outputBufferPosMs = 0\n self.framePos = 0\n self.rfc = 0\n self.startTime = time.time()\n\n # Processing parameters\n self.normFactor = normFactor\n\n self.fftWindow = scipy.blackman(self.fftSize)\n self.numReconstructionIterations = numReconstructionIterations\n\n filterOrd = int((sampleRate / 1000.0) * frameShiftMs / 32.0)\n self.filterNumerator, self.filterDenominator = signal.iirfilter(\n filterOrd,\n float(cutoff) / float((sampleRate / 2)),\n btype=\"lowpass\"\n )\n self.filterState = signal.lfiltic(self.filterNumerator, self.filterDenominator, np.array([]))\n\n specSize = int(int((frameSizeMs / 1000.0) * sampleRate) / 2 + 1)\n self.melFilter = mel.MelFilterBank(specSize, melCoeffCount, sampleRate)", "title": "" }, { "docid": "24b101c5b798b48942013047f3034c6c", "score": "0.5209045", "text": "def powerSpectrum(input, nfft):\n freq = fft(input, nfft)\n return freq.real**2 + freq.imag**2", "title": "" }, { "docid": "cf6741d7653b821d3db4c21c87d001d0", "score": "0.5208492", "text": "def create_cepstrum(data):\n spectrum = np.fft.fft(data)\n freqs = np.fft.fftfreq(len(spectrum))\n #l = len(data)\n imax = np.argmax(np.abs(spectrum))\n fs_max = freqs[imax]\n frequencies_max = abs(fs_max * frame_rate)\n #freq = imax * fs / l\n #frequencies.append(freq)\n #print(frequencies)\n log_spectrum = np.log(np.abs(spectrum))\n cepstrum = np.fft.ifft(log_spectrum).real\n min_freq, max_freq = 600, 700\n start = int(frame_rate / max_freq)\n end = int(frame_rate / min_freq)\n narrowed_cepstrum = cepstrum[start:end]\n peak_ix = narrowed_cepstrum.max()\n freq0 = frame_rate / (start + peak_ix)\n if freq0 < min_freq or freq0 > max_freq:\n # Ignore the note out of the desired frequency range\n return\n\n return freq0", "title": "" }, { "docid": "8f2466f73ee092489f999e03b8886929", "score": "0.51975805", "text": "def power_spectrum(input_sig,\n fs=8000,\n win_time=0.025,\n shift=0.01,\n prefac=0.97):\n window_length = int(round(win_time * fs))\n overlap = window_length - int(shift * fs)\n framed = framing(input_sig, 
window_length, win_shift=window_length-overlap).copy()\n \n\n # Pre-emphasis filtering is applied after framing to be consistent with stream processing\n #framed = pre_emphasis(framed, prefac)\n\n l = framed.shape[0]\n n_fft = 2 ** int(numpy.ceil(numpy.log2(window_length)))\n # Windowing has been changed to hanning which is supposed to have less noisy sidelobes\n # ham = numpy.hamming(window_length)\n window = numpy.hanning(window_length+2)\n window=window[1:-1]\n spec = numpy.ones((l, int(n_fft / 2) + 1), dtype=PARAM_TYPE)\n #log_energy = numpy.log((framed**2).sum(axis=1))\n dec = 500000\n start = 0\n stop = min(dec, l)\n while start < l:\n ahan = framed[start:stop, :] * numpy.matlib.repmat(window, stop-start, 1)\n mag = numpy.fft.rfft(ahan, n_fft, axis=-1)\n spec[start:stop, :] = mag.real**2 + mag.imag**2\n start = stop\n stop = min(stop + dec, l)\n\n return spec #, log_energy", "title": "" }, { "docid": "41c411fb46a20c8f9c4d67078434de56", "score": "0.5192051", "text": "def calc_frequency(self, prescale):\n return int(round(self.__oscillator_clock / ((prescale + 1) * 4096.0)))", "title": "" }, { "docid": "1889238ec9aa3afd7f4e06a210d8ebe6", "score": "0.5185577", "text": "def frequency_spectrum(self,data,sampling_frequency=1):\n\n fourierTransform = np.fft.fft(data)/len(data)\n fourierTransform = fourierTransform[range(int(len(data)/2))]\n\n tpCount=len(data)\n values=np.arange(int(tpCount/2))\n timePeriod=tpCount/sampling_frequency\n frequencies=values/timePeriod\n\n return frequencies, np.absolute(fourierTransform)/(np.sum(np.absolute(fourierTransform)))", "title": "" }, { "docid": "d9d72a3882b3ef970ffaa1d2b2efc7f5", "score": "0.5181761", "text": "def _flux_power_bins(vmax, npix):\n #Get the frequency component\n kf = np.fft.rfftfreq(npix)\n #Units:\n #The largest frequency scale is the velocity scale of the box,\n #not 1/nbins as rfftfreq gives.\n #Adjust Fourier convention.\n kf *= 2.0*math.pi * npix/vmax\n return kf", "title": "" }, { "docid": "9dff0acb9b0990375bfbe20892bf6885", "score": "0.5180949", "text": "def frequency(data, output='spectraldensity', scaling='power', sides='one',\n taper=None, halfbandwidth=3, NW=None, duration=None,\n overlap=0.5, step=None, detrend='linear', n_fft=None,\n log_trans=False, centend='mean'):\n if output not in ('spectraldensity', 'complex', 'csd'):\n raise TypeError('output can be \"spectraldensity\", \"complex\" or \"csd\",'\n ' not \"{output}\"')\n if 'time' not in data.list_of_axes:\n raise TypeError('\\'time\\' is not in the axis ' + str(data.list_of_axes))\n if len(data.list_of_axes) != data.index_of('time') + 1:\n raise TypeError('\\'time\\' should be the last axis') # this might be improved\n\n if duration is not None and output == 'complex':\n raise ValueError('cannot average the complex spectrum over multiple epochs')\n\n if output == 'csd' and data.number_of('chan') != 2:\n raise ValueError('CSD can only be computed between two channels')\n\n if duration is not None:\n nperseg = int(duration * data.s_freq)\n if step is not None:\n nstep = int(step * data.s_freq)\n else:\n nstep = nperseg - int(overlap * nperseg)\n\n freq = ChanFreq()\n freq.attr = deepcopy(data.attr)\n freq.s_freq = data.s_freq\n freq.start_time = data.start_time\n freq.axis['chan'] = copy(data.axis['chan'])\n freq.axis['freq'] = empty(data.number_of('trial'), dtype='O')\n if output == 'complex':\n freq.axis['taper'] = empty(data.number_of('trial'), dtype='O')\n freq.data = empty(data.number_of('trial'), dtype='O')\n\n for i in range(data.number_of('trial')):\n x = 
data(trial=i)\n if duration is not None:\n x = _create_subepochs(x, nperseg, nstep)\n\n f, Sxx = _fft(x,\n s_freq=data.s_freq,\n detrend=detrend,\n taper=taper,\n output=output,\n sides=sides,\n scaling=scaling,\n halfbandwidth=halfbandwidth,\n NW=NW,\n n_fft=n_fft)\n\n if log_trans:\n Sxx = log(Sxx)\n\n if duration is not None:\n if centend == 'mean':\n Sxx = Sxx.mean(axis=-2)\n elif centend == 'median':\n Sxx = median(Sxx, axis=-2)\n else:\n raise ValueError('Invalid central tendency measure. '\n 'Use mean or median.')\n\n freq.axis['freq'][i] = f\n if output == 'complex':\n freq.axis['taper'][i] = arange(Sxx.shape[-1])\n if output == 'csd':\n newchan = ' * '.join(freq.axis['chan'][i])\n freq.axis['chan'][i] = asarray([newchan], dtype='U')\n freq.data[i] = Sxx\n\n return freq", "title": "" }, { "docid": "e4d5fa6ed8e74199ae8f2b1067351d7d", "score": "0.51797026", "text": "def windowing(input):\n w = hamming(input.shape[1], sym=False)\n\n # window shape (for explanation)\n # plt.figure()\n # plt.plot(w)\n # plt.title('Hamming window')\n # plt.xlabel('sample')\n # plt.ylabel('amplitude')\n # plt.show()\n\n return input * w", "title": "" }, { "docid": "d4184a10844f3f34705ea56681b6eac0", "score": "0.5179686", "text": "def EncodeSingleChannel(data,codingParams):\r\n\r\n # prepare various constants\r\n halfN = codingParams.nMDCTLines\r\n N = 2*halfN\r\n nScaleBits = codingParams.nScaleBits\r\n maxMantBits = (1<<codingParams.nMantSizeBits) # 1 isn't an allowed bit allocation so n size bits counts up to 2^n\r\n if maxMantBits>16: maxMantBits = 16 # to make sure we don't ever overflow mantissa holders\r\n sfBands = codingParams.sfBands\r\n # vectorizing the Mantissa function call\r\n# vMantissa = np.vectorize(Mantissa)\r\n\r\n # compute target mantissa bit budget for this block of halfN MDCT mantissas\r\n bitBudget = codingParams.targetBitsPerSample * halfN # this is overall target bit rate\r\n bitBudget -= nScaleBits*(sfBands.nBands +1) # less scale factor bits (including overall scale factor)\r\n bitBudget -= codingParams.nMantSizeBits*sfBands.nBands # less mantissa bit allocation bits\r\n\r\n\r\n # window data for side chain FFT and also window and compute MDCT\r\n timeData = data\r\n mdctTimeData = win(data)\r\n mdctLines = MDCT(mdctTimeData, halfN, halfN)[:halfN]\r\n\r\n # compute overall scale factor for this block and boost mdctLines using it\r\n maxLine = np.max( np.abs(mdctLines) )\r\n overallScale = ScaleFactor(maxLine,nScaleBits) #leading zeroes don't depend on nMantBits\r\n mdctLines *= (1<<overallScale)\r\n\r\n\r\n\r\n # compute the mantissa bit allocations\r\n # compute SMRs in side chain FFT\r\n SMRs = CalcSMRs(timeData, mdctLines, overallScale, codingParams.sampleRate, sfBands)\r\n # perform bit allocation using SMR results\r\n\r\n # global bitAlloc\r\n bitAlloc = BitAlloc(bitBudget, maxMantBits, sfBands.nBands, sfBands.nLines, SMRs)\r\n\r\n \r\n # given the bit allocations, quantize the mdct lines in each band\r\n scaleFactor = np.empty(sfBands.nBands,dtype=np.int32)\r\n nMant=halfN\r\n for iBand in range(sfBands.nBands):\r\n if not bitAlloc[iBand]: nMant-= sfBands.nLines[iBand] # account for mantissas not being transmitted\r\n mantissa=np.empty(nMant,dtype=np.int32)\r\n iMant=0\r\n for iBand in range(sfBands.nBands):\r\n lowLine = sfBands.lowerLine[iBand]\r\n highLine = sfBands.upperLine[iBand] + 1 # extra value is because slices don't include last value\r\n nLines= sfBands.nLines[iBand]\r\n scaleLine = np.max(np.abs( mdctLines[lowLine:highLine] ) )\r\n scaleFactor[iBand] = 
ScaleFactor(scaleLine, nScaleBits, bitAlloc[iBand])\r\n if bitAlloc[iBand]:\r\n mantissa[iMant:iMant+nLines] = vMantissa(mdctLines[lowLine:highLine],scaleFactor[iBand], nScaleBits, bitAlloc[iBand])\r\n iMant += nLines\r\n # end of loop over scale factor bands\r\n\r\n # return results\r\n return (scaleFactor, bitAlloc, mantissa, overallScale)", "title": "" }, { "docid": "2b09b83ddd3af448bd957494192513c9", "score": "0.5177151", "text": "def gen_phase_shift(freqs,offset):\n w = 2*numpy.pi*freqs * 1e6 #Convert MHz -> Hz\n return numpy.exp(1j*w*offset*1e-9) #delay in ns", "title": "" }, { "docid": "636d13767614eb4841693c34b8f77d96", "score": "0.5172169", "text": "def scale_freq(self, factor):\n return zulko.scale_freq(self.get_numpy_samples(), factor)", "title": "" }, { "docid": "bfc742293916fe2560b4103fea8a41d2", "score": "0.5171486", "text": "def reduce(self):\n nshift=(self.degree/2)*2 # minimum shift we want\n ashift=self.shift # actual shift we have\n nshift=max(((ashift+1)/2)*2,nshift) # new shift\n # WARNING: The following is suboptimal, the signal could be extended\n # asymmetrically\n if sometrue((array(self.c.shape)+1) % 2): # some dimension is even -> extend\n nshift+=1\n if ashift<nshift: # extend the signal using MirrorOnBoundary if needed\n c=bigtools.maddmirrored(self.c,nshift-ashift)\n else:\n c=self.c\n # now the shift is nshift and it is even\n c=bigpyramid.mbsreduce(c,self.degree,\n bcond=TBoundaryConvention['MirrorOnBounds']) # reduce\n return MSplineSignal(c=c,xmax=self.xmax/2.0,shift=nshift/2,\n degree=self.degree)", "title": "" }, { "docid": "3dc038e3e2cc1f5da35bc18f5d80647d", "score": "0.51621264", "text": "def mel_frequencies(n_mels=128, fmin=0.0, fmax=11025.0):\n def mel_to_hz(mels):\n \"\"\"Convert mel bin numbers to frequencies\n \"\"\"\n mels = np.atleast_1d(mels)\n\n # Fill in the linear scale\n f_min = 0.0\n f_sp = 200.0 / 3\n freqs = f_min + f_sp * mels\n\n # And now the nonlinear scale\n min_log_hz = 1000.0 # beginning of log region (Hz)\n min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)\n logstep = np.log(6.4) / 27.0 # step size for log region\n log_t = (mels >= min_log_mel)\n\n freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))\n\n return freqs\n\n def hz_to_mel(frequencies):\n \"\"\"Convert Hz to Mels\n \"\"\"\n frequencies = np.atleast_1d(frequencies)\n\n # Fill in the linear part\n f_min = 0.0\n f_sp = 200.0 / 3\n\n mels = (frequencies - f_min) / f_sp\n\n # Fill in the log-scale part\n\n min_log_hz = 1000.0 # beginning of log region (Hz)\n min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)\n logstep = np.log(6.4) / 27.0 # step size for log region\n\n log_t = (frequencies >= min_log_hz)\n mels[log_t] = min_log_mel + np.log(frequencies[log_t]/min_log_hz) / logstep\n\n return mels\n\n ''' mel_frequencies body starts '''\n # 'Center freqs' of mel bands - uniformly spaced between limits\n min_mel = hz_to_mel(fmin)\n max_mel = hz_to_mel(fmax)\n\n mels = np.linspace(min_mel, max_mel, n_mels)\n\n return mel_to_hz(mels)", "title": "" }, { "docid": "eab8515c5e8a2a3633da8634ea7659bb", "score": "0.5140852", "text": "def FT(u):\r\n U = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(u)))\r\n return U", "title": "" }, { "docid": "ee62783ef65fd6ca04282ddbe10e4982", "score": "0.5139725", "text": "def qtransform(fseries, Q, f0):\n\n # q-transform data for each (Q, frequency) tile\n\n # initialize parameters\n qprime = Q / 11**(1/2.) # ... 
self.qprime\n dur = fseries.to_timeseries().duration\n\n # check for sampling rate\n sampling = (len(fseries) - 1) * 2 * fseries.delta_f\n\n # window fft\n window_size = 2 * int(f0 / qprime * dur) + 1\n\n # get start and end indices\n start = (f0 - (f0 / qprime)) * dur\n end = start + window_size\n\n # apply window to fft\n # normalize and generate bi-square window\n norm = np.sqrt(315. * qprime / (128. * f0))\n windowed = fseries[start:end] * (bisquare(window_size) * norm)\n\n # choice of output sampling rate\n output_sampling = sampling # Can lower this to highest bandwidth\n output_samples = dur * output_sampling\n\n # pad data, move negative frequencies to the end, and IFFT\n padded = np.pad(windowed, padding(window_size, output_samples), mode='constant')\n wenergy = npfft.ifftshift(padded)\n\n # return a 'TimeSeries'\n wenergy = FrequencySeries(wenergy, delta_f=1./dur)\n tdenergy = TimeSeries(zeros(output_samples, dtype=np.complex128),\n delta_t=1./sampling)\n ifft(wenergy, tdenergy)\n cenergy = TimeSeries(tdenergy,\n delta_t=tdenergy.delta_t, copy=False)\n energy = type(cenergy)(\n cenergy.real() ** 2. + cenergy.imag() ** 2.,\n delta_t=1, copy=False)\n medianenergy = np.median(energy)\n norm_energy = energy / medianenergy\n \n return norm_energy, cenergy", "title": "" }, { "docid": "0f91aadd8ac73cbbb01fef1bae48d801", "score": "0.5136222", "text": "def lazy_fft(x):\n return np.log(np.abs(np.fft.fftshift(np.fft.fft2(x))))", "title": "" }, { "docid": "8c52d798c91a4a42f147980ddfe33936", "score": "0.5119014", "text": "def fft_gr_to_fq(g, rstep, rmin):\n if g is None:\n return g\n padrmin = int(round(rmin / rstep))\n npad1 = padrmin + len(g)\n\n # pad to the next power of 2 for fast Fourier transformation\n npad2 = (1 << int(math.ceil(math.log(npad1, 2)))) * 2\n # sine transformations needs an odd extension\n\n npad4 = 4 * npad2\n # gpadc array has to be doubled for complex coefficients\n gpadc = np.zeros(npad4)\n # gpadc2 = np.zeros(npad4)\n # copy the original g signal\n # ilo = 0\n # ilo = padrmin\n # ilo = len(g)\n gpadc[:2 * len(g):2] = g[:]\n # gpadc2[:2 * len(g):2] = g[:]\n '''\n for i in range(len(g)):\n gpadc[2 * ilo] = g[i]\n ilo += 1\n # '''\n # assert_allclose(gpadc2, gpadc)\n # copy the odd part of g skipping the first point,\n # because it is periodic image of gpadc[0]\n gpadc[-2:-2 * len(g) + 1:-2] = -1 * g[1:]\n # gpadc2[-2:-2 * len(g) + 1:-2] = -1 * g[1:]\n '''\n ihi = 2 * npad2 - 1\n for ilo in range(1, npad2):\n gpadc[2 * ihi] = -1 * gpadc[2 * ilo]\n ihi -= 1\n '''\n # assert_allclose(gpadc2, gpadc)\n # plt.plot(gpadc)\n # plt.show()\n\n # gpadcfft = np.fft.ihfft(gpadc)\n gpadcfft = np.fft.ifft(gpadc)\n # plt.plot(gpadcfft.imag)\n # plt.show()\n\n f = np.zeros(npad2, dtype=complex)\n # f2 = np.zeros(npad2, dtype=complex)\n f[:] = gpadcfft[:npad2 * 2:2] * npad2 * rstep\n # f2[:] = gpadcfft[:npad2 * 2:2] * npad2 * rstep\n '''\n for i in range(npad2):\n # f[i] = gpadcfft[2 * i + 1] * npad2 * rstep\n f[i] = gpadcfft[2 * i] * npad2 * rstep\n assert_allclose(f2, f)\n # '''\n return f.imag", "title": "" }, { "docid": "c1a7177967fc31519be6eda65af41013", "score": "0.51113904", "text": "def compute_ffmc2d(X):\n # 2d-fft\n fft2 = scipy.fftpack.fft2(X)\n\n # Magnitude\n fft2m = magnitude(fft2)\n\n # FFTshift and flatten\n fftshift = scipy.fftpack.fftshift(fft2m).flatten()\n\n #cmap = plt.cm.get_cmap('hot')\n #plt.imshow(np.log1p(scipy.fftpack.fftshift(fft2m)).T, interpolation=\"nearest\",\n # aspect=\"auto\", cmap=cmap)\n #plt.show()\n\n # Take out redundant components\n return 
fftshift[:fftshift.shape[0] // 2 + 1]", "title": "" }, { "docid": "2b54635c67261d21258fc1c51d221755", "score": "0.5106731", "text": "def butterworth_hp(nyq, nsamp, fc, n, return_freq=False):\r\n dt = 0.5/nyq\r\n \r\n freq = np.fft.fftshift(np.fft.fftfreq(nsamp, dt))\r\n w = 2*np.pi*freq\r\n wc = 2*np.pi*fc\r\n \r\n filt = np.sqrt(-1/(1 + (w/wc)**(2.0*n)) + 1)\r\n \r\n # Un-shift the frequencies\r\n filt = np.fft.ifftshift(filt)\r\n freq = np.fft.ifftshift(freq)\r\n \r\n if return_freq:\r\n return filt, freq\r\n else:\r\n return filt", "title": "" }, { "docid": "943a0188087fe92636880807e3c931ed", "score": "0.5095792", "text": "def fft_helper(signal):\n\n signal_copy = signal.copy()\n\n while not is_power_of_2(len(signal_copy)):\n signal_copy.append(0)\n\n N = len(signal_copy)\n\n if not isinstance(signal_copy[0], hpc):\n signal_copy = [hpc(item) for item in signal_copy]\n\n # base cases\n if N == 2:\n return [signal_copy[0] + signal_copy[1], signal_copy[0] - signal_copy[1]]\n if N == 1:\n return [signal_copy[0]]\n\n # split X to even, odd1, and odd3 elements\n X_even = fft_helper([signal[index] for index in range(N) if index % 2 == 0])\n X_odd1 = fft_helper([signal[index] for index in range(N) if index != 0 and (index - 1) % 4 == 0])\n X_odd3 = fft_helper([signal[index] for index in range(N) if index != 0 and (index - 3) % 4 == 0])\n\n # solve for the twiddle factors to be used\n w1k = [w(1, k, N) for k in range(N)]\n w3k = [w(1, 3 * k, N) for k in range(N)]\n\n # compute for sum and diff odd\n sum_odd = [X_odd1[index] * w1k[index] + X_odd3[index] * w3k[index] for index in range(len(X_odd1))]\n diff_odd = [X_odd1[index] * w1k[index] - X_odd3[index] * w3k[index] for index in range(len(X_odd1))]\n\n # Compute for the quadrants of X\n E = len(X_even) // 2\n\n X0 = [X_even[index] + sum_odd[index] for index in range(E)]\n X1 = [X_even[index + E] - diff_odd[index] * 1j for index in range(E)]\n X2 = [X_even[index] - sum_odd[index] for index in range(E)]\n X3 = [X_even[index + E] + diff_odd[index] * 1j for index in range(E)]\n\n X = flatten_list([X0, X1, X2, X3])\n\n return X", "title": "" }, { "docid": "4ff7b283c69916a94591ca666c304da9", "score": "0.5095541", "text": "def fft(self):\r\n # Create a linear space for equally distributed samples\r\n n_of_samples = len(self.edge2ref_dist[0])\r\n edge_length = self.edge2ref_dist[0][-1]\r\n t = np.linspace(0, edge_length, n_of_samples, endpoint=True)\r\n\r\n # Print information\r\n dt = t[1] - t[0]\r\n fa = 1.0 / dt # scan frequency\r\n # print('dt=%.5f mm (Sampling distance)' % dt)\r\n # print('fa=%.2f samples/mm' % fa)\r\n\r\n # Displacement values (the signal)\r\n s = self.edge2ref_dist[1]\r\n\r\n # Perform fft without windowing\r\n Y = np.fft.fft(s)\r\n N = n_of_samples // 2 + 1\r\n\r\n # Frequency domain x-Axis with 'frequencies' up to Nyquist\r\n X = edge_length * np.linspace(0, fa / 2, N, endpoint=True)\r\n\r\n # Perform fft with windowing\r\n # Window functions: Choose one of the three\r\n black = np.blackman(len(s))\r\n Yblack = np.fft.fft(black * s)\r\n\r\n # With different windowing filters\r\n hann = np.hanning(len(s))\r\n hamm = np.hamming(len(s))\r\n # Yhann = np.fft.fft(hann * s)\r\n # Yhamm = np.fft.fft(hamm * s)\r\n\r\n # Collect the results\r\n fft_results = {\r\n \"widths\": edge_length / np.linspace(2, 40, 20),\r\n \"width_ratios\": np.linspace(2, 40, 20),\r\n \"amps\": 2.0 * np.abs(Y[1:21]) / N,\r\n \"amps_blackman\": 2.0 * np.abs(Yblack[1:21]) / N,\r\n \"amps/width\": (2.0 * np.abs(Y[1:21]) / N)/(edge_length / np.linspace(2, 40, 
20)),\r\n \"amps_black/width\": (2.0 * np.abs(Yblack[1:21]) / N)/(edge_length / np.linspace(2, 40, 20))\r\n }\r\n\r\n self.fft_results = fft_results", "title": "" }, { "docid": "32ad90ac26b9c806633f784514b83ad0", "score": "0.5095044", "text": "def _inv_shift_fourier(self, k: torch.Tensor, n_dims: int) -> torch.tensor:\n out: torch.tensor = torch.fft.ifftn(\n torch.fft.ifftshift(k, dim=tuple(range(-n_dims, 0))), dim=tuple(range(-n_dims, 0))\n ).real\n return out", "title": "" }, { "docid": "19f9cd764185e47d2fefda9c88cd413c", "score": "0.50877476", "text": "def _rotate_powerlaw(data, Fs, delta_f, f_rotation=30):\n\n # compute FFT and frequency axis\n FC = np.fft.fft(data)\n f_axis = np.fft.fftfreq(len(data), 1. / Fs)\n\n # make the 1/f mask\n f_mask = np.zeros_like(f_axis)\n f_mask[1:] = 10**(np.log10(np.abs(f_axis[1:])) * (delta_f / 2))\n f_mask[0] = 1.\n\n # normalize power at rotation frequency\n f_mask = f_mask / f_mask[np.where(f_axis >= f_rotation)[0][0]]\n\n return np.real(np.fft.ifft(FC * f_mask))", "title": "" }, { "docid": "d7b8fac57140727e8c40d4f7e161f1b0", "score": "0.50876045", "text": "def unscaled_FFT(signal, dt):\n N = len(signal)\n dc = np.mean(signal) # DC component\n signal = signal - np.mean(signal) # Removing DC component \n df = 1/(N*dt)\n\n L = np.arange(0, np.floor(N/2), dtype=int) # 1st half of the spectrum\n \n # Frequency domain\n freq = df*np.arange(N)\n fhat = fft(signal, N) \n mag = abs(fhat)\n \n power = mag**2 \n\n return freq, mag, fhat, L, dc, power", "title": "" }, { "docid": "12c6829b3d77222762d94fb45d48b04b", "score": "0.50846225", "text": "def istft(real,imag,length,\n sample_rate=44100,\n frame_length=46,\n frame_shift=10,\n window_type=\"hanning\",\n preemphasis=0.0,\n device=torch.device('cuda'),\n square_root_window=True):\n real = real.permute(0,3,2,1)\n imag = imag.permute(0,3,2,1)\n spectrum = torch.cat([real,imag],dim=-1)\n\n hop_length = int(sample_rate * frame_shift / 1000)\n win_length = int(sample_rate * frame_length / 1000)\n\n # num_point = fft_point(win_length)\n num_point = win_length\n if ('cuda' in str(device)):\n window = get_window(num_point, window_type, square_root_window).cuda(device)\n else:\n window = get_window(num_point, window_type, square_root_window)\n\n wav = torch_istft(spectrum, num_point, hop_length=hop_length,\n win_length=window.shape[0], window=window)\n return wav[...,:length]", "title": "" }, { "docid": "f6bd8ff31376ecc52ba07b564c5e2b6e", "score": "0.5069331", "text": "def ifftshift(x, dim=None):\n if dim is None:\n dim = tuple(range(x.dim()))\n shift = [(dim + 1) // 2 for dim in x.shape]\n elif isinstance(dim, int):\n shift = (x.shape[dim] + 1) // 2\n else:\n shift = [(x.shape[i] + 1) // 2 for i in dim]\n return roll(x, shift, dim)", "title": "" }, { "docid": "36f1b9693f9ad52bed24c53deaba70da", "score": "0.5067922", "text": "def bispectrum_signal(signal: np.ndarray,\n n_fft: int = 256,\n hop_length: int = 128,\n eps: float = 1e-8):\n\n _stft = stft(signal, n_fft=n_fft, hop_length=hop_length)\n nfft = _stft.shape[0]\n freq_sum = np.arange(nfft)[:, None] + np.arange(nfft)\n cut_freq = np.min(np.nonzero(np.diagonal(freq_sum) >= nfft - 1)[0])\n arg = np.arange(cut_freq)\n\n num = np.mean(_stft[arg, None, :] * _stft[None, arg, :] *\n np.conjugate(_stft[freq_sum[:cut_freq, :cut_freq], :]), axis=-1)\n\n denum = np.sqrt(np.mean(np.abs(_stft[arg, None, :] * _stft[None, arg, :]) ** 2, axis=-1) *\n np.mean(np.abs(_stft[freq_sum[:cut_freq, :cut_freq], :]) ** 2, axis=-1))\n\n bispectrum = num / (denum + eps)\n magnitude = 
np.abs(bispectrum)\n phase = np.arctan2(bispectrum.imag, bispectrum.real)\n\n return magnitude, phase", "title": "" }, { "docid": "5bdbd3407e868122c3a71a455f7f5f35", "score": "0.5058911", "text": "def GammaTEn0(n, l, freq):\n return np.pi * sqrt(((2*n+1)/l)**2 - (2 * freq / c0)**2)", "title": "" }, { "docid": "8833d301700a5794a6284d6f80e44486", "score": "0.5055989", "text": "def get_freq(self):\n scales=self.cw.getscales()\n # scale frequencies and correct with sample rate\n freq=1/(self.cw.fourierwl*scales)*self.sample_rate\n return freq", "title": "" }, { "docid": "2f2d05b6529c01bfbb283dc51abdeee9", "score": "0.5054318", "text": "def sinkhorn(x, steps=1, temp=1):\n x = F.softmax(x / temp, dim=-1)\n for _ in range(steps):\n x = x / x.sum(dim=-1, keepdim=True).clamp(min=1e-8)\n x = x / x.sum(dim=-2, keepdim=True).clamp(min=1e-8)\n return x", "title": "" }, { "docid": "14df80c4584cab29eb6d81aca7267fe6", "score": "0.5051384", "text": "def iFT(u):\r\n U = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(u)))\r\n return U", "title": "" }, { "docid": "3bdccfc070ddf436ad800165f91a593a", "score": "0.5050343", "text": "def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):\n\n if len(freq) != len(gain):\n raise ValueError('freq and gain must be of same length.')\n\n if nfreqs is not None and numtaps >= nfreqs:\n raise ValueError('ntaps must be less than nfreqs, but firwin2 was '\n 'called with ntaps=%d and nfreqs=%s'\n % (numtaps, nfreqs))\n\n if freq[0] != 0 or freq[-1] != nyq:\n raise ValueError('freq must start with 0 and end with `nyq`.')\n d = np.diff(freq)\n if (d < 0).any():\n raise ValueError('The values in freq must be nondecreasing.')\n d2 = d[:-1] + d[1:]\n if (d2 == 0).any():\n raise ValueError('A value in freq must not occur more than twice.')\n\n if numtaps % 2 == 0 and gain[-1] != 0.0:\n raise ValueError(\"A filter with an even number of coefficients must \"\n \"have zero gain at the Nyquist rate.\")\n\n if nfreqs is None:\n nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))\n\n # Tweak any repeated values in freq so that interp works.\n eps = np.finfo(float).eps\n for k in range(len(freq)):\n if k < len(freq) - 1 and freq[k] == freq[k + 1]:\n freq[k] = freq[k] - eps\n freq[k + 1] = freq[k + 1] + eps\n\n # Linearly interpolate the desired response on a uniform mesh `x`.\n x = np.linspace(0.0, nyq, nfreqs)\n fx = np.interp(x, freq, gain)\n\n # Adjust the phases of the coefficients so that the first `ntaps` of the\n # inverse FFT are the desired filter coefficients.\n shift = np.exp(-(numtaps - 1) / 2. 
* 1.j * np.pi * x / nyq)\n fx2 = fx * shift\n\n # Use irfft to compute the inverse FFT.\n out_full = irfft(fx2)\n\n if window is not None:\n # Create the window to apply to the filter coefficients.\n from scipy.signal.signaltools import get_window\n wind = get_window(window, numtaps, fftbins=False)\n else:\n wind = 1\n\n # Keep only the first `numtaps` coefficients in `out`, and multiply by\n # the window.\n out = out_full[:numtaps] * wind\n\n return out", "title": "" }, { "docid": "325e5f60ee80415849d730056d97ac8c", "score": "0.50400907", "text": "def mel_to_hz(mels):\n mels = np.atleast_1d(mels)\n\n # Fill in the linear scale\n f_min = 0.0\n f_sp = 200.0 / 3\n freqs = f_min + f_sp * mels\n\n # And now the nonlinear scale\n min_log_hz = 1000.0 # beginning of log region (Hz)\n min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)\n logstep = np.log(6.4) / 27.0 # step size for log region\n log_t = (mels >= min_log_mel)\n\n freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))\n\n return freqs", "title": "" }, { "docid": "0e966ea31b43a76c91a401f972b92a83", "score": "0.5039109", "text": "def clean_wav(self, x, wsize=512, stride=None,\n n_fft=512, cuda=False):\n from scipy.fftpack import fft\n from scipy.signal import istft\n assert isinstance(x, np.ndarray), type(x)\n if stride is None:\n stride = wsize\n phases = []\n mags = []\n X_ = librosa.stft(x, n_fft=n_fft)\n #X_mag = np.log(np.abs(X_) ** 2 + 1)\n X_mag = np.log(np.abs(X_) ** 2)\n X_pha = np.angle(X_)\n X_mag = Variable(torch.FloatTensor(X_mag)).t()\n if cuda:\n X_mag = X_mag.cuda()\n if self.in_frames > 1:\n expns = []\n F_dim = int(X_mag.size(1))\n r = self.in_frames\n z_t = torch.zeros(r // 2, F_dim)\n if cuda:\n z_t = z_t.cuda()\n z_t = Variable(z_t)\n p_mag = torch.cat((z_t, X_mag, z_t), dim=0)\n for n in range(0, p_mag.size(0) - (r - 1)):\n mag_expn = p_mag[n:n+r, :].contiguous().view(1,\n -1)\n expns.append(mag_expn)\n expns = torch.cat(expns, dim=0)\n X_mag = expns\n pred_mag = self.dnn(X_mag)\n pred_mag = pred_mag.cpu().data.numpy()\n #pred_mag = np.exp(pred_mag) - 1\n pred_mag = np.exp(pred_mag)\n # trim negative if available\n #pred_mag[np.where(pred_mag < 0)] = 0\n pred_mag = np.sqrt(pred_mag).T\n X_back = pred_mag * np.exp(1j * X_pha)\n Y = librosa.istft(X_back)\n return Y", "title": "" }, { "docid": "e3a7dd341f4492e4d35516123669f25d", "score": "0.5036405", "text": "def data_FFT(data):\r\n data_fft = []\r\n for i in range(len(data)):\r\n rank_i = data[i]\r\n # print(rank1)\r\n times = np.arange(rank_i.size)\r\n freqs = nf.fftfreq(times.size, times[1] - times[0])\r\n xs = np.abs(freqs)\r\n complex_array = nf.fft(rank_i)\r\n ys = np.abs(complex_array)\r\n # ## plot signal in time domain\r\n # plt.figure()\r\n # plt.plot(times, rank_i)\r\n # plt.title(\"Signal[0] in Time Domain\")\r\n # plt.xlabel(\"Time\")\r\n # plt.ylabel(\"Amplitude\")\r\n # plt.show()\r\n # ## plot signal in frequency domain\r\n # plt.figure()\r\n # plt.plot(xs, ys)\r\n # plt.xlabel(\"Frequency\")\r\n # plt.title('Frequency Domain', fontsize=16)\r\n # plt.ylabel('Amplitude', fontsize=12)\r\n # plt.tick_params(labelsize=10)\r\n # plt.grid(linestyle=':')\r\n # plt.show()\r\n\r\n ## find peaks in frequency domain\r\n peak_id, peak_property = find_peaks(ys, height=6, distance=10)\r\n peak_freq = xs[peak_id]\r\n peak_height = peak_property['peak_heights']\r\n peak_freq = np.unique(peak_freq)\r\n if peak_freq is not None:\r\n peak_freq = np.append(peak_freq[0], peak_freq[-1]) # select first and last peaks\r\n peak_height = 
np.unique(peak_height)\r\n if peak_height is not None:\r\n peak_height = np.append(peak_height[0], peak_height[-1])\r\n else:\r\n print(\"peak_freq not found, change params\")\r\n # print('peak_freq',peak_freq)\r\n # print('peak_height',peak_height)\r\n data_i_fft = np.append(peak_freq, peak_height)\r\n # print(data_i_fft)\r\n data_fft.append(data_i_fft)\r\n data_fft = np.asarray(data_fft).reshape(len(data), 4) # generate new x_train from frequency domain\r\n # print(data_fft)\r\n return data_fft", "title": "" }, { "docid": "8b78fffeadbf21ab191c44e3d0fe79b0", "score": "0.50291264", "text": "def powerSpectrum(input, nfft):\n from scipy.fftpack import fft\n freq = fft(input, nfft)\n return np.abs(freq) ** 2", "title": "" }, { "docid": "2c1143f409858ba570b931e578d5e0cb", "score": "0.5026813", "text": "def inverse_fourier(constants, n0, steps=100):\n ts = np.linspace(0, 1, steps+1, dtype=np.float64)\n f = []#np.zeros(steps, dtype=np.complex128)\n ns = np.array(range(n0, n0+len(constants)))\n for i, t in enumerate(ts, start=0):\n # Not sure why I need to divide by (len(ns)/steps) but data is bad otherwise\n f_t = constants*(e**(-ns*2*pi*complex(0, 1)*t))/(len(ns)/steps)\n f.append(sum(f_t))\n\n return np.array(f)\n\n\n#def set_convolve_(sig1, sig2):\n steps = len(sig1)\n dt = 1/steps\n sig3 = np.zeros((steps,))\n for x in range(steps):\n total = 0\n for t in range(0, steps):\n total += sig1[t]*sig2[x-t]*dt\n sig3[x] = total\n return sig3\n\n\n#def set_convolve(sig1, sig2, a, b, x):\n steps = max(len(sig1), len(sig2))\n kernel_len = min(len(sig1), len(sig2))\n ts = np.linspace(a, b, steps)\n index_x = len(np.array([t for t in ts if not t <= x])) #getindex(ts, x)\n dt = 1/steps\n total = 0\n for t in range(0, kernel_len):\n if index_x - t < 0: continue\n total += sig1[t]*sig2[index_x-t]*dt\n return total", "title": "" }, { "docid": "ab5dade4f512b8f084021cad25b09caa", "score": "0.50206673", "text": "def windowing(input):\n from scipy.signal import hamming\n return input * hamming(input.shape[1], sym=False)", "title": "" }, { "docid": "f7414dabbb45d9175a91818d1d74dd9c", "score": "0.50203925", "text": "def forward(ctx, input, args, val=0, get_mask=get_hyper_mask,\n onesided=True):\n # ctx.save_for_backward(input)\n # print(\"round forward\")\n FFTBandFunctionComplexMask2D.mark_dirty(input)\n\n N, C, H, W = input.size()\n\n if H != W:\n raise Exception(\"We support only squared input.\")\n\n if args.next_power2:\n H_fft = next_power2(H)\n W_fft = next_power2(W)\n pad_H = H_fft - H\n pad_W = W_fft - W\n input = torch_pad(input, (0, pad_W, 0, pad_H), 'constant', 0)\n else:\n H_fft = H\n W_fft = W\n xfft = torch.rfft(input,\n signal_ndim=FFTBandFunctionComplexMask2D.signal_ndim,\n onesided=onesided)\n del input\n\n _, _, H_xfft, W_xfft, _ = xfft.size()\n # assert H_fft == W_xfft, \"The input tensor has to be squared.\"\n\n mask, _ = get_mask(H=H_xfft, W=W_xfft,\n compress_rate=args.compress_fft_layer,\n val=val, interpolate=args.interpolate,\n onesided=onesided)\n mask = mask[:, 0:W_xfft, :]\n # print(mask)\n mask = mask.to(xfft.dtype).to(xfft.device)\n xfft = xfft * mask\n\n if ctx is not None:\n ctx.xfft = xfft\n if args.is_DC_shift:\n ctx.xfft = shift_DC(xfft, onesided=onesided)\n\n # xfft = shift_DC(xfft, onesided=onesided, shift_to=\"center\")\n # xfft = shift_DC(xfft, onesided=onesided, shift_to=\"corner\")\n out = torch.irfft(input=xfft,\n signal_ndim=FFTBandFunctionComplexMask2D.signal_ndim,\n signal_sizes=(H_fft, W_fft),\n onesided=onesided)\n out = out[..., :H, :W]\n return out", "title": "" 
}, { "docid": "77fc5e4a56623a50ebe8a3626496ce7b", "score": "0.5007808", "text": "def morlet_freq_1d(filt_opt): \n\n# sigma0 = 2/sqrt(3);\n sigma0 = 2./np.sqrt(3)\n \n # Calculate logarithmically spaced, band-pass filters.\n# xi_psi = filt_opt.xi_psi * 2.^((0:-1:1-filt_opt.J)/filt_opt.Q);\n# sigma_psi = filt_opt.sigma_psi * 2.^((0:filt_opt.J-1)/filt_opt.Q);\n xi_psi = filt_opt['xi_psi']*np.power(2., (np.arange(0, -filt_opt['J'], step=-1, dtype=float)/filt_opt['Q']))\n sigma_psi = filt_opt['sigma_psi']*np.power(2., (np.arange(filt_opt['J'], dtype=float)/filt_opt['Q']))\n \n # Calculate linearly spaced band-pass filters so that they evenly\n # cover the remaining part of the spectrum\n# step = pi * 2^(-filt_opt.J/filt_opt.Q) * ...\n# (1-1/4*sigma0/filt_opt.sigma_phi*2^(1/filt_opt.Q))/filt_opt.P;\n if filt_opt['P'] > 0:\n step = np.pi * 2**(-float(filt_opt['J'])/filt_opt['Q']) * (1-1./4*sigma0/filt_opt['sigma_phi'] * 2**(1./filt_opt['Q']))/filt_opt['P']\n else:\n assert filt_opt['P'] >= 0\n step = 0.\n# xi_psi(filt_opt.J+1:filt_opt.J+filt_opt.P) = filt_opt.xi_psi * ...\n# 2^((-filt_opt.J+1)/filt_opt.Q) - step * (1:filt_opt.P);\n xi_psi = np.concatenate((xi_psi, filt_opt['xi_psi']*np.power(2., float(-filt_opt['J']+1)/filt_opt['Q'])-step*np.r_[1:filt_opt['P']+1]))\n# sigma_psi(filt_opt.J+1:filt_opt.J+1+filt_opt.P) = ...\n# filt_opt.sigma_psi*2^((filt_opt.J-1)/filt_opt.Q);\n sigma_psi = np.concatenate((sigma_psi, np.ones(filt_opt['P']+1,dtype=float)*filt_opt['sigma_psi']*np.power(2., float(filt_opt['J']-1)/filt_opt['Q'])))\n #sigma_psi[(filt_opt['J']+1):(filt_opt['J']+filt_opt['P']+2)] = filt_opt['sigma_psi']*np.power(2.,[(filt_opt['J']-1)/filt_opt['Q']])\n\n #print sigma_psi[2]\n # Calculate band-pass filter\n# sigma_phi = filt_opt.sigma_phi * 2^((filt_opt.J-1)/filt_opt.Q);\n# \n sigma_phi = filt_opt['sigma_phi']*np.power(2., float(filt_opt['J']-1)/filt_opt['Q'])\n \n # Convert (spatial) sigmas to (frequential) bandwidths\n# bw_psi = pi/2 * sigma0./sigma_psi;\n bw_psi = np.pi/2 *sigma0/sigma_psi\n \n# if ~filt_opt.phi_dirac\n# bw_phi = pi/2 * sigma0./sigma_phi;\n# else\n# bw_phi = 2 * pi;\n# end\n if not filt_opt['phi_dirac']:\n bw_phi = np.pi/2 *sigma0/sigma_phi\n else:\n bw_phi = 2*np.pi\n bw_phi = np.atleast_1d(bw_phi)\n\n return xi_psi, bw_psi, bw_phi", "title": "" }, { "docid": "46e30f58169be6d637410d07ee69b67c", "score": "0.5005388", "text": "def freq_eff(self):", "title": "" }, { "docid": "cab7aad76bc7bc97f2b9dc00b5b21db2", "score": "0.49975318", "text": "def fft2_im(image):\n\n attrs = {'units': 'm^-1'}\n\n freq_x = fftshift(fftfreq(len(image.x), np.diff(image.x)[0]))\n freq_x = xr.DataArray(freq_x, dims=('freq_x',), coords={'freq_x': freq_x}, attrs=attrs)\n\n freq_y = fftshift(fftfreq(len(image.y), np.diff(image.y)[0]))\n freq_y = xr.DataArray(freq_y, dims=('freq_y',), coords={'freq_y': freq_y}, attrs=attrs)\n\n return xr.DataArray(fftshift(fft2(image.data)), coords=(freq_x, freq_y), )", "title": "" }, { "docid": "dfa4502f890a288160df74001dfa6eed", "score": "0.49946326", "text": "def rebin(data,masked,freq,binscale):\n if binscale > 1:\n new_data = zeros(len(data)/binscale)\n new_mask = zeros(len(data)/binscale)\n new_freq = zeros(len(data)/binscale)\n f=0\n for f in range(0, len(new_data)-1):\n if len(masked[f*binscale:(f+1)*binscale])==sum(masked[f*binscale:(f+1)*binscale]):\n new_data[f] = 1.0\n else: \n test_data = ma.array(data[f*binscale:(f+1)*binscale],mask=masked[f*binscale:(f+1)*binscale])\n test_data_con = ma.compressed(test_data)\n new_data[f] = ma.mean(test_data_con)\n if 
sum(masked[f*binscale:(f+1)*binscale])>=binscale/2.:\n new_mask[f] = 1.0\n new_freq[f] = ma.mean(freq[f*binscale:(f+1)*binscale])\n if len(masked[(f+1)*binscale:-1])==sum(masked[(f+1)*binscale:-1]):\n new_data[-1] = 1.0\n else:\n test_data = ma.array(data[(f+1)*binscale:-1],mask=masked[(f+1)*binscale:-1])\n test_data_con = ma.compressed(test_data) \n new_data[-1] = ma.mean(test_data_con)\n if sum(masked[(f+1)*binscale:-1])>=1.:\n new_mask[-1] = 1.0\n new_freq[-1] = ma.mean(freq[(f+1)*binscale:-1])\n elif binscale == 1:\n new_data = data\n new_mask = masked\n new_freq = freq\n \n return new_data,new_mask,new_freq", "title": "" }, { "docid": "d3304054792ae3896e21e936e1efd0a1", "score": "0.49944085", "text": "def fourier_transform_peak(sig, time_step):\n ft = abs(np.fft.fft(sig, n=50000))\n # freq = np.fft.fftfreq(len(sig), time_step)\n freq = np.fft.fftfreq(50000, time_step)\n peak = np.argmax(ft)\n return abs(freq[peak])", "title": "" }, { "docid": "aa16c40f8cf278d034c27fcfdfb884f9", "score": "0.49921092", "text": "def ormsby(f1, f2, f3, f4, dt, nsamp):\r\n \r\n f1 = float(f1)\r\n f2 = float(f2)\r\n f3 = float(f3)\r\n f4 = float(f4)\r\n dt = float(dt)\r\n nsamp = int(nsamp)\r\n \r\n # Calculate slope and y-int for low frequency ramp\r\n if f1 == f2:\r\n pass\r\n else:\r\n M1 = 1/(f2-f1)\r\n b1 = -M1*f1\r\n \r\n # Calculate slope and y-int for high frequency ramp\r\n if f3 == f4:\r\n pass\r\n else:\r\n M2 = -1/(f4-f3)\r\n b2 = -M2*f4\r\n \r\n # Initialize frequency and filter arrays\r\n freq = np.fft.fftfreq(nsamp, dt)\r\n freq = np.fft.fftshift(freq)\r\n filt = np.zeros(nsamp)\r\n \r\n # Build low frequency filter ramp\r\n idx = np.nonzero((np.abs(freq)>=f1) & (np.abs(freq)<f2))\r\n if f1 == f2:\r\n filt[idx] = 0\r\n else:\r\n filt[idx] = M1*np.abs(freq)[idx]+b1\r\n \r\n # Build central filter flat\r\n idx = np.nonzero((np.abs(freq)>=f2) & (np.abs(freq)<=f3))\r\n filt[idx] = 1.0\r\n \r\n # Build high frequency filter ramp\r\n idx = np.nonzero((np.abs(freq)>f3) & (np.abs(freq)<=f4))\r\n if f3 == f4:\r\n filt[idx] = 0\r\n else:\r\n filt[idx] = M2*np.abs(freq)[idx]+b2\r\n \r\n # Un-shift the frequencies\r\n filt = np.fft.ifftshift(filt)\r\n \r\n return filt", "title": "" }, { "docid": "aa16c40f8cf278d034c27fcfdfb884f9", "score": "0.49921092", "text": "def ormsby(f1, f2, f3, f4, dt, nsamp):\r\n \r\n f1 = float(f1)\r\n f2 = float(f2)\r\n f3 = float(f3)\r\n f4 = float(f4)\r\n dt = float(dt)\r\n nsamp = int(nsamp)\r\n \r\n # Calculate slope and y-int for low frequency ramp\r\n if f1 == f2:\r\n pass\r\n else:\r\n M1 = 1/(f2-f1)\r\n b1 = -M1*f1\r\n \r\n # Calculate slope and y-int for high frequency ramp\r\n if f3 == f4:\r\n pass\r\n else:\r\n M2 = -1/(f4-f3)\r\n b2 = -M2*f4\r\n \r\n # Initialize frequency and filter arrays\r\n freq = np.fft.fftfreq(nsamp, dt)\r\n freq = np.fft.fftshift(freq)\r\n filt = np.zeros(nsamp)\r\n \r\n # Build low frequency filter ramp\r\n idx = np.nonzero((np.abs(freq)>=f1) & (np.abs(freq)<f2))\r\n if f1 == f2:\r\n filt[idx] = 0\r\n else:\r\n filt[idx] = M1*np.abs(freq)[idx]+b1\r\n \r\n # Build central filter flat\r\n idx = np.nonzero((np.abs(freq)>=f2) & (np.abs(freq)<=f3))\r\n filt[idx] = 1.0\r\n \r\n # Build high frequency filter ramp\r\n idx = np.nonzero((np.abs(freq)>f3) & (np.abs(freq)<=f4))\r\n if f3 == f4:\r\n filt[idx] = 0\r\n else:\r\n filt[idx] = M2*np.abs(freq)[idx]+b2\r\n \r\n # Un-shift the frequencies\r\n filt = np.fft.ifftshift(filt)\r\n \r\n return filt", "title": "" }, { "docid": "c2e1199e62fc69be410f82256f1c617a", "score": "0.4990501", "text": "def 
_shift_fourier(self, x: torch.Tensor, n_dims: int) -> torch.tensor:\n out: torch.tensor = torch.fft.fftshift(torch.fft.fftn(x, dim=tuple(range(-n_dims, 0))), \n dim=tuple(range(-n_dims, 0)))\n return out", "title": "" }, { "docid": "d7c325fd4b6d68b31b6a40138ed250d8", "score": "0.4985221", "text": "def _fft(x, s_freq, detrend='linear', taper=None, output='spectraldensity',\n sides='one', scaling='power', halfbandwidth=4, NW=None, n_fft=None):\n if output == 'complex' and sides == 'one':\n print('complex always returns both sides')\n sides = 'two'\n\n axis = x.ndim - 1\n n_smp = x.shape[axis]\n\n if n_fft is None:\n n_fft = n_smp\n\n if sides == 'one':\n freqs = np_fft.rfftfreq(n_fft, 1 / s_freq)\n elif sides == 'two':\n freqs = fftpack.fftfreq(n_fft, 1 / s_freq)\n\n if taper is None:\n taper = 'boxcar'\n\n if taper == 'dpss':\n if NW is None:\n NW = halfbandwidth * n_smp / s_freq\n tapers, eig = dpss_windows(n_smp, NW, 2 * NW - 1)\n if scaling == 'chronux':\n tapers *= sqrt(s_freq)\n\n else:\n if taper == 'hann':\n tapers = windows.hann(n_smp, sym=False)[None, :]\n else:\n # TODO: it'd be nice to use sym=False if possible, but the difference is very small\n tapers = get_window(taper, n_smp)[None, :]\n\n if scaling == 'energy':\n rms = sqrt(mean(tapers ** 2))\n tapers /= rms * sqrt(n_smp)\n elif scaling != 'chronux':\n # idk how chronux treats other windows apart from dpss\n tapers /= norm(tapers)\n\n if detrend is not None:\n has_nan = isnan(x).any(axis=axis)\n if has_nan.any():\n x = x.copy()\n x[has_nan] = 0\n\n x = detrend_func(x, axis=axis, type=detrend)\n\n if has_nan.any():\n x[has_nan] = NaN\n\n tapered = tapers * x[..., None, :]\n\n if sides == 'one':\n result = np_fft.rfft(tapered, n=n_fft)\n elif sides == 'two':\n result = fftpack.fft(tapered, n=n_fft)\n\n if scaling == 'chronux':\n result /= s_freq\n elif scaling == 'fieldtrip':\n result *= sqrt(2 / n_smp)\n\n if output == 'spectraldensity':\n result = (result.conj() * result)\n elif output == 'csd':\n result = (result[None, 0, ...].conj() * result[None, 1, ...])\n\n if (sides == 'one' and output in ('spectraldensity', 'csd')\n and scaling != 'chronux'):\n if n_fft % 2:\n result[..., 1:] *= 2\n else:\n # Last point is unpaired Nyquist freq point, don't double\n result[..., 1:-1] *= 2\n\n if scaling == 'power':\n scale = 1.0 / s_freq\n elif scaling == 'energy':\n scale = 1.0 / n_smp\n else:\n scale = 1\n if output == 'complex' and scaling in ('power', 'energy'):\n scale = sqrt(scale)\n result *= scale\n\n if scaling == 'fieldtrip' and output in ('spectraldensity', 'csd'):\n # fieldtrip uses only one side\n result /= 2\n\n if output in ('spectraldensity', 'csd'):\n if output == 'spectraldensity':\n result = result.real\n result = mean(result, axis=axis)\n elif output == 'complex':\n # dpss should be last dimension in complex, no mean\n result = swapaxes(result, axis, -1)\n\n return freqs, result", "title": "" }, { "docid": "dfac6a0ae77a440d2e35d56ddc52e5aa", "score": "0.49845272", "text": "def _default_conversion_fn(x):\n \n freqs = np.linspace(0.100,0.200,N_FREQS) \n channel_width_in_GHz = np.mean(np.diff(freqs))\n\n return np.array(x) / channel_width_in_GHz", "title": "" }, { "docid": "b2ef60cee320e9204f3859c2f1bea125", "score": "0.49844417", "text": "def butterworth_lp(nyq, nsamp, fc, n, return_freq=False):\r\n dt = 0.5/nyq\r\n \r\n freq = np.fft.fftshift(np.fft.fftfreq(nsamp, dt))\r\n w = 2*np.pi*freq\r\n wc = 2*np.pi*fc\r\n\r\n filt = np.sqrt(1/(1+(w/wc)**(2.0*n)))\r\n \r\n # Un-shift the frequencies\r\n filt = 
np.fft.ifftshift(filt)\r\n freq = np.fft.ifftshift(freq)\r\n \r\n if return_freq:\r\n return filt, freq\r\n else:\r\n return filt", "title": "" }, { "docid": "4231026222ccb687bd76e8cfa30fbb5e", "score": "0.49832383", "text": "def frequency(data, interval):\n last_d = None\n for d in phase_inversions(data, interval):\n if last_d is None:\n last_d = d\n continue\n # 500 because we a looking for half cycles over milliseconds\n yield ((d - last_d) / 2 + d), 500 / (d - last_d)\n last_d = d", "title": "" }, { "docid": "2a369c0ee11fb67a954c34d68654b3e0", "score": "0.49811098", "text": "def harmonic_product_spectrum(self) -> float:\n window = np.hanning(len(self.data)) * self.data.flatten()\n rate = 1 / self.duration\n\n dft = abs(scipy.fftpack.fft(window)[:len(window) // 2])\n dft = self.reduce_white_noise(dft, rate)\n dft = self.interpolate_spectrum(dft)\n\n product_spectrum = copy.deepcopy(dft)\n\n for start in range(MAX_DOWNSAMPLING):\n product_spectrum = np.multiply(product_spectrum[:int(np.ceil(len(dft) / (start + 1)))], dft[::(start + 1)])\n\n return np.argmax(product_spectrum) * rate / MAX_DOWNSAMPLING", "title": "" }, { "docid": "34078d7181f725fdd74a48220e0a627e", "score": "0.4977533", "text": "def istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0):\n lengthWindow = np.array(window.size)\n numberFrequencies, numberFrames = np.array(X.shape)\n lengthData = hopsize * (numberFrames - 1) + lengthWindow\n \n data = np.zeros(lengthData)\n for n in np.arange(numberFrames):\n beginFrame = n * hopsize\n endFrame = beginFrame + lengthWindow\n frameTMP = np.fft.irfft(X[:,n], nfft)\n frameTMP = frameTMP[:lengthWindow]\n data[beginFrame:endFrame] = data[beginFrame:endFrame] \\\n + window * frameTMP\n \n # remove the extra bit before data that was - supposedly - added\n # in the stft computation:\n data = data[(lengthWindow / 2.0):] \n return data", "title": "" }, { "docid": "2123d3dedf04d2b1c9e0b4ff7260a17c", "score": "0.49729487", "text": "def wavelet_transform(X, n_freqs, fsample, fmin, fmax,\n prob=True, omega0=5.0, log_scale=True,\n n_jobs=1, gpu=False):\n\n if gpu is True and cp is None:\n gpu = False\n warnings.warn('`gpu` set to True, but CuPy was not found, '\n 'using CPU with {:+.0f} thread(s). '\n 'See https://github.com/cupy/cupy#installation '\n 'for installation instructions'.format(n_jobs))\n\n X = X.astype(np.float32)\n # n_samples = X.shape[0]\n # n_features = X.shape[1]\n\n dtime = 1. / fsample\n\n # tmin = 1. / fmax\n # tmax = 1. / fmin\n\n # exponent = np.arange(0, n_freqs, dtype=np.float64)\n # exponent *= np.log(tmax / tmin)\n # exponent /= (np.log(2) * (n_freqs - 1))\n\n # periods = tmin * 2**exponent\n # freqs = np.flip(1. 
/ periods, axis=0)\n\n if log_scale:\n fmin_log2 = np.log(fmin) / np.log(2)\n fmax_log2 = np.log(fmax) / np.log(2)\n freqs = np.logspace(fmin_log2, fmax_log2,\n n_freqs, base=2)\n else:\n freqs = np.linspace(fmin, fmax, n_freqs)\n\n scales = (omega0 + np.sqrt(2 + omega0**2)) / (4 * np.pi * freqs)\n\n feed_dicts = [{\"X\": feature,\n \"freqs\": freqs,\n \"scales\": scales,\n \"dtime\": dtime,\n \"omega0\": omega0,\n \"gpu\": gpu}\n for feature in X.T]\n\n if n_jobs is not 1 and not gpu:\n pool = Parallel(n_jobs)\n convolved = pool.process(_morlet_fft_convolution_parallel, feed_dicts)\n pool.close()\n else:\n convolved = list(map(_morlet_fft_convolution_parallel, feed_dicts))\n\n X_new = np.concatenate(convolved, axis=1)\n\n # for idx, conv in enumerate(convolved):\n # X_new[:, (n_freqs * idx):(n_freqs * (idx + 1))] = conv.T\n\n power = X_new.sum(axis=1, keepdims=True)\n\n if prob:\n X_new /= power\n\n if gpu:\n mempool = cp.get_default_memory_pool()\n pinned_mempool = cp.get_default_pinned_memory_pool()\n mempool.free_all_blocks()\n pinned_mempool.free_all_blocks()\n\n return freqs, power.flatten(), X_new", "title": "" }, { "docid": "1dfe1c2634e9ea03bf2d7c3f35ca68ab", "score": "0.49634263", "text": "def scaleogram(data, samp_rate = 100.0,wavelet = 'morlet' ,bb=6,what = 'Amp',axis = None):\n # enforce float for samp_rate\n samp_rate = float(samp_rate)\n\n # nfft needs to be an integer, otherwise a deprecation will be raised\n\n\n dscale = 0.1\n dtime = 1./samp_rate\n npts = data.shape[0]\n tt = np.arange(0,npts/samp_rate,1/samp_rate)\n xx = ml.autoscales(N=data.shape[0], dt=dtime, dj=dscale, wf=wavelet, p=bb)\n X = ml.cwt(x=data, dt=dtime, scales=xx, wf=wavelet, p=bb)\n freq = ml.fourier_from_scales(xx, wavelet, bb)\n freq = 1./freq\n\n# amp = X\n amp = np.abs(X)\n phase = np.angle(X)\n\n if not axis:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n else:\n ax = axis\n\n ax.set_yscale('log')\n if what == 'Amp':\n im=ax.pcolormesh(tt,freq,amp)\n else:\n im=ax.pcolormesh(tt,freq,phase)\n\n # set correct way of axis, whitespace before and after with window\n # length\n\n ax.axis('tight')\n # ax.set_xlim(0, end)\n ax.grid(False)\n ax.set_xlabel('Time [s]')\n ax.set_ylabel('Frequency [Hz]')\n\n if axis:\n return ax, im", "title": "" } ]
0d6919f49c0ebb2e84b311fbdd4ea193
This function returns the nth Fibonacci number.
[ { "docid": "13f70311b75a2ea21d39ab6d0482438f", "score": "0.7792695", "text": "def fib(n):\r\n i = 0\r\n j = 1\r\n n = n - 1\r\n\r\n while n >= 0:\r\n i, j = j, i + j\r\n n = n - 1\r\n \r\n return i", "title": "" } ]
[ { "docid": "270d8c585cead31811c63b02a378ee30", "score": "0.8607473", "text": "def nthFibonacci(index):\n # TODO: validate input (negatives, or non-integer)\n # happy case, weve already generated the sequence up to that point\n # we can return it in O(1)\n if len(fibonacci_sequence) > index:\n return fibonacci_sequence[index]\n\n # Well have to generate the sequence\n current_index = len(fibonacci_sequence) - 1\n for i in range(index - current_index):\n current_value = fibonacci_sequence[current_index + i]\n prev_value = fibonacci_sequence[current_index + i - 1]\n fibonacci_sequence.append(current_value + prev_value)\n\n return fibonacci_sequence[index]", "title": "" }, { "docid": "abc9cc477c1db102150fea0731a2e156", "score": "0.86073977", "text": "def fibonacci(n):\n return HelperFunctions._fibonacci_doubling(n)[0]", "title": "" }, { "docid": "be287c473b6a89db781537cacb773745", "score": "0.83856314", "text": "def fibnum(n):\n return fib(n)[-1]", "title": "" }, { "docid": "cdc6916fad0198ef4cd6b27022e4bab5", "score": "0.8369816", "text": "def Fibonacci_digit(n):\n if n == 1:\n return 1\n \n F = [1,1]\n while len(str(F[-1])) < n:\n F.append(F[-1] + F[-2])\n \n return F.index(F[-1]) + 1", "title": "" }, { "docid": "6aa1cafc664063f3a5773e04ff5139bf", "score": "0.8148372", "text": "def Fib(n):\n if n == 0: return 0\n elif n == 1: return 1\n else: return Fib(n-1)+Fib(n-2)", "title": "" }, { "docid": "375a3ab1f5fff093122f21ded0635454", "score": "0.8139003", "text": "def fib(n):\n if n <= 2:\n return 1\n else:\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "c147d2d1ceda77ab5d2558a5ad41b255", "score": "0.8122592", "text": "def fib(n):\n\tif (n < 1): return 0\n\tif (n == 1 or n == 2): return n\n\tthisfib = 0\n\ti = 3\n\tprevprev = 1\n\tprev = 2\n\twhile (i <= n):\n\t\tthisfib = prevprev + prev\n\t\tprevprev = prev\n\t\tprev = thisfib\n\t\ti = i + 1\n\treturn thisfib", "title": "" }, { "docid": "c7adf2f4cc7ec99c92fa45c8203ecb21", "score": "0.81157184", "text": "def fibonacci(n: int) -> int:\n if n < 2:\n return n\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "title": "" }, { "docid": "e729726ebefcca0f88e8a6f66b983d64", "score": "0.80817163", "text": "def fib(n):\n if n < 2:\n return n\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "198c400901b159d96d95f311af28111a", "score": "0.80552936", "text": "def get_fibonacci_last_digit_naive(n):\n\n if n <= 1:\n return n\n\n # Initialize array to store Fibonacci sequence\n fib_array = []\n for i in range(n):\n fib_array.append(0)\n\n fib_array[0] = 1\n fib_array[1] = 1\n for i in range(2,n):\n fib_array[i] = (fib_array[i-1] + fib_array[i-2]) % 10\n\n number = fib_array[n-1]\n return number", "title": "" }, { "docid": "6567d6d0a7919aeabf8d01aeb9ab2891", "score": "0.80542284", "text": "def fib(n):", "title": "" }, { "docid": "aa7e03b4098b6ee3d8cc1cc6deabd545", "score": "0.8045241", "text": "def fibonacci(n):\r\n\tpass", "title": "" }, { "docid": "6da2ced7f96a14303a715b02de2913b4", "score": "0.8043837", "text": "def fib(n):\n\n if n < 2:\n return n\n else:\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "01996a3abe7eaa607490bd93a57fb8d0", "score": "0.8036908", "text": "def fib(n):\n if n <= 1:\n return n\n else:\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "7d4a8adb854416bbb5a6a8147ebf23bf", "score": "0.80240047", "text": "def fibonacci(n):\n \n if (n < 0):\n print(\"Please enter a postive integer for n.\")\n elif (n == 0):\n return 0\n elif (n == 1):\n return 1\n else:\n nth = fibonacci(n-1) + fibonacci(n-2)\n return 
nth", "title": "" }, { "docid": "d2b31ab7a2258dd1f83408fc112dd560", "score": "0.79995245", "text": "def fibonacci(n):\n if n < 2:\n return 1\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "0050ac8182bc24278fc018f47481d819", "score": "0.7984205", "text": "def fib(n):\n if n == 0 or n == 1:\n return n\n else:\n return fib(n-2) + fib(n-1)", "title": "" }, { "docid": "74462425bbed98362becb3c00e352a33", "score": "0.79779446", "text": "def fib(n):\n if (n==1):\n return 1\n if (n==2):\n return 2\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "973dc119e7a0394f83ed58b6a3b28959", "score": "0.79774475", "text": "def fibonacci(n):\n if n == 1: return 0\n elif n == 2: return 1\n else: return fibonacci(n-1) + fibonacci(n-2)", "title": "" }, { "docid": "c1bec9e887fcf89bd998cf346137c539", "score": "0.7970798", "text": "def fibonacci(n: int) -> int:\n if n in (0, 1):\n return n\n return fibonacci(n - 2) + fibonacci(n - 1)", "title": "" }, { "docid": "4765d6d74d94666fae02ad388301d15c", "score": "0.7948881", "text": "def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "030fb7c78d199ee615fe5f2766a31e20", "score": "0.7945208", "text": "def fib(n):\n if n < 2:\n return n\n return exp(Matrix([[1, 1], [1, 0]]), n - 1)[0][0]", "title": "" }, { "docid": "70354751d7e898565821971f46d7555c", "score": "0.7944934", "text": "def fibonacci(n):\r\n if n > 1:\r\n return fibonacci(n-1) + fibonacci(n-2)\r\n else:\r\n return 1", "title": "" }, { "docid": "b531b8a5bfe72398b99a211ebffdb16c", "score": "0.79389566", "text": "def fibonacci(self, n):\n if n == 1 or n == 2:\n return 1\n return self.fibonacci(n - 1) + self.fibonacci(n - 2)", "title": "" }, { "docid": "2c55cd76f9fd6cd5cdf7a20abedab23e", "score": "0.79261494", "text": "def _calc_fib(n):\n if n <= 1:\n return n\n\n else:\n fibs = [0, 1]\n for _ in range(2, n):\n fibs.append(fibs[-1] + fibs[-2])\n return fibs[-1] + fibs[-2]", "title": "" }, { "docid": "f71ffdc1b334402c951f3c8ed94936c9", "score": "0.79217863", "text": "def fibonacci(n):\n if 0 <= n <= 1:\n return n\n\n n_minus1, n_minus2 = 1, 0\n\n result = None\n for f in range(n - 1):\n result = n_minus2 + n_minus1\n n_minus2 = n_minus1\n n_minus1 = result\n\n return result", "title": "" }, { "docid": "7a44f564d3249735ee3b21ba29c1d7f3", "score": "0.79199684", "text": "def fib(n):\n if n == 0 or n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)", "title": "" }, { "docid": "868f3763baf19192c25251697048c683", "score": "0.7913614", "text": "def fibonacci(n):\n if n<=1:\n return n\n else:\n return fibonacci(n-2)+fibonacci(n-1)", "title": "" }, { "docid": "523263dd2dcfd4e65f0da61dc62b3858", "score": "0.7910879", "text": "def fibonacci(n: int) -> int:\n if 0 <= n <=1:\n return n\n n_minus1, n_minus2 = 1, 0\n\n result = None\n for f in range(n - 1):\n result = n_minus2 +n_minus1\n n_minus2 = n_minus1\n n_minus1 = result\n return result", "title": "" }, { "docid": "8a4d7801cfdb90478812fb4ef583e03f", "score": "0.7896917", "text": "def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-1)+fib(n-2)", "title": "" }, { "docid": "f7482004f09fcef44914fc7b0ce701ee", "score": "0.7878671", "text": "def fib(n):\n if n == 1:\n return 0\n a, b = 0, 1\n k = 2\n while k < n:\n a, b = b, a + b\n k += 1\n return b", "title": "" }, { "docid": "b8476d043cf16645ed6c894f2c6f5fbe", "score": "0.78757143", "text": "def fibbonacci(n):\n if n in (0, 1):\n return n\n return fibbonacci(n-1) + fibbonacci(n-2)", 
"title": "" }, { "docid": "0c03f5f3600f6d498ba2f6f667eae598", "score": "0.7873991", "text": "def fibonacci(n):\n\n if n == 0:\n return 0\n\n if n == 1:\n return 1\n\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "a15cf5358fda30c1753775ca13226e2c", "score": "0.7868131", "text": "def fibonacci(n):\n\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "e7452decdf2559f7087cddccc691a120", "score": "0.78647274", "text": "def nextfib(n1,n2):\n n=n1+n2\n return n", "title": "" }, { "docid": "f2c42e94c96ac7994dc5f694b32a2cca", "score": "0.7862034", "text": "def fibonacci(n):\n if n ==1 or n ==2:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "title": "" }, { "docid": "327dca7e5a3b9fecd7522b0beec2beca", "score": "0.7861509", "text": "def fib(n: int) -> int:\n fib1 = 1\n fib2 = 1\n for _ in range(3, n+1):\n fib1, fib2 = fib2, fib1+fib2\n\n return fib2", "title": "" }, { "docid": "9a32f89b2def3ec36eea79b5d9c7cd96", "score": "0.78587115", "text": "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n return fibonacci(n-1) + fibonacci(n-2)", "title": "" }, { "docid": "378a28123c1c547b0b0672c70d5bb987", "score": "0.7846873", "text": "def fib(n):\n a, b, count = 0, 1, 1\n if n == 0:\n return a\n while count < n:\n a, b, count = b, a+b, count+1 # notice no need for temp!\n return b", "title": "" }, { "docid": "78f6ab11ba59cf2237a2689093f0d5b9", "score": "0.78442", "text": "def fibonacci(n):\n if n == 0:\n return 0\n if n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "d1308f472abb4ea6b0b05c27ca45d4eb", "score": "0.78252274", "text": "def fibonacci(n):\n return sum_series(n)", "title": "" }, { "docid": "ec8d219c20c15dcc3b74597e1ebd46ef", "score": "0.7824134", "text": "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 2) + fibonacci(n - 1)\n pass", "title": "" }, { "docid": "f0df9179ab3900e82e591dbf65e1be7d", "score": "0.78239584", "text": "def fib2 (n : int) -> int:\n\t\n\tif n == 0:\n\t\treturn 0\n\tif n == 1:\n\t\treturn 1\n\ta : int = 0\n\tb : int = 1\n\tc : int = 1\n\tfor _ in range(1, n):\n\t\tc = a + b\n\t\ta = b\n\t\tb = c\n\treturn c", "title": "" }, { "docid": "791eba1f0e8d295ff6b17e7051547708", "score": "0.78135055", "text": "def fib3(n):\n if n < 2:\n return n\n return fib3(n-1) + fib3(n-2)", "title": "" }, { "docid": "da931b2c088f47f200967517d3dba7f9", "score": "0.78021014", "text": "def calc_fib_iterative(n: int) -> int:\n previous, current = 0, 1\n\n for _ in range(2, n):\n previous, current = current, previous + current\n\n return previous + current", "title": "" }, { "docid": "0b82c6d4b443b1743d213d8ce33d5ba4", "score": "0.77992386", "text": "def calc_nth_fib(nth:int) -> int:\n # Check arguments.\n utils.check_arguments(\n antns=calc_nth_fib.__annotations__,\n lcls=locals())\n if nth < 0:\n raise ValueError(\n (\"`nth` must be >= 0\\n\" +\n \"nth = {nth}\").format(nth=nth))\n # Initialize Fibonacci sequence to reach nth Fibonacci number.\n maxlen = 2\n fibs_prev = collections.deque(maxlen=maxlen)\n fibs_prev.extend(range(maxlen))\n if nth < maxlen:\n fib = fibs_prev[nth]\n else:\n idxs_res = range((nth - maxlen) + 1)\n [fibs_prev.append(sum(fibs_prev)) for idx in idxs_res]\n fib = fibs_prev[-1]\n return fib", "title": "" }, { "docid": "e000d795b6cdd89b70ae86868c3dc977", "score": "0.7796089", "text": "def fibonacci(n):\n if n <= 0:\n return 0\n elif n == 1:\n 
return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "09dc3c0546fec1dd32c91cb5422d92ad", "score": "0.7791112", "text": "def fibonacci(n):\r\n fn = 0\r\n fn_1, fn_2 = 1, 1\r\n if n == 1 or n == 2: # checking if n is 1 or 2 then fibonacci value is 1\r\n return fn_1\r\n else:\r\n for i in range(n - 2):\r\n fn = fn_1 + fn_2\r\n fn_2, fn_1 = fn_1, fn\r\n return fn", "title": "" }, { "docid": "57700fa57a20ca317f3ffd2e30eb947e", "score": "0.77883065", "text": "def fib(n, cnt):\n cnt.increment()\n if n < 3:\n return 1\n else:\n return fib(n - 1, cnt) + fib(n - 2, cnt)", "title": "" }, { "docid": "588b2dfb9c4cbad3edceabbc5a3f33b0", "score": "0.7788249", "text": "def fibonacci(n):\n a, b = decimal.Decimal(0), decimal.Decimal(1)\n for i in decimal_range(0, n):\n a, b = b, a + b\n return a", "title": "" }, { "docid": "b1d1f5c0799ec9399054e1fe48e70a2f", "score": "0.778498", "text": "def fib4(n):\n if n == 0:\n return 0\n last, next_ = 0, 1\n for _ in range(1, n):\n last, next_ = next_, last + next_\n return next_", "title": "" }, { "docid": "5298bf14eee3f08750b00bbdd5571ff2", "score": "0.7783967", "text": "def fib(n):\n\tif n < 1:\n\t\treturn -1\n\telif n == 1 or n == 2:\n\t\treturn 1\n\telse:\n\t\treturn fib(n-1) + fib(n-2)", "title": "" }, { "docid": "17d5d392eee4668a1056a5da4defc4c3", "score": "0.7782109", "text": "def fibo(n):\n if n <= 1:\n return n\n else:\n return(fibo(n-1) + fibo(n-2))", "title": "" }, { "docid": "62bc8c0fd3c43d153b640e6322d860b8", "score": "0.7768476", "text": "def fibonacci(n):\n f2, f1 = 0, 1\n f = f2 + f1\n if n <= 0:\n return f2\n elif n == 1:\n return f1\n\n for _ in range(2, n + 1):\n f = f2 + f1\n f2, f1 = f1, f\n\n return f", "title": "" }, { "docid": "676f15fea2e630ef26587ef49f5f6eaf", "score": "0.7767489", "text": "def fib(n):\r\n\tif n < 1:\r\n\t\treturn None\r\n\tif n < 3:\r\n\t\treturn 1\r\n\treturn fib(n - 1) + fib(n - 2)", "title": "" }, { "docid": "a0595222d661bd74ca0a5b283eedf84f", "score": "0.77488077", "text": "def fibonacci(n):\n if n == 1:\n return 0\n elif n == 2:\n return 1\n else:\n results = []\n results.append(0)\n results.append(1)\n for i in range(2, n+1):\n new_value = fibonacci(i - 2) + fibonacci(i - 1)\n results.append(new_value)\n return results[n]", "title": "" }, { "docid": "dc0ebcfb2012cbf02b8daa933967410b", "score": "0.77469915", "text": "def fibonacci(n: int):\n return n if n <= 1 else fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "c771cbbc2a0e112905239e0165e9998e", "score": "0.7746942", "text": "def fib1(n):\n if n < 2:\n return n\n return fib1(n-1) + fib1(n-2)", "title": "" }, { "docid": "9ef08d6d9318da68f8fa879263f17b14", "score": "0.77368397", "text": "def fib_for(n,a,b):\n if n==0:\n return (a)\n elif n==1:\n return (b)\n else:\n F0 = a\n F1 = b\n for i in range(n-1):\n F2 = F0 + F1\n F0 = F1\n F1 = F2\n return (F2)", "title": "" }, { "docid": "23366f15b1961e72574ca1a1891ce6be", "score": "0.77294946", "text": "def fibonacci(n: int) -> int:\n #given (n), value of x(n)?\n # take in an integer\n #without the use of recursion\n\n fibonacci_number_one = 0\n fibonacci_number_two = 1\n if n < 0:\n print(\"invalid number\")\n elif n == 0:\n return fibonacci_number_one\n elif n == 1 or n == 2:\n return fibonacci_number_two\n else:\n for i in range(2, n+1):\n fibonacci_number_three = fibonacci_number_one + fibonacci_number_two\n fibonacci_number_one = fibonacci_number_two\n fibonacci_number_two = fibonacci_number_three\n return fibonacci_number_two", "title": "" }, { "docid": 
"a149b4d8100c24c45229b8c7bd32e757", "score": "0.7728462", "text": "def fib(digit):\n var_a, var_b = 1, 1\n if digit == 0:\n return 0\n for i in range(digit-1):\n var_a, var_b = var_b, var_a+var_b\n return var_a", "title": "" }, { "docid": "604a047a0f3b9e904e4b281082998b45", "score": "0.77269816", "text": "def fib(n):\n\n if n <= 2:\n return n\n if n in _fib_cache:\n return _fib_cache[n]\n result = fib(n - 1) + fib(n - 2)\n _fib_cache[n] = result\n return result", "title": "" }, { "docid": "7f85381c271da3bf6771b372d3b2ab54", "score": "0.77129406", "text": "def fibonacci(num):\n if num < 2:\n return num\n return fibonacci(num - 2) + fibonacci(num - 1)", "title": "" }, { "docid": "dac5567b885df22c88c929384293f6e4", "score": "0.76951355", "text": "def fib_series(number):\n i = 0\n j = 1\n counter = 2\n while counter < number:\n tmp = i + j\n i = j\n j = tmp\n counter += 1\n return j", "title": "" }, { "docid": "75ef6983e41008b0da10b22b927291d3", "score": "0.7688906", "text": "def fibonacci_generator(n):", "title": "" }, { "docid": "bbb14f2166205bf7de301e34f681d83a", "score": "0.7685499", "text": "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "title": "" }, { "docid": "bbb14f2166205bf7de301e34f681d83a", "score": "0.7685499", "text": "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "title": "" }, { "docid": "dee8f2f126b241c534c8e7a75675b80b", "score": "0.76830596", "text": "def fibonacci(n):\n if not isinstance(n, int):\n raise ValueError('n must be a non-negative integer')\n if n < 0:\n raise ValueError('n must be a non-negative integer') \n fibs = [0, 1]\n for i in range(2, n+1):\n fibs.append(fibs[i-1] + fibs[i-2])\n return fibs[n]", "title": "" }, { "docid": "9daa3307187e831c64332f64f790cbb0", "score": "0.76624477", "text": "def solution(n):\n fib = {0: 0, 1: 1}\n\n def compute_fib(n):\n \"\"\"\n generate Fibonaci number\n \"\"\"\n try:\n return fib[n]\n except KeyError:\n fib[n] = compute_fib(n-1) + compute_fib(n-2)\n return fib[n]\n\n return compute_fib(n)", "title": "" }, { "docid": "3a5981c2a5c49dd60011b3e0232c1bae", "score": "0.76604617", "text": "def fibonacci(n):\r\n\tif n<=0:\r\n\t\treturn \"Incorrect input.\"\r\n\telif n==1:\r\n\t\treturn 0\r\n\telif n==2:\r\n\t\treturn 1\r\n\telse:\r\n\t\treturn fibonacci(n-1)+fibonacci(n-2)", "title": "" }, { "docid": "4dd392648d759558b3a94e0504e70511", "score": "0.76464826", "text": "def get_fib_iteritive(pos):\n first = 0\n second = 1\n for fib_pos in range(pos - 1):\n second = first + second #set second as the sum of the previous two\n first = second - first #set first equal the previous second\n print second\n return second", "title": "" }, { "docid": "9cf7533b28741dd28600da31bf7a7d5b", "score": "0.76341593", "text": "def fibonacci_terc(n):\n return fibonacci(n - 1) + fibonacci(n - 2) if n >= 2 else (n if n >= 0 else 0)", "title": "" }, { "docid": "bfbc65852a43d9c174dc77adc108060f", "score": "0.76142406", "text": "def fibonacci(n):\n fibo = [0, 1]\n i = 1\n \n if n == 0:\n return 0\n else:\n while i <= n - 1:\n new = fibo[i - 1] + fibo[i]\n fibo.append(new)\n i += 1\n print(f\"Fibonacci of {n} is {fibo[-1]}\")\n return fibo[-1]", "title": "" }, { "docid": "567ef7b7f3d296a5edf69c7779db02dd", "score": "0.76047707", "text": "def fib(n): # write Fibonacci series up to n\n a, b = 0, 1\n evens = 0\n while a < n:\n if a%2 == 0:\n evens = evens + a\n a, b = b, a+b\n return evens", "title": "" }, { "docid": 
"31ec00eaac5f53eb7cebe172a8ebdc2c", "score": "0.75934047", "text": "def fibonacci(n):\n assert (n >= 0), 'n must be >= 0'\n\n if n in cache_fib:\n return cache_fib[n]\n res = fibonacci(n - 1) + fibonacci(n - 2)\n cache_fib[n] = res\n return res", "title": "" }, { "docid": "39e429d309a65fd33513c764bba2fc15", "score": "0.7585057", "text": "def fib_whl(n,a,b):\n if n==0:\n return (a)\n elif n==1:\n return (b)\n else:\n F0 = a\n F1 = b\n i = 1\n while i<n:\n F2 = F0 + F1\n F0 = F1\n F1 = F2\n i += 1\n return (F2)", "title": "" }, { "docid": "3440e73aee74dd74d5c18bdd3f9c75a2", "score": "0.7583712", "text": "def fib(n):\n prev, curr = 0, 1\n k = 2\n while k < n:\n prev, curr = curr, prev + curr\n k = k + 1\n return curr", "title": "" }, { "docid": "e8c3f91dd3056479ad6395a676c5cf37", "score": "0.7582562", "text": "def fibonacci(n):\n\tassert n > 0\n\tif n <= 317:\n\t\tf2, f1 = 1, 1\n\t\tfor x in xrange(2, n):\n\t\t\tf2, f1 = f2 + f1, f2\n\t\treturn f2\n\telse:\n\t\tans = [[1, 0], [0, 1]]\n\t\tfib = [[1, 1], [1, 0]]\n\t\twhile n != 0:\n\t\t\tif n%2 == 1:\n\t\t\t\tans = [[ans[0][0]*fib[0][0]+ans[0][1]*fib[1][0], ans[0][0]*fib[0][1]+ans[0][1]*fib[1][1]], [ans[1][0]*fib[0][0]+ans[1][1]*fib[1][0], ans[1][0]*fib[0][1]+ans[1][1]*fib[1][1]]]\n\t\t\tfib = [[fib[0][0]**2+fib[0][1]*fib[1][0], fib[0][0]*fib[0][1]+fib[0][1]*fib[1][1]], [fib[1][0]*fib[0][0]+fib[1][1]*fib[1][0], fib[1][0]*fib[0][1]+fib[1][1]**2]]\n\t\t\tn /= 2\n\t\treturn ans[0][1]", "title": "" }, { "docid": "0a9596e0ddd605be4e27ad07c0961471", "score": "0.7577191", "text": "def fibonacci(n):\n if n < 0:\n return None #bad input, throw error maybe\n if n == 0:\n return 0\n if n == 1:\n return 1\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "97998a2e3249a57c38130c92cd1e5970", "score": "0.75749063", "text": "def fib(n):\n cur, prev = 1,0\n ct = 0\n while ct < n:\n cur,prev = cur+prev,cur\n ct+=1\n print(prev)", "title": "" }, { "docid": "959f1898a26828103195bbdafe4cb92f", "score": "0.7565328", "text": "def fibonacci(n):\n\twhile n > 0:\n\t\tprint int(((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5)))\n\t\tn -= 1", "title": "" }, { "docid": "50752848857e8ae6bb53635ed1abb900", "score": "0.75614125", "text": "def fibo(num):\n if num < 3:\n if num == 1:\n return 1\n else:\n return 2\n else:\n return fibo(num-1) + fibo(num-2)", "title": "" }, { "docid": "50eea871c58925dba54abad7ad7baeac", "score": "0.7558028", "text": "def fibonacci(n: int) -> int:\r\n if not isinstance(n, int):\r\n raise TypeError(\r\n f'Fibonacci is only defined on integers, {type(n)} given.')\r\n if n <= 0:\r\n raise ValueError(\r\n f'Fibonacci is only defined on positive integers, {n} given.')\r\n if n <= 2:\r\n return (n)\r\n else:\r\n return fibonacci(n-1) + fibonacci(n-2)", "title": "" }, { "docid": "147738cb03601b6ea6931b44a1d5f78a", "score": "0.75523823", "text": "def fibonacci(n):\n fib_seq = [0, 1]\n\n for i in range(n):\n fib_seq.append(add(fib_seq[-1], fib_seq[-2]))\n print(fib_seq)\n return fib_seq[n]", "title": "" }, { "docid": "32826b428252f324e6c037e39fc9f6b2", "score": "0.7551179", "text": "def fibonacci(n):\n if n < 1:\n return None\n elif n == 1:\n return 0\n elif n == 2:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "title": "" }, { "docid": "df92148af2e1b7a371fa7b04747b96f3", "score": "0.7544068", "text": "def fib(n):\n\n if n == 0 or n==1:\n return n\n elif n<0:\n print(\"please input a valid number\") \n \n else:\n return fib(n-1)+fib(n-2)", "title": "" }, { "docid": "0d5dad087fe5e826a04a1237b567d5ca", "score": 
"0.75392187", "text": "def fibonacci_list(n):", "title": "" }, { "docid": "90d1df216e71102ee6d4232d6527a114", "score": "0.7525757", "text": "def fibonacci(n):\n\n number = [0, 1] # numbers to add together\n\n if n == 0:\n return(number[0])\n elif n == 1:\n return(number[1])\n else:\n for item in range(n - 1): # subtracts 1 to account for 0 index\n fib = number[0] + number[1]\n number.append(fib)\n number.pop(0)\n return(number[1])", "title": "" }, { "docid": "cb0251a4dd6b1cde42643fbbf2104a56", "score": "0.7509983", "text": "def v_fibonacci(n):\n if 0 <= n <= 1:\n return n\n n_minus1, n_minus2 = 1, 0\n result = None\n\n for i in range(n -1):\n result = n_minus1 + n_minus2\n n_minus2 = n_minus1\n n_minus1 = result\n return result", "title": "" }, { "docid": "76bd7f1ada0a028de8c573bcde3fae3d", "score": "0.75093055", "text": "def FibIter(n):\n # implement here\n if n < 0:\n print(\"Error: Input should be 0 or positive integer.\")\n return 0\n if n == 0: return 0\n\n # n>=1\n fib_big = 1; fib_small = 0\n while n > 1:\n tmp = fib_small\n fib_small = fib_big\n fib_big = Modulo(fib_small + tmp)\n n = n - 1\n return fib_big", "title": "" }, { "docid": "ae61dddb629744555616e46a248f3b4c", "score": "0.74954885", "text": "def fibonacci_dynamic(n):\n if n == 1 or n == 2:\n return 1\n b_up = [None] * 40\n b_up[0] = 0\n b_up[1] = 1\n b_up[2] = 2\n for i in range(2, n + 1):\n b_up[i] = b_up[i - 1] + b_up[i - 2]\n return b_up[n]", "title": "" }, { "docid": "761481eb28bfb9603ac9f49d59727c06", "score": "0.7487941", "text": "def fibonacci(term_num):\n\n term_num = int(term_num)\n a, b = 0, 1\n for i in range(1, term_num):\n a, b = b, b+a\n\n print(str(b))", "title": "" }, { "docid": "7d2c5158d3b638caf305c04cd66e7467", "score": "0.7483444", "text": "def fib(self, N: int) -> int:\n if N == 0 or N == 1:\n return N\n a, b = 0, 1\n for i in range(2, N + 1): # ่ฆๆ‰ง่กŒๅˆฐ N\n a, b = b, a + b\n return b", "title": "" }, { "docid": "ceb53ffa71032abf4730bc95abfcdad5", "score": "0.74819106", "text": "def fib_slow(number):\n number = int(number)\n if number in [0, 1]:\n return number\n else:\n return fib_slow(number-1) + fib_slow(number-2)", "title": "" }, { "docid": "eef3942cda0d4395e2525a82135ac1fc", "score": "0.74699664", "text": "def get_fibonacci_last_digit_last(n):\n arr = np.empty(shape=[n+1])\n\n for i in range(0, n+1):\n if i<=1:\n arr[i] = i\n else:\n # Store the last digit by finding the remainder using mod\n arr[i] = math.fmod(arr[i-1] + arr[i-2],10)\n\n return int(arr[n])", "title": "" }, { "docid": "dfbc529aea20a49dddcfb6fc04bc3b43", "score": "0.7456984", "text": "def fibonacci(x):\n if(x <= 1):\n return x\n return fibonacci(x-1) + fibonacci(x-2)", "title": "" }, { "docid": "7e72889300599e8ca4cea1b908d533fd", "score": "0.745298", "text": "def dynamic_fibonacci(num):\n answers = [0, 1]\n for num_x in range(2,num+1):\n answers.append(answers[num_x - 2] + answers[num_x - 1])\n return answers[-1]", "title": "" }, { "docid": "42a01f238d7d7a343d3f42263495165e", "score": "0.745041", "text": "def fib_flr(n,a,b):\n phi = (1+np.sqrt(5))/2\n return (int(np.floor(phi**n/np.sqrt(5)+1/2))+a+b-1)", "title": "" }, { "docid": "85504e2f578bc4be215fa36f9ff60f17", "score": "0.74098045", "text": "def fibonacci(n):\r\n # Base case\r\n if n <= 0:\r\n return 0\r\n elif n == 1:\r\n return 1\r\n\r\n # Recursive case\r\n return fibonacci(n-1) + fibonacci(n-2)", "title": "" } ]
cb42e71e00225186d6bb77be3ae9fbbc
Mask generator for coupling layers.
[ { "docid": "0212f6da636384e44ce7bdc4a481a1c7", "score": "0.53870505", "text": "def get_coupling_mask(n_dim, n_channel, n_mask, split_type='ChessBoard'):\n with jt.no_grad():\n masks = []\n if split_type == 'ChessBoard':\n if n_channel == 1:\n mask = jt.arange(0, n_dim, dtype='float32') % 2\n for i in range(n_mask):\n masks.append(mask)\n mask = 1. - mask\n else:\n raise NotImplementedError()\n return masks", "title": "" } ]
[ { "docid": "3864f70cbd28508ad8c3451c62984087", "score": "0.62455934", "text": "def gen_mask(word):\n return chain.from_iterable(\n [gen_mask_direction(word, mut) for mut in _directions])", "title": "" }, { "docid": "0a3928e295f68c9c0d3c5fa413c7530d", "score": "0.61573017", "text": "def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)\n return x_masks.unsqueeze(-2)", "title": "" }, { "docid": "192708968f6843f966902e54eee4d18c", "score": "0.60119015", "text": "def test_make_mask(self):\n pass", "title": "" }, { "docid": "a5ce41a015abdc8f00bb2772d4f360af", "score": "0.59735143", "text": "def _generate_scribble_mask(self, mask):\n mask = np.asarray(mask, dtype=np.uint8)\n side = np.sqrt(np.sum(mask > 0))\n\n mask_ = mask\n # kernel_size = int(self.kernel_size * side)\n kernel_radius = self.kernel_size * side * .5\n kernel_radius = min(kernel_radius, self.max_kernel_radius)\n # logging.verbose(\n # 'Erosion and dilation with kernel radius: {:.1f}'.format(\n # kernel_radius), 2)\n compute = True\n while kernel_radius > 1. and compute:\n kernel = disk(kernel_radius)\n mask_ = rank.minimum(mask.copy(), kernel)\n mask_ = rank.maximum(mask_, kernel)\n compute = False\n if mask_.astype(np.bool).sum() == 0:\n compute = True\n prev_kernel_radius = kernel_radius\n kernel_radius *= .9\n # logging.verbose('Reducing kernel radius from {:.1f} '.format(\n # prev_kernel_radius) +\n # 'pixels to {:.1f}'.format(kernel_radius), 1)\n\n mask_ = np.pad(\n mask_, ((1, 1), (1, 1)), mode='constant', constant_values=False)\n skel = medial_axis(mask_.astype(np.bool))\n skel = skel[1:-1, 1:-1]\n return skel", "title": "" }, { "docid": "459817a174b66337ccedf24359f60868", "score": "0.5831037", "text": "def generate_masks(self, enc_hiddens, source_lengths) -> torch.Tensor:\n enc_masks = torch.zeros(enc_hiddens.size(0), enc_hiddens.size(1), dtype=torch.float)\n for e_id, src_len in enumerate(source_lengths):\n enc_masks[e_id, src_len:] = 1\n return enc_masks.to(self.device)", "title": "" }, { "docid": "2881db95663227bd0bbe6b980133d5dd", "score": "0.5818742", "text": "def generate_scribbles(self, mask):\n\n # generate scribbles\n skel_mask = self._generate_scribble_mask(mask)\n G, P = self._mask2graph(skel_mask)\n S = self._acyclics_subgraphs(G)\n longest_paths_idx = [self._longest_path_in_tree(s) for s in S]\n longest_paths = [P[idx] for idx in longest_paths_idx]\n scribbles_paths = [\n bezier_curve(p, self.nb_points) for p in longest_paths\n ]\n\n output_resolution = tuple([mask.shape[0], mask.shape[1]])\n scribble_mask = scribbles2mask(scribbles_paths, output_resolution)\n\n return scribble_mask", "title": "" }, { "docid": "f7c4dd485ffbfc1a3e972d025c880915", "score": "0.5794566", "text": "def test_maskedgenerator():\n \n @maskedgenerator\n def genfun():\n for i in range(10):\n yield i\n \n tests = [\n ([True], [0,1,2,3,4,5,6,7,8,9]),\n ([False], []),\n ([True, False], [0,2,4,6,8]),\n ([False, True], [1,3,5,7,9]),\n ([True, False, True], [0,2,3,5,6,8,9]),\n ([True, False, False, False], [0,4,8])\n ]\n \n for mask, expected in tests:\n obtained = list(genfun(mask=mask))\n print(\"Mask\", mask)\n print(\"Expected\", expected)\n print(\"Obtained\", obtained)\n assert(expected == obtained)", "title": "" }, { "docid": "ac804861d8b551d0ca76914a7039aa09", "score": "0.5793494", "text": "def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = 
PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x", "title": "" }, { "docid": "83ed2465eda4856984d49e4aa5b5fa65", "score": "0.5676019", "text": "def createInitialMask(self, globalMapNoGo):\n self.mask = globalMapNoGo", "title": "" }, { "docid": "6acfbbccd36705384965a44074ea6761", "score": "0.56351286", "text": "def Mask(self) -> str:", "title": "" }, { "docid": "2720ed279136e228fb4be7eb49fa9362", "score": "0.5607031", "text": "def create_masks(inp, tar):\n # Encoder padding mask\n enc_padding_mask = create_padding_mask(inp)\n\n # Used in the 2nd attention block in the decoder.\n # This padding mask is used to mask the encoder outputs.\n dec_padding_mask = create_padding_mask(inp)\n\n # Used in the 1st attention block in the decoder.\n # It is used to pad and mask future tokens in the input received by\n # the decoder.\n look_ahead_mask = create_look_ahead_mask(tar.shape[1])\n dec_target_padding_mask = create_padding_mask(tar)\n look_ahead_mask = torch.maximum(dec_target_padding_mask, look_ahead_mask)\n return enc_padding_mask, look_ahead_mask, dec_padding_mask", "title": "" }, { "docid": "a3ffe524acb238a41b1266e3672cba14", "score": "0.5597936", "text": "def generate_lattice_circular(self, layers):", "title": "" }, { "docid": "99dbbc3b336cdc97bbf3b6b3b5090fb1", "score": "0.5586949", "text": "def special_target_masks_generation(self, masks: Dict[str, Dict[str, Tensor]]) -> Dict[str, Dict[str, Tensor]]:\n for module_name, module_masks in masks.items():\n # generate bias mask, this may move to wrapper in the future\n weight_mask = module_masks.get('weight', None)\n wrapper = self.pruner.get_modules_wrapper()[module_name]\n old_bias_mask = getattr(wrapper, 'bias_mask', None)\n if weight_mask is not None and old_bias_mask is not None and weight_mask.shape[0] == old_bias_mask.shape[0]:\n # keep dim 0 and reduce all other dims by sum\n reduce_dims = [reduce_dim for reduce_dim in range(1, len(weight_mask.shape))]\n # count unmasked number of values on dim 0 (output channel) of weight\n unmasked_num_on_dim0 = weight_mask.sum(reduce_dims) if reduce_dims else weight_mask\n module_masks['bias'] = (unmasked_num_on_dim0 != 0).type_as(weight_mask)\n return masks", "title": "" }, { "docid": "82a6a7e2397f527333ec1265e06a8592", "score": "0.5537495", "text": "def masker(self, x):\r\n return self.Masker.masker(x)", "title": "" }, { "docid": 
"dfba99908935ba3e534b1c854b4247a4", "score": "0.5517679", "text": "def genInImageMask(mask, vx, vy, interval):", "title": "" }, { "docid": "bc17358e99f2603de9204d00d95db735", "score": "0.5512355", "text": "def _generate_image_mask(self, inputs: tf.Tensor,\n target_shape: tf.Tensor) -> tf.Tensor:\n mask = tf.expand_dims(\n tf.cast(tf.not_equal(tf.reduce_sum(inputs, axis=-1), 0), inputs.dtype),\n axis=-1)\n mask = tf.image.resize(\n mask, target_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n return mask", "title": "" }, { "docid": "e03be84891307d7060b5daa7b2109db6", "score": "0.55054265", "text": "def generateMask(self, dim=(400, 400)):\n width, height = dim\n\n masks = dict()\n nm = len(self.objects)\n lines = dict()\n for i, obj in enumerate(self.objects):\n lines[obj] = (i+1)*255/nm\n\n for f, objects in self.data.iteritems():\n img = Image.new('L', (width, height), 0)\n if objects == None:\n masks[f] = np.array(img)\n else:\n for obj in objects:\n name = obj.keys()[0]\n l = lines[name]\n poly = obj[name][0]\n ImageDraw.Draw(img).polygon(poly, outline=l, fill=l)\n masks[f] = np.array(img)\n\n return masks", "title": "" }, { "docid": "1fbdb76f953cdcb5c7a4fde29a02530c", "score": "0.5499105", "text": "def _build_mask(self,nc):\n ma = to_cuda(torch.zeros([nc,1,784]))\n for i in range(nc):\n ma[i] = i*torch.ones([1,784])\n return to_cuda(ma)", "title": "" }, { "docid": "288769884c350a75147a0145a8da8c66", "score": "0.5471082", "text": "def _gen_bias_mask(max_length):\n np_mask = np.triu(np.full([max_length, max_length], -np.inf), 1)\n torch_mask = torch.from_numpy(np_mask).type(torch.FloatTensor)\n \n return torch_mask.unsqueeze(0).unsqueeze(1)", "title": "" }, { "docid": "8e363214520fa449935118ee88e8d50d", "score": "0.5468897", "text": "def createMask(self):\n tokens_ids = self.tokensToIds()\n mask = [[float(token > 0) for token in token_id] for token_id in tokens_ids]\n return mask", "title": "" }, { "docid": "0f6e60b257ae7c276c4cd0a91c654165", "score": "0.54666936", "text": "def generate_encoded_layers(self, embedding, attention_mask) -> List[Tensor]:", "title": "" }, { "docid": "7ce55926342a9cabae4bf25334120bce", "score": "0.5465139", "text": "def make_mask(image, prob_masked=0.5):\n return 1 - bernoulli.rvs(p=prob_masked, size=image.shape)", "title": "" }, { "docid": "40873166df05dda67ff0057e0995760a", "score": "0.54474825", "text": "def _get_mask(self):\n\n return self.tensors.get_placeholder_by_name('mask')", "title": "" }, { "docid": "83c81166db6d3050697d00a754e9d6c9", "score": "0.5446269", "text": "def generate_masks(self, input_lens):\n masks = torch.zeros((len(input_lens), input_lens.max())).to(self.args['device'])\n for i in range(len(input_lens)):\n masks[i, input_lens[i]:] = 1\n return masks", "title": "" }, { "docid": "83c81166db6d3050697d00a754e9d6c9", "score": "0.5446269", "text": "def generate_masks(self, input_lens):\n masks = torch.zeros((len(input_lens), input_lens.max())).to(self.args['device'])\n for i in range(len(input_lens)):\n masks[i, input_lens[i]:] = 1\n return masks", "title": "" }, { "docid": "e25d1ed120c548976f3331b5183c1ef0", "score": "0.5431723", "text": "def test_generate_mask():\n np.testing.assert_array_equal(\n generate_mask(MASK_COORDINATES),\n MASK_ARRAY,\n )", "title": "" }, { "docid": "4d17833e6c133300e2da895efae607af", "score": "0.5416431", "text": "def _make_masks(ilens, olens):\r\n in_masks = make_non_pad_mask(ilens) # (B, T_in)\r\n out_masks = make_non_pad_mask(olens) # (B, T_out)\r\n return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2) 
# (B, T_out, T_in)\r", "title": "" }, { "docid": "409e5979c0b2669655bcbda076afefb5", "score": "0.5413044", "text": "def mask(self):\n return self.__mask", "title": "" }, { "docid": "6a0859150234a3accda611350567aea5", "score": "0.5409406", "text": "def _gen_bias_mask(max_length):\n np_mask = np.triu(np.full([max_length, max_length], -np.inf), 1)\n torch_mask = torch.from_numpy(np_mask).type(torch.FloatTensor)\n\n return torch_mask.unsqueeze(0).unsqueeze(1)", "title": "" }, { "docid": "6565db42ad15217eea99054e6bba3a71", "score": "0.53875434", "text": "def sample_new_mask(self, batch_size=None):\n device = self.get_param_device()\n if batch_size:\n self.batch_size = batch_size\n # Sample dropout random masks\n self.input_mask = torch.bernoulli(\n torch.ones(batch_size, self.input_dim) * (1 - self.drop_prob)).to(device)\n self.hidden1_mask = torch.bernoulli(\n torch.ones(batch_size, self.hidden_size) * (1 - self.drop_prob)).to(device)\n self.hidden2_mask = torch.bernoulli(\n torch.ones(batch_size, self.hidden_size) * (1 - self.drop_prob)).to(device)", "title": "" }, { "docid": "56dc371d27771709f992344dbea1c4e6", "score": "0.53618616", "text": "def __init__(self, mask_center, *args, **kwargs):\n super().__init__(*args, **kwargs)\n i, o, h, w = self.weight.shape\n mask = torch.zeros((i, o, h, w))\n mask.data[:, :, : h // 2, :] = 1\n mask.data[:, :, h // 2, : w // 2 + int(not mask_center)] = 1\n self.register_buffer(\"mask\", mask)", "title": "" }, { "docid": "a70791feb93c1e265104ae7dd5b0ba86", "score": "0.5355643", "text": "def generate_mask_label(self, gt_twins, feat_len):\n batch_size = gt_twins.size(0)\n mask_label = torch.zeros(batch_size, feat_len).type_as(gt_twins)\n for b in range(batch_size):\n single_gt_twins = gt_twins[b]\n single_gt_twins[:, :2] = (single_gt_twins[:, :2] / self.feat_stride).int()\n twins_start = single_gt_twins[:, 0]\n _, indices = torch.sort(twins_start)\n single_gt_twins = torch.index_select(single_gt_twins, 0, indices).long().cpu().numpy()\n\n starts = np.minimum(np.maximum(0, single_gt_twins[:,0]), feat_len-1)\n ends = np.minimum(np.maximum(0, single_gt_twins[:,1]), feat_len)\n for x in zip(starts, ends):\n mask_label[b, x[0]:x[1]+1] = 1\n\n return mask_label", "title": "" }, { "docid": "d4f256dd68e677dd51e85b994059f0f9", "score": "0.53207797", "text": "def create_mask(nr, nc, Eval):\n \n # --------------------------\n # set mask\n # --------------------------\n # create binary mask. True for flow, False for blocked region.\n if len(Eval.shape) == 1:\n # mask = np.reshape(a=Eval, newshape=(X.shape[1], X.shape[0]))\n mask = np.reshape(a=Eval, newshape=(nc, nr))\n mask = np.transpose(mask)\n else:\n mask = Eval\n\n return mask", "title": "" }, { "docid": "605b828f66244aa82363ee1f65eb8149", "score": "0.53205806", "text": "def mask_link(self, input, mask):\n def split(x, n, dim):\n if x.ndim == 1:\n return x[n*dim:(n+1)*dim]\n else:\n return x[:, n*dim:(n+1)*dim]\n\n def recurrence(x_t, m_, c_tm1, h_tm1):\n p = x_t + T.dot(h_tm1, self.U)\n i = T.nnet.sigmoid(split(p, 0, self.hidden_dim))\n f = T.nnet.sigmoid(split(p, 1, self.hidden_dim) + 1.0)\n o = T.nnet.sigmoid(split(p, 2, self.hidden_dim))\n c = T.tanh(split(p, 3, self.hidden_dim))\n c = f * c_tm1 + i * c\n h = o * T.tanh(c)\n h = m_[:, None] * h + (1. 
- m_)[:, None] * h_tm1\n\n return c, h\n \n # input: seq, batch, input_dim\n self.input = input.dimshuffle(1, 0, 2) # seq, batch, input\n self.mask = mask.dimshuffle(1, 0) # seq, batch\n \n preact = T.dot(self.input, self.W) + self.b\n outputs_info = [T.alloc(x, self.input.shape[1], self.hidden_dim) for x in [self.c_0, self.h_0]]\n\n [_, h], _ = theano.scan(\n fn=recurrence,\n sequences=[preact, self.mask],\n outputs_info=outputs_info,\n n_steps=self.input.shape[0]\n )\n self.h = h\n self.output = h[-1]\n\n return h", "title": "" }, { "docid": "4d9690490e2546936c028cd6ca532506", "score": "0.53173995", "text": "def _target_mask(self, olens: torch.Tensor) -> torch.Tensor:\n y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)\n s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)\n return y_masks.unsqueeze(-2) & s_masks", "title": "" }, { "docid": "4f16f1f93d0c12dffdfe44c7f311bb56", "score": "0.5313549", "text": "def mask(self):\n mask_w = mask_h = int(\n (self.gridmask_size_ratio + 1) * max(self.h, self.w)\n )\n mask = tf.zeros(shape=[mask_h, mask_w], dtype=tf.int32)\n gridblock = tf.random.uniform(\n shape=[],\n minval=int(min(self.h * 0.5, self.w * 0.3)),\n maxval=int(max(self.h * 0.5, self.w * 0.3)),\n dtype=tf.int32,\n )\n\n if self.ratio == 1:\n length = tf.random.uniform(\n shape=[], minval=1, maxval=gridblock, dtype=tf.int32\n )\n else:\n length = tf.cast(\n tf.math.minimum(\n tf.math.maximum(\n int(tf.cast(gridblock, tf.float32) * self.ratio + 0.5),\n 1,\n ),\n gridblock - 1,\n ),\n tf.int32,\n )\n\n for _ in range(2):\n start_w = tf.random.uniform(\n shape=[], minval=0, maxval=gridblock, dtype=tf.int32\n )\n for i in range(mask_w // gridblock):\n start = gridblock * i + start_w\n end = tf.math.minimum(start + length, mask_w)\n indices = tf.reshape(tf.range(start, end), [end - start, 1])\n updates = (\n tf.ones(shape=[end - start, mask_w], dtype=tf.int32)\n * self.fill\n )\n mask = tf.tensor_scatter_nd_update(mask, indices, updates)\n mask = tf.transpose(mask)\n\n return mask", "title": "" }, { "docid": "920f6d9dca6b91e6212e560afd4e428b", "score": "0.53106445", "text": "def stack_mask(mask: torch.ByteTensor, n_heads: int) -> torch.ByteTensor:\n\n return mask.repeat(n_heads, 1, 1)", "title": "" }, { "docid": "d40394fa75fb690f53a2fb8bcc118769", "score": "0.5306672", "text": "def get_masks(args, tasks, masker: typing.Callable):\n with torch.no_grad():\n model = get_model(args)\n reversed_modules = reversed(list(model.modules()))\n output_mask = torch.tensor(list(range(tasks)) * args.labels)\n reversed_masks = [\n masker(module) for module in reversed_modules\n if nn.layers.spatial(module)\n ]\n hidden_masks = list(reversed(reversed_masks[:-1])) # drop the last mask\n masks = hidden_masks + [output_mask]\n return masks", "title": "" }, { "docid": "744531bda8354f6b2719430a2ff479c1", "score": "0.5298765", "text": "def create_masks(inputs, target):\n\n # padding mask same for encoder and decoder\n padding_mask, intent_mask = create_padding_mask(inputs)\n\n # Used in the 1st attention block in the decoder.\n # It is used to pad and mask future tokens in the input received by\n # the decoder.\n look_ahead_mask = create_look_ahead_mask(tf.shape(target)[1])\n dec_target_padding_mask, _ = create_padding_mask(target)\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n\n return padding_mask, combined_mask, intent_mask", "title": "" }, { "docid": "f0004f711948b67a4b12176c84a1683c", "score": "0.5298696", "text": "def 
make_mask(nii_path, layer):\n\n mask = nib.load(nii_path).dataobj[:,:,layer]\n masks = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.float32)\n\n for idx in range(4):\n masks[:, :, idx] = ((mask == idx + 1) + 0)\n \n return masks", "title": "" }, { "docid": "9accdb420b51cc3bfb0afce895033f87", "score": "0.5298354", "text": "def make_src_mask(self, src):\n src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)\n return src_mask", "title": "" }, { "docid": "32a38e2ef3a8a4c34ea9ef5ae0e37d70", "score": "0.52755815", "text": "def create_mask(\n self,\n mask_params: dict[str, dict[str, int]],\n ) -> torch.Tensor:\n\n def mask_from_params(params: dict[str, int]) -> torch.Tensor:\n mask = torch.zeros(self.img_height, self.img_width, dtype=torch.int32)\n\n if self.cc_board == \"cc_passport\":\n for row in range(4):\n for col in range(6):\n idx = row * 6 + (5 - col) + 1\n left_min = params[\"offset_left\"] + row * (\n params[\"square_size\"] + params[\"square_dist_horizontal\"]\n )\n top_min = params[\"offset_top\"] + col * (params[\"square_size\"] + params[\"square_dist_vertical\"])\n mask[top_min : top_min + params[\"square_size\"], left_min : left_min + params[\"square_size\"]] = (\n idx\n )\n\n else:\n for row in range(4):\n for col in range(6):\n idx = row * 6 + col + 1\n top_min = params[\"offset_top\"] + row * (params[\"square_size\"] + params[\"square_dist_vertical\"])\n left_min = params[\"offset_left\"] + col * (\n params[\"square_size\"] + params[\"square_dist_horizontal\"]\n )\n\n mask[top_min : top_min + params[\"square_size\"], left_min : left_min + params[\"square_size\"]] = (\n idx\n )\n\n if self.rot_angle is not None:\n mask = rotate(mask.numpy(), -self.rot_angle, resize=False, mode=\"constant\", cval=0, order=0)\n mask = torch.from_numpy(mask)\n\n return mask\n\n if self.cc_board == \"cc_passport\":\n assert len(mask_params) == 2, f\"Two masks are supported for cc_passport, but {len(mask_params)} were given\"\n left_mask = mask_from_params(mask_params[\"mask_0\"])\n right_mask = mask_from_params(mask_params[\"mask_1\"])\n right_mask[right_mask > 0] = right_mask[right_mask > 0] + 24\n\n return left_mask + right_mask\n else:\n assert (\n len(mask_params) == 1\n ), f\"Only one mask is supported for cc_classic, but {len(mask_params)} were given\"\n return mask_from_params(mask_params[\"mask_0\"])", "title": "" }, { "docid": "48fd2209bcbea8c29cd2f5a950e1cead", "score": "0.5274885", "text": "def mask(self):\n\n assert self._append_boundary_sym is False\n\n return nn_utils.length_array_to_mask_tensor([len(x) for x in self.code_list], device=self.device)", "title": "" }, { "docid": "453684c1eef5fb51f7a8013499aa8b55", "score": "0.5266578", "text": "def create_mask(prefix):\n \n # Creates paths for the image\n image_file = \"../data/test/images/\" + prefix + \"_image.jpg\"\n tuned_prediction_file = \"../data/test/tuned_masks/\" + prefix + \"_.jpg\"\n first_prediction_file = \"../data/test/first_masks/\" + prefix + \"_.jpg\"\n float_prediction_file = \"../data/test/float_masks/\" + prefix + \"_.jpg\"\n correct_mask_file = \"../data/test/masks/\" + prefix + \"_mask.png\"\n \n # Processes the image and adds padding\n motorcycle = process.Masked_Image(image_file, tuned_prediction_file, float_prediction_file, tuned_prediction_file)\n motorcycle.add_padding(2*buffer, 2*buffer)\n\n # Creates a list of all boundary points in the image\n boundary_list = motorcycle.list_boundary()\n \n # Initializes an array to store the information for the new predicted mask\n height = 
motorcycle.height\n width = motorcycle.width\n prediction_array = np.zeros((height, width, 2))\n # For each row and column, the first entry stores the sum of all weighted predictions for that point, \n # while the second entry stores the sum of all weights associated with those predictions. \n # This allows us to take a weighted average after obtaining all the data, \n # by dividing the first entry by the second entry. \n \n # Creates an array to store the input data for each region centered at a boundary point\n region_array = np.zeros((len(boundary_list), 2*buffer, 2*buffer, 4))\n boundary_index = 0\n\n for boundary_point in boundary_list:\n row = boundary_point[0]\n col = boundary_point[1]\n \n region = motorcycle.find_subregion(row, col, buffer)\n region_array[boundary_index, :, :, :] = region[:,:,:4]\n \n boundary_index += 1\n \n # Uses the u-net to make new predictions for each region, and combines them to make a new mask\n total_prediction = unet.predict(region_array)\n boundary_index = 0\n\n for boundary_point in boundary_list:\n row = boundary_point[0]\n col = boundary_point[1]\n \n local_prediction = total_prediction[boundary_index, :, :, 0]\n \n prediction_array[row-buffer:row+buffer, col-buffer:col+buffer,0] += local_prediction * gaussian\n prediction_array[row-buffer:row+buffer, col-buffer:col+buffer,1] += gaussian\n \n boundary_index += 1\n \n # Scales the prediction array so it doesn't overwhelm the initial prediction\n prediction_array = prediction_array / 20\n \n # Incorporates the initial predicted foreground into the prediction array\n prediction_array[:,:,0] += motorcycle.float_foreground\n prediction_array[:,:,1] += np.ones((height, width))\n \n # Sets all 0s equal to 1 in the second entry in dimension 2, to avoid dividing by 0 in the next step\n prediction_array[:,:,1] += (prediction_array[:,:,1] == 0.0)\n \n # Divides the sum of all weighted predictions by the sum of all weights, \n # obtaining a weighted average\n new_data = prediction_array[:,:,0]/prediction_array[:,:,1]\n \n # Rounds the new prediction to the nearest integer, creating a mask\n new_prediction = np.round(new_data)[2*buffer:height-2*buffer, 2*buffer:width-2*buffer]\n \n # Creates an image of the new mask\n plt.imshow(new_prediction)\n plt.show\n \n # Loads the previous predicted masks (with and without fine tuning) for comparison\n tuned_predicted_mask = io.imread(tuned_prediction_file)\n first_predicted_mask = io.imread(first_prediction_file)\n \n # Prints the IoUs for all three masks\n print(\"The IoU for the initial prediction was:\")\n print(find_iou(correct_mask_file, first_predicted_mask))\n print(\"The IoU for the fine tuned prediction was:\")\n print(find_iou(correct_mask_file, tuned_predicted_mask))\n print(\"The IoU for the u-net's prediction is:\")\n print(find_iou(correct_mask_file, new_prediction))\n print()\n print(\"Here is the u-net's prediction:\")\n \n return new_prediction", "title": "" }, { "docid": "21f6f19119c8dedd2193b99a2f74584d", "score": "0.5256583", "text": "def _init_chn_node_mask(self):\n chn_mask = self.base_net_desc.backbone.chn_mask\n return chn_mask", "title": "" }, { "docid": "f6280ed5f9d6c48852bb1fa2bb81265b", "score": "0.5253149", "text": "def test_infbatchmaskgenerator():\n \n @infinitegenerator\n @batchedgenerator\n @maskedgenerator\n def genfun():\n for i in range(10):\n yield i\n \n tests = [\n (1, [True], 10, [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]]),\n (1, [True, False], 10, [[0],[2],[4],[6],[8],[0],[2],[4],[6],[8]]),\n (2, [True], 10, 
[[0,1],[2,3],[4,5],[6,7],[8,9],\n [0,1],[2,3],[4,5],[6,7],[8,9]]),\n (2, [False, True], 10, [[1,3],[5,7],[9],[1,3],[5,7],[9],\n [1,3],[5,7],[9],[1,3]]),\n (3, [False, True, False], 10, [[1,4,7],[1,4,7],[1,4,7],[1,4,7],[1,4,7],\n [1,4,7],[1,4,7],[1,4,7],[1,4,7],[1,4,7]])\n ]\n \n for batchsize, mask, nelems, expected in tests:\n generator = genfun(infinite=True,batchsize=batchsize,mask=mask)\n obtained = list(islice(generator, nelems))\n print(\"Mask\", mask)\n print(\"Batch size\", batchsize)\n print(\"Elements taken\", nelems)\n print(\"Expected\", expected)\n print(\"Obtained\", obtained)\n assert(expected == obtained)", "title": "" }, { "docid": "8242e70e49ab146b272d622097ab31f0", "score": "0.52518016", "text": "def pixel_mask(self):\n template = posixpath.join(self.current_calib, '{disc}.chip{chip_idx}')\n return [template.format(disc=\"pixelmask\",\n chip_idx=idx) for idx in self.chip_range]", "title": "" }, { "docid": "2d43ca6d4bb4b436ca38ff92879f3f4b", "score": "0.5236829", "text": "def CreateMaskBand(self, *args):\n return _gdal.Dataset_CreateMaskBand(self, *args)", "title": "" }, { "docid": "1c9026a1d664cea5fe677cd6e296421a", "score": "0.52294916", "text": "def stack_mask(self, mask):\n return mask.repeat(self.n_heads, 1, 1)", "title": "" }, { "docid": "1c9026a1d664cea5fe677cd6e296421a", "score": "0.52294916", "text": "def stack_mask(self, mask):\n return mask.repeat(self.n_heads, 1, 1)", "title": "" }, { "docid": "ec71aff23bf5962e5910226cff2b3c01", "score": "0.5228402", "text": "def _tf_mask(self, feats: th.Tensor, num_spks: int) -> List[th.Tensor]:\n # N x C x T\n x = self.proj(feats)\n # n x B x T\n x = self.conv(x)\n # N x F* x T\n masks = self.non_linear(self.mask(x))\n # [N x F x T, ...]\n return th.chunk(masks, self.num_spks, 1)", "title": "" }, { "docid": "541c427411077ceb1d01dc93c134da47", "score": "0.5220853", "text": "def blackout_all(self):\n msg = '#MASK'\n for output in range(1, self.OUTPUT_COUNT + 1):\n msg += ' {}'.format(self.to_letter(output))\n self.send_line(msg)", "title": "" }, { "docid": "bfba6c41b8d9be065e5f697b0becdc16", "score": "0.52172405", "text": "def mask(self):\n return self._mask", "title": "" }, { "docid": "bfba6c41b8d9be065e5f697b0becdc16", "score": "0.52172405", "text": "def mask(self):\n return self._mask", "title": "" }, { "docid": "bfba6c41b8d9be065e5f697b0becdc16", "score": "0.52172405", "text": "def mask(self):\n return self._mask", "title": "" }, { "docid": "7651173fd14d5919d4673ee1d831291f", "score": "0.52119786", "text": "def getMasks(cls):\n def findBitSet(bitmask: ReferenceLineStatus) -> int:\n \"\"\"Return the position of the only single bit set, or zero\n\n https://stackoverflow.com/a/51094793/834250\n \"\"\"\n value = bitmask.value\n if value == 0 or (value & (value - 1)) != 0:\n return 0\n return int(log2(value)) + 1\n\n bits = {name: findBitSet(value) for name, value in cls.__members__.items()}\n return MaskHelper(**{name: number - 1 for name, number in bits.items() if number != 0})", "title": "" }, { "docid": "380168573a7b815b1c04e3483c693c7d", "score": "0.52073056", "text": "def mask(self, observation, output_mask, rendered=None):\n raise NotImplementedError()", "title": "" }, { "docid": "ece84ce3433745d031cef82bc7cec88c", "score": "0.5197961", "text": "def apply_mask(self):\n self._apply_grad_mask()\n self._apply_weight_mask()", "title": "" }, { "docid": "8f7433e4ac06c5b7c4cd795bfd7ebd26", "score": "0.5196824", "text": "def applyChannelMask(self,chanmask,outfilename=None,gulp=512,back_compatible=True):\n if outfilename is 
None:\n outfilename = \"%s_masked.fil\"%(self.header.basename)\n mask = np.array(chanmask).astype(\"ubyte\")\n out_file = self.header.prepOutfile(outfilename,back_compatible=back_compatible)\n for nsamps,ii,data in self.readPlan(gulp):\n self.lib.maskChannels(as_c(data),\n as_c(mask),\n C.c_int(self.header.nchans),\n C.c_int(nsamps))\n out_file.cwrite(data)\n return out_file.name", "title": "" }, { "docid": "789cce81f989154e50c458ec5182cb29", "score": "0.5194272", "text": "def shift_mask(mask, coord_shift, ipm_mask=None):", "title": "" }, { "docid": "4c8dee54f28164329c3a9b34e8e00379", "score": "0.5180447", "text": "def generate(self):\n # Minimum initial topology\n # Input to output only, no hidden layer\n for i in range(self._inputs):\n for j in range(self._inputs, self._unhidden):\n self.add_edge(i+1, j+1)\n\n self.reset()", "title": "" }, { "docid": "a7e004245d554525504ed87dcab9b7d7", "score": "0.51786286", "text": "def get_mask(input_size, num_of_units, num_of_classes, curr_active_neurons):\n # make mask the right size with all zeros\n mask = [np.zeros((np.prod(input_size), num_of_units)), np.zeros((num_of_units, num_of_units)), np.zeros((num_of_units, num_of_classes))]\n # add ones to mask to represent currently active neurons\n mask[0][:, :curr_active_neurons] = 1\n mask[1][:curr_active_neurons, :curr_active_neurons] = 1\n mask[2][:curr_active_neurons, :] = 1\n return mask", "title": "" }, { "docid": "117a791a7f8f48bac5e2dc862d3afb25", "score": "0.5177103", "text": "def _create_mask(zero_p: float = 0.4) -> torch.Tensor:\n hint_count = 0\n\n if random.random() < zero_p:\n if random.random() < 0.4:\n hint_count = random.randint(1, 5)\n else:\n hint_count = random.randint(\n random.randint(5, 32),\n random.randint(42, 65))\n\n area = 128 * 128\n\n zero = np.zeros(shape=[area - hint_count], dtype=np.uint8)\n one = np.ones(shape=[hint_count], dtype=np.uint8)\n mask = np.concatenate([zero, one], -1)\n np.random.shuffle(mask)\n mask = np.reshape(mask, newshape=[128, 128]) * 255\n _, mask = cv2.threshold(mask,\n 127, 255,\n cv2.THRESH_BINARY)\n return to_tensor(mask)", "title": "" }, { "docid": "b91bd6bc39f98c88e7698deaf3cc6cd7", "score": "0.51650333", "text": "def layerMaskExample(obj):\n layer_mask = obj.getOctLayerMask(3,4,True)\n myoct = np.copy(obj.octdata)\n myoct[layer_mask] = 255\n plt.imshow(myoct[0,:,:])\n plt.show()", "title": "" }, { "docid": "93675d158d81885abb55498e908980b9", "score": "0.5160446", "text": "def decoder_mask():\n ones = np.ones([batch_size,hp.max_seq_len])\n ones[trXlen[:,None] <= np.arange(trXlen.shape[1])] = 0\n np.repeat(d[:, :, np.newaxis], 2, axis=2)", "title": "" }, { "docid": "561384c695adf1703234a72bcd2f3135", "score": "0.51551425", "text": "def address_generator(address, mask):\n address_string = to_binary_string(\n address | int(mask.replace(\"X\", \"0\"), 2), len(mask)\n )\n address_string = \"\".join(m if m == \"X\" else a for a, m in zip(address_string, mask))\n num_floating_bits = address_string.count(\"X\")\n for i in range(2 ** num_floating_bits):\n fill_values = list(to_binary_string(i, num_floating_bits))\n new_address_string = \"\".join(\n a if a != \"X\" else fill_values.pop() for a in address_string\n )\n yield int(new_address_string, 2)", "title": "" }, { "docid": "91a68c56135547154418ec10527324b1", "score": "0.51542884", "text": "def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n # dimensions of mask should be (batch_size, time_steps)\n assert K.ndim(mask) == 2\n # add a dummy dimension so that the shape is now\n 
# (batch_size, time_steps, 1)\n mask = K.expand_dims(mask, 2)\n # now add a fake 2nd spatial dimension\n # (batch_size, time_steps, 1, 1)\n mask = K.expand_dims(mask, 3)\n strides = self.strides + (1,)\n pool_size = self.pool_size + (1,)\n mask = K.pool2d(\n mask,\n pool_size=pool_size,\n strides=strides,\n padding=self.padding,\n data_format=\"channels_last\",\n pool_mode='max')\n # get rid of dummy dimensions\n mask = K.squeeze(mask, 3)\n mask = K.squeeze(mask, 2)\n return mask", "title": "" }, { "docid": "0340c45e8a017c58c8d33f7fc5317534", "score": "0.5149613", "text": "def generate_mask(box, img_size):\r\n box = box.astype(int)\r\n box[box < 0] = 0\r\n x1, y1, w, h = box\r\n x2, y2 = [x1 + w, y1 + h]\r\n mask = np.zeros(img_size, dtype=np.uint8)\r\n\r\n if x1 > x2:\r\n x1, x2 = x2, x1\r\n\r\n if y1 > y2:\r\n y1, y2 = y2, y1\r\n\r\n mask[y1:y2, x1:x2] = 255\r\n\r\n return mask", "title": "" }, { "docid": "137522e2c7635033a03f70f98b8c8d57", "score": "0.51467776", "text": "def _get_mask(id_to_node, node_to_id, num_nodes, masked_nodes_valid, masked_nodes_test, additional_mask_rate):\n train_mask = np.ones(num_nodes)\n valid_mask = np.zeros(num_nodes) \n test_mask = np.zeros(num_nodes)\n for node_id in masked_nodes_valid:\n train_mask[id_to_node[node_id]] = 0\n valid_mask[id_to_node[node_id]] = 1\n for node_id in masked_nodes_test:\n train_mask[id_to_node[node_id]] = 0\n test_mask[id_to_node[node_id]] = 1\n if additional_mask_rate and additional_mask_rate < 1:\n unmasked = np.array([idx for idx in range(num_nodes) if node_to_id[idx] not in masked_nodes])\n yet_unmasked = np.random.permutation(unmasked)[:int(additional_mask_rate*num_nodes)]\n train_mask[yet_unmasked] = 0\n return train_mask, valid_mask, test_mask", "title": "" }, { "docid": "a2a1ee9f95c03457c1009fb5e42e8c5d", "score": "0.5140831", "text": "def outer_mask(self):\n for p in flex.nested_loop(self.parent.grid_size()):\n if self._outer_mask_binary[self.indexer(p)]: yield p", "title": "" }, { "docid": "9d6bef64ef05970b199be8070b3d39a1", "score": "0.5140286", "text": "def mask_x(self):\n return torch.sigmoid(self._mask_x)", "title": "" }, { "docid": "575f074ebf37c0617b726eb9d48c7e51", "score": "0.5138418", "text": "def build_generator_net(self):\n layer_1 = self.activation_func(tf.add(tf.matmul(self.z, self.gen_h1), self.gen_b1))\n layer_2 = self.activation_func(tf.add(tf.matmul(layer_1, self.gen_h2), self.gen_b2))\n\n x_reconstruction_mean = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, self.gen_out_mean),\n self.gen_out_mean_bias))\n return x_reconstruction_mean", "title": "" }, { "docid": "87257da94497fa303119e5590eee4cf6", "score": "0.51282763", "text": "def __init__(\n self,\n bottleneck: int,\n n_iaf: int,\n l1_coef: float,\n l2_coef: float,\n masking_dim=320,\n n_made=2,\n activation=\"relu\",\n name='iaf',\n **kwargs\n ):\n super().__init__(name=name, **kwargs)\n self.m_fwd_pass = []\n self.s_fwd_pass = []\n for i in range(n_iaf):\n self.m_fwd_pass.append(\n MaskingDense(\n units=masking_dim,\n out_units=bottleneck,\n hidden_layers=n_made,\n activation=activation,\n out_activation='linear',\n # batchnorm=True,\n name='mean_%s' % i)\n )\n self.s_fwd_pass.append(\n MaskingDense(\n units=masking_dim,\n out_units=bottleneck,\n hidden_layers=n_made,\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=l1_coef, l2=l2_coef),\n out_bias_initializer='ones',\n activation=activation,\n out_activation='sigmoid',\n # batchnorm=True,\n name='sigma_%s' % i)\n )", "title": "" }, { "docid": "ba0012c7256040fb05532bbfbdf4a15a", "score": "0.5128221", 
"text": "def masked(\n inner: GradientTransformation,\n mask: OptState | Callable[[Params], OptState] | None = None,\n) -> GradientTransformation:\n return _masked(inner=inner, mask=mask, already_flattened=False)", "title": "" }, { "docid": "ac403375a61e6acd1f184b888a0ec020", "score": "0.5125632", "text": "def CreateMaskBand(self, *args):\n return _gdal.Band_CreateMaskBand(self, *args)", "title": "" }, { "docid": "a52f4dc2be559dcd57d95ca801a0f19a", "score": "0.51247895", "text": "def hidden_layers(self):\n for l in self.layers[1:-1]:\n yield l", "title": "" }, { "docid": "204d873d6bd05b2ed8dfdd1b9f441abc", "score": "0.5117883", "text": "def _gen_mask(valid_step: torch.Tensor, batch_size: int, seq_len: int):\n assert valid_step.shape == (batch_size, 1)\n assert (1 <= valid_step).all()\n assert (valid_step <= seq_len).all()\n device = valid_step.device\n mask = torch.arange(seq_len, device=device).repeat(batch_size, 1)\n mask = (mask >= (seq_len - valid_step)).float()\n return mask", "title": "" }, { "docid": "f6cd22170109dbed1fdf8cc85f474450", "score": "0.51173013", "text": "def generate_generator(self):", "title": "" }, { "docid": "adafdafaac70c0a11bd00191ccdea638", "score": "0.51099735", "text": "def mask(self, path):\n return PathMask(path)", "title": "" }, { "docid": "98d95f59549c2e9c0f7f47b05305abe4", "score": "0.51088285", "text": "def __init__(self, conv_layer, weight_mask, bias_mask=None):\n super(Conv2dMasked, self).__init__(conv_layer, weight_mask, bias_mask)\n assert isinstance(conv_layer, nn.Conv2d), \"Layer must be a Conv2d layer\"\n for attr in ['in_channels', 'out_channels', 'kernel_size', 'dilation',\n 'stride', 'padding', 'padding_mode', 'groups']:\n setattr(self, attr, getattr(conv_layer, attr))", "title": "" }, { "docid": "6f77af22ecbd0bf7186fcd02b2474445", "score": "0.5107828", "text": "def _get_mask(self, minfo: PrunedModuleInfo):\n raise NotImplementedError", "title": "" }, { "docid": "882494bf87d3ebeb73a550b446f0bace", "score": "0.510628", "text": "def create_mask_irrelevant(self):\n if self.nodules is None:\n logger.warning(\"Info about nodules location must \" +\n \"be loaded before calling this method. 
\" +\n \"Nothing happened.\")\n self.segmentation = np.zeros_like(self.images)\n\n center_pix = np.abs(self.nodules.nodule_center -\n self.nodules.origin) / self.nodules.spacing\n start_pix = (center_pix - np.rint(self.nodules.nodule_size /\n self.nodules.spacing / 2))\n start_pix = np.rint(start_pix).astype(np.int)\n make_rect_mask_numba(self.segmentation, self.nodules.offset,\n self.nodules.img_size + self.nodules.offset, start_pix,\n np.rint(self.nodules.nodule_size / self.nodules.spacing))\n\n return self", "title": "" }, { "docid": "233e396ecbc9bfe285723d45ddd71a76", "score": "0.5099605", "text": "def inner_mask(self):\n for p in flex.nested_loop(self.parent.grid_size()):\n if self._inner_mask_binary[self.indexer(p)]: yield p", "title": "" }, { "docid": "5e6b8dc5c9412e10ed93bdcb9e35e6b0", "score": "0.5096844", "text": "def subsequent_mask(size):\n return torch.tril(torch.ones(size, size, dtype=torch.uint8))", "title": "" }, { "docid": "6687cc9b0738cb163abd601ae88b3ac1", "score": "0.5090792", "text": "def _mask_block(self, sentence, tagmap):\n block = self.masking_scheme.mask(sentence, tagmap)\n return block", "title": "" }, { "docid": "97e2c727308a74fd62145b70671fa14c", "score": "0.50888467", "text": "def __init__(self,splits=None,wmap=None,mask=None,kmask=None,directory=None,spec_smooth_width=2.,skip_beam=True,skip_mask=True,skip_kmask=True,skip_cross=True,iau=False):\n\n if directory is not None:\n self._load(directory,skip_beam,skip_mask,skip_kmask,skip_cross)\n else:\n \n shape = splits[0].shape\n wcs = splits[0].wcs\n\n \n if wmap is None: wmap = enmap.ones(shape[-2:],wcs)\n if mask is None: mask = np.ones(shape[-2:])\n if kmask is None: kmask = np.ones(shape[-2:])\n\n wmap = enmap.ndmap(wmap,wcs)\n\n osplits = [split*mask for split in splits]\n fc = FourierCalc(shape,wcs,iau)\n n2d, p2d = noise_from_splits(osplits,fc)\n del osplits\n del splits\n w2 = np.mean(mask**2.)\n n2d *= (1./w2)\n p2d *= (1./w2)\n\n n2d = np.fft.ifftshift(enmap.smooth_spectrum(np.fft.fftshift(n2d), kernel=\"gauss\", weight=\"mode\", width=spec_smooth_width))\n self.spec_smooth_width = spec_smooth_width\n ncomp = shape[0] if len(shape)>2 else 1\n \n self.cross2d = p2d\n self.cross2d *= kmask\n n2d *= kmask\n self.noise2d = n2d.reshape((ncomp,ncomp,shape[-2],shape[-1]))\n self.mask = mask\n self.kmask = kmask\n self.wmap = wmap\n self.shape = shape\n self.wcs = wcs\n self.ngen = MapGen(self.shape,self.wcs,self.noise2d)\n # self.noise_modulation = 1./np.sqrt(self.wmap)/np.sqrt(np.mean((1./self.wmap)))\n wt = 1./np.sqrt(self.wmap)\n wtw2 = np.mean(1./wt**2.)\n self.noise_modulation = wt*np.sqrt(wtw2)", "title": "" }, { "docid": "12cf78b66696c63780ee99b2ef8f336e", "score": "0.5086359", "text": "def src_mask(self):\n return self._get_component(\"src_mask\")", "title": "" }, { "docid": "abced37224d3ed5ce7363778ba9fe314", "score": "0.5080844", "text": "def net_mask(self):\n mask = (0xffffffff >> (32 - self.cidr)) << (32 - self.cidr)\n\n return Subnet.__create_mask(mask)", "title": "" }, { "docid": "7c3439697d7d91d3d350d6a775881ac6", "score": "0.5073962", "text": "def generator(self, constants):\n\t\tprint \"CoPopulation Generator not over ridden\"", "title": "" }, { "docid": "b70317a8769931335b4cf2e2fe480b89", "score": "0.5067706", "text": "def _mask(self):\n return self.raw_network.netmask().strNormal(0)", "title": "" }, { "docid": "a2c69e860870501c435f7a8eedca9080", "score": "0.50557476", "text": "def GetMaskImage(self) -> \"itkImageUS2 *\":\n return 
_itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUS2_GetMaskImage(self)", "title": "" }, { "docid": "d09a4b08a970868c4e462260d5976b32", "score": "0.50532013", "text": "def make_train_generator(train_images, train_masks, batch_size=1, custom_distortions=False, seed=2):\n if custom_distortions:\n images_datagen = ImageDataGenerator(preprocessing_function=data_augmentation_with_distortions_for_images)\n else:\n images_datagen = ImageDataGenerator(preprocessing_function=data_augmentation_no_distortions)\n \n masks_datagen = ImageDataGenerator(preprocessing_function=data_augmentation_no_distortions)\n\n # Provide the same seed and keyword arguments to the flow method so that images and masks transformed the same way.\n images_generator = images_datagen.flow(train_images, batch_size=batch_size, seed=seed)\n masks_generator = masks_datagen .flow(train_masks , batch_size=batch_size, seed=seed)\n\n # Combine generators into one which yields image and masks.\n train_generator = zip(images_generator, masks_generator)\n\n return train_generator", "title": "" }, { "docid": "e622254f7c02a13b030c8fb2fffdf06a", "score": "0.50480443", "text": "def batch(self, batch_size=32):\n l = len(self.masks)\n for ndx in range(0, l, batch_size):\n yield self.masks[ndx:min(ndx + batch_size, l)]", "title": "" }, { "docid": "dccd1c17664909deec0c70b74245bfd0", "score": "0.5047341", "text": "def get_masked_bits(self):\n\n return self._masked_bits", "title": "" }, { "docid": "cdc470f3c456b090a7639be09357812a", "score": "0.50326693", "text": "def mask(self, name_or_num):\n if isinstance(name_or_num, int):\n return 2**name_or_num\n else:\n return 2**self._bitnum[name_or_num]", "title": "" }, { "docid": "a64c00e4f821e7b76213bc984a340765", "score": "0.50303054", "text": "def mask_edges(self, mask, mrows=1, mcols=1) :\n return gu.mask_edges(mask, mrows, mcols)", "title": "" }, { "docid": "db40cbf4c85f6edd186ac5330d0b9cba", "score": "0.50289804", "text": "def generate_mask_tensor(mask):\n assert isinstance(mask, np.ndarray), \"input for generate_mask_tensor\" \\\n \"should be an numpy ndarray\"\n if F.backend_name == 'mxnet':\n return F.tensor(mask, dtype=F.data_type_dict['float32'])\n else:\n return F.tensor(mask, dtype=F.data_type_dict['bool'])", "title": "" }, { "docid": "0955de691a7b5e88e7ec6b57ad244a48", "score": "0.50279623", "text": "def _gen_mask(lengths_x, lengths_y, max_len_x, max_len_y):\n device = lengths_x.device\n # x_mask: (batch_size, max_len_x)\n x_mask = th.arange(max_len_x, device=device).unsqueeze(\n 0\n ) < lengths_x.unsqueeze(1)\n # y_mask: (batch_size, max_len_y)\n y_mask = th.arange(max_len_y, device=device).unsqueeze(\n 0\n ) < lengths_y.unsqueeze(1)\n # mask: (batch_size, 1, max_len_x, max_len_y)\n mask = (x_mask.unsqueeze(-1) & y_mask.unsqueeze(-2)).unsqueeze(1)\n return mask", "title": "" }, { "docid": "a9dc501179068ae6051497660055b677", "score": "0.5027626", "text": "def generator_helper(\n noise, is_conditional, one_hot_labels, weight_decay, is_training):\n net = _dense(noise, 1024, weight_decay)\n net = _batch_norm(net, is_training)\n net = tf.nn.relu(net)\n\n if is_conditional:\n net = tfgan.features.condition_tensor_from_onehot(net, one_hot_labels)\n\n net = _dense(net, 7 * 7 * 128, weight_decay)\n net = _batch_norm(net, is_training)\n net = tf.nn.relu(net)\n\n net = tf.reshape(net, [-1, 7, 7, 128])\n\n net = _deconv2d(net, 64, 4, 2, weight_decay)\n net = _batch_norm(net, is_training)\n net = tf.nn.relu(net)\n\n net = _deconv2d(net, 32, 4, 2, 
weight_decay)\n net = _batch_norm(net, is_training)\n net = tf.nn.relu(net)\n\n # Output should have 1 pixel (grayscale).\n net = _conv2d(net, 1, 4, 1, weight_decay)\n\n # Make sure that generator output is in the same range as `inputs`\n # ie [-1, 1].\n net = tf.tanh(net)\n\n return net", "title": "" }, { "docid": "9e602bada972e84d02dcf908215a9dc5", "score": "0.5024491", "text": "def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n exp_blocked_to_pad = tf.concat([\n to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:,\n 3:-1]\n ], 2)\n band_mask = tf.einsum(\"BLQ,BLK->BLQK\", from_blocked_mask[:, 2:-2],\n exp_blocked_to_pad)\n band_mask = tf.expand_dims(band_mask, 1)\n return band_mask", "title": "" } ]
2a35d1556d926281ff1c66b1f8595185
Automate the process of generating metadata from TeX formulas.
[ { "docid": "57d490b7a975ff5f9fac15062ce902f5", "score": "0.0", "text": "def __init__(self, length, *args):\n assert length >= 1\n self.length = length\n self.f = args\n super().__init__(self, text=None)", "title": "" } ]
[ { "docid": "c54f0bb68ce7bada5f80ae2c7db2c0f6", "score": "0.5863953", "text": "def generate_meta(tree, bill_id):\n meta = E(\"meta\")\n \n meta.append(generate_identification(tree, bill_id))\n meta.append(generate_publication(tree, bill_id))\n meta.append(generate_lifecycle(tree, bill_id))\n meta.append(generate_analysis(tree))\n\n meta.append(generate_references(tree))\n return meta", "title": "" }, { "docid": "dbcc3f18829860860689f28205c1f1fd", "score": "0.5534738", "text": "def setUpFormulas(self):\n self.formulas = {}\n self.alias = {}\n\n self.alias['atmosphere_ln_pressure_coordinate'] = 'atmosphere_ln_pressure_coordinate'\n self.alias['atmosphere_sigma_coordinate'] = 'sigma'\n self.alias['sigma'] = 'sigma'\n self.alias['atmosphere_hybrid_sigma_pressure_coordinate'] = 'hybrid_sigma_pressure'\n self.alias['hybrid_sigma_pressure'] = 'hybrid_sigma_pressure'\n self.alias['atmosphere_hybrid_height_coordinate'] = 'atmosphere_hybrid_height_coordinate'\n self.alias['atmosphere_sleve_coordinate'] = 'atmosphere_sleve_coordinate'\n self.alias['ocean_sigma_coordinate'] = 'ocean_sigma_coordinate'\n self.alias['ocean_s_coordinate'] = 'ocean_s_coordinate'\n self.alias['ocean_s_coordinate_g1'] = 'ocean_s_coordinate_g1'\n self.alias['ocean_s_coordinate_g2'] = 'ocean_s_coordinate_g2'\n self.alias['ocean_sigma_z_coordinate'] = 'ocean_sigma_z_coordinate'\n self.alias['ocean_double_sigma_coordinate'] = 'ocean_double_sigma_coordinate'\n\n self.formulas['atmosphere_ln_pressure_coordinate'] = ['p(k)=p0*exp(-lev(k))']\n self.formulas['sigma'] = ['p(n,k,j,i)=ptop+sigma(k)*(ps(n,j,i)-ptop)']\n\n self.formulas['hybrid_sigma_pressure'] = ['p(n,k,j,i)=a(k)*p0+b(k)*ps(n,j,i)',\n 'p(n,k,j,i)=ap(k)+b(k)*ps(n,j,i)']\n\n self.formulas['atmosphere_hybrid_height_coordinate'] = ['z(n,k,j,i)=a(k)+b(k)*orog(n,j,i)']\n\n self.formulas['atmosphere_sleve_coordinate'] = ['z(n,k,j,i) = a(k)*ztop + b1(k)*zsurf1(n,j,i) + b2(k)*zsurf2(n,j,i)']\n\n self.formulas['ocean_sigma_coordinate'] = ['z(n,k,j,i)=eta(n,j,i)+sigma(k)*(depth(j,i)+eta(n,j,i))']\n \n self.formulas['ocean_s_coordinate'] = ['z(n,k,j,i)=eta(n,j,i)*(1+s(k))+depth_c*s(k)+(depth(j,i)-depth_c)*C(k)',\n 'C(k)=(1-b)*sinh(a*s(k))/sinh(a)+b*[tanh(a*(s(k)+0.5))/(2*tanh(0.5*a))-0.5]']\n\n self.formulas['ocean_s_coordinate_g1'] = ['z(n,k,j,i) = S(k,j,i) + eta(n,j,i) * (1 + S(k,j,i) / depth(j,i))',\n 'z(n,k,j,i) = S(k,j,i) + eta(n,j,i) * (1 + S(k,j,i) / depth(j,i))']\n\n self.formulas['ocean_s_coordinate_g2'] = ['z(n,k,j,i) = eta(n,j,i) + (eta(n,j,i) + depth(j,i)) * S(k,j,i)',\n 'S(k,j,i) = (depth_c * s(k) + depth(j,i) * C(k)) / (depth_c + depth(j,i))']\n\n self.formulas['ocean_sigma_z_coordinate'] = ['z(n,k,j,i)=eta(n,j,i)+sigma(k)*(min(depth_c,depth(j,i))+eta(n,j,i))',\n 'z(n,k,j,i)=zlev(k)']\n\n self.formulas['ocean_double_sigma_coordinate'] = ['z(k,j,i)=sigma(k)*f(j,i)',\n 'z(k,j,i)=f(j,i)+(sigma(k)-1)*(depth(j,i)-f(j,i))',\n 'f(j,i)=0.5*(z1+z2)+0.5*(z1-z2)*tanh(2*a/(z1-z2)*(depth(j,i)-href))']\n\n # Set up nested dictionary of:\n # 1) valid standard_names for variables named by the formula_terms attribute\n # 2) computed_standard_names (csn) for the variable specifying the formula_terms attribute\n\n self.ft_var_stdnames=defaultdict(dict)\n\n self.ft_var_stdnames['atmosphere_ln_pressure_coordinate'] = {'p0': ['reference_air_pressure_for_atmosphere_vertical_coordinate'],\n 'csn': ['air_pressure']}\n\n self.ft_var_stdnames['sigma'] = {'ptop': ['air_pressure_at_top_of_atmosphere_model'],\n 'ps': ['surface_air_pressure'],\n 'csn': ['air_pressure']}\n\n 
self.ft_var_stdnames['hybrid_sigma_pressure'] = {'p0': ['reference_air_pressure_for_atmosphere_vertical_coordinate'],\n 'ps': ['surface_air_pressure'],\n 'csn': ['air_pressure']}\n\n self.ft_var_stdnames['atmosphere_hybrid_height_coordinate'] = {'orog': ['surface_altitude', 'surface_height_above_geopotential_datum'],\n 'a': ['atmosphere_hybrid_height_coordinate'],\n 'csn': ['altitude', 'height_above_geopotential_datum']}\n\n self.ft_var_stdnames['atmosphere_sleve_coordinate'] = {'ztop': ['altitude_at_top_of_atmosphere_model', 'height_above_geopotential_datum_at_top_of_atmosphere_model'],\n 'csn': ['altitude', 'height_above_geopotential_datum']}\n\n self.ft_var_stdnames['ocean_sigma_coordinate'] = {'eta': ['set'], 'depth': ['set'], 'csn': ['set']}\n self.ft_var_stdnames['ocean_s_coordinate'] = {'eta': ['set'], 'depth': ['set'], 'csn': ['set']}\n self.ft_var_stdnames['ocean_s_coordinate_g1'] = {'eta': ['set'], 'depth': ['set'], 'csn': ['set']}\n self.ft_var_stdnames['ocean_s_coordinate_g2'] = {'eta': ['set'], 'depth': ['set'], 'csn': ['set']}\n self.ft_var_stdnames['ocean_sigma_z_coordinate'] = {'eta': ['set'], 'depth': ['set'], 'zlev': ['set'], 'csn': ['set']}\n self.ft_var_stdnames['ocean_double_sigma_coordinate'] = {'depth': ['set'], 'csn': ['set']}\n\n self.ft_stdname_sets = defaultdict(dict)\n self.ft_stdname_sets[0] = {'zlev': ['altitude'],\n 'eta': ['sea_surface_height_above_geoid'],\n 'depth': ['sea_floor_depth_below_geoid'],\n 'csn': ['altitude']}\n\n self.ft_stdname_sets[1] = {'zlev': ['height_above_geopotential_datum'],\n 'eta': ['sea_surface_height_above_geopotential_datum'],\n 'depth': ['sea_floor_depth_below_geopotential_datum'],\n 'csn': ['height_above_geopotential_datum']}\n\n self.ft_stdname_sets[2] ={'zlev': ['height_above_reference_ellipsoid'],\n 'eta': ['sea_surface_height_above_reference_ellipsoid'],\n 'depth': ['sea_floor_depth_below_reference_ellipsoid'],\n 'csn': ['height_above_reference_ellipsoid']}\n\n self.ft_stdname_sets[3] = {'zlev': ['height_above_mean_sea_level'],\n 'eta': ['sea_surface_height_above_mean_ sea_level'],\n 'depth': ['sea_floor_depth_below_mean_ sea_level'],\n 'csn': ['height_above_mean_sea_level']}", "title": "" }, { "docid": "96aa04a3fcdfb573f9c5f306f221751a", "score": "0.52738154", "text": "def __resource_meta__(self, resource, text):\n if not resource.source_file.kind == 'md':\n return text \n \n # Separate out any existing Jinja headers\n md = re.sub(r\"^\\s*(---|===)\\s*$\", '', text, flags=re.MULTILINE)\n \n # Split entire article into an array of paragraphs\n md_by_paragraph = re.split('\\n\\s*\\n',md) \n \n # The header is the first paragraph\n md_hdr = md_by_paragraph[0]\n \n # Replace any '# This is an H1 #' or '# This is an H1' markdown headers with 'title: This is an H1'\n md_hdr = re.sub('^#{1}([^#].*).#*$', 'title:\\\\1', md_hdr, flags=re.MULTILINE)\n \n # Replace 'date: YYYY-MM-DD' with \"created: !!timestamp 'YYY-MM-DD'\"\n md_hdr = re.sub('^date:\\s(.*)$', 'created: !!timestamp \\'\\\\1\\'', md_hdr, flags=re.MULTILINE|re.IGNORECASE)\n \n \n text = '\\n\\n'.join(md_by_paragraph[1::]) \n \n #This seems to fire for tagged posts, need to run only body, not header\n # Code: (four spaces at line start), --> Add code: true to metadata\n md_code = re.compile(\"^ .+\", re.UNICODE|re.MULTILINE)\n html_code = re.compile(\"<code>.*</code>\", re.UNICODE|re.MULTILINE)\n if md_code.search(text):\n md_hdr = 'code: true\\n' + md_hdr\n else:\n if html_code.search(text):\n md_hdr = 'code: true\\n' + md_hdr\n \n # Gallery !{![]()[]()}, --> Add 
gallery: true to metadata \n md_gallery = re.compile(\"\\!\\{(\\!*\\[.*?\\]\\(.+?\\))+\\}\", re.UNICODE|re.MULTILINE)\n if md_gallery.search(text):\n md_hdr = 'gallery: true\\n' + md_hdr \n \n # Math: $$...$$, \\\\[...\\\\], \\\\(...\\\\) --> Add math: true to metadata\n md_math = re.compile(\"[$][$].+[$][$]|\\\\\\\\\\[.+\\\\\\\\\\]|\\\\\\\\\\(.+\\\\\\\\\\)\", re.UNICODE|re.MULTILINE)\n if md_math.search(text):\n md_hdr = 'math: true\\n' + md_hdr\n \n # Image: ![]() or index.html --> Add image: true to metadata \n md_image = re.compile(\"\\!\\[(.*)\\]\\((.*)\\)\", re.UNICODE|re.MULTILINE)\n md_index = re.compile(\"index[.]html$\",re.UNICODE)\n if md_image.search(text):\n md_hdr = 'image: true\\n' + md_hdr\n else: \n if md_index.search(resource.relative_deploy_path):\n md_hdr = 'image: true\\n' + md_hdr\n \n return md_hdr", "title": "" }, { "docid": "027021d79f260f96944920ac9055a768", "score": "0.52665955", "text": "def metadata(matrix, xena_dtypes):\n\n # Generate metadata.\n print('Creating metadata file ...', end='')\n sys.stdout.flush()\n jinja2_env = jinja2.Environment(\n loader=jinja2.PackageLoader('xena_gdc_etl', 'resources')\n )\n metadata_template = jinja2_env.get_template(METADATA_TEMPLATE[xena_dtypes])\n matrix_date = time.strftime(\n \"%m-%d-%Y\", time.gmtime(os.path.getmtime(matrix))\n )\n variables = {\n 'project_id': 'GDC-PANCAN',\n 'date': matrix_date,\n 'gdc_release': GDC_RELEASE_URL + '#data-release-90',\n 'xena_cohort': 'GDC Pan-Cancer (PANCAN)',\n }\n try:\n variables.update(METADATA_VARIABLES[xena_dtypes])\n except KeyError:\n pass\n outmetadata = matrix + '.json'\n with open(outmetadata, 'w') as f:\n f.write(metadata_template.render(**variables))\n print('\\rMetadata JSON is saved at {}.'.format(outmetadata))", "title": "" }, { "docid": "f3faca439a3bf9bff267e7fba4b206ac", "score": "0.52564967", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n if os.path.isfile(self.evaluation_setup_filename(setup_part='evaluate')):\n meta_data = collections.OrderedDict()\n\n # Read files in\n data = MetaDataContainer(\n filename=os.path.join(self.evaluation_setup_path, 'evaluate.txt')\n ).load()\n\n # Load filename mapping\n map_filename = os.path.join(self.evaluation_setup_path, 'map.txt')\n if os.path.exists(map_filename):\n filename_map = OneToOneMappingContainer(filename=map_filename).load()\n else:\n filename_map = {}\n\n for item in data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False,\n filename_map=filename_map\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f68d6d2858db56fe9a61b3db0a35f0ba", "score": "0.51897764", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate'\n )\n\n eval_file = MetaDataContainer(filename=evaluate_filename)\n if eval_file.exists():\n eval_data = eval_file.load()\n meta_data = {}\n for item in eval_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "7b059c2d61e410891437100c8cc41bbd", "score": "0.5139644", "text": "def 
getAnalyticFormulasForGivenFeatures(data, features, label_names, l1_ratio=1, try_alpha=None, cv_count=10, normalize=True, output_file='formulas.txt'):\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\r\n if try_alpha is None: try_alpha = [0.001, 0.01, 0.1, 1, 10, 100, 1000]\r\n data = data.loc[:,list(features)+list(label_names)]\r\n data = data.sample(frac=1).reset_index(drop=True)\r\n if isinstance(features, list):\r\n features = np.array(features)\r\n for fname in features:\r\n assert np.all(~np.isnan(data[fname])), f'{fname} contains NaNs:\\n' + str(data[fname])\r\n if normalize:\r\n mean = data.mean()\r\n std = data.std()\r\n data1 = (data - mean) / std\r\n else:\r\n data1 = data\r\n data2 = ML.transformFeatures2Quadric(data1.loc[:, features], addConst=False)\r\n data2_features = np.array(data2.columns)\r\n for label in label_names: data2[label] = data1[label]\r\n dataSets = [data1, data2]\r\n featureSets = [features, data2_features]\r\n result_file = open(output_file, 'w')\r\n for label in label_names:\r\n # label_data = (data[label]+data[label].min())**2\r\n label_data = data1[label]\r\n for di in range(len(dataSets)):\r\n d = dataSets[di]\r\n features1 = featureSets[di]\r\n # check possibility\r\n model = sklearn.ensemble.ExtraTreesRegressor(n_estimators=100, random_state=0, min_samples_leaf=10)\r\n # model = lgb.LGBMRegressor(num_leaves=31, learning_rate=0.02, n_estimators=100)\r\n score = np.mean(sklearn.model_selection.cross_val_score(model, d, label_data, cv=cv_count))\r\n if score <= 0.5:\r\n model_string = f'{label} can\\'t be expressed in terms of features: '+str(features1)\r\n else:\r\n score, model, model_string = getLinearAnalyticModel(d, features1, label, l1_ratio=l1_ratio, try_alpha=try_alpha, cv_count=cv_count)\r\n print(model_string)\r\n result_file.write(model_string+'\\n')\r\n\r\n if score <= 0.5: continue\r\n # get simple models\r\n\r\n def searchBestSimple(subsets, max_print_num):\r\n simpleModels = []\r\n for f in subsets:\r\n score, model, model_string = getLinearAnalyticModel(d, f, label, l1_ratio=l1_ratio, try_alpha=try_alpha, cv_count=cv_count)\r\n simpleModels.append({'score':score, 'model':model, 'model_string':model_string, 'features':f})\r\n simpleModels = sorted(simpleModels, key=lambda r:r['score'], reverse=True)\r\n print_num = 0\r\n for sm in simpleModels:\r\n if sm['score'] > 0.5:\r\n print(' ' * 8 + sm['model_string'])\r\n result_file.write(' ' * 8 + sm['model_string'] + '\\n')\r\n print_num += 1\r\n if print_num >= max_print_num: break\r\n else: break\r\n return simpleModels\r\n\r\n searchBestSimple(features1, 1)\r\n simpleModels2 = searchBestSimple(itertools.combinations(features1, 2), 2)\r\n best2features = simpleModels2[0]['features']\r\n subsets = [[*best2features,f] for f in features1 if f not in best2features]\r\n searchBestSimple(subsets, 3)\r\n result_file.close()", "title": "" }, { "docid": "da0f763b12992c748ea7ccf6fe1fe261", "score": "0.51023144", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate',\n scene_label=self.scene_labels()[0]\n )\n eval_file = MetaDataContainer(filename=evaluate_filename)\n\n if eval_file.exists():\n # Get meta data from evaluation file\n meta_data = MetaDataContainer()\n eval_file.load()\n for item in eval_file:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += eval_file\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n 
# Load meta and cross validation\n self.load()\n\n elif os.path.isdir(os.path.join(self.local_path, 'meta')):\n annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])\n\n # Get meta data from annotation files\n meta_data = MetaDataContainer()\n\n for annotation_filename in annotation_files:\n data = MetaDataContainer(filename=annotation_filename).load()\n for item in data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "bd6c9482797fdae526009ac7d392a9d3", "score": "0.5085736", "text": "def meta_TXT(desc, dataset):\n metaName = get_dirName('META', dataset)+'meta_{}.txt'.format(dataset)\n f = open(metaName, 'w') \n f.write('# Description of Data Set\\n\\n\\t'+desc+''\n '\\n\\n# Devices Used\\n')\n f.close()", "title": "" }, { "docid": "5cb484d3b9dad09d169cdafc4ee130a3", "score": "0.50815696", "text": "def make_summary_latex():\n path = \"results/summary/data.tex\"\n log.info(f\"Writing {path}\")\n\n table = open(path, \"w\")\n table.write(r\"\\begin{tabular}{lrrrrrl}\")\n table.write(\"\\n\")\n table.write(r\"\\hline\")\n table.write(\"\\n\")\n table.write(\n r\"Dataset & $T_{\\rm obs}$ & $E_{\\rm min}$ & $E_{\\rm max}$ & $N_{\\rm on}$ & $N_{\\rm bkg}$ & $R_{\\rm on}$ \\\\\"\n )\n table.write(\"\\n\")\n table.write(\n r\" & & TeV & TeV & & & deg \\\\ \\hline\"\n )\n table.write(\"\\n\")\n\n for name in config.all_datasets:\n dataset = config.datasets[name]\n e_min_list = []\n for _spec in dataset.get_SpectrumObservationList():\n e_min_list.append(_spec.lo_threshold)\n spec = dataset.get_SpectrumObservationList().stack()\n stats = spec.total_stats\n\n row_name = rf\"\\{name} & \"\n if name == \"fermi\":\n T_obs = r\"$\\sim$7 yr & \"\n e_min = dataset.energy_range[0].to(\"TeV\").value\n E_min = f\"{e_min:.2f} & \"\n else:\n data_store = DataStore.from_dir(f\"data/{dataset.name}\")\n ontime = sum(data_store.obs_table[\"ONTIME\"]) * u.s\n ontime = ontime.to(\"h\").value\n T_obs = f\"{ontime:.2f} h & \"\n # in case of the IACT e_min is taken from the staked obs\n e_min = min(e_min_list)\n E_min = f\"{e_min.to('TeV').value:.2f} & \"\n\n e_max = dataset.energy_range[1].to(\"TeV\").value\n E_max = f\"{e_max:.0f} & \"\n N_on = f\"{stats.n_on} & \"\n N_bkg = f\"{stats.background:.1f} & \"\n r_on = dataset.on_radius.to(\"deg\").value\n R_on = rf\"{r_on:.2f} \\\\\"\n\n table.write(row_name + T_obs + E_min + E_max + N_on + N_bkg + R_on)\n table.write(\"\\n\")\n\n table.write(\"\\hline\")\n table.write(\"\\n\")\n table.write(\"\\end{tabular}\")\n table.close()", "title": "" }, { "docid": "8c0344a8c311c9a08df150c4bab7baae", "score": "0.5062553", "text": "def get_meta_information() -> Dict:\n return {'name': 'Tabular Benchmarks for Hyperparameter Optimization and Neural Architecture Search',\n 'references': ['Aaron Klein, Frank Hutter',\n 'Tabular Benchmarks for Joint Architecture and Hyperparameter Optimization',\n 'https://arxiv.org/abs/1905.04970',\n 'https://github.com/automl/nas_benchmarks'],\n }", "title": "" }, { "docid": "b4e84b8a11971ae023c19f0440b3e243", "score": "0.5061776", "text": "def meta_test_corpus(self):\n ...", "title": "" }, { "docid": "78c2cb05c89a653b10b7d6a6222d0647", "score": "0.5048847", "text": "def test_patsyfreedummytest_parse_formulas(self):\n\n # define distribution\n cur_distribution = 'Normal'\n family = Family(cur_distribution)\n\n\n # define 
formulas and network shape\n formulas = dict()\n formulas['loc'] = '~1'\n formulas['scale'] = '~1'\n\n degrees_of_freedom = {'loc': 4, 'scale': 4}\n\n deep_models_dict = dict()\n\n prepare_data = PrepareData(formulas, deep_models_dict, degrees_of_freedom)\n prepare_data.fit(self.x)\n datadict = prepare_data.transform(self.x)\n dm_info_dict = prepare_data.dm_info_dict\n network_info_dict = prepare_data.network_info_dict\n P = prepare_data.P\n #call parse_formulas\n ground_truth = np.ones([len(self.x),1])\n ground_truth = torch.from_numpy(ground_truth).float()\n \n #test if shapes of design matrices and P are as correct\n self.assertTrue((datadict['loc']['structured'] == ground_truth).all())\n self.assertTrue((datadict['loc']['structured'].shape == ground_truth.shape),'shape missmatch')\n self.assertEqual(network_info_dict['loc']['struct_shapes'], 1)\n self.assertEqual(P['loc'].shape, (1, 1))\n self.assertEqual(P['loc'], 0)\n\n self.assertTrue((datadict['scale']['structured'].shape == ground_truth.shape), 'shape missmatch')\n self.assertTrue((datadict['scale']['structured'] == ground_truth).all())\n self.assertEqual(network_info_dict['scale']['struct_shapes'], 1)\n self.assertEqual(P['scale'].shape, (1, 1))\n self.assertEqual(P['scale'], 0)\n\n\n # test if dm_info_dict is correct\n self.assertTrue(dm_info_dict['loc']['spline_info']['list_of_spline_slices'] == [])\n self.assertTrue(dm_info_dict['scale']['spline_info']['list_of_spline_slices'] == [])\n self.assertTrue(dm_info_dict['loc']['spline_info']['list_of_spline_input_features'] == [])\n self.assertTrue(dm_info_dict['scale']['spline_info']['list_of_spline_input_features'] == [])", "title": "" }, { "docid": "c819ba64d6f59dd6199775d2fe3a2452", "score": "0.5037137", "text": "def create_recipe_metadata(self):\n topmeta = Metadata(metafile=\"%s/metadata.txt\" % self.working_directory)\n topmeta.write_data('directory_created', self.asctime)\n topmeta.write_data('system_name', self.sysname)\n topmeta.write_data('origin_dir', self.origin_dir)\n topmeta.write_data('working_directory', self.working_directory)\n topmeta.write_data('timestamp', self.timestamp)\n return", "title": "" }, { "docid": "8e014bdaa63f7eec1492a815a6d712f5", "score": "0.50194395", "text": "def main():\n config_path = \"/home/sevvy/PycharmProjects/CardioDiscover/test_config.json\"\n meta_doc, study_list, _ = read_meta(path=config_path)\n\n for i, study_doc in enumerate(study_list):\n # doc updated separator for the columns\n study_doc = read_GWAS.init_reader(study_doc)\n # updated doc with headers and indices\n study_doc = classify_columns.init_classifier(study_doc)\n # update original meta_doc with the new meta data\n study_list[i] = study_doc\n meta_doc = update_meta(meta_doc, study_list)\n # write doc with additional info to json config file\n write_meta(meta_doc, config_path)", "title": "" }, { "docid": "888c7a77dffca103764d1eff860040ad", "score": "0.49969453", "text": "def emit_machinery():\n machinery = [\n PROJECT / \"packaging\" / \"homebrew-package.sh\",\n PROJECT / \"packaging\" / \"homebrew-formula.rb\",\n PROJECT / \"ci\" / \"release-in-docker.sh\"\n ]\n for item in machinery:\n dest = DIST / item.name\n dest.write_bytes(item.read_bytes())\n dest.chmod(item.stat().st_mode)", "title": "" }, { "docid": "a716bf5c3a7da09463577fe2277c7343", "score": "0.49856973", "text": "def build_formulas(y_col, outcomes):\n if 'risk_score' in y_col:\n predictors = ['risk_score_t']\n else:\n predictors = ['{}_hat'.format(y_col)]\n\n # build all y ~ x formulas\n all_formulas = []\n for 
y in outcomes:\n for x in predictors:\n formula = '{} ~ {}'.format(y, x)\n all_formulas.append(formula)\n return all_formulas", "title": "" }, { "docid": "7d17f9a07a705243da08327db814398f", "score": "0.49630755", "text": "def build_metadata(sequence, prefix, idx, keys, metadata_type=\"synergy\"):\n # generate for all keys\n metadata = {}\n for key in keys:\n metadata[key] = 0\n\n # adjust some manually\n metadata[DataKeys.SEQ_METADATA] = \"features={}\".format(prefix)\n metadata[DataKeys.GC_CONTENT] = float(sequence.count(\"G\") + sequence.count(\"C\")) / len(sequence)\n metadata[\"sequence.nn\"] = sequence\n metadata[\"example_id\"] = \"{}-{}\".format(prefix, idx)\n metadata[\"edge_indices\"] = \"60.,100.\"\n\n # adjust for specific library types\n if metadata_type == \"synergy\":\n metadata[\"example_combo_id\"] = \"{}-{}.combo-NULL\".format(prefix, idx)\n metadata[\"combos\"] = -1\n metadata[\"motifs\"] = prefix\n\n # make into df\n metadata = pd.DataFrame(metadata, index=[0])\n \n return metadata", "title": "" }, { "docid": "f722f44d7c65ba55332a232dce8adea5", "score": "0.49470893", "text": "def prepare(self):\n\n scene_label = 'synthetic'\n subset_map = {'test': 'evaltest'}\n param_hash = 'bbb81504db15a03680a0044474633b67'\n # Make sure evaluation_setup directory exists\n Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))\n\n if not self.meta_container.exists() and self.reference_data_present:\n # Collect meta data\n meta_data = MetaDataContainer()\n for class_label in self.event_labels():\n for subset_label, subset_name_on_disk in iteritems(subset_map):\n subset_name_on_disk = subset_map[subset_label]\n\n mixture_path = os.path.join(\n 'data',\n 'mixture_data',\n subset_name_on_disk,\n param_hash,\n 'audio'\n )\n\n mixture_meta_path = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_name_on_disk,\n param_hash,\n 'meta'\n )\n\n event_list_filename = os.path.join(\n mixture_meta_path,\n 'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'\n )\n\n if os.path.isfile(event_list_filename):\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n\n for item in current_meta:\n item.filename = os.path.join(mixture_path, item.filename)\n item.scene_label = scene_label\n\n meta_data += current_meta\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n\n test_filename = self.evaluation_setup_filename(\n setup_part='test',\n fold=None,\n file_extension='txt'\n )\n\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=None,\n file_extension='txt'\n )\n\n # Check that evaluation setup exists\n evaluation_setup_exists = True\n if not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):\n evaluation_setup_exists = False\n\n if not evaluation_setup_exists:\n # Get parameter hash\n mixture_meta_path_test = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_map['test'],\n param_hash,\n 'meta'\n )\n mixture_path_test = os.path.join(\n 'data',\n 'mixture_data',\n subset_map['test'],\n param_hash,\n 'audio'\n )\n\n test_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_test,\n 'event_list_' + subset_map['test'] + '_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n current_meta_ = 
MetaDataContainer()\n for item in current_meta:\n item.filename = os.path.join(mixture_path_test, item.filename)\n current_meta_.append(MetaDataItem(\n {\n 'filename': item.filename,\n 'scene_label': scene_label\n }\n ))\n test_meta += current_meta_\n test_meta.save(filename=test_filename)\n\n eval_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_test,\n 'event_list_' + subset_map['test'] + '_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n for item in current_meta:\n item.filename = os.path.join(mixture_path_test, item.filename)\n item.scene_label = scene_label\n\n eval_meta += current_meta\n eval_meta.save(filename=evaluate_filename)\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "42ab56ce899ba19cc97100b6a5d5babf", "score": "0.49374956", "text": "def run_metadata_driven(self, dd, sampling_budget=20):\n if self.VERBOSE:\n print(\"------------------------------------------------------------------------\\n\"\n \"--------------------------Running Metadata Driven-----------------------\\n\"\n \"------------------------------------------------------------------------\")\n d = raha.dataset.Dataset(dd)\n actual_errors_dictionary = d.get_actual_errors_dictionary()\n dboost_output = self.run_dboost(dd)\n nadeef_output = self.run_nadeef(dd)\n katara_output = self.run_katara(dd)\n lfv = {}\n columns_frequent_values = {}\n for j, attribute in enumerate(d.dataframe.columns.tolist()):\n fd = {}\n for value in d.dataframe[attribute].tolist():\n if value not in fd:\n fd[value] = 0\n fd[value] += 1\n sorted_fd = sorted(fd.items(), key=operator.itemgetter(1), reverse=True)[:int(d.dataframe.shape[0] / 10.0)]\n columns_frequent_values[j] = {v: f for v, f in sorted_fd}\n cells_list = list(itertools.product(range(d.dataframe.shape[0]), range(d.dataframe.shape[1])))\n for cell in cells_list:\n lfv[cell] = []\n lfv[cell] += [1 if cell in dboost_output else 0]\n lfv[cell] += [1 if cell in nadeef_output else 0]\n lfv[cell] += [1 if cell in katara_output else 0]\n value = d.dataframe.iloc[cell[0], cell[1]]\n lfv[cell] += [1 if value in columns_frequent_values[cell[1]] else 0]\n lfv[cell] += [1 if re.findall(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\", value) else 0]\n lfv[cell] += [1 if re.findall(\"https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+\", value) else 0]\n lfv[cell] += [1 if re.findall(\"^[\\d]+$\", value) else 0]\n lfv[cell] += [1 if re.findall(r\"[\\w.-]+@[\\w.-]+\", value) else 0]\n lfv[cell] += [1 if re.findall(\"^[\\d]{16}$\", value) else 0]\n lfv[cell] += [1 if value.lower() in [\"m\", \"f\"] else 0]\n lfv[cell] += [1 if re.findall(\"^[\\d]{4,6}$\", value) else 0]\n lfv[cell] += [1 if not value else 0]\n for la, ra in self.DATASET_CONSTRAINTS[d.name][\"functions\"]:\n lfv[cell] += [1 if d.dataframe.columns.tolist()[cell[1]] in [la, ra] else 0]\n random_tuples_list = [i for i in random.sample(range(d.dataframe.shape[0]), d.dataframe.shape[0])]\n labeled_tuples = {i: 1 for i in random_tuples_list[:sampling_budget]}\n x_train = []\n y_train = []\n for cell in cells_list:\n if cell[0] in labeled_tuples:\n x_train.append(lfv[cell])\n y_train.append(int(cell in actual_errors_dictionary))\n detection_dictionary = {}\n if sum(y_train) != 0:\n x_test = [lfv[cell] for cell in cells_list]\n test_cells = [cell for cell in cells_list]\n if sum(y_train) != len(y_train):\n 
model = sklearn.ensemble.AdaBoostClassifier(n_estimators=6)\n model.fit(x_train, y_train)\n predicted_labels = model.predict(x_test)\n else:\n predicted_labels = len(test_cells) * [1]\n detection_dictionary = {}\n for index, pl in enumerate(predicted_labels):\n cell = test_cells[index]\n if cell[0] in labeled_tuples:\n if cell in actual_errors_dictionary:\n detection_dictionary[cell] = \"JUST A DUMMY VALUE\"\n elif pl:\n detection_dictionary[cell] = \"JUST A DUMMY VALUE\"\n return detection_dictionary", "title": "" }, { "docid": "76bb5c4b6c8a0f727f0254ff09ff08c8", "score": "0.49280396", "text": "def build_meta(self):\n from datetime import datetime\n now = datetime.now().strftime('%Y-%m-%d')\n self.sheets['_meta']['fd'].write_row('A1', ['dump_date', now])\n self.sheets['_meta']['fd'].write_row('A2', ['header_rows', self.header_row])", "title": "" }, { "docid": "8600427f1abf02d5b34c7c6f2459da14", "score": "0.48955962", "text": "def test_structured_parse_formulas(self):\n\n\n # define distribution\n cur_distribution = 'Normal'\n family = Family(cur_distribution)\n\n\n # define formulas and network shape\n formulas = dict()\n formulas['loc'] = '~1'\n formulas['scale'] = '~1 + x1'\n\n degrees_of_freedom = {'loc': 4, 'scale': 4}\n\n deep_models_dict = dict()\n\n\n #call parse_formulas\n prepare_data = PrepareData(formulas, deep_models_dict, degrees_of_freedom)\n prepare_data.fit(self.x)\n datadict = prepare_data.transform(self.x)\n dm_info_dict = prepare_data.dm_info_dict\n network_info_dict = prepare_data.network_info_dict\n P = prepare_data.P\n \n ground_truth_loc = dmatrix(formulas['loc'], self.x, return_type='dataframe').to_numpy()\n ground_truth_scale = dmatrix(formulas['scale'], self.x, return_type='dataframe').to_numpy()\n ground_truth_loc = torch.from_numpy(ground_truth_loc).float()\n ground_truth_scale = torch.from_numpy(ground_truth_scale).float()\n\n\n #test if shapes of design matrices and P are as correct\n self.assertTrue((datadict['loc']['structured'] == ground_truth_loc).all())\n self.assertTrue((datadict['loc']['structured'].shape == ground_truth_loc.shape),'shape missmatch')\n self.assertEqual(network_info_dict['loc']['struct_shapes'], 1)\n self.assertEqual(P['loc'].shape, (1, 1))\n self.assertTrue((P['loc']==0).all())\n\n self.assertTrue((datadict['scale']['structured'].shape == ground_truth_scale.shape), 'shape missmatch')\n self.assertTrue((datadict['scale']['structured'] == ground_truth_scale).all())\n self.assertEqual(network_info_dict['scale']['struct_shapes'], 2)\n self.assertEqual(P['scale'].shape, (2, 2))\n self.assertTrue((P['scale']==0).all())\n\n\n # test if dm_info_dict is correct\n self.assertTrue(dm_info_dict['loc']['spline_info']['list_of_spline_slices'] == [])\n self.assertTrue(dm_info_dict['scale']['spline_info']['list_of_spline_slices'] == [])\n self.assertTrue(dm_info_dict['loc']['spline_info']['list_of_spline_input_features'] == [])\n self.assertTrue(dm_info_dict['scale']['spline_info']['list_of_spline_input_features'] == [])", "title": "" }, { "docid": "dc484aba9980cfcc463e0d33001dd5ac", "score": "0.4855985", "text": "def create_metadata(self, cond_type):\n # checks file exists \n file = self.file_check(self.meta_dir, cond_type)\n\n # separates file type and string name of file.\n fileparts = os.path.basename( file ).split('.')\n if len( fileparts ) != 2:\n logger.criticalcal(\"Error processing file: %s, too many . in filebase\"%file)\n \n # take file parts for processing. 
\n basefile, filetype = fileparts\n\n # IMPORTANT, transfering the meta file description\n logger.info(\"Matching '%s' to one of: %s meta types\" %(basefile, self.meta_types))\n pattern = [ t for t in self.meta_types if re.search(t, basefile) is not None ]\n logger.info(\"Found %s to bring into HDF5.\"%basefile)\n\n # catch any potential errors. \n if len(pattern)>1:\n logger.critical(\n 'Multiple patterns matched for meta files.'\\\n 'Must change meta file names to match a type from list below.'\\\n '%s' %self.meta_types)\n else:\n logger.debug(\"Matched %s with %s\" % (pattern[0], basefile) )\n attr_name = pattern[0]\n\n # now we can load in the file.\n if filetype == 'csv':\n info = np.recfromcsv( file, names=['index', attr_name], delimiter=',' )\n\n elif filetype == 'txt':\n info = np.recfromcsv( file, names=['index', attr_name], delimiter='\\t' )\n\n else:\n logger.exception( \"%s, not .txt or .csv, chill out, we're not there yet...\"%filename)\n\n # check to make sure we don't have more than 2 columns in meta data file\n if len(info[0])>2:\n logger.exception('The metadata file format is [full scan index, value at index]')\n\n # checks that the length of the info file is the same length as the number of TRs. \n if len( info ) != (self.n_TRs):\n logger.exception( \"Length of %s, does not match total # of TRs.\" % filename )\n\n # cycle through each run\n for i,r in self.hdf['func'].iteritems():\n walker = []\n\n # make sure run index matches\n run = self.hdf['func'][i].attrs['run']\n\n # cycles through each TR\n for j in self.hdf['func'][i].attrs['f_ind']:\n # append to vector if run index matches global index\n if run == info[j][0]:\n walker.append( info[j][1] )\n else:\n logger.critical(\"Run index and meta file do not match! AHH\")\n\n # write to hdf5\n logger.info(\"No obvious errors processing %s file into hdf5\"%filename)\n self.hdf['func'][i].attrs[attr_name] = np.array( walker )", "title": "" }, { "docid": "a2befd192159107d90262aebd06648ef", "score": "0.48540595", "text": "def test_orthogonalization_of_unstructured_part_in_parse_formulas(self):\n\n\n # define distributions\n cur_distribution = 'Normal'\n family = Family(cur_distribution)\n\n\n # define formulas and network shape\n formulas = dict()\n formulas['loc'] = \"~1 + x1 + x2 + spline(x3, bs='bs', df=9, degree=3) + d1(x1) + d2(x1,x3)\"\n formulas['scale'] = '~1 + x1 + d1(x1)'\n \n degrees_of_freedom = {'loc': 4, 'scale': 4}\n\n deep_models_dict = dict()\n deep_models_dict['d1'] = {'model': nn.Sequential(nn.Linear(1, 15)), 'output_shape': 42}\n deep_models_dict['d2'] = {'model': nn.Sequential(nn.Linear(2, 15)), 'output_shape': 42}\n\n\n #call parse_formulas\n prepare_data = PrepareData(formulas, deep_models_dict, degrees_of_freedom)\n prepare_data.fit(self.x)\n datadict = prepare_data.transform(self.x)\n dm_info_dict = prepare_data.dm_info_dict\n network_info_dict = prepare_data.network_info_dict\n \n X=dmatrix(\"~1 + x1 + x2 + spline(x3, bs='bs', df=9, degree=3)\", self.x, return_type='dataframe')\n \n orthogonalization_pattern = network_info_dict['loc']['orthogonalization_pattern']['d1']\n true_column_names = []\n true_column_names.append('Intercept')\n true_column_names.append('x1')\n column_names = set([list(X.iloc[:,sl].columns)[-1] for sl in orthogonalization_pattern])\n self.assertTrue(len(column_names.symmetric_difference(set(true_column_names))) == 0) #test if column names and true_column_names are identical\n \n orthogonalization_pattern = network_info_dict['loc']['orthogonalization_pattern']['d2']\n 
true_column_names = []\n true_column_names.append('Intercept')\n true_column_names.append('x1')\n true_column_names.append(\"spline(x3, bs='bs', df=9, degree=3)[8]\")\n column_names = set([list(X.iloc[:,sl].columns)[-1] for sl in orthogonalization_pattern])\n self.assertTrue(len(column_names.symmetric_difference(set(true_column_names))) == 0) #test if column names and true_column_names are identical\n \n X=dmatrix(\"~1 + x1\", self.x, return_type='dataframe')\n \n orthogonalization_pattern = network_info_dict['loc']['orthogonalization_pattern']['d1']\n true_column_names = []\n true_column_names.append('Intercept')\n true_column_names.append('x1')\n column_names = set([list(X.iloc[:,sl].columns)[-1] for sl in orthogonalization_pattern])\n self.assertTrue(len(column_names.symmetric_difference(set(true_column_names))) == 0) #test if column names and true_column_names are identical", "title": "" }, { "docid": "f6901d3995603c7a1d33916fc014d578", "score": "0.48536235", "text": "def setupMAEC(self):\r\n if self.results[\"target\"][\"category\"] == \"file\":\r\n self.id_generator = Generator(self.results[\"target\"][\"file\"][\"md5\"])\r\n elif self.results[\"target\"][\"category\"] == \"url\":\r\n self.id_generator = Generator(hashlib.md5(self.results[\"target\"][\"url\"]).hexdigest())\r\n else:\r\n raise CuckooReportError(\"Unknown target type\")\r\n\r\n # Generate Package.\r\n self.package = Package(self.id_generator.generate_package_id())\r\n # Generate Malware Subject.\r\n self.subject = MalwareSubject(self.id_generator.generate_malware_subject_id())\r\n # Add the Subject to the Package.\r\n self.package.add_malware_subject(self.subject)\r\n # Generate dynamic analysis bundle.\r\n self.dynamic_bundle = Bundle(self.id_generator.generate_bundle_id(), False, \"4.0.1\", \"dynamic analysis tool output\")\r\n # Add the Bundle to the Subject.\r\n self.subject.add_findings_bundle(self.dynamic_bundle)\r\n # Generate Static Analysis Bundles, if static results exist.\r\n if self.options[\"static\"] and \"static\" in self.results and self.results[\"static\"]:\r\n self.static_bundle = Bundle(self.id_generator.generate_bundle_id(), False, \"4.0.1\", \"static analysis tool output\")\r\n self.subject.add_findings_bundle(self.static_bundle)\r\n if self.options[\"strings\"] and \"strings\" in self.results and self.results[\"strings\"]:\r\n self.strings_bundle = Bundle(self.id_generator.generate_bundle_id(), False, \"4.0.1\", \"static analysis tool output\")\r\n self.subject.add_findings_bundle(self.strings_bundle)\r\n if self.options[\"virustotal\"] and \"virustotal\" in self.results and self.results[\"virustotal\"]:\r\n self.virustotal_bundle = Bundle(self.id_generator.generate_bundle_id(), False, \"4.0.1\", \"static analysis tool output\")\r\n self.subject.add_findings_bundle(self.virustotal_bundle)", "title": "" }, { "docid": "daf501aa6a1f91d4325718d038829cfa", "score": "0.4848795", "text": "def test_unstructured_parse_formulas(self):\n\n\n # define distributions\n cur_distribution = 'Normal'\n family = Family(cur_distribution)\n\n\n # define formulas and network shape\n formulas = dict()\n formulas['loc'] = '~1 + d1(x2,x1,x3)'\n formulas['scale'] = '~1 + x1 + d2(x1)'\n \n degrees_of_freedom = {'loc': 4, 'scale': 4}\n\n deep_models_dict = dict()\n deep_models_dict['d1'] = {'model': nn.Sequential(nn.Linear(1, 15)), 'output_shape': 42}\n deep_models_dict['d2'] = {'model': nn.Sequential(nn.Linear(1, 15)), 'output_shape': 42}\n\n\n #call parse_formulas\n prepare_data = PrepareData(formulas, deep_models_dict, 
degrees_of_freedom)\n prepare_data.fit(self.x)\n datadict = prepare_data.transform(self.x)\n dm_info_dict = prepare_data.dm_info_dict\n network_info_dict = prepare_data.network_info_dict\n P = prepare_data.P\n \n ground_truth_loc = dmatrix('~1', self.x, return_type='dataframe').to_numpy()\n ground_truth_scale = dmatrix('~1 + x1', self.x, return_type='dataframe').to_numpy()\n ground_truth_loc = torch.from_numpy(ground_truth_loc).float()\n ground_truth_scale = torch.from_numpy(ground_truth_scale).float()\n \n x2x1x3 = self.x[['x2','x1','x3']] \n x1 = self.x[['x1']] \n \n #test if shapes of design matrices and P are as correct\n self.assertTrue((datadict['loc']['structured'] == ground_truth_loc).all())\n self.assertTrue((datadict['loc']['structured'].shape == ground_truth_loc.shape),'shape missmatch')\n self.assertTrue(((datadict['loc']['d1'].numpy() - x2x1x3.to_numpy()) < 0.0001).all())\n self.assertTrue((datadict['loc']['d1'].shape == self.x[['x2','x1','x3']].shape),'shape missmatch for neural network input')\n self.assertEqual(network_info_dict['loc']['struct_shapes'], 1)\n self.assertEqual(P['loc'].shape, (1, 1))\n self.assertTrue((P['loc']==0).all())\n self.assertEqual(list(network_info_dict['loc']['deep_models_dict'].keys()), ['d1'])\n self.assertEqual(network_info_dict['loc']['deep_models_dict']['d1'], deep_models_dict['d1']['model'])\n self.assertEqual(network_info_dict['loc']['deep_shapes']['d1'], deep_models_dict['d1']['output_shape'])\n\n self.assertTrue((datadict['scale']['structured'] == ground_truth_scale).all())\n self.assertTrue((datadict['scale']['structured'].shape == ground_truth_scale.shape), 'shape missmatch')\n self.assertTrue(((datadict['scale']['d2'].numpy() - x1.to_numpy()) < 0.0001).all())\n self.assertTrue((datadict['scale']['d2'].shape == self.x[['x1']].shape),'shape missmatch for neural network input')\n self.assertEqual(network_info_dict[\"scale\"]['struct_shapes'], 2)\n self.assertEqual(P['scale'].shape, (2, 2))\n self.assertTrue((P['scale']==0).all())\n self.assertEqual(list(network_info_dict['scale']['deep_models_dict'].keys()), ['d2'])\n self.assertEqual(network_info_dict['scale']['deep_models_dict']['d2'],deep_models_dict['d2']['model'])\n self.assertEqual(network_info_dict['scale']['deep_shapes']['d2'], deep_models_dict['d2']['output_shape'])\n\n\n # test if dm_info_dict is correct\n self.assertTrue(dm_info_dict['loc']['spline_info']['list_of_spline_slices'] == [])\n self.assertTrue(dm_info_dict['scale']['spline_info']['list_of_spline_slices'] == [])\n self.assertTrue(dm_info_dict['loc']['spline_info']['list_of_spline_input_features'] == [])\n self.assertTrue(dm_info_dict['scale']['spline_info']['list_of_spline_input_features'] == [])", "title": "" }, { "docid": "9c5bd6a416f9384e0a573354f3ee9ca4", "score": "0.48282272", "text": "def get_meta():\n inference_engine_names = {\n 1: 'tflite',\n 2: 'openvino',\n 3: 'darknet',\n 4: 'tensorflow',\n 5: 'pytorch',\n 6: 'keras',\n 7: 'mxnet',\n 8: 'caffe2',\n 9: 'caffe',\n 10: 'movidius',\n 11: 'others'\n }\n # model, label, config are full paths for copying them into\n # target package directory. 
They will be updated to relative paths\n # in the model package.\n meta = {}\n config_files = {}\n meta['name'] = input('Package name: ')\n meta['version'] = input('Package version: ')\n meta['model'] = os.path.abspath(input('Model filepath: '))\n meta['label'] = os.path.abspath(input('Label filepath: '))\n while True:\n key = input('Config name (press enter directly to stop): ')\n if len(key) != 0:\n value = os.path.abspath(input('Config filepath: '))\n config_files[key] = value\n else:\n break\n meta['config'] = config_files\n engine_index = int(input(\n (\n 'Inference engine\\n'\n '\\t 1. TFLite\\n'\n '\\t 2. OpenVINO\\n'\n '\\t 3. Darknet\\n'\n '\\t 4. TensorFlow\\n'\n '\\t 5. PyTorch\\n'\n '\\t 6. Keras\\n'\n '\\t 7. MXNet\\n'\n '\\t 8. Caffe2\\n'\n '\\t 9. Caffe\\n'\n '\\t10. Movidius\\n'\n '\\t11. Others\\n'\n ': '\n )\n ))\n meta['inference-engine'] = inference_engine_names[engine_index]\n return meta", "title": "" }, { "docid": "4f258e1ecfc79b741edc8477a40aa075", "score": "0.48084834", "text": "def main():\n records = pd.read_csv(\"/metadata/metadata.tsv\", sep=\"\\t\").to_dict(\"record\")\n print(f\"{len(records)} records to insert: \\n{records[:5]}\")\n create_tmp_collections(records)\n\n update_tmp_phylo_collection()\n update_tmp_lineage_collection()\n copy_to_prod_collections()", "title": "" }, { "docid": "c99bfd5965968790a0c3948869f99cf2", "score": "0.4808039", "text": "def meta_SPEC(dataset, instr):\n metaName = get_dirName('META', dataset)+'meta_{}.txt'.format(dataset)\n f = open(metaName, 'r')\n contents = f.readlines()\n f.close()\n\n contents.append('\\n\\tSpectrometer : %s' % instr.serial[0])\n contents.append('\\n')\n \n f = open(metaName, 'w')\n contents = ''.join(contents)\n f.write(contents) \n f.close()", "title": "" }, { "docid": "feb257fde0b9b21713dc7e6bd4b49fe7", "score": "0.48061046", "text": "def process_meta():\n try:\n meta = [_.strip(' ') for _ in open(METAFILE).read().splitlines()]\n idxs = [idx for idx, val in enumerate(meta) if \"<\" in val]\n tables = [meta[i + 1:j] for i, j in zip(idxs[::2], idxs[1::2])]\n meta = {table[0]:table[1:] for table in tables}\n attr = list(meta.values())\n cols = set(chain(*attr))\n idt = { col: [idx for idx, ls in enumerate(attr) if col in ls] for col in cols}\n tables = list(meta.keys())\n except Exception as e:\n print(f\"metaerror: {e}\")\n tables, idt, meta = [], {}, {}\n return tables, idt, meta", "title": "" }, { "docid": "310c2b756d620776f3d7b809e5137168", "score": "0.48016256", "text": "def print_imeta2(samp):\n os.system('printf \"#!/bin/bash\\\\nset -e\\\\n\\\\\\n\" > imeta.sh')\n os.system(\n 'imeta qu -z seq -d sample = {SAMPLE} and target = 1 >> imeta.sh'\n .format(SAMPLE=samp))\n fh = open('imeta.sh', 'r')\n string_list = fh.readlines()\n fh.close()\n os.system('rm imeta.sh')\n out_list = []\n for i in string_list:\n if re.search('collection:|dataObj:', i):\n if re.search('collection: ', i):\n ir = re.sub('collection: ', '', i)\n ir = ir.rstrip()\n out_list.append(ir + '/')\n elif re.search('dataObj: ', i):\n ir = re.sub('dataObj: ', '', i)\n out_list.append(ir)\n dataObj_list = \"\".join(out_list).strip().split('\\n')\n fh.close()\n libtype = []\n for dataObj in dataObj_list:\n meta = os.popen(\n 'imeta ls -d {dataObj}'.format(dataObj=dataObj.strip())).read()\n meta_list = meta.strip().split('\\n')\n attr = [\n re.sub('attribute: ', '', x).strip() for x in meta_list\n if re.search('attribute: ', x)\n ]\n val = [\n re.sub('value: ', '', x).strip() for x in meta_list\n if re.search('value:', x)\n ]\n ser = 
pd.Series(dict(zip(attr, val)))\n libtype.append(ser.loc['library_type'])\n for dataObj, libt in zip(dataObj_list,\n [re.sub(' ', '_', x) for x in libtype]):\n if not os.path.exists(libt):\n os.makedirs(libt)\n os.system('printf \"#!/bin/bash\\\\nset -e\\\\n\\\\\\niget -K {dataObj}\" > '.\n format(dataObj=dataObj) + libt + '/imeta.sh')\n os.system('sed \"/.fastq.gz/d\" -i ' + libt + '/imeta.sh')", "title": "" }, { "docid": "615bf8ffefcae40f089d595dc19da583", "score": "0.4798562", "text": "def main():\n template_generator = flame.TemplateGenerator()\n template_generator.extract_data()\n template_generator.output_template_and_data()", "title": "" }, { "docid": "5e297af6e36bd26fa12e8df385730b92", "score": "0.47898075", "text": "def readme_creation(template_name, data, license_params, readme_text, readme_location, template_location):\n folder_loc = 'readme_files/'\n base_readme = folder_loc + 'base.README.md'\n misc_readme = folder_loc + 'misc.README.txt'\n final_readme = readme_location + 'README.md'\n with open(base_readme, 'r') as readme:\n readme = readme.read()\n post_config_text = ''; sp_text = ''; extra_prereq_text = ''\n\n ####### Text Values for README templates #######\n title_text = readme_text['title_text'][template_name]\n intro_text = readme_text['intro_text'][template_name]\n stack_type_text = stack_type_check(template_location, readme_text)\n if 'supported' in readme_location:\n help_text = readme_text['help_text']['supported']\n else:\n help_text = readme_text['help_text']['experimental']\n version_map = md_version_map(data, readme_text)\n deploy_links = create_deploy_links(readme_text['deploy_links']['version_tag'], readme_text['deploy_links']['lic_support'][template_name], template_location)\n bash_script = readme_text['bash_script']\n ps_script = readme_text['ps_script']\n example_text = readme_text['config_example_text'][template_name]\n\n ### Check for optional readme items ###\n # Add service principal text if needed\n if param_exist(data, 'servicePrincipalSecret'):\n sp_text = misc_readme_grep('<SERVICE_PRINCIPAL_TXT>', misc_readme)\n extra_prereq_text += ' - ' + readme_text['prereq_text']['service_principal'] + '\\n'\n # Post-Deployment Configuration Text Substitution\n if 'autoscale' in template_name:\n post_config_text = misc_readme_grep('<POST_CONFIG_AUTOSCALE_TXT>', misc_readme)\n extra_prereq_text += ' - ' + readme_text['prereq_text']['post_config'] + '\\n'\n elif param_exist(data, 'numberOfExternalIps'):\n extra_prereq_text += ' - ' + readme_text['prereq_text']['post_config'] + '\\n'\n if template_name in 'ha-avset':\n post_config_text = misc_readme_grep('<POST_CONFIG_FAILOVER_TXT>', misc_readme)\n extra_prereq_text += ' - ' + readme_text['prereq_text']['rg_limit'] + '\\n'\n else:\n post_config_text = misc_readme_grep('<POST_CONFIG_TXT>', misc_readme)\n\n ### Map in dynamic values ###\n readme = readme.replace('<TITLE_TXT>', title_text)\n readme = readme.replace('<INTRO_TXT>', intro_text)\n readme = readme.replace('<STACK_TYPE_TXT>', stack_type_text)\n readme = readme.replace('<EXTRA_PREREQS>', extra_prereq_text)\n readme = readme.replace('<VERSION_MAP_TXT>', version_map)\n readme = readme.replace('<HELP_TXT>', help_text)\n readme = readme.replace('<DEPLOY_LINKS>', deploy_links)\n readme = readme.replace('<EXAMPLE_PARAMS>', md_param_array(data, license_params))\n readme = readme.replace('<PS_SCRIPT>', ps_script)\n readme = readme.replace('<BASH_SCRIPT>', bash_script)\n readme = readme.replace('<EXAMPLE_TEXT>', example_text)\n readme = 
readme.replace('<POST_CONFIG_TXT>', post_config_text)\n readme = readme.replace('<SERVICE_PRINCIPAL>', sp_text)\n\n # Write to solution location\n with open(final_readme, 'w') as readme_complete:\n readme_complete.write(readme)\n ## End README creation Function\n return 'README Created for ' + template_name", "title": "" }, { "docid": "bb5257fd14071114da7ec03124f50cba", "score": "0.47880632", "text": "def create_metafile(meta, package_dirpath):\n checksums = {}\n for cksum_key in ['model', 'label']:\n target_path = os.path.join(package_dirpath, meta[cksum_key])\n checksums[meta[cksum_key]] = get_sha256(target_path)\n for k, v in meta['config'].items():\n target_path = os.path.join(package_dirpath, v)\n checksums[v] = get_sha256(target_path)\n meta['checksums-sha256'] = checksums\n\n with open(os.path.join(package_dirpath, 'meta.json'), 'w') as f:\n json.dump(meta, f, indent=4)", "title": "" }, { "docid": "82e076c8c13cfab37ca6d7fe89346109", "score": "0.4775918", "text": "def task_substitute_macros():\n script = SOURCE / \"prepare_paper_macros.py\"\n file_deps = {\n \"--macro-template-file\": SOURCE / \"macros.tex.template\",\n \"--plot-data-path\": ROOT / \"plotoverline.csv\",\n }\n\n def create_cmd():\n cmd = f\"python {script}\"\n for key, value in file_deps.items():\n cmd += f\" {key} {value}\"\n cmd += \" --domain-size {size}\"\n cmd += \" --num-dofs {num_dofs}\"\n cmd += \" --output-macro-file {targets}\"\n return cmd\n\n return {\n \"file_dep\": [script] + list(file_deps.values()),\n \"actions\": [create_cmd()],\n \"getargs\": {\n \"size\": (\"get_domain_size\", \"size\"),\n \"num_dofs\": (\"get_num_dofs\", \"num_dofs\"),\n },\n \"targets\": [ROOT / \"macros.tex\"],\n \"clean\": True,\n }", "title": "" }, { "docid": "3220064e6eb8d94a64d07218be56266c", "score": "0.47754058", "text": "def update_with_ted_guidance(filename):\n\n # Collect the guidance.\n dfs = []\n for path in sorted(mappingdir.glob(\"*.csv\")):\n df = pd.read_csv(path)\n # Ignore rows without guidance (like defence forms), or for which the guidance is to discard.\n df = df[df[\"guidance\"].notna()]\n # Prefix the XPath to match the spreadsheet used in `update-with-xpath`.\n df[\"xpath\"] = f\"TED_EXPORT/FORM_SECTION/{path.stem}\" + df[\"xpath\"]\n # Add the form for more concise reporting.\n df[\"form\"] = path.stem.replace(\"_2014\", \"\")\n dfs.append(df)\n\n # ignore_index is required, as each data frame repeats indices.\n df = pd.concat(dfs, ignore_index=True).rename(columns={\"guidance\": \"TED guidance\"}, errors=\"raise\")\n # This drops \"index\" and \"comment\", which are of no assistance to mapping, and \"label-key\".\n df = df.groupby(\"xpath\").agg({\"TED guidance\": unique, \"form\": \"first\"})\n # We need to promote the \"xpath\" index to a column for it to be returned by `write`.\n df[\"index\"] = df.index\n\n df = write(filename, df, [\"TED guidance\"], explode=[\"TED Xpath\"], left_on=\"TED Xpath\", right_on=\"xpath\")\n\n # Ignore unmerged rows whose guidance is to discard.\n df = df[~df[\"TED guidance\"].astype(str).str.startswith((\"['Discard\", '[\"Discard'))]\n # Reduce duplication in the unmerged rows.\n df[\"index\"] = df[\"index\"].str.replace(r\"TED_EXPORT/FORM_SECTION/[^/]+\", \"\", regex=True)\n df = df.groupby(\"index\").agg({\"form\": unique})\n df[\"xpath\"] = df.index\n\n # Some TED elements cannot be converted to eForms.\n # https://github.com/OP-TED/ted-xml-data-converter/blob/main/ted-elements-not-convertible.md\n url = 
\"https://raw.githubusercontent.com/OP-TED/ted-xml-data-converter/main/ted-elements-not-convertible.md\"\n elements = []\n for line in get(url).text.splitlines():\n if match := re.search(r\"^\\| ([A-Z_]+) \\|\", line):\n elements.append(match.group(1))\n\n report_unmerged_rows(df, [\"form\", \"xpath\"], ~df[\"xpath\"].str.endswith(tuple(elements)), unformatted=[\"form\"])", "title": "" }, { "docid": "2867596160c70aedc4507943780a9e82", "score": "0.4770945", "text": "def get_metadata():\n train_x, train_y = _read_metaset(modeldata_template.format('train.csv'))\n valid_x, valid_y = _read_metaset(modeldata_template.format('valid.csv'))\n test_x, test_y = _read_metaset(modeldata_template.format('test.csv'))\n\n return train_x, train_y, valid_x, valid_y, test_x, test_y", "title": "" }, { "docid": "7cc9aeb18eae5f964c5fe516b06cac91", "score": "0.47634456", "text": "def init_target_metadata(\n self,\n train_data: textdata.Dataset,\n eval_data: textdata.Dataset,\n test_data: textdata.Dataset,\n ):\n self.metadata.target = self.metadata.features[DatasetFieldName.TEXT_FIELD]", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure 
each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n 
setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not 
self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n 
MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "9ed87571d0430b84ae1577f2cfdb172d", "score": "0.47584566", "text": "def prepare(self):\n\n if not self.meta_container.exists() and self.reference_data_present:\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "fbd06e90d48b79ecf15c21e2be3ab883", "score": "0.4757861", "text": "def substitute_in_formulas(dct, substitutions):\n visitor = FormulaSubstitutor(substitutions)\n\n for obj in dct.values():\n if \"formula\" in obj:\n obj[\"formula\"] = visitor(obj[\"formula\"])\n\n return dct", "title": "" }, { "docid": "e337566d556f6a75123f80d0dd08016c", "score": "0.47575805", "text": "def main():\n # clean output directory\n outpath = Path(\"dictionary\", \"content\")\n [f.unlink() for f in outpath.glob(\"*\") if f.is_file() and f.suffix == \".md\"]\n\n # get all tables, and save markdown output for each\n tbls = list_tables()\n for tbltup in tbls:\n save_markdown(tbltup)\n print(f\"{len(tbls)} files written to {outpath}\")", "title": "" }, { "docid": "1bab4d2428368d270baa63ef00137bd5", "score": "0.47536632", "text": "def _creates_gene_metadata_dict(self) -> Dict:\n\n log_str = 'Generating Metadata for Gene Identifiers'; print('\\t- ' + log_str); logger.info(log_str)\n\n f_name = 'Homo_sapiens.gene_info'\n x = downloads_data_from_gcs_bucket(self.bucket, self.original_data, self.processed_data, f_name, self.temp_dir)\n data = pandas.read_csv(x, header=0, delimiter='\\t', low_memory=False)\n data = data.loc[data['#tax_id'].apply(lambda i: i == 9606)]\n data.fillna('None', inplace=True); data.replace('-', 'None', inplace=True, regex=False)\n # create metadata\n genes, lab, desc, syn = [], [], [], []\n for idx, row in tqdm(data.iterrows(), total=data.shape[0]):\n gene_id, sym, defn, gene_type = row['GeneID'], row['Symbol'], row['description'], row['type_of_gene']\n chrom, map_loc, s1, s2 = row['chromosome'], row['map_location'], row['Synonyms'], row['Other_designations']\n if gene_id != 'None':\n genes.append('http://www.ncbi.nlm.nih.gov/gene/' + str(gene_id))\n if sym != 'None' or sym != '': lab.append(sym)\n else: lab.append('Entrez_ID:' + gene_id)\n if 'None' not in [defn, gene_type, chrom, map_loc]:\n desc_str = \"{} has locus group '{}' and is located on chromosome {} ({}).\"\n desc.append(desc_str.format(sym, gene_type, chrom, map_loc))\n else: desc.append(\"{} locus group '{}'.\".format(sym, gene_type))\n if s1 != 'None' and s2 != 'None':\n syn.append('|'.join(set([x for x in (s1 + s2).split('|') if x != 'None' or x != ''])))\n elif s1 != 'None': syn.append('|'.join(set([x for x in s1.split('|') if x != 'None' or x != ''])))\n elif s2 != 'None': syn.append('|'.join(set([x for x in s2.split('|') if x != 
'None' or x != ''])))\n else: syn.append('None')\n # combine into new data frame then convert it to dictionary\n metadata = pandas.DataFrame(list(zip(genes, lab, desc, syn)), columns=['ID', 'Label', 'Description', 'Synonym'])\n metadata = metadata.astype(str); metadata.drop_duplicates(subset='ID', inplace=True)\n metadata.set_index('ID', inplace=True); gene_metadata_dict = metadata.to_dict('index')\n\n return gene_metadata_dict", "title": "" }, { "docid": "9d6bb26ce7ca4dd43b9a8d8dbe3be05c", "score": "0.47349995", "text": "def gen_formula_tag(c_tag, right_formula):\n c_tag.append(etree.Element(\"f\"))\n f_tag = c_tag.xpath(\"*[local-name()='f']\")\n f_tag[0].text = right_formula\n del c_tag.attrib[\"t\"]", "title": "" }, { "docid": "1d86e1cd1b1ce7523c88f3d5ca443853", "score": "0.47272843", "text": "def meta_IMAGE(dataset, instr):\n #print(\"Writing image meta\")\n metaName = get_dirName('META', dataset)+'meta_{}.txt'.format(dataset)\n f = open(metaName, 'r')\n contents = f.readlines()\n f.close()\n \n contents.append('\\n\\tCamera :')\n contents.append('\\n\\t\\t{}'.format(instr.serial))\n contents.append('\\n')\n \n #print(contents)\n f = open(metaName, 'w')\n contents = ''.join(contents)\n f.write(contents)\n f.close()", "title": "" }, { "docid": "5da5a7297c561d78da85c10c5b05ab0f", "score": "0.4724517", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = MetaDataContainer()\n annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])\n for annotation_filename in annotation_files:\n data = MetaDataContainer(filename=annotation_filename).load()\n for item in data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "b7c83b71b05bc44f29f3889fc8bbf48d", "score": "0.47102478", "text": "def make_makedoc(filename):\n dataframe = pd.read_csv(filename, index_col=0).set_index('index')\n directory = os.path.dirname(filename)\n dataframe['path'] = dataframe['url'].apply(lambda x:\n os.path.join(directory, x))\n minx, maxx, rangex = _column_range(dataframe['x'])\n miny, maxy, rangey = _column_range(dataframe['y'])\n\n def makedoc(doc):\n source = ColumnDataSource(dataframe)\n image_holder = ColumnDataSource({'image': [], 'x': [], 'y': [],\n 'dx': [], 'dy': []})\n tools = [ResetTool(), PanTool(), WheelZoomTool(), TapTool(),\n BoxSelectTool(), PolySelectTool(), UndoTool(), RedoTool()]\n pca = figure(title='PCA',\n x_range=[minx - 0.05 * rangex, maxx + 0.05 * rangex],\n y_range=[miny - 0.05 * rangey, maxy + 0.05 * rangey],\n sizing_mode='scale_both', tools=tools)\n glyphs = pca.circle(source=source, x='x', y='y')\n\n sel = figure(title='Selected image', x_range=[0, 1], y_range=[0, 1],\n sizing_mode='scale_both')\n image_canvas = sel.image_rgba('image', 'x', 'y', 'dx', 'dy',\n source=image_holder)\n\n def load_selected(attr, old, new):\n print('new index: ', new.indices)\n if len(new.indices) == 1: # could be empty selection\n update_image_canvas_single(new.indices[0], data=dataframe,\n source=image_holder)\n elif len(new.indices) > 1:\n update_image_canvas_multi(new.indices, data=dataframe,\n source=image_holder)\n\n glyphs.data_source.on_change('selected', load_selected)\n\n fig = row([pca, sel], sizing_mode='stretch_both')\n doc.title = 'Bokeh microscopium app'\n doc.add_root(fig)\n\n print('ready!')\n return makedoc", "title": "" }, { "docid": 
"ee02df5762862f3cee0b10b7adba2408", "score": "0.47085884", "text": "def add_name_formula_label_col(info_df, metab, formula, iso_tracers):\n\n ##redundant inputs?\n ##metab and formula are not being used anywhere\n info_df=create_label_column_frm_isotope_columns(info_df, iso_tracers)\n info_df[cons.NAME_COL]= metab\n info_df[cons.FORMULA_COL] = formula\n return info_df", "title": "" }, { "docid": "b0243bd3786f3464cfbbf6909624c05e", "score": "0.46994773", "text": "def transform_percentage(x):\n \n \n#def make_SF0_col_handler():\n# codes = load_SF0_qd_codes()\n# codes.set_index('Code', inplace=True)\n# codes.sort_index(inplace=True)\n# def res(col_name):\n# qd_code = 'SF0/%s' % col_name\n# try:\n# name = codes.loc[qd_code]['Name']\n# except:\n# name = \"%s: %s\" % (qd_code, \"Missing\")\n# return(name)\n# return(res)\n \n \n\n \"\"\"\n equity_info.dbs.INDUSTRY. \\\n set_path('download_and_save', 'SF0-tickers.csv'). \\\n downloader(creator=meta_loader_creator).set(no_compute_codes=True). \\\n set(english_to_symbol_indicator=default_english_to_symbol_indicator). \\\n set(indicator_handler=make_default_indicator_handler([], [])). \\\n set_path('process', load=True, path='SF0-tickers.csv'). \\\n set(symbol_name=\"Security\", date_name=\"Date\") \n \"\"\"\n \n #set_path('downloaded_data', 'SF0-tickers.csv'). \\\n #'INDUSTRY-processed-data.csv',\n # compute_names=make_default_compute_names([], identity),\n # load=False, idx_to_datetime=identity, resample_method=None). \\\n \n #Doing this differently: res.add_transformation(identity, ('INDUSTRY', 'Sector'), name=('CALC', 'Sector'), drop_old=False) \n \n #res.add_transformation(log, ('SF0', 'Revenues (USD)'), name=('CALC', 'size'), drop_old=False)\n #res.add_transformation(identity, ('SF0', 'Current Ratio'), name=('CALC', 'Current Ratio'), drop_old=False)\n #res.add_transformation(identity, ('SF0', 'Debt to Equity Ratio'), name=('CALC', 'Debt to Equity Ratio'), drop_old=False) \n #res.add_transformation(smart_divide, ('SF0', 'Gross Profit'), ('SF0', 'Revenues (USD)'), name=('CALC', 'Gross Margin'), drop_old=False)", "title": "" }, { "docid": "d30bcf672f9e742bd707245615526650", "score": "0.46967587", "text": "def get_meta_information() -> Dict:\n return {'name': 'NAS-Bench-1Shot1: Benchmarking and Dissecting One-shot Neural Architecture Search',\n 'references': ['@inproceedings{Zela2020NAS-Bench-1Shot1:, '\n 'title = {NAS-Bench-1Shot1: '\n ' Benchmarking and Dissecting One-shot Neural Architecture Search},'\n 'author = {Arber Zela and Julien Siems and Frank Hutter},'\n 'booktitle = {International Conference on Learning Representations},'\n 'year = {2020},'\n 'url = {https://openreview.net/forum?id=SJx9ngStPH}}',\n ],\n 'code': 'https://github.com/automl/nasbench-1shot1',\n }", "title": "" }, { "docid": "b53f2551b032790fa6fe6e320a9c5c92", "score": "0.46930942", "text": "def add_image_meta(fileName, meta):\n metaBYTE = str(meta).encode()\n metaBASE = str(base64.b64encode(metaBYTE), 'ascii')\n subprocess.call('tiffset -s 270 ' + metaBASE + ' ' + fileName, shell=True)", "title": "" }, { "docid": "424ab2e0d9b162b2113f70eca95f9039", "score": "0.46879092", "text": "def get_meta_data(app):\n #Count of methods\n method_count = 0\n #Count of files\n file_count = 0\n #Number of apis\n api_calls = defaultdict(int)\n api_calls_count = 0\n #Number of unique invocations\n invokes = defaultdict(int)\n #Number of unique packages\n packages = defaultdict(int)\n #reflection count\n reflections = 0\n \n for file in tqdm(app):\n file_count += 1\n parsed_file = 
parseSmaliFile(open(file))\n reflections += parsed_file['Metrics']['Reflections']\n for method in parsed_file['Methods']:\n method_count += 1\n for api_call in method['Android API']:\n #API\n api_calls_count += 1\n api = api_call.replace('\\n','').split(' ')[-1]\n if api not in api_calls.keys(): api_calls[api] = 1\n else: api_calls[api] += 1\n #Invokes\n invoke = api_call.split(',')[0].split('}, ')[0]\n if invoke[-1] != '}': invoke += '}'\n if invoke not in invokes.keys(): invokes[invoke] = 1\n else: invokes[invoke] += 1\n package = api_call.split('->')[0].split(' ')[-1]\n if package not in packages.keys(): packages[package] = 1\n else: packages[package] += 1 \n\n \n #Sorted dicts\n api_calls = sorted(api_calls.items(), key=lambda item: item[1])\n invokes = sorted(invokes.items(), key=lambda item: item[1])\n packages = sorted(packages.items(), key=lambda item: item[1])\n \n #Unique counts\n api_calls_unique = len(api_calls)\n invokes_unique = len(invokes)\n packages_unique = len(packages)\n \n #Average methods per file\n average_methods_per_file = round(method_count / file_count,2)\n \n meta_data = {'Method Count': [method_count],\n 'File Count': [file_count],\n 'Average Method Count per File': [average_methods_per_file],\n 'Top API Call': [api_calls[-1]],\n 'Top Invoke': [invokes[-1]],\n 'Top Package': [packages[-1]],\n 'Reflections': [reflections],\n 'API Call Count':[api_calls_count],\n 'Unique API Calls': [api_calls_unique],\n 'Unique Invokes': [invokes_unique],\n 'Unique Packages': [packages_unique]}\n \n return meta_data", "title": "" }, { "docid": "446acf49aae6bb909f8e328c08e5172f", "score": "0.46846265", "text": "def _creates_transcript_metadata_dict(self) -> Dict:\n\n log_str = 'Generating Metadata for Transcript Identifiers'; print('\\t- ' + log_str); logger.info(log_str)\n\n f_name = 'ensembl_identifier_data_cleaned.txt'\n x = downloads_data_from_gcs_bucket(self.bucket, self.original_data, self.processed_data, f_name, self.temp_dir)\n dup_cols = ['transcript_stable_id', 'transcript_name', 'ensembl_transcript_type']\n data = pandas.read_csv(x, header=0, delimiter='\\t', low_memory=False)\n data = data.loc[data['transcript_stable_id'].apply(lambda i: i != 'None')]\n data.drop(['ensembl_gene_id', 'symbol', 'protein_stable_id', 'uniprot_id', 'master_transcript_type',\n 'entrez_id', 'ensembl_gene_type', 'master_gene_type', 'symbol'], axis=1, inplace=True)\n data.drop_duplicates(subset=dup_cols, keep='first', inplace=True); data.fillna('None', inplace=True)\n # create metadata\n rna, lab, desc, syn = [], [], [], []\n for idx, row in tqdm(data.iterrows(), total=data.shape[0]):\n rna_id, ent_type, nme = row[dup_cols[0]], row[dup_cols[2]], row[dup_cols[1]]\n rna.append('https://uswest.ensembl.org/Homo_sapiens/Transcript/Summary?t=' + rna_id)\n if nme != 'None': lab.append(nme)\n else: lab.append('Ensembl_Transcript_ID:' + rna_id); nme = 'Ensembl_Transcript_ID:' + rna_id\n if ent_type != 'None': desc.append(\"Transcript {} is classified as type '{}'.\".format(nme, ent_type))\n else: desc.append('None')\n syn.append('None')\n # combine into new data frame then convert it to dictionary\n metadata = pandas.DataFrame(list(zip(rna, lab, desc, syn)), columns=['ID', 'Label', 'Description', 'Synonym'])\n metadata = metadata.astype(str); metadata.drop_duplicates(subset='ID', inplace=True)\n metadata.set_index('ID', inplace=True); rna_metadata_dict = metadata.to_dict('index')\n\n return rna_metadata_dict", "title": "" }, { "docid": "25878dbf3a2d84aa48e3e99d54db95e1", "score": "0.46833357", "text": 
"def get_meta(self):\n import __main__\n return '<meta name=\"generator\" content=\"HT2HTML/%s\">' \\\n % __main__.__version__", "title": "" }, { "docid": "d6f9ebc092674fa41d19ff8b9d2027ce", "score": "0.46751222", "text": "def tsakorpus_file(text):\n meta = {}\n for i in text.informators:\n meta[i.code] = {'gender': str_none(i.gender),\n 'birth_village': str_none(i.birth_village),\n 'birth_district': str_none(i.birth_district),\n 'birth_region': str_none(i.birth_region),\n 'current_village': str_none(i.current_village),\n 'current_district': str_none(i.current_district),\n 'current_region': str_none(i.current_region)\n }\n if i.birth_year is not None:\n meta[i.code]['age'] = str(text.year - i.birth_year)\n meta[i.code]['birth_year'] = str(i.birth_year)\n textmeta = {\n \"year\": str(text.year),\n \"id\": str(text.id),\n \"region\": text.geo.region.name,\n \"village\": text.geo.village.name,\n \"district\": text.geo.district.name,\n \"title\": \"N {}, {}, {}, {}, {}\".format(\n text.id, text.year, text.geo.region.name, text.geo.village.name, text.geo.district.name)\n }\n result = {'sentences': sentences(text.raw_text, meta),\n 'meta': textmeta}\n return result", "title": "" }, { "docid": "9ebbc507bdff71b510638f9b6e51c939", "score": "0.46612224", "text": "def set_formula(formula, output_structure):\n\telements = parse_formula(formula=formula)\n\toutput_structure['elements'] = elements\n\treturn output_structure", "title": "" }, { "docid": "99e350423e23b1a71c1cfdcebf14f0b8", "score": "0.4659854", "text": "def helper(self):\n # Simple assembly without requiring accessory files (SampleSheet.csv, etc).\n if self.basicassembly:\n self.runmetadata = Basic(inputobject=self)\n else:\n # Populate the runmetadata object by parsing the SampleSheet.csv, GenerateFASTQRunStatistics.xml, and\n # RunInfo.xml files\n self.runinfo = os.path.join(self.path, 'RunInfo.xml')\n self.runmetadata = runMetadata.Metadata(passed=self)\n # Extract the flowcell ID and the instrument name if the RunInfo.xml file was provided\n self.runmetadata.parseruninfo()\n # Extract PhiX mapping information from the run\n phi = phix.PhiX(inputobject=self)\n phi.main()\n # Populate the lack of bclcall and nohup call into the metadata sheet\n for sample in self.runmetadata.samples:\n sample.commands = GenObject()\n sample.commands.nohupcall = 'NA'\n sample.commands.bclcall = 'NA'\n # Move/link the FASTQ files to strain-specific working directories\n fastqmover.FastqMover(inputobject=self)\n # Print the metadata to file\n metadataprinter.MetadataPrinter(inputobject=self)", "title": "" }, { "docid": "c93902daa50b9287e175709a3d4bc6bf", "score": "0.46442655", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = {}\n for fold in range(1, self.crossvalidation_folds):\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='train', fold=fold)\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)\n ).load()\n\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n return self", "title": "" }, { "docid": "ff1adb101c5ef6649d6d09684846edd1", "score": "0.46438015", "text": "def meta_SET(dataset, instr):\n metaName = 
get_dirName('META', dataset)+'meta_{}.txt'.format(dataset)\n f = open(metaName, 'r')\n contents = f.readlines()\n f.close()\n \n if instr.address== '/dev/ttyACM0': \n contents.append('\\n\\tVacuum Gauge : %s' % instr.address)\n# contents.append('\\n\\tPower Supply : %s' % instr.serial[0])\n contents.append('\\n')\n \n f = open(metaName, 'w')\n contents = \"\".join(contents)\n f.write(contents)\n f.close()", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += 
MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n 
filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "f73d0f6aeb06cd236e3592b8561c42db", "score": "0.464109", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "63d1e7f6ecb7032fe412d659ea44a9e9", "score": "0.46384", "text": "def semantic_transformation(data_path, sheet_name, columns, short_text_name):\n\n wo_data = pd.read_excel(data_path, sheet_name=sheet_name)\n selected_wo_data = pd.DataFrame(wo_data, columns=[\"ShortText\"])\n short_text_list = selected_wo_data[short_text_name] # just get short text\n\n # Step 1: Tokenization\n # Generates a token list with punctuation removed\n transformed_text_list = []\n\n for short_text in short_text_list:\n tokenized = tokenization(short_text)\n new_text = ''\n for token in tokenized:\n new_text += token.lower()\n new_text += ' '\n\n # Step 2: Semantic Transformation\n # Generates a token list transformed against regex matches\n transformed_text = semantic_transform(new_text)\n transformed_text_list.append(transformed_text)\n\n # Write output to file\n print_to_file(v.transformed_text_path_stage_1, transformed_text_list, v.transformed_text_heading)", "title": "" }, { "docid": "9a6ec92605a7730d3492e73d55e42cac", "score": "0.46348983", "text": "def _generate_runtime_meta(self, function_name):\n logger.info(f'Extracting runtime metadata from: {function_name}')\n payload = {'log_level': logger.getEffectiveLevel(), 'get_metadata': True}\n try:\n res = self.fc_client.invoke_function(\n self.service_name, function_name,\n payload=json.dumps(payload, default=str),\n headers={'x-fc-invocation-type': 'Sync'}\n )\n runtime_meta = json.loads(res.data)\n\n except Exception:\n raise Exception(\"Unable to extract runtime metadata\")\n\n if not runtime_meta or 'preinstalls' not in runtime_meta:\n raise Exception(runtime_meta)\n\n logger.debug(\"Metadata extracted successfully\")\n return runtime_meta", "title": "" }, { "docid": "0a3f8a867fdd0d12eaf82379943e923d", "score": "0.46258658", "text": "def prepare(self):\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n 
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self", "title": "" }, { "docid": "e4bf912defb07ef50683c486e4ff4276", "score": "0.4619191", "text": "def generate_data():\n\n # grab files in directory\n find_files()\n\n # separate the names from the specs and instances for creating the CSV\n spec_names = [['MATRIX']] # list of pattern specs\n instances_names = [] # list of instance_names\n for key, values in SPECS.items():\n spec_names.append(key)\n for key, values in INSTANCES.items():\n instances_names.append(key)\n\n # calculate the scores\n for spec, spec_file in SPECS.items():\n spec_file = open(SPECS.get(spec), 'r') # open Spec file\n spec_lines = spec_file.readlines() # break down spec file into list of lines\n for instance, instance_file in INSTANCES.items():\n instance_file = open(INSTANCES.get(instance), 'r') # open instance file\n instance_lines = instance_file.readlines() # break down into list of lines\n\n score = edit_distance(spec_lines, instance_lines)\n print(\"====SCORE: \"+str(score))\n RESULTS[instance].append(score)\n\n # populate proximity_matrix csv with edit_distance scores\n score_writer = csv.writer(open('proximity_matrix.csv', 'at', encoding='utf8'))\n\n # header\n score_writer.writerow(spec_names)\n # write rows of scores\n for instance, scores in RESULTS.items():\n temp_list = [instance]+scores\n score_writer.writerow(temp_list)", "title": "" }, { "docid": "a15c86af215c013212c49cf29281571f", "score": "0.46185696", "text": "def dotexport_metamodel_cmd(textx_ls, args):\n ws_path = to_fs_path(textx_ls.root_uri)\n\n metamodel_path = textx_ls.configuration.grammar_path\n mm = metamodel_from_file(metamodel_path)\n mm_file_name = splitext(basename(metamodel_path))[0] or uuid.uuid4().hex\n\n path = join(ws_path, mm_file_name + '_metamodel.dot')\n metamodel_export(mm, path)\n textx_ls.workspace.show_message(\"Metamodel is exported at {}\".format(path))", "title": "" }, { "docid": "bf6f01168fb104ba80cf9b583b759ecb", "score": "0.46147692", "text": "def __init__(self):\n self.label = \"Merge a selected metadata record with a saved template\"\n self.description = \"This tool replaces the elements in a metadata record with elements from a saved template record. Elements from the template will overwrite their equivalents in the selected record based on the xpath rules provided in GenericTemplateXpathSettings.xml. The provided template (GenericMetadataTemplate_EMEPro.xml) can be used as a custom Set-To-Default tool. Both files are deployed with the python tool. 
Caution is urged when using this tool.\"\n self.canRunInBackground = False", "title": "" }, { "docid": "41e309c37a49718c3c0fd15cc5de8ea0", "score": "0.46112934", "text": "def __instructions(self):\n\n self += comment('Mellanox HPC-X version {}'.format(self.__version))\n self += packages(ospackages=self.__ospackages)\n self += shell(commands=self.__commands)\n self += environment(variables=self.environment_step())", "title": "" }, { "docid": "338bb238b5e857e239356eed383a430c", "score": "0.46071383", "text": "def create_formula_field(self, field_name, formula):\n self.selenium.wait_until_page_contains_element(formula_locator, 60)\n self.selenium.click_element(formula_locator)\n self.selenium.wait_until_page_contains_element(next_button, 60)\n self.selenium.click_element(next_button)\n self.salesforce.populate_field(\"Field Label\", field_name)\n self.selenium.wait_until_page_contains_element(checkbox_option, 60)\n self.selenium.click_element(checkbox_option)\n self.selenium.click_element(next_button)\n self.selenium.wait_until_page_contains_element(formula_txtarea, 60)\n self.selenium.get_webelement(formula_txtarea).send_keys(formula)\n self.selenium.click_element(check_syntax)\n self.selenium.click_element(next_button)\n self.selenium.click_element(next_button)\n self.selenium.click_element(save_button)\n self.selenium.wait_until_location_contains(\n \"FieldsAndRelationships/view\",\n timeout=90,\n message=\"Detail page did not load in 1 min\",\n )", "title": "" }, { "docid": "33886ae3483413ecc04c06ee76550bca", "score": "0.46051475", "text": "def get_data():\n\n # The csv files that will be used to extract the medical data\n df_medoc = export_to_df(dbname='ghpsj-v2', table='raw_medicament_cis_atc_mol')\n df_term_medicaux = export_to_df(dbname='ghpsj-v2', table='raw_medical_terms_dictionary_m2osw')\n\n # Apply the 'clean' function to 'libelle_atc' column of the df_medoc data frame\n df_medoc[\"libelle_atc\"] = df_medoc[\"libelle_atc\"].apply(clean)\n\n # Delete rows with NaN values from df_term_medicaux\n df_term_medicaux.dropna(inplace=True)\n df_term_medicaux.reset_index(drop=True, inplace=True)\n\n # Create the annotation dict\n ann_dict = dict()\n ann_dict.setdefault(\"LIBEL\", [])\n ann_dict.setdefault(\"TRAIT\", [])\n ann_dict.setdefault(\"MAL\", [])\n ann_dict.setdefault(\"EXAM\", [])\n ann_dict.setdefault(\"BIO\", [])\n ann_dict.setdefault(\"SYM\", [])\n\n # Add the terms from the data frame to the dict\n for medoc in df_medoc[\"libelle_atc\"].unique():\n ann_dict[\"LIBEL\"].append(medoc)\n for medoc in df_medoc[\"med_lib\"].unique():\n ann_dict[\"TRAIT\"].append(medoc)\n\n # Create a column in df_term_medicaux containing only the first word of the column 'definition'\n for index, row in df_term_medicaux.iterrows():\n tr = clean2(df_term_medicaux.loc[index, \"definition\"])\n df_term_medicaux.loc[index, \"mot\"] = tr.split(\" \")[0]\n del tr\n\n # Add the diseases, drugs and exams to the annotation dict\n for index, row in df_term_medicaux.iterrows():\n if df_term_medicaux.loc[index, \"mot\"] == \"maladie\" and df_term_medicaux.loc[index, \"term\"] not in \\\n ann_dict[\"MAL\"]:\n ann_dict[\"MAL\"].append(df_term_medicaux.loc[index, \"term\"])\n elif df_term_medicaux.loc[index, \"mot\"] == \"medicament\" and df_term_medicaux.loc[index, \"term\"] not in \\\n ann_dict[\"TRAIT\"]:\n ann_dict[\"TRAIT\"].append(df_term_medicaux.loc[index, \"term\"])\n elif df_term_medicaux.loc[index, \"mot\"] == \"examen\" and df_term_medicaux.loc[index, \"term\"] not in \\\n ann_dict[\"EXAM\"]:\n 
ann_dict[\"EXAM\"].append(df_term_medicaux.loc[index, \"term\"])\n\n # BIOLOGY\n bio = pd.read_excel(\"../data/ia/biologie_diabete2.xlsx\")\n bio.label = bio.label.apply(clean2)\n\n for index, rox in bio.iterrows():\n tr = []\n bio_labels = bio.loc[index, \"label\"].split()\n for mot in range(len(bio_labels)):\n if len(bio_labels[mot]) > 2:\n tr.append(bio_labels[mot])\n bio.loc[index, \"mots_nouveaux\"] = \" \".join(tr)\n\n # Add the BIO terms to the annotation dict\n for b in bio[\"mots_nouveaux\"].unique():\n ann_dict[\"BIO\"].append(b)\n\n # Use a second file to add more annotation to BIO\n bio2 = pd.read_csv('../data/ia/freq_terms_biology.csv', sep='\\t', encoding='ISO-8859-1', header=None,\n names=['terme', 'nbres sรฉjour', 't'])\n bio_terms = list(bio2['terme'])\n\n # Add the new BIO terms to the dict\n for b in bio_terms:\n if unidecode(b) not in ann_dict['BIO']:\n ann_dict['BIO'].append(b)\n\n # SYMPTOM\n # get the symptoms from a csv file from wikidata\n symptoms = pd.read_csv('../data/ia/symptoms.csv')\n symptom_terms = list(symptoms['symptomLabel'].unique())\n # to lowercase\n symptom_terms = [unidecode(s.lower()) for s in symptom_terms]\n # add the symptoms to the annotation dict\n ann_dict['SYM'] = symptom_terms\n\n # EXAM\n # get additional exam terms from a csv file from wikidata\n exams = pd.read_csv('../data/ia/exams.csv')\n for exam in exams['examLabel'].unique():\n if unidecode(exam) not in ann_dict['EXAM']:\n ann_dict['EXAM'].append(unidecode(exam))\n\n # Additional terms from the 'diabete_concepts' csv file\n diabete = pd.read_csv('../data/ia/diabete_concepts.csv', sep=';', encoding = \"ISO-8859-1\")\n col_names = diabete.columns\n for index, row in diabete.iterrows():\n if row['categorie'] == 'maladie':\n fill_ann_dict_from_diabete(k='MAL', col_names=col_names, row=row, ann_dict=ann_dict)\n elif row['categorie'] == 'symptomes':\n fill_ann_dict_from_diabete(k='SYM', col_names=col_names, row=row, ann_dict=ann_dict)\n elif row['categorie'] == 'biologie':\n fill_ann_dict_from_diabete(k='BIO', col_names=col_names, row=row, ann_dict=ann_dict)\n elif row['categorie'] == 'examen':\n fill_ann_dict_from_diabete(k='EXAM', col_names=col_names, row=row, ann_dict=ann_dict)\n elif row['categorie'] == 'Traitement':\n fill_ann_dict_from_diabete(k='TRAIT', col_names=col_names, row=row, ann_dict=ann_dict)\n\n return ann_dict", "title": "" }, { "docid": "88b5847416d0f0888ae77dc7bed7a6c0", "score": "0.46033457", "text": "def main():\n\n \n # Get root of data directory.\n data_root = '../../data/'\n mta(data_root)\n citibike(data_root)", "title": "" }, { "docid": "755ac8f1f156b5974ff23af708530493", "score": "0.45984858", "text": "def main(opts):\n print(\"\"\" Remember:\\n\n * Put the charge multiplicity in comment line of xyz\\n\n * (For Products) Put the bond-forming atoms as the first two atoms in xyz\\n\n * Create a tag file (`touch tag`)\\n\n \"\"\")\n conformers(opts.xyzfile,\n opts.nconf,\n opts.force,\n templatefiles=opts.templatefiles)", "title": "" }, { "docid": "d09f293dab7e084151ef59bf2249719c", "score": "0.45981735", "text": "def _run(self):\n # We don't need to run _retrieveAndCheckFilePaths() here because the\n # base class ensures that _isBuildRequired() will always be called\n # prior to this method, so _retrieveAndCheckFilePaths() will have\n # already been run.\n\n timer = BasicTimer()\n timer.start()\n\n # Get the imports modules IRIs from the imports build target.\n importsIRIs = [info.iristr for info in self.ibt.getImportsInfo()]\n\n fileoutpath = 
self.getOutputFilePath()\n\n # Create the destination directory, if needed. We only need to check\n # this for in-source builds, since the BuildDirTarget dependency will\n # take care of this for out-of-source builds.\n if self.config.getDoInSourceBuilds():\n destdir = os.path.dirname(fileoutpath)\n if not(os.path.isdir(destdir)):\n self._makeDirs(destdir)\n\n ontbuilder = OWLOntologyBuilder(self.base_ont_path)\n # Add an import declaration for each import module.\n for importIRI in importsIRIs:\n ontbuilder.getOntology().addImport(importIRI, True)\n\n # Process each source file. In this step, entities and label\n # annotations are defined, but processing of all other axioms (e.g.,\n # text definitions, comments, equivalency axioms, subclass of axioms,\n # etc.) is deferred until after all input files have been read. This\n # allows forward referencing of labels and term IRIs and means that\n # entity descriptions and source files can be processed in any\n # arbitrary order.\n for termsfile in self.termsfile_paths:\n with TableReaderFactory(termsfile) as reader:\n logger.info('Parsing ' + termsfile + '...')\n for table in reader:\n table.setRequiredColumns(REQUIRED_COLS)\n table.setOptionalColumns(OPTIONAL_COLS)\n \n for t_row in table:\n if not(t_row['Ignore'].lower() in TRUE_STRS):\n # Collapse all spaces in the \"Type\" string so that,\n # e.g., \"DataProperty\" and \"Data Property\" will\n # both work as expected.\n typestr = t_row['Type'].lower().replace(' ', '')\n \n if typestr == 'class':\n ontbuilder.addOrUpdateClass(t_row)\n elif typestr == 'dataproperty':\n ontbuilder.addOrUpdateDataProperty(t_row)\n elif typestr == 'objectproperty':\n ontbuilder.addOrUpdateObjectProperty(t_row)\n elif typestr == 'annotationproperty':\n ontbuilder.addOrUpdateAnnotationProperty(t_row)\n elif typestr == 'individual':\n ontbuilder.addOrUpdateIndividual(t_row)\n elif typestr == '':\n raise EntityDescriptionError(\n 'The entity type (e.g., \"class\", \"data '\n 'property\") was not specified.',\n t_row\n )\n else:\n raise EntityDescriptionError(\n 'The entity type \"' + t_row['Type']\n + '\" is not supported.', t_row\n )\n\n # Define all deferred axioms from the source entity descriptions.\n logger.info('Defining all remaining entity axioms...')\n ontbuilder.processDeferredEntityAxioms(self.expanddefs)\n\n # Set the ontology IRI.\n ontIRI = self.config.generateDevIRI(fileoutpath)\n ontbuilder.getOntology().setOntologyID(ontIRI)\n\n # Write the ontology to the output file.\n logger.info('Writing compiled ontology to ' + fileoutpath + '...')\n ontbuilder.getOntology().saveOntology(\n fileoutpath, self.config.getOutputFormat()\n )\n\n logger.info(\n 'Main ontology build completed in {0} s.\\n'.format(timer.stop())\n )", "title": "" }, { "docid": "6635f86f5662c42a4b52f9ffa301433c", "score": "0.45956817", "text": "def get_meta_information(self) -> Dict:\n return {'name': 'XGBoost',\n 'references': ['@article{probst2019tunability,'\n 'title={Tunability: Importance of hyperparameters of machine learning algorithms.},'\n 'author={Probst, Philipp and Boulesteix, Anne-Laure and Bischl, Bernd},'\n 'journal={J. Mach. Learn. 
Res.},'\n 'volume={20},'\n 'number={53},'\n 'pages={1--32},'\n 'year={2019}'\n '}'],\n 'code': 'https://github.com/automl/HPOlib1.5/blob/development/hpolib/benchmarks/ml/'\n 'xgboost_benchmark_old.py',\n 'shape of train data': self.x_train.shape,\n 'shape of test data': self.x_test.shape,\n 'shape of valid data': self.x_valid.shape,\n 'initial random seed': self.rng,\n 'task_id': self.task_id\n }", "title": "" }, { "docid": "e893904aaad55157050ba7bcfb0047e1", "score": "0.459325", "text": "def tabularize_recipe(recipe, process, doc_id, documents=None):\n recipe_tsv = \"\"\n\n process_order = [\"name\", \"title\", \"descr\", \"prep_time\", \"cook_time\", \"serves\",\n \"dietary\", \"chef\", \"show\", \"ingredients\", \"methods\", \"img_url\"]\n size_doc = 0\n de = DocEntry(id=doc_id)\n\n for key in process_order:\n value = recipe[key]\n\n if value == \"\":\n continue\n\n elif key == \"name\":\n de.set_name(value)\n\n elif key == \"img_url\":\n de.set_img_url(value)\n\n elif key == \"ingredients\":\n for val in value:\n if val == \"\":\n continue\n decoded_value = unicode_ascii_decoder.unicode_to_ascii(val)\n pp_field = preprocess_field(decoded_value, process)\n de.set_size_ingr(de.get_size_ingr() + len(pp_field))\n size_doc += len(pp_field)\n\n recipe_tsv += \"III\" + \"\\t\"\n recipe_tsv += \"\\t\".join(pp_field) + \"\\t\"\n recipe_tsv += \"III\" + \"\\t\"\n\n elif key == \"methods\":\n for val in value:\n if val == \"\":\n continue\n decoded_value = unicode_ascii_decoder.unicode_to_ascii(val)\n pp_field = preprocess_field(decoded_value, process)\n recipe_tsv += \"\\t\".join(pp_field) + \"\\t\"\n size_doc += len(pp_field)\n\n elif key == \"title\":\n decoded_value = unicode_ascii_decoder.unicode_to_ascii(value)\n pp_field = preprocess_field(decoded_value, process)\n de.set_title(value)\n de.set_title_size(len(pp_field))\n size_doc += len(pp_field)\n\n recipe_tsv += \"TTT\" + \"\\t\"\n recipe_tsv += \"\\t\".join(pp_field) + \"\\t\"\n recipe_tsv += \"TTT\" + \"\\t\"\n\n elif key == \"descr\":\n decoded_value = unicode_ascii_decoder.unicode_to_ascii(value)\n pp_field = preprocess_field(decoded_value, process)\n recipe_tsv += \"\\t\".join(pp_field) + \"\\t\"\n de.set_desc(value)\n size_doc += len(pp_field)\n\n elif key == \"dietary\" and value == \"Vegetarian\":\n decoded_value = unicode_ascii_decoder.unicode_to_ascii(value)\n pp_field = preprocess_field(decoded_value, process)\n recipe_tsv += \"\\t\".join(pp_field) + \"\\t\"\n de.set_veggie(True)\n size_doc += len(value)\n\n else:\n decoded_value = unicode_ascii_decoder.unicode_to_ascii(value)\n pp_field = preprocess_field(decoded_value, process)\n recipe_tsv += \"\\t\".join(pp_field) + \"\\t\"\n size_doc += len(pp_field)\n\n de.set_size(size_doc)\n\n if documents is not None:\n documents[doc_id] = de\n\n return recipe_tsv", "title": "" }, { "docid": "b086ed385b819e4d5847c1cdac19fd08", "score": "0.45893908", "text": "def generate(self):\n self.bind = db.engine\n self.meta = db.metadata\n self.meta.bind = self.bind\n\n self._ensure_table(self.meta, self.name + '_entry')\n for field in self.fields:\n field.generate(self.meta, self.table)\n self.alias = self.table.alias('entry')", "title": "" }, { "docid": "98b956808e5f15ade88d5340bf74c1b7", "score": "0.45879546", "text": "def run_exp(meta_info, expconfig):\n\n cfg = expconfig.copy()\n output = None\n\n try:\n if 'cnf_add_to_result' not in cfg:\n cfg['cnf_add_to_result'] = {}\n\n cfg['cnf_add_to_result'].update({\n \"Guid\": cfg['guid'],\n \"DataId\": cfg['dataid'],\n \"DataVersion\": 
cfg['dataversion'],\n \"NodeId\": cfg['nodeid'],\n \"Time\": time.strftime('%Y%m%d-%H%M%S',cfg['timestamp']),\n \"Interface\": cfg['modeminterfacename'],\n \"cnf_astream_mpd\": cfg['cnf_astream_mpd'],\n \"cnf_astream_server_host\": cfg['cnf_astream_server_host'],\n \"cnf_astream_server_port\": cfg['cnf_astream_server_port'],\n \"cnf_astream_playback\": cfg['cnf_astream_playback'],\n \"cnf_astream_segment_limit\": cfg['cnf_astream_segment_limit'],\n \"cnf_astream_download\": cfg['cnf_astream_download'],\n \"cnf_astream_video\": cfg['cnf_astream_video'],\n \"cnf_astream_tag\": cfg['cnf_astream_tag'],\n \"cnf_astream_q1\": cfg['cnf_astream_q1'],\n \"cnf_astream_q2\": cfg['cnf_astream_q2'],\n \"cnf_astream_q3\": cfg['cnf_astream_q3'],\n \"cnf_astream_q4\": cfg['cnf_astream_q4'],\n \"ContainerVersion\": CONTAINER_VERSION,\n \"DEBUG\": DEBUG\n })\n\n if 'ICCID' in meta_info:\n cfg['cnf_add_to_result']['Iccid'] = meta_info['ICCID']\n if 'Operator' in meta_info:\n cfg['cnf_add_to_result']['Operator'] = meta_info['Operator']\n if 'IMSIMCCMNC' in meta_info:\n cfg['cnf_add_to_result']['IMSIMCCMNC'] = meta_info['IMSIMCCMNC']\n if 'NWMCCMNC' in meta_info:\n cfg['cnf_add_to_result']['NWMCCMNC'] = meta_info['NWMCCMNC']\n\n # Add metadata if requested\n if cfg['add_modem_metadata_to_result']:\n\n for k,v in meta_info.items():\n cfg['cnf_add_to_result']['info_meta_modem_' + k] = v\n\n towrite_data = cfg['cnf_add_to_result']\n ifname = meta_info[cfg['modeminterfacename']]\n towrite_data['Interface']=ifname\n\n #CM: constructing filename prefix and output directory\n prefix_timestamp=time.strftime('%Y%m%d-%H%M%S',cfg['timestamp'])\n prefix_astream=get_prefix(data=cfg, postfix=None, tstamp=prefix_timestamp, interface=ifname)\n\n resultdir=cfg['resultdir']\n resultdir_astream=resultdir+'astream/'\n resultdir_astream_video = resultdir_astream + cfg['cnf_astream_download_directory']+ '/'\n if not os.path.exists(resultdir_astream_video):\n os.makedirs(resultdir_astream_video)\n\n\n if cfg['verbosity'] > 2:\n print('\\n-----------------------------')\n print('DBG: AStream prefix: '+prefix_astream)\n print('DBG: result directory: '+resultdir_astream)\n print('DBG: result directory (videos): '+resultdir_astream_video)\n print('-----------------------------')\n\n #CM: running AStream\n try:\n\n if cfg['verbosity'] > 1:\n print('\\n-----------------------------')\n print('DBG: running MONROE-AStream')\n print('-----------------------------')\n\n run_astream(cfg['cnf_astream_mpd'],cfg['cnf_astream_server_host'],cfg['cnf_astream_server_port'],cfg['cnf_astream_video'],cfg['cnf_astream_playback'],cfg['cnf_astream_segment_limit'],cfg['cnf_astream_download'],resultdir_astream_video)\n\n except Exception as e:\n if cfg['verbosity'] > 0:\n print ('[Exception #2] Execution or parsing failed for error: {}').format(e)\n\n #CM: checking results\n astream_segment_log = glob.glob(resultdir_astream+'ASTREAM*.json')[0]\n astream_buffer_log = glob.glob(resultdir_astream+'DASH_BUFFER*.csv')[0]\n astream_runtime_log = glob.glob(resultdir_astream+'DASH_RUNTIME*.log')[0]\n\n if cfg['verbosity'] > 2:\n print('\\n-----------------------------')\n print('DBG: output files from AStream core:')\n print(astream_segment_log)\n print(astream_buffer_log)\n print(astream_runtime_log)\n print('-----------------------------')\n\n #CM: generating summary output\n out_astream = None\n\n if not DEBUG:\n try:\n out_astream = 
getOutput(astream_segment_log,astream_buffer_log,cfg['cnf_astream_q1'],cfg['cnf_astream_q2'],cfg['cnf_astream_q3'],cfg['cnf_astream_q4'],cfg['cnf_astream_segment_duration'])\n\n if out_astream is not None:\n if cfg['verbosity'] > 2:\n print('\\n-----------------------------')\n print('DBG: AStream summary:')\n print(out_astream)\n print('-----------------------------')\n\n out_astream_fields = out_astream.split(',')\n summary_astream_fields = cfg['cnf_astream_out_fields'].split(',')\n\n if len(out_astream_fields) == len(summary_astream_fields):\n for i in xrange(0,len(summary_astream_fields)-1):\n towrite_data[summary_astream_fields[i]]=out_astream_fields[i]\n else:\n for i in xrange(0,len(summary_astream_fields)-1):\n towrite_data[summary_astream_fields[i]]='NA'\n\n except Exception as e:\n if cfg['verbosity'] > 0:\n print ('[Exception #3] Execution or parsing failed for error: {}').format(e)\n\n if cfg['verbosity'] > 1:\n print('\\n-----------------------------')\n print('DBG: saving results')\n print('-----------------------------')\n\n #CM: compressing all outputs other than summary JSON\n if 'cnf_astream_compress_additional_results' in cfg and cfg['cnf_astream_compress_additional_results']:\n foldername_zip=get_filename(data=cfg, postfix=None, ending=\"extra\", tstamp=prefix_timestamp, interface=ifname)\n basename_zip=os.path.join(resultdir,foldername_zip)\n #print(foldername_zip)\n\n shutil.move(resultdir_astream,basename_zip)\n shutil.make_archive(base_name=basename_zip, format='gztar', root_dir=resultdir, base_dir=foldername_zip)#\"./\")\n shutil.rmtree(basename_zip)\n\n save_output(data=cfg, msg=json.dumps(towrite_data), postfix=\"summary\", tstamp=prefix_timestamp, outdir=cfg['resultdir'], interface=ifname)\n\n except Exception as e:\n if cfg['verbosity'] > 0:\n print ('[Exception #1] Execution or parsing failed for error: {}').format(e)", "title": "" }, { "docid": "004b8b76cd58f5729318124c0d2d56e7", "score": "0.4586375", "text": "def run():\n\tprint(str(datetime.datetime.now()))\n\t#ย 1. create rdf/itatti_attributions.nq\n\tif os.path.isfile(itatti_rdf) == False:\n\t\titatti.itatti_to_rdf(initial_csv,itatti_rdf)\n\t\tprint(\"itatti - created rdf/itatti_attributions.nq\"+str(datetime.datetime.now()))\n\t\n\t# 3. create rdf/linkset_artists_itatti.nq\n\tif os.path.isfile(artists_csv_revised):\n\t\titatti.artists_linkset(artists_csv_revised, linkset_artists_itatti)\n\t\tprint(\"itatti - created rdf/linkset_artists_itatti.nq\"+str(datetime.datetime.now()))\n\telse:\n\t\t# 2. create a csv with artists reconciliation to viaf to be manually revised and renamed artists_csv_revised\n\t\titatti.reconcile_artists_to_viaf(artists_csv, itatti_rdf) \n\t\tprint(\"itatti - created a csv with artists reconciliation to viaf to be manually revised and renamed artists_csv_revised\"+str(datetime.datetime.now()))\n\t\n\tif os.path.isfile(linkset_itatti_zeri_artworks) == False:\n\t\t# 4. create rdf/linkset_itatti_zeri_artworks.nq\n\t\titatti.reconcile_itatti_artworks_to_zeri(pastec_data,zeri_data,itatti_rdf,linkset_itatti_zeri_artworks)\n\t\tprint(\"itatti - created rdf/linkset_itatti_zeri_artworks.nq\"+str(datetime.datetime.now()))\n\t\n\t#ย 5. add criteria to rdf/itatti_attributions.nq\n\titatti.methodology_itatti(itatti_rdf, initial_csv)\n\tprint(\"itatti - added criteria to rdf/itatti_attributions.nq\"+str(datetime.datetime.now()))\n\n\t#ย 7. 
create rdf/linkset_arthistorians_itatti.nq\n\tif os.path.isfile(historians_revised):\n\t\titatti.historians_linkset(itatti_rdf,historians_revised,linkset_arthistorians_itatti)\n\t\tprint(\"itatti - created rdf/linkset_arthistorians_itatti.nq\"+str(datetime.datetime.now()))\n\n\telse:\n\t\t# 6. create csv/historians_itatti_viaf.csv to be manually revised and renamed historians_revised\n\t\titatti.reconcile_historians_to_viaf(itatti_rdf, historians_viaf)\n\t\tprint(\"itatti - created csv/historians_itatti_viaf.csv to be manually revised and renamed historians_revised\"+str(datetime.datetime.now()))\n\t\n\t#ย 8. upload all files\n\tserver = sparql.SPARQLServer('http://127.0.0.1:9999/blazegraph/sparql')\n\tserver.update('load <file://'+itatti_rdf+'>') \n\tserver.update('load <file://'+linkset_itatti_zeri_artworks+'>') \n\tserver.update('load <file://'+linkset_arthistorians_itatti+'>') \n\tserver.update('load <file://'+linkset_artists_itatti+'>') \n\tprint(\"itatti - uploaded all files\"+str(datetime.datetime.now()))", "title": "" }, { "docid": "591f013756d5bd29429bb71204a139d0", "score": "0.45841715", "text": "def set_up_recipe(self):\n self.set_input_options()\n self.set_class_attributes()\n self.make_working_directory()\n self.create_recipe_metadata()\n self.copy_input_file()\n self.parse_recipe_template()\n self.create_recipe_plan()\n self.create_archive_files()\n self.copy_posfile()\n self.copy_structure_index_files()", "title": "" }, { "docid": "41fc7232f11114c7c6c4cd985943bfd8", "score": "0.4583535", "text": "def visit_metadata(self, obj, meta):", "title": "" }, { "docid": "dd118a84c076efa3cc73b15fb096c752", "score": "0.4578238", "text": "def _set_meta_from_biotool(self, biotool):\n self.tool.metadata = cwlgen.Metadata()\n self.tool.metadata.name = biotool.name\n self.tool.metadata.about = biotool.description\n self.tool.metadata.url = biotool.homepage\n if biotool.informations.language:\n self.tool.metadata.programmingLanguage = biotool.informations.language", "title": "" } ]
6ae63b9b8b80d14d27de932f9eb1e03c
Get an agent from the kqml cljson representation (KQMLList).
[ { "docid": "685395fcba5b3ca5e845987b23be1b76", "score": "0.66371286", "text": "def get_agent(cls, cl_agent):\n agent_json = cls.converter.cl_to_json(cl_agent)\n if isinstance(agent_json, list):\n return [ensure_agent_type(Agent._from_json(agj))\n for agj in agent_json]\n else:\n return ensure_agent_type(Agent._from_json(agent_json))", "title": "" } ]
[ { "docid": "4d7dd4b8753496023f532f5b4858d14a", "score": "0.5037742", "text": "def getAgent(self):\n return self._agent", "title": "" }, { "docid": "93e38bdce58e1707d76e0deb2486f062", "score": "0.5006577", "text": "def get_agent(payload, agent_id):\n agent = Agents.query.filter_by(id=agent_id).first_or_404()\n\n if agent is None:\n abort(404, 'Sorry, we could not find any Agents to display.')\n\n return jsonify({\n 'agent': agent.format(),\n })", "title": "" }, { "docid": "207a21faf5429bb7db1836de33b097a5", "score": "0.4986502", "text": "def get_agent(self, mid):\n return self._agents[mid]", "title": "" }, { "docid": "b67b8bb9831c64211d71129dffc887fe", "score": "0.4930313", "text": "def agent_get(self) -> Agent:\n agent: Optional[Agent] = self.agent\n assert isinstance(agent, Agent), \"No agent is available\"\n return agent", "title": "" }, { "docid": "ec30bece1f5b12300f74b95cacb5b2d8", "score": "0.48187667", "text": "def agent(self, agent_id):\n response = self._request(\"GET\", [ROUTE_AGENTS, agent_id])\n if not response.ok:\n logging.error(\"Error agent id::{}\".format(response.text))\n return None\n return self._cbw_parser(response)", "title": "" }, { "docid": "b7f6fb64b7f15b03c165c43c1870bba5", "score": "0.47924715", "text": "async def test_get_agent_list(\n hass: HomeAssistant,\n init_components,\n mock_agent,\n mock_agent_support_all,\n hass_ws_client: WebSocketGenerator,\n snapshot: SnapshotAssertion,\n) -> None:\n client = await hass_ws_client(hass)\n\n await client.send_json_auto_id({\"type\": \"conversation/agent/list\"})\n msg = await client.receive_json()\n assert msg[\"type\"] == \"result\"\n assert msg[\"success\"]\n assert msg[\"result\"] == snapshot\n\n await client.send_json_auto_id(\n {\"type\": \"conversation/agent/list\", \"language\": \"smurfish\"}\n )\n msg = await client.receive_json()\n assert msg[\"type\"] == \"result\"\n assert msg[\"success\"]\n assert msg[\"result\"] == snapshot\n\n await client.send_json_auto_id(\n {\"type\": \"conversation/agent/list\", \"language\": \"en\"}\n )\n msg = await client.receive_json()\n assert msg[\"type\"] == \"result\"\n assert msg[\"success\"]\n assert msg[\"result\"] == snapshot\n\n await client.send_json_auto_id(\n {\"type\": \"conversation/agent/list\", \"language\": \"en-UK\"}\n )\n msg = await client.receive_json()\n assert msg[\"type\"] == \"result\"\n assert msg[\"success\"]\n assert msg[\"result\"] == snapshot\n\n await client.send_json_auto_id(\n {\"type\": \"conversation/agent/list\", \"language\": \"de\"}\n )\n msg = await client.receive_json()\n assert msg[\"type\"] == \"result\"\n assert msg[\"success\"]\n assert msg[\"result\"] == snapshot\n\n await client.send_json_auto_id(\n {\"type\": \"conversation/agent/list\", \"language\": \"de\", \"country\": \"ch\"}\n )\n msg = await client.receive_json()\n assert msg[\"type\"] == \"result\"\n assert msg[\"success\"]\n assert msg[\"result\"] == snapshot", "title": "" }, { "docid": "b64ee03fb66924e12c0fb3296f513efe", "score": "0.47515976", "text": "def GetAgent(self):\n if not self._agent:\n self._Connect()\n return self._agent", "title": "" }, { "docid": "2bd3533aa59f94658b7f5a7b876ee425", "score": "0.474963", "text": "def get_agent(self, agent_id):\n\n r = self._request(f\"agents/{agent_id}\")\n logger.info(f\"Got {agent_id} agent.\")\n return r", "title": "" }, { "docid": "8d6f2852e9096e1e34f5848de4f31d32", "score": "0.47101155", "text": "def get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self._get_agent(agent_id=agent_id)", "title": "" }, { "docid": 
"cc81c187d51654e2b1d7c007e835acc1", "score": "0.47035563", "text": "def get_agent(self, agent_id):\n return # osid.authentication.Agent", "title": "" }, { "docid": "62f7bc26aaa3e544171a4987e81e0c3c", "score": "0.46993935", "text": "def get_agent(self):\n try:\n agent = self._data.find('ul', {'class': 'links'}).text\n return agent.split(':')[1].strip()\n except:\n return", "title": "" }, { "docid": "a52578a8522bc88ed72fda948ce0bfe7", "score": "0.4697601", "text": "def parse(kls, json):\n raise NotImplementedError", "title": "" }, { "docid": "20ad1dad576be23dd4a95946ad9696cf", "score": "0.46614766", "text": "def get_agent(self, chassis):\n raise NotImplementedError", "title": "" }, { "docid": "1b93f7e6f6f7d06efadc99d4fa78db33", "score": "0.4599513", "text": "def get_agents(self):\n return # osid.authentication.AgentList", "title": "" }, { "docid": "fbda66f8d42726938738d4a61bcd61bb", "score": "0.45834517", "text": "def get_agents(self):\n return # osid.authentication.AgentList", "title": "" }, { "docid": "1784d53eaa2040bacbd6622c15c42b1c", "score": "0.4573968", "text": "def _extract_automations_from_houses_json(json_dict: dict) -> list[dict[str, Any]]:\n return _extract_items(\n json_dict[\"result\"][\"_links\"][\"child\"][AUTOMATIONS_ELEMENT][\"data\"]\n )", "title": "" }, { "docid": "d445c72e5959b4ffa9e3bc08197267e5", "score": "0.45598742", "text": "def get_agent(game):\n actions = ['left', 'forward', 'right']\n # Use a MetaAgent with QLearners at leaf nodes\n player = agent.MetaAgent(game, actions, epsilon=0.1, fov=3, learner_class=learners.QLearn)\n # Use a flat Agent with the default SARSA learner\n# player = agent.Agent(game, actions, epsilon=0.1, fov=3)\n return player", "title": "" }, { "docid": "80d036d008d424753e527e0203b6a6ca", "score": "0.45390698", "text": "async def websocket_list_agents(\n hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict\n) -> None:\n manager = _get_agent_manager(hass)\n\n country = msg.get(\"country\")\n language = msg.get(\"language\")\n agents = []\n\n for agent_info in manager.async_get_agent_info():\n agent = await manager.async_get_agent(agent_info.id)\n\n supported_languages = agent.supported_languages\n if language and supported_languages != MATCH_ALL:\n supported_languages = language_util.matches(\n language, supported_languages, country\n )\n\n agent_dict: dict[str, Any] = {\n \"id\": agent_info.id,\n \"name\": agent_info.name,\n \"supported_languages\": supported_languages,\n }\n agents.append(agent_dict)\n\n connection.send_message(websocket_api.result_message(msg[\"id\"], {\"agents\": agents}))", "title": "" }, { "docid": "e300a9d4dfc45a099d9deab8534cfd47", "score": "0.45230985", "text": "def test_get_agent(self, fake_client):\n agents = Agents(fake_client, \"base\")\n agents.get_agent(\"org_slug\", \"agent_id\")\n url = \"base/organizations/org_slug/agents/agent_id\"\n fake_client.get.assert_called_with(url)", "title": "" }, { "docid": "4fe646a7c00d3e581a3160d5ff52c78a", "score": "0.45020264", "text": "def test_scene_list(bridge: Bridge):\n scenes = bridge.target.get_scenes()\n assert scenes == {\"1\": {\"scene_id\": \"1\", \"name\": \"scene 1\"}}\n scene = bridge.target.get_scene_by_id(\"1\")\n assert scene == {\"scene_id\": \"1\", \"name\": \"scene 1\"}", "title": "" }, { "docid": "292f808314883dbf8193aa81079e2d8e", "score": "0.44742718", "text": "def init_agent(self, name):\n return self.agent_defs[name]['class'](\n self.agent_defs[name]['color'], name)", "title": "" }, { "docid": "e3e9c911ffca15700362bac3d14ff695", 
"score": "0.4461936", "text": "def consume_api(self, agent_api):", "title": "" }, { "docid": "6153cedd4ceb1e4db5489cd056ebad2c", "score": "0.4432789", "text": "def get_agent(agent_uri):\n query = '''\n PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n PREFIX prov: <http://www.w3.org/ns/prov#>\n SELECT DISTINCT (<''' + agent_uri + '''> AS ?ag) ?n ?ag2\n WHERE {\n GRAPH ?g {\n {\n { ?e a prov:Entity . }\n UNION\n { ?e a prov:Plan . }\n OPTIONAL{ ?e prov:wasAttributedTo <''' + agent_uri + '''> . }\n OPTIONAL{ <''' + agent_uri + '''> foaf:name ?n . }\n OPTIONAL{ <''' + agent_uri + '''> prov:actedOnBehalfOf ?ag2 . }\n }\n UNION\n {\n ?e a prov:Activity .\n OPTIONAL{ ?e prov:wasAssociatedWith <''' + agent_uri + '''> . }\n OPTIONAL{ <''' + agent_uri + '''> foaf:name ?n . }\n OPTIONAL{ <''' + agent_uri + '''> prov:actedOnBehalfOf ?ag2 . }\n }\n UNION\n {\n ?aoo a prov:Agent .\n ?aoo prov:actedOnBehalfOf <''' + agent_uri + '''> .\n OPTIONAL{ <''' + agent_uri + '''> foaf:name ?n . }\n }\n UNION\n {\n <''' + agent_uri + '''> a prov:Agent .\n OPTIONAL{ <''' + agent_uri + '''> foaf:name ?n . }\n OPTIONAL{ <''' + agent_uri + '''> prov:actedOnBehalfOf ?ag2 . }\n }\n }\n }\n '''\n return functions_db.db_query_secure(query)", "title": "" }, { "docid": "fa4c9dab588387ed70ccfa74938ab521", "score": "0.44079858", "text": "def getAgents(self):\n return self.agentList[:]", "title": "" }, { "docid": "c04c03c909efc7e9c41d869fbe725dde", "score": "0.43957353", "text": "def case_get_agent(self):\n self.send_agent()", "title": "" }, { "docid": "091bba53a34ec8e0ec9c0dea7d454919", "score": "0.43689954", "text": "def do_node_get(c, args):\n fields = {}\n\n # Filter out None values from args.\n if not hasattr(args, 'type') or not args.type:\n msg = \"too few arguments: type is required.\"\n utils.exit(msg)\n if not hasattr(args, 'json_data') or not args.json_data:\n msg = \"too few arguments: json_data is required.\"\n utils.exit(msg)\n\n try:\n fields = json.loads(args.json_data)\n except Exception:\n msg = \"json format is incorrect.\"\n utils.exit(msg)\n\n fields['function_type'] = args.type\n\n # Call Client\n data_list = c.node.get(fields)\n\n # Show result.\n pprint(data_list)", "title": "" }, { "docid": "439b11f8b6ac89a7e0f3c9059042da08", "score": "0.4368492", "text": "def index(self):\n\n req_url = self._vsm_url + \"/agents\"\n req = urllib2.Request(req_url)\n req.get_method = lambda: 'GET'\n req.add_header(\"content-type\", \"application/json\")\n req.add_header(\"X-Auth-Token\", self._token)\n resp = urllib2.urlopen(req)\n recive_data = json.loads(resp.read())\n return recive_data", "title": "" }, { "docid": "0901023eef8195785edab1d42607d2c2", "score": "0.43637887", "text": "def loadAgent(self):\n agentfile = askopenfilename(filetypes=[(\"Agent Files\", \"*.py\")], initialdir=[\"./agents\"])\n if agentfile:\n self.simulator.loadAgent(agentfile)", "title": "" }, { "docid": "fc19eba5635b17006e24ed65d2fc0d2b", "score": "0.43320686", "text": "def get_agents_by_genus_type(self, agent_genus_type):\n return # osid.authentication.AgentList", "title": "" }, { "docid": "578ac02b99faaf4f94d2253544a9eee6", "score": "0.43204287", "text": "def get(client):\n _method = \"/bots/\"\n\n data = client.get(_method)\n\n # Prepare retval list\n retval = list()\n for item in data:\n retval.append(Bot(item))\n\n return retval", "title": "" }, { "docid": "584fa554db90dab12d4657e26168d009", "score": "0.43146268", "text": "def get_agents_by_parent_genus_type(self, agent_genus_type):\n return # osid.authentication.AgentList", "title": "" }, { 
"docid": "d4d16d2bd6575c8762d2d34972833e09", "score": "0.4303025", "text": "def load(path):\n if not path.endswith(QLearningAgent.QLAGENT_FILE_SUFFIX) and not os.path.isfile(path):\n path += QLearningAgent.QLAGENT_FILE_SUFFIX\n try:\n agent = pickle.load(open(path, \"rb\"))\n print(\"Q-Learning Agent successfully loaded\")\n return agent\n except IOError:\n print(\"No existing agent found\")", "title": "" }, { "docid": "3ba0a337af0b868c3655292a31b3345e", "score": "0.4301452", "text": "def Agent(self):\n return self._agent", "title": "" }, { "docid": "4558a8399494c9941faa8d3f7d805f52", "score": "0.42868525", "text": "def get_agents_by_ids(self, agent_ids):\n return # osid.authentication.AgentList", "title": "" }, { "docid": "0a4761cae8d2bc696df6206409f78b30", "score": "0.42788002", "text": "def load_agent(self, file_name):\n try:\n import dill\n except ImportError:\n print \"ERROR: Module 'dill' is required to load agents.\"\n return None\n\n file_path = os.path.join(settings.AGENT_DIR, file_name)\n a = dill.load(open(file_path, \"rb\"))\n return a", "title": "" }, { "docid": "2651e7fb373427b701effca12bcb116d", "score": "0.42785493", "text": "def agent_class(self):\n return self._agent_class", "title": "" }, { "docid": "91c617e592f920fe0098063a5b73b5a3", "score": "0.42739347", "text": "def agent_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"agent_arns\")", "title": "" }, { "docid": "91c617e592f920fe0098063a5b73b5a3", "score": "0.42739347", "text": "def agent_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"agent_arns\")", "title": "" }, { "docid": "898405268a91daf21d1ae75d94f36e5d", "score": "0.4272685", "text": "def agent(request):\n return {'agent': getattr(request, 'agent', None)}", "title": "" }, { "docid": "3379e6db58154bec9fb06321d412bd94", "score": "0.42616466", "text": "def _get_agent_manager(hass: HomeAssistant) -> AgentManager:\n manager = AgentManager(hass)\n manager.async_setup()\n return manager", "title": "" }, { "docid": "7fbcdff4829adf41125da955055eca56", "score": "0.4251915", "text": "def get_agents(payload):\n agents = Agents.query.order_by(Agents.id).all()\n\n if len(agents) == 0:\n abort(404, 'Sorry, we could not find any Agents.')\n\n return jsonify({\n 'agents': paginate(request, agents),\n 'total_agents': len(agents)\n })", "title": "" }, { "docid": "f22b750158724d7fd42facf9db83548c", "score": "0.42503795", "text": "def get_nodes_for_agents(self, agents, check=True):\n fqdns = [self.get_fqdn_by_host_name(agent['host']) for agent in agents]\n nodes = self.get_nodes(fqdns=fqdns, check=check)\n\n return nodes", "title": "" }, { "docid": "08ed0291078cbf3bfbfddaba1573597e", "score": "0.42275774", "text": "def retrieve_list(self, parsed_args):\n blazar_client = self.get_client()\n body = self.args2body(parsed_args)\n resource_manager = getattr(blazar_client, self.resource)\n data = resource_manager.list_capabilities(**body)\n return data", "title": "" }, { "docid": "bc2d460b74814035b815a851266384fa", "score": "0.42165798", "text": "def reconstruct_from_klee_json(kleejson):\n assert kleejson.endswith(\"klee.json\")\n\n klees = OrderedDict()\n with open(kleejson, 'r') as jsonfile:\n klees = json.load(jsonfile)\n\n result = []\n for _, kinfo in sorted(klees.items(), key=operator.itemgetter(0)):\n # Read the KLEE's output\n stdoutput = \"\"\n if not path.isfile(path.join(kinfo[\"folder\"], \"output.txt\")):\n continue\n with open(path.join(kinfo[\"folder\"], \"output.txt\"), 'r') as out:\n stdoutput = 
out.read()\n\n # Build a new KLEE object for the result list\n result.append(KleeResult(\n kinfo[\"bcfile\"],\n kinfo[\"function\"] if \"function\" in kinfo else kinfo[\"caller\"],\n kinfo[\"folder\"],\n stdoutput\n ))\n\n return result", "title": "" }, { "docid": "62619003070e06bfdcbdabc2c8a339b0", "score": "0.42018762", "text": "def LoadAgent(agent_type, game, player_id, rng):\n seed = 12761381\n if agent_type == \"random\":\n return uniform_random.UniformRandomBot(player_id, rng)\n elif agent_type == \"agent\":\n return get_agent_for_tournament(player_id)\n elif agent_type == \"check_call\":\n policy = pyspiel.PreferredActionPolicy([1, 0])\n return pyspiel.make_policy_bot(game, player_id, seed, policy)\n elif agent_type == \"fold\":\n policy = pyspiel.PreferredActionPolicy([0, 1])\n return pyspiel.make_policy_bot(game, player_id, seed, policy)\n elif agent_type == \"50/50\":\n return fold_call_bot.FoldCallBot(player_id, rng)\n else:\n raise RuntimeError(\"Unrecognized agent type: {}\".format(agent_type))", "title": "" }, { "docid": "123911aa3a2cdb473ba6ea5be3e8944d", "score": "0.41919887", "text": "def extract_roles(json_data):\n return json_data.get(\"cern_roles\")", "title": "" }, { "docid": "28ae2cf9cfe4fc144df3db3cd1d24c65", "score": "0.41888282", "text": "def ld(self):\n data = self.response.read()\n return json.loads(data)", "title": "" }, { "docid": "c80a40f40455d64b2f6f7146924bb84d", "score": "0.41828984", "text": "def get_object(self):\n return CloudManAPI.from_request(self.request).clusters.get(\n self.kwargs[\"pk\"])", "title": "" }, { "docid": "212e5d9f7e4b302a031164ef01584fa4", "score": "0.41821504", "text": "def test_get_agents_list(self):\r\n response = self.client.get(\r\n path=reverse('api_services_agents'), **self.authentication_headers)\r\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "80308c058922737a2fa91f604b809d84", "score": "0.41814947", "text": "def load_agent(self, agent, remember_agent=True):\n\n if isinstance(agent, str):\n try:\n agent_loaded = pickle.load(file=open(agent, \"rb\"))\n except FileNotFoundError:\n print(f\"File {agent} does not exist.\") \n agent_loaded = None\n else:\n agent_loaded = agent\n \n if remember_agent:\n self.agent = agent_loaded\n\n return agent_loaded", "title": "" }, { "docid": "4dc0ca2277346023f95017723b786629", "score": "0.4174147", "text": "def getMObject(node):\n selectionList = om.MSelectionList()\n selectionList.add(node)\n oNode = selectionList.getDependNode(0)\n # print \"APItype of {0} is {1}\".format(node, oNode.apiTypeStr)\n return oNode", "title": "" }, { "docid": "aa4edb5f73a3caeeee27c1bf2b3e7a7a", "score": "0.41675597", "text": "def from_json(json_obj):\n return rhino3dm.CommonObject.Decode(json_obj)", "title": "" }, { "docid": "47fa08eef5afc9a7a7194b304b2af89e", "score": "0.41671935", "text": "def getAgents(self):\n try:\n self.cv.acquire()\n return self.agents.values()\n finally:\n self.cv.release()", "title": "" }, { "docid": "a49e26e959e04009a14ab1fa7644f10b", "score": "0.415728", "text": "def get_agents_by_record_type(self, agent_record_type):\n return # osid.authentication.AgentList", "title": "" }, { "docid": "a74a36ee4ec5bc9d96a5ea9b84813100", "score": "0.41449738", "text": "def create_agent(name, hg):\n if name == 'actors':\n return Actors(hg)\n elif name == 'claim_actors':\n return ClaimActors(hg)\n elif name == 'claims':\n return Claims(hg)\n elif name == 'conflicts':\n return Conflicts(hg)\n elif name == 'corefs_dets':\n return CorefsDets(hg)\n elif name == 'corefs_names':\n 
return CorefsNames(hg)\n elif name == 'corefs_onto':\n return CorefsOnto(hg)\n elif name == 'corefs_unidecode':\n return CorefsUnidecode(hg)\n elif name == 'reddit_parser':\n return RedditParser(hg)\n elif name == 'taxonomy':\n return Taxonomy(hg)\n elif name == 'txt_parser':\n return TxtParser(hg)\n else:\n RuntimeError('unknown agent: {}'.format(name))", "title": "" }, { "docid": "30f62486f3a0a26b385de0f2380f74e9", "score": "0.41334313", "text": "def load_from_json(cluster_name):\n data = Data()\n json_data = data.read_cluster_json(cluster_name)\n if json_data is None:\n return None\n\n ambari_server_vm = []\n service_server_vm_list = []\n ambari_agent_vm_list = []\n\n for vm_json in json_data[\"ambari_server_vm\"]:\n ambari_server_vm.append(VM.load_from_json(vm_json))\n\n for vm_json in json_data[\"service_server_vm_list\"]:\n service_server_vm_list.append(VM.load_from_json(vm_json))\n\n for vm_json in json_data[\"ambari_agent_vm_list\"]:\n ambari_agent_vm_list.append(VM.load_from_json(vm_json))\n\n cluster = Cluster()\n cluster.cluster_name = cluster_name\n cluster.state = json_data[\"state\"]\n cluster.create_time = json_data[\"create_time\"]\n cluster.ambari_server_vm = ambari_server_vm\n cluster.service_server_vm_list = service_server_vm_list\n cluster.ambari_agent_vm_list = ambari_agent_vm_list\n return cluster", "title": "" }, { "docid": "7e2026fcf730a58013072124b40210f0", "score": "0.4120314", "text": "def list_targets(self):\n json_targets = self.run_ffx(['target', 'list', '-f', 'json'])\n if not json_targets:\n return []\n try:\n return json.loads(json_targets)\n except ValueError:\n # TODO(grt): Change to json.JSONDecodeError once p3 is supported.\n return []", "title": "" }, { "docid": "3e1da26f9da60683dcbeb2bc9e67e5ca", "score": "0.41030955", "text": "def get_humanoid_object_from_look_command(action, list_of_humanoids):\n humanoid_name = action[8:len(action)]\n list_of_humanoid_names = []\n\n for humanoid in list_of_humanoids:\n list_of_humanoid_names.append(humanoid.name.lower())\n\n if humanoid_name in list_of_humanoid_names:\n index = list_of_humanoid_names.index(humanoid_name)\n return list_of_humanoids[index]", "title": "" }, { "docid": "1d2c804bc4b8ff79d9aa64b198f7126f", "score": "0.40943533", "text": "def agents(self):\n raise NotImplementedError()", "title": "" }, { "docid": "87dfddccacaee5b72edabf112bc202e6", "score": "0.40870374", "text": "def read_platform_agent(self, platform_agent_id=''):\n return self.platform_agent.read_one(platform_agent_id)", "title": "" }, { "docid": "0da2b5dcaeeffc7a67ccd21cd4474736", "score": "0.4084161", "text": "def requestAgents(self):\n self._agents = {}\n agents_iq = Iq(type='get')\n agents_iq.setQuery(NS_AGENTS)\n self.SendAndWaitForResponse(agents_iq)\n self.DEBUG(\"agents -> %s\" % ustr(self._agents),DBG_NODE_IQ)\n return self._agents", "title": "" }, { "docid": "d9b84c70dcf25d0a53f315614487c79b", "score": "0.40577957", "text": "def get_bone(bone_name, list_of_bones_json):\n for bone in list_of_bones_json:\n if (bone[\"name\"] == bone_name):\n return bone\n print(type(bone_name))\n print(\"ERROR: Bone \" + bone_name + \" not found...\")", "title": "" }, { "docid": "cb4d08ca8f46640bb93a236e9d2cd7e1", "score": "0.4053727", "text": "def loadJSON(client, file_name, uname=None, mesh_class = 'Neuropil'):\n with open(file_name) as f:\n data = json.load(f)\n if uname == None:\n uname = file_name.split('.')[0]\n rid = '#'+uname\n mesh_data = {'data': {'data': {rid: {'name': uname,\n 'uname': uname,\n 'morph_type': 'mesh',\n 'faces': 
data['faces'],\n 'vertices': data['vertices'],\n 'class': mesh_class}},\n 'queryID': '0-0'},\n 'messageType': 'Data',\n 'widget': 'NLP'}\n client.tryComms(mesh_data)", "title": "" }, { "docid": "acf2e7e27d36ca46b90d42607baaefed", "score": "0.4042796", "text": "def get_agent_details(self, agent_id):\n url = u\"{0}/agents\".format(self.base_url)\n\n params = {\n 'ids': agent_id\n }\n\n response = self.rc.execute(\"GET\", url, headers=self.headers, params=params, \n verify=self.verify, proxies=self.rc.get_proxies())\n\n return response.json()", "title": "" }, { "docid": "accea2997f1314056ebc5438e422aaa3", "score": "0.40392008", "text": "def getAgent(self, brokerBank, agentBank):\n bankKey = str(agentBank)\n try:\n self.cv.acquire()\n if bankKey in self.agents:\n return self.agents[bankKey]\n finally:\n self.cv.release()\n return None", "title": "" }, { "docid": "c23c4caafada96a180415f33282b53a4", "score": "0.40326607", "text": "def parse(location):\n if not is_haxelib_json(location):\n return\n\n with io.open(location, encoding='utf-8') as loc:\n package_data = json.load(loc)\n return build_package(package_data)", "title": "" }, { "docid": "e01c98277fb88b44099ac46ccac3a737", "score": "0.40324464", "text": "def agents(self, params=None):\n response = self._get_pages(\"GET\", [ROUTE_AGENTS], params)\n return response", "title": "" }, { "docid": "45ee936ed74f1d5fa7bbdb0763b1ddb0", "score": "0.40291774", "text": "def getLaser():\n mrds.request('GET', '/lokarria/laser/echoes')\n response = mrds.getresponse()\n if (response.status == 200):\n laserData = response.read()\n return json.loads(laserData)\n else:\n return response", "title": "" }, { "docid": "fa199e7ddc924e1cde2491a0c0f452d8", "score": "0.4027985", "text": "def agents(self) -> List[AbstractAgent]:\n return self._agents", "title": "" }, { "docid": "ff4e7a994541392a4ca6dc59ae62ba57", "score": "0.40278476", "text": "def get_agents_dict():\n query = '''\n PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n PREFIX prov: <http://www.w3.org/ns/prov#>\n SELECT DISTINCT ?ag ?n\n WHERE {\n GRAPH ?g {\n {\n { ?e a prov:Entity . }\n UNION\n { ?e a prov:Plan . }\n ?e prov:wasAttributedTo ?ag .\n OPTIONAL{ ?ag foaf:name ?n . }\n }\n UNION\n {\n ?a a prov:Activity .\n ?a prov:wasAssociatedWith ?ag .\n OPTIONAL{ ?ag foaf:name ?n . }\n }\n UNION\n {\n ?ag1 a prov:Agent .\n ?ag1 prov:actedOnBehalfOf ?ag .\n OPTIONAL{ ?ag foaf:name ?n . 
}\n }\n }\n }\n '''\n agents = functions_db.db_query_secure(query)\n agent_items = []\n if agents and 'results' in agents:\n for agent in agents['results']['bindings']:\n ret = {}\n ret['ag'] = urllib.parse.quote(str(agent['ag']['value']))\n ret['ag_u'] = str(agent['ag']['value'])\n if agent.get('n'):\n ret['n'] = str(agent['n']['value'])\n agent_items.append(ret)\n return agent_items", "title": "" }, { "docid": "e8436df276f2a6d1885a353ecc64cf92", "score": "0.40274596", "text": "def get_agents(self, params):\n url = u\"{0}/agents\".format(self.base_url)\n\n response = self.rc.execute(\"GET\", url, headers=self.headers, params=params, \n verify=self.verify, proxies=self.rc.get_proxies())\n\n return response.json()", "title": "" }, { "docid": "2ae6dcf0c9ad5008aa2900fe0dd379bb", "score": "0.40244552", "text": "def get_agent(cls):\n agt = cls.agent\n if agt is None:\n raise ValueError('Outstation has no configured agent')\n return agt", "title": "" }, { "docid": "764d7dfdf77ce75f1d0662bd8ef53f58", "score": "0.40224984", "text": "def agent_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"agent_arns\")", "title": "" }, { "docid": "764d7dfdf77ce75f1d0662bd8ef53f58", "score": "0.40224984", "text": "def agent_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"agent_arns\")", "title": "" }, { "docid": "decd8d6d62b5ed00874aabc8ad965e75", "score": "0.4010371", "text": "def getLearner(self): \n return self.learner", "title": "" }, { "docid": "bbcdf695c098cd94335c1e57932898e7", "score": "0.4010169", "text": "def do_agent(self, arg):\n if not arg:\n print('Invalid command {0}: command \"agent\" requires at least one argument: agent name'.format(arg))\n return\n\n args = arg.split(' ')\n # arg[0] = agent_name\n # arg[1] = command\n if args[0] == 'find':\n # this command does not need agent name\n agent_name = 'all'\n else:\n agent_name = args.pop(0)\n\n work_args = ' '.join(args)\n if not work_args:\n sub_cmd = agent_commands.AgentCommands(agent_name, self.base_url, self.token, self.netid,\n region=self.current_region)\n sub_cmd.cmdloop()\n else:\n sub_cmd = agent_commands.AgentCommands(agent_name, self.base_url, self.token, self.netid,\n region=self.current_region)\n sub_cmd.onecmd(work_args)", "title": "" }, { "docid": "70693a96dc59c8725d096885e9f2531a", "score": "0.40069", "text": "def get_agent_details_svg(agent_dict):\n a_uri = agent_dict.get('uri');\n if agent_dict.get('n'):\n a_label = agent_dict.get('n')\n else:\n aLabel = a_uri\n aLabel = aLabel.split('#')\n if len(aLabel) < 2:\n aLabel = a_uri.split('/')\n name = aLabel[-1]\n script = '''\n var aLabel = \"''' + a_label + '''\";\n var agent = addAgent(310, 200, aLabel, \"\");\n '''\n\n #print actedOnBehalfOf, if it has one\n if agent_dict.get('ag2'):\n agent_uri = agent_dict.get('ag2')\n agent_uri_encoded = urllib.parse.quote(agent_uri)\n agent_name = agent_uri\n agent_name = agent_name.split('#')\n if len(agent_name) < 2:\n agent_name = agent_uri.split('/')\n agent_name = agent_name[-1]\n script += '''\n var agentAOBO = addAgent(310, 5, \"''' + agent_name + '''\", \"''' + settings.PROMS_INSTANCE_NAMESPACE_URI + \"id/agent/?uri=\" + agent_uri_encoded + '''\");\n addLink(agent, agentWOBO, \"prov:actedOnBehalfOf\", RIGHT);\n '''\n return [True, script]", "title": "" }, { "docid": "521e6b5729c969c1f0acdbc53d67e013", "score": "0.40046138", "text": "def find_child_system_from_json(self, jsonobj):\n if \"proxy\" in jsonobj:\n path = jsonobj[\"proxy\"]\n if path == 
\"local/local\":\n return self\n else:\n return self\n # This works - could be a bit slow if you have lots of child nodes...\n q = \"\"\"MATCH (drone)<-[:parentsys*]-(child)\n WHERE ID(drone) = {id} AND child.childpath = {path}\n RETURN child\"\"\"\n store = self.association.store\n child = store.load_cypher_node(q, {\"id\": self.association.node_id, \"path\": path})\n if child is None:\n raise (\n ValueError(\n \"Child system %s from %s [%s] was not found.\"\n % (path, str(self), str(self.association.node_id))\n )\n )\n return child", "title": "" }, { "docid": "7c169acc0a515547bfd4250dd4ff9ca0", "score": "0.40045378", "text": "def adj(cli_opts: bunch.Bunch, json: bool, areas: List[str]): # noqa: B902\n\n nodes = parse_nodes(cli_opts, \"\")\n lm.LMAdjCmd(cli_opts).run(nodes, json, areas)", "title": "" }, { "docid": "32ef73144fc835c18008e0d8004b7e79", "score": "0.39989415", "text": "def agent(self):\n return (self.__agentX, self.__agentY)[int(self.ply.agent.value)]", "title": "" }, { "docid": "f1be04e5a0bf834d653bf29fbf5f733b", "score": "0.39961782", "text": "def from_json(cls, json_str: str) -> LocationMessageContent:\n return cls.from_dict(json.loads(json_str))", "title": "" }, { "docid": "a896c7b98f7ef06c7fded7b10332ae2b", "score": "0.39956445", "text": "def random_agent(env):\n s=env.v[env.v.keys()[randint(0,len(env.v)-1)]]\n g=env.v[env.v.keys()[randint(0,len(env.v)-1)]]\n name = ''.join(choice(string.ascii_uppercase + string.digits) for _ in range(6))\n return GraphAgent(s, g, name)", "title": "" }, { "docid": "fcff7e4b7071f11462d7d0378dd80c7e", "score": "0.39901683", "text": "def parse_hdlConvertor_json(j):\n assert isinstance(j, list), j\n ctx = HdlContext()\n for jo in j:\n o = _parse_hdlConvertor_json(jo)\n ctx.objs.append(o)\n return ctx", "title": "" }, { "docid": "5eff723d36e14c5dcd7828bc380d7e7b", "score": "0.39841434", "text": "def test_list_agents(self):\n a1 = Agent(\"A-001\", \"Paco\", \"Martin\", \"1/1/1990\", 200.0, \"[email protected]\")\n o = Organization(\"ORG0001\", \"SampleCompany\")\n o.add_agent(a1)\n o.remove_agent(a1)\n self.assertIsNone(AgentController.list_agents(o),True)", "title": "" }, { "docid": "1ce8bbeb31fb3a0249f243f5b54a4b72", "score": "0.3978318", "text": "def get_all_agents(self) -> List[KappaAgent]:\n # replace commas with spaces, then split string into a list at closing parenthesis\n return self._agents", "title": "" }, { "docid": "16b07166bfc828273b08152d689b4aeb", "score": "0.3973336", "text": "def get_agents_from_performance(data_dict):\n\n _agents = []\n for key, value in data_dict.iteritems():\n _agents.append(value['agent'])\n return _agents", "title": "" }, { "docid": "cf2a74e202c066d6868ce56a79fbe747", "score": "0.3967745", "text": "def Load(listName):\r\n return ApiClient.Request('GET', '/list/load', {\r\n 'listName': listName})", "title": "" }, { "docid": "228510b718dfd4697625c81130a9a37e", "score": "0.39664477", "text": "def get_agent_cmd():\n rsurl = 'http://' + rsip + ':8080/v2-beta/registrationtokens?name=token_' + rancher_proj_id\n resp = requests.get(rsurl)\n if resp.status_code != 200:\n raise Exception(\"ERROR : Step 2 : Failed to get Agent command from Rancher Server {}\".format(rsurl))\n return resp.json()['data'][0]['command']", "title": "" }, { "docid": "009216e4da02fbec0a3669f57218a09a", "score": "0.3964302", "text": "def agentsAt(self, row, col):\n return self.agentMap[row, col]", "title": "" }, { "docid": "0c04027d5a32f163ec980f3530209854", "score": "0.396105", "text": "def get_agent_dict(agent_uri):\n agent_detail = 
get_agent(agent_uri)\n ret = {}\n if agent_detail and 'results' in agent_detail and len(agent_detail['results']['bindings']) > 0:\n ret['uri'] = agent_uri\n ret['uri_html'] = urllib.parse.quote(agent_uri)\n if 'n' in agent_detail['results']['bindings'][0]:\n ret['n'] = agent_detail['results']['bindings'][0]['n']['value']\n else:\n ret['n'] = agent_uri\n if 'ag2' in agent_detail['results']['bindings'][0]:\n ret['ag2'] = agent_detail['results']['bindings'][0]['ag2']['value']\n # TODO: Re-enable when it's more than just the Agent being displayed\n svg_script = get_agent_details_svg(ret)\n if svg_script[0] == True:\n a_script = svg_script[1]\n a_script += get_agent_was_attributed_to_svg(agent_uri)\n a_script += get_agent_was_associated_with_svg(agent_uri)\n ret['a_script'] = a_script\n return ret", "title": "" }, { "docid": "e60ecaea0cecce9ef30b9b80042f0727", "score": "0.39512998", "text": "def _get_all_agents():\n\n client = mesos.DCOSClient()\n agents = client.get_state_summary()['slaves']\n return agents", "title": "" }, { "docid": "5961d32612425bcd0630dee85859bac5", "score": "0.39474222", "text": "def agent_self(consul_url=None, token=None):\n ret = {}\n query_params = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error(\"No Consul URL found.\")\n ret[\"message\"] = \"No Consul URL found.\"\n ret[\"res\"] = False\n return ret\n\n function = \"agent/self\"\n ret = _query(\n consul_url=consul_url,\n function=function,\n token=token,\n method=\"GET\",\n query_params=query_params,\n )\n return ret", "title": "" }, { "docid": "946a53346d4044e9580db8d4c557dab8", "score": "0.39412442", "text": "def loadSingleModiscoMotifOfArbitraryClass(jsonableObject):\r\n theClass = eval(jsonableObject[ModiscoMotif.JsonKeys.derivedClass]); \r\n return theClass.loadFromJsonableObject(jsonableObject);", "title": "" }, { "docid": "d99a678d5014809bf4fbc76968359bf1", "score": "0.39371005", "text": "def get_bot(cmd, client, resource_group_name, resource_name, bot_json=None):\n raw_bot_properties = client.bots.get(\n resource_group_name=resource_group_name,\n resource_name=resource_name\n )\n if bot_json:\n return BotJsonFormatter.create_bot_json(cmd, client, resource_group_name, resource_name, logger,\n raw_bot_properties=raw_bot_properties)\n\n return raw_bot_properties", "title": "" }, { "docid": "957f897c67db9afcff5ff9d49de0223b", "score": "0.39361537", "text": "def agents(self):\n return self._agents", "title": "" }, { "docid": "957f897c67db9afcff5ff9d49de0223b", "score": "0.39361537", "text": "def agents(self):\n return self._agents", "title": "" }, { "docid": "bcfdd8829667edbd11a78f97b87c2069", "score": "0.3932806", "text": "async def test_custom_agent(\n hass: HomeAssistant,\n hass_client: ClientSessionGenerator,\n hass_admin_user: MockUser,\n mock_agent,\n) -> None:\n assert await async_setup_component(hass, \"homeassistant\", {})\n assert await async_setup_component(hass, \"conversation\", {})\n assert await async_setup_component(hass, \"intent\", {})\n\n client = await hass_client()\n\n data = {\n \"text\": \"Test Text\",\n \"conversation_id\": \"test-conv-id\",\n \"language\": \"test-language\",\n \"agent_id\": mock_agent.agent_id,\n }\n\n resp = await client.post(\"/api/conversation/process\", json=data)\n assert resp.status == HTTPStatus.OK\n assert await resp.json() == {\n \"response\": {\n \"response_type\": \"action_done\",\n \"card\": {},\n \"speech\": {\n \"plain\": {\n \"extra_data\": None,\n \"speech\": \"Test response\",\n }\n },\n \"language\": 
\"test-language\",\n \"data\": {\"targets\": [], \"success\": [], \"failed\": []},\n },\n \"conversation_id\": \"test-conv-id\",\n }\n\n assert len(mock_agent.calls) == 1\n assert mock_agent.calls[0].text == \"Test Text\"\n assert mock_agent.calls[0].context.user_id == hass_admin_user.id\n assert mock_agent.calls[0].conversation_id == \"test-conv-id\"\n assert mock_agent.calls[0].language == \"test-language\"\n\n conversation.async_unset_agent(\n hass, hass.config_entries.async_get_entry(mock_agent.agent_id)\n )", "title": "" }, { "docid": "5f544f10fb14e9ec6837d282caedecbb", "score": "0.3930963", "text": "def async_get_agent_info(\n hass: core.HomeAssistant,\n agent_id: str | None = None,\n) -> AgentInfo | None:\n manager = _get_agent_manager(hass)\n\n if agent_id is None:\n agent_id = manager.default_agent\n\n for agent_info in manager.async_get_agent_info():\n if agent_info.id == agent_id:\n return agent_info\n\n return None", "title": "" }, { "docid": "e651fe754285598839576e71231ee330", "score": "0.3930342", "text": "def get_actor(payload, actor_id):\n actor = Actors.query.filter_by(id=actor_id).first_or_404()\n\n if actor is None:\n abort(404, 'Sorry, we could not find any Agents to display.')\n\n return jsonify({\n 'actor': actor.format(),\n })", "title": "" }, { "docid": "3ebad06593d933bd65c40134e67e2e75", "score": "0.39281777", "text": "def get_agent_rdf(agent_uri):\n agent = '''@prefix prov: <http://www.w3.org/ns/prov#> .\n<''' + agent_uri + '''>\n a prov:Agent;\n '''\n return agent", "title": "" } ]
2159ce5f65118e46b83ea4fe457f9f0a
actions array actionTime string actionType string message string subActionType
[ { "docid": "ddcd7ce0aedbe47e10420f7964345ddc", "score": "0.0", "text": "def client_info(self, action):\n url = self.baseurl + '/api/v1.2/statis/action'\n r = requests.post(url, json=action)\n # response = r.json()\n return r", "title": "" } ]
[ { "docid": "a1eab1b30f938463e2e5fb67b270f001", "score": "0.63483447", "text": "def define_actions( action ):\n\n actions = [\"walking\", \"eating\", \"smoking\", \"discussion\", \"directions\",\n \"greeting\", \"phoning\", \"posing\", \"purchases\", \"sitting\",\n \"sittingdown\", \"takingphoto\", \"waiting\", \"walkingdog\",\n \"walkingtogether\"]\n\n if action in actions:\n return [action]\n\n if action == \"all\":\n return actions\n\n if action == \"all_srnn\":\n return [\"walking\", \"eating\", \"smoking\", \"discussion\"]\n\n raise( ValueError, \"Unrecognized action: %d\" % action )", "title": "" }, { "docid": "c47b921cb769a1d110302ea8d774267b", "score": "0.6212396", "text": "def actions(self, action_ary: ['Action']) -> 'Notification':\n\n self.data['actions'] = [x.data for x in action_ary]\n return self", "title": "" }, { "docid": "eaffc341a1363cc6c59a11441240d3f4", "score": "0.6139617", "text": "def actions(self, state):\n # raise NotImplementedError\n word = self.query[state:]\n action = [word[:i] for i in range(1,len(word) + 1)]\n #print(\"action\", action)\n return action", "title": "" }, { "docid": "b5c89e93c300530bd443aa9643d7b347", "score": "0.5910075", "text": "def action(self, action):\n\n values = {}\n values[\"message\"] = choice(self.actions[action][\"txts\"])\n values[\"picture\"] = choice(self.actions[action][\"gifs\"])\n return values", "title": "" }, { "docid": "4d73797f9feb6cffc191e6431ac602fe", "score": "0.5783735", "text": "def getActions(self, state): \n util.raiseNotDefined()", "title": "" }, { "docid": "4d73797f9feb6cffc191e6431ac602fe", "score": "0.5783735", "text": "def getActions(self, state): \n util.raiseNotDefined()", "title": "" }, { "docid": "54bbf94dc43f41ab6844e7da88f8c70d", "score": "0.57705", "text": "def parse_actions(actions: str):\n parsed_actions = {}\n actions = actions.split(';')\n for action in actions:\n action_type, params = action.split('{')\n params = params[:-1]\n params = params.split(',')\n parsed_actions[action_type] = params\n return parsed_actions", "title": "" }, { "docid": "f14cc7b292806576fbcfa1e9249919fe", "score": "0.5741153", "text": "def get_actions(self):\n if self._action_sequence is None:\n self._action_sequence = list()\n self._parse()\n return self._action_sequence", "title": "" }, { "docid": "493f207d23471b62170bde61da5b3d51", "score": "0.56790364", "text": "def get_actions(self):\n\n return [\"TurnCW\", \"TurnCCW\", \"MoveF\", \"MoveB\",\"REFUEL\"]", "title": "" }, { "docid": "6f693942ab96cfa6057379a1285e87a7", "score": "0.56697184", "text": "def action(self, action_type: str, **kwargs) -> list:\n\n context = {'type': action_type}\n context.update(kwargs)\n\n results = []\n\n if action_type in self.scheduler_parser_handler_name:\n for _, each_parser_client in self.parser_clients.items():\n results.append(getattr(each_parser_client, action_type)(context))\n for _, each_scheduler_client in self.scheduler_clients.items():\n results.append(getattr(each_scheduler_client, action_type)(context))\n elif action_type in self.scheduler_handler_name:\n for _, each_scheduler_client in self.scheduler_clients.items():\n results.append(getattr(each_scheduler_client, action_type)(context))\n elif action_type in self.parser_handler_name:\n for _, each_parser_client in self.parser_clients.items():\n results.append(getattr(each_parser_client, action_type)(context))\n else:\n results = [{'code': STATUS_CODE.USER_ERROR, 'msg': 'Not a vaild action type.'}]\n\n return results", "title": "" }, { "docid": "a0ddc3b69fdd1ee1d8be64b3b61e9feb", 
"score": "0.5652416", "text": "def actions(self, state):\n\t\t\n\t\tfor i in range(4):\n\t\t\tfor j in range(4):\n\t\t\t\tif state[i][j] == 0:\n\t\t\t\t\tposition = (i,j)\n\t\t\t\n\t\tactions = []\t\t\n\t\tif position[0] > 0:\n\t\t\tactions.append( state[position[0]-1][position[1]] )\n\t\tif position[0] < 3:\n\t\t\tactions.append( state[position[0]+1][position[1]] )\n\t\tif position[1] > 0:\n\t\t\tactions.append( state[position[0]][position[1]-1] )\n\t\tif position[1] < 3:\n\t\t\tactions.append( state[position[0]][position[1]+1] )\n\n\t\treturn actions", "title": "" }, { "docid": "d336bc0715b071e69576dbfb9e6edc62", "score": "0.56320363", "text": "def actions(self):\n actions = []\n uf = self.url_for\n bc = self.barcamp\n # we need to check for barcamp as pages use this handler, too and pages can also be on the top level \n if bc is not None:\n actions.append(Action('home', T(\"Home\"), uf('barcamps.index', slug = self.barcamp.slug), self.action == 'home'))\n actions.append(Action('sessions', T(\"session proposals\"), uf('barcamps.sessions', slug = bc.slug), self.action == 'sessions'))\n actions.append(Action('participants', T(\"participants\"), uf('barcamps.userlist', slug = bc.slug), self.action == 'participants'))\n if bc.planning_pad_public or self.is_admin:\n actions.append(Action('planning', T(\"planning\"), uf('barcamps.planning_pad', slug = bc.slug), self.action == 'planning'))\n actions.append(Action('docs', T(\"documentation\"), uf('barcamps.documentation_pad', slug = bc.slug), self.action == 'docs'))\n for page in self.barcamp_view.pages_for(\"menu\"):\n pid = \"page_%s\" %page._id\n actions.append(Action(pid, page.menu_title, uf('barcamp_page', slug = bc.slug, page_slug = page.slug), self.action == pid))\n if bc.twitterwall:\n if bc.twitterwall.find(\"tweetwally\") != -1:\n actions.append(Action('twitterwall', T(\"Twitterwall\"), uf(\"barcamps.tweetwally\", slug = bc.slug), self.action == 'twitterwall'))\n else:\n actions.append(Action('twitterwall', T(\"Twitterwall\"), bc.twitterwall, self.action == 'twitterwall'))\n return actions", "title": "" }, { "docid": "f1b102d5af0fc9c1bac9515d6b1d7a37", "score": "0.56085235", "text": "def getRandomAction(self, actions):", "title": "" }, { "docid": "e50afc2d31ef4b93e11938f02b39f385", "score": "0.557938", "text": "def dialog_action_functions(action: str):\n action_mappings = {\n SLACK_COMMAND_ASSIGN_ROLE_SLUG: [handle_assign_role_action],\n SLACK_COMMAND_ENGAGE_ONCALL_SLUG: [incident_flows.incident_engage_oncall_flow],\n SLACK_COMMAND_EXECUTIVE_REPORT_SLUG: [report_flows.create_executive_report],\n SLACK_COMMAND_TACTICAL_REPORT_SLUG: [report_flows.create_tactical_report],\n SLACK_COMMAND_UPDATE_INCIDENT_SLUG: [handle_update_incident_action],\n }\n\n # this allows for unique action blocks e.g. 
invite-user or invite-user-1, etc\n for key in action_mappings.keys():\n if key in action:\n return action_mappings[key]\n return []", "title": "" }, { "docid": "9a6273254be8c69f00e7a2c60763fcff", "score": "0.5563254", "text": "def actions_from_ryu(ryu_actions, type_):\n if type_ == 'set':\n ret = ActionSet()\n elif type_ == 'list':\n ret = ActionList()\n else:\n raise ValueError(\"type_ should be either 'set' or 'list'\")\n for action in ryu_actions:\n if action.type == ofproto_v1_3.OFPAT_OUTPUT:\n ret.append('OUTPUT', action.port)\n elif action.type == ofproto_v1_3.OFPAT_COPY_TTL_OUT:\n ret.append('COPY_TTL_OUT', None)\n elif action.type == ofproto_v1_3.OFPAT_COPY_TTL_IN:\n ret.append('COPY_TTL_IN', None)\n elif action.type == ofproto_v1_3.OFPAT_SET_MPLS_TTL:\n ret.append('SET_MPLS_TTL', action.mpls_ttl)\n elif action.type == ofproto_v1_3.OFPAT_DEC_MPLS_TTL:\n ret.append('DEC_MPLS_TTL', None)\n elif action.type == ofproto_v1_3.OFPAT_PUSH_VLAN:\n ret.append('PUSH_VLAN', action.ethertype)\n elif action.type == ofproto_v1_3.OFPAT_POP_VLAN:\n ret.append('POP_VLAN', None)\n elif action.type == ofproto_v1_3.OFPAT_PUSH_MPLS:\n ret.append('PUSH_MPLS', action.ethertype)\n elif action.type == ofproto_v1_3.OFPAT_POP_MPLS:\n ret.append('POP_MPLS', action.ethertype)\n elif action.type == ofproto_v1_3.OFPAT_SET_QUEUE:\n ret.append('SET_QUEUE', action.queue_id)\n elif action.type == ofproto_v1_3.OFPAT_GROUP:\n ret.append('GROUP', action.group_id)\n elif action.type == ofproto_v1_3.OFPAT_SET_NW_TTL:\n ret.append('SET_NW_TTL', action.nw_ttl)\n elif action.type == ofproto_v1_3.OFPAT_DEC_NW_TTL:\n ret.append('DEC_NW_TTL', None)\n elif action.type == ofproto_v1_3.OFPAT_SET_FIELD:\n try:\n ret.append('SET_FIELD', (action.key.upper(),\n normalise_bytes(action.field.value)))\n except Exception:\n ret.append('SET_FIELD', (action.key.upper(),\n normalise_bytes(action.value)))\n elif action.type == ofproto_v1_3.OFPAT_PUSH_PBB:\n ret.append('PUSH_PBB', action.ethertype)\n elif action.type == ofproto_v1_3.OFPAT_POP_PBB:\n ret.append('POP_PBB', None)\n else:\n raise ValueError(\"Unknown ryu action type \" + str(action))\n return ret", "title": "" }, { "docid": "2fa62f3abf5a8f50aac27e4c27016cd4", "score": "0.55519265", "text": "def parse_actions(action_line):\n actions = []\n for action in action_line:\n parts = action.split('->')\n if len(parts) != 2:\n print \"Error, action's file does not fit the syntax rules\"\n sys.exit(4)\n diff_code = parts[0].strip()\n action = parts[1].strip()\n if action not in [\"append\", \"holdingpen\", \"correct\"] or diff_code not in \"rca\":\n print \"Error, action's file does not fit the syntax rules\"\n sys.exit(4)\n actions.append((diff_code, action))\n return actions", "title": "" }, { "docid": "47d6a1618e7870ac37fc382aec7f65bf", "score": "0.55481625", "text": "def actions(self):\n return []", "title": "" }, { "docid": "caa13ebe6af1033aa5a3ba30a8d86b94", "score": "0.5519115", "text": "def action_sub_top(form_str0, data0, begin_num0, mid_str0,\r\n global_ops0, file_type0):\r\n data0 = data0[(data0['action.Unknown'] == False) | (data0['action.Unknown'] == \"FALSE\")]\r\n\r\n sub_dic_0 = search_ac_var_sub(\"action.\", data0)\r\n for del0 in sub_dic_0[\"Unknown\"]:\r\n all_name0 = \"action.\" + del0 + mid_str0 + \"Unknown\"\r\n if all_name0 in data0.columns:\r\n data0 = data0[(data0[all_name0] == False) | (data0[all_name0] == \"FALSE\")]\r\n\r\n ac_dic_china = {\"error\": \"้”™่ฏฏ่กŒไธบ\", \"hacking\": \"้ป‘ๅฎข่ขญๅ‡ป\", \"misuse\": \"ไธๅฝ“ไฝฟ็”จ\", \"physical\": \"็‰ฉ็†ๆ“ไฝœ\",\r\n 
\"malware\": \"ๆถๆ„่ฝฏไปถ\", \"social\": \"็คพไบค็ญ–็•ฅ\", \"unknown\": \"ๆœช็Ÿฅๅ› ็ด \", \"environmental\": \"ๅผ‚ๅธธ็Žฏๅขƒ\"}\r\n\r\n sub_dic0 = {}\r\n colname0 = list(data0.columns)\r\n for it0 in colname0:\r\n if (form_str0 in it0) and (mid_str0 in it0):\r\n mid0 = it0.split(\".\")[1]\r\n if mid0 not in list(sub_dic0.keys()):\r\n sub_dic0[mid0] = [it0]\r\n else:\r\n sub_dic0[mid0].append(it0)\r\n\r\n sub_df_dic0 = {}\r\n for it1 in list(sub_dic0.keys()):\r\n # str.capitalize()\r\n cat0 = form_str0 + str.capitalize(it1)\r\n tmp_df0 = data0[(data0[cat0] == True) | (data0[cat0] == \"TRUE\")]\r\n if tmp_df0.shape[0] != 0:\r\n sub_df_dic0[it1] = tmp_df0[sub_dic0[it1]]\r\n\r\n key_list0 = list(sub_df_dic0.keys())\r\n\r\n mid_cn0 = {\".vector.\": \"้€”ๅพ„\", \".target.\": \"ๆ”ปๅ‡ป็›ฎๆ ‡\", \".variety.\": \"่กŒไธบ\"}\r\n\r\n for it2 in range(len(key_list0)):\r\n tmp_name0, tmp_value0, sum_0 = order_subaction_pct(sub_df_dic0[key_list0[it2]])\r\n if len(tmp_name0) > 7:\r\n tmp_name0 = tmp_name0[-7:]\r\n tmp_value0 = tmp_value0[-7:]\r\n # print(str(tmp_name0))\r\n action_title0 = \"ๅ›พ็‰‡\" + str(begin_num0 + it2 * 2) + \"๏ผšๅ…จ้ƒจๆ•ฐๆฎๆณ„้œฒไบ‹ไปถๅจ่ƒ่กŒไธบไน‹\"\r\n action_title0 = action_title0 + ac_dic_china[key_list0[it2]]\r\n action_title0 = action_title0 + \"ๅ„\" + mid_cn0[mid_str0] + \"ๅญ็ฑปๅ ๆฏ” (ๆ ทๆœฌๆ€ป้‡๏ผš\"\r\n action_title0 = action_title0 + str(sum_0) + \"ไปถ)\"\r\n action_ops0 = global_ops0 + action_title0 + file_type0\r\n bar_reversal_pct(action_title0, tmp_name0, tmp_value0,\r\n \"ไบ‹ไปถๅ ๆฏ”\", action_ops0)\r\n return None", "title": "" }, { "docid": "d840865f7453c5139ceaea3faa2c14ef", "score": "0.54962075", "text": "def lookup_action(self, action):\n acts1 = action // self.action1\n acts2 = action % self.action1\n if self.n_agents == 1:\n return [acts1, acts2]\n res = [ [ None for y in range( 2) ] for x in range( self.n_agents ) ]\n\n for i in range(0, self.n_agents):\n res[i][0] = acts1[i]\n res[i][1] = acts2[i]\n return res", "title": "" }, { "docid": "026af29e0000f7c891cb7cd72e35991f", "score": "0.5465604", "text": "def strings_to_actions(self, smiles):\n assert type(smiles) == list or type(smiles) == tuple\n tokens = [self._tokenize(s.replace('-c', 'c')) for s in smiles]\n parse_trees = [next(self._parser.parse(t)) for t in tokens]\n productions_seq = [tree.productions() for tree in parse_trees]\n actions = [[self._prod_map[prod] for prod in entry] for entry in productions_seq]\n # now extend them to max length\n actions = np.array([a + [self._n_chars - 1] * (self.MAX_LEN - len(a)) for a in actions])\n return actions", "title": "" }, { "docid": "501f46f16b9eb13f11893594284e4d8f", "score": "0.5462168", "text": "def actions(self, actions):\n return self.perform_actions(actions)", "title": "" }, { "docid": "b4f8db69e77517017bdc852e10611a21", "score": "0.54553807", "text": "def getValuesForActions(self, state, actions):\n actionValuePairs = []\n for i, action in enumerate(actions):\n value = self.getValues(state + [action])\n actionValuePairs.append((action,value))\n return actionValuePairs", "title": "" }, { "docid": "3a0a05b39ef016db1f83a981cf40ede7", "score": "0.54462606", "text": "def parse_actions(self, xml_obj):\r\n\t\tself.do_prepare_global_actions()\r\n\t\tself.actions_element = xml_obj\r\n\t\tfor s_node in xml_obj.children:\r\n\t\t\tif \"action\" == s_node.lname:\r\n\t\t\t\tname = s_node.attributes[\"name\"]\r\n\t\t\t\t_id = s_node.attributes[\"id\"].lower()\r\n\t\t\t\tif not _id or not name:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t_top = 
s_node.attributes.get(\"top\", \"\")\r\n\t\t\t\t_left = s_node.attributes.get(\"left\", \"\")\r\n\t\t\t\t_state = s_node.attributes.get(\"state\", \"\")\r\n\t\t\t\tcode = s_node.get_value_as_xml().strip()\r\n\t\t\t\tcategory = _id.replace(name,'') #TODO: find better solution to get category\r\n\t\t\t\tself.global_actions[category][_id] = VDOM_server_action(code, _id, _top, _left, _state, name)", "title": "" }, { "docid": "38d67554e9194c7c1b4691f632f29701", "score": "0.5443072", "text": "def actions(self):\n\t\treturn self.actionMap.keys()", "title": "" }, { "docid": "10483fef7f8d59cc3beff30209d7e9c4", "score": "0.5427823", "text": "def get_ticket_actions(req, ticket):", "title": "" }, { "docid": "a7058d90a2119671bfc7bee7645f994d", "score": "0.5424953", "text": "def action_trivial(actions):\n pass", "title": "" }, { "docid": "3be01d229557576a6520ebed2d04cbe3", "score": "0.5422009", "text": "def actions(self, state):", "title": "" }, { "docid": "a6edc69f438a4da64e669c26ee472bd3", "score": "0.5418893", "text": "def actions(self):\r\n return self._actions.iteritems()", "title": "" }, { "docid": "f26d5fed37b025c41128d33eb7527a63", "score": "0.5417461", "text": "def actions(self, request, group, action_list, **kwargs):\r\n return action_list", "title": "" }, { "docid": "a71b369ffbf84ac8ed20360cdeca0802", "score": "0.5415439", "text": "def actions(self):\n # add action functions to the action dictionary\n # self._actions['function_name'] = ('<action_name>', None | [...])\n\n return self._actions", "title": "" }, { "docid": "f6944104c18ab03eeaedd1251c332126", "score": "0.5401228", "text": "def __decode_action(self) -> Callable[[], None]:\n # or at least one of them? As individuals?\n actions: Dict[str, Callable[[], None]] = {\n \" \": self.__increase_volume,\n \"?\": self.__increase_octave,\n \".\": self.__increase_octave,\n \"!\": partial(self.__change_instrument, INSTRUMENT_AGOGO),\n \"O\": partial(self.__change_instrument, INSTRUMENT_HARPSICHORD),\n \"o\": partial(self.__change_instrument, INSTRUMENT_HARPSICHORD),\n \"I\": partial(self.__change_instrument, INSTRUMENT_HARPSICHORD),\n \"i\": partial(self.__change_instrument, INSTRUMENT_HARPSICHORD),\n \"U\": partial(self.__change_instrument, INSTRUMENT_HARPSICHORD),\n \"u\": partial(self.__change_instrument, INSTRUMENT_HARPSICHORD),\n \"\\n\": partial(self.__change_instrument, INSTRUMENT_TUBULAR_BELLS),\n \";\": partial(self.__change_instrument, INSTRUMENT_PAN_FLUTE),\n \",\": partial(self.__change_instrument, INSTRUMENT_CHURCH_ORGAN),\n }\n # if the action is none of above, it is a digit\n # default call defined to avoid too long line\n if self.__action.isdecimal():\n return partial(\n self.__change_instrument,\n int(self.__action) + self.__info.instrument,\n )\n return actions[self.__action]", "title": "" }, { "docid": "12aa5e7a37cee82b98fdd18f9a909e2d", "score": "0.53686905", "text": "def action_wildcards(self) -> List[str]:\n actions = []\n for action in self.Actions:\n prefix = AWSService._get_action_prefix(action)\n wildcard = f\"{prefix}*\"\n if wildcard in actions:\n continue\n\n if AWSService._count_prefix_occurances(prefix, self.Actions) > 1:\n actions.append(wildcard)\n else:\n actions.append(action)\n return sorted(actions)", "title": "" }, { "docid": "d55b928f1732f2eee883ce139ec5d197", "score": "0.5368034", "text": "def actions_to_ryu(actions, rule):\n ret = []\n extra_messages = []\n for action in actions:\n if action[0] == 'OUTPUT':\n ret.append(parser.OFPActionOutput(action[1]))\n elif action[0] == 'COPY_TTL_OUT':\n 
ret.append(parser.OFPActionCopyTtlOut())\n elif action[0] == 'COPY_TTL_IN':\n ret.append(parser.OFPActionCopyTtlIn())\n elif action[0] == 'SET_MPLS_TTL':\n ret.append(parser.OFPActionSetMplsTtl(action[1]))\n elif action[0] == 'DEC_MPLS_TTL':\n ret.append(parser.OFPActionDecMplsTtl())\n elif action[0] == 'PUSH_VLAN':\n ret.append(parser.OFPActionPushVlan(action[1]))\n elif action[0] == 'POP_VLAN':\n ret.append(parser.OFPActionPopVlan())\n elif action[0] == 'PUSH_MPLS':\n ret.append(parser.OFPActionPushMpls(action[1]))\n elif action[0] == 'POP_MPLS':\n ret.append(parser.OFPActionPopMpls(action[1]))\n elif action[0] == 'SET_QUEUE':\n ret.append(parser.OFPActionSetQueue(action[1]))\n elif action[0] == 'GROUP':\n if isinstance(action[1], Group):\n group_id, extra = group_to_ryu(action[1], rule)\n ret.append(parser.OFPActionGroup(group_id))\n extra_messages += extra\n else:\n ret.append(parser.OFPActionGroup(action[1]))\n elif action[0] == 'SET_NW_TTL':\n ret.append(parser.OFPActionSetNwTtl(action[1]))\n elif action[0] == 'DEC_NW_TTL':\n ret.append(parser.OFPActionDecNwTtl())\n elif action[0] == 'SET_FIELD':\n set_field = {action[1][0].lower(): action[1][1]}\n ret.append(parser.OFPActionSetField(**set_field))\n elif action[0] == 'PUSH_PBB':\n ret.append(parser.OFPActionPushPbb(action[1]))\n elif action[0] == 'POP_PBB':\n ret.append(parser.OFPActionPopPbb())\n else:\n assert not \"GGRR\"\n return (ret, extra_messages)", "title": "" }, { "docid": "7eefe0185c1217d59fa802253c055791", "score": "0.5358677", "text": "def interpret_action(self, raw_action):\n pass", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.534369", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.534369", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.534369", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.534369", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "e6d6f83da94b4e74b1e0ba80e93b922e", "score": "0.53387386", "text": "def actions(self) -> pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleCacheExpirationActionArgs', 'DeliveryRuleCacheKeyQueryStringActionArgs', 'DeliveryRuleRequestHeaderActionArgs', 'DeliveryRuleResponseHeaderActionArgs', 'UrlRedirectActionArgs', 'UrlRewriteActionArgs']]]]:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "1785971531ba894b1afbb0f7c60f8030", "score": "0.53379285", "text": "def getActionDict():\n\n actionDict = {\n 'AssignID': (0, ('0x07', '0x0115')),\n 'SetupPod': (0, ('0x03', '0x011b')),\n 'CnfgDelivFlg': (0, ('0x08', '0x1d')),\n 'CnxSetTmpBasal': (2, ('0x1f2', '0x1d', '0x1a16', '0x1d')),\n 'Status&Bolus': (2, ('0x0e', '0x1d', '0x1a17', '0x1d')),\n 'CnxAllSetBasal': (2, ('0x1f7', '0x1d', '0x1a13', '0x1d')),\n 'StatusCheck': (0, ('0x0e', '0x1d')),\n 'AcknwlAlerts': (0, ('0x11', '0x1d')),\n 'CnfgAlerts': (0, ('0x19', '0x1d')),\n 'SetBeeps': (0, ('0x1e', '0x1d')),\n 'CnxDelivery': (0, ('0x1f', '0x1d')),\n 'CnxBasal': (0, ('0x1f1', '0x1d')),\n 'CnxTmpBasal': (0, ('0x1f2', '0x1d')),\n 'CnxBolus': (0, ('0x1f4', '0x1d')),\n 'CnxAll': (0, ('0x1f7', '0x1d')),\n 'BolusAlone': (0, ('0x1a17', '0x1d')),\n 'DeactivatePod': (0, ('0x1c', '0x1d')),\n 'PrgBasalSch': (0, ('0x1a13', '0x1d'))\n }\n return actionDict", "title": "" }, { "docid": 
"5019b3fc3259e9ef4003e8058cd668b3", "score": "0.533532", "text": "def get_actions(self):\n return []", "title": "" }, { "docid": "d28a27536781ca386ea9f13ba4aee29e", "score": "0.5332621", "text": "def actions(self, state: str) -> list:\n # TODO implement\n possible_actions = []\n \n # Shamelessly copied from \"example_have_cake.py\"\n # due to identical functionality\n \n kb = PropKB()\n kb.tell(decode_state(state, self.state_map).pos_sentence())\n for action in self.actions_list:\n is_possible = True\n for clause in action.precond_pos:\n if clause not in kb.clauses: is_possible = False\n for clause in action.precond_neg:\n if clause in kb.clauses: is_possible = False\n if is_possible:\n possible_actions.append(action)\n \n return possible_actions", "title": "" }, { "docid": "148f47efe74b2d28e3137a860aa2625d", "score": "0.5328687", "text": "def _actions(self):\n return ACTIONS", "title": "" }, { "docid": "1e6e1c9e164324c37c0d8764b5b24b32", "score": "0.53124726", "text": "def getActionList(self,state=None):\n theState = self.currentState if state is None else state\n\n actionList = []\n for event, eventData in self.settings[\"actions\"].items():\n name = event\n freq = eventData[\"frequency\"][theState] if isinstance(eventData[\"frequency\"], dict) \\\n else eventData[\"frequency\"]\n\n actionList.append(dict(name=name, frequency=freq))\n\n return actionList", "title": "" }, { "docid": "4265a00efe5195c58552e912bffda93d", "score": "0.53057724", "text": "def getPossibleActions(self, state):\r\n\r\n pass", "title": "" }, { "docid": "a9f8200c1550b424d7603675d6fbbdd3", "score": "0.52765906", "text": "def _apply_actions(self, actions): \n if self.single_agent:\n self.traffic_signals[self.ts_ids[0]].set_next_phase(actions)\n else:\n for ts, action in actions.items():\n self.traffic_signals[ts].set_next_phase(action)", "title": "" }, { "docid": "a41a323cf3a77f6b194c61840ca24576", "score": "0.52706873", "text": "def actions(self, actions):\n\n self._actions = actions", "title": "" }, { "docid": "29bfdf49105b4aab60c03e90695b4929", "score": "0.52629185", "text": "def _getRequestActionTitleList(self):\n request_action_title_list = []\n for i in range(actionCount+1)[1:]:\n getRequestAction = getattr(self, \"getActionRequest%s\" % i, None)\n if callable(getRequestAction):\n getActionTitle = getattr(self, \"getActionTitle%s\" % i, None)\n if getRequestAction() and getActionTitle is not None and getActionTitle()!=\"\":\n request_action_title_list.append(getActionTitle())\n return request_action_title_list", "title": "" }, { "docid": "76045b373549956b770b94d18144bdd6", "score": "0.5262247", "text": "def actions(self) -> Sequence[Any]:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "76045b373549956b770b94d18144bdd6", "score": "0.5262247", "text": "def actions(self) -> Sequence[Any]:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "2cfd3a545ce478ff4147c4aa719db237", "score": "0.5254875", "text": "def convert_actions(self,actions):\n convertedActions = []\n for act in actions:\n convertedActions.append(act[1]*3 + act[0])\n convertedActions = np.asarray(convertedActions)\n #Convert to one-hot\n num_actions = 9\n convertedActions = convertedActions.reshape(-1)\n convertedActions = np.eye(num_actions, dtype=np.float32)[convertedActions]\n return convertedActions", "title": "" }, { "docid": "66a493ab0a84e67a4cdb4abd81f7c238", "score": "0.52541506", "text": "def action(self, actions, priority=False):\n auth_key = self.digest_auth(self.nonce)\n data = {\n \"request\": {\n 
\"actions\": actions,\n \"auth-key\": auth_key,\n \"cnonce\": self.nonce,\n \"id\": self.id_counter,\n \"priority\": priority,\n \"session-id\": self.session_id,\n }\n }\n logger.debug(\"Payload: %s\", data)\n r = self.session.post(\"http://{}{}\".format(self.ipaddr, self.request_endpoint), data = {'req': json.dumps(data)})\n r.raise_for_status()\n self.id_counter += 1\n return r.json()", "title": "" }, { "docid": "9e1b0ab1e4437e81be19c4e32988b69d", "score": "0.5253101", "text": "def legal_actions(self) -> List[Action]:\n pass", "title": "" }, { "docid": "8fc0e102934922b488b56f5344b3a21f", "score": "0.5251009", "text": "def action_space(self):\n \n actions = np.array([0,1])\n return actions", "title": "" }, { "docid": "b9dd45e951193113b0eda08e56269a04", "score": "0.52334183", "text": "def actions(self):", "title": "" }, { "docid": "37d4f762bcdfd83e47b08e9f41d1c4b1", "score": "0.5232622", "text": "def actions(self):\n return self._actions.items()", "title": "" }, { "docid": "8812aef4b247166963e8f6f8e8fd1818", "score": "0.5213092", "text": "def choose_action(self):\n # number starts from 1\n number = 1\n print(\"\\t\\tACTION: \")\n for element in self.action:\n # each element within self.action defined above will be listed.\n print(f\"\\t\\t\\t{number}: {element}\")\n number = number + 1", "title": "" }, { "docid": "05b443433124cabf23de6a31cb0146d8", "score": "0.52056897", "text": "def getActionsFromLog(self,file_,job):\n fin=None\n actions = []\n flag=0\n txt=\"\"\n try:\n if file_.endswith(\".zip\"):\n try:\n zf = zipfile.ZipFile(file_,'r')\n for i in zf.filelist:\n if i.filename.endswith(\".log\"):\n fin = zf.open(i.filename,'r')\n break\n # We expect results zips to only contain 1 text file\n except IOError as e:\n logging.error(\"ActionEventPlotter couldn't read the log file \" + file_)\n except IOError as e:\n logging.error(\"Zip file not found \" +file_)\n if not fin:\n return actions\n for line in fin:\n line=line.decode(\"utf-8\")\n if len(line)==0:\n continue\n if \"[Action]\" in line.split():\n Action = {}\n ActionText =line.split(\"[Action]\",1)[1].strip()\n ActionTimeIndex = ActionText.find(\"(s)\")\n if ActionTimeIndex == -1:\n ActionTimeIndex = ActionText.find(\",\")\n try:\n Action[\"time\"] = float(ActionText[0:ActionTimeIndex].strip())\n except NumberFormatException as e:\n logging.error(\"Couldn't correctly parse log file time to double\")\n Action[\"text\"] = ActionText[ActionText.find(\",\") + 1:].strip()\n flag=1\n txt+=ActionText[ActionText.find(\",\") + 1:].strip()\n\n elif flag==1 and line.startswith(\"\\t\"):\n txt+=line\n elif flag==1 and not line.startswith(\"\\t\"):\n txt=txt.replace(\"\\t\",\"\\n\\t\",1)\n Action[\"text\"]=txt\n if job.logger==True and job.log>2:\n logging.info(\"Adding Action:\" + Action[\"text\"])\n actions.append(Action)\n txt=\"\"\n flag=0\n fin.close()\n return actions", "title": "" }, { "docid": "b65bb8be96ba63857c6f53ac72ebb35c", "score": "0.51917684", "text": "def actions(self):\n return self.__actions", "title": "" }, { "docid": "8f2df053fbd6a9f9b222a7ffa867d7ad", "score": "0.5185481", "text": "def actions(self, state):\n\n\n action_list = []\n right = tuple([state.worker[0] + 1, state.worker[1]])\n left = tuple([state.worker[0] - 1, state.worker[1]])\n down = tuple([state.worker[0], state.worker[1] + 1])\n up = tuple([state.worker[0], state.worker[1] - 1])\n # Creating a 2d array of the taboo cells\n taboo_2d = []\n taboo_2d.append([])\n i = 0\n for line in self.taboo:\n for char in line:\n if char == '\\n':\n 
taboo_2d[i].append(char)\n taboo_2d.append([])\n i += 1\n else:\n taboo_2d[i].append(char)\n\n # This section could've been shortened by about 3/4ths for better\n # readability. We didn't have time to implement the change.\n if not self.macro: # Elementary actions\n if right not in state.walls:\n if right in state.boxes:\n next_right = tuple([right[0] + 1, right[1]])\n # If box is to the right of the worker, check if we can move it\n if (next_right not in state.boxes and next_right not in state.walls):\n if not self.allow_taboo_push:\n if taboo_2d[next_right[1]][next_right[0]] != 'X':\n action_list.append('Right')\n else:\n action_list.append('Right')\n else:\n action_list.append('Right')\n\n if left not in state.walls:\n if left in state.boxes:\n next_left = tuple([left[0] - 1, left[1]])\n if (next_left not in state.boxes and next_left not in state.walls):\n if not self.allow_taboo_push:\n if taboo_2d[next_left[1]][next_left[0]] != 'X':\n action_list.append('Left')\n else:\n action_list.append('Left')\n else:\n action_list.append('Left')\n\n if down not in state.walls:\n if down in state.boxes:\n next_down = tuple([down[0], down[1] + 1])\n if (next_down not in state.boxes and next_down not in state.walls):\n if not self.allow_taboo_push:\n if taboo_2d[next_down[1]][next_down[0]] != 'X':\n action_list.append('Down')\n else:\n action_list.append('Down')\n else:\n action_list.append('Down')\n\n if up not in state.walls:\n if up in state.boxes:\n next_up = tuple([up[0], up[1] - 1])\n if (next_up not in state.boxes and next_up not in state.walls):\n if not self.allow_taboo_push:\n if taboo_2d[next_up[1]][next_up[0]] != 'X':\n action_list.append('Up')\n else:\n action_list.append('Up')\n else:\n action_list.append('Up')\n\n\n # If macro moves, check which boxes can be moved in what direction\n else:\n macro_moves = state.boxes\n\n for cell in macro_moves:\n right = tuple([cell[0] + 1, cell[1]])\n left = tuple([cell[0] - 1, cell[1]])\n down = tuple([cell[0], cell[1] + 1])\n up = tuple([cell[0], cell[1] - 1])\n\n # Checking left and right at the same time because if worker can stand\n # on the left side but there is a wall on the right, then you cant\n # push the box in that dirrection. 
Same for up and down.\n if (right not in state.walls and right not in state.boxes and\n left not in state.walls and left not in state.boxes):\n\n if can_go_there(state, right):\n if self.allow_taboo_push:\n action_list.append([right, 'Right'])\n elif taboo_2d[left[1]][left[0]] != 'X':\n action_list.append([right, 'Right'])\n if can_go_there(state, left):\n if self.allow_taboo_push:\n action_list.append([left, 'Left'])\n elif taboo_2d[right[1]][right[0]] != 'X':\n action_list.append( [left, 'Left'])\n action_list.append([left, 'Left'])\n\n\n if (down not in state.walls and down not in state.boxes and\n up not in state.walls and up not in state.boxes):\n\n if can_go_there(state, down):\n if self.allow_taboo_push:\n action_list.append([down, 'Down'])\n elif taboo_2d[up[1]][up[0]] != 'X':\n action_list.append([down, 'Down'])\n if can_go_there(state, up):\n if self.allow_taboo_push:\n action_list.append([up, 'Up'])\n elif taboo_2d[down[1]][down[0]] != 'X':\n action_list.append( [up, 'Up'])\n\n\n return action_list", "title": "" }, { "docid": "a409323766b39cfaf02be81cf48816a7", "score": "0.5183881", "text": "def _Dynamic_AddActions(self, request, _):\r\n\r\n\r\n if ((len(self.__tx_actions) + request.add_request_size()) >\r\n _MAX_ACTIONS_PER_TXN):\r\n raise apiproxy_errors.ApplicationError(\r\n datastore_pb.Error.BAD_REQUEST,\r\n 'Too many messages, maximum allowed %s' % _MAX_ACTIONS_PER_TXN)\r\n\r\n new_actions = []\r\n for add_request in request.add_request_list():\r\n self.__ValidateTransaction(add_request.transaction())\r\n clone = taskqueue_service_pb.TaskQueueAddRequest()\r\n clone.CopyFrom(add_request)\r\n clone.clear_transaction()\r\n new_actions.append(clone)\r\n\r\n self.__tx_actions.extend(new_actions)", "title": "" }, { "docid": "fd89ae4e6def4c31f6571b754c945dc7", "score": "0.51812786", "text": "def select_actions(self):\n if self.time == 0:\n next_action = self._select_actions_confounded()\n else:\n next_action = self._select_actions_unconfounded()\n return next_action", "title": "" }, { "docid": "102048cf993524e5a4e4e43716692239", "score": "0.51696616", "text": "def actions(self):\n pass", "title": "" }, { "docid": "102048cf993524e5a4e4e43716692239", "score": "0.51696616", "text": "def actions(self):\n pass", "title": "" }, { "docid": "5dd6a579d5a1c5cfa8feb4a41a33e5c5", "score": "0.5159415", "text": "def _parse_action(self, string, location, tokens):\n return NotImplementedError", "title": "" }, { "docid": "511f25400553a2e205b6610c91387224", "score": "0.51536596", "text": "def actions(self, state):\n ## Finish me! 
##\n actions = []\n if(state[-1] == 0):\n player_states = list(state[:self.size])\n for i,x in enumerate(player_states):\n if(x !=0):\n actions.append(i)\n elif(state[-1] == 1):\n player_states = list(state[self.size+1:self.size*2+1])\n for i,x in enumerate(player_states):\n if(x !=0):\n actions.append(i+(self.size +1))\n return actions", "title": "" }, { "docid": "463363fc62e6f9d5c72d1f20442400d6", "score": "0.5152589", "text": "def actions(self, state: str) -> list:\n possible_actions = []\n fluentstate = decode_state(state, self.state_map)\n positive_fluent_set = set(fluentstate.pos)\n negative_fluent_set = set(fluentstate.neg)\n for action in self.get_actions():\n if set(action.precond_pos).issubset(positive_fluent_set) and set(action.precond_neg).issubset(negative_fluent_set):\n possible_actions.append(action) \n return possible_actions", "title": "" }, { "docid": "796539ae6a93a7b430279b417d808ab1", "score": "0.51518434", "text": "def parse(node):\r\n\t\tgetattr(events.actions, node.tag)(**node.attrib)", "title": "" }, { "docid": "d71371ba4a58793ba3ded18a3b97278b", "score": "0.51364625", "text": "def get_action(self, obs):\n pass", "title": "" }, { "docid": "6ccf95b4e25c6f72f37efe1ec41db589", "score": "0.5136251", "text": "def actions(self):\n all_actions = {}\n for dct_action in self._data[\"actions\"]:\n if dct_action:\n all_actions.update(dct_action)\n return all_actions", "title": "" }, { "docid": "92fc25ce5f64b37399ce39bcbe5f5cd7", "score": "0.51277786", "text": "def format_action(action: Action) -> List[Text]:\n if is_a(action, LogInfo):\n return ['LogInfo({})'.format(format_substitutions(cast(LogInfo, action).msg))]\n elif is_a(action, EmitEvent):\n return [\"EmitEvent(event='{}')\".format(cast(EmitEvent, action).event.name)]\n elif is_a(action, ExecuteProcess):\n typed_action = cast(ExecuteProcess, action)\n msg = 'ExecuteProcess(cmd=[{}], cwd={}, env={}, shell={})'.format(\n ', '.join([format_substitutions(x) for x in typed_action.cmd]),\n typed_action.cwd if typed_action.cwd is None else \"'{}'\".format(\n format_substitutions(typed_action.cwd)\n ),\n typed_action.env if typed_action.env is None else '{' + ', '.join(\n ['{}: {}'.format(format_substitutions(k), format_substitutions(v))\n for k, v in typed_action.env]) + '}',\n typed_action.shell,\n )\n return [msg]\n elif is_a(action, RegisterEventHandler):\n # Different variable name used to assist with type checking.\n typed_action2 = cast(RegisterEventHandler, action)\n result = [\"RegisterEventHandler('{}'):\".format(typed_action2.event_handler)]\n result.extend(indent(format_event_handler(typed_action2.event_handler)))\n return result\n else:\n return [\"Action('{}')\".format(action)]", "title": "" }, { "docid": "2f52a6c784d5dc15dae690c656b330c2", "score": "0.51243836", "text": "def getActions(self):\n actions = self.actions\n if isinstance(actions, dict):\n actions = actions.values()\n\n actions = list(actions)\n\n if self.add_noop_action:\n actions.append(self.NOOP)\n return actions", "title": "" }, { "docid": "d591d6511a7ab428fb833b6fe894a4cf", "score": "0.5120888", "text": "def action_names(self) -> Sequence[Text]:\n return ()", "title": "" }, { "docid": "92c95c44d18064c7821546f89f9886ad", "score": "0.51185334", "text": "def _process_action_requested(request_body_dict):\n for action in JOBS_ETL_ACTIONS:\n if action in request_body_dict:\n if type(request_body_dict[action]) is not bool:\n raise ValueError(\"type error for \", action)\n request_body_dict[action] = int(request_body_dict[action])\n action_timestamp_str = 
action + \"_put_at\"\n request_body_dict[action_timestamp_str] = str(datetime.datetime.utcnow())", "title": "" }, { "docid": "3f51196024371e541760f012597c70bf", "score": "0.5116039", "text": "def _get_actions(self) -> List[str]:\n self._set_group_ids()\n self._set_menu_items()\n return [a.dump() for a in self.actions]", "title": "" }, { "docid": "a24b53908b362284924fc866969176b3", "score": "0.5110385", "text": "def actions(self, cur_state):\r\n rows = string_to_list(cur_state)\r\n empty_row, empty_col = get_location(rows, 'e')\r\n actions = []\r\n if empty_row > 0:\r\n actions.append(rows[empty_row - 1][empty_col])\r\n if empty_row < 2:\r\n actions.append(rows[empty_row + 1][empty_col])\r\n if empty_col > 0:\r\n actions.append(rows[empty_row][empty_col - 1])\r\n if empty_col < 2:\r\n actions.append(rows[empty_row][empty_col + 1])\r\n return actions", "title": "" }, { "docid": "d319e5705d91a109d53e04877cb369e5", "score": "0.5100636", "text": "def _parse_actions(self, actions: List[List[float]]) -> List[Mapping[str, float]]:\r\n\r\n actions = list(actions)\r\n building_actions = []\r\n\r\n if self.central_agent:\r\n actions = actions[0]\r\n number_of_actions = len(actions)\r\n expected_number_of_actions = self.action_space[0].shape[0]\r\n assert number_of_actions == expected_number_of_actions,\\\r\n f'Expected {expected_number_of_actions} actions but {number_of_actions} were parsed to env.step.'\r\n \r\n for building in self.buildings:\r\n size = building.action_space.shape[0]\r\n building_actions.append(actions[0:size])\r\n actions = actions[size:]\r\n\r\n else:\r\n building_actions = [list(a) for a in actions]\r\n\r\n # check that appropriate number of building actions have been provided\r\n for b, a in zip(self.buildings, building_actions):\r\n number_of_actions = len(a)\r\n expected_number_of_actions = b.action_space.shape[0]\r\n assert number_of_actions == expected_number_of_actions,\\\r\n f'Expected {expected_number_of_actions} for {b.name} but {number_of_actions} actions were provided.'\r\n\r\n active_actions = [[k for k, v in b.action_metadata.items() if v] for b in self.buildings]\r\n actions = [{k:a for k, a in zip(active_actions[i],building_actions[i])} for i in range(len(active_actions))]\r\n actions = [{f'{k}_action':actions[i].get(k, np.nan) for k in b.action_metadata} for i, b in enumerate(self.buildings)]\r\n\r\n return actions", "title": "" }, { "docid": "eb28202fcb49e1f3b8049e32c2b42922", "score": "0.509689", "text": "def preprocess_action(self, action):\n complete_action = np.zeros(self.task.action_space.shape)\n complete_action[:3] = action\n return complete_action", "title": "" }, { "docid": "a035cd1743df025b6f37706811e57464", "score": "0.5093189", "text": "def actions(self,state):\n is_decision, _ = state\n if is_decision:\n return [frozenset()] + [frozenset([i]) for i,t in enumerate(self.config.action_types)] \n else:\n return [frozenset()]", "title": "" }, { "docid": "358ec0c990dd1f1d63d6b05e4e0d7b2a", "score": "0.5091469", "text": "def _get_actions(self):\n return self.__actions", "title": "" }, { "docid": "8c06d2d19fb576638594946905bdb3ee", "score": "0.5087164", "text": "def execute_actions(self, actions):\n for thing, action, parameter in actions:\n try:\n # the method which applies the action is something like:\n # self.thing_ACTION(parameter)\n method = getattr(self, 'thing_' + str(action), None)\n if method:\n event = method(thing, parameter)\n self.event(thing, event)\n else:\n self.event(thing, u'unknown action \"%s\"' % action)\n except Exception as err:\n 
event = u'error executing %s action: %s' % (action, err.message)\n self.event(thing, event)\n if self.debug:\n raise", "title": "" }, { "docid": "dcfc1c1df9d28ef739f4a5fc502cc052", "score": "0.50836843", "text": "def extractActionSequence(model, actions):\n \"*** YOUR CODE HERE ***\"\n if not model:\n return []\n ret = []\n i = 0\n while True:\n flag = False\n for action in actions:\n symbol = logic.PropSymbolExpr(action, i)\n if symbol in model and model[symbol]:\n ret += [action]\n flag = True\n if not flag:\n break\n i+=1\n print ret\n return ret", "title": "" }, { "docid": "ce9839b612b7eec324de4962d84b3ed5", "score": "0.507964", "text": "def parse_data(data, data_select_action, label_select_action):\n for entry in data:\n data_select_action.append(entry['state'])\n label_select_action.append(entry['action'])", "title": "" }, { "docid": "2f3b13b71bbca11854870951ed9fa086", "score": "0.5072497", "text": "def block_action_functions(action: str):\n action_mappings = {\n ConversationButtonActions.invite_user: [add_user_to_conversation],\n }\n\n # this allows for unique action blocks e.g. invite-user or invite-user-1, etc\n for key in action_mappings.keys():\n if key in action:\n return action_mappings[key]\n return []", "title": "" }, { "docid": "3ab86fe1630cf880993ef7038398dddc", "score": "0.5062571", "text": "def supported_action_list(self) -> List[str]:\r\n pass", "title": "" }, { "docid": "4d4a1617ff9135d0b9bd98241a887ccb", "score": "0.50564444", "text": "def create_actions(self):\n pass", "title": "" }, { "docid": "4d4a1617ff9135d0b9bd98241a887ccb", "score": "0.50564444", "text": "def create_actions(self):\n pass", "title": "" }, { "docid": "f81bf8606cb4baec67b2b2914295670d", "score": "0.50488544", "text": "def actions(self) -> 'outputs.ActivityLogAlertActionListResponse':\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "f392a47ea625cb9228e3407a106d8696", "score": "0.5045434", "text": "def extract_use_action_msgs(outfile, use_action, it_name, kwargs):\n for f in sorted(use_action_msgs):\n if type(use_action) is dict and f in use_action:\n if it_name:\n use_action[f] = writestr(use_action[f], **kwargs)\n # Recursively check sub objects as they may contain more messages.\n if type(use_action) is list:\n for i in use_action:\n extract_use_action_msgs(outfile, i, it_name, kwargs)\n elif type(use_action) is dict:\n for (k, v) in sorted(use_action.items(), key=lambda x: x[0]):\n extract_use_action_msgs(outfile, v, it_name, kwargs)", "title": "" }, { "docid": "7daf99070891692a3419ddb5428ac734", "score": "0.5043327", "text": "def __getitem__(*args):\n return _coin.SoActionMethodList___getitem__(*args)", "title": "" }, { "docid": "02bacc81a1aa8fecce1623d9adbf559d", "score": "0.5031347", "text": "def _take_action(self, action):\n # The 0th action is a \"do nothing\" action.\n if action == self.no_op_action:\n return\n\n # Subtract one from the action to keep things simple.\n action -= 1\n\n if action < self.gen_action_array.shape[0]:\n # Pass the action into the gens method.\n self._take_action_gens(action)\n return\n\n # Adjust the action for shunts.\n action -= self.gen_action_array.shape[0]\n\n if action < self.num_shunts:\n # Pass the action into the shunts method.\n self._take_action_shunts(action)\n return\n\n # Adjust the action for LTCs.\n action -= self.num_shunts\n self._take_action_ltcs(action)\n return", "title": "" }, { "docid": "8372f57d7f2fc9e80482466f7ec6d513", "score": "0.5027255", "text": "def actions(self, state):\n (J1, J2, J3) = state\n (C1, C2, C3) = 
self.capacities\n legal = [] \n if J3<C3: legal.append(('fetch', 3))\n if J1>0: legal.append(('dump', 1))\n if J2>0: legal.append(('dump', 2))\n if J3>0: legal.append(('dump', 3)) \n if J1<C1: legal.append(('fetch', 1))\n if J2<C2: legal.append(('fetch', 2))\n if J1<C1 and J2>0: legal.append(('pour', 2, 1))\n if J3<C3 and J2>0: legal.append(('pour', 2, 3))\n if J1<C1 and J3>0: legal.append(('pour', 3, 1))\n if J2<C2 and J3>0: legal.append(('pour', 3, 2))\n if J2<C2 and J1>0: legal.append(('pour', 1, 2))\n if J3<C3 and J1>0: legal.append(('pour', 1, 3))\n return legal", "title": "" }, { "docid": "47877fc995f870094e7380b09398b3c9", "score": "0.5024331", "text": "def __is_action(self, a):\n return a in [a.name for a in self.actions]", "title": "" }, { "docid": "695096e46934b0d631876654cf443938", "score": "0.5022552", "text": "def actions(self) -> str:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "38d9b93042e341b6762bdd2359022c1a", "score": "0.5022428", "text": "def __call__(self, actions: list, ns: str = None, json=False):\n\n data = {\n \"action\": self.action,\n \"v\": 23,\n \"message\": {\n \"token\": self.api.token,\n \"actions\": actions,\n },\n }\n\n if json:\n return data\n else:\n return self.send_websocket_request(self.action, data, ns=ns)", "title": "" }, { "docid": "8f67f9c7dece73245f71a458a553ae11", "score": "0.5020272", "text": "def actions(self):\n return self.get_actions()", "title": "" }, { "docid": "210382d6f868dcf29def2faabd7403c1", "score": "0.50174296", "text": "def get_possible_actions(self):\n return [0, 1, 2, 3]", "title": "" }, { "docid": "93aced0ee5c965dc45e31406420ca86c", "score": "0.5015989", "text": "def getPossibleActions(self, state):\n abstract", "title": "" }, { "docid": "7c2a062453bdfa52c2a5a31f4d7b1c0f", "score": "0.5012153", "text": "def _GetActionDescriptions(self):\r\n action_names = self.actions.keys()\r\n action_names.sort()\r\n desc = ''\r\n for action_name in action_names:\r\n desc += ' %s: %s\\n' % (action_name, self.actions[action_name].short_desc)\r\n return desc", "title": "" }, { "docid": "6a3f12f53f3485948b0eb048ba1204ab", "score": "0.501136", "text": "def get_permission_actions():", "title": "" }, { "docid": "7cb07770f294a8ff37828c4fbceb78eb", "score": "0.5004527", "text": "def addActions(self, actions, category):\n for action in actions:\n self.addAction(action, category)", "title": "" } ]
e544453d4703f1678a7fddec7ad18e3c
Return the current user action with the action parameters.
[ { "docid": "199f1fef2f8bdfe98639a7eb4ea04156", "score": "0.7800999", "text": "def get_current_action(self):\r\n # get current action\r\n action = self.user_action_generator.action\r\n \r\n # get current key if the action was KeyPress\r\n key = self.user_action_generator.key\r\n \r\n # get key modifier\r\n key_modifier = self.user_action_generator.key_modifier\r\n \r\n # retrieve action parameters and normalize using the window size\r\n parameters = self.normalize_action_parameters(\r\n self.user_action_generator.get_action_parameters())\r\n return action, key, key_modifier, parameters", "title": "" } ]
[ { "docid": "b8a16c06f0468742277f2b40eef86a0c", "score": "0.75212395", "text": "def _get_action(self):\n return self.__action", "title": "" }, { "docid": "5754aaab8e2accf8435431cbb23e2c13", "score": "0.739199", "text": "def get_action(self):\n return self._action", "title": "" }, { "docid": "dc11547c469136d71a13c44b839e6c29", "score": "0.73834616", "text": "def get_action(self):\n pass", "title": "" }, { "docid": "6a67a54182fde3c04cf03361c7487853", "score": "0.73242736", "text": "def current_action(self):\n return self.actions[self.current_action_index]", "title": "" }, { "docid": "7369e49e11d292c1b140bd6c0b72e76f", "score": "0.7313286", "text": "def action(self) -> str:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "7369e49e11d292c1b140bd6c0b72e76f", "score": "0.7313286", "text": "def action(self) -> str:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "83c0028c88937a7c79ba3b04a8e12166", "score": "0.7299995", "text": "def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "d5e0ff33372fe04069ba58d728bdd954", "score": "0.7201889", "text": "def action(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "3799bb5f87c2960e6ccec1ebb5dd5c35", "score": "0.71984535", "text": "def get_action(self):\n action = agi.get_variable('RECOG_INSTANCE(0/0/action)')\n agi.verbose('got action %s' % action)\n return action", "title": "" }, { "docid": "f3c0ca6ea213159306719662d18a7da8", "score": "0.7182314", "text": "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "8ec733c9cdcb5e415641d4ce487729f9", "score": "0.71401703", "text": "def action(self):\n return self._action", "title": "" }, { "docid": "9ef0e95910e9f473161d2736e78a9a23", "score": "0.7054598", "text": "def action(self):\n return self._action", "title": "" }, { "docid": "9ef0e95910e9f473161d2736e78a9a23", "score": "0.7054598", "text": "def action(self):\n return self._action", "title": "" }, { "docid": "72b3e0960909d738c7262d5a5e80276f", "score": "0.7022429", "text": "def action(self):\n return self._action", "title": "" }, { "docid": "b2244688d7132091a644b1ca5ece15cb", "score": "0.70162654", "text": "def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "2e03019647e5500e0450d78ca7814fb5", "score": "0.6972772", "text": "def get_action(self):\n if 'action' in self.config_data:\n return self.config_data['action']\n return ''", "title": "" }, { "docid": "327e71975c27b717bc2b0ac0b931bf75", "score": "0.6944908", "text": "def action(self) -> Optional[str]:\n return self.__action", "title": "" }, { "docid": "1f58968a9c6a03ee0fdac67c9c27f4b1", "score": "0.6866644", "text": "def current_action(self):\n return self._genome.get_cmd(self._i)", "title": "" }, { "docid": "fa89fec4c04ec46a937e05348e1a7036", "score": "0.6787872", "text": "def get_action(self, name):\r\n return self._global_actions[name]", "title": "" }, { "docid": "1e5065058e8d99647f42e0c09dd64bcc", "score": "0.67371744", "text": "def get_action(self, *args):\n # Add the AI to make the choice TODO\n\n # If an ai exists\n if self.ai == 'torch':\n # Get possible actions\n actions = self.get_action_space()\n return actions[self.ai.action(self.state)]\n elif self.ai:\n action = self.get_action_space()\n return action[self.ai.state_check(self.state)]\n\n # Otherwise\n else:\n\n # Default AI\n return self.no_ai()", "title": "" }, { "docid": 
"82039141b5a4d688d8952cba70951af7", "score": "0.6719435", "text": "def get_action(self, state):\n action = self.policy.get_action(state)\n return action", "title": "" }, { "docid": "3ac1e0fab9ae480191c723b928eb7f2a", "score": "0.671646", "text": "def get_action(self, *args):\n return 0", "title": "" }, { "docid": "3101e639e81c17baf43a540dc8a029e4", "score": "0.6700917", "text": "def get_action_from_user(self):\n\t\tif self.wait_for_explicit_human_action:\n\t\t\twhile len(self.renderer.pressed_keys) == 0:\n\t\t\t\tself.renderer.get_events()\n\n\t\tif self.key_to_action == {}:\n\t\t\t# the keys are the numbers on the keyboard corresponding to the action index\n\t\t\tif len(self.renderer.pressed_keys) > 0:\n\t\t\t\taction_idx = self.renderer.pressed_keys[0] - ord(\"1\")\n\t\t\t\tif 0 <= action_idx < self.action_space_size:\n\t\t\t\t\treturn action_idx\n\t\telse:\n\t\t\t# the keys are mapped through the environment to more intuitive keyboard keys\n\t\t\t# key = tuple(self.renderer.pressed_keys)\n\t\t\t# for key in self.renderer.pressed_keys:\n\t\t\tfor env_keys in self.key_to_action.keys():\n\t\t\t\tif set(env_keys) == set(self.renderer.pressed_keys):\n\t\t\t\t\treturn self.key_to_action[env_keys]\n\n\t\t# return the default action 0 so that the environment will continue running\n\t\treturn self.default_action", "title": "" }, { "docid": "5e71a3cc9acc08ab890f18e81354dcf0", "score": "0.6691058", "text": "def choose_action(self):\n action = self.policy.choose(self)\n return action", "title": "" }, { "docid": "53d4fbddbce6ee0f6bb802822eab61e3", "score": "0.66163546", "text": "def action(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"action\"),\n )", "title": "" }, { "docid": "469ee86893237e9e34c46ec725034039", "score": "0.66084176", "text": "def user_action_key(user_action_name=None):\n return db.Key.from_path('UserAction', user_action_name or 'default_action')", "title": "" }, { "docid": "69cfc0a2387a15f304253724c6de3ad8", "score": "0.6595023", "text": "def get_action(self, observ, feed_dict=None):\n return self._get_action_tmplt_fn(observ, feed_dict)", "title": "" }, { "docid": "a57133be4735ca83da67b89c6d72cb9b", "score": "0.65894955", "text": "def get_action_from_args(request_args: dict) -> Action:\n # YOUR CODE HERE\n return Action(request_args.get('action', 'no_nudge'))", "title": "" }, { "docid": "bac20a23cf8932b6f0726926760ef1e3", "score": "0.6571338", "text": "def action(self) -> str | None:\n return self._received_message.action", "title": "" }, { "docid": "c0cfdfa2eb5e5fd4068fbea5587ff89a", "score": "0.6486747", "text": "def get_action(self, *args):\n return self.rng.randint(0, self.available_actions)", "title": "" }, { "docid": "c7d6b228457eb290e4bcb54272288551", "score": "0.6476979", "text": "def get_action(self, obs):\n return self._policy.get_action(obs)", "title": "" }, { "docid": "c7d6b228457eb290e4bcb54272288551", "score": "0.6476979", "text": "def get_action(self, obs):\n return self._policy.get_action(obs)", "title": "" }, { "docid": "795e26b696febbcf05639233e3f815c6", "score": "0.6449203", "text": "def get_action_from_args(request_args: dict) -> Action:\n # YOUR CODE HERE\n return Action()", "title": "" }, { "docid": "a9471e5aee649a316af63589f5c28439", "score": "0.64336157", "text": "def get_authorization_action(self):\n raise NotImplementedError", "title": "" }, { "docid": "3bee18ad79400838f36376406bed4d56", "score": "0.63443714", "text": "def get_action(self, game_state):\n return self.get_action_helper(game_state, 0, -math.inf, math.inf)", "title": "" 
}, { "docid": "52e46373541cce9c261a87ec36f56036", "score": "0.63394296", "text": "def default_action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"default_action\")", "title": "" }, { "docid": "52e46373541cce9c261a87ec36f56036", "score": "0.63394296", "text": "def default_action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"default_action\")", "title": "" }, { "docid": "c8755ab15e3945f19db887e712acd573", "score": "0.6290322", "text": "def get_action(self, state):\n raise NotImplementedError(\"Override me!\")", "title": "" }, { "docid": "b617310bdcc69f1b08f30f77f9acbe6e", "score": "0.62799406", "text": "def get_action(self, obs_dict, goal_dict=None):\n assert not self.nets.training\n\n return self.nets[\"actor\"](obs_dict=obs_dict, goal_dict=goal_dict)", "title": "" }, { "docid": "e1254e78729c2153bf7e53dd02ae4c40", "score": "0.6237663", "text": "def getAction(self, state):\n\n reply_msg = self.update(state)\n self.previous_action = reply_msg.action\n return reply_msg.action", "title": "" }, { "docid": "7caba4814e1558e7922cf1c112dabf20", "score": "0.62332773", "text": "def get_action_info(self,action_name):\n\t\treturn self.get_instructions().get(action_name,{})", "title": "" }, { "docid": "fe83d0a6e19537c4f3c7ec68e3fa08ea", "score": "0.6233162", "text": "def action(self, to_self=False):\n return ActionExpression.build(self, to_self)", "title": "" }, { "docid": "69e2c2a9fcb898f32738528a5c58c713", "score": "0.6212177", "text": "def action(self) -> 'outputs.StatementActionPropertiesResponse':\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "453609ad01cbb4101a27d56afa7bace3", "score": "0.6192671", "text": "def get_action(self, game_state):\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n return self.get_action_helper(game_state, 0)", "title": "" }, { "docid": "453609ad01cbb4101a27d56afa7bace3", "score": "0.6192671", "text": "def get_action(self, game_state):\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n return self.get_action_helper(game_state, 0)", "title": "" }, { "docid": "166f74c1de0cdda6f0290078409b34a6", "score": "0.61851376", "text": "async def prep_action(self):\n logging.info(\"[RL Agent] Selecting action...\")\n self.action = self.policy.select_action(self.state)\n return self.action", "title": "" }, { "docid": "6b388fc2c583edddec06e9e95faf0ab1", "score": "0.61802435", "text": "def get_action(self, register):\n # type: (Register) -> Action\n return self.action", "title": "" }, { "docid": "e32cb0f4d378f907be3d8806256015bb", "score": "0.61792517", "text": "def get_object(self, queryset=None):\n return self.action", "title": "" }, { "docid": "0c5fe3317da28033dd4a4f8d24b80756", "score": "0.61789757", "text": "def action_name(self):\n if \"actionName\" in self._prop_dict:\n return self._prop_dict[\"actionName\"]\n else:\n return None", "title": "" }, { "docid": "9d490271a28b2827690187ab7a280a64", "score": "0.6163821", "text": "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n # action = None\n \"*** YOUR CODE HERE ***\"\n if len(legalActions) == 0:\n action = None\n elif util.flipCoin(self.epsilon):\n action = random.choice(legalActions)\n else:\n action = self.computeActionFromQValues(state)\n\n return action", "title": "" }, { "docid": "a4854fa87790c9edaaf903737499b3bc", "score": "0.6155726", "text": "def get_action(self, state, reward, num_actions):\n pass", "title": "" }, { "docid": "ed6639672ee5c605a74bebb55d84ef17", "score": "0.61553663", "text": "def _generic_action(self, action):\r\n t = action['type']\r\n 
self.logger.debug('Action type: %s', (t))\r\n if t in self.actions:\r\n kwargs = {k: action[k] for k in action.keys() if k != 'type'}\r\n return self.actions[t](**kwargs)\r\n return None", "title": "" }, { "docid": "0e369db309d340da69f01d654a8f2977", "score": "0.6152116", "text": "def default_action(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"default_action\")", "title": "" }, { "docid": "7b7ead0b384e3381dcca6b02aa0afa05", "score": "0.61300576", "text": "def resource_action(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"resource_action\")", "title": "" }, { "docid": "c102df2f64095a2e327568934379edfe", "score": "0.6128783", "text": "def action(self) -> Optional[pulumi.Input[Union[str, 'IpActionType']]]:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "18a7030cb41c1ebec7cecd71a9fff5fb", "score": "0.6127973", "text": "def action(self, environment):\n return environment.random_action()", "title": "" }, { "docid": "ad74680056c33bc7e46e2e9e3e289a94", "score": "0.6120896", "text": "def action(self) -> 'outputs.RulesEngineActionResponse':\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "210550a15a1254cc8dcb30dabf651c3e", "score": "0.6117033", "text": "def dispatch(self, *args, **kwargs):\n action = kwargs.pop('action', 'default')\n action_method = getattr(self, str(action), self.default)\n return action_method(*args, **kwargs)", "title": "" }, { "docid": "47d042c4f4bc0dc583a6fe948ae669dd", "score": "0.6115467", "text": "def getAction(self, state):\n action = QLearningAgent.getAction(self,state)\n self.doAction(state,action)\n return action", "title": "" }, { "docid": "47d042c4f4bc0dc583a6fe948ae669dd", "score": "0.6115467", "text": "def getAction(self, state):\n action = QLearningAgent.getAction(self,state)\n self.doAction(state,action)\n return action", "title": "" }, { "docid": "47d042c4f4bc0dc583a6fe948ae669dd", "score": "0.6115467", "text": "def getAction(self, state):\n action = QLearningAgent.getAction(self,state)\n self.doAction(state,action)\n return action", "title": "" }, { "docid": "0e91886688e0e66172985f05b878f1cc", "score": "0.6105076", "text": "def get_actions(self):\n\t\treturn self.get_instructions().get(\"actions\",{})", "title": "" }, { "docid": "739111e75d03cf73a94aabfed9ad1ac8", "score": "0.61011404", "text": "def getAction(self, state):\n # Pick Action\n actions = self.legalActions\n action = None\n # with probability self.epsilon, explore a random action\n if util.flipCoin(self.epsilon):\n return random.choice(actions)\n else:\n action = self.computeActionFromQValues(state)\n return action", "title": "" }, { "docid": "48f9d96ffa04e750e294377381391090", "score": "0.6094126", "text": "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n # epsilon decay\n epsmin = 0.01\n eps_decay = 0.9999\n self.epsilon = max(self.epsilon*eps_decay, epsmin)\n if util.flipCoin(self.epsilon):\n # Act randomly\n action = random.choice(legalActions)\n else:\n # Act greedly\n action = self.computeActionFromQValues(state)\n \n return action", "title": "" }, { "docid": "7296e0587e08a0939786aa7a928215a5", "score": "0.60844487", "text": "def getAction(self, state):\n action = QLearningAgent.getAction(self,state)\n self.doAction(state,action)\n return action", "title": "" }, { "docid": "81a167646ec9331e0ffd0dd5e46f840d", "score": "0.60839957", "text": "def other_action(self, action):\n return self.other_action_mapping.get(action, None)", 
"title": "" }, { "docid": "5f4aa7c98be9bf64152e46cd7a6b521f", "score": "0.6070014", "text": "def getAction(self, state, prev_state):\n # Pick Action\n actions = self.legalActions\n action = None\n # with probability self.epsilon, explore a random action\n if util.flipCoin(self.epsilon):\n return random.choice(actions)\n else:\n action = self.computeActionFromQValues(state, prev_state)\n return action", "title": "" }, { "docid": "5f4aa7c98be9bf64152e46cd7a6b521f", "score": "0.6070014", "text": "def getAction(self, state, prev_state):\n # Pick Action\n actions = self.legalActions\n action = None\n # with probability self.epsilon, explore a random action\n if util.flipCoin(self.epsilon):\n return random.choice(actions)\n else:\n action = self.computeActionFromQValues(state, prev_state)\n return action", "title": "" }, { "docid": "147bb929cfc4bc8b7089e9b69ac91ddd", "score": "0.60585177", "text": "def gotAction(channel, user, action, irc=None):", "title": "" }, { "docid": "3b1c0d8018f251ea79af109e9cc95ded", "score": "0.605764", "text": "def handle_action(self):\n key = self.getkey()\n if key in self.actions:\n action = self.actions[key]\n action['function'](\n *action.get('args', []), **action.get('kwargs', {})\n )", "title": "" }, { "docid": "9a828453a40bf6147f9fc3c8aace7551", "score": "0.60566896", "text": "def get_action(self, action_name):\n logger.debug(\"I am about to get a cloud function action: {}\".format(action_name))\n url = os.path.join(self.endpoint, 'api', 'v1', 'namespaces',\n self.namespace, 'actions', action_name)\n res = self.session.get(url)\n return res.json()", "title": "" }, { "docid": "a2851e9e789e5ea4e7d16e1c23170df2", "score": "0.6056326", "text": "def get_action():\n print(\"* Action *\")\n return _get_selection(\"(E)ncrypt or (D)ecrypt? 
\", \"ED\")", "title": "" }, { "docid": "c3acc1c75df3158560a8efc87d32ae28", "score": "0.6042729", "text": "def get(action_id):", "title": "" }, { "docid": "b3a2a695601de29cd382e2853ee3d3c6", "score": "0.6042556", "text": "def auto_action(self):\n return self._auto_action", "title": "" }, { "docid": "e81da9a94bd6aa383a5b1f2a7a7d3baa", "score": "0.6028129", "text": "def resource_action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_action\")", "title": "" }, { "docid": "11381643d7ebf8c90632c5cfb07f472e", "score": "0.60277617", "text": "def get_action(self):\n if self.data['cachet'].get('action') is None:\n return []\n else:\n return self.data['cachet']['action']", "title": "" }, { "docid": "844763d378b47371b93e21c80cdb0080", "score": "0.6019425", "text": "def default_action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_action\")", "title": "" }, { "docid": "844763d378b47371b93e21c80cdb0080", "score": "0.6019425", "text": "def default_action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_action\")", "title": "" }, { "docid": "93b984d0500fb8fa62228cde8061e7e2", "score": "0.60074216", "text": "def _idx_to_action(self, action_idx):\n\t\treturn self.actions[action_idx]", "title": "" }, { "docid": "3274a15fbb7bab26f3fb7dbe2836f91b", "score": "0.5997175", "text": "def action(self, image, action):\n return self.getHttp('images/{:s}/actions/{:s}'.format(image, action))", "title": "" }, { "docid": "0737b62a4868c7e2b31c8622c070a09c", "score": "0.5978531", "text": "def get_action(self, state, epsilon=0.0):\n\t\t#chosen_action = self.pick_action(state, epsilon)\n\t\t#Get the transformation corresponding to the chosen action\n\t\t#chosen_name, chosen_move = helpers.distance_to_action(state, self.agent_name, ast.literal_eval(chosen_action))\n\n\n\t\tchosen_name, chosen_move = self.pick_action(state, epsilon)\t\n\n\t\t#Return the name and transformation of the selected action\n\t\treturn chosen_move, chosen_name", "title": "" }, { "docid": "d967b25bb76618698dedb77459880389", "score": "0.5975784", "text": "def message_action(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"message_action\")", "title": "" }, { "docid": "c11389a5cefa373dd7e5e15b8944af32", "score": "0.59674513", "text": "def get_action_player(self):\n return self.players[self.action_player]", "title": "" }, { "docid": "33af08d992e76510bb8ccbaf1bef5604", "score": "0.5966349", "text": "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n if len(legalActions) == 0:\n return None\n if util.flipCoin(self.epsilon):\n return random.choice(legalActions)\n # implicit else here, of course\n return self.computeActionFromQValues(state)", "title": "" }, { "docid": "2412cfc901bf57ffb714e467340f0e1a", "score": "0.5960216", "text": "def actions(self) -> pulumi.Input[Sequence[pulumi.Input['ExperimentTemplateActionArgs']]]:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "03cc63ff0fdcbf4f828a6c92dfa09338", "score": "0.59500545", "text": "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n # epsilon decay\n epsmin = 0.01\n eps_decay = 0.9999\n self.epsilon = max(self.epsilon*eps_decay, epsmin)\n if util.flipCoin(self.epsilon):\n # Act randomly\n action = random.choice(legalActions)\n else:\n # Act greedly\n action = self.computeActionFromNN(state)\n \n return action", "title": "" }, { "docid": 
"5ea5906d457f3927521a78772e05d31b", "score": "0.5946885", "text": "def take_action(self, action):", "title": "" }, { "docid": "a65f6aef856e492061f50068febc6914", "score": "0.5939621", "text": "def select_action(self):\n pass", "title": "" }, { "docid": "7a622f191cb46c3957d77301b35d101f", "score": "0.593536", "text": "def action_type(self) -> str:\n return pulumi.get(self, \"action_type\")", "title": "" }, { "docid": "7a622f191cb46c3957d77301b35d101f", "score": "0.593536", "text": "def action_type(self) -> str:\n return pulumi.get(self, \"action_type\")", "title": "" }, { "docid": "20ff013026b9cb10ade5583892599eda", "score": "0.5933652", "text": "def action(self) -> Optional[pulumi.Input['WebAclRuleActionArgs']]:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "bf1c6a93d8ddca1d3b2200fedbfb9fbc", "score": "0.5929144", "text": "def cmd_action(self):\n pass", "title": "" }, { "docid": "4c4dd290ac31ef6944a02788ebd733c1", "score": "0.5924506", "text": "def getAction(self, gameState):\n util.raiseNotDefined()", "title": "" }, { "docid": "4e79279b860cec5ab21dda4197edeb5a", "score": "0.5919085", "text": "def actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExperimentTemplateActionArgs']]]]:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "8cc9bd6500030646086f07363c115d50", "score": "0.5918004", "text": "def action(self) -> pulumi.Input['RuleGroupActivatedRuleActionArgs']:\n return pulumi.get(self, \"action\")", "title": "" }, { "docid": "5532aca1fca3729a44d96abcb9a94805", "score": "0.59151936", "text": "def action_type(self):\n return self._action_type", "title": "" }, { "docid": "5532aca1fca3729a44d96abcb9a94805", "score": "0.59151936", "text": "def action_type(self):\n return self._action_type", "title": "" }, { "docid": "55ed241b1822205d565d6fda371cd31a", "score": "0.5912225", "text": "def _get_portal_return_action(self):\n self.ensure_one()\n return self.env.ref('feury_pricelist.customer_pricelist_action')", "title": "" }, { "docid": "85f0f6efb6fd3a6169afdc2cc4d8e1d2", "score": "0.59057283", "text": "def action_for(self, name, original, *args, **kwds):\n return self.actions[name].action(original, *args, **kwds)", "title": "" }, { "docid": "222d91d9ed784434d80253695229d72a", "score": "0.58955425", "text": "def message_action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message_action\")", "title": "" } ]
67218f5ad982602182ad27fb6773c3c3
Checks if class attr are present
[ { "docid": "28041c011931c8d004179897589b27a8", "score": "0.6782795", "text": "def test_correct_classattr(self):\n b = User()\n attr = [\"email\", \"password\", \"first_name\", \"last_name\"]\n d = b.__dict__\n for i in attr:\n self.assertFalse(i in d)\n self.assertTrue(hasattr(b, i))\n self.assertEqual(getattr(b, i, False), \"\")", "title": "" } ]
[ { "docid": "ef997cefcb0ea532cc503fe05aebf969", "score": "0.73302776", "text": "def test_HasClassFields(self):\n for strAttr, _, _ in self.ClassFields:\n strError = 'Missing attribute {} in class {}'.format(strAttr,\n self.TestClass.__name__)\n self.assertTrue(hasattr(self.TestClass, strAttr), strError)\n strAttr = 'foo_bar_shebang'\n strError = 'Attribute {} should not be in class {}'.format(strAttr,\n self.TestClass.__name__)\n self.assertFalse(hasattr(self.TestClass, strAttr), strError)", "title": "" }, { "docid": "28c524eec7a670e237a38fdda4d91289", "score": "0.7277597", "text": "def isclassattr(a, cls):\n for c in cls.__mro__:\n if a in c.__dict__:\n return True\n return False", "title": "" }, { "docid": "30394116dc57ed976b9db0ea1c4ebb86", "score": "0.67241156", "text": "def checkAttr(self, attrName):\n\n if not hasattr(self, attrName):\n raise AttributeError('module has no attribute ' + attrName)\n return False\n return True", "title": "" }, { "docid": "7604b75f026fd29d7adbd9de9aa249e3", "score": "0.6708065", "text": "def attr_check():\n if not session.attributes:\n return False\n for attr in attr_lst:\n if attr == cat_attr:\n if attr not in session.attributes:\n return False\n else:\n if attr not in session.attributes or session.attributes[attr] == None:\n return False\n return True", "title": "" }, { "docid": "03a6cf82468405f2ae9f5fbca56e6d75", "score": "0.66387165", "text": "def test_class_attribute_presence(self):\n l1 = dir(Amenity)\n self.assertIn('name', l1)", "title": "" }, { "docid": "20f86f95ee1b581e19c9851b32c03b79", "score": "0.6623182", "text": "def is_valid(self, klass, attributes):\r\n return attributes.has_values(self.attrs)", "title": "" }, { "docid": "618acde87d8dc9d3f2da1994a46b8791", "score": "0.6596402", "text": "def isattr(self):\n return False", "title": "" }, { "docid": "a466e85bded92f7c4a76cd27228810c8", "score": "0.6561819", "text": "def check_attribute(self, obj, attr):\r\n\r\n return hasattr(obj, attr)", "title": "" }, { "docid": "dbff4cca7be4663470764c8c985fc0a1", "score": "0.65474886", "text": "def hasClass(self, classname):\n if not isinstance(self.elements, (list, tuple)):\n self.elements = (self.elements,)\n\n for el in self.elements:\n if el.getAttribute(\"class\") is not None:\n if classname in el.getAttribute(\"class\"):\n return True\n return False", "title": "" }, { "docid": "982b3c07cc733939efa2731d0bbc9fe2", "score": "0.6496274", "text": "def _check_attrs_cls_for_known_errors(cls: type[DpEvent]) -> None:\n if not attr.has(cls):\n raise ValueError(\n f'Expected `cls` to be an `attrs` decorated class, found `{cls}`.'\n )\n for field in attr.fields(cls):\n if field.name.startswith('_'):\n raise ValueError(\n 'Expected all fields on the `attrs` decorated class to be public, '\n f'found `{cls}` has a field `{field.name}` thats starts with `_`, '\n 'making it private.'\n )\n if not field.init:\n raise ValueError(\n 'Expected all fields on the `attrs` decorated class to set `init` to '\n f'True, found `{cls}` has a field `{field.name}` that set `init` to '\n 'False.'\n )", "title": "" }, { "docid": "0166e0189bb955576093234c232c7da8", "score": "0.6438808", "text": "def test_auto_attribs_detect(self):\n\n @attr.define\n class OldSchool:\n x = attr.field()\n\n assert OldSchool(1) == OldSchool(1)\n\n # Test with maybe_cls = None\n @attr.define()\n class OldSchool2:\n x = attr.field()\n\n assert OldSchool2(1) == OldSchool2(1)", "title": "" }, { "docid": "60748050adf5c036994a28a7e9f5da24", "score": "0.63789356", "text": "def 
test_class_attribute_presence(self):\n l1 = dir(Rectangle(1, 1))\n self.assertIn('_Rectangle__width', l1)\n self.assertIn('_Rectangle__height', l1)\n self.assertIn('_Rectangle__x', l1)\n self.assertIn('_Rectangle__y', l1)\n self.assertIn('id', l1)", "title": "" }, { "docid": "cd3ac4cb17036a4dbd7471253d6b1a76", "score": "0.63357", "text": "def test_class_attribute_presence(self):\n l1 = dir(Square(1))\n self.assertIn('_Rectangle__width', l1)\n self.assertIn('_Rectangle__height', l1)\n self.assertIn('_Rectangle__x', l1)\n self.assertIn('_Rectangle__y', l1)", "title": "" }, { "docid": "f2478e90102db6531a4068fe109855e0", "score": "0.6303472", "text": "def has_unittest_attr(item, attr):\n if hasattr(item.obj, attr):\n return True\n if item.cls and hasattr(item.cls, attr):\n return True\n if item.parent and hasattr(item.parent.obj, attr):\n return True\n return False", "title": "" }, { "docid": "ef0cf912c0c7cda170c2ed6be218c450", "score": "0.6272687", "text": "def check_attrs(value):\n if not attrs.has(type(value)):\n raise TypeError(\n 'Expected an instance of an attrs decorated class, or an '\n 'attrs-decorated class type; found a value of type '\n f'{type(value)}'\n )", "title": "" }, { "docid": "fc575811747e2da94efc6a06c4d3acbb", "score": "0.62402856", "text": "def _check_classes(self):\n try:\n self.classes = self.imdbs[0].classes\n self.num_classes = len(self.classes)\n except AttributeError:\n # fine, if no classes is provided\n pass\n\n if self.num_classes > 0:\n for db in self.imdbs:\n assert self.classes == db.classes, \"Multiple imdb must have same classes\"", "title": "" }, { "docid": "37c2a99866600dbecbb8a58d7f756500", "score": "0.62193483", "text": "def has_attr(self, attr):\n return attr in self._attrs", "title": "" }, { "docid": "37c2a99866600dbecbb8a58d7f756500", "score": "0.62193483", "text": "def has_attr(self, attr):\n return attr in self._attrs", "title": "" }, { "docid": "af98ed365fc1249b1259f08dc7b6c3ee", "score": "0.62041515", "text": "def is_valid(self, klass, attributes):\r\n return Instance.is_valid(self, klass, attributes) and klass.__contains__(self.klass_value)", "title": "" }, { "docid": "14ada149eb35e4cf7bdccb664f77a440", "score": "0.6196574", "text": "def supported_class(cls, classname):\r\n return False", "title": "" }, { "docid": "cd5d0fffcd2d96c5554dc6aceb35094c", "score": "0.6171811", "text": "def _check_class(self, doc):\n if self._checks & DocChecker.CLASS:\n self._check_basic(doc)", "title": "" }, { "docid": "510874f7d763e4fb4ab49e97de07f981", "score": "0.6159896", "text": "def test_GetClassFields(self):\n for strAttr, _, _ in self.ClassFields:\n getattr(self.TestClass, strAttr) #must be ok\n with self.assertRaises(AttributeError): #non-existent attribute\n getattr(self.TestClass, 'foo_bar_shebang')", "title": "" }, { "docid": "3c887e4c2a5c1a0f6673d79ba4d73c8e", "score": "0.61588407", "text": "def isValid(attr):\n pass", "title": "" }, { "docid": "c2c45cb614a7edebc9983e0f2389b696", "score": "0.61553395", "text": "def _check_classes(self, classes):\n assert len(set(classes) - set(self.dataset.labels)) == 0, \"classes contains a label that isn't in dataset\"", "title": "" }, { "docid": "972688a7f18c27815c82fc73f6bbdf34", "score": "0.60764194", "text": "def test_normal_attrs(self):\n for attr in ('metadata', 'field_names', 'field_labels', 'forms',\n 'events', 'arm_names', 'arm_nums', 'def_field'):\n self.assertTrue(hasattr(self.reg_proj, attr))", "title": "" }, { "docid": "cda06632ba4fbe1fd60d30e182ab45d7", "score": "0.6071123", "text": "def 
test_hasAttributes(self):\n self.assertTrue(hasattr(City, \"name\"))\n self.assertTrue(hasattr(City, \"state_id\"))", "title": "" }, { "docid": "b0ca8e28b789d1fe9c3308ba6dfd5612", "score": "0.60674936", "text": "def testRequiredAttributes(self):\n\n\t\trequiredAttributes = (\"callback\",\n\t\t\t\t\t\t\t\t\"instances\",\n\t\t\t\t\t\t\t\t\"initialized\",\n\t\t\t\t\t\t\t\t\"path\",\n\t\t\t\t\t\t\t\t\"functions\",\n\t\t\t\t\t\t\t\t\"library\")\n\t\tfor attribute in requiredAttributes:\n\t\t\tself.assertIn(attribute, dir(Library))", "title": "" }, { "docid": "70bd2b9202daaf7870ce0acd972ca7d4", "score": "0.6052554", "text": "def attribute_defined(self, name):\n# sys.stderr.write(\"attribute_defined\\n\")\n return hasattr(self, name)", "title": "" }, { "docid": "d8ed4fc9f4cca554ed435738b9af2ea6", "score": "0.6037728", "text": "def _get_class_attr(cls) -> list:\n return [attr for attr in dir(cls) if not callable(getattr(cls, attr)) and not attr.startswith(\"__\")]", "title": "" }, { "docid": "b76cec00815ec6be0c0f5b72b82095f0", "score": "0.60353565", "text": "def test_class_attribute_presence(self):\n l1 = dir(Place)\n self.assertIn('city_id', l1)\n self.assertIn('user_id', l1)\n self.assertIn('name', l1)\n self.assertIn('description', l1)\n self.assertIn('number_rooms', l1)\n self.assertIn('number_bathrooms', l1)\n self.assertIn('max_guest', l1)\n self.assertIn('price_by_night', l1)\n self.assertIn('latitude', l1)\n self.assertIn('longitude', l1)\n self.assertIn('amenity_ids', l1)", "title": "" }, { "docid": "62eebdfaabe623bf17a5835948c5274b", "score": "0.6025745", "text": "def test_attr(self):\n model = self.model(**self.config)\n\n attributes = [\n \"input_size\",\n \"num_classes\",\n \"embedding_size\",\n \"dropout_p\",\n \"vocab_size\",\n \"batch_normalisation\",\n \"bilstm_layers\",\n \"dense_layers\",\n ]\n methods = [\"build_model\", \"process_embedding_layer\"]\n\n for attr in attributes:\n self.assertTrue(hasattr(model, attr))\n for method in methods:\n self.assertTrue(hasattr(model, method))", "title": "" }, { "docid": "b9762013de0115b215b8981055d37863", "score": "0.601793", "text": "def _checkattr(obj, attr):\n # TO DO - support attributes done a path, checking each step in turn\n\n if hasattr(obj, attr):\n objattr = getattr(obj, attr)\n return _notnull(objattr)\n\n return False", "title": "" }, { "docid": "767daf405bd1b49356669b4f33aaa289", "score": "0.5990704", "text": "def _is_attr_object(obj: Any) -> bool:\n return _has_attrs and _attr_module.has(type(obj))", "title": "" }, { "docid": "4979559767e0db67b74fb793672637c8", "score": "0.598739", "text": "def test_host_test_class_has_test_attr(self):\n for i, ht_name in enumerate(self.HOSTREGISTRY.HOST_TESTS):\n ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]\n if ht is not None:\n self.assertEqual(True, hasattr(ht, \"result\"))", "title": "" }, { "docid": "a3b8ad9c359b90ed71b3b4e949e8e906", "score": "0.59859705", "text": "def test_auto_attribs_detect_annotations(self):\n\n @attr.define\n class NewSchool:\n x: int\n\n assert NewSchool(1) == NewSchool(1)\n\n # Test with maybe_cls = None\n @attr.define()\n class NewSchool2:\n x: int\n\n assert NewSchool2(1) == NewSchool2(1)", "title": "" }, { "docid": "04ac8482df26308b9a5c4f09c890ebf1", "score": "0.59239835", "text": "def check(cls, attr_name, rules):\n pass", "title": "" }, { "docid": "a538ecfe3769b5e6f07f0214dcd9d96a", "score": "0.59156644", "text": "def can_load_attribute( self, attr ):\n filename = self._attr_filename( attr )\n return os.path.exists(filename)", "title": "" }, { "docid": 
"19388e102f760885625675cb6d39f7ee", "score": "0.5893991", "text": "def exist_class_var_dec(self):\r\n return self.next_value_is(\"field\") or self.next_value_is(\"static\")", "title": "" }, { "docid": "c6dc61d92d89fb1ef022f0583cf55afd", "score": "0.5890096", "text": "def has_attr(node, attrname):\n return node.has_attr(attrname)", "title": "" }, { "docid": "bf74c4b7a5557b0a7ead5bf6d656b26b", "score": "0.58881634", "text": "def test_Attrs(self):\n self.assertTrue(hasattr(self.obj, 'name'))", "title": "" }, { "docid": "94ad2e5a933099f20d6e538c3595ce6a", "score": "0.58612865", "text": "def hasattr(self, attrName):\n try:\n getattr(self, attrName)\n except AttributeError as err:\n if hasattr(type(self), attrName):\n raise\n return False\n else:\n return True", "title": "" }, { "docid": "b3511dccc45a88cc975fd6bf55272d3a", "score": "0.5859322", "text": "def validate_class(char):\r\n\r\n possible_class = [\"Barbarian\", \"Bard\", \"Cleic\", \"Druid\", \"Fighter\", \r\n \"Monk\",\"Paladin\", \"Ranger\", \"Rouge\", \"Sorcerer\",\r\n \"Warlock\", \"Wizard\"]\r\n \r\n for i in possible_class:\r\n if(i == char.cclass):\r\n return True\r\n \r\n return False", "title": "" }, { "docid": "a1b3a66dd3b286bbf2d75a8b986b1609", "score": "0.5850705", "text": "def available(self):\n return self._attrs is not None", "title": "" }, { "docid": "7cd8d9d8737e92d15c5c9f45ac2de102", "score": "0.5848648", "text": "def test_HasInstanceFields(self):\n objTest = self.TestClass()\n for strAttr, _, _ in self.ClassFields:\n strError = 'Missing attribute {} in instance of class {}'.format(\n strAttr, self.TestClass.__name__)\n self.assertTrue(hasattr(objTest, strAttr), strError)\n for strAttr, _, _ in self.InstanceFields:\n strError = 'Missing attribute {} in instance of class {}'.format(\n strAttr, self.TestClass.__name__)\n self.assertTrue(hasattr(objTest, strAttr), strError)\n strAttr = 'foo_bar_shebang'\n strError = '{} should not be in instance of class {}'.format(strAttr,\n self.TestClass.__name__)\n self.assertFalse(hasattr(objTest, strAttr), strError)\n del objTest", "title": "" }, { "docid": "b72ecbb0679f68a1aa3be825a4863cb0", "score": "0.58335584", "text": "def filled(self):\n element = self.root.find_element_by_xpath('.')\n classes = element.get_attribute('class')\n return ('filled' in classes)", "title": "" }, { "docid": "47fdeccbd06fec37a75e3e549f17a468", "score": "0.5833208", "text": "def test_required_attributes(self):\n\n required_attributes = (\"ID\",)\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Ebner1998))", "title": "" }, { "docid": "c950e706729ca918e6c835e406399468", "score": "0.5821082", "text": "def test_required_attributes(self):\n\n required_attributes = (\"reflectances\", \"cmfs\", \"illuminant\")\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(Tree_Otsu2018))", "title": "" }, { "docid": "d270db64288a2e622ac7606b3f80d8a2", "score": "0.5818975", "text": "def check_in_attributes(self, query_attribute):\r\n # Tim\r\n try:\r\n return query_attribute.upper() in self.attributes\r\n except AttributeError:\r\n return False", "title": "" }, { "docid": "ee21fbd29248560d71ff0fe872077036", "score": "0.5808911", "text": "def test_required_attributes(self):\n\n required_attributes = (\n \"shape\",\n \"basis_functions\",\n \"means\",\n \"selector_array\",\n )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(Dataset_Otsu2018))", "title": "" }, { "docid": "243e6e299f87dea6035bca6a193c1be3", "score": "0.57749933", "text": "def 
_class_attributes(self):\n\n attributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a)))\n return (a for a in attributes if not(a[0].startswith('__') \n and a[0].endswith('__')))", "title": "" }, { "docid": "a21e2a490576b8480b1e70b6b187714b", "score": "0.57663053", "text": "def is_compatible(cls, other: dict) -> bool:\n\t\treturn len(set(other.keys()) & set(cls.class_obj_attrs)) == len(cls.class_obj_attrs)", "title": "" }, { "docid": "846471a368ce0c024217b62e7acfa0d8", "score": "0.57569176", "text": "def node_has_class(node, classes):\n # Preconditions & preparation:\n # wrap single name in list\n if not issubclass(type(classes), list):\n classes = [classes]\n # Main:\n for cname in classes:\n if cname in node['classes']:\n return True\n return False", "title": "" }, { "docid": "617b8bca562814206d00fc7576480f15", "score": "0.5754573", "text": "def test_instance_attribute_presence(self):\n l1 = dir(Amenity())\n self.assertIn('id', l1)\n self.assertIn('updated_at', l1)\n self.assertIn('created_at', l1)\n self.assertIn('__class__', l1)\n self.assertIn('name', l1)", "title": "" }, { "docid": "2b684c228f38deabca549c3a7a464130", "score": "0.57540125", "text": "def check_attributes(self):\n values = {'flag': int}\n\n for attr_name, attr_type in values.items():\n attr = getattr(self, attr_name)\n if not isinstance(attr, attr_type):\n raise ControllerAttributeError(\n 'Attribute \"{}\" in the controller is not the expected '\n 'type. Expected \"{}\", got {}.'.format(\n attr_name, attr_type, type(attr)))\n valid_flags = [0, 1, 2, 3]\n if attr_name == 'flag' and attr not in valid_flags:\n raise ControllerAttributeError(\n 'Attribute \"flag\" in the controller must be one of {}.'\n ' Got: {}.'.format(valid_flags, attr))", "title": "" }, { "docid": "4c2e7a1f77cd8c3ddb5566821a768034", "score": "0.5743298", "text": "def test_required_attributes(self):\n\n required_attributes = (\"partition_axis\", \"row\")\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(Node_Otsu2018))", "title": "" }, { "docid": "eaf8ef2787207cc973fa0a879142a1dc", "score": "0.5723846", "text": "def exist_class(info, request):\n classname = info['match']['classname']\n cls = get_class(classname)\n if not cls:\n return False\n\n info['match']['cls_or_obj'] = cls\n return True", "title": "" }, { "docid": "164fde5846ff102ee8a3bd764f3ec194", "score": "0.5718747", "text": "def has_attr(node: Union[api.MObject, pm.nt.DependNode, str], attr_name: str) -> bool:\n if isinstance(node, api.MObject):\n return has_attr_fast(node, attr_name)\n elif isinstance(node, pm.nt.DependNode):\n return has_attr_fast(node.__apimobject__(), attr_name)\n else:\n return cmds.objExists(node + \".\" + attr_name)", "title": "" }, { "docid": "e6c13de451d4afd3f9336a54b34021c3", "score": "0.5718242", "text": "def check_classes(gtype, section):\n try:\n return sabnzbd.config.get_config(section, \"%s_prio_%s\" % (section, gtype))() > 0\n except TypeError:\n logging.debug(\"Incorrect Notify option %s:%s_prio_%s\", section, section, gtype)\n return False", "title": "" }, { "docid": "0b5d2e041ebe825dcafdc4a75d32aa9c", "score": "0.5710617", "text": "def test_required_attributes(self):\n\n required_attributes = (\n \"reflectances\",\n \"cmfs\",\n \"illuminant\",\n \"basis_functions\",\n \"mean\",\n )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(Data_Otsu2018))", "title": "" }, { "docid": "48b56965a2869b8b15ad5ed7954f45cf", "score": "0.56989443", "text": "def _safe_has_attribute(obj, member: str) -> bool:\n 
try:\n return hasattr(obj, member)\n except Exception: # pylint: disable=broad-except\n return False", "title": "" }, { "docid": "e134aeb829f385daf8a3271aac807f25", "score": "0.56890464", "text": "def _HasAttributeContainers(self, container_type):", "title": "" }, { "docid": "df6d5413c623a660500013feb98880a0", "score": "0.5683968", "text": "def test_HasProperties(self):\n super(Test_ClassTest2, self).test_HasProperties()\n objTest = self.TestClass()\n for strAttr, _, _, _ in self.Getters:\n strError = 'Instance of class {} has no attribute {}'.format(\n self.TestClass.__name__, strAttr)\n self.assertTrue(hasattr(objTest, strAttr), strError)\n del objTest", "title": "" }, { "docid": "254d56e098423165d964d0f5f26f8628", "score": "0.56824934", "text": "def test_mixed_attr(self):\n\n class InstanceOnlyDescriptor(object):\n def __get__(self, instance, owner):\n if instance is None:\n raise AttributeError(\"You can't retrieve InstanceOnlyDescriptor.\")\n return len(instance.__dict__)\n\n class MyClass(object):\n x = InstanceOnlyDescriptor()\n y = 13\n\n # Make sure the 'InstanceOnlyDescriptor' works.\n self.assertRaises(AttributeError, getattr, MyClass, 'x')\n\n obj = MyClass()\n self.assertEqual(0, obj.x)\n obj.y = 3\n self.assertEqual(1, obj.x)\n\n\n # Fetch fields\n fields = dict(utils.iterclass(MyClass))\n\n # Check for normal fields\n self.assertIn('y', fields)\n self.assertEqual(13, fields['y'])\n\n # Check that the invalid attribute is ignored.\n self.assertNotIn('x', fields)", "title": "" }, { "docid": "ee537c7920ae09a764ceda9e11123b84", "score": "0.5678679", "text": "def test_attrs(self):\n self.ocb = self.test_class(**self.set_default)\n\n # Ensure standard attributes are present\n for attr in ['eab', 'ocb', 'max_delta', 'records', 'rec_ind',\n 'dtime', 'ocb_ind', 'eab_ind']:\n self.assertTrue(hasattr(self.ocb, attr),\n msg=\"missing attr: {:}\".format(attr))\n\n return", "title": "" }, { "docid": "5af3563a94eefa5fd406bd6eea2969b1", "score": "0.5668321", "text": "def check_attribute(self, attribute_name, attribute_value):\n return True", "title": "" }, { "docid": "9319e2665c897f342eaa248808c2c78b", "score": "0.56599677", "text": "def test_regular_attrs(self):\n for attr in 'events', 'arm_names', 'arm_nums':\n attr_obj = getattr(self.reg_proj, attr)\n self.assertIsNotNone(attr_obj)\n self.assertEqual(len(attr_obj), 0)", "title": "" }, { "docid": "20e3cfb1b6c769ec2b081f4b43060a7d", "score": "0.5658235", "text": "def has_attribute(f, src) -> bool:\n if len(src.meta[\"schema\"][\"properties\"]) == 0:\n print(\"[INFO]\", f, \"does not contain any attributes\")\n return False\n else:\n return True", "title": "" }, { "docid": "8e7dbcc52221118f6f9ab0e0584f3dec", "score": "0.5657175", "text": "def test_name_attr(self):\n self.assertEqual(hasattr(self.amenity, \"name\"), True)", "title": "" }, { "docid": "f69d879b3049a8203d610bba41d67f1e", "score": "0.5649987", "text": "def get_class_attr(Cls) -> []:\n import re\n return [a for a, v in Cls.__dict__.items()\n if not re.match('<function.*?>', str(v))\n and not (a.startswith('__') and a.endswith('__'))]", "title": "" }, { "docid": "2ee6851e17e28d113cb191d42b8b7efd", "score": "0.56431794", "text": "def test_class_attr(self):\r\n\r\n self.assertEqual(\r\n self.objects[self.o_id].to_dict()['__class__'], 'User'\r\n )", "title": "" }, { "docid": "91898e18320853422b011186d30ec587", "score": "0.56408596", "text": "def has_class(name: str, classes):\n for class_name in classes:\n if class_name.lower() == name or class_name == name:\n return True\n return 
False", "title": "" }, { "docid": "4277f62cb0a9cbc6437efeaecc8fe04e", "score": "0.5639279", "text": "def provided_classes(self, cls):\n return isclass(cls) and self.registry.search(to_return=cls)", "title": "" }, { "docid": "615ab4a4d1625c073aa2853100f88184", "score": "0.5623718", "text": "def is_valid(self, klass, attributes):\r\n return TrainingInstance.is_valid(self, klass, attributes)", "title": "" }, { "docid": "634624b02f0868cc04e6ac1a4ab4a433", "score": "0.5587449", "text": "def has(self, attr):\n return hasattr(self, attr)", "title": "" }, { "docid": "a63e7df9f254f076f01ba532ebd0a0b7", "score": "0.5585404", "text": "def has(self, attr_path:str):\n curr_config = self\n attrs = str(attr_path).split('.')\n\n for attr_lvl_name in attrs:\n if hasattr(curr_config, attr_lvl_name):\n value = getattr(curr_config, attr_lvl_name)\n\n if attr_lvl_name == attrs[-1]:\n return True\n elif isinstance(value, Config):\n curr_config = value\n else:\n break\n else:\n break\n\n return False", "title": "" }, { "docid": "28d75890921b5d12d7ad2076b8e4f922", "score": "0.5580463", "text": "def test_carriesAttributes(self):\n updatedClass = _oldstyle._oldStyle(SomeOldStyleClass)\n\n self.assertEqual(updatedClass.__name__, SomeOldStyleClass.__name__)\n self.assertEqual(updatedClass.__doc__, SomeOldStyleClass.__doc__)\n self.assertEqual(updatedClass.__module__, SomeOldStyleClass.__module__)", "title": "" }, { "docid": "c49a5c355e2a964b738596d928e5cd1d", "score": "0.55746573", "text": "def test_class_attribute() -> None:\n type_hints = get_type_hints(lmp.model._elman_net.ElmanNet)\n assert type_hints['model_name'] == ClassVar[str]\n assert lmp.model._elman_net.ElmanNet.model_name == 'Elman-Net'", "title": "" }, { "docid": "10e8d9dce960f5dc1e751b31842574b8", "score": "0.55726045", "text": "def __check_same_attribute(self, parent, child, _class):\n for p_attr in parent:\n for c_attr in child:\n if p_attr.name == c_attr.name:\n raise RedefinedAttributeError(_class)", "title": "" }, { "docid": "65f6bcdb7ad625d364e5fe105b8adba1", "score": "0.5556157", "text": "def is_headline_tag(tag, attrs):\n return ('class', name) in attrs", "title": "" }, { "docid": "a7ed9d88eaf26724059705af39d3de32", "score": "0.55331933", "text": "def test_attrs(self):\n for cls in _get_elements_classes():\n s1 = random_str()\n s2 = random_str()\n\n # Ordinary attributes\n self._test_render(cls, attr=s1, attr_abc=s2)\n\n # Single attributes\n self._test_render(cls, allowfullscreen=True, checked=True, hidden=True, selected=True, required=True)\n\n # Replaceable attributes\n self._test_render(cls, label_for=s1, css=s2)\n\n # Get attributes\n em = cls(attr1=s1, attr2=s2)\n assert em.attrs == {'attr1': s1, 'attr2': s2}\n assert em.get_attr('attr1') == s1\n assert em.get_attr('attr2') == s2\n\n # Set attributes\n em.set_attr('attr3', s1)\n em.set_attr('attr4', s2)\n assert em.attrs == {'attr1': s1, 'attr2': s2, 'attr3': s1, 'attr4': s2}\n assert em.get_attr('attr3') == s1\n assert em.get_attr('attr4') == s2\n\n # Init 'data-*' attrs via constructor\n assert cls(data={'attr1': s1, 'attr2': s2}).attrs == {'data-attr1': s1, 'data-attr2': s2}", "title": "" }, { "docid": "c12a8542c3f1831a19cf8491df6673e1", "score": "0.55282474", "text": "def classes(field):\r\n return field.widget.attrs.get('class', None)", "title": "" }, { "docid": "8083d3afb0c7fc5ee8d8d487542ac68a", "score": "0.5524618", "text": "def has(self, cls_name):\n return cls_name.lower() in self._lookup", "title": "" }, { "docid": "51c8dede7c5a31ce7085a88d1776f7d9", "score": "0.5523427", 
"text": "def _check_task(self, task) -> bool:\n return (len(set(task) - set(self.classes)) == 0) and (len(set(task)) - len(task) == 0)", "title": "" }, { "docid": "ee79aa624489f0fcb4e2d0490c328659", "score": "0.5519688", "text": "def has_name(self, name):\n return name in self.classes", "title": "" }, { "docid": "c260e1ad15e7896da84d845c10390c77", "score": "0.5519438", "text": "def has_attr(self, qry):\n if isinstance(qry, dict):\n qry = {mpi(key): value for key, value in qry.items()}\n return qry.items() <= self.__dict__.items()\n elif isinstance(qry, str): # place above iterable check, since str is iterable\n return mpi(qry) in self.__dict__.keys()\n elif isinstance(qry, Iterable):\n return all(mpi(i) in self.__dict__.keys() for i in qry)\n\n raise TypeError(\n f'qry passed to has_attr should be of type dict, str, or Iterable, but was {type(qry)} instead')", "title": "" }, { "docid": "d2403899ab6c35942f169d935a4f2547", "score": "0.5500731", "text": "def __contains__(self, attr):\n return attr in self.attr", "title": "" }, { "docid": "a2f7ff54dc1711644decf3b1c9bd9c1e", "score": "0.5499032", "text": "def CF_attribute_compliance(properties_data,name):\n \n # try to see if the attribute list is given directly or if it is part of a class\n class_flag = False\n try:\n data_attribute = properties_data.attribute\n class_flag = True\n except:\n data_attribute = properties_data \n\n\n # load all current attributes\n CF_current_dict = {}\n if data_attribute is not None:\n for attribute in data_attribute:\n CF_current_dict[extract_key(attribute,\"name\")] = extract_key(attribute,\"value\")\n\n\n # ensure the required CF attributes are present\n CF_missing_attr_name = []\n CF_missing_attr_value = []\n for CF_key in [\"long_name\",\"standard_name\"]:\n try:\n CF_current_dict[CF_key]\n except:\n try: \n CF_missing_attr_name.append(CF_key)\n CF_missing_attr_value.append(name)\n except:\n pass\n \n if len(CF_missing_attr_name)>0:\n if class_flag:\n properties_data.attribute = expand_attrdict(properties_data.attribute, CF_missing_attr_name, CF_missing_attr_value)\n else:\n properties_data = expand_attrdict(properties_data,CF_missing_attr_name, CF_missing_attr_value)\n\n return properties_data", "title": "" }, { "docid": "860ff79ce25379c2c54a9a6604247f20", "score": "0.5486859", "text": "def validate(attrs):\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False", "title": "" }, { "docid": "d4bffee43cb532060ec0d528ee54d5c9", "score": "0.5468721", "text": "def test_AttValues(self):\n self.assertTrue(self.cClass.name, \"San Francisco\")\n self.assertTrue(self.cClass.state_id, \"State\")", "title": "" }, { "docid": "6141181015ae0f87d25aabd92a191e4b", "score": "0.54612774", "text": "def has_nested_attr(obj: Any, names: Sequence[str]) -> bool:\n try:\n get_nested_attr(obj, names)\n return True\n except AttributeError:\n return False", "title": "" }, { "docid": "5a9b11f9f916264ee16ef459748f869b", "score": "0.54544944", "text": "def has_attr(**kwargs) -> 'CardEvaluator':\n if not kwargs:\n return CardEvaluator(lambda _: True)\n\n (attr, value), *kvs = kwargs.items()\n\n #\n def _inner(card: 'Card') -> bool:\n return getattr(card, attr) == value\n\n return CardEvaluator(_inner) & has_attr(**dict(kvs))", "title": "" }, { "docid": "977a0a02e692829796e7a8893a198a1f", "score": "0.5450211", "text": "def is_known_attribute(attribute, context):\n\n return attribute in context.DICT_EXPECTED_TYPES_ATTR", "title": "" }, { 
"docid": "11f8e1127feb25bf94766c48efcabbae", "score": "0.5448823", "text": "def __instancecheck__(cls, instance):\n for name, type_ in cls.__attributes__.items():\n try:\n attribute = getattr(instance, name)\n except AttributeError:\n return False\n\n if not isinstance(attribute, type_):\n return False\n\n for name, signature in cls.__signatures__.items():\n function = getattr(instance, name, None)\n if not _implements_signature(function, signature):\n return False\n return True", "title": "" }, { "docid": "737b7479ec2e4de2408ce690d3773469", "score": "0.5443587", "text": "def _check_attributes(data_attrs, control_attrs):\n\n assert data_attrs['parent_experiment_id'] in [control_attrs['experiment_id'], 'N/A']\n\n control_rip = 'r%si%sp%s' %(control_attrs['realization'],\n control_attrs['initialization_method'],\n control_attrs['physics_version'])\n assert data_attrs['parent_experiment_rip'] in [control_rip, 'N/A']", "title": "" }, { "docid": "f95afa3ff991fab7e25acd2bace909d8", "score": "0.54418325", "text": "def test_isattribute_FileStorage(self):\n self.assertTrue(hasattr(FileStorage, '_FileStorage__objects'))\n self.assertTrue(hasattr(FileStorage, '_FileStorage__file_path'))", "title": "" }, { "docid": "75b8b646b832d836a0b5f9577a77357c", "score": "0.54253405", "text": "def test_contains_when_class_init_requires_arguments(self):\n registry = ClassRegistry(attr_name='element')\n\n @registry.register\n class Butterfree(Pokemon):\n element = 'bug'\n\n def __init__(self, name):\n super(Butterfree, self).__init__(name)\n\n self.assertTrue('bug' in registry)", "title": "" }, { "docid": "b669f6a0a2ed60110f6a7f21460d5627", "score": "0.5417015", "text": "def check_config(self):\n for param in self.params:\n if not param[0] in vars(self):\n raise RuntimeError(\n \"{}: {} needs to be set.\".format(self.name, param[0])\n )\n val = self.__getattribute__(param[0])\n type_ok = False\n for param_type in param[1]:\n if isinstance(val, param_type):\n type_ok = True\n break\n if not type_ok:\n raise TypeError(\n \"{}: {} should be {}, got {}:{}\".format(\n self.name, param[0], param[1], val, type(val)\n )\n )\n return True", "title": "" }, { "docid": "933cb730e0e6bee8ecf1d4e38b1cde5a", "score": "0.5412701", "text": "def check_attributes(self, attributes):\n self.log(\"GratiaConfiguration.check_attributes started\")\n\n if self.ignored:\n self.log(\"%s section ignored\" % self.config_section)\n self.log(\"GratiaConfiguration.check_attributes completed\")\n return True\n\n if not self.enabled:\n self.log(\"Not enabled\")\n self.log(\"GratiaConfiguration.check_attributes completed\")\n return True\n status = self._check_servers()\n if 'htcondor-ce' in self._probe_config and requirements_are_installed():\n status &= self._verify_gratia_dirs_for_htcondor_ce_probe()\n self.log(\"GratiaConfiguration.check_attributes completed\")\n return status", "title": "" }, { "docid": "5313cf49ba83067ab974a7f245e99b58", "score": "0.54050726", "text": "def test_TypeClassFields(self):\n for strAttr, typType, _ in self.ClassFields:\n strError = ' '.join(['Type of attribute', strAttr, 'in class',\n self.TestClass.__name__, 'is',\n str(type(getattr(self.TestClass, strAttr))),\n 'instead of', str(typType)])\n self.assertIsInstance(getattr(self.TestClass, strAttr), typType,\n strError)", "title": "" }, { "docid": "e63bbcdeb9c687e30ed6ce458be51b1e", "score": "0.5398413", "text": "def test_klass_has_proxy_attributes(self):\n # check existence\n self.assertTrue(hasattr(self.klass, '_GRAPH'))\n self.assertTrue(hasattr(self.klass, 
'_PROXY'))\n self.assertTrue(hasattr(self.klass, '_SCRIPTS'))\n self.assertTrue(hasattr(self.klass, 'PersonModel'))\n # check that they are not None\n self.assertIsNotNone(self.klass._GRAPH)\n self.assertIsNotNone(self.klass._PROXY)\n self.assertIsNotNone(self.klass.PersonModel)\n # check that they have the right type\n self.assertIsInstance(self.klass._GRAPH, BulbsGraph)\n self.assertIsInstance(self.klass._PROXY, BulbsNodeProxy)", "title": "" } ]
fdcd639721cb599dcf895499e664212a
Returns the number of tests in the test file
[ { "docid": "c0cf8d7008a50fea2b280fef0de7ecdb", "score": "0.8652606", "text": "def number_of_tests(self):\n tests = c.file_number_of_lines(self.test_set)\n if self.test_set_header:\n tests -= 1\n return tests", "title": "" } ]
[ { "docid": "102ac0a63de87b6a11eba38139c2a37d", "score": "0.76091194", "text": "def _get_num_tests(test_output: str) -> int:\n match = re.search(r\"Tests run: (\\d+)\", test_output)\n return int(match.group(1)) if match else 0", "title": "" }, { "docid": "e8ce229ea3064a8ad2912ca74b052484", "score": "0.75852644", "text": "def number_of_data_files(self):\n num = 0\n for f in os.listdir('.'):\n if f.startswith('.coverage.') or f == '.coverage':\n num += 1\n return num", "title": "" }, { "docid": "12be05dc5856e8f0969bd758a9c41f01", "score": "0.72347355", "text": "def _xml_total_test_count(self):\n return len(self._test_list)", "title": "" }, { "docid": "35a49ad273fec3260734fa2320cb4f79", "score": "0.70362806", "text": "def get_number_runs(self):\n return self._myProject.get_number_data_files()", "title": "" }, { "docid": "a004156821fd58df70efc27186b34291", "score": "0.7034079", "text": "def test_line_count(self):\n #self.assertEqual(Expected, Actual, Message )\n self.assertEqual(4, analyze_text(self.filename)[0], \"Line count:\")", "title": "" }, { "docid": "9a247ced5cc796c84df0bf9a72129ace", "score": "0.6996066", "text": "def test_example_count(self) -> int:\n return pulumi.get(self, \"test_example_count\")", "title": "" }, { "docid": "9ed0d5230a4d3df21af0c305db594bb9", "score": "0.6861403", "text": "def file_stats(file_pairs):\n loc = 0\n nfiles = 0\n nsuites = 0\n ntests = 0\n for path, filename in file_pairs:\n loc += int(os.popen('wc -l '+path+'/'+filename).read().split()[0])\n nfiles += 1\n if (filename[:4] == 'Test'):\n nsuites+=1\n ntests+= int(os.popen('egrep -c -i \"void\\s+Test\" '+path+'/'+filename).read().split()[0])\n return (nfiles, loc, nsuites, ntests)", "title": "" }, { "docid": "e46b67ae123eddad0b9b648ad04fa906", "score": "0.6815652", "text": "def class_count_words(self):\n self.assertEqual(1, 1)\n\n try:\n with open(self.file_name) as f_obj:\n contents = f_obj.read()\n except FileNotFoundError:\n msg = \"test_book_length couldn't find file \" + self.file_name\n print(msg)\n ret_val = None\n else:\n # Count the approximate number of works in the file\n words = contents.split()\n num_words = len(words)\n ret_val = num_words\n\n return ret_val", "title": "" }, { "docid": "db92257bae036f0b15e8a25bd8161443", "score": "0.6787168", "text": "def test_paragraph_count(self):\n self.assertEqual(output_verifier.get_chunk_count(1, self.test_dir_name), 3)", "title": "" }, { "docid": "ac5430097e827ce0d463e6694f0f5758", "score": "0.6730336", "text": "def get_num_files(self):\r\n return self.nfile", "title": "" }, { "docid": "3d99572479d47c2e06aaa91aa034493c", "score": "0.67099786", "text": "def get_number_of_given_str(self):\n self.result = 0\n with open(self.path, 'r') as file:\n self.result = file.read().count(self.string_to_count)\n return f'{self.result} strings were found in the file\\n'", "title": "" }, { "docid": "6e0b2db04e3e6c69f074f708ca8f2337", "score": "0.6694528", "text": "def count(self) -> int:\n return len(self.files)", "title": "" }, { "docid": "530ef318462fb116dbc5f61fa9086730", "score": "0.66835755", "text": "def _get_num_passed(test_output: str) -> int:\n match = re.search(r\"OK \\((\\d+) tests\\)\", test_output)\n if not match: # there were failures\n return _get_num_tests(test_output) - _get_num_failed(test_output)\n return int(match.group(1))", "title": "" }, { "docid": "8af4936ca7e6cbe80e66718d7662ef56", "score": "0.6677547", "text": "def get_num_instruments(self):\n c = 0\n for line in self.lines:\n c += len(line)\n return c", "title": "" }, { "docid": 
"10e3a8b67202c1997e6b45680313a3b2", "score": "0.664056", "text": "async def test_nr_of_tests(self):\n response = await self.collect(get_request_json_return_value=self.JENKINS_JSON)\n self.assert_measurement(response, value=\"2\", total=\"2\")", "title": "" }, { "docid": "9339cf2481f67ba1b5fc8e99e6031de2", "score": "0.6627532", "text": "def file_count(self) -> int:\n return self.__file_count", "title": "" }, { "docid": "a356be0191f7d35c3fe7dac09fe9b995", "score": "0.661739", "text": "def calculate_number_of_tests(problem: TestSelection, solution: BinarySolution) -> int:\n total_tests = len(problem.tests_index[solution.variables[0]])\n if total_tests == 0:\n total_tests = 123456\n return total_tests", "title": "" }, { "docid": "091e0c774f3f2e3f8cc92d153ae5799e", "score": "0.6611086", "text": "def file_count(self) -> int:\n return len(self.files)", "title": "" }, { "docid": "2f99515cd9c1cebb64b9b79057100ef8", "score": "0.65934044", "text": "def test_without_tokenizer_and_count_lines_into_file(\n create_directory_file_and_return_path,\n):\n assert universal_file_counter(create_directory_file_and_return_path, \"txt\") == 8", "title": "" }, { "docid": "2533fb60e699d133cfbe9e02cc946dbe", "score": "0.65616304", "text": "def __len__(self) -> int:\n return len(self.__test_coverage)", "title": "" }, { "docid": "f936e72a5a4ad6befafd143c938346f7", "score": "0.65510213", "text": "def _count_header_size(test_file):\r\n reading_header = False\r\n found_header = False\r\n comment_size = 0\r\n\r\n for line in test_file:\r\n if not found_header and line.strip() == '{':\r\n found_header = True\r\n reading_header = True\r\n if reading_header:\r\n comment_size += 1\r\n \r\n if line.strip() == '}':\r\n return comment_size\r\n \r\n return comment_size", "title": "" }, { "docid": "4d882bfb7ca700a0b41ff8e8b080eae4", "score": "0.64409", "text": "def count_modules(fname):\n with open(fname, \"r\") as f:\n _ = next(f)\n row = next(f)\n title = row.split(constants.SEP, 1)[0]\n return len(title)", "title": "" }, { "docid": "cd3443eeced34de8e1c39602f77f4dfa", "score": "0.6427313", "text": "def test_character_count(self):\n self.assertEqual(131, analyze_text(self.filename)[1], \"Character count:\")", "title": "" }, { "docid": "fe5982660b84d37a733f355ac37edf35", "score": "0.64224565", "text": "def get_trial_size(cls):\n with open(TestData.trial_size) as f:\n for line in f:\n trial_size = int(line.rstrip())\n f.close()\n return trial_size", "title": "" }, { "docid": "a1a5d36315f3085e0a43e39b5668807d", "score": "0.6421444", "text": "def get_num_files(self, annotations):\n return len(annotations[\"RELEASE\"][0][0][3])", "title": "" }, { "docid": "ac0d5d0902d889101f38c6b221fca576", "score": "0.6407998", "text": "def filecount(path):\n return len(subprocess.check_output(['find', path, '-type', 'f']).strip().split(\"\\n\"))", "title": "" }, { "docid": "647bfd751ddb8c4a365847343274d536", "score": "0.63966787", "text": "def count_runs(fname):\n with open(fname, \"r\") as f:\n hdr = next(f)\n title = hdr.split(constants.SEP, 1)[0]\n if title != \"Run\":\n raise ValueError(\"Expected first column of '%s' to be labeled 'Run', got label '%s'\" % (fname, title))\n last_index = hdr.rsplit(constants.SEP, 1)[-1]\n return int(last_index)", "title": "" }, { "docid": "c8d76b7a5f1358ea52f3f82335a55d4c", "score": "0.6395921", "text": "def get_testdata_len(self):\n if self._test_dataset is None:\n self._test_dataset = MamoDataset(\n np.load(self._test_input_data_path), np.load(self._test_output_data_path))\n return 
self._test_dataset.__len__()", "title": "" }, { "docid": "a987cc41cd1a4d09ad1bb542ae4eebd7", "score": "0.63890547", "text": "def create_test_files(test_info):\r\n\r\n match_count = 0\r\n for counter in range(test_info[1]):\r\n txt = lorem.paragraphs(5)\r\n match_count += len(re.findall(EXP, txt))\r\n f = open('{}/{}'.format(TEST_FOLDER_PATH, test_info[0]+'_'+str(counter+1)), 'w')\r\n f.write(txt)\r\n f.close()\r\n return match_count", "title": "" }, { "docid": "c7a5014fef3b3b54f7f518780035cf52", "score": "0.6373087", "text": "def test_amount(test_info):\r\n\r\n try:\r\n os.mkdir(TEST_FOLDER_PATH)\r\n except FileExistsError:\r\n pass\r\n\r\n logging.info(f'{datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")} test_file_amount.py')\r\n expected = create_test_files(test_info)\r\n files = ['{}/{}_{}'.format(TEST_FOLDER_PATH, test_info[0], str(i+1)) for i in range(test_info[1])]\r\n locator = Locator(EXP, False, False, files)\r\n locator.analyze_files()\r\n output = locator.get_match_count()\r\n\r\n # Log an error if Locator() returns an incorrect amount of matches\r\n if expected != output:\r\n logging.error(f'{datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")} test_file_amount.py\\t'\r\n f'expected - {expected} output - {output}')\r\n assert expected == output", "title": "" }, { "docid": "f379d29ab9b120a0df5e780667637a51", "score": "0.6370624", "text": "def GetNextTestId(self):\n nbTest = -1\n testPathList = self.GetExistingTestPathList()\n for testPath in testPathList:\n name = os.path.basename(testPath)\n nbTest = max(nbTest, int(name[4:]))\n nbTest += 1\n return nbTest", "title": "" }, { "docid": "fc5d6b68c19e0670f303626ef07d67cd", "score": "0.63694954", "text": "def __len__(self):\n self._ensure_exists()\n with self.open('r') as f:\n # just count the number of non-whitespace lines\n n = sum(1 for line in f.readlines() if not line.isspace())\n return n", "title": "" }, { "docid": "1eb78163ec4d6773128398ba1dd294d1", "score": "0.6366845", "text": "def _passed_tests(self, metric_source_id: str) -> int:\n return self.__test_count(metric_source_id, \"pass\")", "title": "" }, { "docid": "5c672ebcc546e8699776a99f22ea7886", "score": "0.6343019", "text": "def file_count(self):\n return self.codebaseresources.files().count()", "title": "" }, { "docid": "87473846e873b25717185726d779c52d", "score": "0.63092077", "text": "def test_console_do_count(self):\n pass", "title": "" }, { "docid": "f6631cfa8531ed552ec93f63cc7d3e72", "score": "0.62624836", "text": "def __len__(self) -> int:\n cnt_docs = 0\n for fp in self.all_files:\n with codecs.open(fp, encoding=self.encoding) as csv:\n num_lines = sum(1 for line in csv)\n cnt_docs += num_lines - 1 if self.header else num_lines\n if cnt_docs > self.max_docs:\n break\n num_docs = min(cnt_docs, self.max_docs)\n return num_docs", "title": "" }, { "docid": "3ef440d61ec98bfa9f612faaa01231e4", "score": "0.62584925", "text": "def test_with_tokenizer_and_count_tokens(create_directory_file_and_return_path):\n assert (\n universal_file_counter(create_directory_file_and_return_path, \"txt\", str.split)\n == 6\n )", "title": "" }, { "docid": "ab380f085f709bacea396f541d0c5de9", "score": "0.6254017", "text": "def nfiles(self):\n return len(self.table)", "title": "" }, { "docid": "6ad58a4755b46f7a228d9563cf37ed64", "score": "0.62515783", "text": "def count_rows(filename):\n return len(get_rows(filename))", "title": "" }, { "docid": "825959fd48adff693ef0d7274666b376", "score": "0.62421715", "text": "def num_lines(self):\n return (self.name.count('\\n') + 
self.prerequisites.count('\\n') +\n self.recipe.count('\\n'))", "title": "" }, { "docid": "a40b04d382ce9994b41bb8274d5db7df", "score": "0.6236784", "text": "def test():\n tests = unittest.TestLoader().discover('tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "f0d70322a03f53f0a7fd5d5594de6e0a", "score": "0.6234982", "text": "def enumerate_tests(settings):\n return settings['tests']", "title": "" }, { "docid": "c549f354cbb270f74aa97c935a5b013a", "score": "0.62343174", "text": "def test():\n tests = unittest.TestLoader().discover('test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "c549f354cbb270f74aa97c935a5b013a", "score": "0.62343174", "text": "def test():\n tests = unittest.TestLoader().discover('test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "b9879eb29a1ab6c9fe23713b88f8ca00", "score": "0.6220788", "text": "def count(self):\n tasks = self.yml\n size = 0\n for task in tasks:\n size += self.__loc(task)\n\n return int(round(size/len(tasks)))", "title": "" }, { "docid": "40841445418eb97559e0953783605b94", "score": "0.6215676", "text": "def num_of_files(self):\n return len(self.file_list)", "title": "" }, { "docid": "f25b6b2753da0194c2f85d01c042ef8a", "score": "0.62156737", "text": "def test_countFunc3(self):\n\n return self.assertEqual(test.countFunc(9), \"Fizz\")", "title": "" }, { "docid": "356429b7306871380966e1ad567e573d", "score": "0.6211981", "text": "def __init__(self, filename):\n\n def lines_in_file():\n \"\"\"\n returns the number of lines in the file excluding the first one\n \"\"\"\n lines = 0\n for line in open(filename):\n lines += 1\n return lines - 1\n\n self.f = open(filename)\n self.number_test_cases = int(self.f.readline().strip())\n self.test_case_number = 0", "title": "" }, { "docid": "afda4c69283e1b02aac9f18b4018592e", "score": "0.6191061", "text": "def test_case_count_callback(filename, contentdir, targetfile): # FALSE NEGATIVE\n if os.path.islink(filename):\n return None\n\n fname = filename.lower()\n for test_dir in utils.get_test_dirs():\n if fname.startswith(test_dir) or (fname.find('/'+test_dir) > -1):\n return os.path.relpath(targetfile, contentdir)", "title": "" }, { "docid": "1c5032a8684ab9986a01490e690a664c", "score": "0.6185014", "text": "def number_of_lines(filename=\"\"):\n with open(filename) as NewText:\n return sum(1 for line in NewText)", "title": "" }, { "docid": "d5492f49146613d36ef8864d3b89b147", "score": "0.61828095", "text": "def get_current_count(file_name):\n if not os.path.exists(file_name):\n print(f\"The file \\\"{file_name}\\\" does not exist. 
Assuming new file.\")\n return 0\n\n return sum(1 for line in open(file_name))", "title": "" }, { "docid": "8384c7c6af79f29c30f673eca6200b2a", "score": "0.6180887", "text": "def file_len(self, fname):\n lines = 0\n\n f = open(fname, 'r')\n lines = len(f.readlines())\n f.close()\n return lines", "title": "" }, { "docid": "f1bacf2ef08d2e24b35f995e1fa49e2f", "score": "0.6176057", "text": "def test_file_number_moved(self):\n mov_files = len(os.listdir(\"/integration_tests/moved\"))\n self.assertEquals(3, mov_files)", "title": "" }, { "docid": "bc17c3237366af805354f4adfa54f42c", "score": "0.615013", "text": "def num_lines(file):\n return sum(1 for _ in open(file))", "title": "" }, { "docid": "b887eedcab314080d115c2b216a722f6", "score": "0.61458623", "text": "def getNumTimePoints(inDir):\n fp = os.path.join(inDir, 'Stack_frequency.txt')\n with open(fp) as file:\n nTimePts = int(file.readlines()[2])\n return nTimePts", "title": "" }, { "docid": "1d0a34e02d2060b1bbe316efe7600053", "score": "0.61110103", "text": "def file_count(self):\n\n return len(self.unique_files)", "title": "" }, { "docid": "e67ddc9b255e5f1b4da94f0a4d4789ce", "score": "0.6109432", "text": "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "e67ddc9b255e5f1b4da94f0a4d4789ce", "score": "0.6109432", "text": "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "e67ddc9b255e5f1b4da94f0a4d4789ce", "score": "0.6109432", "text": "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "88ca33f8d4dcd669c8a8edb563556a28", "score": "0.61074626", "text": "def num_hdu(self):\n with fits.open(self.filepath) as hdu_list:\n try:\n return len(hdu_list)\n except NameError:\n print(\"You need to give a correct name...\")", "title": "" }, { "docid": "e84d8df07b97e058917bda8a17b6ffb6", "score": "0.61070937", "text": "def file_len(path):\n return sum(1 for _ in open(path))", "title": "" }, { "docid": "1cfaeac668df7996f4d3bad253c267b5", "score": "0.6107061", "text": "def count():", "title": "" }, { "docid": "9e5c9d3dd3d3dbab567341aada7c4c8e", "score": "0.61054856", "text": "def _get_num_failed(test_output: str) -> int:\n match = re.search(r\"Failures: (\\d+)\", test_output)\n return int(match.group(1)) if match else 0", "title": "" }, { "docid": "528e4768ebbcf91128a91cbf4ce642d1", "score": "0.6085864", "text": "def find_number_of_pages_in_invoice(filename, data):\n print('Checking page count', filename)\n return len(data['analyzeResult']['readResults'])", "title": "" }, { "docid": "d19e5d98590ff6c83f456dfd2fbd088f", "score": "0.6083323", "text": "def test_countFunc5(self):\n\n return self.assertEqual(test.countFunc(50), \"Buzz\")", "title": "" }, { "docid": "df0b6ca10a0c7d0b67bff2cc29e58cfc", "score": "0.6079197", "text": "def number_of_lines(filename=\"\"):\n nb = 0\n with open(filename, encoding=\"utf-8\") as myfile:\n for i in myfile:\n nb += 1\n return nb", "title": "" }, { "docid": "46ee4f15872b3920991619e846c25edc", "score": "0.60717404", "text": "def filecounter(path):\n\n return len([f for f in os.listdir(path) if 
os.path.isfile(f)])", "title": "" }, { "docid": "51e2c62eca4d8af68b1e0ea684213762", "score": "0.6070731", "text": "def count_files(root):\n num_files = 0\n for dirpath, _, files in os.walk(root):\n for file in files:\n fmatch = FILE_PATTERN.match(file)\n assert fmatch\n fh = open(os.path.join(dirpath, file), 'r')\n text = fh.read()\n fh.close()\n # compare the number of occurrences with file name\n if text.count(\"PSIML\") == int(fmatch.group(1)):\n num_files += 1\n return num_files", "title": "" }, { "docid": "4986a4dc678411d27c5e051b07cd70f8", "score": "0.606843", "text": "def test_fetch_test_counts_debug(self):\n test_counts = shard_util.fetch_test_counts(DEBUG_APP_OTOOL_OUTPUT, False)\n self.assertEqual(len(test_counts), 5)", "title": "" }, { "docid": "e6a3f3c93e1d92726ed6f79cb515935b", "score": "0.6067479", "text": "def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.json')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "title": "" }, { "docid": "8584979b91d6ca2eeb20da4fe8e74cdd", "score": "0.6062153", "text": "def expected_counts():\n\n with open('/container_setup/monitoring-config.yml') as stream:\n config_data = yaml.load(stream)\n\n return config_data['cluster_node_count']", "title": "" }, { "docid": "a360b10885369a111f221d9c10f126ac", "score": "0.6060987", "text": "def get_nb_tasks(inputFile):\n nb_tasks = 0;\n try:\n res = [line.split() for line in open(inputFile).readlines()]\n for host in res:\n nb_tasks += 1\n except IOError:\n print 'Oh dear. No tasks file on node!'\n nb_tasks = -1\n return nb_tasks", "title": "" }, { "docid": "5faee36007e6b2b6102a15543bf129fa", "score": "0.60573053", "text": "def __test_count(self, report_url: str, result_type: str) -> int:\n try:\n root = self.__element_tree(report_url)\n except UrlOpener.url_open_exceptions:\n return -1\n except xml.etree.cElementTree.ParseError:\n return -1\n try:\n return int(root.findall(\"statistics/total/stat\")[1].get(result_type, -1))\n except IndexError as reason:\n logging.warning(\"Can't find %s test count in %s: %s\", result_type, report_url, reason)\n return -1", "title": "" }, { "docid": "1ba51decc3363321f5ea44d7931b8427", "score": "0.6053264", "text": "def number_of_lines(filename=\"\"):\n n_line = 0\n with open(filename) as my_file:\n lines = len(my_file.readlines())\n return lines", "title": "" }, { "docid": "2082bc9457154a9bd2057f9637acf53d", "score": "0.60524327", "text": "def number_of_lines(filename=\"\"):\n\n with open(filename, 'r', encoding='utf-8') as target:\n cont_line = 0\n for lines in target:\n cont_line += 1\n return cont_line", "title": "" }, { "docid": "88ca3b247739463b35f62c8c9df9c0d4", "score": "0.6048469", "text": "def get_num_samples(samples_file_path: str) -> int:\n with open(samples_file_path) as file:\n sample_numbers = file.read().replace('\\n', '')\n\n return int(sample_numbers)", "title": "" }, { "docid": "770ee2a93d4a9c925f36094aaf4e0d68", "score": "0.6045798", "text": "def count_data(path):\n matcher = re.compile(r'[0-9]+\\.json')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "title": "" }, { "docid": "d335ab1cd56a1137c1e6639cd4f40283", "score": "0.6039447", "text": "def get_test_size(self):\n return self.test_size", "title": "" }, { "docid": "29fba5601f0bd79bb8f8ed0d16b271f3", "score": "0.60359913", "text": "def _xml_failure_count(self):\n return len([test for test in self._test_list if 
test.xml_failure_elements])", "title": "" }, { "docid": "14a3834eacc414bb0c707967f5051d7d", "score": "0.6032492", "text": "def count_num_steps(self):\n return self.config.getint('Count', 'NumSteps')", "title": "" }, { "docid": "79b4b6a005077db59752d0e06d021cbb", "score": "0.6028711", "text": "def file_counter(_dir: Text) -> int:\n return sum([1 for p in Path(_dir).iterdir()])", "title": "" }, { "docid": "06fa762ade8763a66a6349ab75b79790", "score": "0.60278696", "text": "def _get_tries_count(self, file, provider):\n _, rec = self._get_site_rec(file.get(\"sites\", []), provider)\n return rec.get(\"tries\", 0)", "title": "" }, { "docid": "6be7a734b0dd98d474250968de3f6c63", "score": "0.6025967", "text": "def count_lines(path) -> int:\n with open(path, encoding=\"utf-8\") as file:\n return sum(1 for _ in file)", "title": "" }, { "docid": "188d09db3e0d38af040b5d7ba573c01a", "score": "0.6019116", "text": "def file_len(full_path):\n\n return sum(1 for line in open(full_path,\"rb\"))", "title": "" }, { "docid": "14142aed512571e0c45727a29389797f", "score": "0.6019004", "text": "def count_entries(fname):\n cmd = ['grep', '-c', \">\", fname]\n child = run(cmd, stdout=PIPE)\n count = int(child.stdout)\n return count", "title": "" }, { "docid": "68381c3af9a187effe4bd3f7148c7788", "score": "0.6016722", "text": "def count_lines(inputfile):\n with open(inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n return n", "title": "" }, { "docid": "11ba5b2d64a104290624e0a192d95054", "score": "0.6013914", "text": "def tests(self):\n return self.__test_harness.tests", "title": "" }, { "docid": "614de87cd8dd64e596b06c905c78defe", "score": "0.601102", "text": "def get_number_of_suite_failures(report_tree, suite_name):\n doc = tree.getroot()\n for elem in doc.findall('testsuite'):\n name = elem.get('name')\n if name == suite_name:\n return int(elem.get(\"failures\")) \n raise Exception(\"Suite {0} not in report!!\".format(suite_name))", "title": "" }, { "docid": "4f920ba5ff6fd4aaf7e0bb574961be90", "score": "0.60109895", "text": "def test():\n tests = unittest.TestLoader().discover(\"journalmylife/tests\", pattern=\"test*.py\")\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "d998c511ae044558345e0d225a818225", "score": "0.60079455", "text": "def checkEachLineCount(mat,file_name):\n n = sum(mat[0])\n# print \"file:\" , file_name\n for line in mat[1:]:\n if sum(line) !=n :\n print \"Line count !=%d (n value) in line number %d \" , n, line\n# assert all(sum(line) == n for line in mat[1:]), \"Line= count != %d (n value).\" % n\n return n", "title": "" }, { "docid": "bb33567425c605b4145fef76e279031e", "score": "0.6007721", "text": "def Numtrials(self):\n\t\treturn self._get_attribute('numtrials')", "title": "" }, { "docid": "4e8f7a570c2095303cee3a6a47fbb049", "score": "0.6006112", "text": "def Count():\n return CheckForError(lib.LineSpacings_Get_Count())", "title": "" }, { "docid": "aae3f848f95913f55abf30076fae8837", "score": "0.6000477", "text": "def test():\n tests = unittest.TestLoader().discover('app/test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1", "title": "" }, { "docid": "aae3f848f95913f55abf30076fae8837", "score": "0.6000477", "text": "def test():\n tests = unittest.TestLoader().discover('app/test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 
1", "title": "" }, { "docid": "d9b4b10c9f3723c315b134ddf512d50c", "score": "0.59933794", "text": "def count_strings(files_list):\n number_of_strings = 0\n for file in files_list:\n number_of_strings += file[\"Header\"][\"number of strings\"]\n\n return number_of_strings", "title": "" }, { "docid": "cbcaf9fb021f272a02b9e9134ae7659d", "score": "0.5991289", "text": "def number_of_lines(filename=\"\"):\n with open(filename) as f:\n num_l = 0\n for line in f:\n num_l += 1\n return num_l", "title": "" }, { "docid": "f8a9826f528a2eb72a0b4d7fec2b92ea", "score": "0.59888434", "text": "def count_lines(filename):\n line_count = 0\n for _ in open(filename):\n line_count += 1\n return line_count", "title": "" }, { "docid": "8bb642acac05a8a874049b5176121474", "score": "0.5987656", "text": "def test_query_testlists_count_runs(self):\n self._run_and_assert_query_testlist(extra_args=\"--count\")", "title": "" }, { "docid": "578b7955bc7b3b8870b3fc302f43599e", "score": "0.5985029", "text": "def fixture_num_data(request) -> int:\n return request.param", "title": "" }, { "docid": "0e7f7ec68fddc68565a7faacc9b4e6ca", "score": "0.59765077", "text": "def test_fetch_test_counts_release(self):\n test_counts = shard_util.fetch_test_counts(RELEASE_APP_OTOOL_OUTPUT, True)\n self.assertEqual(len(test_counts), 4)", "title": "" } ]
e6dcf15390001e188afa8c958c44f8c0
Print a comment if print_it == True
[ { "docid": "496621f2535e8c405f2da0f98efe3073", "score": "0.7507166", "text": "def print_comment(text, print_it=verbose):\n prefix = \"OpenCortex >>> \"\n if not isinstance(text, str): text = text.decode('ascii')\n if print_it:\n \n print(\"%s%s\"%(prefix, text.replace(\"\\n\", \"\\n\"+prefix)))", "title": "" } ]
[ { "docid": "71e0ebbe3d52f7aa63197fe9c465c018", "score": "0.71713185", "text": "def print_comment_v(text):\n print_comment(text, True)", "title": "" }, { "docid": "d80b74f4cf1fc4848b45fb7609e4ec77", "score": "0.6587678", "text": "def print_comment(comment):\n shell_type = get_shell_type()\n if shell_type == Shell.LINUX:\n print(f\"# {comment!s}\")\n elif shell_type == Shell.POWER_SHELL:\n print(f\"# {comment!s}\")\n else:\n print(f\"rem {comment!s}\")", "title": "" }, { "docid": "427b0cba42067990e9942e3196613b29", "score": "0.64928573", "text": "def printt(content, flag=False):\n if flag:\n print(content)", "title": "" }, { "docid": "daac42a2e70b7b19cd88f74630236091", "score": "0.64408845", "text": "def comment():", "title": "" }, { "docid": "a4c0c099fb1598a6273e978ca47e72c6", "score": "0.60062695", "text": "def comment(self,msg):\n self.unhang()\n self.indent()\n print (msg,end='\\n',flush=True, file=self.fp)\n self.hanging=False", "title": "" }, { "docid": "806ec93bddbf9df701746446fedd6760", "score": "0.5979922", "text": "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "title": "" }, { "docid": "d9b2ee0b7ea7ca5b14e539201770172f", "score": "0.5957404", "text": "def print_comments():\n with open('a_cpp_file.cpp', 'r') as file:\n data = file.read()\n to_print = ''\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '*' and data[i-2] == '/':\n should_print = True\n if char == '*' and data[i+1] == '/' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '/' and data[i-2] == '/':\n should_print = True\n if char == '\\n' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char", "title": "" }, { "docid": "538d0d7d1bfaba28f0afb05df7130940", "score": "0.59459364", "text": "def sys_comment(comment, is_error=False):\n COMMENT_STR = \"[*]\"\n if is_error:\n COMMENT_STR = \"[X]\"\n\n print_centered(\"{} {} {}\".format(COMMENT_STR, comment, COMMENT_STR), use_logo=True)\n\n return None", "title": "" }, { "docid": "3864186c6d5174170f9b796a378d8506", "score": "0.5902197", "text": "def do_comments(self, line):\n for comment in self.review.comments():\n print(comment)", "title": "" }, { "docid": "9215a1d05728dac5593d7c99c7815eb7", "score": "0.5771661", "text": "def visit_comment(self, node):\n self.printer.comment(node.xml_value)\n return", "title": "" }, { "docid": "efafd87d6c0f6632b9498d94d60f68ff", "score": "0.57525647", "text": "def print_optional(string, print_in_log):\n # type: (str, bool) -> None\n if print_in_log:\n print(string)", "title": "" }, { "docid": "f5d2efc47933d34eee8c255061ed1308", "score": "0.57493377", "text": "def debug_print(self, *content):\n if self.debug:\n print(*content)", "title": "" }, { "docid": "0ec367afb6be36410b4225e8fb434f02", "score": "0.57452327", "text": "def should_add_pr_comment(self):\n pass", "title": "" }, { "docid": "78a97558a86ac821524bc6ba916b077e", "score": "0.56900966", "text": "def test_print(chikin):\n chikin.print()", "title": "" }, { "docid": "d82d2704218a37b40496df86a818b42a", "score": "0.5650392", "text": "def debug_print(text):\r\n if settings.debug:\r\n print (text)", "title": "" }, { "docid": "2f31fbd088fa0858c54385bbf06ef6dd", "score": "0.56363577", "text": "def commentOut(self):\n self.__commentOut = True", "title": "" }, { "docid": "72b600c805f113975c89e211e5c5d0f8", "score": "0.5625621", 
"text": "def comment(*args, symbol='# '):\n\n data = indent(' '.join(map(str, args)), symbol)\n real_print(data)", "title": "" }, { "docid": "2f9b5795f0f828f90af8cab6aa7e35d9", "score": "0.5515027", "text": "def my_print(self):\n if self.__size > 0:\n print(\"\\n\" * self.__position[1], end=\"\")\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n else:\n print()", "title": "" }, { "docid": "67ad89dd08b486b65bbe3d3fe9707c56", "score": "0.54472077", "text": "def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)", "title": "" }, { "docid": "fd60f0525262ac083a83fe4f5baaf8c9", "score": "0.541203", "text": "def print_if_debug(debug, cadena):\n if debug:\n print(time.strftime(\"%H:%M:%S DEBUG => \") + cadena)", "title": "" }, { "docid": "260d12e738f8dc3acf255fb89383ed9c", "score": "0.54104936", "text": "def my_print(self):\n if self.__size is not 0:\n for ite in range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()", "title": "" }, { "docid": "84894822ccb1813276ef5fa457f7bb0f", "score": "0.5409052", "text": "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "title": "" }, { "docid": "9570d3a259a7cd987446a62741e9c521", "score": "0.5406983", "text": "def debug(*text):\n if False:\n # if True:\n print(' '.join(str(t) for t in text))", "title": "" }, { "docid": "ec1bc67390310d8dc65ba948b1b09482", "score": "0.5400031", "text": "def comment(self, content):\n pass", "title": "" }, { "docid": "5a779a5e5e483f25503b5c37e67b9937", "score": "0.5397982", "text": "def print_func(path, title, lyrics):\n # clear the screen\n print(chr(27) + '[2J', chr(27) + '[;H')\n\n if path != None:\n print(path)\n print(lyrics)", "title": "" }, { "docid": "538729db7e6c4b95e2563db9716b901d", "score": "0.5395983", "text": "def my_print(self):\n if self.__size == 0:\n print()\n else:\n print(\"\\n\" * self.__position[1], end='')\n for x in range(self.__size):\n print(\" \" * self.__position[0], end='')\n print(\"#\" * self.__size)", "title": "" }, { "docid": "7af37ee6e3eae8797c2e12c4d1559b05", "score": "0.53617895", "text": "def comment(self, comment):\n self.appendString('%' + comment + '\\n')", "title": "" }, { "docid": "22f24f4de43e482cdcee1237131b621f", "score": "0.53593504", "text": "def test_print_info(clarisse):\n info = \"test print info\"\n assert bool(clarisse.print_info(info)) is False", "title": "" }, { "docid": "ffa0a0ba18593fb644fa2c9bd970cbf0", "score": "0.53302044", "text": "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "title": "" }, { "docid": "709673ac04544a5c49085bbdc560401d", "score": "0.5320494", "text": "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"", "title": "" }, { "docid": "1b70112359ec88167b81cecbe48a98f5", "score": "0.530903", "text": "def _debug_print(message):\n\n if _debug == True:\n print(message)", "title": "" }, { "docid": "166a6535fc45679d7c35a791804ce0c3", "score": "0.53063834", "text": "def Print(self, text):\n 
pass", "title": "" }, { "docid": "1c7983c22103d888fde435e97cbc1ee4", "score": "0.5297789", "text": "def my_print(self):\n if self.size == 0:\n print(\"\")\n return\n for j in range(self.__position[1]):\n print(\"\")\n for i in range(self.size):\n if self.__position[0] > 0:\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.size)", "title": "" }, { "docid": "0126c6c35b94d71c110cc89e7a69124a", "score": "0.5274779", "text": "def my_print(self):\n if self.__size > 0:\n for k in range(self.__position[1]):\n print()\n for i in range(self.__size):\n for j in range(self.__position[0]):\n print(\" \", end='')\n print(\"#\" * self.__size)\n else:\n print()", "title": "" }, { "docid": "8d52854cb7e95bec630d3f93b5ae62d8", "score": "0.5265684", "text": "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "title": "" }, { "docid": "a25609212efba75bac3b8518a31854f3", "score": "0.52557945", "text": "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "title": "" }, { "docid": "5e2f9f811e2adb1add92e7342862a427", "score": "0.5251463", "text": "def repl_print_statements():\n pass", "title": "" }, { "docid": "1f853561af742378d5d6b088d995f518", "score": "0.5247616", "text": "def debug(self, text):\n if self.PRINT_DEBUG:\n print('[FileHistory] ' + text)", "title": "" }, { "docid": "b73c70eba73221d0151eb3691fd427bd", "score": "0.5245821", "text": "def print_out():\n pass", "title": "" }, { "docid": "4c23af65629fa7092f2660e3e1031244", "score": "0.5242555", "text": "def writeComment(self, line):\n self.write(\" //\" + line + \"\\n\")", "title": "" }, { "docid": "b4463b4cfb6de6471b06d9f40f5cb892", "score": "0.52126455", "text": "def printMe():\n\n print(\"Meeeeeee!\")", "title": "" }, { "docid": "83e0097da04e974cdf687522fdc734a0", "score": "0.520658", "text": "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "title": "" }, { "docid": "3b3caff1e76e17e25a994f3e61b87645", "score": "0.52061373", "text": "def Print (redirect = True) :\n lock = sys.hal_log_values.get(\"Lock\", False)\n if not lock :\n sys.hal_log_values [\"__log_display\"] = redirect", "title": "" }, { "docid": "c7633d24f507eb053ead19245283bf36", "score": "0.5204576", "text": "def printMe():\n\n print(\"meeeeeee\")", "title": "" }, { "docid": "3422a3cf9a19aa93e9a18e5aaf4271ce", "score": "0.5197402", "text": "def show_comments(self):\n\n for index, comment in enumerate(self.comments):\n if index == 0:\n message.heading(f\"[{self.name}]\")\n message.info(comment)\n\n if len(self.comments) > 0:\n log.write(f\"FILE ({self.name})\\n\" + \"\\n\".join(self.comments))", "title": "" }, { "docid": "382e78c25a239ace00bfe558cebf040d", "score": "0.5193732", "text": "def start_comment(self, a, token):\n self.produce(\"comment\", token)\n logging.debug(\"in comment\")\n self.begin('comment')", "title": "" }, { "docid": "be4eab31bd9950885d84b840f1e2fcf0", "score": "0.5192176", "text": "def printme(self, line):\n self.otag.printme(line)", "title": "" }, { "docid": "13c47fa314706afcf9e9f66dceb50694", "score": "0.51849127", "text": "def comment(self, comment):\r\n\r\n core.FW_conf['connection'].comment(comment)", "title": "" }, { "docid": "3c3a7c87d87cd835fa246face05870dd", "score": "0.51781183", "text": "def 
_print_custom(self):\n pass", "title": "" }, { "docid": "fbb0615921c3621d529b01c98d3526aa", "score": "0.51750076", "text": "def debugPrint(dbg, msg):\n if(dbg):\n print(msg)", "title": "" }, { "docid": "6b7606b6db16064d822ccf1d2e56ce0c", "score": "0.5174254", "text": "def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)", "title": "" }, { "docid": "7e2a902bcd2c01d593a8e46c7f4d0c52", "score": "0.5170378", "text": "def prnt(printstring, silent=False):\n if not silent:\n stdout.write(printstring)", "title": "" }, { "docid": "072255e6a5105fbf159803e54b46b073", "score": "0.51602614", "text": "def Comment(self, comment):\n self.script.append(\"\")\n for i in comment.split(\"\\n\"):\n self.script.append(\"# \" + i)\n self.script.append(\"\")", "title": "" }, { "docid": "3338ce4608db5c76d5486ba3dfe9efef", "score": "0.5158159", "text": "def printMe():\n print(\"meeeeeee\")", "title": "" }, { "docid": "9728cc33cb5495d612785e30afdbe7fb", "score": "0.5154199", "text": "def my_print(self):\n if self.__size == 0:\n print()\n else:\n for i in range(self.__size):\n for j in range(self.__size):\n print(\"#\", end=\"\")\n print()", "title": "" }, { "docid": "e727d7a2dfae4bf402fd3e69b29b1204", "score": "0.5150764", "text": "def isCommentedOut(self):\n return self.__commentOut", "title": "" }, { "docid": "0c4c3f842d6d646f8e7bd8de9048fcff", "score": "0.5147722", "text": "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "title": "" }, { "docid": "71918d48f3d035ece6a18325eda77283", "score": "0.514022", "text": "def print_line():\n print('+ - - - - + - - - - +'),", "title": "" }, { "docid": "cdb86d95b350e77791413ae9e46ec76c", "score": "0.5130822", "text": "def setup_print(self, t, message):\n if self.config['debug']:\n return tf.Print(t, (t,), message=message+\": \", summarize=10)\n return t", "title": "" }, { "docid": "e30d78594f7bdad312a9a7edd97efca6", "score": "0.51145995", "text": "def what_in_string(printable_string):\n if SCCS_ID in printable_string:\n content = re.sub(r\"^.*\" + re.escape(SCCS_ID), \"\", printable_string)\n content = re.sub(r'(\"|>|\\n|\\\\).*', \"\", content)\n if parameters[\"No formatting\"]:\n print(content)\n else:\n print(\"\\t\" + content)\n\n return True\n\n return False", "title": "" }, { "docid": "e2fb8dc08b69f42aad989562d64018ad", "score": "0.51114774", "text": "def write_comment(fmt):\n\n fit_txt=[fmt.desc[i:i+65] for i in range(0, len(fmt.desc), 65)]\n\n com_txt=[]\n com_txt.append(f\"\\t*{'-' * 68}*\")\n com_txt.append(f\"\\t|\\tFormat: {fmt.name + '_' + fmt.category}\\n\\t|\\tExtensible:{fmt.is_extensible}\")\n com_txt.append(f\"\\t|\\tUsage: {fmt.category_desc}\")\n com_txt.append(f\"\\t|\\tDescription: \")\n [com_txt.append(f\"\\t|\\t {i}\") for i in fit_txt]\n com_txt.append(f\"\\t*{'-' * 68}*;\\n\")\n\n return \"\\n\".join(com_txt)", "title": "" }, { "docid": "923731808c1b75ce1ba41d5df7a6f02b", "score": "0.51090586", "text": "def print_(self, s: str) -> None:", "title": "" }, { "docid": "d8fec6960eb9803738a5733d808e7620", "score": "0.5097462", "text": "def debug_print(input_data, debug_flag):\n if debug_flag:\n if input_data:\n #print(\"################################################ debug_print #############################################################\")\n for item in input_data:\n print(\" {0:<60}\".format(item))\n 
#print(\"##############################################################################################################################\")\n else:\n print(\" {0:<60}\".format(input_data))", "title": "" }, { "docid": "17c802898d6bba8ac9711bdb12e91b7c", "score": "0.5092612", "text": "def do_say(self, line):\n if line != '':\n print(line)", "title": "" }, { "docid": "2bc7e0ef87e5e091f762ac054c4724da", "score": "0.50894624", "text": "def print_content(self):\n if self.piece!=None:\n print('%s : %s %s' % (self.name, self.piece.color, self.piece.piece_type))\n else:\n print('%s : empty' % (self.name))", "title": "" }, { "docid": "ea56e348cd6d4b675616ce5b0ee9568e", "score": "0.50893533", "text": "def printdebug(self, msg):\n if self.debug > 0:\n print(msg)", "title": "" }, { "docid": "815a8fadf8793159bd562a1a40e82cd2", "score": "0.5083997", "text": "def do_print(self, line=\"abc\", *, flag: boolean=True, repeat: int=1):\n if flag:\n for i in range(repeat):\n print(line, file=self.stdout)", "title": "" }, { "docid": "2572b600d26cca12d3ee510fc23a6638", "score": "0.5080383", "text": "def _verbose(self,text):\n if self.verbose:\n print(text)", "title": "" }, { "docid": "4181771f9bc081999c5bef720939f751", "score": "0.5069398", "text": "def _printable(self):\n pass", "title": "" }, { "docid": "fe24234b9f1a9b9408be0b0d8e38d3a6", "score": "0.5063737", "text": "def generateCommentBlock(self):\n\t\tself.printt_cls(\"# ===================================================================\", cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"# WARNING: This is an auto-generated file. Do not modify this file.\", cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"# ===================================================================\", cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"#\", cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"# {} Model\".format(self.objName), cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"#\", cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"# ===================================================================\", cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"# Generated on: {ts}\".format(ts = str(datetime.now())), cs.PASTEL_YELLOW)\n\t\tself.printt_cls(\"# ===================================================================\", cs.PASTEL_YELLOW)", "title": "" }, { "docid": "d0ad82ff44e295a33e3a12bb32661e52", "score": "0.50615656", "text": "def strprint(self, mystr):\n if self.is_verbose is True:\n print(mystr)\n else:\n pass\n return", "title": "" }, { "docid": "f5def6714d3c8151ec12722e379e0c62", "score": "0.5044993", "text": "def do_p(self, line):\n if not self.current:\n print_table(self.get_profiles(), self.vertical_display)", "title": "" }, { "docid": "91b5fbb0da593bd505e9a48ae7f78a78", "score": "0.5041624", "text": "def print_debug(context: str = \"\") -> None:\r\n print(context)\r\n print(\"This is the current board\")\r\n print(example)\r\n print(\"This is the conflict space\")\r\n print(conflict_space)\r\n print(\"This is the safeboard\")\r\n print(safeboard)", "title": "" }, { "docid": "33aee9cd1cfd7f0af3c934e948872be2", "score": "0.50415945", "text": "def double_line():\n print (\"=============================================================\")", "title": "" }, { "docid": "738268595041c801a75d0b4e1666294f", "score": "0.5041228", "text": "def print_progress(c, t, K, t_end, flag):\r\n if (t > (t_end / 20.0)) and flag == True:\r\n print('1/20 of the simulation is done.')\r\n flag = False\r\n\r\n # debugging information\r\n print('\\n======================\\nTime: %g' % (t))\r\n print('Clones:\\n', c)\r\n print('\\nInteractions in 
Matrix K:\\n', np.array(K))\r\n print('Dimension of Matrix K:\\t%s' % str(K.shape))\r\n return flag", "title": "" }, { "docid": "0da2dc2eaba68dd92f3f783f1c2ce176", "score": "0.502702", "text": "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "title": "" }, { "docid": "4977d16274778470e5f8955fabdf2294", "score": "0.5024504", "text": "def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "title": "" }, { "docid": "c760f37dde0a9a97ede18524b34829a2", "score": "0.5019976", "text": "def is_printing(line):\r\n return line.startswith('G1 ') and 'X' in line and 'Y' in line and 'E' in line", "title": "" }, { "docid": "4a3636567f96dbeaac152b2baec55643", "score": "0.5009598", "text": "def mips_comment(self, comment_string: str, do_comment: bool) -> str:\n\n return f'{self.code_indent_string()}# {comment_string}\\n' if do_comment else \"\"", "title": "" }, { "docid": "9e57b4954e79946e2f0a6d4d1e0d08c0", "score": "0.5007529", "text": "def pr(string, verbose):\n if(verbose):\n print(string)", "title": "" }, { "docid": "f4d6f011beabfb760b9e8edc41ab5a81", "score": "0.5006249", "text": "def d_print(msg):\n if (DEBUG == 1):\n print(msg)", "title": "" }, { "docid": "230325a72bf27e2735fcfd68fecc5d6d", "score": "0.50009805", "text": "def comment(self):\r\n editor = self.get_current_editor()\r\n if editor is not None:\r\n editor.comment()", "title": "" }, { "docid": "46658b74be9d0f1ecc7896c85e8e208d", "score": "0.49980518", "text": "def detecteComments(liste, j, i):\n\n\treturn liste[j][i] == '#' or (i < len(liste[j])-2 and liste[j][i]==\"\\\"\" and liste[j][i+1]==\"\\\"\" and liste[j][i+2]==\"\\\"\")", "title": "" }, { "docid": "57970dab5f6d2ad702a1064c2cf84529", "score": "0.49928725", "text": "def useful():\n\n print('I do something.')", "title": "" }, { "docid": "30b401be1d708cf0eeec10cdc86fb27f", "score": "0.4985576", "text": "def start_print(outfile: TextIO) -> None:\n outfile.write(\"<!DOCTYPE HTML>\\n\")\n outfile.write(\"<html lang=\\\"en\\\">\\n\")\n outfile.write(\" <head>\\n\")\n outfile.write(\" <meta charset=\\\"utf-8\\\" />\\n\")\n outfile.write(\" <title>Fiddler Crabs</title>\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/uca_style.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/print.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/font-awesome/css/fontawesome.min.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/font-awesome/css/solid.min.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/font-awesome/css/brands.min.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/font-awesome/css/regular.min.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"resources/flag-icon-css/css/flag-icons.min.css\\\" />\\n\")\n outfile.write(\" </head>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <body>\\n\")", "title": "" }, { "docid": "4591389302197a542ad2ac6dedbbc856", "score": "0.4983756", "text": "def debug_print(self):\n os.system('cls' if os.name 
== 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')", "title": "" }, { "docid": "48c1b25f2dfb154a6be3957afb6b3147", "score": "0.4970358", "text": "def simpleStatusPrint(self, i=1, sparse=1):\n if i % sparse == 0:\n print(\"*\", end='', flush=True)\n if i % (50 * sparse) == 0 and i > 0:\n print(\"(\"+str(i)+\")\")", "title": "" }, { "docid": "04a107b2713486441ee7cf0fe24171e9", "score": "0.49688926", "text": "def print_post():\n print('| | |'),", "title": "" }, { "docid": "45239459533dc4e5f3bfa0968edabd17", "score": "0.49668917", "text": "def do_comment(self, line):\n if line.strip() != \"\":\n self.review.comment(line)\n return\n\n with tempfile.NamedTemporaryFile(suffix='.revu.md') as temp:\n subprocess.call(['vim', temp.name])\n with open(temp.name, 'r') as fd:\n self.review.comment(fd.read())", "title": "" }, { "docid": "7e7507ff196653cd72cc07cd73c1990c", "score": "0.49521852", "text": "def print(self):\n # Your implementation here", "title": "" }, { "docid": "cbb2cc05b47f66d31f0ea852a01be4d3", "score": "0.49514318", "text": "def printTurn(self,board,tile):\n if tile == board.BLACK:\n print \"\\n\\nBlack turn 'O'\"\n else:\n print \"\\n\\nWhite turn 'X'\"", "title": "" }, { "docid": "96b79389e5b93749ce44d2fe53491788", "score": "0.4950454", "text": "def print(self):\r\n self.print_avec_separateur()", "title": "" }, { "docid": "f0ccbf5ad816c9648954983cae216181", "score": "0.4950184", "text": "def comment(self, comment): # type: (str) -> None\n self._tmp_comment = comment", "title": "" }, { "docid": "7a4371dda85506f62b3309342a6ba4d2", "score": "0.49429888", "text": "def write_if(self, label):\n self._write_line('if-goto ' + label)", "title": "" }, { "docid": "59935a6188366b13c1ebc0fce78e7611", "score": "0.49381617", "text": "def report_success(self, out, test, example, got) -> None:\n # Check if the verbose string is defined and display the\n # relevant virtualtext if so\n if self.verbose_string is not None:\n self.nvim.api.buf_set_virtual_text(\n 0, self.namespace_id, example.lineno + test.lineno + 1,\n [[\"# \" + self.verbose_string, \"Comment\"]], {})", "title": "" }, { "docid": "47752fd9bd10f58c0ad6dc2dc1e52676", "score": "0.49338868", "text": "def do_print(self, cmd):\n try:\n print(self.EvalExpression(cmd))\n except:\n pass", "title": "" }, { "docid": "0d569fb97ee3a5fd2dd7ee41224dfc06", "score": "0.4933841", "text": "def goodbye_printer(return_it=False, only_conc=False):\n if not kwargs.get('printstatus', True):\n return\n thetime = strftime(\"%H:%M:%S\", localtime())\n if only_conc:\n \n show_me = (thetime, len(conc_df))\n finalstring = '\\n\\n%s: Concordancing finished! %d results.' % show_me\n else:\n finalstring = '\\n\\n%s: Interrogation finished!' % thetime\n if countmode:\n finalstring += ' %d matches.' % tot\n else:\n dat = (numentries, total_total)\n finalstring += ' %d unique results, %d total occurrences.' 
% dat\n if return_it:\n return finalstring\n else:\n print(finalstring)", "title": "" }, { "docid": "caca2bc0e3b0af050968224e13dfe610", "score": "0.49195275", "text": "def _comments(self, r, widget, **attr):\n\n label = widget.get(\"label\", \"\")\n # Activate if-required\n #if label and isinstance(label, str):\n if label:\n label = current.T(label)\n icon = widget.get(\"icon\", \"\")\n if icon:\n icon = ICON(icon)\n\n _class = self._lookup_class(r, widget)\n\n comments = \"@ToDo\"\n\n # Render the widget\n output = DIV(H4(icon,\n label,\n _class = \"profile-sub-header\",\n ),\n DIV(comments,\n _class = \"card-holder\",\n ),\n _class = _class,\n )\n\n return output", "title": "" }, { "docid": "e3cc2f65487357b04fbbf4a36e1da4ac", "score": "0.49190617", "text": "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "title": "" }, { "docid": "37419d77516c91628a2436854484830c", "score": "0.49134377", "text": "def DEBUG_PRINT(msg, obj='', suffix=''):\n if PRINT_DEBUGS:\n print msg, obj, suffix", "title": "" } ]
b60091eff4fa1a3e17b561797c93ad9a
Returns the MongoDB aggregation pipeline for the stage.
[ { "docid": "28197be9cd5f99638ac517947cc55d2f", "score": "0.0", "text": "def to_mongo(self, sample_collection):\n if not self.has_view:\n raise ValueError(\n \"%s stages use `load_view()`, not `to_mongo()`\" % type(self)\n )\n\n raise NotImplementedError(\"subclasses must implement `to_mongo()`\")", "title": "" } ]
[ { "docid": "60548f516377e285b92b4677a5d21114", "score": "0.65483505", "text": "def aggregation(self) -> Optional[pulumi.Input['AggregationArgs']]:\n return pulumi.get(self, \"aggregation\")", "title": "" }, { "docid": "60548f516377e285b92b4677a5d21114", "score": "0.65483505", "text": "def aggregation(self) -> Optional[pulumi.Input['AggregationArgs']]:\n return pulumi.get(self, \"aggregation\")", "title": "" }, { "docid": "3bf38e707804696214b01d57dcdbd60b", "score": "0.62747353", "text": "def pipeline(self):\n return self._pipeline", "title": "" }, { "docid": "85fffe3e6f468854c5a5d6809eab0e50", "score": "0.6094376", "text": "def _get_aggregation(self):\n return self.__aggregation", "title": "" }, { "docid": "3b694b01d6433fffa9ce4a0f5b7ae21a", "score": "0.5864583", "text": "def _get_aggregate(self):\n return self.__aggregate", "title": "" }, { "docid": "16400cab3335c288c8f1c35b23a0e537", "score": "0.5858932", "text": "def get_pipeline():", "title": "" }, { "docid": "d1148e148d154ac66b42b544255cd2be", "score": "0.5796516", "text": "def aggregation(self):\n # type: () -> int\n return self._get_property('aggregation')", "title": "" }, { "docid": "74aeac828e773eb92e1d81f7ce667a67", "score": "0.57215834", "text": "def __aggregate(self, pipeline, *args, **kwargs):\n raise NotImplementedError(\"Not yet implemented since aggregate pipelines can be non TopicStore compatible docs\")\n # return TopicStoreCursor(self.collection.aggregate(pipeline, *args, **kwargs))", "title": "" }, { "docid": "74dfe47a28cddd1911707d97098d3a4d", "score": "0.57199186", "text": "def get_optimize_pipeline(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "title": "" }, { "docid": "76cd45943b730d640a2507ac814d3fcf", "score": "0.5559771", "text": "def aggregate(self, pipeline, operation: str, db_type: str, col: str) -> Response:\n pipeline = obj_to_utf8_bytes(pipeline)\n meta = self._make_meta(db_type, col)\n aggregate_request = server_pb2.AggregateRequest(pipeline=pipeline, operation=operation, meta=meta)\n return Response(self.stub.Aggregate(aggregate_request))", "title": "" }, { "docid": "f92271e8ef756f23ab5310b404249412", "score": "0.5544029", "text": "def test_full_pipeline(self):\n listener = WhiteListEventListener(\"aggregate\")\n results = listener.results\n client = rs_or_single_client(event_listeners=[listener])\n self.addCleanup(client.close)\n coll = client[self.db.name][self.coll.name]\n with coll.watch([{'$project': {'foo': 0}}]) as _:\n pass\n\n self.assertEqual(1, len(results['started']))\n command = results['started'][0]\n self.assertEqual('aggregate', command.command_name)\n self.assertEqual([\n {'$changeStream': {'fullDocument': 'default'}},\n {'$project': {'foo': 0}}], \n command.command['pipeline'])", "title": "" }, { "docid": "6a56c535a7cfd2bad85114d0c70bdf87", "score": "0.55379105", "text": "def parse_aggregate(self):\n # inner functions\n #def handle_string(astring):\n # adict = self.handle_string(astring)\n # return adict\n\n def handle_dict(adict):#{stage_dict}\n fmt = ''\n l_fmt = ''\n d_fmt = ''\n fmt_list = []\n l_fmt_list = []\n d_fmt_list = []\n for key, value in adict.items():\n if isinstance(value, list):\n for val in value:\n l_fmt_list.append(handle_dict(val))\n\n if key == '$or':\n l_fmt = ' or '.join(l_fmt_list)\n elif key == '$and':\n l_fmt = ' and '.join(l_fmt_list)\n fmt_list.append(l_fmt)\n elif isinstance(value, dict):\n\n if key == '$match':\n d_fmt_list.append(handle_dict(value))\n elif key == '$group':\n d_fmt_list.append(handle_dict(value))\n elif key == 
'$sort':\n d_fmt_list.append(handle_dict(value))\n elif key == '$limit':\n d_fmt_list.append(handle_dict(value))\n elif key == '$text':\n d_fmt_list.append(handle_dict(value))\n \n elif key == '$push':\n pass\n elif key == 'addToSet':\n pass\n elif key == '$stdDevSamp':\n pass\n else:\n for k, v in value.items():\n if k == '$lt':\n d_fmt_list.append('{0} < {1}'.format(key, v))\n elif k == '$lte':\n d_fmt_list.append('{0} <= {1}'.format(key, v))\n elif k == '$gt':\n d_fmt_list.append('{0} > {1}'.format(key, v))\n elif k == '$gte':\n d_fmt_list.append('{0} >= {1}'.format(key, v))\n elif k == '$ne':\n d_fmt_list.append('{0} != {1}'.format(key, v))\n elif k == '$eq':\n d_fmt_list.append('{0} = {1}'.format(key, v))\n elif k == '$sum':\n sum_val = value['$sum']\n if key == count:\n d_fmt_list.append('count({0})'.format('*'))\n elif not isinstance(sum_val,list):\n d_fmt_list.append('sum({0}) as {1}'.format(sum_val.replace('$',''), key))\n elif isinstance(sum_val, dict):\n d_fmt_list.append('sum({0}) as {1}'.format(handle_dict(sum_val), key))\n else:\n d_fmt_list.append('sum({0}) as {1}'.format(','.join(sum_val).repalce('$', ''),key))\n elif k == '$avg':\n avg_val = value['$avg']\n if not isinstance(avg_val, list):\n d_fmt_list.append('avg({0}) as {1}'.format(avg_val.replace('$', ''), key))\n else:\n d_fmt_list.append('avg({0} as {1}'.format(avg_\n elif k == '$multiply':\n pass \n\n elif k == '$first':\n\n pass\n\n elif k == '$last':\n\n pass\n\n elif k == '$max':\n\n pass\n\n elif k == '$min':\n pass\n if len(d_fmt_list) == 1:\n d_fmt = d_fmt_list[0]\n else:\n d_fmt = ' and '.join(d_fmt_list)\n fmt_list.append(d_fmt) \n \n \n else:\n fmt_list.append('{0}={1}'.format(key, value))\n if len(adict) == 1:\n fmt = fmt_list[0]\n return fmt\n \n fmt = ' and '.join(fmt_list)\n return fmt\n\n \n def handle_match_stage(astring):\n match_fmt_string = ''\n match_dict = self.handle_string(astring)\n print 'match_args_dict: '\n print match_dict\n match_fmt_string = handle_dict(match_dict)\n print 'match_fmt_string: '\n print match_fmt_string\n return match_fmt_string\n\n def handle_group_stage(astring):\n proj_fmt_list = []\n proj_fmt_string = ''\n group_fmt_list = []\n group_fmt_string = ''\n group_dict = self.handle_string(astring)\n print 'group_args_dict: '\n print group_dict\n for key, val in group_dict.items():\n if key == '_id':\n for k in val.keys():\n group_fmt_list.append(k)\n proj_fmt_list.append(k)\n else:\n proj_fmt_list.append(handle_dict({key:val}))\n\n proj_fmt_string = ','.join(proj_fmt_list)\n group_fmt_string = ','.join(group_fmt_list)\n return proj_fmt_string, group_fmt_string\n \n \n def handle_projection_stage(astring):\n proj_fmt_list = []\n projection_dict = self.handle_string(astring)\n for key, val in projection_dict.items():\n if val != 0 or val != False:\n proj_fmt_list.append(key)\n proj_fmt = ','.join(proj_fmt_list)\n return proj_fmt\n\n def handle_sort_stage(astring):\n sort_fmt_list = []\n sort_dict = self.handle_string(astring)\n for key, val in sort_dict.itmes():\n if val == 1:\n sort_fmt_list.append('{0} asc'.format(key))\n else:\n sort_fmt_list.append('{0} desc'.format(key))\n sort_fmt = ','.join(sort_fmt_list)\n return sort_fmt\n\n def handle_limit_stage(astring):\n limit_dict = self.handle_string(astring)\n limit_fmt = limit_dict.values()[0]\n return limit_fmt\n\n\n #end\n\n AGGREGATE_ARGS_PATTERN = r'\\s*(\\{\\s*(?P<stagename>[$\\w]+)\\s*:\\s*(?P<stageargs>\\{[^}]+([}][^}]+)*\\}))\\s*'\n #AGGREGATE_ARGS_PATTERN = r'\\s*(\\{[^}]+([}][^}]+)*\\})\\s*'\n 
#AGGREGATE_ARGS_PATTERN = r'\\s*(\\{.+?\\})\\s*'\n aggregate_m_iter = re.finditer(AGGREGATE_ARGS_PATTERN, self.op_args) \n #print list(aggregate_m_iter)\n match_fmt = ''\n projection_fmt = ''\n projection_fmt_list = []\n options_fmt = ''\n options_fmt_list = []\n stage_dict = {}\n stage_name_list = []\n\n for a_m in aggregate_m_iter:\n stage_string = a_m.group(1)\n print 'stage_string: '\n print stage_string\n #stage_dict = handle_string(stage_string)\n stage_name = a_m.group('stagename')\n stage_args = a_m.group('stageargs')\n stage_name_list.append(stage_name)\n stage_dict[stage_name] = stage_args\n print 'stage_names: '\n print stage_name_list\n\n for stage_name, stage_args in stage_dict.items():\n if stage_name == '$match':\n if '$match' in stage_name_list and '$group' in stage_name_list and stage_name_list.index('$match') > stage_name_list.index('$group'):\n having_fmt = 'having ' + handle_match_stage(stage_args)\n else:\n match_fmt = 'where ' + handle_match_stage(stage_args)\n options_fmt_list.append(match_fmt)\n \n\n elif stage_name == '$group':\n proj_fmt, group_fmt = handle_group_stage(stage_args)\n group_fmt = 'group by ' + group_fmt\n projection_fmt_list.append(proj_fmt)\n options_fmt_list.append(group_fmt)\n options_fmt_list.append(having_fmt)\n #elif stage_name == '$match':\n # match_fmt = ' having ' + handle_match(stage_args)\n # options_fmt = options_fmt + match_fmt\n \n elif stage_name == '$projection':\n proj_fmt = handle_projection_stage(stage_args)\n projection_fmt_list.append(proj_fmt)\n elif stage_name == '$sort':\n sort_fmt = 'order by ' + handle_sort_stage(stage_args)\n options_fmt_list.append(sort_fmt)\n elif stage_name == '$limit':\n limit_fmt = 'limit ' + handle_limit_stage(stage_args)\n options_fmt_list.append(limit_fmt)\n \n if len(projection_fmt_list) == 1 or projection_fmt_list == []:\n projection_fmt = projection_fmt_list[0]\n else: \n projection_fmt = ','.join(projection_fmt_list)\n if len(options_fmt_list) < 2:\n options_fmt = options_fmt_list[0]\n else:\n options_fmt = ' '.join(options_fmt_list)\n aggregate_fmt = 'select {0} from {1} {2}'.format(projection_fmt,self.coll, options_fmt) \n return aggregate_fmt", "title": "" }, { "docid": "6758658c36fede5bf26aefeab74357fc", "score": "0.54761", "text": "def aggregation_type(self):\n return self._entry['prism:aggregationType']", "title": "" }, { "docid": "92e0fc177e35cc671f29082333b9e350", "score": "0.54306465", "text": "def get_pipeline():\n \n pipeline = Pipeline([\n ('features',FeatureUnion([\n ('text-pipeline',Pipeline([\n ('vect', CountVectorizer(tokenizer= tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting-verb',StartingVerbExtractor())\n ])),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n return pipeline", "title": "" }, { "docid": "74c9226bf20a28bf1eed969d7eb38e11", "score": "0.5357345", "text": "def get_execution_pipeline(self, execution):\n if isinstance(execution, WorkflowExec):\n version = execution.parent_version\n # change the current version to this as well\n\n return self.controller.vistrail.getPipeline(version)\n if isinstance(execution, GroupExec):\n parent = execution.item.wf_execution\n parent_pipeline = self.get_execution_pipeline(parent)\n return parent_pipeline.db_get_module_by_id(\n execution.db_module_id).pipeline", "title": "" }, { "docid": "ba75bade406dcb0a9ed7619d6a08b63d", "score": "0.53111345", "text": "def aggregate(self, pipeline, *args, **kwargs):\n cursor_class = create_class_with_framework(\n AgnosticLatentCommandCursor, self._framework, 
self.__module__\n )\n\n # Latent cursor that will send initial command on first \"async for\".\n return cursor_class(\n self[\"$cmd.aggregate\"],\n self._async_aggregate,\n pipeline,\n *unwrap_args_session(args),\n **unwrap_kwargs_session(kwargs),\n )", "title": "" }, { "docid": "6ae977bdff9c6a551c6a1467d5a670a3", "score": "0.5289119", "text": "def get_finalize_pipeline(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "title": "" }, { "docid": "46c97547714c2d9bb384a9059c235a54", "score": "0.5283481", "text": "def secondary_aggregation(self) -> Optional[pulumi.Input['AggregationArgs']]:\n return pulumi.get(self, \"secondary_aggregation\")", "title": "" }, { "docid": "46c97547714c2d9bb384a9059c235a54", "score": "0.5283481", "text": "def secondary_aggregation(self) -> Optional[pulumi.Input['AggregationArgs']]:\n return pulumi.get(self, \"secondary_aggregation\")", "title": "" }, { "docid": "d934a1f1719b5f662f49432365d2b7fd", "score": "0.52749133", "text": "def get_pipeline(self, pipeline_name):\n return self.find('*/pipeline[@name=\"%s\"]' % pipeline_name)", "title": "" }, { "docid": "dad48a8c6ca9ff19216e9dae5f432d74", "score": "0.52716666", "text": "def pipeline_spec(self) -> pipeline_spec_pb2.PipelineSpec:\n return self.component_spec.implementation.graph", "title": "" }, { "docid": "5e8c0c40b6df51dd1d10ad4e0586e856", "score": "0.52416474", "text": "def get_pipeline(self):\n print(colored('---------------------- getting pipeline ----------------------', 'blue'))\n location_cols = ['pickup_longitude', 'pickup_latitude',\n 'dropoff_longitude', 'dropoff_latitude']\n distance = Pipeline([\n ('distance', DistanceFeatures()),\n ('scaler', StandardScaler())\n ])\n preprocessor = ColumnTransformer([\n ('time', TimeFeatures(), ['pickup_datetime']),\n ('distance', distance, location_cols),\n ('direction', DirectionFeatures(), location_cols),\n ('airport', AirportFeatures(), location_cols)\n ])\n self.pipeline = Pipeline([\n ('preprocessor', preprocessor),\n ('model', self.get_estimator())\n ])", "title": "" }, { "docid": "2ccd831492207866a92a890101f5b40c", "score": "0.52088463", "text": "def get_neo4j_pipeline(self,outCol,*inputCol):\n c = Components()\n allStages = [c.getDocumentAssembler(inputCol[0],\"document\"),c.getTokenizer(\"document\",\"tokens\"),\\\n c.getNormalizer(\"tokens\",\"normalized\"),\\\n c.getFinisher(\"normalized\",\"finished\"),\\\n c.getDocumentAssembler(inputCol[1],\"document1\"),\\\n c.getTokenizer(\"document1\",\"tokens1\"),\\\n c.getNormalizer(\"tokens1\",\"normalized1\"),\\\n c.getFinisher(\"normalized1\",\"finished1\"),\\\n c.getTf(\"finished\",\"tf\"),c.getIdf(\"tf\",\"locFeature\"),\\\n c.getTf(\"finished1\",\"tf1\"),c.getIdf(\"tf1\",\"adjFeature\"),\\\n c.getVectorAssembler([\"locFeature\",\"adjFeature\"],\"features\"),\\\n c.getStringIndexer(outCol,\"label\")]\n return Pipeline(stages=allStages)", "title": "" }, { "docid": "cbef7814e8d9586997a1e281d76b74c9", "score": "0.5190318", "text": "def get_aggregate(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'aggregate', name_or_id, filters)", "title": "" }, { "docid": "c6b0b1a23ac8c3cf54bf229fc660e3ef", "score": "0.51774555", "text": "def get_pipeline(cls, params):\n if cls.pipeline is None:\n cls.pipeline = cls(params)\n else:\n assert(utils.check_dotmap_equality(cls.pipeline.params, params))\n return cls.pipeline", "title": "" }, { "docid": "41e5c6e93a5911e1aadc9c2c3199c2d3", "score": "0.5109945", "text": "def codepipeline(self) -> Optional[str]:\n return pulumi.get(self, 
\"codepipeline\")", "title": "" }, { "docid": "825fe007ef00656c415559b37ee1f100", "score": "0.5099742", "text": "def pipeline(self):\n self.__scrapper.pipeline()", "title": "" }, { "docid": "e73b601b92c3b7ca93f06558499b8f4e", "score": "0.5070127", "text": "def get_stage(cls, name):\n return cls.pipeline_stages[name][0]", "title": "" }, { "docid": "f11eab743addefa453f60597857af794", "score": "0.5055302", "text": "def get_module(cls):\n return cls.pipeline_stages[cls.name][0].__module__", "title": "" }, { "docid": "3a2cc791c67a3776df0040f0952b6727", "score": "0.50407135", "text": "def _stage(stage: InputResolver) -> codepipeline.Stages:\n stage_actions = []\n for pos, action in enumerate(stage.actions):\n stage_actions.append(_stage_action(stage.name, pos, action))\n\n return codepipeline.Stages(Name=stage.name, Actions=stage_actions)", "title": "" }, { "docid": "3c592a8ae39158ae799e3d436e27c44c", "score": "0.50069624", "text": "def create_pipeline(self) -> None:\n pass", "title": "" }, { "docid": "41b840b7d23804f94d2baba2daabd187", "score": "0.49398714", "text": "def aggregate(self, pipeline, *args, **kwargs):\n cursor_class = create_class_with_framework(\n AgnosticLatentCommandCursor, self._framework, self.__module__\n )\n\n # Latent cursor that will send initial command on first \"async for\".\n return cursor_class(\n self,\n self._async_aggregate,\n pipeline,\n *unwrap_args_session(args),\n **unwrap_kwargs_session(kwargs),\n )", "title": "" }, { "docid": "5ff7f7d5f285c4e095573e6253d0501d", "score": "0.48997238", "text": "def aggregations(self) -> FrameworkSearchAggregationInterface:\n return self._aggregations", "title": "" }, { "docid": "92b704fd836cb97705174ac2de73d005", "score": "0.48950547", "text": "def revenue_aggregation(query):\n aggregate = connection.Package.collection.aggregate([\n {'$match': query},\n {'$group': {\n '_id':{'cn': '$cn', 'cl': '$cl', 'pt': '$pt', 'ss': '$cs.ss', 'oc': '$occ',\n 'sdy': {'$year': '$cs.sd'}, 'sdm': {'$month': '$cs.sd'},\n 'sdd': {'$dayOfMonth': '$cs.sd'}},\n 'count' : { '$sum' : 1 },\n 'fs' : { '$sum' : '$inv.chrgs.FS' },\n 'cgm' : {'$sum': '$cgm'},\n 'cod' : {'$sum': '$cod'},\n 'tamt': {'$sum': '$inv.rs'},\n 'famt': {'$sum': '$inv.chrgs.DL'},\n 'ramt': {'$sum': {'$add': ['$inv.chrgs.RTO', '$inv.chrgs.DTO']}},\n 'camt': {'$sum': '$inv.chrgs.COD'},\n }}\n ])\n res = aggregate['result']\n #NOTE: Any better way to write the output back to mongo? 
Feel free to edit this\n # Commented out code is for pymongo, remove hardcoding accordingly though\n #db = pymongo.MongoClient(host=\"stg-wms.delhivery.com\")['delhivery_db']\n #bulk = db.revenueaggregation.initialize_unordered_bulk_op()\n conn = MongoConnection(timeout=RS_TIMEOUT).get_connection()\n mongo_conf = settings.DATABASES.get('mongodb')\n conn = conn[mongo_conf['NAME']]\n\n if 'USER' in mongo_conf and 'PASSWORD' in mongo_conf:\n conn.authenticate(mongo_conf['USER'], mongo_conf['PASSWORD'])\n\n conn = conn['revenue']\n\n for r in res:\n search_dict = {'_id': r['_id']}\n r.pop('_id')\n update_dict = {'$set': r}\n conn.update(\n search_dict,\n update_dict,\n upsert=True)\n #bulk.find(search_dict).upsert().update(update_dict)\n #bulk.execute()", "title": "" }, { "docid": "101a32406b05bb5c826050c5e6410e86", "score": "0.48692206", "text": "def generateAggregation(self, agg):\n if agg:\n if agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_COUNT:\n if agg.groupfield is not None:\n # If the aggregation is 'count(MyDistinctFieldName) by MyGroupedField > XYZ'\n if agg.aggfield is not None:\n count_agg_group_name = \"{}_count\".format(agg.groupfield)\n count_distinct_agg_name = \"{}_distinct\".format(agg.aggfield)\n script_limit = \"params.count {} {}\".format(agg.cond_op, agg.condition)\n self.queries[-1]['aggs'] = {\n count_agg_group_name: {\n \"terms\": {\n \"field\": \"{}\".format(agg.groupfield)\n },\n \"aggs\": {\n count_distinct_agg_name: {\n \"cardinality\": {\n \"field\": \"{}\".format(agg.aggfield)\n }\n },\n \"limit\": {\n \"bucket_selector\": {\n \"buckets_path\": {\n \"count\": count_distinct_agg_name\n },\n \"script\": script_limit\n }\n }\n }\n }\n }\n else: # if the condition is count() by MyGroupedField > XYZ\n group_aggname = \"{}_count\".format(agg.groupfield)\n count_agg_name = \"single_{}_count\".format(agg.groupfield)\n self.queries[-1]['aggs'] = {\n group_aggname: {\n 'terms': {\n 'field': '%s' % (agg.groupfield)\n },\n 'aggs': {\n count_agg_name: {\n 'value_count': {\n 'field': '%s' % agg.groupfield\n }\n },\n 'limit': {\n 'bucket_selector': {\n 'buckets_path': {\n 'count': count_agg_name\n },\n 'script': 'params.count %s %s' % (agg.cond_op, agg.condition)\n }\n }\n }\n }\n }\n else:\n funcname = \"\"\n for name, idx in agg.aggfuncmap.items():\n if idx == agg.aggfunc:\n funcname = name\n break\n raise NotImplementedError(\"%s : The '%s' aggregation operator is not yet implemented for this backend\" % (self.title, funcname))", "title": "" }, { "docid": "bb7374612c691be5ce883338f61ccafc", "score": "0.48635393", "text": "def aggregate_mode(self):\n return self.data.get('aggregate_mode')", "title": "" }, { "docid": "7c43de6ab633448ab8b73317888b8be4", "score": "0.48589116", "text": "def GetPipeline(pipeline_name):\n try:\n pipeline_obj = delivery_pipeline.DeliveryPipelinesClient().Get(\n pipeline_name)\n return pipeline_obj\n except apitools_exceptions.HttpError as error:\n log.debug('Failed to get pipeline {}: {}'.format(pipeline_name,\n error.content))\n log.status.Print('Unable to get delivery pipeline {}'.format(pipeline_name))\n raise error", "title": "" }, { "docid": "0847ae30e98aa75e9be15eca9b5d1fa5", "score": "0.48260325", "text": "def get_aggregate(self, qs):\n return None", "title": "" }, { "docid": "ce5f5cc4ba55860f037d0468a59d8121", "score": "0.47995457", "text": "def get_step_aggregation_query(step_type=None, selection_params={}):\n aggs = {}\n if not step_type:\n step_type = STEP_TYPES[0]\n elif step_type not in STEP_TYPES:\n raise 
ValueError(step_type, \"Unknown step type (expected one of: %s)\"\n % STEP_TYPES)\n if step_type == 'ctag_format':\n formats = output_formats(**selection_params)\n filters = {}\n for f in formats:\n filters[f] = {\n 'has_child': {\n 'type': 'output_dataset',\n 'query': {'term': {'data_format': f}}\n }\n }\n aggs = {'steps': {'filters': {'filters': filters},\n 'aggs': {'substeps': {'terms': {'field': 'ctag'}}}}}\n elif step_type == 'step':\n aggs = {'steps': {'terms': {'field': 'step_name.keyword'}}}\n else:\n raise DkbApiNotImplemented(\"Aggregation by steps of type '%s' is not\"\n \" implemented yet.\")\n return aggs", "title": "" }, { "docid": "243cbfa88be039511f5c35f3b937ce1e", "score": "0.47801664", "text": "def _get_aggregate_id(self):\n return self.__aggregate_id", "title": "" }, { "docid": "243cbfa88be039511f5c35f3b937ce1e", "score": "0.47801664", "text": "def _get_aggregate_id(self):\n return self.__aggregate_id", "title": "" }, { "docid": "5f366b516352f2cd158e1b3a489b3d2e", "score": "0.47612822", "text": "def groupId(self):\n return 'Aggregate'", "title": "" }, { "docid": "fe2762a9298fb93651df6b3665e0851c", "score": "0.47384325", "text": "def stages(self) -> Sequence['outputs.StageResponse']:\n return pulumi.get(self, \"stages\")", "title": "" }, { "docid": "a34aa5c39b8248e8d0e3786b1453dc50", "score": "0.47152495", "text": "def aggregate_type(self) -> str:\n return self.__aggregate_type", "title": "" }, { "docid": "9ed0c02fd684d7837e365f1961bb93c2", "score": "0.47145095", "text": "def _analyze_pipeline(self, pipeline):\n\n # Note: any changes made in this routine should be reflected in _analyze_pipeline_output() below.\n\n if isinstance(pipeline, rf_pipelines.pipeline_object):\n #\n # This is sort of a hack, but we replace the pipeline_object by its jsonization, and use\n # json data structures throughout this routine instead of pipeline_objects. This is because\n # the container classes in rf_pipelines (e.g. rf_pipelines.pipeline) do not currently define\n # a python API for retreiving their contents, so the only way to \"see\" inside is by jsonizing.\n #\n # FIXME: when the rf_pipelines python API is more developed, it should be possible to remove\n # this hack. 
(One disadvantage of the hack is that all transforms in the pipeline must define\n # jsonize().)\n\n pipeline = pipeline.jsonize() # fall through...\n\n \n if isinstance(pipeline, list):\n ret = None\n count = 0\n \n for p in pipeline:\n t = self._analyze_pipeline(p)\n if t is not None:\n ret = t\n count += 1\n\n if count > 1:\n raise RuntimeError(\"frb_olympics.rf_pipelines_dedisperser.__init__: pipeline defines multiple dedispersers?!\")\n \n return ret\n\n assert isinstance(pipeline, dict)\n assert pipeline.has_key('class_name')\n\n if pipeline['class_name'] == 'bonsai_dedisperser_python':\n if not pipeline['track_global_max']:\n raise RuntimeError(\"rf_pipelines_dedisperser.__init__: 'track_global_max' flag is not set\"\n + \" (this may mean that frb_olympics.rf_pipelines_dedisperser._analyze_pipeline() is out of date)\") \n return pipeline['use_analytic_normalization']\n\n if pipeline['class_name'] == 'bonsai_dedisperser_cpp':\n raise RuntimeError(\"rf_pipelines_dedisperser.__init__: pipeline contains a bonsai_dedisperser_cpp, not a bonsai_dedisperser_python\"\n + \" (currently, rf_pipelines defines two bonsai dedisperser classes, and only the python class will work in frb_olympics)\")\n\n if pipeline['class_name'] == 'pipeline':\n return self._analyze_pipeline(pipeline['elements'])\n\n if pipeline['class_name'] == 'wi_sub_pipeline':\n return self._analyze_pipeline(pipeline['sub_pipeline'])\n\n good_class_names = [ 'badchannel_mask',\n 'intensity_clipper',\n 'mask_expander',\n 'mask_filler',\n 'noise_filler',\n 'pipeline_fork',\n 'polynomial_detrender',\n 'spline_detrender',\n 'std_dev_clipper' ]\n \n if pipeline['class_name'] not in good_class_names:\n print >>sys.stderr, \"frb_olympics.rf_pipelines_dedisperser.__init__: unrecognized pipeline_object class '%s' in pipeline\" % pipeline['class_name']\n print >>sys.stderr, \" (This may mean that frb_olympics.rf_pipelines_dedisperser._analyze_pipeline() is out of date)\"\n\n return None", "title": "" }, { "docid": "0d6171e2f63832c9c3e5196d710a4286", "score": "0.47081313", "text": "def make_pipeline():\n open = Latest(inputs=[USEquityPricing.open], window_length=1)\n close = Latest(inputs=[USEquityPricing.close], window_length=1)\n \n pipe = Pipeline(\n columns = {\n 'close': close,\n 'open': open\n }\n )\n return pipe", "title": "" }, { "docid": "df870d6d43e21dbf2d715613d3fa0888", "score": "0.47020486", "text": "def single_contribution():\n pipeline = [{\"$group\":{\"_id\":\"$created.user\",\"count\":{\"$sum\":1}}},\n {\"$group\":{\"_id\":\"$count\",\"number\":{\"$sum\":1}}},{\"$sort\":{\"_id\": 1}},\n {\"$limit\":1}]\n return pipeline", "title": "" }, { "docid": "bf15807a638e797e09cd55bf5cb22526", "score": "0.47004664", "text": "def aggregation(self, value):\n self._set_property('aggregation', value)", "title": "" }, { "docid": "d09de483dbb4dcd98dbcc8998189d8ab", "score": "0.46830407", "text": "def stages(self) -> Optional[Sequence['outputs.PlanStage']]:\n return pulumi.get(self, \"stages\")", "title": "" }, { "docid": "b51b5b34cc5accec5250b486145a1446", "score": "0.46444207", "text": "def stage(self) -> str:\n if self._stage is None:\n self._stage = get_engineering_step_stage(self.params)\n return self._stage", "title": "" }, { "docid": "13f7c76370e3c6a7ed2e4bc2b44acb76", "score": "0.4642814", "text": "def etl_scanr(mongo, self):\n self.create_task()\n coll = mongo[MAIN]\n coll.aggregate(publications_pipeline, allowDiskUse=True)\n return {\"ok\": 1}", "title": "" }, { "docid": "0e3bca589b4c869fcddaa7edf00d20b4", "score": "0.46421903", 
"text": "def get_pipeline_group(self, group_name):\n return self.find('pipelines[@group=\"%s\"]' % group_name)", "title": "" }, { "docid": "b63ed6a80dca7745106fbe037c06cdb0", "score": "0.4629811", "text": "def run(self):\n #Create the pipeline\n self.pipeline = self.set_pipeline()\n \n #Fit the pipeline\n self.pipeline.fit(self.X, self.y)\n return self.pipeline", "title": "" }, { "docid": "81e06df9a004251cc454cc717cdf2767", "score": "0.4628421", "text": "async def fetch_documents_all(pipeline: list) -> list:\n\n data = []\n async for doc in collection.aggregate(pipeline):\n data.append(doc)\n return data", "title": "" }, { "docid": "249e7d7a6fc452b8ce11b74de8019ae9", "score": "0.4609823", "text": "def Aggregated(cls):\n return cls.get_or_insert('0',\n inn='0',\n kpp='0',\n organization_name='Aggregated')", "title": "" }, { "docid": "89add16d1cab462b62b73d58ad44b7e5", "score": "0.4605912", "text": "def get_pipeline_parent_group(self, pipeline_name):\n return self.find('*/pipeline[@name=\"%s\"]/..' % pipeline_name)", "title": "" }, { "docid": "b91412f545f1141a385f0a48a3149d93", "score": "0.4603258", "text": "def aggregate_collection(\n self, params: ParamsOfAggregateCollection\n ) -> ResultOfAggregateCollection:\n return self.request(method='net.aggregate_collection', **params.dict)", "title": "" }, { "docid": "aeddc646b2ea19e2591b84eeb8a35fdc", "score": "0.45888844", "text": "def pipeline(self, pipeline_id):\n return Pipeline(pipeline_id, self._project, self._intf)", "title": "" }, { "docid": "8c4f69f98ce59de8f68437dd12d9eea2", "score": "0.45845154", "text": "def _create_aggregation(self, aggregation_list: List[str]) -> AST:\n field = aggregation_list[0]\n if len(aggregation_list) > 1:\n return Aggs(Variable(field, Terms(field), self._create_aggregation(aggregation_list[1:])))\n else:\n return Aggs(Variable(field, Terms(field)))", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "aa0348a2b7dcf0138ed894e142f31baa", "score": "0.45723557", "text": "def stage(self) -> str:\n return pulumi.get(self, \"stage\")", "title": "" }, { "docid": "a1d9fa6e99ce561631e2f6c8cf7eb9df", "score": "0.45674387", "text": "def get_pipeline(self, pipeline_id):\n endpoint = f\"pipeline/{pipeline_id}\"\n resp = self._request(GET, endpoint, api_version=API_VER_V2)\n return resp", "title": "" }, { "docid": "8d1697bc307c9994ba973dc80861a8a7", "score": "0.45335072", "text": "def is_aggregation_module(self):\n return (\n self.save_image_or_figure == IF_MOVIE or 
self.when_to_save == WS_LAST_CYCLE\n )", "title": "" }, { "docid": "5b2d063abc85861b7194af32d0498a56", "score": "0.45328882", "text": "def _get_pipeline_name(self, ctx):\n pipeline_type = self._store.get_context_type('pipeline')\n # The selected execution is arbitrary as all have an association with\n # 'pipeline' context\n execution = self._store.get_executions_by_context(ctx.id)[0]\n [pipeline_ctx] = [\n ctx for ctx in self._store.get_contexts_by_execution(execution.id)\n if ctx.type_id == pipeline_type.id\n ]\n return pipeline_ctx.name", "title": "" }, { "docid": "26bbafdd46ff3f23eb9388c914b6caa6", "score": "0.45152536", "text": "def to_pipelines(self) -> ParallelPipelines:\n\n pipeline = [] # type: MultiStagePipeline\n if getattr(self, \"cache_inputs\", False): # TODO: formalize this contract\n pipeline.append(Stage(self.cache_input, list(self.iter_inputs())))\n pipeline.append(Stage(self.prepare_target))\n pipeline.append(Stage(self.store_chunk, list(self.iter_chunks())))\n pipeline.append(Stage(self.finalize_target))\n pipelines = [] # type: ParallelPipelines\n pipelines.append(pipeline)\n return pipelines", "title": "" }, { "docid": "7185d5ac74de780d7a161a5068614d4e", "score": "0.45035845", "text": "def build_pipeline(\n result_dir: str,\n word_counter: Counter,\n tag_counter: Counter\n):\n\n pipeline = Pipeline[MultiPack]()\n pipeline.resource.update(word_counter=word_counter)\n pipeline.resource.update(tag_counter=tag_counter)\n pipeline.set_reader(IUXrayReportReader())\n pipeline.add(MultiPackBoxer())\n pipeline.add(PackNameJsonPackWriter(),\n {'indent': 2, 'output_dir': result_dir, 'overwrite': True},\n NameMatchSelector(select_name='default'))\n pipeline.initialize()\n\n return pipeline", "title": "" }, { "docid": "6774a4510929524ab16a3e3f39fb4d11", "score": "0.45035416", "text": "def instantiate_pipeline(self) -> Pipeline:\n return recursive_instantiate(self)", "title": "" }, { "docid": "3125fc2d183aa6b8327e488db2a64805", "score": "0.45023102", "text": "def serial_pipeline(self) -> 'outputs.SerialPipelineResponse':\n return pulumi.get(self, \"serial_pipeline\")", "title": "" }, { "docid": "6f46bc990ca4e724f7a6e21ab7922223", "score": "0.44831812", "text": "def make_pipeline(pipeline_name=None, datetime_encoding=True, categorical_interactions=True, categorical_encode_mode=\"target_mean\",\n imputation_tracking=True, categorical_tracking=True, outlier_clipping_mode=\"percentile\", outlier_tracking=True, outlier_percentile_lower=0.01, outlier_percentile_upper=0.99, \n kmeans_encoding=True,kmeans_encode_mode=\"distance_to_center\", numeric_transform_mode=\"sqrt\", numeric_transform_cutoff=7, seed=1234):\n \n pl = {\n 'pipeline_name':pipeline_name,\n 'datetime_encoding':datetime_encoding,\n 'imputation_tracking':imputation_tracking,\n 'categorical_interactions':categorical_interactions,\n 'categorical_encode_mode':categorical_encode_mode,\n 'categorical_tracking':categorical_tracking,\n 'outlier_clipping_mode':outlier_clipping_mode,\n 'outlier_percentile_lower':outlier_percentile_lower,\n 'outlier_percentile_upper':outlier_percentile_upper,\n 'outlier_tracking':outlier_tracking,\n 'kmeans_encoding':kmeans_encoding,\n 'kmeans_encode_mode':kmeans_encode_mode,\n 'numeric_transform_mode':numeric_transform_mode,\n 'numeric_transform_cutoff':numeric_transform_cutoff,\n 'seed':seed\n}\n \n return pl", "title": "" }, { "docid": "a244c151c130bfc3ff87d13b48e2e997", "score": "0.44823244", "text": "def get_executable(cls):\n return cls.pipeline_stages[cls.name][1]", "title": "" }, { "docid": 
"6f318f4d0d9da07abe309fe5b1424f56", "score": "0.4466977", "text": "def run_pipeline(self):\n run = self._client.run_pipeline(self._experiment.id, self._experiment_id,\n f'{self._experiment_id}.yaml')\n\n return run.id", "title": "" }, { "docid": "d86e4c1831c46cd3b5d05c1a9700606a", "score": "0.44605768", "text": "def describe_pipeline(pipelineName=None):\n pass", "title": "" }, { "docid": "0c8ef9cc3acc8ef3d28fd393a8c1372e", "score": "0.44585907", "text": "def stage_id(self):\n return self._stage_id", "title": "" }, { "docid": "87c6d8bdd56c821acf463845ae26caf0", "score": "0.44510338", "text": "def answer_aggregation_type(self) -> str:\n return pulumi.get(self, \"answer_aggregation_type\")", "title": "" }, { "docid": "937ce00c2029761c8b274b9e2e044487", "score": "0.44487044", "text": "def get_model_aggregation(model, filter_kwargs, aggregator_function=Count, aggregated_field='id'):\n queryset = model.objects.filter(**filter_kwargs)\n result = queryset.aggregate(aggregator_function(aggregated_field))\n result_key = '{}__{}'.format(aggregated_field, aggregator_function.__name__.lower())\n return result[result_key] or 0", "title": "" }, { "docid": "777d6a4636ee0322908d35124e3ffb9d", "score": "0.44369823", "text": "def stages(self) -> List[Stage]:\n return self.__stages", "title": "" }, { "docid": "823479c2d3d6402855b498b2b17789cb", "score": "0.44256976", "text": "def group_by(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"group_by\")", "title": "" }, { "docid": "53d6d03e26b35531abb2d370e0dadf60", "score": "0.4424465", "text": "def _get_local_aggregates(self):\n return self.__local_aggregates", "title": "" }, { "docid": "c7332294dde360b81f415c8362a47cd8", "score": "0.4423285", "text": "def group(self):\n return self.tr('Aggregate')", "title": "" }, { "docid": "b30bb3976d08deaa12fbc7f9d09ddec8", "score": "0.44067174", "text": "def aggregate(self, *aggs: AggregateFn) -> Dataset[U]:\n\n def do_agg(blocks, clear_input_blocks: bool, block_udf):\n # TODO: implement clear_input_blocks\n stage_info = {}\n if len(aggs) == 0:\n raise ValueError(\"Aggregate requires at least one aggregation\")\n for agg in aggs:\n agg._validate(self._dataset)\n # Handle empty dataset.\n if blocks.initial_num_blocks() == 0:\n return blocks, stage_info\n\n num_mappers = blocks.initial_num_blocks()\n num_reducers = num_mappers\n if self._key is None:\n num_reducers = 1\n boundaries = []\n else:\n boundaries = sort.sample_boundaries(\n blocks.get_blocks(),\n [(self._key, \"ascending\")]\n if isinstance(self._key, str)\n else self._key,\n num_reducers,\n )\n\n partition_and_combine_block = cached_remote_fn(\n _partition_and_combine_block\n ).options(num_returns=num_reducers + 1)\n aggregate_combined_blocks = cached_remote_fn(\n _aggregate_combined_blocks, num_returns=2\n )\n\n map_results = np.empty((num_mappers, num_reducers), dtype=object)\n map_meta = []\n for i, block in enumerate(blocks.get_blocks()):\n results = partition_and_combine_block.remote(\n block, boundaries, self._key, aggs\n )\n map_results[i, :] = results[:-1]\n map_meta.append(results[-1])\n map_bar = ProgressBar(\"GroupBy Map\", len(map_results))\n map_bar.block_until_complete(map_meta)\n stage_info[\"map\"] = ray.get(map_meta)\n map_bar.close()\n\n blocks = []\n metadata = []\n for j in range(num_reducers):\n block, meta = aggregate_combined_blocks.remote(\n num_reducers, self._key, aggs, *map_results[:, j].tolist()\n )\n blocks.append(block)\n metadata.append(meta)\n reduce_bar = ProgressBar(\"GroupBy Reduce\", 
len(blocks))\n reduce_bar.block_until_complete(blocks)\n reduce_bar.close()\n\n metadata = ray.get(metadata)\n stage_info[\"reduce\"] = metadata\n return BlockList(blocks, metadata), stage_info\n\n plan = self._dataset._plan.with_stage(AllToAllStage(\"aggregate\", None, do_agg))\n return Dataset(\n plan,\n self._dataset._epoch,\n self._dataset._lazy,\n )", "title": "" }, { "docid": "7187ff8de238bad9ea3ee9e1da405698", "score": "0.44031197", "text": "def get_pipeline_definition(pipeline_config: Dict[str, Any], pipeline_name: Optional[str] = None) -> Dict[str, Any]:\n if pipeline_name is None:\n if len(pipeline_config[\"pipelines\"]) != 1:\n raise PipelineConfigError(\"The YAML contains multiple pipelines. Please specify the pipeline name to load.\")\n return pipeline_config[\"pipelines\"][0]\n\n matching_pipelines = [p for p in pipeline_config[\"pipelines\"] if p[\"name\"] == pipeline_name]\n\n if len(matching_pipelines) == 1:\n return matching_pipelines[0]\n\n if not matching_pipelines:\n raise PipelineConfigError(\n f\"Cannot find any pipeline with name '{pipeline_name}' declared in the YAML file. \"\n f\"Existing pipelines: {[p['name'] for p in pipeline_config['pipelines']]}\"\n )\n raise PipelineConfigError(\n f\"There's more than one pipeline called '{pipeline_name}' in the YAML file. \"\n \"Please give the two pipelines different names.\"\n )", "title": "" }, { "docid": "b888a8364ae77c6038afda655ade1254", "score": "0.4402986", "text": "def aggregate(self):\n data = self.get_buffer()\n # do metric reduction\n f, _ = do_metric_reduction(data, self.reduction)\n return f", "title": "" }, { "docid": "b21d054746422b5049b2159d012a4f63", "score": "0.43984336", "text": "def get(self, pipeline_name=None):\n pipeline_name = self._require_param('pipeline_name', locals())\n\n response = self._session.get(\n path=self._session.urljoin(self.RESOURCE_PATH, pipeline_name).format(base_api=self.base_api),\n headers={'Accept': self._accept_header()},\n )\n\n etag = response.headers['ETag']\n return PipelineConfig(session=self._session, data=response.json(), etag=etag)", "title": "" }, { "docid": "dabfd545233e5cefa525e4b966f916e0", "score": "0.43952706", "text": "def pipeline(self, transaction=True):\n return Pipeline(\n self.connection,\n transaction,\n self.encoding,\n self.errors\n )", "title": "" }, { "docid": "b0c715c08cca55bb11a2065f936d14a3", "score": "0.43945", "text": "def load_pipeline(name):\r\n pipeline = _load(name, get_pipelines_paths())\r\n if pipeline is None:\r\n raise ValueError(\"Unknown pipeline: {}\".format(name))\r\n\r\n return pipeline", "title": "" }, { "docid": "fc13d1172cb5ae75c1ac7e6997c183c5", "score": "0.43939468", "text": "def set_pipeline_stage(stage):\n from paddle.fluid.framework import _set_pipeline_stage\n _static_mode_check()\n _set_pipeline_stage(stage)", "title": "" }, { "docid": "d8d19d322cc519a7013291131f495fc5", "score": "0.43890166", "text": "def getPipeRC(num_features):\n from skopt.space import Real\n rc = RidgeClassifier(solver='auto')\n search_space = {\n 'ridgeclassifier__alpha': Real(MIN_SEARCH, 1.0, prior=\"uniform\")\n }\n return (Pipeline([('ss', StandardScaler()),\n ('ridgeclassifier', rc)]), search_space)", "title": "" }, { "docid": "4edc4168f49404d1d9eaa4ff8b53b1c4", "score": "0.4375685", "text": "def _read_group_stage_ids(self, stages, domain, order):\n stage_ids = stages._search([], order=order, access_rights_uid=SUPERUSER_ID)\n return stages.browse(stage_ids)", "title": "" }, { "docid": "4d5a507c5d67134a3c6cf3833efc8b41", "score": "0.43720534", 
"text": "def staging_resource_group(self) -> Optional[str]:\n return pulumi.get(self, \"staging_resource_group\")", "title": "" }, { "docid": "d1a73109317e64be24f9268582965332", "score": "0.4357305", "text": "def get_sdk_ave_projection(ophys_experiment_id):\n session = get_sdk_session_obj(ophys_experiment_id)\n ave_projection = session.average_projection\n return ave_projection", "title": "" }, { "docid": "aa3247e3aed8c2401d79b47332314159", "score": "0.43572482", "text": "def group_by_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"group_by_fields\")", "title": "" }, { "docid": "10d77a01e56c75b5664166777ee436db", "score": "0.43571642", "text": "def create_aggregate(self, name, availability_zone=None):\n data = _adapter._json_response(\n self.compute.post(\n '/os-aggregates',\n json={'aggregate': {\n 'name': name,\n 'availability_zone': availability_zone\n }}),\n error_message=\"Unable to create host aggregate {name}\".format(\n name=name))\n return self._get_and_munchify('aggregate', data)", "title": "" }, { "docid": "712380774e999c9c1fa879a25d924738", "score": "0.43552676", "text": "def aggregate(loss, weights=None, mode='mean'):\n return lasagne.objectives.aggregate(loss, weights, mode)", "title": "" }, { "docid": "b87e225f10069a6f860b49abc47dd595", "score": "0.43489623", "text": "def _to_java(self):\n\n gateway = SparkContext._gateway\n cls = SparkContext._jvm.org.apache.spark.ml.PipelineStage\n java_stages = gateway.new_array(cls, len(self.getStages()))\n for idx, stage in enumerate(self.getStages()):\n java_stages[idx] = stage._to_java()\n\n _java_obj = JavaParams._new_java_obj(\"org.apache.spark.ml.Pipeline\", self.uid)\n _java_obj.setStages(java_stages)\n\n return _java_obj", "title": "" }, { "docid": "c184366b0560684bfdf8adc4d364af66", "score": "0.4344431", "text": "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "title": "" } ]
d6988d323edac3789be94a660f15ea1f
This method returns the name of the customer
[ { "docid": "bd3f80d095c5204b24e4f670f127c793", "score": "0.9271176", "text": "def get_customer_name(self):\n\t\treturn\tself.name", "title": "" } ]
[ { "docid": "527a7f56285a511d1e07ad302fc9925e", "score": "0.86061275", "text": "def get_customer(self) -> str:\n return self.customer", "title": "" }, { "docid": "caefd9628fac406b27c6bc3d54168670", "score": "0.8200847", "text": "def customer(self):\n return self.details['customer']", "title": "" }, { "docid": "b525fb4fccdb6a578beccac751bdf629", "score": "0.7643868", "text": "def get_customer(self):\n return self._customer", "title": "" }, { "docid": "b525fb4fccdb6a578beccac751bdf629", "score": "0.7643868", "text": "def get_customer(self):\n return self._customer", "title": "" }, { "docid": "3ad46edfde5ad3baa5c77e5677e98284", "score": "0.7589842", "text": "def customer(self):\n return self.__customer", "title": "" }, { "docid": "06fb9739d8b9f26fc514d6ccecb1f9dc", "score": "0.70610523", "text": "def get_order_customer(self):\n return self.__customer", "title": "" }, { "docid": "edd003ff9da364da6611f0954df9fb1e", "score": "0.7041765", "text": "def get_customer():\n return get_option(\"customer\")", "title": "" }, { "docid": "c4dd45019ef762acceb6c9617ac5be35", "score": "0.68555695", "text": "def get_customer(self, name=''):\n if name:\n if self.database_dict.get(name):\n ans_dict = self.database_dict[name]\n else:\n ans_dict = {'message': 'Customer not found'}\n else:\n ans_dict = {'message': 'Please provide Customer name'}\n return ans_dict", "title": "" }, { "docid": "2af6d7f548bc69d3a763407f98b27192", "score": "0.68540287", "text": "def customer(self):\n url = self._get_link(\"customer\")\n if url:\n resp = self.client.customers.perform_api_call(self.client.customers.REST_READ, url)\n return Customer(resp)", "title": "" }, { "docid": "00abbd1c0aad24b33e1484a5ff8f7da6", "score": "0.68480265", "text": "def customer_number(self):\n return self._customer_number", "title": "" }, { "docid": "89b926cbcd128f4def6dec93d205dc42", "score": "0.6764194", "text": "def test_get_customer(self):\n # get the id of a customer\n test_customer = self._create_customers(1)[0]\n resp = self.app.get(\n \"/customers/{}\".format(test_customer.id),\n content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"last_name\"], test_customer.last_name)", "title": "" }, { "docid": "026ff99ea1c9b9a31a655d01dacbf93a", "score": "0.6762085", "text": "def _get_customer_info(self, username):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"SELECT name, contact, bankInformation, address \n FROM Customers\n WHERE username = %s\"\"\", (username,))\n return cursor.fetchone()", "title": "" }, { "docid": "018214575200b8d10010c5e2be56c201", "score": "0.67459863", "text": "def get_name(self):\n return f\"{super(Contractor, self).get_name()} [C]\"", "title": "" }, { "docid": "0ae6cdbcd2a490580f02fca401273d36", "score": "0.6701015", "text": "def __repr__(self):\n return \"Customer ID: \" + str(self._customer_id) + '\\n' \\\n \"Name: \" + self._last_name + \" \" + self._first_name + '\\n' \\\n \"Phone: \" + self._phone_number + '\\n' \\\n \\\n # Methods", "title": "" }, { "docid": "f681c34785f928305313c7dcb0331917", "score": "0.6668319", "text": "def get_customer(self, obj):\n\n request = self.context.get('request')\n if request:\n embed_fields = request.query_params.get('embed_fields')\n if embed_fields and 'customer' in embed_fields:\n return CustomerSerializer(instance=obj.customer).data\n return obj.customer_id", "title": "" }, { "docid": "863f252509aa24149d79a3b34e4ba88d", "score": "0.6648916", "text": "def customer_id(self):\n return 
self._customer_id", "title": "" }, { "docid": "f7a822c7288808c7de2fba75467a1ecc", "score": "0.6645675", "text": "def getCurrentCustomer(user):\n topTaskEntry = getCurrentTaskEntry(user)\n if topTaskEntry != None:\n return topTaskEntry.customer\n else:\n return None", "title": "" }, { "docid": "fa5e03e07ca448d5a58c60a9e101ab14", "score": "0.6635037", "text": "def _PrintCustomerId(user):\n print 'CustomerId for %s:' % GetFieldFromUser(user, 'primaryEmail').split(\n '@')[1]\n print GetFieldFromUser(user, 'customerId')", "title": "" }, { "docid": "be7f3bc229da81338f9715c4121b8399", "score": "0.6539528", "text": "def get_name(self, obj):\n if obj.type_client == 'n':\n return obj.first_name + ' ' + obj.last_name\n else:\n return obj.agent_firstname + ' ' + obj.agent_lastname", "title": "" }, { "docid": "4adf626c3dc3ac6151d299178a506588", "score": "0.6469173", "text": "def customer_name(self, customer_name):\n\n self._customer_name = customer_name", "title": "" }, { "docid": "121c75bdcee7bfaf5d6053b2d8733f90", "score": "0.6457976", "text": "def customer_path(account: str,customer: str,) -> str:\n return \"accounts/{account}/customers/{customer}\".format(account=account, customer=customer, )", "title": "" }, { "docid": "a96f727275aa7d96a76266fb07e432d5", "score": "0.6444786", "text": "def __str__(self):\n return {self.id},({self.customer.name})", "title": "" }, { "docid": "18c68d7eeece4beef1099e6ae3830985", "score": "0.64333737", "text": "def customer(self) -> Customer:\n if not self._customer:\n if not self._customer_id:\n self._logger.error(\"No customer ID\")\n return None\n self._customer = Customer.get_by_global_customer_id(self._customer_id)\n return self._customer", "title": "" }, { "docid": "c8d541bf847dbf25060def65431e3b11", "score": "0.6421696", "text": "def get_name(self):\n if self.display_name:\n return self.display_name\n return self.key.string_id() # Email address", "title": "" }, { "docid": "5c1714a3c8767e0553c07448f8b17e3d", "score": "0.6419416", "text": "async def get_customer(self):\n from .customer import Customer\n\n url = self._get_link(\"customer\")\n if url:\n resp = await self._resource.perform_api_call(self._resource.REST_READ, url)\n return Customer(resp)", "title": "" }, { "docid": "1696d53eae506c9183b6404aaef2b438", "score": "0.64147395", "text": "def company_name(self):\n return self.__company_name", "title": "" }, { "docid": "1696d53eae506c9183b6404aaef2b438", "score": "0.64147395", "text": "def company_name(self):\n return self.__company_name", "title": "" }, { "docid": "334883f3fe3d93d113219ccac67aed1d", "score": "0.64114714", "text": "def customer(self) -> Customer:\n if not self._customer_id:\n return None\n return Customer.get_by_global_customer_id(self._customer_id)", "title": "" }, { "docid": "9ad53258fbcb81f25403139a959e1ab4", "score": "0.63986295", "text": "def test_customer_name(self):\n cust_john = Customer.objects.get(contact_num='12345678999')\n cust_milo = Customer.objects.get(contact_num='12345678888')\n self.assertEqual(\n cust_john.get_cust_name(), \"John\")\n self.assertEqual(\n cust_milo.get_cust_name(), \"Milo\")", "title": "" }, { "docid": "f66c427780b684db9a93daf6a640a455", "score": "0.6372805", "text": "def get_company_name(self):\n return self.company_name", "title": "" }, { "docid": "f66c427780b684db9a93daf6a640a455", "score": "0.6372805", "text": "def get_company_name(self):\n return self.company_name", "title": "" }, { "docid": "7f99a6bfb0122a3e0b2c3ba30976fbac", "score": "0.6340517", "text": "def name(self):\r\n return self._cname", 
"title": "" }, { "docid": "ab6c26791a4e61738992de74c0688e7a", "score": "0.6318771", "text": "def __str__(self):\n return (\"ID: %s, Name: %s %s, Phone #: %s, Address: %s\" % (self.customer_id, self.first_name, self.last_name,\n self.phone_number, self.address))", "title": "" }, { "docid": "caf1c0852216f33c3a79b06be4558e5c", "score": "0.628578", "text": "def get_name(self):\n return self._get_user_ctx()['name']", "title": "" }, { "docid": "e5e63986fc3188dbafe7b232036ac5ac", "score": "0.6278796", "text": "def customer_tenant_id(self) -> str:\n return pulumi.get(self, \"customer_tenant_id\")", "title": "" }, { "docid": "ffebbe9a1d068580e691f71b052a47cd", "score": "0.6269567", "text": "def GetCustomer(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "142a98781039fde10d56903b86301f91", "score": "0.62653387", "text": "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "title": "" }, { "docid": "142a98781039fde10d56903b86301f91", "score": "0.62653387", "text": "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "title": "" }, { "docid": "142a98781039fde10d56903b86301f91", "score": "0.62653387", "text": "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "title": "" }, { "docid": "852defd2137e60cdc143cb0453dd6407", "score": "0.6264931", "text": "def customer_path(customer_id: str,) -> str:\n return \"customers/{customer_id}\".format(customer_id=customer_id,)", "title": "" }, { "docid": "7e3812789161d08d3a33ad677a6640a7", "score": "0.6232928", "text": "def user_to_customer(user):\n\tuser = frappe.get_doc('User', user)\n\tis_customer = False\n\tfor role in user.roles:\n\t\tif role.role == 'Customer':\n\t\t\tis_customer = True\n\tif not is_customer:\n\t\treturn None\n\tcontacts = frappe.get_all('Contact',\n\t\tfilters={\n\t\t\t\"user\": user.name\n\t\t},\n\t\tfields=['name'])\n\n\tfor contact in contacts:\n\t\tcontact = frappe.get_doc('Contact', contact['name'])\n\t\tfor link in contact.links:\n\t\t\tif link.link_doctype == 'Customer':\n\t\t\t\treturn link.link_name\n\treturn None", "title": "" }, { "docid": "dbf12ab4d72af5554b844a96d08289a7", "score": "0.6232644", "text": "def display(self):\n if isinstance(self._customer_id, int):\n return self.__repr__()\n else:\n raise AttributeError(\"'Customer' object has no attribute 'cid'\")", "title": "" }, { "docid": "2d93d89aa06c66f336b5b34bd84ec1b2", "score": "0.62202984", "text": "def get_name() -> str:", "title": "" }, { "docid": "5f990704e50658e909560e740dbac023", "score": "0.6195171", "text": "def getCompanyName(self):\n\t\tif self.companyXML != None:\n\t\t\tcompData = self.companyXML.findBranch(\"Company_Name\")\n\t\t\treturn (compData[0].get(\"Name\"))", "title": "" }, { "docid": "3fddbf43a58d441cb53f01a3ab5f4121", "score": "0.61788684", "text": "def test_get_customer(self):\n pass", "title": "" }, { "docid": "3fddbf43a58d441cb53f01a3ab5f4121", "score": "0.61788684", "text": "def test_get_customer(self):\n pass", "title": "" }, { "docid": "a89df961eae67595f417fb44995ba81b", "score": "0.6172538", "text": "def get_company_name(self):\n\n return self.company_name", "title": "" }, { "docid": "a89df961eae67595f417fb44995ba81b", "score": "0.6172538", "text": "def get_company_name(self):\n\n return self.company_name", "title": "" }, { "docid": "b171329761a1b04ae955e3c2f232712d", "score": "0.61603886", "text": "def 
customer_email_address(self):\n return self._customer_email_address", "title": "" }, { "docid": "3a63ae40b13aa5e452172a4387166e33", "score": "0.61245424", "text": "def name(self):\n return \"{} {}\".format(self.client_name, self._name)", "title": "" }, { "docid": "91f04023677268ca23d1250f92fae3b0", "score": "0.61183816", "text": "def name(self):\n return '{} {}'.format(self.client_name, self._name)", "title": "" }, { "docid": "91f04023677268ca23d1250f92fae3b0", "score": "0.61183816", "text": "def name(self):\n return '{} {}'.format(self.client_name, self._name)", "title": "" }, { "docid": "6eca4c755ae56dded7b062f7b9b716b3", "score": "0.6115269", "text": "def name(self):\n return self.firstname + ' ' + self.lastname", "title": "" }, { "docid": "dc445b0771c9728aebcda7249f149a37", "score": "0.6107857", "text": "def customer(request):\n msg = \"The request object does not contain a customer. Edit your MIDDLEWARE_CLASSES setting to insert 'shop.middlerware.CustomerMiddleware'.\"\n assert hasattr(request, 'customer'), msg\n\n customer = request.customer\n if request.user.is_staff:\n try:\n customer = CustomerModel.objects.get(pk=request.session['emulate_user_id'])\n except CustomerModel.DoesNotExist:\n customer = VisitingCustomer()\n except (AttributeError, KeyError):\n pass\n return {'customer': customer}", "title": "" }, { "docid": "965aa5f7f062491bbce8099b39a41c48", "score": "0.61053854", "text": "def name(self):\n return f\"{self.client_name} {self._name}\"", "title": "" }, { "docid": "965aa5f7f062491bbce8099b39a41c48", "score": "0.61053854", "text": "def name(self):\n return f\"{self.client_name} {self._name}\"", "title": "" }, { "docid": "965aa5f7f062491bbce8099b39a41c48", "score": "0.61053854", "text": "def name(self):\n return f\"{self.client_name} {self._name}\"", "title": "" }, { "docid": "7c44da13396ee1d3100300b9852c7906", "score": "0.6103489", "text": "def __str__(self):\n return {self.customername}, {self.trainer_name}", "title": "" }, { "docid": "a0e058cdbc843c363a5a623de5285811", "score": "0.60981417", "text": "def account_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_name\")", "title": "" }, { "docid": "a0e058cdbc843c363a5a623de5285811", "score": "0.60981417", "text": "def account_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_name\")", "title": "" }, { "docid": "a0e058cdbc843c363a5a623de5285811", "score": "0.60981417", "text": "def account_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_name\")", "title": "" }, { "docid": "42561ff9118cfc800139078e2d1f44d2", "score": "0.60979307", "text": "def getName(self):\n return self._my_name", "title": "" }, { "docid": "b042f5bc331478168e89ca0b9cbf8e41", "score": "0.6091423", "text": "def getName(self):\n return self.country", "title": "" }, { "docid": "98c5c6d07326ec0d6f0caf7f0c41ee17", "score": "0.6085268", "text": "def cname(self) -> Optional[str]:\n return pulumi.get(self, \"cname\")", "title": "" }, { "docid": "65e31770a2eeb3c36ee64cd0252d7733", "score": "0.60776424", "text": "def get_name(self):\n\n return self.name", "title": "" }, { "docid": "3027cc037ecfcbae823f0a7b21cef791", "score": "0.6075063", "text": "def get_name(self):\n\t\tstudent_name = self.person.full_name()\n\t\treturn student_name", "title": "" }, { "docid": "a6d6e378f2f72ca9b57fe58b4ea8b905", "score": "0.60726786", "text": "def name(self) -> str:\n if self.last_name:\n if self.middle_initial:\n return ' '.join((self.first_name, self.middle_initial, self.last_name)).strip().upper()\n return ' 
'.join((self.first_name, self.last_name)).strip().upper()\n return self.business_name.strip().upper()", "title": "" }, { "docid": "09fd373809dedd0d3dea3c5ce601304b", "score": "0.6069796", "text": "def get_full_name(self):\n return self.email", "title": "" }, { "docid": "546402459450316f952fc300efdfd597", "score": "0.6069775", "text": "def get_name(self): \r\n return self.name", "title": "" }, { "docid": "dd4a91fbf9fe77d601eecf9b4a94ca11", "score": "0.60660183", "text": "def get_name(self):\n return self._get_thermostat_key(\"name\")", "title": "" }, { "docid": "797565adb2fc62de11aacf35124ae1c3", "score": "0.60610807", "text": "def name(self):\n return f\"{self._client_name} {self._name}\"", "title": "" }, { "docid": "f17a6126131ef1ef2a2f881ffc513b75", "score": "0.60541105", "text": "def get_the_last_buyer_name():\n\n return crm.get_name_by_id(get_the_last_buyer_id())", "title": "" }, { "docid": "c011e41a9b5ff98eae47d09843016c12", "score": "0.6051878", "text": "def client_name(self):\r\n if 'clientName' in self._summary['subclient']:\r\n return self._summary['subclient']['clientName']", "title": "" }, { "docid": "ea98aa1952ab03fc056f4f7ad3202770", "score": "0.60518694", "text": "def __repr__(self):\n return (\"ID: %s, Name: %s %s, Phone #: %s, Address: %s\" % (self.customer_id, self.first_name, self.last_name,\n self.phone_number, self.address))", "title": "" }, { "docid": "50f89e3ef6638e87790f70305a3b8dea", "score": "0.604981", "text": "def tenant_name(self) -> str:\n return pulumi.get(self, \"tenant_name\")", "title": "" }, { "docid": "0dc4feaee4a474777bf7fccd4e0166a6", "score": "0.6040822", "text": "def retrieve_customer(self, callback, **params):\n self._require(params, ['id'])\n return self._call(callback, 'retrieve_customer', params)", "title": "" }, { "docid": "929bc8d8fcf97bb7b430182311eafec8", "score": "0.6040141", "text": "def name(self):\r\n return self._properties['userEntity']['userName']", "title": "" }, { "docid": "6ee0eb7a34c7bcdbd5cc26334ee7a7e7", "score": "0.60302126", "text": "def get_name(self):\r\n return self.name", "title": "" }, { "docid": "6ee0eb7a34c7bcdbd5cc26334ee7a7e7", "score": "0.60302126", "text": "def get_name(self):\r\n return self.name", "title": "" }, { "docid": "6ee0eb7a34c7bcdbd5cc26334ee7a7e7", "score": "0.60302126", "text": "def get_name(self):\r\n return self.name", "title": "" }, { "docid": "6ee0eb7a34c7bcdbd5cc26334ee7a7e7", "score": "0.60302126", "text": "def get_name(self):\r\n return self.name", "title": "" }, { "docid": "451fd7311516ebde5d40bde5df61d11e", "score": "0.60301346", "text": "def get_short_name(self): \n return self.email", "title": "" }, { "docid": "cb8a737fa1f2e869fd8fc15077114390", "score": "0.6027911", "text": "def test_get_customer_0(self):\n pass", "title": "" }, { "docid": "5e826adf4a2846b79361345acd6a4131", "score": "0.6023284", "text": "def customer(request, customer_id):\n customer = get_object_or_404(Customer, pk=customer_id)\n invoices = Invoice.objects.filter(customer=customer).order_by('-date_created')\n context = {\n 'title': \"Customer info - %s\" % customer.name,\n 'customer': customer,\n 'invoices': invoices,\n }\n return render(request, 'invoice/customer.html', context)", "title": "" }, { "docid": "6022d8035c0e1c0367f93411c451ed72", "score": "0.6023012", "text": "def cname_record(self) -> str:\n return pulumi.get(self, \"cname_record\")", "title": "" }, { "docid": "b3ecbbf08cbf3cbdfe7f3cdf407ba56b", "score": "0.6022295", "text": "def contact_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, 
\"contact_name\")", "title": "" }, { "docid": "b3ecbbf08cbf3cbdfe7f3cdf407ba56b", "score": "0.6022295", "text": "def contact_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"contact_name\")", "title": "" }, { "docid": "5512fffe71569edfada9144c917af67e", "score": "0.6013319", "text": "def get_name(self) -> str:\n pass", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" }, { "docid": "587a76c067cdee5eb97cad264c2daada", "score": "0.60004216", "text": "def get_name(self):\n return self.name", "title": "" } ]
edcfbd83b47afb3c8a65e63a61ec03c4
ON PASSIVE DTP CREATION, NOT ON INITIAL TCP CONNECTION. For Initial TCP connection, see handle_accept in StreamFTPServer. Mainly copypasted from PassiveDTP, except that dtp_handler is run with a stream_rate.
[ { "docid": "24021e26f16b217576b13d6927ba2472", "score": "0.53463334", "text": "def handle_accept(self):\n \"\"\"Called when remote client initiates a connection.\"\"\"\n if not self.cmd_channel.connected:\n return self.close()\n try:\n sock, addr = self.accept()\n except TypeError:\n # sometimes accept() might return None (see issue 91)\n return\n except socket.error, err:\n # ECONNABORTED might be thrown on *BSD (see issue 105)\n if err.args[0] != errno.ECONNABORTED:\n self.log_exception(self)\n return\n else:\n # sometimes addr == None instead of (ip, port) (see issue 104)\n if addr == None:\n return\n\n # Check the origin of data connection. If not expressively\n # configured we drop the incoming data connection if remote\n # IP address does not match the client's IP address.\n if self.cmd_channel.remote_ip != addr[0]:\n if not self.cmd_channel.permit_foreign_addresses:\n try:\n sock.close()\n except socket.error:\n pass\n msg = 'Rejected data connection from foreign address %s:%s.' \\\n %(addr[0], addr[1])\n self.cmd_channel.respond(\"425 %s\" % msg)\n self.log(msg)\n # do not close listening socket: it couldn't be client's blame\n return\n else:\n # site-to-site FTP allowed\n msg = 'Established data connection with foreign address %s:%s.'\\\n % (addr[0], addr[1])\n self.log(msg)\n # Immediately close the current channel (we accept only one\n # connection at time) and avoid running out of max connections\n # limit.\n self.close()\n # delegate such connection to DTP handler\n if self.cmd_channel.connected:\n handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel, self.stream_rate)\n if handler.connected:\n self.cmd_channel.data_channel = handler\n self.cmd_channel._on_dtp_connection()", "title": "" } ]
[ { "docid": "22ea0adb1e670bbe1fc11d1036a3d4aa", "score": "0.63551366", "text": "def on_dtp_connection(self):\n self.debug(\"FTPHandler.on_dtp_connection()\")\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n # check for data to send\n if self.out_dtp_queue:\n data, isproducer, log = self.out_dtp_queue\n if log:\n self.log(log)\n if not isproducer:\n self.data_channel.push(data)\n else:\n self.data_channel.push_with_producer(data)\n if self.data_channel:\n self.data_channel.close_when_done()\n self.out_dtp_queue = None\n\n # check for data to receive\n elif self.in_dtp_queue:\n fd, log = self.in_dtp_queue\n if log:\n self.log(log)\n self.data_channel.file_obj = fd\n self.data_channel.enable_receiving(self.current_type)\n self.in_dtp_queue = None", "title": "" }, { "docid": "33c38cf2fe3257b3fc172f66cc37fd88", "score": "0.63405085", "text": "def handle_connect(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_connect()\")\n self.cmd_channel.respond('200 PORT command successful.')\n # delegate such connection to DTP handler\n handler = self.cmd_channel.dtp_handler(self.socket, self.cmd_channel)\n self.cmd_channel.data_channel = handler\n self.cmd_channel.on_dtp_connection()\n #self.close() # <-- (done automatically)", "title": "" }, { "docid": "52e1ea2e48cc84a87380d75b362aaae6", "score": "0.6260652", "text": "def ftp_PASV(self, line):\n # close existing DTP-server instance, if any\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n if self.data_channel:\n self.data_channel.close()\n self.data_channel = None\n\n # make sure we are not hitting the max connections limit\n if self.ftpd_instance.max_cons:\n if len(self._map) >= self.ftpd_instance.max_cons:\n msg = \"Too many connections. Can't open data channel.\"\n self.respond(\"425 %s\" %msg)\n self.log(msg)\n return\n\n # open DTP channel\n self.data_server = self.passive_dtp(self)", "title": "" }, { "docid": "37f799dc0f4a7f1462bc732cf5a6fd07", "score": "0.6236533", "text": "def handle_accept(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_accept()\")\n sock, addr = self.accept()\n\n # Check the origin of data connection. If not expressively\n # configured we drop the incoming data connection if remote\n # IP address does not match the client's IP address.\n if (self.cmd_channel.remote_ip != addr[0]):\n if not self.cmd_channel.permit_foreign_addresses:\n try:\n sock.close()\n except socket.error:\n pass\n msg = 'Rejected data connection from foreign address %s:%s.' 
\\\n %(addr[0], addr[1])\n self.cmd_channel.respond(\"425 %s\" %msg)\n self.cmd_channel.log(msg)\n # do not close listening socket: it couldn't be client's blame\n return\n else:\n # site-to-site FTP allowed\n msg = 'Established data connection with foreign address %s:%s.'\\\n %(addr[0], addr[1])\n self.cmd_channel.log(msg)\n # Immediately close the current channel (we accept only one\n # connection at time) and avoid running out of max connections\n # limit.\n self.close()\n # delegate such connection to DTP handler\n handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel)\n self.cmd_channel.data_channel = handler\n self.cmd_channel.on_dtp_connection()", "title": "" }, { "docid": "67d0f33c345dbcfb5a3c9d3bd5c16ed8", "score": "0.6060341", "text": "def connect(self):\r\n self.ftp = ftplib.FTP(self.host)\r\n self.ftp.set_debuglevel(1)\r\n self.ftp.set_pasv(True)\r\n self.ftp.login(self.login, self.passwd)\r\n if self.directory:\r\n self.ftp.cwd(self.directory)\r\n # optimize socket params for download task\r\n self.ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\r\n self.ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)\r\n self.ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)", "title": "" }, { "docid": "b5418fec8f84243dda2571e2f3352050", "score": "0.5323983", "text": "def ftp_PORT(self, line):\n # Parse PORT request for getting IP and PORT.\n # Request comes in as:\n # > h1,h2,h3,h4,p1,p2\n # ...where the client's IP address is h1.h2.h3.h4 and the TCP\n # port number is (p1 * 256) + p2.\n try:\n line = line.split(',')\n ip = \".\".join(line[:4]).replace(',','.')\n port = (int(line[4]) * 256) + int(line[5])\n except (ValueError, OverflowError):\n self.respond(\"501 Invalid PORT format.\")\n return\n\n # FTP bounce attacks protection: according to RFC-2577 it's\n # recommended to reject PORT if IP address specified in it\n # does not match client IP address.\n if not self.permit_foreign_addresses:\n if ip != self.remote_ip:\n self.log(\"Rejected data connection to foreign address %s:%s.\"\n %(ip, port))\n self.respond(\"501 Can't connect to a foreign address.\")\n return\n\n # ...another RFC-2577 recommendation is rejecting connections\n # to privileged ports (< 1024) for security reasons.\n if not self.permit_privileged_ports:\n if port < 1024:\n self.log('PORT against the privileged port \"%s\" refused.' %port)\n self.respond(\"501 Can't connect over a privileged port.\")\n return\n\n # close existent DTP-server instance, if any.\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n if self.data_channel:\n self.data_channel.close()\n self.data_channel = None\n\n # make sure we are not hitting the max connections limit\n if self.ftpd_instance.max_cons:\n if len(self._map) >= self.ftpd_instance.max_cons:\n msg = \"Too many connections. 
Can't open data channel.\"\n self.respond(\"425 %s\" %msg)\n self.log(msg)\n return\n\n # open DTP channel\n self.active_dtp(ip, port, self)", "title": "" }, { "docid": "2b3de2377d28629c3b295ea9e34661ee", "score": "0.530459", "text": "def handle_accept(self):\n \"\"\"Called when remote client initiates a connection.\"\"\"\n try:\n sock, addr = self.accept()\n except TypeError:\n # sometimes accept() might return None (see issue 91)\n return\n except socket.error, err:\n # ECONNABORTED might be thrown on *BSD (see issue 105)\n if err.args[0] != errno.ECONNABORTED:\n ftpserver.logerror(traceback.format_exc())\n return\n else:\n # sometimes addr == None instead of (ip, port) (see issue 104)\n if addr is None:\n return\n\n handler = None\n ip = None\n try:\n \"\"\"\n *********************\n handler = StreamHandler, which specifies stream_rate for the overall\n tcp connection.\n *********************\n \"\"\"\n handler = self.handler(sock, self, len(self.handlers), self.stream_rate)\n if not handler.connected:\n return\n ftpserver.log(\"[]%s:%s Connected.\" % addr[:2])\n ip = addr[0]\n self.ip_map.append(ip)\n\n # For performance and security reasons we should always set a\n # limit for the number of file descriptors that socket_map\n # should contain. When we're running out of such limit we'll\n # use the last available channel for sending a 421 response\n # to the client before disconnecting it.\n if self.max_cons and (len(asyncore.socket_map) > self.max_cons):\n print \"Connection accepted for max_cons\"\n sys.stderr.write('ERROR: Connection accepted for max_cons')\n handler.handle_max_cons()\n return\n\n # accept only a limited number of connections from the same\n # source address.\n if self.max_cons_per_ip:\n if self.ip_map.count(ip) > self.max_cons_per_ip:\n handler.handle_max_cons_per_ip()\n print \"Connection accepted for max_cons_per_ip\"\n return\n\n try:\n handler.handle()\n except:\n handler.handle_error()\n except (KeyboardInterrupt, SystemExit, asyncore.ExitNow):\n raise\n except:\n # This is supposed to be an application bug that should\n # be fixed. We do not want to tear down the server though\n # (DoS). We just log the exception, hoping that someone\n # will eventually file a bug. 
References:\n # - http://code.google.com/p/pyftpdlib/issues/detail?id=143\n # - http://code.google.com/p/pyftpdlib/issues/detail?id=166\n # - https://groups.google.com/forum/#!topic/pyftpdlib/h7pPybzAx14\n ftpserver.logerror(traceback.format_exc())\n if handler is not None:\n handler.close()\n else:\n if ip is not None and ip in self.ip_map:\n self.ip_map.remove(ip)\n print \"Connection accepted.\"\n self.conns.append((handler.remote_ip, handler.remote_port))\n self.handlers.append(handler)", "title": "" }, { "docid": "edf1a40239688884161b9ac7a000ab12", "score": "0.5278688", "text": "def pasv_connection(self, sock, pasv_ip, pasv_port):\n self.ftp_connect(sock, pasv_ip, pasv_port)", "title": "" }, { "docid": "cd706df59c69087d237c219f9f13cbf5", "score": "0.52769566", "text": "def serve_forever(self, **kwargs):\n if not 'count' in kwargs:\n log(\"Serving FTP on %s:%s\" %self.socket.getsockname())\n\n # backward compatibility for python < 2.4\n if not hasattr(self, '_map'):\n if not 'map' in kwargs:\n map = asyncore.socket_map\n else:\n map = kwargs['map']\n self._map = self.handler._map = map\n \n try:\n # FIX #16, #26\n # use_poll specifies whether to use select module's poll()\n # with asyncore or whether to use asyncore's own poll()\n # method Python versions < 2.4 need use_poll set to False\n # This breaks on OS X systems if use_poll is set to True.\n # All systems seem to work fine with it set to False\n # (tested on Linux, Windows, and OS X platforms)\n if kwargs:\n asyncore.loop(**kwargs)\n else:\n asyncore.loop(timeout=1, use_poll=False)\n except (KeyboardInterrupt, SystemExit, asyncore.ExitNow):\n log(\"Shutting down FTPd.\")\n self.close_all()", "title": "" }, { "docid": "a437d898227f08334917dec4e12eb687", "score": "0.51417315", "text": "def handler(self):\r\n\t\tif (self.state != self.INIT):\r\n\t\t\tself.sendRtspRequest(self.TEARDOWN)\r\n\t\t\treply = self.recvRtspReply()\r\n\t\t\t\r\n\t\t\treplyEle = self.parseRtspReply(reply)\r\n\t\t\ttotalSendPacketCount = int(replyEle[3][1])\r\n\r\n\t\t\tif (reply.split('\\n')[0] == \"RTSP/1.0 200 OK\"):\r\n\t\t\t\tself.networkStat.computeLoss(totalSendPacketCount, self.networkStat.receivedPacketCount)\r\n\t\t\t\tself.networkStat.computeADR()\r\n\t\t\t\tself.networkStat.exportLogFile(self.sessionId, self.rtspSeq)\r\n\t\t\t\r\n\t\t\tif os.path.exists(self.cacheFile):\r\n\t\t\t\tos.remove(self.cacheFile)\r\n\t\ttry:\r\n\t\t\tself.rtpSocket_client.close()\r\n\t\t\tself.rtspSocket_client.close()\r\n\t\texcept:\r\n\t\t\tNone\r\n\t\tself.master.destroy()\r\n\t\tsys.exit()", "title": "" }, { "docid": "fbccfdb2caa53821358972741c286e88", "score": "0.51121265", "text": "def ftp():\n pass", "title": "" }, { "docid": "87868d501db1179bee8f4a7a9e5403cf", "score": "0.50919515", "text": "def connection_made(self, transport):\n #self._transport = transport\n\n self._server_ip, self._server_port = (\n transport.get_extra_info('peername')[:2])\n\n self.stream = self._stream_factory(\n transport=transport, client=True, log=self.log)\n\n# self.reader = self._factory_reader()\n# self.reader.set_transport(transport)\n self.shell = self._shell_factory(client=self, log=self.log)\n\n self.init_environment_values()\n self.set_stream_callbacks()\n self._last_received = datetime.datetime.now()\n self._connected = datetime.datetime.now()\n\n # begin connect-time negotiation\n self._loop.call_soon(self.begin_negotiation)\n\n # resolve server fqdn (and later, reverse-dns)\n self._server_host = self._loop.run_in_executor(\n None, socket.gethostbyaddr, self._server_ip)\n 
self._server_host.add_done_callback(self.after_server_lookup)\n\n self.log.info(self)", "title": "" }, { "docid": "3f9dff2e474a2e7a9664a770d30a470f", "score": "0.509091", "text": "def handle_close(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_close()\")\n self.close()", "title": "" }, { "docid": "34b99195ddd9475f11a40b2f630ab188", "score": "0.507946", "text": "def _openSFTPConnection(self):\n if not self.sftp_open:\n self.sftp = paramiko.SFTPClient.from_transport(self.transport)\n self.sftp_open = True", "title": "" }, { "docid": "06555d34f5da0e53c1816ddaea9ae4d5", "score": "0.5074956", "text": "def main():\n\n print(\"*\" * 50)\n print(\"[LOG] Printing command line arguments\\n\", \",\".join(sys.argv))\n check_file_name()\n print(\"*\" * 50)\n\n # This argument is required.\n # For a server, this means the IP that the server socket\n # will use.\n # The IP of the server, some default values\n # are provided. Feel free to modify them.\n ip_address = get_arg(1, \"127.0.0.1\")\n operation = get_arg(2, \"pull\")\n file_name = get_arg(3, \"test.txt\")\n port = 69\n\n udp_socket = setup_sockets(ip_address)\n tftp_proc = TftpProcessor()\n if operation is \"push\":\n packet = tftp_proc.upload_file(file_name)\n elif operation is \"pull\":\n packet = tftp_proc.request_file(file_name)\n udp_socket.sendto(packet, (ip_address, port))\n inc_packet, server = udp_socket.recvfrom(1000)\n reply = tftp_proc.process_udp_packet(packet)\n if reply == 0:\n print(\"Connection Established\")\n else:\n print(\"Error: Couldn't establish connection\")\n while True:\n # Receive an acknowledgement packet or an error packet\n if tftp_proc.has_pending_packets_to_be_sent() != 0:\n if operation is \"push\":\n sen_packet = tftp_proc.get_next_output_packet()\n elif operation is \"pull\":\n sen_packet = tftp_proc.get_next_output_packet()\n rec_packet, server = udp_socket.recvfrom(1000)\n reply = tftp_proc.process_udp_packet(packet)\n if reply == 0:\n print(\"Packet\")\n continue\n\n # Modify this as needed.\n parse_user_input(ip_address, operation, file_name)", "title": "" }, { "docid": "3ab85237b3b6a06df50ffc1507c90e9f", "score": "0.5064", "text": "def __init__(self, host, username, password, port=22):\n self.sftp = None\n self.sftp_open = False\n \n # open SSH Transport stream\n self.transport = paramiko.Transport((host, port))\n \n self.transport.connect(username=username, password=password)", "title": "" }, { "docid": "2a906c320027cc5178db797b7c9befd9", "score": "0.50156116", "text": "def tcp_start(self, flow: mitmproxy.tcp.TCPFlow):", "title": "" }, { "docid": "de57404062e7d4cc517b137a3e6e874f", "score": "0.50048345", "text": "def __init__(self):\n self._shutdown_lock = threading.Lock()\n self._work_queue = queue.Queue()\n self.ftp_server = ftpclient.setup()", "title": "" }, { "docid": "a27042bfdc606e56fab7e027fd342961", "score": "0.5003947", "text": "def makeService(options):\n from twisted.conch.ssh.connection import SSHConnection\n from twisted.conch.ssh.factory import SSHFactory\n from twisted.conch.ssh.keys import Key\n from twisted.cred.portal import Portal\n\n from swftp.realm import SwftpRealm\n from swftp.sftp.server import SwiftSSHUserAuthServer\n from swftp.auth import SwiftBasedAuthDB\n from swftp.utils import (\n log_runtime_info, GLOBAL_METRICS, parse_key_value_config)\n\n c = get_config(options['config_file'], options)\n\n sftp_service = service.MultiService()\n\n # ensure timezone is GMT\n os.environ['TZ'] = 'GMT'\n time.tzset()\n\n print('Starting SwFTP-sftp %s' % VERSION)\n\n # Add statsd 
service\n if c.get('sftp', 'log_statsd_host'):\n try:\n from swftp.statsd import makeService as makeStatsdService\n makeStatsdService(\n c.get('sftp', 'log_statsd_host'),\n c.getint('sftp', 'log_statsd_port'),\n sample_rate=c.getfloat('sftp', 'log_statsd_sample_rate'),\n prefix=c.get('sftp', 'log_statsd_metric_prefix')\n ).setServiceParent(sftp_service)\n except ImportError:\n sys.stderr.write('Missing Statsd Module. Requires \"txstatsd\" \\n')\n\n if c.get('sftp', 'stats_host'):\n from swftp.report import makeService as makeReportService\n known_fields = [\n 'command.login',\n 'command.logout',\n 'command.gotVersion',\n 'command.openFile',\n 'command.removeFile',\n 'command.renameFile',\n 'command.makeDirectory',\n 'command.removeDirectory',\n 'command.openDirectory',\n 'command.getAttrs',\n ] + GLOBAL_METRICS\n makeReportService(\n c.get('sftp', 'stats_host'),\n c.getint('sftp', 'stats_port'),\n known_fields=known_fields\n ).setServiceParent(sftp_service)\n\n authdb = SwiftBasedAuthDB(\n c.get('sftp', 'auth_url'),\n global_max_concurrency=c.getint('sftp', 'num_persistent_connections'),\n max_concurrency=c.getint('sftp', 'num_connections_per_session'),\n timeout=c.getint('sftp', 'connection_timeout'),\n extra_headers=parse_key_value_config(c.get('sftp', 'extra_headers')),\n verbose=c.getboolean('sftp', 'verbose'),\n rewrite_scheme=c.get('sftp', 'rewrite_storage_scheme'),\n rewrite_netloc=c.get('sftp', 'rewrite_storage_netloc'),\n )\n\n rabbitmq_hosts = c.get('rabbitmq', 'rabbitmq_hosts')\n rabbitmq_cluster = RabbitClusterClient([RabbitReplica(host, port) \\\n for host, port in [(h,int(p)) for h,p in [r.split(':') \\\n for r in rabbitmq_hosts.split(',')]]], \\\n c.get('rabbitmq', 'username'), \\\n c.get('rabbitmq', 'password')) \\\n if rabbitmq_hosts else None\n queue_name = c.get('rabbitmq', 'queue_name')\n\n realm = SwftpRealm(rabbitmq_cluster, queue_name)\n sftpportal = Portal(realm)\n sftpportal.registerChecker(authdb)\n\n sshfactory = SSHFactory()\n protocol = SwiftSSHServerTransport\n protocol.maxConnectionsPerUser = c.getint('sftp', 'sessions_per_user')\n protocol.supportedCiphers = c.get('sftp', 'chiphers')\n protocol.supportedMACs = c.get('sftp', 'macs')\n protocol.supportedCompressions = c.get('sftp', 'compressions')\n sshfactory.protocol = protocol\n sshfactory.noisy = False\n sshfactory.portal = sftpportal\n sshfactory.services['ssh-userauth'] = SwiftSSHUserAuthServer\n sshfactory.services['ssh-connection'] = SSHConnection\n\n pub_key_string = file(c.get('sftp', 'pub_key')).read()\n priv_key_string = file(c.get('sftp', 'priv_key')).read()\n sshfactory.publicKeys = {\n 'ssh-rsa': Key.fromString(data=pub_key_string)}\n sshfactory.privateKeys = {\n 'ssh-rsa': Key.fromString(data=priv_key_string)}\n\n signal.signal(signal.SIGUSR1, log_runtime_info)\n signal.signal(signal.SIGUSR2, log_runtime_info)\n\n internet.TCPServer(\n c.getint('sftp', 'port'),\n sshfactory,\n interface=c.get('sftp', 'host')).setServiceParent(sftp_service)\n\n return sftp_service", "title": "" }, { "docid": "4929a5eaf7797125d0f76cff57ef842d", "score": "0.49801666", "text": "def startProtocol(self):\n self.transport = LossyTransport(self.transport, self.lossPr)\n DatagramProtocol.transport = self.transport", "title": "" }, { "docid": "2cb669c692fa09b9aa2cc2505df83315", "score": "0.49388564", "text": "def __init__(self, handle, mode='rw'):\n if not isinstance(handle, pyuv.UDP):\n raise TypeError(\"handle: expecting a 'pyuv.UDP' instance, got {!r}\"\n .format(type(handle).__name__))\n super(DatagramTransport, 
self).__init__(handle, mode)", "title": "" }, { "docid": "b04fd7444639ec0d80b5d475911fb79c", "score": "0.49259666", "text": "def startProtocol(self):\n self.transport = LossyTransport(self.transport, self.lossPr)\n\n DatagramProtocol.transport = self.transport", "title": "" }, { "docid": "7119cc8af14ecd25cc1bb5abac79c094", "score": "0.49203092", "text": "def connection_made(self, transport) -> None:\n peername = transport.get_extra_info(\"peername\")\n logging.info(f\"Server: New connection from {peername}.\")\n self.transport = transport\n loop = asyncio.get_event_loop()\n self.task = loop.create_task(self.serve_data())", "title": "" }, { "docid": "66c597e67be2bc3ff2d88f28dd5a4c88", "score": "0.49184978", "text": "def _make_epasv(self, extmode=False):\n \"\"\"Initialize a passive data channel with remote client which\n issued a PASV or EPSV command.\n If extmode argument is True we assume that client issued EPSV in\n which case extended passive mode will be used (see RFC-2428).\n \"\"\"\n # close establishing DTP instances, if any\n self._shutdown_connecting_dtp()\n\n # close established data connections, if any\n if self.data_channel is not None:\n self.data_channel.close()\n self.data_channel = None\n\n # make sure we are not hitting the max connections limit\n if self.server.max_cons:\n if len(asyncore.socket_map) >= self.server.max_cons:\n msg = \"Too many connections. Can't open data channel.\"\n self.respond(\"425 %s\" %msg)\n self.log(msg)\n return\n\n # open data channel\n self._dtp_acceptor = self.passive_dtp(self, extmode, self.stream_rate)", "title": "" }, { "docid": "41e1abd9a33fd60883a6f4a6b853b309", "score": "0.49155027", "text": "def main():\n print_debug(\"Starting...\")\n host, log_file, port = parse_args()\n logger = Logger(log_file)\n ftp = FTP(host, logger, port)\n do_ftp(ftp)", "title": "" }, { "docid": "1209b0f87642dee775b7398dea9df0ef", "score": "0.4908784", "text": "def handle_error(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "title": "" }, { "docid": "1348fdd388d21fb97347f392909542cd", "score": "0.48465312", "text": "def open(self):\n self._paramiko_sftp_client = self._ssh_client.paramiko_ssh_client.open_sftp()", "title": "" }, { "docid": "d430a639d10e9b13f44a22c8102919bc", "score": "0.4833147", "text": "def __init__(self, processor, server_address,\r\n inputProtocolFactory, outputProtocolFactory = None,\r\n server_class = BaseHTTPServer.HTTPServer):\r\n\r\n if outputProtocolFactory is None:\r\n outputProtocolFactory = inputProtocolFactory\r\n\r\n TServer.TServer.__init__(self, processor, None, None, None,\r\n inputProtocolFactory, outputProtocolFactory)\r\n\r\n thttpserver = self\r\n\r\n class RequestHander(BaseHTTPServer.BaseHTTPRequestHandler):\r\n def do_POST(self):\r\n # Don't care about the request path.\r\n itrans = TTransport.TFileObjectTransport(self.rfile)\r\n otrans = TTransport.TFileObjectTransport(self.wfile)\r\n itrans = TTransport.TBufferedTransport(itrans, int(self.headers['Content-Length']))\r\n otrans = TTransport.TMemoryBuffer()\r\n iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)\r\n oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)\r\n try:\r\n thttpserver.processor.process(iprot, oprot)\r\n except ResponseException, exn:\r\n exn.handler(self)\r\n else:\r\n self.send_response(200)\r\n self.send_header(\"content-type\", \"application/x-thrift\")\r\n self.end_headers()\r\n self.wfile.write(otrans.getvalue())\r\n\r\n self.httpd = 
server_class(server_address, RequestHander)", "title": "" }, { "docid": "d04477bc2946d686fe0b792cea1438f5", "score": "0.4828429", "text": "def __init__(self, transport, buff_size=16384, socket_timeout=5.0,\n progress=None, sanitize=_sh_quote):\n self.transport = transport\n self.buff_size = buff_size\n self.socket_timeout = socket_timeout\n self.channel = None\n self.preserve_times = False\n self._progress = progress\n self._recv_dir = b''\n self._rename = False\n self._utime = None\n self.sanitize = sanitize\n self._dirtimes = {}", "title": "" }, { "docid": "2989c287266813897ac4587691bffe2d", "score": "0.481455", "text": "def start_tftp_server(self):\n self.install_pkgs()\n if self.shared_tftp_server:\n # perform the rest\n raise NotImplementedError\n raise NotImplementedError", "title": "" }, { "docid": "678f8b23a0492e127d28b7bb72ff8c22", "score": "0.48140627", "text": "def tc_start(self, datapath, dpae_port):\n dpid = datapath.id\n self.logger.info(\"Starting TC to DPAE on datapath=%s, dpae_port=%s\",\n dpid, dpae_port)\n switch = self.switches[dpid]\n #*** Check if Active or Passive TC Mode:\n mode = self.main_policy.tc_policies.mode\n self.logger.info(\"TC mode=%s\", mode)\n #*** TBD, deal with context:\n context = self.context_default\n #*** Set up group table to send to DPAE:\n # NEEDS OVS 2.1 OR HIGHER SO COMMENTED OUT FOR THE MOMENT\n # ALSO NEEDS CODE THAT CAN CATER FOR MULTIPLE DPAE\n #switch.flowtables.add_group_dpae(out_port)\n\n if self.main_policy.identity.lldp:\n #*** Install FEs to send LLDP Identity indicators to DPAE:\n switch.flowtables.add_fe_iig_lldp(dpae_port)\n\n if self.main_policy.identity.dhcp:\n #*** Install FEs to send DHCP Identity indicators to DPAE:\n switch.flowtables.add_fe_iig_dhcp(dpae_port)\n\n if self.main_policy.identity.dns:\n #*** Install FEs to send DNS Identity indicators to DPAE:\n switch.flowtables.add_fe_iig_dns(dpae_port)\n\n if mode == 'active':\n #*** Install AMF entries for MACs we already know dest for:\n mac_list = switch.mactable.dump_macs(context)\n for mac in mac_list:\n self.logger.debug(\"Adding previously learned mac=%s dpid=%s \"\n \"dpae_port=%s to Active Mode Filter (amf)\", mac, dpid,\n dpae_port)\n switch.flowtables.add_fe_amf_macport_dst(dpae_port, mac)\n #*** Install FE to so packets returning from DPAE in active mode\n #*** bypass learning tables and go straight to treatment:\n switch.flowtables.add_fe_iim_dpae_active_bypass(dpae_port)\n\n #*** Add any general TC flows to send to DPAE if required by policy\n #*** (i.e. 
statistical or payload):\n switch.flowtables.add_fe_tc_dpae(\n self.main_policy.optimised_rules.get_rules(),\n dpae_port, mode)\n\n self.logger.info(\"TC started to DPAE on datapath=%s, dpae_port=%s\",\n dpid, dpae_port)\n _results = {\"status\": \"tc_started\",\n \"mode\": mode}\n return _results", "title": "" }, { "docid": "87493396523392825396a9cce5b95623", "score": "0.4806019", "text": "def prepare(self):\n if self.prepared:\n return\n self.socket.listen()\n for name in self.socket.getSocketNames():\n self.serverEventHandler.preServe(name)\n for _ in xrange(self.threads):\n thread = Worker(self.tasks)\n thread.setDaemon(True)\n thread.start()\n\n for fileno in self.socket.handles:\n self.poller.read(fileno)\n self.poller.read(self._read.fileno())\n\n self.prepared = True", "title": "" }, { "docid": "26c2372b1f1a97e8d284392eeaaff206", "score": "0.4804269", "text": "def _ftp_connection(year: int) -> ftplib.FTP:\n ftp = ftp_connection(year)\n ftp.cwd('ROADS')\n\n return ftp", "title": "" }, { "docid": "b3f8ed65c47533f60a1cd4f73f2bd75c", "score": "0.47974735", "text": "def handle_expt(self):\n self.cmd_channel.debug(\"DTPHandler.handle_expt()\")\n self.cmd_channel.respond(\"426 Connection error; transfer aborted.\")\n self.close()", "title": "" }, { "docid": "86e9e87ed9c2a7a192adc60d62f59d21", "score": "0.47716457", "text": "def handle_close(self):\n self.cmd_channel.debug(\"DTPHandler.handle_close()\")\n tot_bytes = self.get_transmitted_bytes()\n # If we used channel for receiving we assume that transfer is\n # finished when client close connection , if we used channel\n # for sending we have to check that all data has been sent\n # (responding with 226) or not (responding with 426).\n if self.receive:\n self.transfer_finished = True\n if self.transfer_finished:\n self.cmd_channel.respond(\"226 Transfer complete.\")\n self.cmd_channel.log(\"Transfer complete; \"\n \"%d bytes transmitted.\" %tot_bytes)\n else:\n self.cmd_channel.respond(\"426 Connection closed; transfer aborted.\")\n self.cmd_channel.log(\"Transfer aborted; \"\n \"%d bytes transmitted.\" %tot_bytes)\n self.close()", "title": "" }, { "docid": "4e789e393ac2112de553bc010a8f1d2a", "score": "0.47281206", "text": "def connectionMade(self):\n self.output = DelayedStartupLineLogger()\n self.output.makeConnection(self.transport)\n self.output.tag = self.name", "title": "" }, { "docid": "b892ee35652e274ad31b6235c9940cbe", "score": "0.47228163", "text": "def __init__(self, target, mode, cnf, debug, offline):\n print \"\\n(%s mode)\" % mode\n if debug==1:\n print \"(debug mode)\"\n if offline==1:\n print \"(offline)\"\n print \"\"\n\n # Read configuration\n self._cnf = cnf\n self.config = ConfigParser.RawConfigParser()\n self.config.read(self._cnf)\n\n # Conditional import\n if self.config.get('FTP','ftpproto').lower() == 'ftps':\n try:\n from M2Crypto import ftpslib\n except ImportError:\n print \"***ERROR: M2Crypto missing\"\n print \"M2Crypto required for FTPS: sudo apt-get install python-m2crypto\"\n sys.exit()\n elif self.config.get('FTP','ftpproto').lower() == 'sftp':\n try:\n import paramiko\n except ImportError:\n print \"***ERROR: Paramiko missing\"\n print \"Paramiko required for SFTP: sudo apt-get install python-paramiko\"\n sys.exit()\n\n # Vars initialization\n self._target = target\n self._mode = mode\n self._debug = debug\n self._offline = offline\n self.timeout = self.config.getint('TIMING', 'urltimeout')\n\n # Proxy settings / initialization\n if self.config.get('CLIENT','useproxy')=='1':\n proxyinfo = {\n 'proxyuser' 
: self.config.get('CLIENT','proxyuser'),\n 'proxypass' : self.config.get('CLIENT','proxypass'),\n 'proxyhost' : self.config.get('CLIENT','proxyhost'),\n 'proxyport' : int(self.config.get('CLIENT','proxyport'))\n }\n try:\n # build a new opener that uses a proxy requiring authorization\n proxy_support = urllib2.ProxyHandler({\"http\" : \\\n \"http://%(proxyuser)s:%(proxypass)s@%(proxyhost)s:%(proxyport)d\" % proxyinfo})\n opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)\n # install it\n urllib2.install_opener(opener)\n except Exception, err:\n print \"***ERROR in proxy initialization: %s\" % err\n print \"Check your proxy settings in config.cfg\"\n sys.exit()\n self.testnum = 1\n # List of modules and their class name (as specified in the ./modules/ directory)\n # SF#3439544: new module normalUsage\n self.modules = [\n ['Client Side Attacks', 'clientSideAttacks'],\n ['Test Rules', 'testRules'],\n ['Bad Traffic', 'badTraffic'],\n ['Fragmented Packets', 'fragmentedPackets'],\n ['Brute Force', 'bruteForce'],\n ['Evasion Techniques', 'evasionTechniques'],\n ['ShellCodes', 'shellCodes'],\n ['Denial of Service', 'denialOfService'],\n ['Pcap Replay', 'pcapReplay'],\n ['Normal Usage', 'normalUsage'],\n ['IP Reputation', 'ipReputation']\n ]\n\n # Check if a new version is available\n #if self._offline != 1:\n if False:\n if self.checkNewVersionAvailable() != 0:\n print \"+--------------------------------------------------------+\"\n print \"| A NEW VERSION IS AVAILABLE |\"\n print \"| To update pytbull, issue following command: |\"\n print \"| git clone git://git.code.sf.net/p/pytbull/code pytbull |\"\n print \"+--------------------------------------------------------+\"\n print \"\"\n\n # Confirm user acceptance\n self.confirmUserAcceptance()\n\n # Check if prgm is called with root privs\n # Needed for generating raw packets (e.g. some nmap scans)\n print \"BASIC CHECKS\"\n print \"------------\"\n print \"Checking root privileges\".ljust(65, '.'),\n if(os.getuid() != 0):\n print \"[ Failed ]\"\n print \"\\nRoot privileges required!\"\n sys.exit(0)\n print \"[ OK ]\"\n\n # Checking remote ports. Ports have to be opened to send payloads on these ports\n # Notice that a FTP server must listen to port 21/tcp if you use the multipleFailedLogins\n # module since alerts are configured to listen to this port. 
See for example\n # Snort policy.rules: alert tcp $HOME_NET 21 -> $EXTERNAL_NET any\n self.checkPort(21, 'FTP', 'Install FTP on the remote host: sudo apt-get install vsftpd')\n self.checkPort(self.config.get('SSH', 'port'), 'SSH', 'Install SSH on the remote host: sudo apt-get install openssh-server')\n self.checkPort(80, 'HTTP', 'Install apache on the remote host: sudo apt-get install apache2')\n \n # Chek if paths (from config file) are valid\n self.checkEnvVar()\n\n # Remove temp file\n print \"Removing temporary file\".ljust(65, '.'),\n if os.path.exists(self.config.get('PATHS', 'tempfile')):\n os.remove(self.config.get('PATHS', 'tempfile'))\n print \"[ OK ]\"\n\n # Truncate table test\n print \"Cleaning database\".ljust(65, '.'),\n database.DB(self._cnf).truncateTestResults()\n print \"[ OK ]\"\n\n # Print tests selection\n print \"\\nTESTS\"\n print \"------------\"\n\n for module in self.modules:\n print module[0].ljust(65, '.'),\n if self.config.get('TESTS', module[1]) == '1':\n print \"[ yes ]\"\n else:\n print \"[ no ]\"\n\n print \"\"", "title": "" }, { "docid": "642749dafc9818a94b7e0f9996a3777c", "score": "0.47015443", "text": "def connectionMade(self):\n protocol.Protocol.connectionMade(self)\n self.port = self.transport.getHost().port\n #Start the inactivity timer the connection is dropped if we receive no data\n self.activateInactivityTimer()\n self.sessionState = SMPPSessionStates.OPEN\n self.log.warning(\"SMPP connection established from %s to port %s\", self.transport.getPeer().host, self.port)", "title": "" }, { "docid": "eb96d610bfd4cb5b073e6b6f0959ecc7", "score": "0.46982753", "text": "def openRtpPort(self):\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself.rtpSocket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\t\tself.rtpSocket_client.bind(('', self.rtpPort))\r\n\t\t\t\tself.rtpSocket_client.settimeout(0.5)\r\n\t\t\t\tself.listenRtp()\r\n\t\t\texcept Exception as err:\r\n\t\t\t\tif (str(err) == \"[Errno 9] Bad file descriptor\"):\r\n\t\t\t\t\tbreak", "title": "" }, { "docid": "4b34b2b43f7dc4bf50a444eb89ab6890", "score": "0.4696954", "text": "def open(self):\n self._lock.acquire()\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n logger.debug(\n \"PIGGYBACK TCPRELAY\"\n \"PID: {0} PORT: {1}\".format(self._relaypid,\n self._portoffset))\n except AttributeError:\n # TODO: tcprelays might want to close when test is over???\n self._portoffset = get_available_portoffset()\n command = \"/usr/local/bin/tcprelay --portoffset {0} \" \\\n \"--locationid {1} rsync telnet \" \\\n \"ssh > /tmp/tcprelay.{1}.log 2>&1\" \\\n \" &\".format(self._portoffset, self.locationid_param)\n logger.debug(\"SPAWNING TCPRELAY - {0}\".format(command))\n child = subprocess.Popen([\"bash\", \"-c\", command], close_fds=True)\n time.sleep(0.5)\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n except AttributeError:\n logger.error(\n \"FAILED to SPAWN TCPRELAY - CMD {0} \"\n \"OUTPUT: {1} ERROR: {2} RC: {3}\".format(command,\n child.stdout,\n child.stderr,\n child.returncode))\n finally:\n self._lock.release()", "title": "" }, { "docid": "7d8ff80385c31e454ee03a1f114223d3", "score": "0.46953368", "text": "def connectionMade(self):\n self.connection_timestamp = datetime.datetime.now()\n self.factory.open_connections.add((self.connection_timestamp,\n self.transport.getPeer()))\n self.lc = LoopingCall(self.sendRandomBytes)\n self.lc.start(self.send_interval)\n self.log.info(\"Connection made from: {address}\",\n address=self.transport.getPeer())", 
"title": "" }, { "docid": "be044b36381e06fe38fdb25ce15972f8", "score": "0.4678802", "text": "def open(self):\n self._server = socketserver.ThreadingTCPServer(\n server_address=('localhost', self._requested_local_port),\n RequestHandlerClass=self._create_handler(self._ssh_client, self._remote_host, self._remote_port),\n )\n\n threading.Thread(target=self.serve_forever).start()\n\n print('Forwarding local port {} to remote {}:{}'.format(self.local_port, self.remote_host, self.remote_port))", "title": "" }, { "docid": "f79b9033bb73bdbdfb2f41b7118db15d", "score": "0.4653781", "text": "def main(self):\n self.pid = os.getpid()\n self.fdp.close() # Close fdp on child side\n if self.datasock is None:\n # Create session's data socket and load file\n self.datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.load_file()\n logging.info(\"Child process finished loading file\")\n port = UDP_START + self.meta.sessionid # Port used by the session\n poller = select.poll() # poll fdc and datasock\n poller.register(self.fdc.fileno(), select.POLLIN)\n poller.register(self.datasock.fileno(), select.POLLOUT)\n pkt_p = snc.snc_alloc_empty_packet(snc.snc_get_parameters(self.sc))\n while True:\n for fd, event in poller.poll():\n if fd == self.fdc.fileno() and event is select.POLLIN:\n pkt, ip = self.fdc.recv()\n logging.info(\"Session [%d] received msg <%s> from %s.\" %\n (self.meta.sessionid, iMSG[pkt.header.mtype], ip))\n if pkt.header.mtype == MSG['REQ_SEG']:\n self.add_client(HostInfo(ip, self.meta.sessionid))\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n elif pkt.header.mtype == MSG['HEARTBEAT']:\n self.client_heartbeat(ip)\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n elif pkt.header.mtype == MSG['REQ_STOP'] or pkt.header.mtype == MSG['EXIT']:\n self.remove_client(ip)\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n\n if fd == self.datasock.fileno() and event is select.POLLOUT:\n # writable datasock, send data packets to clients\n for cli in self.clients:\n snc.snc_generate_packet_im(self.sc, pkt_p)\n pktstr = pkt_p.contents.serialize(self.meta.sp.size_g,\n self.meta.sp.size_p,\n self.meta.sp.bnc)\n try:\n # Construct data packet with serialized snc_packet\n self.datasock.sendto(CCPacket(CCHeader(MSG['DATA']), pktstr).packed(), (cli.ip, port))\n except:\n logging.warning(\"Caught exception in session %s.\"\n % (self.meta.sessionid,))\n self.lastIdle = datetime.now() # Refresh idle time\n self.housekeeping()", "title": "" }, { "docid": "79a31030e8f2c5998c9a9f84115202e3", "score": "0.4652251", "text": "def server_do(self,input, connstream):\r\n pass", "title": "" }, { "docid": "2e9555eadd882faa0dfd076bde05fcdf", "score": "0.4650192", "text": "def _init_socket(self):\n # destroy the connection if it already exists\n if self.ddpsocket:\n self.ddpsocket.remove_all_listeners('received_message')\n self.ddpsocket.remove_all_listeners('closed')\n self.ddpsocket.remove_all_listeners('opened')\n self.ddpsocket.close_connection()\n self.ddpsocket = None\n\n # create a ddp socket and subscribe to events\n self.ddpsocket = DDPSocket(self.url, self.debug)\n self.ddpsocket.on('received_message', self.received_message)\n self.ddpsocket.on('closed', self.closed)\n self.ddpsocket.on('opened', self.opened)", "title": "" }, { "docid": "8d1932bf4ebca93417fc3eb82b232166", "score": "0.46500129", "text": "def gtp_test(\n self, type='fdir', port='pf', tunnel_pkt='gtpu', inner_L3='ipv4'):\n queue = random.randint(1, self.PF_QUEUE - 1)\n if port != 'pf':\n queue = random.randint(1, self.VF_QUEUE - 1)\n random_teid = 
random.randint(0x0, 0xFFFFFFFF)\n correct_teid = hex(random_teid)\n wrong_teid = hex((random_teid + 2) % int(0xFFFFFFFF))\n if type is 'fdir':\n if inner_L3 is None:\n self.dut_testpmd.execute_cmd(\n 'flow create 0 ingress pattern eth / ipv4 / udp / \\\n %s teid is %s / end actions queue index %d / end'\n % (tunnel_pkt, correct_teid, queue))\n else:\n self.dut_testpmd.execute_cmd(\n 'flow create 0 ingress pattern eth / ipv4 / udp / \\\n %s teid is %s / %s / end actions queue index %d / end'\n % (tunnel_pkt, correct_teid, inner_L3, queue))\n if type is 'clfter':\n self.dut_testpmd.execute_cmd(\n 'flow create 0 ingress pattern eth / ipv4 / udp / \\\n %s teid is %s / end actions %s / queue index %d / end'\n % (tunnel_pkt, correct_teid, port, queue))\n for match_opt in ['matched', 'not matched']:\n teid = correct_teid\n pkts = []\n for teid_opt in ['correct teid', 'wrong teid']:\n chk = ''\n for chksum_opt in ['good chksum', 'bad chksum']:\n pkts = self.gtp_packets(\n type, tunnel_pkt, inner_L3, match_opt, chk, teid)\n for packet_type in pkts.keys():\n self.tester.scapy_append(\n 'sendp([%s], iface=\"%s\")'\n % (pkts[packet_type], self.tester_intf))\n self.tester.scapy_execute()\n if port is 'pf':\n out = self.dut.get_session_output(timeout=2)\n else:\n out = self.vm0_dut.get_session_output(timeout=2)\n self.verify(\n \"port 0/queue %d\" % queue in out,\n \"Failed to receive packet in this queue!!!\")\n\n if port is 'pf':\n layerparams = ['L3_', 'TUNNEL_',\n 'INNER_L3_', 'INNER_L4_']\n ptypes = packet_type.split('/')\n endparams = ['_EXT_UNKNOWN', '',\n '_EXT_UNKNOWN', '']\n for layerparam, ptype, endparam in zip(\n layerparams, ptypes, endparams):\n layer_type = layerparam + ptype + endparam\n self.verify(\n layer_type in out,\n \"Failed to output ptype information!!!\")\n if queue != 0 and type is 'fdir':\n self.verify(\"PKT_RX_FDIR\" in out,\n \"Failed to test flow director!!!\")\n if teid == wrong_teid or match_opt == 'not matched':\n break\n chk = 'chksum=0x1234,'\n if match_opt == 'not matched':\n break\n queue = 0\n teid = wrong_teid", "title": "" }, { "docid": "1f327930e7dffb2ff53b138bf4fb2ff4", "score": "0.46483532", "text": "def ftp():\r\n session = Session(target=Target(connection=TCPSocketConnection(str(ip),int(port))))\r\n\r\n s_initialize(\"user\")\r\n s_string(\"USER\")\r\n s_delim(\" \")\r\n s_string(\"anonymous\")\r\n s_static(\"\\r\\n\")\r\n\r\n # s_initialize(\"pass\")\r\n # s_string(\"PASS\")\r\n # s_delim(\" \")\r\n # s_string(\"james\")\r\n # s_static(\"\\r\\n\")\r\n #\r\n # s_initialize(\"stor\")\r\n # s_string(\"STOR\")\r\n # s_delim(\" \")\r\n # s_string(\"AAAA\")\r\n # s_static(\"\\r\\n\")\r\n #\r\n # s_initialize(\"retr\")\r\n # s_string(\"RETR\")\r\n # s_delim(\" \")\r\n # s_string(\"AAAA\")\r\n # s_static(\"\\r\\n\")\r\n\r\n session.connect(s_get(\"user\"))\r\n # session.connect(s_get(\"user\"), s_get(\"pass\"))\r\n # session.connect(s_get(\"pass\"), s_get(\"stor\"))\r\n # session.connect(s_get(\"pass\"), s_get(\"retr\"))\r\n\r\n session.fuzz()", "title": "" }, { "docid": "5a916cc0ab9931257e6d5b73a073ec3b", "score": "0.46408847", "text": "def connectionMade(self):\n peer = self.transport.getPeer()\n log.info(\"Deluge Client connection made from: %s:%s\", peer.host, peer.port)\n # Set the initial auth level of this session to AUTH_LEVEL_NONE\n self.factory.authorized_sessions[self.transport.sessionno] = {}", "title": "" }, { "docid": "16a6a2397a85a9d7c95299f25012937d", "score": "0.4640554", "text": "def load() -> Ftp:\n\n return Ftp(\n url=FSEC_FTP_URL,\n 
username=FSEC_FTP_USERNAME,\n password=FSEC_FTP_PASSWORD,\n basepath=FSEC_FTP_PATH,\n destination=FSEC_DOWNLOAD_DIR,\n )", "title": "" }, { "docid": "ffce66e202ba08af27286f2aceba9e3c", "score": "0.46154392", "text": "def connectionMade(self):\n self._pid = self.transport.pid\n if self._pid:\n self.logger(\"Process has pid %d\" % self._pid)\n self.transport.closeStdin() # close stdin", "title": "" }, { "docid": "02cde713a41cf23713a2de084b08be72", "score": "0.46144727", "text": "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "title": "" }, { "docid": "2d26c3de75a85b58f9f3318449ed5496", "score": "0.4607431", "text": "def est_connection(self):\n try:\n file_size = math.ceil(self.get_file_size())\n with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\n print(f\"[+]connecting to {self.HOSTNAME}:{self.PORT}\")\n s.connect((self.HOSTNAME,self.PORT))\n print(f\"[+]Connected\")\n # prime the server with file meta data\n s.send(f\"{self.FILENAME} {file_size}\".encode())\n print(f\"[+]Sending file info from: {self.get_full_path()}\")\n self.stream_files(s)\n return \n\n except socket.error as msg:\n print(f\"Caught exception: {msg}\")", "title": "" }, { "docid": "e746ee1cedbe1b1798a678b700eb234d", "score": "0.4602082", "text": "def __init__(self):\n\n self.start_timer() #Starts calling handle_timer() at correct rate\n\n self.ports_to_latencies = {} #keeps track of the lengths of the links \n self.hosts_to_ports = {} #Stores optimal sending post for each host\n self.hosts_to_unused_ports = {} #Stores mapping between host and unused ports\n \n # self.filterer = DVFilter()", "title": "" }, { "docid": "806716629bdcded87a80a88aeb15b5a0", "score": "0.46017683", "text": "def start_proxy_handler(self) -> None:\n\n def proxy_handler(listener: socket.socket) -> None:\n sock = listener.accept()[0]\n with self.server_ctx.wrap_socket(sock, server_side=True) as client_sock:\n upstream_sock = socket.create_connection(\n (self.destination_server_host, self.destination_server_port)\n )\n self._read_write_loop(client_sock, upstream_sock)\n upstream_sock.close()\n client_sock.close()\n\n self._start_server(proxy_handler)", "title": "" }, { "docid": "9ab82bf5d2165887774fa0a6cb915436", "score": "0.4601167", "text": "def setup(self):\n self.scp = None", "title": "" }, { "docid": "cb3f0ecb151e26371c6664e7cc261b2e", "score": "0.45979106", "text": "def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - 
conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta", "title": "" }, { "docid": "bfdc99c6ad5ca2bbd8a3c51a7bbe40c3", "score": "0.45932025", "text": "def post_process(self, packet: 'dict[str, Any]') -> 'MPTCP':\n ret = self.data\n\n ret.option = Enum_Option.Multipath_TCP\n ret.length = self.test['length']\n ret.subtype = Enum_MPTCPOption.get(packet['test']['subtype'])\n\n return ret", "title": "" }, { "docid": "d7bc1168dcfad58c83821a22ce28d008", "score": "0.45816842", "text": "def __init__(self, config):\n self.config = config\n\n # setup logging\n if config.get('ssh_log_path'):\n ssh.util.log_to_file(config['ssh_log_path'])\n\n hostname = config.get('remote_host', 'localhost')\n port = config.get('remote_port', 22)\n username = config.get('username') or getpass.getuser()\n password = config.get('password')\n private_key = config.get('private_key')\n hostkey = self._load_host_key(hostname)\n\n log.debug(\"Connecting to %s, port %s...\", hostname, port)\n self._transport = tpt = ssh.Transport((hostname, port))\n tpt.use_compression(compress=config.get('compress', False))\n self._authenticate(tpt, username, password, hostkey, private_key)\n\n if tpt.is_authenticated():\n log.debug(\"SSH transport authenticated. Creating SFTP client.\")\n # create SFTP client from SSHClient\n self._client = ssh.SFTPClient.from_transport(tpt)\n else:\n raise tpt.get_exception()", "title": "" }, { "docid": "e9d7efcdb5a6e326c77b393202f9bb27", "score": "0.4580045", "text": "def test_fdir_gtpc_pf(self):\n self.gtp_test(\n type='fdir', port='pf', tunnel_pkt='gtpc', inner_L3=None)", "title": "" }, { "docid": "49078608d402fa241194e7e93d300f0b", "score": "0.45770004", "text": "def __init__(self, host, logger, port):\n # Initialize logger.\n self.logger = logger\n self.logger.log(\"Connecting to %s\" % host)\n try:\n # Create socket, connect to host and port.\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.ftp_connect(self.s, host, port)\n # Get response from initial connection.\n msg_rec = repr(self.s.recv(BUFF_SIZE))\n except socket.error as e:\n error_quit(\"Unable to connect due to: %s\" % e, 500)\n self.logger.log(\"Received: %s\" % msg_rec)\n print_debug(msg_rec)\n # Did not receive an acknowledgement to the connection, so terminate.\n if not msg_rec:\n self.close_socket(self.s)", "title": "" }, { "docid": "4372921ea6954144f0c94ab329ad22ba", "score": "0.4574694", "text": "def __init__(self, test_stream=None, no_delay=False, window=None, server=None):\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server", "title": "" }, { "docid": "ed2d7a83366b2019afd4aa99891c33d4", "score": "0.45656845", "text": "def __init__(self, usocket, starting_point, allow_design):\n self.queue = sundaytasks.utils.get_plugins()\n self.extensions = sundaytasks.utils.get_extensions()\n self.starting_point = starting_point\n self.instance = IOLoop.instance()\n self._allow_design = allow_design\n unix_socket = 
netutil.bind_unix_socket(usocket)\n netutil.add_accept_handler(unix_socket, self.accept)", "title": "" }, { "docid": "ac1aa4f78fb35b79132a4b8fb283f6ca", "score": "0.45594156", "text": "def create_connection(self, *args_to_use, **kwargs_to_use):\n return pysftp.Connection(*args_to_use, **kwargs_to_use)", "title": "" }, { "docid": "1c62a56fa05951b1eba48035f5ae8cb3", "score": "0.45589703", "text": "def handle_connect(self):\n pass", "title": "" }, { "docid": "e9d59b05b85c84a2e0e9c16489e66887", "score": "0.4554813", "text": "def connect(self, path, server_info, user_info, count):\n pass", "title": "" }, { "docid": "429c964eac4d725a8c3cfa709f0127b5", "score": "0.4552641", "text": "def init_drone(self):\n #if self.log_level:\n self.drone.log.set_level(0)\n self.drone.connect()\n self.set_video_encoder_rate(3)\n self.drone.start_video()\n\n self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,\n self.flight_data_handler)\n self.drone.subscribe(self.drone.EVENT_LOG_DATA,\n self.log_data_handler)\n self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,\n self.handle_flight_received)", "title": "" }, { "docid": "6abb0956e39eed0a67ba9f1f44644609", "score": "0.4552557", "text": "def connect(self):\n self.sock.connect((self.host, self.port))\n self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n logger.debug(\"TCPSPP: Connected to %s:%d\", self.host, self.port)", "title": "" }, { "docid": "c85e023d9203c4ae261ab1ec32296474", "score": "0.45501572", "text": "def do_ftp(ftp):\n login(ftp)\n main_menu(ftp)", "title": "" }, { "docid": "28a4f6fd81dfb72e05ad8c563f0e8ad3", "score": "0.45492244", "text": "def start_sending(self, _):\n self.sending = True\n print(f'[INFO] Sending DEAUTH packets to {self.target_addr} on AP {self.access_point}')", "title": "" }, { "docid": "b2a8b0b26ba89cbfcfdde976bbced20d", "score": "0.45453054", "text": "def _handle_DeferredConnectionIn (self, event, flow, packet):\n pass", "title": "" }, { "docid": "097e8ad96f9d6f652ef96b6050fb65d5", "score": "0.4543501", "text": "def __init__(self, *args, **kvargs):\n self.proxy_host = kvargs.get('proxy_host')\n self.proxy_user = kvargs.get('proxy_user')\n self.proxy_password = kvargs.get('proxy_password')\n self.proxy_port = kvargs.get('proxy_port')\n self.proxy_ssh_key_file = kvargs.get('proxy_ssh_key')\n self.proxy_connection = False\n self.host = kvargs.get('host')\n self.user = kvargs.get('user')\n self.password = kvargs.get('password')\n self.port = kvargs.get('port')\n self.dest_connection = False\n\n try:\n # Add host key policy\n if self.proxy_port is None:\n self.proxy_port = 22\n self.transport = paramiko.Transport((self.proxy_host, self.proxy_port))\n self.transport.start_client()\n if self.proxy_ssh_key_file:\n self.proxy_ssh_key = paramiko.RSAKey.from_private_key_file(self.proxy_ssh_key_file)\n conn_result = self.transport.auth_publickey(username=self.proxy_user, key=self.proxy_ssh_key)\n else:\n conn_result = self.transport.auth_password(username=self.proxy_user, password=self.proxy_password)\n if len(conn_result) == 0:\n self.proxy_connection = True\n else:\n logging.error('Unable to connect to proxy host. Authentication failed.')\n raise TobyException('Unable to connect to proxy host. 
Authentication failed.')\n except Exception as exp:\n logging.error('Unable to connect to proxy host: %s' % exp)\n raise TobyException('Unable to connect to proxy host: %s' % exp)\n\n try:\n if self.port is None:\n self.port = 22\n self.tunnel = paramiko.Transport(self.transport.open_channel(\n kind='direct-tcpip',\n dest_addr=(self.host, self.port),\n src_addr=('127.0.0.1', 0)))\n self.tunnel.start_client()\n conn_result = self.tunnel.auth_password(username=self.user, password=self.password)\n if len(conn_result) == 0:\n self.dest_connection = True\n else:\n logging.error('Unable to connect to destination host. Authentication failed.')\n raise TobyException('Unable to connect to destination host. Authentication failed.')\n except Exception as exp:\n logging.error('Unable to connect to destination host: %s' % exp)\n raise TobyException('Unable to connect to destination host: %s' % exp)\n\n try:\n self.handle = self.tunnel.open_session(20)\n self.handle.get_pty(width=160, height=0)\n self.handle.invoke_shell()\n self.handle.set_combine_stderr(True)\n self.handle.settimeout(60)\n tnh = self.handle\n got = []\n while True:\n _rd, _wr, _err = select([tnh], [], [], 10)\n if _rd:\n data = tnh.recv(1024)\n data = data.decode(\"utf-8\")\n got.append(data)\n if re.search('> ', data):\n tnh.send(b' start shell\\n')\n data = tnh.recv(1024)\n data = data.decode(\"utf-8\")\n if re.search(r'(\\$|>|#|%)[\\s\\t]?', data):\n break\n except Exception as exp:\n logging.error(\n 'Unable to fetch the prompt on destination host: %s' % exp)\n raise TobyException(\n 'Unable to fetch the prompt on destination host: %s' % exp)", "title": "" }, { "docid": "bf768648dd9ef344d358227aa7451767", "score": "0.45384288", "text": "def open(self):\n broker = os.path.join(getsitepackages()[0], 'pynq_networking', 'rsmb',\n 'rsmb', 'src', 'broker_mqtts')\n\n self.close()\n os.system(f\"nohup {broker} > {self.log} &\")\n\n for t in MQTT_PACKET_TYPES:\n bind_layers(MQTT, t, {'type': t.type})\n\n bind_layers(TCP, MQTT_Stream, {'dport': self.mqtt_port})\n bind_layers(TCP, MQTT_Stream, {'sport': self.mqtt_port})\n\n for t in MQTTSN_PACKET_TYPES:\n bind_layers(MQTTSN, t, {'type': t.type})\n\n bind_layers(UDP, MQTTSN, {'dport': self.mqttsn_port})\n bind_layers(UDP, MQTTSN, {'sport': self.mqttsn_port})", "title": "" }, { "docid": "9ba02b9598d71d595a4ff42a71983a12", "score": "0.45369986", "text": "def connectionMade(self):\r\n self.transport.uid = str(uuid.uuid1())\r\n\r\n self.guid = self.dispatcher.add(self.transport)\r\n self.dispatcher.send(self.guid, {'setup_connection': self.guid})", "title": "" }, { "docid": "9d6019c0f506421ae5556bee177e244d", "score": "0.45369968", "text": "def connectionMade(self):\n self.transport.write('{0}{1}'.format(self._uidServer.getUID(),\n self.delimiter))\n self.transport.loseConnection()", "title": "" }, { "docid": "bd9808c26a53fa92e1f23e20dae0df58", "score": "0.45317814", "text": "def peer_server_upload(self, conn, data_received):\n try:\n file_size = os.path.getsize(SHARED_DIR+'/'+data_received['file_name'])\n f = open(SHARED_DIR+'/'+data_received['file_name'], 'rb')\n #print \"Hosting File: %s for download\" % data_received\n for chunk_start, chunk_size in self.get_chunks(file_size):\n file_chunk = f.read(chunk_size)\n conn.sendall(file_chunk)\n '''\n while True:\n data = f.readline()\n if data:\n conn.sendall(data)\n else:\n break\n '''\n f.close()\n conn.sendall('')\n conn.close()\n except Exception as e:\n print \"File Upload Error, %s\" % e", "title": "" }, { "docid": 
"711228965a3ebb07a06e960ac91fd447", "score": "0.4531753", "text": "def setup_sftp_conn(transport, attempts=1):\n sftp = None\n # Note: the way paramiko manages the connectionn, this loop\n # doesn't seem to work with multiple attempts.\n # There may be a transport attribute that needs to be reset or\n # a new transport object may need to be generated after each failed\n # attempt.\n while (attempts > 0 and sftp is None):\n user, pwd = basic.get_user_pwd(user_prompt=\"Server username: \",\n pwd_prompt=\"Server password: \")\n try:\n transport.connect(username=user, password=pwd)\n sftp = paramiko.SFTPClient.from_transport(transport)\n except:\n print(\"Unable to connect to server. \"\n \"Incorrect username and password\")\n attempts -= 1\n return sftp", "title": "" }, { "docid": "ec1c45aa56fd2e385bb497a31412c7a3", "score": "0.45287308", "text": "def __init__(self, host, username, port, key_file=None, debug=False):\n\n self.log = logger.getLogger(name=\"directord\", debug_logging=debug)\n self.key_file = key_file\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((host, port))\n\n self.session = Session()\n self.session.handshake(self.sock)\n self.log.debug(\n \"Handshake with [ %s ] on port [ %s ] complete.\", host, port\n )\n\n self.known_hosts = self.session.knownhost_init()\n\n if key_file:\n self.session.userauth_publickey_fromfile(username, key_file)\n self.log.debug(\"Key file [ %s ] added\", key_file)\n else:\n try:\n self.session.agent_auth(username)\n self.log.debug(\"User agent based authentication enabled\")\n except ssh2.exceptions.AgentConnectionError as e:\n self.log.warning(\n \"SSH Agent connection has failed: %s.\"\n \" Attempting to connect with the user's implicit ssh key.\",\n str(e),\n )\n home = os.path.abspath(os.path.expanduser(\"~\"))\n default_keyfile = os.path.join(home, \".ssh/id_rsa\")\n if os.path.exists(default_keyfile):\n self.session.userauth_publickey_fromfile(\n username, default_keyfile\n )\n self.log.debug(\"Key file [ %s ] added\", key_file)\n\n self.channel = None", "title": "" }, { "docid": "b9008f0d73fd03450419c974f569e98d", "score": "0.45281923", "text": "def tcp_server_thread(id, tcpPort):\n global server\n \n # TCP Connection\n conn,adrr = server.accept()\n received_time_stamp = struct.unpack('!f', conn.recv(4096))[0]\n \n exchange = daemon_thread_builder(CalcDelay, args=(received_time_stamp, id,tcpPort ))\n exchange.start()\n exchange.join()\n\n pass", "title": "" }, { "docid": "faa2e75148739bb8f4703b49cc29990f", "score": "0.45258382", "text": "def connection_made(self, transport):\n print('Connection Made')\n self.transport = transport", "title": "" }, { "docid": "b200648478a52590b90032aeba976d09", "score": "0.45195064", "text": "def download_all_ftp(download_dir, file_match, ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory, max_wait=60):\r\n if max_wait < 0:\r\n max_wait = 0\r\n \r\n remove_old_ftp_downloads(download_dir)\r\n #open the file for writing in binary mode\r\n all_files_downloaded = []\r\n print 'Opening local file'\r\n time_start_connect_attempt = datetime.datetime.utcnow()\r\n request_incomplete = True\r\n ftp_exception = \"FTP Request Incomplete\"\r\n attempt_count = 1\r\n while ((datetime.datetime.utcnow()-time_start_connect_attempt)<datetime.timedelta(minutes=max_wait) \\\r\n or attempt_count == 1) and request_incomplete:\r\n try:\r\n #init FTPClient (moved here because of traffic issues)\r\n ftp_client = PyFTPclient(host=ftp_host,\r\n login=ftp_login,\r\n passwd=ftp_passwd,\r\n 
directory=ftp_directory)\r\n ftp_client.connect()\r\n file_list = ftp_client.ftp.nlst(file_match)\r\n ftp_client.ftp.quit()\r\n #if there is a file list and the request completed, it is a success\r\n if file_list:\r\n for dst_filename in file_list:\r\n local_path = os.path.join(download_dir, dst_filename)\r\n local_dir = local_path[:-1*len(FileExtension(local_path))-1]\r\n #download and unzip file\r\n try:\r\n #download from ftp site\r\n unzip_file = False\r\n if not os.path.exists(local_path) and not os.path.exists(local_dir):\r\n print \"Downloading from ftp site: \" + dst_filename\r\n unzip_file = ftp_client.download_file(dst_filename, local_path)\r\n else:\r\n print dst_filename + ' already exists. Skipping download ...'\r\n #extract from tar.gz\r\n if unzip_file:\r\n\t\t\t print \"Extracting: \" + dst_filename\r\n ExtractNested(local_path, True)\r\n #add successfully downloaded file to list\r\n all_files_downloaded.append(local_dir)\r\n #request successful when one file downloaded and extracted \r\n request_incomplete = False\r\n else:\r\n print dst_filename + ' already extracted. Skipping extraction ...'\r\n except Exception as ex:\r\n print ex\r\n if os.path.exists(local_path):\r\n os.remove(local_path)\r\n continue\r\n \r\n except Exception as ex:\r\n ftp_exception = ex\r\n pass\r\n \r\n if request_incomplete:\r\n print \"Attempt\", attempt_count, \"failed ...\"\r\n attempt_count += 1\r\n if max_wait > 0:\r\n sleep_time = 5.1\r\n if max_wait < 5.1:\r\n sleep_time = max(max_wait, 0.1)\r\n print \"Sleeping for\", (sleep_time-0.1), \"minutes and trying again ...\"\r\n time.sleep((sleep_time-0.1)*60)\r\n \r\n \r\n \r\n if request_incomplete:\r\n print \"Maximum wait time of\", max_wait, \"minutes exeeded and request still failed. Quitting ...\"\r\n raise Exception(ftp_exception)\r\n \r\n print \"All downloads completed!\"\r\n return all_files_downloaded", "title": "" }, { "docid": "ed0602dfd961b7875d5cf2de9fbc214e", "score": "0.4518171", "text": "def _process_input(self, fd):\n if fd.fileno() == self._proxyfd.fileno():\n pkt = self._grab_packet(\n lambda data, s=self: s.create_packet(packet=data), fd)\n self._handle_proxy_packet(pkt)\n else:\n Server._process_input(self, fd)", "title": "" }, { "docid": "1693986cdc491296134b2022d4e2cf18", "score": "0.45122164", "text": "def server_init(log_set, conf_set, header_set, commands_w_set):\n global log_th, conf_th, header_th, command_w_th\n log_th = log_set\n conf_th = conf_set\n header_th = header_set\n command_w_th = commands_w_set\n sock_ip = conf_set.get_item(q_key='general').get('sock_ip')\n port = int(conf_set.get_item(q_key='general').get('port'))\n return ThreadedTCPServer((sock_ip, port), ThreadedTCPRequestHandler)", "title": "" }, { "docid": "22983fba553a18798b43ad85e775a614", "score": "0.45095387", "text": "def ftp_connect(ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory):\r\n ftp = ftplib.FTP(ftp_host)\r\n ftp.login(ftp_login,ftp_passwd)\r\n ftp.cwd(ftp_directory)\r\n ftp.set_debuglevel(1)\r\n return ftp", "title": "" }, { "docid": "9cdf51556a9eb078569dab7b9745f159", "score": "0.45060232", "text": "def __init__(self, tftp_handler, filename):\n server_root = os.path.abspath(tftp_handler.server.root)\n abs_path = os.path.abspath(os.path.join(server_root, filename))\n if os.path.commonprefix([abs_path, server_root]) != server_root:\n raise ValueError('Directory traversal prevented')\n super(Session, self).__init__(tftp_handler, abs_path)", "title": "" }, { "docid": "304d782a096326a68fa60e8b49409d28", "score": "0.45051432", "text": 
"def push_dtp_data(self, data, isproducer=False, log=''):\n if self.data_channel:\n self.respond(\"125 Data connection already open. Transfer starting.\")\n if log:\n self.log(log)\n if not isproducer:\n self.data_channel.push(data)\n else:\n self.data_channel.push_with_producer(data)\n if self.data_channel:\n self.data_channel.close_when_done()\n else:\n self.respond(\"150 File status okay. About to open data connection.\")\n self.out_dtp_queue = (data, isproducer, log)", "title": "" }, { "docid": "25f50f354a653185dca030fdc8a176a5", "score": "0.4502698", "text": "def connection_to_server_made(self, transport):\n\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n\n self._logger.info('[%s] local %s:%s connected to %s:%s',\n self._sock_id, local_data[0], local_data[1],\n peer_data[0], peer_data[1])\n\n # No delay OFF -> Nagle's alg used\n self._socket.setsockopt(\n socket.IPPROTO_TCP,\n socket.TCP_NODELAY,\n 0)\n\n # If required - turn off Nagle's alg (No Delay ON)\n if self._no_delay:\n self._socket.setsockopt(\n socket.IPPROTO_TCP,\n socket.TCP_NODELAY,\n 1)\n\n # Set Socket TX/RX buffer sizes if specified\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self._window)\n\n # Print current buf sizes:\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;', tx_buf, rx_buf)\n\n self._stream.connection_established(self)", "title": "" }, { "docid": "4ca79886c2c38c7aa1d48d3575906c69", "score": "0.44975287", "text": "def establish_scp_conn(self):\n ssh_connect_params = self.ssh_ctl_chan.get_configured_param()\n self.scp_conn = paramiko.SSHClient()\n self.scp_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.scp_conn.connect(**ssh_connect_params)\n self.scp_client = scp.SCPClient(self.scp_conn.get_transport())", "title": "" }, { "docid": "3667bacaf6e226fc241b2402f7dd720d", "score": "0.4497326", "text": "def connection_made(self, transport):\n super().connection_made(transport)\n\n try:\n self.session = self.server.create_session(transport)\n except AllocationError:\n # An ID could not be allocated for a new session; refuse\n # connection.\n self.logger.warning('Failed to allocate an ID for a new session!')\n self.logger.warning('Refusing connection.')\n transport.close()\n else:\n self.session.on_connected()", "title": "" }, { "docid": "6af8fc7dec3d177b5c7c317af7a2f891", "score": "0.44963434", "text": "def setupTcp(self):\n \tself.tcpManager = QueuedConnectionManager()\n \tself.tcpReader = QueuedConnectionReader(self.tcpManager, 0)\n \tself.tcpWriter = ConnectionWriter(self.tcpManager, 0)", "title": "" }, { "docid": "6f9183e205e43452a0e3efe283f11492", "score": "0.4494645", "text": "def __init__(self, tp):\n self.tp = tp", "title": "" }, { "docid": "4c419d39c4e65d94148f02ebd89f34b4", "score": "0.44903612", "text": "def do_upload(ftp):\n # Active (PORT), Passive (PASV), ExtActive (EPRT), or ExtPassive (EPSV)?\n output, sock, transfer_type = get_transfer_output_and_socket(ftp)\n print_debug(output + \"\\n\")\n\n # What file to upload?\n local_file = raw_input(\"What local file do you want to upload?\\n> \")\n is_file = os.path.isfile(local_file)\n while not local_file or not is_file:\n if not is_file:\n 
print(\"File not found.\")\n local_file = raw_input(\"What local file do you want to upload?\\n> \")\n is_file = os.path.isfile(local_file)\n # What to save file as?\n remote_path = raw_input(\"What do you want to name the remote file?\\n> \")\n while not remote_path:\n remote_path = raw_input(\"What do you want to name the remote file?\\n> \")\n try:\n msg_rec, data_rec = ftp.stor_cmd(sock, local_file, remote_path, transfer_type)\n print_debug(str(data_rec))\n except Exception as e:\n print(\"An error has occurred: \" + str(e) + \"\\nPlease try again.\")\n return main_menu(ftp)\n main_menu(ftp)", "title": "" }, { "docid": "1d2d7e0a171f0d8d80b7cd3787a7c3a1", "score": "0.448991", "text": "def __init__(self, handle, server_hostname=None, mode='rw'):\n if not isinstance(handle, pyuv.Stream):\n raise TypeError(\"handle: expecting a 'pyuv.Stream' instance, got {!r}\"\n .format(type(handle).__name__))\n super(Transport, self).__init__(handle, mode)\n self._server_hostname = server_hostname", "title": "" }, { "docid": "461735f9873e60700169bf287b71f824", "score": "0.44836387", "text": "def setup_class(cls):\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)\n\n temp_dir = os.path.join(cls.t, \"temp_dir_node\")\n os.mkdir(temp_dir)\n cls.connection_node = _make_libp2p_connection(data_dir=temp_dir, delegate=True)\n temp_dir_client = os.path.join(cls.t, \"temp_dir_client\")\n os.mkdir(temp_dir_client)\n cls.connection = _make_libp2p_client_connection(\n data_dir=temp_dir_client, peer_public_key=cls.connection_node.node.pub\n )", "title": "" }, { "docid": "059ee3c59d8f49eebf67883e88d6b298", "score": "0.4483288", "text": "def connection_made(self, transport):\n self._transport = transport\n self._when_connected = datetime.datetime.now()\n self._last_received = datetime.datetime.now()\n\n reader_factory = self._reader_factory\n writer_factory = self._writer_factory\n reader_kwds = {}\n writer_kwds = {}\n\n if self.default_encoding:\n reader_kwds[\"fn_encoding\"] = self.encoding\n writer_kwds[\"fn_encoding\"] = self.encoding\n reader_kwds[\"encoding_errors\"] = self._encoding_errors\n writer_kwds[\"encoding_errors\"] = self._encoding_errors\n reader_factory = self._reader_factory_encoding\n writer_factory = self._writer_factory_encoding\n\n if self._limit:\n reader_kwds[\"limit\"] = self._limit\n\n self.reader = reader_factory(**reader_kwds)\n\n self.writer = writer_factory(\n transport=transport,\n protocol=self,\n reader=self.reader,\n server=True,\n **writer_kwds\n )\n\n logger.info(\"Connection from %s\", self)\n\n self._waiter_connected.add_done_callback(self.begin_shell)\n asyncio.get_event_loop().call_soon(self.begin_negotiation)", "title": "" }, { "docid": "473fb6434366b1e87ce4247844439ec7", "score": "0.44817644", "text": "def connect(self, host=''):\n if host:\n self.host = host\n self.ftp = FTP(self.host)\n self.ftp.login('update', 'update', 'update')", "title": "" }, { "docid": "0db99e29f9a745ba03c1fa6e772c47e2", "score": "0.44800255", "text": "def done (self):\r\n\r\n # ----------------------------------------\r\n # persistent connection management\r\n # ----------------------------------------\r\n\r\n # --- BUCKLE UP! 
----\r\n\r\n connection = get_header(CONNECTION, self.header).lower()\r\n\r\n close_it = 0\r\n wrap_in_chunking = 0\r\n\r\n if self.version == '1.0':\r\n if connection == 'keep-alive':\r\n if 'Content-Length' not in self:\r\n close_it = 1\r\n else:\r\n self['Connection'] = 'Keep-Alive'\r\n else:\r\n close_it = 1\r\n elif self.version == '1.1':\r\n if connection == 'close':\r\n close_it = 1\r\n elif 'Content-Length' not in self:\r\n if 'Transfer-Encoding' in self:\r\n if not self['Transfer-Encoding'] == 'chunked':\r\n close_it = 1\r\n elif self.use_chunked:\r\n self['Transfer-Encoding'] = 'chunked'\r\n wrap_in_chunking = 1\r\n else:\r\n close_it = 1\r\n elif self.version is None:\r\n # Although we don't *really* support http/0.9 (because we'd have to\r\n # use \\r\\n as a terminator, and it would just yuck up a lot of stuff)\r\n # it's very common for developers to not want to type a version number\r\n # when using telnet to debug a server.\r\n close_it = 1\r\n\r\n outgoing_header = producers.simple_producer(self.get_reply_header_text())\r\n\r\n if close_it:\r\n self['Connection'] = 'close'\r\n\r\n if wrap_in_chunking:\r\n outgoing_producer = producers.chunked_producer (\r\n producers.composite_producer (self.outgoing)\r\n )\r\n # prepend the header\r\n outgoing_producer = producers.composite_producer(\r\n [outgoing_header, outgoing_producer]\r\n )\r\n else:\r\n # prepend the header\r\n self.outgoing.insert(0, outgoing_header)\r\n outgoing_producer = producers.composite_producer (self.outgoing)\r\n\r\n # apply a few final transformations to the output\r\n self.channel.push_with_producer (\r\n # globbing gives us large packets\r\n producers.globbing_producer (\r\n # hooking lets us log the number of bytes sent\r\n producers.hooked_producer (\r\n outgoing_producer,\r\n self.log\r\n )\r\n )\r\n )\r\n\r\n self.channel.current_request = None\r\n\r\n if close_it:\r\n self.channel.close_when_done()", "title": "" }, { "docid": "11ed63a3facedb5a74c64af04a2c41e9", "score": "0.44790822", "text": "def post_process(self, packet: 'dict[str, Any]') -> 'SMFIdentificationBasedDPDOption':\n ret = super().post_process(packet) # type: SMFIdentificationBasedDPDOption\n ret.mode = Enum_SMFDPDMode.H_DPD\n return ret", "title": "" }, { "docid": "10ae987225b3ffe6ec854e662fdf5809", "score": "0.44789535", "text": "def Main(self):\n # Boolean flag to indicate whether or not the target is currently available.\n target_available = False\n last_unavailable_time = float('-inf')\n self._batch_size = self.args.batch_size\n\n while not self.IsStopping():\n # Should test connect first, and get input HTTP plugin's maximum request.\n if not target_available and not self._CheckConnect():\n if (time_utils.MonotonicTime() >\n (last_unavailable_time + _FAILED_CONNECTION_INTERVAL)):\n last_unavailable_time = time_utils.MonotonicTime()\n self.info('Connection to target unavailable')\n self.Sleep(_FAILED_CONNECTION_INTERVAL)\n continue\n target_available = True\n\n # We need to know the size of request to avoid too big request, so we\n # cache events in memory before making the connection.\n events = []\n event_stream = self.NewStream()\n if not event_stream:\n self.Sleep(1)\n continue\n\n for event in event_stream.iter(timeout=self.args.timeout,\n count=self._batch_size):\n events.append(event)\n\n # If no events are available, don't bother sending an empty transmission.\n if not events:\n self.debug('No events available for transmission')\n event_stream.Commit()\n self._batch_size = self.args.batch_size\n continue\n\n try:\n # 
Create the temporary directory for attachments.\n with file_utils.TempDirectory(prefix='output_http_') as tmp_dir:\n self.debug('Temporary directory for attachments: %s', tmp_dir)\n\n start_time = time.time()\n request_body = self._PrepareRequestData(events, tmp_dir)\n status_code, reason, clen = self._PostRequest(request_body)\n\n if status_code == 413: # Request Entity Too Large\n event_stream.Abort()\n if len(events) == 1:\n self.error('One event is bigger than input HTTP plugin\\'s '\n 'maximum request limit (event size = %dbytes, input '\n 'plugin maximum size = %dbytes)',\n clen, self._max_bytes)\n return\n\n self.info('Request entity too large, and trying to send a half of '\n 'the request')\n # This won't be 0 since it will stop on above when\n # self._batch_size=1.\n self._batch_size //= 2\n continue\n\n if status_code != 200: # Bad Request\n self.error(reason)\n raise Exception\n\n event_stream.Commit()\n self._batch_size = self.args.batch_size\n elapsed_time = time.time() - start_time\n\n # Size and speed information.\n total_kbytes = clen / 1024\n self.info(\n 'Transmitted %d events, total %.2f kB in %.1f sec (%.2f kB/sec)',\n len(events), total_kbytes, elapsed_time,\n total_kbytes / elapsed_time)\n except requests.ConnectionError as e:\n self.warning('Connection failed: Is input HTTP plugin running?')\n self.debug('Connection error: %s', e)\n event_stream.Abort()\n target_available = False\n self.Sleep(1)\n except Exception as e:\n self.exception('Connection or transfer failed: %s', e)\n event_stream.Abort()\n target_available = False\n self.Sleep(1)", "title": "" }, { "docid": "4a6d593e466723254145f5e86f3c53af", "score": "0.44748312", "text": "def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable", "title": "" }, { "docid": "3cd38962d3bab59a3a918f3f94f4060f", "score": "0.4468871", "text": "def add_fe_tt_advised(self, flow_dict):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n #*** Check it's TCP:\n if flow_dict['proto'] != 'tcp':\n self.logger.error(\"Unsupported proto=%s\", flow_dict['proto'])\n return 0\n\n #*** Convert IP addresses strings to integers:\n ipv4_src = _ipv4_t2i(str(flow_dict['ip_A']))\n ipv4_dst = _ipv4_t2i(str(flow_dict['ip_B']))\n\n #*** Build match:\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_src=ipv4_src,\n ipv4_dst=ipv4_dst,\n ip_proto=6,\n tcp_src=flow_dict['tp_A'],\n tcp_dst=flow_dict['tp_B']\n )\n\n #*** Set QoS actions (if any):\n queue = 0\n self.logger.debug(\"flow_dict=%s\", flow_dict)\n\n if flow_dict['actions'] and 'qos_treatment' in flow_dict:\n qos = flow_dict['qos_treatment']\n self.logger.debug(\"qos_treatment=%s\", qos)\n queue = self._nmeta.main_policy.qos_treatment.\\\n get_policy_qos_treatment_value(qos)\n self.logger.debug(\"queue=%s\", queue)\n if queue:\n actions = [parser.OFPActionSetQueue(queue)]\n else:\n actions = []\n\n inst = [parser.OFPInstructionActions(\n ofproto.OFPIT_APPLY_ACTIONS, actions),\n parser.OFPInstructionGotoTable(self.ft_tt + 1)]\n priority = 1\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_tt,\n priority=priority,\n idle_timeout=self.fe_idle_timeout_qos,\n match=match, instructions=inst)\n self.logger.debug(\"Installing dynamic treatment forward FE dpid=%s\",\n self.dpid)\n self.datapath.send_msg(mod)\n #*** Build counter match (reversed flow):\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_src=ipv4_dst,\n ipv4_dst=ipv4_src,\n ip_proto=6,\n 
tcp_src=flow_dict['tp_B'],\n tcp_dst=flow_dict['tp_A']\n )\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_tt,\n priority=priority,\n idle_timeout=self.fe_idle_timeout_qos,\n match=match, instructions=inst)\n self.logger.debug(\"Installing dynamic treatment reverse FE dpid=%s\",\n self.dpid)\n self.datapath.send_msg(mod)", "title": "" } ]
ea950c2fd6d934bafb035b9d5c85283d
Given an input_dir, returns the list of userids. This is to be used as the reference, in terms of ordering, for all other datasets.
[ { "docid": "d0d531e57eecbbd657cdc2c1c73acc18", "score": "0.79590195", "text": "def get_user_ids(input_dir: str) -> List[str]:\n image_files_name_pattern = os.path.join(input_dir, \"Image\", \"*.jpg\")\n image_filepaths = glob.glob(image_files_name_pattern)\n file_names = (os.path.basename(file_path) for file_path in image_filepaths)\n userids = [file_name.split(\".\")[0] for file_name in file_names]\n userids = sorted(userids)\n return userids", "title": "" } ]
[ { "docid": "405b4402f2c8b31623706774d5d56e09", "score": "0.6556707", "text": "def get_ids(dir):\n return (f[:-4] for f in os.listdir(dir))", "title": "" }, { "docid": "6a235251084e44768351a9ca138ef4df", "score": "0.6357975", "text": "def all_train_ids(data_dir) -> np.ndarray:\n return np.array(\n sorted([os.path.basename(fname).lower() for fname in find_in_dir(os.path.join(data_dir, 'train_v2'))]))", "title": "" }, { "docid": "6a3f3a3341603f7607c650918130a38e", "score": "0.60535616", "text": "def all_test_ids(data_dir) -> np.ndarray:\n return np.array(\n sorted([os.path.basename(fname).lower() for fname in find_in_dir(os.path.join(data_dir, 'test_v2'))]))", "title": "" }, { "docid": "75b445792ddbcb704d2764afe594ab71", "score": "0.60397166", "text": "def get_uids_table(dirname):\n ImPaths = [\n os.path.join(dirname, f) for f in os.listdir(dirname)\n if f.endswith('dcm')\n ]\n UIDPaths = {}\n for image_path in ImPaths:\n ds = pydicom.dcmread(image_path)\n uid = ds[('0008', '0018')].value\n pos = ds[('0020', '0032')].value\n UIDPaths[uid] = [pos[-1], image_path]\n\n df = pd.DataFrame.from_dict(\n UIDPaths, orient='index', columns=['z-position', 'path']\n )\n df = df.sort_values(by=['z-position'])\n\n df.index.name = 'UID'\n return df", "title": "" }, { "docid": "519121a2b84218a8e37ba567df6a4c42", "score": "0.60367024", "text": "def _users():\n users = []\n root = os.path.expanduser(\"~/pckr/\")\n for sd in os.listdir(root):\n path = os.path.join(root, sd)\n if os.path.isdir(path):\n users.append(sd)\n return sorted(users)", "title": "" }, { "docid": "45ac3d13499473205c33e38aecfce337", "score": "0.598554", "text": "def guids(list_result_dirs):\n ids = [d.name for d in list_result_dirs()]\n assert len(ids)>0 # test folder prepared?\n return ids", "title": "" }, { "docid": "4fe88c386c1f80d4252cd5afbfcb8679", "score": "0.59819835", "text": "def get_doc_ids(self, corpus_dir):\n if corpus_dir not in self.CORPUS_DIR:\n raise ValueError(\"wrong value. 
choose 'dev', 'train', or 'mix'\")\n \n doc_ids = []\n if corpus_dir == \"dev\" or corpus_dir == \"train\":\n fpath = self.src + '/' + corpus_dir + self.DOCID_SUFIX_EXT\n with open(fpath, 'r') as f:\n doc_ids = json.loads(f.read()) \n elif corpus_dir == \"mix\":\n doc_ids = self.get_doc_ids(\"dev\") + self.get_doc_ids(\"train\")\n \n return doc_ids", "title": "" }, { "docid": "3de1bb54a8bf1d8da3b630e49eff66bf", "score": "0.59805244", "text": "def user_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"user_ids\")", "title": "" }, { "docid": "23af6221da09cbf2f2c5f2c3edb20f49", "score": "0.59476805", "text": "def get_ids_from_directory():\n print(os.getcwd())\n img_list = []\n files = glob.glob(os.getcwd() + \"/pics/\"+\"*.jpg\")\n for fle in files:\n clean = fle.split('/')[-1]\n img_list.append(clean)\n append_to_csv('pics/img_list.csv', img_list, img_list)\n return files", "title": "" }, { "docid": "a04add34c17851d5b247f977ad8a4a88", "score": "0.5906206", "text": "def treeids(dir_):\n treefiles=[]\n for root, dirs, files in os.walk(dir_):\n files_withid=filter(theid, files)\n treefiles.extend(files_withid)\n return set(map(theid, treefiles))", "title": "" }, { "docid": "f419055486ed09701d58f214611c9a45", "score": "0.5835327", "text": "def user_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_ids\")", "title": "" }, { "docid": "f419055486ed09701d58f214611c9a45", "score": "0.5835327", "text": "def user_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_ids\")", "title": "" }, { "docid": "4d490b341e04da22eb1ba08a270ef059", "score": "0.5787366", "text": "def get_data_ids(self) -> List:\n return [x[:-1] for x in self.data_path_splits] # type: ignore", "title": "" }, { "docid": "3a15b37bc30fa081f7fbc6b0ab1f84cd", "score": "0.57668144", "text": "def get_ids(data_path, subset_ids):\n ids = []\n for subset_id in subset_ids:\n ids_dp = os.listdir(jp(data_path, f'subset{subset_id}'))\n [(ids.append(id_dp.strip('.mhd')) if id_dp.endswith('mhd') else None) for id_dp in ids_dp]\n\n return ids", "title": "" }, { "docid": "91321ad7ae38b05da60839b1e92af87e", "score": "0.57225925", "text": "def get_identities(users_ssh_dir, only_defaults=False):\n identities = []\n if os.path.exists(users_ssh_dir):\n\tssh_files = os.listdir(users_ssh_dir)\n\tdefaults_present = False\n\tdefaults = []\n\tif only_defaults and '.default_ids' in ssh_files:\n\t defaults_present = True\n\t with open(os.path.join(users_ssh_dir, '.default_ids')) as f:\n\t\tdefaults = f.read().splitlines()\n\t # Fix empty entries\n\t defaults = [a for a in defaults if os.path.exists(\n\t\tos.path.join(users_ssh_dir, a))]\n\t # Reduce absolute paths to short names (for easy matching)\n\t defaults = [os.path.split(a)[1] for a in defaults]\n\tfor f in ssh_files:\n\t if f.endswith('.pub'):\n\t\t# If there's a public key there's probably a private one...\n\t\tidentity = f[:-4] # Will be the same name minus '.pub'\n\t\tif identity in ssh_files:\n\t\t identities.append(os.path.join(users_ssh_dir, identity))\n if defaults_present:\n\t# Only include identities marked as default\n\tidentities = [a for a in identities if os.path.split(a)[1] in defaults]\n elif only_defaults:\n\treturn []\n return identities", "title": "" }, { "docid": "5ef87ac7d2ef0dcd6a3fbc17ecf36db4", "score": "0.565371", "text": "def get_patient_ids(image_dir=TRAIN_SRC):\n prefix_len = len(os.path.join(image_dir, ''))\n imgs = glob.glob(os.path.join(image_dir, \n 
\"*[0-9]_*[0-9].{}\".format(FILE_EXT)))\n return sorted(set([int(s[prefix_len:].split('_')[0]) for s in imgs]))", "title": "" }, { "docid": "35f9bea20b2928568b0ca50f6aa78c5f", "score": "0.56514454", "text": "def get_users(file_names):\n\n users = []\n for item in file_names:\n user_number = item.split('_')[0][:-1]\n if user_number not in users:\n users.append(user_number)\n return users", "title": "" }, { "docid": "a034828eefacdec788f276f467cd5eca", "score": "0.5634928", "text": "def files_in_input_dir(self) -> List[str]:\n inside_input_dir_list = []\n for (dir_path, _, file_names) in os.walk(self.input_dir):\n inside_input_dir_list += [os.path.join(dir_path, f)\n for f in file_names]\n\n return inside_input_dir_list", "title": "" }, { "docid": "efaf7d41f0c804048724ae24296178f1", "score": "0.56258136", "text": "def _get_globus_identities(self, shared_directory: str):\n globus_id_filename = \"{}.globus_id\".format(shared_directory)\n with open(globus_id_filename, \"r\") as fp:\n ident = fp.read()\n return self.globus_auth_client.get_identities(\n usernames=ident.split(\"\\n\")[0]\n )", "title": "" }, { "docid": "556b2c11f09416862c84e5e06e9354e7", "score": "0.55666494", "text": "def get_requests_for_dir(dir):\n reqid = []\n criteria = {'cert-storage': 'NSSDB', 'key-storage': 'NSSDB',\n 'cert-database': dir, 'key-database': dir, }\n requests = _get_requests(criteria)\n for request in requests:\n reqid.append(request.prop_if.Get(DBUS_CM_REQUEST_IF, 'nickname'))\n\n return reqid", "title": "" }, { "docid": "0efe0d361c84768ad957622377c9033f", "score": "0.55501807", "text": "def input_files(datadir):\n return [input_file(filepath) for filepath in listdir(datadir)]", "title": "" }, { "docid": "0f72b8667606d7397e165026ecf54250", "score": "0.552902", "text": "def get_file_names(input_dir=\"input_files\", output_dir=\"output_files\"):\n res = []\n seed_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)),input_dir)\n output_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)),output_dir)\n iofiles = namedtuple(\"iofiles\",['input_file','output_file'])\n for root, dirs, files in os.walk(seed_dir):\n for file_name in files:\n input_file_path = os.path.normpath(os.path.join(root, file_name))\n output_file_path = os.path.normpath(os.path.join(output_dir, \"%s_\"%datetime.now().strftime(\"%Y%m%d-%H%M%S\")+file_name))\n res.append(iofiles(input_file_path,output_file_path))\n return res", "title": "" }, { "docid": "7e02ec3f8988012e2bc39224bbe44c26", "score": "0.55165774", "text": "def get_ids(self):\n # Complete this function\n ids = []\n\n return ids", "title": "" }, { "docid": "afcc18d823869ef8d271333694eec663", "score": "0.55075747", "text": "def getPathIDs(self,path): \n elements = path.split('/')\n pelements = []\n dPath = ''\n for el in elements[1:]:\n dPath += '/'+el\n pelements.append(dPath)\n \n pathString = [ \"'\"+p+\"'\" for p in pelements ]\n req = \"SELECT DirID FROM DirectoryInfo WHERE DirName in (%s) ORDER BY DirID\" % ','.join(pathString) \n result = self.db._query(req)\n if not result['OK']:\n return result\n if not result['Value']:\n return S_ERROR('Directory %s not found' % path)\n return S_OK([ x[0] for x in result['Value'] ])", "title": "" }, { "docid": "7be16d05bf3f748c59c676161da9540a", "score": "0.55062866", "text": "def get_user_ids(self):\n idlist = []\n for item in self:\n if item[\"content_tag\"] == 13:\n idlist.append(item[\"user.value\"])\n\n return idlist", "title": "" }, { "docid": "7dd07d2daa215c64e1f0e2a7449330a3", "score": "0.5501514", "text": "def 
ids_from_folder(self, cls, fname):\n if cls.identifiers:\n if fname == \"__blank__\":\n return [(k, \"\") for k in cls.identifiers]\n \n # cls.identifiers is ordered, and should match\n # the order of atoms inside fname.\n return [(k, getattr(cls, k).coerce(None, v))\n for k, v in zip(cls.identifiers,\n fname.split(self.idsepchar))\n ]\n else:\n return []", "title": "" }, { "docid": "e208f30205d43496cb6845b8f35088f8", "score": "0.54990923", "text": "def list_user_ids_for_project(self, tenant_id):\n raise exception.NotImplemented() # pragma: no cover", "title": "" }, { "docid": "90c836dd698cffe3874fe9c17d17516c", "score": "0.54885995", "text": "def _get_users(self):\n users_dir = os.path.join(self.deployment_dir, 'users')\n users = [(n, os.path.join(users_dir, n))\n for n in os.listdir(users_dir)]\n return users", "title": "" }, { "docid": "73463b1ab3c816893bc6a430fd62bb3f", "score": "0.54885966", "text": "def getMemberUserIds(self):\n return [ u.id for u in self.getMemberUserSettings() ]", "title": "" }, { "docid": "b9ca665a0d85e1a579991bca3575d09c", "score": "0.54808384", "text": "def get_involved_user_ids(self):\n involved_users = [a.id for a in self.get_involved_users()]\n return involved_users", "title": "" }, { "docid": "720e042208a7dad29d0c57dd34410baf", "score": "0.5480446", "text": "def get_image_ids(image_dir=TRAIN_SRC):\n\n prefix_len = len(os.path.join(image_dir, ''))\n imgs = glob.glob(os.path.join(image_dir, \n \"*[0-9]_*[0-9].{}\".format(FILE_EXT)))\n to_int = lambda k: tuple([int(i) for i in k.split('_')])\n return sorted([s[prefix_len:s.rindex('.')] for s in imgs], key=to_int)", "title": "" }, { "docid": "a74baaed11a690b67c9779c1ecbc32f2", "score": "0.54636365", "text": "def get_entity_ids(self) -> List[str]:\n return [p.name for p in self.project_dirs]", "title": "" }, { "docid": "c6c74d56596833fa61de3c9b1d96a2cd", "score": "0.5453678", "text": "def get_user_ids(researcher_list):\r\n # Retrieved SymplecticIDs\r\n ids = []\r\n # For each researcher, ask symplectic for their Symplectic-id\r\n for researcher in researcher_list:\r\n print researcher\r\n SymplecticID = _get_users(researcher)\r\n if SymplecticID and SymplecticID != '':\r\n ids.append(SymplecticID)\r\n return ids", "title": "" }, { "docid": "ed7ec3ad73e7fda1e5b7c01c660b0a19", "score": "0.54245794", "text": "def get_filenames(input_dir):\n return [os.fsdecode(file) for file in os.listdir(input_dir)]", "title": "" }, { "docid": "d1ea77e18d6c80906b8b2cd084a5f308", "score": "0.53694165", "text": "def getIDList():\n return _getUniversal(tc.ID_LIST, \"\")", "title": "" }, { "docid": "d1ea77e18d6c80906b8b2cd084a5f308", "score": "0.53694165", "text": "def getIDList():\n return _getUniversal(tc.ID_LIST, \"\")", "title": "" }, { "docid": "8ff8c9ead52b19987efd5b2c0ae7e9a6", "score": "0.53358877", "text": "def get_searched_ids(root_directories):\n from glob import glob\n inode_to_path = {}\n inodes = \"{\"\n total_dirs = 0\n for root_directory in root_directories.split(','):\n try:\n searched_dirs = glob(root_directory, recursive=True)\n except TypeError:\n searched_dirs = glob(root_directory)\n if not searched_dirs:\n continue\n\n for mydir in searched_dirs:\n total_dirs = total_dirs + 1\n # If we pass more than 15 dirs, ebpf program fails\n if total_dirs > 15:\n print('15 directories limit reached')\n break\n inode_id = os.lstat(mydir)[stat.ST_INO]\n if inode_id in inode_to_path:\n if inode_to_path[inode_id] == mydir:\n print('Skipping {} as already considered'.format(mydir))\n else:\n inodes = \"{},{}\".format(inodes, 
inode_id)\n inode_to_path[inode_id] = mydir\n print('Considering {} with inode_id {}'.format(mydir, inode_id))\n\n inodes = inodes + '}'\n if len(inode_to_path) == 0:\n print('Cannot find any valid directory')\n exit()\n return inodes.replace('{,', '{'), inode_to_path", "title": "" }, { "docid": "8ff8c9ead52b19987efd5b2c0ae7e9a6", "score": "0.53358877", "text": "def get_searched_ids(root_directories):\n from glob import glob\n inode_to_path = {}\n inodes = \"{\"\n total_dirs = 0\n for root_directory in root_directories.split(','):\n try:\n searched_dirs = glob(root_directory, recursive=True)\n except TypeError:\n searched_dirs = glob(root_directory)\n if not searched_dirs:\n continue\n\n for mydir in searched_dirs:\n total_dirs = total_dirs + 1\n # If we pass more than 15 dirs, ebpf program fails\n if total_dirs > 15:\n print('15 directories limit reached')\n break\n inode_id = os.lstat(mydir)[stat.ST_INO]\n if inode_id in inode_to_path:\n if inode_to_path[inode_id] == mydir:\n print('Skipping {} as already considered'.format(mydir))\n else:\n inodes = \"{},{}\".format(inodes, inode_id)\n inode_to_path[inode_id] = mydir\n print('Considering {} with inode_id {}'.format(mydir, inode_id))\n\n inodes = inodes + '}'\n if len(inode_to_path) == 0:\n print('Cannot find any valid directory')\n exit()\n return inodes.replace('{,', '{'), inode_to_path", "title": "" }, { "docid": "008b8975d0cc781cb5616b83b914e611", "score": "0.53279936", "text": "def getTrainingUserList():\n listOfUsersMatFiles = [\"Alice.mat\", \"Charlotte.mat\", \"Irina.mat\", \"Konstantin.mat\", \"Lokesh.mat\", \"Mike.mat\", \"Russa.mat\"]\n return listOfUsersMatFiles", "title": "" }, { "docid": "8b4530c197b6f8bab30abeaeecfc3074", "score": "0.5320499", "text": "def getChr(input_dir, samples):\r\n chr_names = set([])\r\n\r\n for samp in samples:\r\n samp_dir = input_dir + \"/\" + samp\r\n for subdir in os.listdir(samp_dir):\r\n chr = subdir.replace(samp, \"\")\r\n chr = chr.lstrip(\"_\")\r\n chr_names.add(chr)\r\n\r\n return list(chr_names)", "title": "" }, { "docid": "6def510273f86655fd364832eea2d145", "score": "0.5317602", "text": "def user_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "title": "" }, { "docid": "6def510273f86655fd364832eea2d145", "score": "0.5317602", "text": "def user_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "title": "" }, { "docid": "9f0fde1d60f68130ca3990d22d8dba6e", "score": "0.5302483", "text": "def getUserIDs(id, allowSkypeUserSearch=False):\n\tids0 = id.split(\",\")\n\tids = []\n\tfor id in ids0:\n\t\ttry: id = getUserID(id, allowSkypeUserSearch)\n\t\texcept NoIDError: pass\n\t\telse: ids.append(id)\n\tids = filter(None, ids)\n\treturn \",\".join(ids)", "title": "" }, { "docid": "3800619bf74b4bd9f5cb6b6b62013217", "score": "0.52777195", "text": "def uids(self):\n return self._platform_impl.get_process_uids()", "title": "" }, { "docid": "b3d8a7820869fd05bc71e075902d691d", "score": "0.52686536", "text": "def _get_train_test_split_ids(cls, dataset_path):\n index_file_path = jpath(dataset_path, cls.index_file_path)\n reader = csv.DictReader(open(index_file_path), delimiter=\",\")\n name_id_mapping = {}\n for data in reader:\n pid = int(data[\"id\"])\n if data[\"title\"] != \"\" and pid not in cls.ignore_ids:\n name = f\"{data['artist']}: {data['title']}\"\n if name not in name_id_mapping:\n name_id_mapping[name] = []\n name_id_mapping[name].append(pid) # Repetition count: 1->613, 2->110, 3->19\n\n train_ids, 
test_ids = [], [] # Folder ids\n for pids in name_id_mapping.values():\n if len(pids) <= 2:\n pid = pids[0]\n else:\n pid = pids[2]\n\n if pid <= cls.train_test_split_id:\n train_ids.append(str(pid).zfill(4))\n else:\n test_ids.append(str(pid).zfill(4))\n\n return train_ids, test_ids", "title": "" }, { "docid": "c650e7dc91e31e621eae10ebd504bffd", "score": "0.52552104", "text": "def fetchNBAPlayersUserIDsFromFile():\n userIds = []\n with open(nbaPlayersFileName, 'r', newline='') as csvfile:\n csvreader = csv.reader(csvfile)\n fields = next(csvreader) #move the reader cursor to next line\n # extracting each data row one by one\n for row in csvreader:\n userIds.append(row[0])\n return userIds", "title": "" }, { "docid": "7489468c8a120e021235d5e2a63a0545", "score": "0.5235812", "text": "def get_ids():\r\n home_dir = os.path.expanduser('~')\r\n ids_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(ids_dir):\r\n os.makedirs(ids_dir)\r\n id_path = os.path.join(ids_dir, 'ClientID.json')\r\n\r\n store = Storage(id_path)\r\n ids = store.get()\r\n if not ids or ids.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_ID, URL)\r\n flow.user_agent = APP_NAME\r\n if flags:\r\n ids = tools.run_flow(flow, store, flags)\r\n else: \r\n ids = tools.run(flow, store)\r\n return ids", "title": "" }, { "docid": "ee86aadd9d93da27b25ec114341906a6", "score": "0.52355677", "text": "def external_ids(self) -> pulumi.Output[Sequence['outputs.UserExternalId']]:\n return pulumi.get(self, \"external_ids\")", "title": "" }, { "docid": "b26c8566910f243c6753b4ba510f1a85", "score": "0.5235348", "text": "def _crawlIds(self) -> List:\n\n ids = []\n with webdriver.Firefox() as driver:\n for page in range(1 + self.offset, 1 + self.offset + self.pages):\n url = self._generate_url(page)\n driver.get(url)\n \n try:\n # Wait until the items are loaded\n WebDriverWait(driver, 10, 0.5, ignored_exceptions=(TimeoutException)).until(\n EC.presence_of_all_elements_located((By.CLASS_NAME, \"l-item\")))\n \n soup = BeautifulSoup(driver.page_source, \"lxml\")\n items = soup.find_all(\"div\", {\"class\": \"l-item\"})\n \n for item in items:\n video_id = item.find(\"a\")[\"href\"][25:]\n user_id = item.find(\"a\", {\"class\": \"v-author\"})[\"href\"][21:]\n ids.append((video_id, user_id))\n except TimeoutException:\n pass\n \n return ids", "title": "" }, { "docid": "6e56cf1398fb5de1b11354cfd0f94c8f", "score": "0.5224569", "text": "def make_dataset(input_dir: str) -> tf.data.Dataset:\n preprocessing_batch_size = 100\n userids = get_user_ids(input_dir)\n \n userids_dataset = tf.data.Dataset.from_tensor_slices(userids)\n preprocessed_images = image_preprocessing.make_dataset(input_dir, userids)\n # preprocessed_texts = text_preprocessing.make_dataset(input_dir, userids)\n # preprocessed_likes = likes_preprocessing.make_dataset(input_dir, userids)\n \n return tf.data.Dataset.zip({\n \"userids\": userids_dataset,\n \"image_in\": preprocessed_images,\n # \"text_in\": preprocessed_texts,\n # \"likes_in\": preprocessed_likes,\n })", "title": "" }, { "docid": "0084c2f93338241d9ee7d50851c46514", "score": "0.52206755", "text": "def owned_dataset_ids(self) -> List[str]:\n dataset_ids = self.session.get_resource(self._path() + \"/dataset_ids\")[\"dataset_ids\"]\n return dataset_ids", "title": "" }, { "docid": "02521d7ca5c25c258acaab508cd1079b", "score": "0.5215871", "text": "def get_event_ids(self):\n event_ids = []\n path = self.get_path()\n for root, dirs, files in os.walk(path):\n event_ids.extend(e.replace(\".json\", \"\") for 
e in files if e != \".DS_Store\")\n\n return event_ids", "title": "" }, { "docid": "1ae8f286065c3d5fabaefcc35c730d27", "score": "0.5215431", "text": "def get_ids(self):\n pass", "title": "" }, { "docid": "1955be10d8a2699989d62e24055cce5f", "score": "0.5212781", "text": "def user_ids(self) -> set[str]:\n if self.users is None:\n return set()\n return {user.id for user in self.users.items}", "title": "" }, { "docid": "d5d7cf6a3b572bc094497f16df9ce096", "score": "0.52083707", "text": "def _filenames_from_dir(self, dirname):\n try:\n files = hdfs.listdir(dirname, \"f\")\n # Make files order predictable\n files.sort()\n except HDFSException, err:\n raise Consumer.ConsumerException(err)\n return files", "title": "" }, { "docid": "1ef4eae1e03080e865f12fbcd4452f10", "score": "0.51982", "text": "def get_user_by_ids(id_list):\n # in - method get the list of id\n return db.session.query(User).filter(User.user_id.in_(id_list)).all()", "title": "" }, { "docid": "6a02c5f8126507bccffc6cdf26be4c9d", "score": "0.51968986", "text": "def _get_data_ids(self):\n rows = self._get_rows()\n return [row.id for row in rows]", "title": "" }, { "docid": "7788a250a3fa891791e3da66a0b9e23e", "score": "0.51930183", "text": "def handles_to_ids(handles: List[str]) -> List[int]:\n num_handles = len(handles)\n if num_handles > 100:\n raise ValueError(\"Cannot lookup ids for more than 100 twitter handles.\")\n handles = [hndl[1:] if hndl.startswith('@') else hndl for hndl in handles]\n log.debug(\"Getting ids for handles: %s\", handles)\n user_objs = ensure_api().UsersLookup(screen_name=handles)\n ids = [user_obj.id for user_obj in user_objs]\n log.debug(\"Ids: %s\", ids)\n return ids", "title": "" }, { "docid": "72b9da2d9795362ce367a9af1cd35269", "score": "0.51760733", "text": "def sorted_step_ids(self) -> List[str]:\n ids: List[str] = []\n source_ids = self.source_ids.copy()\n inv_dag = {k: v.copy() for k, v in self.inv_dag.items()}\n while source_ids:\n head = source_ids.pop()\n ids.append(head)\n for tail in self.dag.get(head, set()):\n incoming = inv_dag[tail]\n incoming.remove(head)\n if not incoming:\n source_ids.add(tail)\n return ids", "title": "" }, { "docid": "fb21efea2adbe056e00578705b0b548a", "score": "0.51746035", "text": "def get_telegram_id_list(self):\n users_list_full = self.get_all_users_with_telegram_id()\n users_telegram_id_list = [itr.telegram_id for itr in users_list_full]\n return users_telegram_id_list", "title": "" }, { "docid": "290a1e4fc9afb8ab92dd42b044e49df0", "score": "0.5171914", "text": "def _parse_map_file(self):\n dir_ids = []\n with open(self.map_filename) as map_file:\n reader = csv.DictReader(map_file)\n for row in reader:\n dir_ids.append((row['original_id'], row['patient_id']))\n return dir_ids", "title": "" }, { "docid": "5706f7285651c39f810ce90baf208ed8", "score": "0.51609296", "text": "def file_ids(self):\r\n return self._file_ids", "title": "" }, { "docid": "ddb0b331586ef8414506b9bf53afd7f1", "score": "0.51570743", "text": "def getdirrows(dirid):\n db = get_db()\n cur = db.execute('select dirid, parid, dirname, firstid, emptyid, isdel from dirs ' + \\\n 'where parid = %d' % dirid)\n rows = cur.fetchall()\n return rows", "title": "" }, { "docid": "9dedec7adf093053a68325923864dbc9", "score": "0.5148311", "text": "def get_list_of_users(self):\n\n counter = 0\n lookup_ids = list()\n \n def _fire_lookup(counter, look_up):\n \"\"\"\n (str, list) -> None\n Internal method to lookup user accounts by ID.\n \"\"\"\n follower_ids = self.auth.users.lookup(user_id=look_up)\n for follower in 
follower_ids:\n #self.df.loc[counter,'screen_name'] = follower['screen_name']\n #self.df.loc[counter,'id'] = follower['id']\n self.df.loc[counter,'location'] = follower['location']\n counter += 1\n\n follower_ids = self.auth.followers.ids(screen_name=self.user_name)\n \n for account_id in follower_ids['ids']:\n lookup_ids.append(account_id)\n if len(lookup_ids) == 100:\n _fire_lookup(counter, lookup_ids)\n lookup_ids = list()\n counter +=100\n\n if len(lookup_ids) > 0:\n _fire_lookup(counter, lookup_ids)", "title": "" }, { "docid": "5bd72456517c6558d1adaa44fadac2dc", "score": "0.51320857", "text": "def list_ids(args):\n funds = args.list.split(',') \n get_funds(funds, MSUniverses[args.universe.upper()], args.output, args.savefiles)", "title": "" }, { "docid": "4c8634273ac7662d0786f2e7b1634528", "score": "0.5128451", "text": "def idList(indList):\n\twith open(indList, \"r\") as f: \n\t\tL = [line.strip() for line in f.readlines()]\n\treturn L", "title": "" }, { "docid": "f09887173f1b294a93e49ec8c7f60fdf", "score": "0.51282567", "text": "def getTestingUserList():\n listOfUsersMatFiles = []\n return listOfUsersMatFiles", "title": "" }, { "docid": "8f66f4e893f4d8676c243ef213e32467", "score": "0.511964", "text": "def readIDs(self,string):\n \tlistOfIDs = []\n \tinputFile = open(string)\n \tfor line in inputFile:\n \t\tline = line.rstrip()\n \t\tlistOfIDs.append(line)\n \tinputFile.close()\n \treturn listOfIDs", "title": "" }, { "docid": "7bfe60e8594e74a52faf6e96b45c53b6", "score": "0.5119215", "text": "def _load_items(self):\n ids = []\n root = self._root\n files = os.listdir(os.path.join(root, IMG_ROOT))\n ids += [line.strip()[:-4] for line in files]\n return ids", "title": "" }, { "docid": "8b6687c2bb8112ce7103a567605e2c88", "score": "0.51166975", "text": "def get_pid_list (path):\n\n csv = CsvFile ()\n csv.read(path)\n print '{} records read'.format(len(csv.data))\n pid_list = map (lambda x:x['pid'], csv.data)\n return pid_list", "title": "" }, { "docid": "b17f61656c41e9e5aaeb2533d1bb3df2", "score": "0.5107553", "text": "def get_subject_ids(root_dir, n_sub=None):\n\n sub_ids = [dir_ for dir_ in os.listdir(root_dir) if dir_.startswith('CC')]\n\n if n_sub is not None:\n sub_ids = sub_ids[:n_sub]\n\n return sub_ids", "title": "" }, { "docid": "f83630c4fbed19cc382229dbdd9e9a11", "score": "0.51022834", "text": "def get_IDs(directory, isligID, istargetID):\n\n if istargetID:\n targetID = directory.split('/')[-1]\n if isligID:\n ligID = directory.split('/')[-2]\n else:\n ligID = None\n elif isligID:\n targetID = None\n ligID = directory.split('/')[-1]\n else:\n targetID = None\n ligID = None\n\n return ligID, targetID", "title": "" }, { "docid": "989b40a8bfaf4cd91aebc7d6b2f05a15", "score": "0.51022494", "text": "def get_id_list(self):\n if self.id_list == []:\n list = []\n keylist = self.sequence.keys()\n keylist.sort(key=int)\n for key in keylist:\n list.append(key)\n self.id_list = list\n return list\n else:\n return self.id_list", "title": "" }, { "docid": "d8f6d78205519e9c8835bfdc26ea3bd5", "score": "0.50929743", "text": "def get_files(self):\n cur = self.connector.cursor()\n cur.execute(\"SELECT UniqueID FROM freemage_files WHERE UsID=0 OR UsID=?\"\n , (self.user,))\n output = []\n for x in cur:\n output.append(x[0])\n cur.close()\n return output", "title": "" }, { "docid": "83954ee367fe674b92b9f2920b705dcd", "score": "0.50913525", "text": "def distorted_inputs(self, data_dir):\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in range(1, 6)]\n\n return 
self.__get_dataset(filenames, augmentation=True)", "title": "" }, { "docid": "4c77969fbe93f112637a535067673b1a", "score": "0.5087573", "text": "def get_uid_parts(self) -> List[str]:\n parts = self.uid.split(self.UID_SEPARATOR) # pylint: disable=no-member\n parts = [part for part in parts if part.strip()]\n return parts", "title": "" }, { "docid": "2b8fe2177b37a630627cfb910fc76643", "score": "0.5085021", "text": "def get_sample_ids(self):\n ## return only a fraction of the data set for debugging purposes\n #return list(range(len(read_csv(self.LABEL_FILE)[:, 0])))[0:5000:40]\n #return list(range(len(read_csv(self.LABEL_FILE)[:, 0])))[0:5000:2]\n return list(range(len(read_csv(self.LABEL_FILE)[:, 0])))", "title": "" }, { "docid": "30ba614a8b4398d05f7640e7951e3d6d", "score": "0.50822264", "text": "def ReadIDList(infile, delim=None):#{{{\n try:\n fpin = open(infile,\"r\")\n li = fpin.read().split(delim)\n fpin.close()\n if delim != None:\n li = [x.strip() for x in li]\n return li\n except IOError:\n print \"Failed to read idlistfile %s\"%infile\n return []", "title": "" }, { "docid": "c2e0b9681d459b0bc816c0aefac81fc4", "score": "0.5077929", "text": "def get_all_user_ids(self):\n userids = list()\n\n try:\n standings_json = self.standings()\n\n for item_id in standings_json.get('items'):\n for user_id in standings_json['items'][item_id]['players']:\n player_dict = dict()\n player_dict['id'] = user_id['id']\n player_dict['name'] = user_id['name']\n userids.append(player_dict)\n break\n return userids\n\n except (ComunioAccessTokenError, KeyError) as ex:\n self.logger.error(ex)\n self.__handle_comunio_login()\n return self.get_all_user_ids()", "title": "" }, { "docid": "00c0138d9f17ac93d442851465859204", "score": "0.50727654", "text": "def load_all_users(path):\n return get_file_content(path + \"userList.txt\")", "title": "" }, { "docid": "2e1db82e83636c0368b2d2eeef5f3bbd", "score": "0.50714713", "text": "def inner_node_uids(data):\n return [data[node][0].properties['uid'] for node in range(len(data))]", "title": "" }, { "docid": "a1e387c19f53ea5981b6a731dc5015be", "score": "0.50649667", "text": "def makeUniqNameSet(self, directoryHandlingObj, dirPath):\n try:\n fileList = directoryHandlingObj.getDirectoryElements(dirPath)\n mainNmaeDf = pd.DataFrame(columns=self.inputNameSetHeader)\n for index in range(0,len(fileList)):\n if index == 0:\n mainNmaeDf = self.csvToDF(dirPath + self.osDirPathConnector() + fileList[index], ',', self.inputNameSetHeader)\n else:\n mainNmaeDf = self.mergeDataFrame(mainNmaeDf, dirPath + self.osDirPathConnector() + fileList[index], ',', self.inputNameSetHeader, 'name')\n\n return mainNmaeDf\n except Exception as e:\n print('error in makeUniqNameSet in DataHandling', e)\n return False", "title": "" }, { "docid": "c01480f91602b86df2daf50067c19405", "score": "0.5057289", "text": "def identity_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"identity_ids\")", "title": "" }, { "docid": "c46fa95bbcdcfe443a862e7b234cf165", "score": "0.5036033", "text": "def read_id_list(id_list_file):\n\n id_list = np.array([line.strip('\\n ') for line in open(id_list_file)])\n\n return id_list", "title": "" }, { "docid": "5bb1a223b9453ceed3e70ab4c7cb437d", "score": "0.5030511", "text": "def challenge_in(self):\n user_in = list()\n for line in fileinput.input():\n user_in.append(line)\n return user_in", "title": "" }, { "docid": "4e49ee30cf4f276728f5c7cb8f25fd6e", "score": "0.5027158", "text": "def token_ids(self):\n return list(self._token_ids)", "title": "" }, 
{ "docid": "f34fcf68f325e6e80af67cf483becb1b", "score": "0.5026856", "text": "def get_event_ids(inputs_dir, event_batch, max_event_batch):\n\n # Check events file exists and open it\n events_file = 'events.bin'\n events_fp = os.path.join(inputs_dir, events_file);\n if not os.path.exists(events_fp):\n raise Exception('Events file does not exist.')\n with os.popen(f'evetocsv < {events_fp}') as p:\n events_pd = pd.read_csv(p)\n\n # Randomise event IDs and sort into batches\n np.random.seed(1234) # Use same random seed for all batches\n events_pd['event_id'] = np.random.choice(\n events_pd['event_id'], events_pd.size, replace=False\n )\n chunksize = math.ceil(events_pd.size / max_event_batch)\n start_position = chunksize * (event_batch - 1)\n end_position = chunksize * event_batch\n if end_position > events_pd.size:\n end_position = events_pd.size\n\n return events_pd['event_id'].to_numpy()[start_position:end_position]", "title": "" }, { "docid": "e2c275f202763cf78d0eae43c3097cc8", "score": "0.5021429", "text": "def fileids(self, fileids=None, categories=None):\n if fileids:\n return fileids\n return self.corpus.fileids()", "title": "" }, { "docid": "7d1026830efcbf33c0ad0855c969d8c7", "score": "0.50159377", "text": "def GetAllUsersProfileDirectory(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "234348029876b2df0b9b90d93426b823", "score": "0.50092715", "text": "def get_account_ids(self) -> typing.List[str]:\n return [row[self._accounts_id_index] for row in self._accounts_arr[1:]]", "title": "" }, { "docid": "1318a316aada24793a49f8ae912653cd", "score": "0.50028276", "text": "async def get_local_dids(self) -> Sequence[DIDInfo]:\n ret = [self._get_did_info(did) for did in self.profile.local_dids]\n return ret", "title": "" }, { "docid": "09f412c76e903ab77c187695d2ecb1fb", "score": "0.49958593", "text": "def get_processed_posts(store_dir: str) -> list:\n post_ids = list()\n if not os.path.isdir(store_dir):\n return post_ids\n\n for subdir_name in os.listdir(store_dir):\n if os.path.isdir(os.path.join(store_dir, subdir_name)):\n try:\n post_id = int(subdir_name)\n except ValueError:\n msg = \"Wrong subdir name (should be number): {}\"\n logging.debug(msg.format(subdir_name))\n continue\n\n path_to_file = os.path.join(\n store_dir,\n subdir_name,\n POST_FILE_NAME.format(post_id))\n\n if os.path.isfile(path_to_file):\n post_ids.append(post_id)\n\n return post_ids", "title": "" }, { "docid": "20f0579b484f3c02ed4a7a825fbcb579", "score": "0.4991573", "text": "def external_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserExternalIdArgs']]]]:\n return pulumi.get(self, \"external_ids\")", "title": "" }, { "docid": "b54e4b379fb0f05f3c93259d74fcdd54", "score": "0.4985468", "text": "def process_users(filename):\n #users = set()\n users = []\n for _, element in ET.iterparse(filename):\n if 'uid' in element.attrib:\n user = element.attrib['uid']\n if user not in users:\n users.append(user)\n #print element.attrib['uid']\n pass\n #return users\n return set(users)", "title": "" }, { "docid": "311cc15272c7e71fa3e79cb39dfb8ac2", "score": "0.49796242", "text": "def generate_user_identifier_list(self):\n # Import the random module\n import random\n word_list = get_wordlist()\n # Use random.sample to pull x words from word_list, where number of \n # dice is = to settings.DICE_ROLLS. 
\n user_list = random.sample(word_list, settings.DICE_ROLLS)\n return user_list", "title": "" }, { "docid": "08e0606e66ceb043f8600fe385a57240", "score": "0.49787346", "text": "def identities(query=None):\n return _users(query, secret=True)", "title": "" }, { "docid": "55896fe1f1038c457d2db30dbe462034", "score": "0.49786127", "text": "def find_all_units(uniprot_ids, directory=''):\n\n units = []\n for uniprot_id in uniprot_ids:\n\n f = os.path.join(directory, 'uniprot', 'chembl_%s.csv' % uniprot_id)\n if os.path.isfile(f):\n data = pd.read_csv(f)\n units += data['units'].tolist()\n\n return set(units)", "title": "" }, { "docid": "4b7837b26dc8da500c05282385724da6", "score": "0.4974033", "text": "def setup_im_ids(self):\n search_path = os.path.join(self.config[\"in_dir\"], \"*/\")\n case_list = sorted(glob(search_path))\n case_list = case_list[:210] if self.config[\"with_masks\"] else case_list[210:]\n return case_list", "title": "" }, { "docid": "0167411b4fa60686aeffe9edefaefc1e", "score": "0.49592853", "text": "def existing_hashes(root_dir: str) -> List[int]:\n folders = [name for name in\n os.listdir(root_dir) if os.path.isdir(name)]\n hashes = []\n for folder in folders:\n if folder is int:\n hashes.append(folder)\n return hashes", "title": "" }, { "docid": "14f66234f4c986e3902402f7cf8aedff", "score": "0.49568748", "text": "def _users(query=None, secret=False):\n \n out = []\n \n if query:\n keys = _ctx.keylist(query, secret)\n else:\n keys = _ctx.keylist(\"\", secret)\n \n for key in keys:\n for uid in key.uids:\n out.append(User(uid, key))\n \n return out", "title": "" } ]
5284b5b1c816e437f2dd5b007b5e4604
Load the driver from the one specified in args, or from flags.
[ { "docid": "cc5cff5c0422f40ab9537d843010485a", "score": "0.0", "text": "def __init__(self, volume_driver=None, *args, **kwargs):\n super(ReddwarfVolumeManager, self).__init__(*args, **kwargs)", "title": "" } ]
[ { "docid": "29d364750dffbefc3df07d8c03186348", "score": "0.5825256", "text": "def _get_driver(self, path, driver):\n self._debug('Loading driver %s...' % driver)\n abs_path = os.path.abspath(os.path.join(path, driver + '.py'))\n try:\n self._drivers_loaded[driver] = imp.load_source(driver, abs_path)\n except:\n self._debug('ERROR:usb4butia:_get_driver cannot load %s' % driver, abs_path)", "title": "" }, { "docid": "8eb8327781578703cf68abe0352a111e", "score": "0.5564747", "text": "def load(self, argv):\n self.prog_name = argv[0]\n parser = self.create_parser(argv[0])\n opts, args = parser.parse_args(argv[1:])\n handle_default_options(opts)\n self.execute(*args, **opts.__dict__)", "title": "" }, { "docid": "cf76858df3000028f8357a816ee9e19f", "score": "0.55580676", "text": "def __load_driver(name):\n global __count\n try:\n dname = os.path.basename(name).replace(\".py\", \"\")\n mod_name = \"ahio.drivers.%s%d\" % (dname, __count)\n loader = importlib.machinery.SourceFileLoader(mod_name, name)\n driver = loader.load_module()\n __count += 1\n return driver if hasattr(driver, \"ahioDriverInfo\") else False\n except Exception:\n return False", "title": "" }, { "docid": "9c4e59e599d58c17ab87f79e4e921b98", "score": "0.5554549", "text": "def load(*args, **kwargs):\n # Extract backend\n backend = kwargs.get('backend', None)\n\n # Default parameters\n if backend is None:\n backend = mdharmony.DEFAULT_BACKEND\n\n # Make sure there was a default\n if backend is None:\n raise Exception(\"No backend specified.\")\n\n # Find backend topology loader\n return mdharmony.registry.TOPOLOGY_CLASSES[backend].load(*args, **kwargs)", "title": "" }, { "docid": "73a8b344d573abd57a4e68c34ee20d61", "score": "0.5490842", "text": "def load_command_line(args):\n parser: ArgumentParser = basic_cli_parser(\n version_text=__version__, devel=True, trace=True, logfile=True, configfile=True\n )\n switches = parser.parse_args(args) # noqa F811\n\n # Convert any path strings to Path()\n if switches.logfile:\n switches.logfile = Path(switches.logfile)\n if switches.configfile:\n switches.configfile = Path(switches.configfile)\n\n CONFIG.update(switches)", "title": "" }, { "docid": "6b7f996aede21b2527be3cd638b48a69", "score": "0.5234356", "text": "def __init__(self, driver=None, **params):\n if driver:\n self.open(driver, **params)", "title": "" }, { "docid": "7b90b47f5860bca190e40172698dd69f", "score": "0.52066016", "text": "def loadpy(self, arg_s):\n self.load(arg_s)", "title": "" }, { "docid": "d090ca01f2625f03d89527b6974ffbfb", "score": "0.5165471", "text": "def backend_loader(pkg, name):\n\n def loader(init_args):\n module = importlib.import_module(pkg)\n cls = getattr(module, name)\n return cls(**init_args)\n\n return loader", "title": "" }, { "docid": "a71ed74ff078457f00726ec7bd020e92", "score": "0.5137623", "text": "def get_backend_driver(namespace, name, invoke_on_load=False):\n # NOTE: We use lazy import because importing from stevedore adds significat import time\n # overhead to other modules which don't need this package (stevedore needs to inspect various\n # entrypoint files on disk for all the installed Python packages which is slow)\n from stevedore.driver import DriverManager\n\n LOG.debug('Retrieving driver for backend \"%s\"' % (name))\n\n try:\n manager = DriverManager(\n namespace=namespace, name=name, invoke_on_load=invoke_on_load\n )\n except RuntimeError:\n message = 'Invalid \"%s\" backend specified: %s' % (namespace, name)\n LOG.exception(message)\n raise ValueError(message)\n\n return manager.driver", 
"title": "" }, { "docid": "58418b1ff5fdb6bcddd97f2cb6f18110", "score": "0.5061642", "text": "def load(pkgname, args=(), kw=None, doprint=1, hush=0, save=1):\n if isinstance(pkgname, _iraftask.IrafPkg):\n p = pkgname\n else:\n p = getPkg(pkgname)\n if kw is None:\n kw = {}\n if '_doprint' not in kw:\n kw['_doprint'] = doprint\n if '_hush' not in kw:\n kw['_hush'] = hush\n if '_save' not in kw:\n kw['_save'] = save\n p.run(*tuple(args), **kw)", "title": "" }, { "docid": "c5f2bda172a6a267c938dc41865b3fc5", "score": "0.5039654", "text": "def cli(driver_name, path, version, force):\n driver = download_driver(driver_name, path=path, version=version, force=force)\n click.echo(driver)", "title": "" }, { "docid": "3e478ad97336537459d70bb9dc751249", "score": "0.5006938", "text": "def get_pipeline_driver(module_name, passed_args=None):\n _imports = __import__(module_name, fromlist=[\"get_pipeline\"])\n kwargs = convert_struct(passed_args)\n return _imports.get_pipeline(**kwargs)", "title": "" }, { "docid": "fb353a6f161deaf3a5a2a34c72dced4a", "score": "0.49790695", "text": "def test_parser_with_driver(parser):\n with pytest.raises(SystemExit):\n parser.parse_args([url, \"--driver\", \"local\"])", "title": "" }, { "docid": "c7635686844970ebf5a8feea3cb0495c", "score": "0.4907722", "text": "def test_parser_with_known_drivers(parser):\n for driver in ['local', 's3']:\n parser.parse_args([url, '--driver', driver, 'destination'])", "title": "" }, { "docid": "8ad5f10109b28549292aa5a4dedae2a5", "score": "0.48819065", "text": "def switch_driver(self, driver=None):\n if driver is None:\n driver = LocalDriver()\n\n driver.import_data(self.driver.export_data())\n self.driver = driver", "title": "" }, { "docid": "d96678f8509a4fbf0cae2a9f224b5228", "score": "0.48535246", "text": "def load(cls, *args):\r\n return cls(*args)", "title": "" }, { "docid": "9b2ff7c804bc57c16481630a14483553", "score": "0.48496106", "text": "def load_args(ap):\n return ap.parse_args()", "title": "" }, { "docid": "944f7c6575e30a7ba79a71037c34d428", "score": "0.48338288", "text": "def do_load(self, ns: argparse.Namespace):\n cfg_path = \"config/\" + ns.target + \"/\" + ns.name + \"/professos.json\"\n if not os.path.exists(cfg_path):\n self.perror(cmd2.style('No {} target named {} found!'.format(ns.target, ns.name), fg=cmd2.fg.red))\n return\n\n if self._testModule:\n self.unregister_command_set(self._testModule)\n self._testModule = None\n\n if ns.target == 'op':\n self._testModule = OpTest(self, ns.name)\n else:\n self._testModule = RpTest(self, ns.name)\n\n try:\n self.register_command_set(self._testModule)\n self.poutput('')\n self.prompt = 'cli>> {}> {}> '.format(ns.target, ns.name)\n self.do_start(\"\")\n except ValueError:\n self.poutput('Module already loaded')", "title": "" }, { "docid": "5228e3b2cd2326835fa534047dddeafa", "score": "0.4814093", "text": "def get_web_driver(self, remote_flag, os_name, os_version, browser,\n browser_version, remote_project_name, remote_build_name):\n if remote_flag.lower() == 'y':\n web_driver = self.select_remote_platform(remote_flag, os_name, os_version,\n browser, browser_version, remote_project_name,\n remote_build_name)\n\n elif remote_flag.lower() == 'n':\n web_driver = self.run_local(browser)\n\n return web_driver", "title": "" }, { "docid": "699bc4469949a074561c6dc97789e31c", "score": "0.47969455", "text": "def load_backend(name, options=None):\n if name is None:\n assert options is None\n return get_default()\n if options is None:\n options = {}\n if name not in _backends:\n raise 
UnknownBackend(name)\n options = _backends[name][1](**options)\n key = (name, tuple(sorted(list(options.items()))))\n res = _active_backends.get(key, None)\n if res is None:\n try:\n res = _backends[name][0](options)\n _active_backends[key] = res\n except Exception as e:\n raise LoadingError(name) from e\n return res", "title": "" }, { "docid": "579f2eafff8e19dce8f3be61fac1fb15", "score": "0.4788409", "text": "def run_retrieval_from_args(parsed_args=None, debug=False):\n if parsed_args is None:\n parsed_args = arguments.parse_args()\n _data = gen_data_from_args(parsed_args)\n if parsed_args.alg not in ALGS:\n raise ValueError(\"unknown alg {} (valid algs: {})\".format(parsed_args.alg, list(ALGS)))\n return run_retrieval(parsed_args.alg, query_paths=_data[\"query_paths\"], database_paths=_data[\"database_paths\"], metric_name=parsed_args.metric, debug=debug)", "title": "" }, { "docid": "d530b1cfd64aef575a656b920fe3c5c7", "score": "0.47713763", "text": "def __init__(self, db_name, *args):\n\n if db_name in self.backends:\n self.dbapi = self.backends[db_name]\n argzip = zip(database_args[db_name], args)\n else:\n raise ValueError, \"Database %s not supported\" % db_name\n\n self.kwargs = dict((k,v) for (k,v) in argzip if v is not None)\n self.connect()", "title": "" }, { "docid": "1fdd12f075ac66bb80fe83cf889a2f18", "score": "0.4765755", "text": "def exec_cmdline(args):\n projectmeta, accessfiles = projid2meta(args.project)\n if args.station is None:\n # Try to get station from accessfiles in project config file\n try:\n args.station = list(accessfiles.keys()).pop()\n except:\n raise RuntimeError(\"No stations found for project {}\".format(args.project))\n try:\n # See if station has an access config file\n acf_name = accessfiles[args.station]\n except:\n raise RuntimeError(\"Station {} not found for project {}\".format(args.station,\n args.project))\n userilisadir = ilisa.monitorcontrol.user_conf_dir\n acf_path = os.path.join(userilisadir, acf_name)\n with open(acf_path) as acffp:\n ac = yaml.safe_load(acffp)\n # Initialize stationdriver :\n stndrv = StationDriver(ac['LCU'], ac['DRU'], mockrun=args.mockrun)\n args.func(stndrv, args)", "title": "" }, { "docid": "b820bab65d1df288ef67421b72ec463a", "score": "0.47608146", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "b820bab65d1df288ef67421b72ec463a", "score": "0.47608146", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "b820bab65d1df288ef67421b72ec463a", "score": "0.47608146", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "b820bab65d1df288ef67421b72ec463a", "score": "0.47608146", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "954cf3b30c5dc9992d0322457446a1d4", "score": "0.47603324", "text": "def load_cfg(cfg, args):\n cfg.merge_from_file(args.cfg_file)\n cfg.merge_from_list(args.opts)\n assert_cfg(cfg)", "title": "" }, { "docid": "863e73ad83d36bc2d8ac8f91e4251a9e", "score": "0.47578502", "text": "def load_storage_driver(conf, cache, storage_type=None,\n control_mode=False, control_driver=None):\n if control_mode:\n mode = 'control'\n storage_type = storage_type or conf['drivers'].management_store\n else:\n mode = 'data'\n storage_type = storage_type or conf['drivers'].message_store\n\n driver_type = 'zaqar.{0}.storage'.format(mode)\n\n _invoke_args = (conf, cache)\n if control_driver is not None:\n _invoke_args = (conf, cache, control_driver)\n\n try:\n mgr = stevedore.DriverManager(namespace=driver_type,\n name=storage_type,\n invoke_on_load=True,\n 
invoke_args=_invoke_args)\n\n if conf.profiler.enabled:\n if ((mode == \"control\" and conf.profiler.trace_management_store) or\n (mode == \"data\" and conf.profiler.trace_message_store)):\n trace_name = '{0}_{1}_driver'.format(storage_type, mode)\n return profiler.trace_cls(trace_name,\n trace_private=True)(mgr.driver)\n else:\n return mgr.driver\n\n except Exception as exc:\n LOG.exception('Failed to load \"%s\" driver for \"%s\"',\n driver_type, storage_type)\n raise errors.InvalidDriver(exc)", "title": "" }, { "docid": "da0a4abfd2b0d3de5a2cf16a04dfeef3", "score": "0.47573364", "text": "def driver(filename, fetch=False):\n # Splitting results in removal of delim. Have to rebuild\n dirname = ''.join(['/' + token for token in filename.split('/')[1:-1]])\n\n global prep_list\n prep_list = loader.load_file(filename, dirname)\n\n parse_prep([*prep_list[0]][0], dirname)", "title": "" }, { "docid": "f51c3773f7363a08c38bdec9fe46d128", "score": "0.47531408", "text": "def __init__(\n self,\n probe: Optional[\"DebugProbe\"],\n auto_open: bool = True,\n options: Optional[Mapping[str, Any]] = None,\n option_defaults: Optional[Mapping[str, Any]] = None,\n **kwargs\n ) -> None:\n # Importing Board here eases circular import issues, and it's only needed here anyway.\n from ..board.board import Board\n\n super().__init__()\n\n Session._current_session = weakref.ref(self)\n\n self._probe = probe\n self._closed: bool = True\n self._inited: bool = False\n self._user_script_namespace: Dict[str, Any] = {}\n self._user_script_proxy: Optional[UserScriptDelegateProxy] = None\n self._user_script_print_proxy = PrintProxy()\n self._delegate: Optional[Any] = None\n self._auto_open = auto_open\n self._options = OptionsManager()\n self._gdbservers: Dict[int, \"GDBServer\"] = {}\n self._probeserver: Optional[\"DebugProbeServer\"] = None\n self._context_state = SimpleNamespace()\n\n # Set this session on the probe, if we were given a probe.\n if probe is not None:\n probe.session = self\n\n # Update options.\n self._options.add_front(kwargs)\n self._options.add_back(options)\n\n # Init project directory.\n if self.options.get('project_dir') is None:\n self._project_dir: str = os.environ.get('PYOCD_PROJECT_DIR') or os.getcwd()\n else:\n self._project_dir: str = os.path.abspath(os.path.expanduser(self.options.get('project_dir')))\n LOG.debug(\"Project directory: %s\", self.project_dir)\n\n # Switch the working dir to the project dir.\n os.chdir(self.project_dir)\n\n # Load options from the config file.\n config = self._get_config()\n probes_config = config.pop('probes', None)\n\n # Pick up any config file options for this probe. 
These have priority over global options.\n if (probe is not None) and (probes_config is not None):\n did_match_probe = False\n for uid, settings in probes_config.items():\n if str(uid).lower() in probe.unique_id.lower():\n if did_match_probe:\n LOG.warning(\"Multiple probe config options match probe ID %s\", probe.unique_id)\n break\n LOG.info(\"Using config options for probe %s\" % (probe.unique_id))\n self._options.add_back(settings)\n did_match_probe = True\n\n # Add global config options.\n self._options.add_back(config)\n\n # Merge in lowest priority options.\n self._options.add_back(option_defaults)\n\n # Logging config.\n self._configure_logging()\n\n # Bail early if we weren't provided a probe.\n if probe is None:\n self._board = None\n return\n\n # Load the user script.\n self._load_user_script()\n\n # Ask the probe if it has an associated board, and if not then we create a generic one.\n self._board = probe.create_associated_board() or Board(self)", "title": "" }, { "docid": "ea0e9472c2d0db3465257ea8a7ce2e0f", "score": "0.47505918", "text": "def __init__(self, driver=None):\n self._driver = driver or get_driver(self.__class__)", "title": "" }, { "docid": "5b05ac0561cdce0da2bbae0cc3789975", "score": "0.47407898", "text": "def driver(self, driver: str) -> Any:\n return self.drivers[driver]", "title": "" }, { "docid": "2bae8e7cfbb431303ebd079b91a705f8", "score": "0.4738304", "text": "def load_database(cfg):\n if cfg.mode == 'Train':\n train_db = BaseDataset(cfg, use_trans=True)\n train_db.load_data(mode='Train')\n train_loader = DataLoader(dataset=train_db,\n batch_size=cfg.opts.batch,\n shuffle=True,\n num_workers=cfg.opts.workers)\n\n if cfg.opts.is_val:\n val_db = BaseDataset(cfg)\n val_db.load_data(mode='Val')\n val_loader = DataLoader(dataset=val_db,\n batch_size=cfg.opts.batch,\n shuffle=True,\n num_workers=cfg.opts.workers)\n else:\n val_loader = None\n\n if '-1' in cfg.opts.save_epoch:\n cfg.opts.save_list = int(cfg.opts.save_epoch.split(',')[0].replace('-1', str(cfg.opts.epoch)))\n else:\n cfg.opts.save_list = int(cfg.opts.save_epoch.split(',')[0].replace(' ', ''))\n print(\">>> [%s] was created ...\" % type(train_db).__name__)\n return train_loader, val_loader\n\n elif cfg.mode == 'Test':\n test_db = BaseDataset(cfg)\n test_db.load_data(mode='Test')\n if cfg.opts.num_test < cfg.opts.batch:\n cfg.opts.batch = cfg.opts.num_test\n test_loader = DataLoader(dataset=test_db,\n batch_size=cfg.opts.batch,\n shuffle=False,\n num_workers=cfg.opts.workers)\n print(\">>> [%s] was created ...\" % type(test_db).__name__)\n return test_loader\n\n else:\n raise IOError(\"[Error] opts.mode --> '{:s}' in options should be 'Train' or 'Test'...\".format(cfg.mode))", "title": "" }, { "docid": "1f021d1fcddbe56156d39ab7c716221b", "score": "0.47334847", "text": "def selectService ( webargs, dbcfg ):\n\n [ service, sym, restargs ] = webargs.partition ('/')\n\n if service == 'xy':\n return xyImage ( restargs, dbcfg )\n\n elif service == 'xz':\n return xzImage ( restargs, dbcfg )\n\n elif service == 'yz':\n return yzImage ( restargs, dbcfg )\n\n elif service == 'hdf5':\n return HDF5 ( restargs, dbcfg )\n\n elif service == 'npz':\n return numpyZip ( restargs, dbcfg )\n\n else:\n return web.notfound()", "title": "" }, { "docid": "2cfc3d77898e003743dd83bbbecc07fc", "score": "0.47238216", "text": "def test_parser_with_unknow_driver(parser):\n with pytest.raises(SystemExit):\n parser.parse_args([url, '--driver', 'azure', 'destination'])", "title": "" }, { "docid": "ef9cc4a300436a3e073c22cf0f9d31f0", 
"score": "0.47203195", "text": "def init_loader(dataset):\n if dataset == 'rohan':\n return RohanLoader() # harric added\n elif dataset == 'miccai':\n return MiccaiLoader()\n elif dataset == 'cmr':\n return CmrLoader()\n\n elif dataset == 'liverct':\n return LiverCtLoader()\n elif dataset == 'kits':\n return KitsLoader()\n elif dataset == 'toy':\n return ToyLoader()\n elif dataset == 'multimodalcardiac':\n return MultiModalCardiacLoader()\n return None", "title": "" }, { "docid": "2a13f25b01b63cfb7cd5f2d2d654fea4", "score": "0.47202533", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "2a13f25b01b63cfb7cd5f2d2d654fea4", "score": "0.47202533", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "2a13f25b01b63cfb7cd5f2d2d654fea4", "score": "0.47202533", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "2a13f25b01b63cfb7cd5f2d2d654fea4", "score": "0.47202533", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "2a13f25b01b63cfb7cd5f2d2d654fea4", "score": "0.47202533", "text": "def load(self, *args):\n return", "title": "" }, { "docid": "824b6eea10968ad4f0d46df00d02a155", "score": "0.47200328", "text": "def selectService ( webargs, dbcfg ):\n\n [ service, sym, restargs ] = webargs.partition ('/')\n\n if service == 'xy':\n return xyImage ( restargs, dbcfg )\n\n elif service == 'xz':\n return xzImage ( restargs, dbcfg )\n\n elif service == 'yz':\n return yzImage ( restargs, dbcfg )\n\n elif service == 'hdf5':\n return HDF5 ( restargs, dbcfg )\n\n elif service == 'npz':\n return numpyZip ( restargs, dbcfg ) \n\n else:\n return web.notfound()", "title": "" }, { "docid": "e796c773e2cce8ee5515fac148f17c47", "score": "0.47103214", "text": "def _str_load(\n name: str, pkg: Callable, discovered_plugins: Dict[str, Callable]\n) -> None:\n if OS.lower() in pkg.REQUIRED_OS.lower():\n if _verify_privilege(pkg):\n discovered_plugins[name] = pkg.entrypoint\n else:\n logging.error(\"{} must be ran on {}\".format(name, pkg.REQUIRED_OS))", "title": "" }, { "docid": "a15e55ac448369ac2f111bde6b5d34ed", "score": "0.4703607", "text": "def pick(mode,dst,file_name,language,print_option):\n \n if mode == \"database\":\n return DatabaseGenerator(dst,file_name,language,print_option)\n \n else: # mode == \"class\"\n return ClassGenerator(dst,file_name,language,print_option)", "title": "" }, { "docid": "ba4c3302d8f4a3f5c40d4714df66213f", "score": "0.4682172", "text": "def _loader(name: str, discovered_plugins: Dict[str, Callable]) -> None:\n logging.info(\"Found {}\".format(name))\n pkg = importlib.import_module(\"chirp.plugins.{}\".format(name))\n try:\n if not hasattr(pkg.entrypoint, \"__call__\"):\n raise AttributeError\n try:\n if isinstance(pkg.REQUIRED_OS, str):\n _str_load(name, pkg, discovered_plugins)\n elif isinstance(pkg.REQUIRED_OS, (tuple, list)):\n _iter_load(name, pkg, discovered_plugins)\n else:\n logging.error(\n \"Not sure how to interpret REQUIRED_OS for plugin {}\".format(name)\n )\n except AttributeError:\n if _verify_privilege(pkg):\n discovered_plugins[name] = pkg.entrypoint\n except AttributeError:\n logging.error(\"{} does not have a valid entrypoint\".format(name))", "title": "" }, { "docid": "90c36f7df41a1c0319a7b168c5e9b661", "score": "0.46805677", "text": "def plugin_load(name: str):\n try:\n assert '\\\\' not in name and '/' not in name, 'Plugin must be a filename, not path.'\n\n with open(f'{folder}/{name}.py') as f:\n plugin = plugin_build(interface_factory, name, f.read())\n assert plugin.MODE & mode, f'Plugin 
\\'{name}\\' does not support current running mode.'\n return plugin\n except BaseException as e:\n if verbose:\n print(e, file=stderr)\n return None", "title": "" }, { "docid": "6848989e43a81fdd58519c0a0659e6ba", "score": "0.4679503", "text": "def new_driver_object(name):\n driver = __locate_driver_named(name)\n return driver.Driver() if driver else None", "title": "" }, { "docid": "43a6e9ae057fe613e2e54f50154ace86", "score": "0.4678334", "text": "def create_loader(options, missing_modules=()):\n if options.precompiled_builtins:\n return PickledPyiLoader.load_from_pickle(\n options.precompiled_builtins, options, missing_modules)\n elif options.use_pickled_files:\n return PickledPyiLoader(options, missing_modules=missing_modules)\n else:\n return Loader(options, missing_modules=missing_modules)", "title": "" }, { "docid": "6193ee69729449e96521d72e9872b1c3", "score": "0.46712723", "text": "def do_Load(self, arg):\n\n self.database_name = arg\n self.db = sqlite3.connect(self.database_name + \".db\")\n self.cursor = self.db.cursor()", "title": "" }, { "docid": "e686892f0f8504b01a45af8effbdb5df", "score": "0.4669872", "text": "def load(cls, load_path, env=None):", "title": "" }, { "docid": "3c535bc36408a09da7615cace8ae2e68", "score": "0.4665392", "text": "def sdb(opts, functions=None, whitelist=None, utils=None, loaded_base_name=None):\n if utils is None:\n utils = {}\n\n return LazyLoader(\n _module_dirs(opts, \"sdb\"),\n opts,\n tag=\"sdb\",\n pack={\n \"__sdb__\": functions,\n \"__utils__\": utils,\n \"__salt__\": minion_mods(opts, utils=utils),\n },\n whitelist=whitelist,\n extra_module_dirs=utils.module_dirs if utils else None,\n loaded_base_name=loaded_base_name,\n )", "title": "" }, { "docid": "eb283a8f606a3b86b3cf01a457556508", "score": "0.46597996", "text": "def load_args(args, filename=None):\n if filename is None:\n filename = args.output_dir / 'args.json'\n content = load_json(filename)\n for key, value in content.items():\n if (not hasattr(args, key)) or (getattr(args, key) == None):\n setattr(args, key, value)", "title": "" }, { "docid": "b7f56bd1e586e9542e42d9fde9a4e2f0", "score": "0.46424535", "text": "def from_kwargs(**kwargs):\n for fetcher in all_strategies:\n if fetcher.matches(kwargs):\n return fetcher(**kwargs)\n\n raise InvalidArgsError(**kwargs)", "title": "" }, { "docid": "a9431522425ecb96c17b51549e7320ac", "score": "0.4639142", "text": "def cli_load_plugin(self, args) -> str:\n plugin_name = args.plugin_name\n current_dir = os.path.dirname(os.path.realpath(__file__))\n if not os.path.isfile(\"{}/{}.py\".format(current_dir, plugin_name)):\n return error(\"Plugin {} DNE\".format(plugin_name))\n\n # First, let's see if this is already imported\n module_name = \"plugins.{}\".format(plugin_name)\n if module_name in sys.modules:\n self.cli_unload_plugin(plugin_name)\n mod = sys.modules[module_name]\n importlib.reload(mod)\n self.register_plugin(self.get_class(mod, plugin_name))\n return ok(\"Plugin {} reloaded\".format(plugin_name))\n\n importlib.invalidate_caches()\n mod = importlib.import_module(module_name)\n self.register_plugin(self.get_class(mod, plugin_name))\n return ok(\"Plugin {} loaded\".format(plugin_name))", "title": "" }, { "docid": "947c6b01350828b60d316c285181a772", "score": "0.46274075", "text": "def load(self, *args):\n return self.send({'cmd': 'load', 'args': args})", "title": "" }, { "docid": "10e40ee65d7d89395fbafe385f1098cd", "score": "0.46238953", "text": "def load(self, env, path, disable_re, name_re):", "title": "" }, { "docid": 
"a4f1eb24e9e6c85a16bb92c9a7a72bc2", "score": "0.46182466", "text": "def test_parser_with_driver_and_destination(parser):\n args = parser.parse_args([url, '--driver', 'local', '/some/path'])\n \n assert args.url == url\n assert args.driver == 'local'\n assert args.destination == '/some/path'", "title": "" }, { "docid": "6d1816fa37912e8be9b3471d40a67cea", "score": "0.4609732", "text": "def setup(args):\r\n cfg = get_cfg()\r\n cfg.merge_from_file(args.config_file)\r\n cfg.merge_from_list(args.opts)\r\n cfg.freeze()\r\n default_setup(cfg, args)\r\n return cfg", "title": "" }, { "docid": "d245ce3249b6283dfbd287df8666c3f1", "score": "0.46060166", "text": "def load_game(self, *args):\n pass", "title": "" }, { "docid": "2e770848711c15b44b3fb7d077634800", "score": "0.46033624", "text": "def __handle(args):\n\n def arg_match(options):\n return util.arg_match(options, sys.argv[1:])\n\n # See if there is a \"PostgreSQL argument\" specified in the invocation\n # without '--postgresql' being there. There is no way to distinguish\n # a default argument and a deliberately specified argument without\n # inspecting sys.argv.\n options = ['--dbaddress', '--dbport', '--dbusername', '--dbname',\n '--db-host', '--db-port', '--db-username', '--db-name']\n psql_args_matching = arg_match(options)\n if any(psql_args_matching) and\\\n 'postgresql' not in args:\n first_matching_arg = next(iter([match for match\n in psql_args_matching]))\n parser.error(\"argument {0}: not allowed without \"\n \"argument --postgresql\".format(first_matching_arg))\n # parser.error() terminates with return code 2.\n\n # --not-host-only is a \"shortcut\", actually a to-be-deprecated\n # call which means '--host \"\"'.\n # TODO: Actually deprecate --not-host-only later on.\n options = ['--not-host-only', '--host']\n if set(arg_match(options)) == set(options):\n parser.error(\"argument --not-host-only: not allowed with \"\n \"argument --host, as it is a shortcut to --host \"\n \"\\\"::\\\"\")\n else:\n # Apply the shortcut.\n if arg_match(['--not-host-only']):\n args.listen_address = \"::\" # Listen on every interface.\n\n # --not-host-only is just a shortcut optstring, no actual use\n # is intended later on.\n delattr(args, 'not_host_only')\n\n # --workspace and --sqlite cannot be specified either, as\n # both point to a database location.\n options = ['--sqlite', '--workspace']\n options_short = ['--sqlite', '-w']\n if set(arg_match(options)) == set(options) or \\\n set(arg_match(options_short)) == set(options_short):\n parser.error(\"argument --sqlite: not allowed with \"\n \"argument --workspace\")\n\n # --workspace and --config-directory also aren't allowed together now,\n # the latter one is expected to replace the earlier.\n options = ['--config-directory', '--workspace']\n options_short = ['--config-directory', '-w']\n if set(arg_match(options)) == set(options) or \\\n set(arg_match(options_short)) == set(options_short):\n parser.error(\"argument --config-directory: not allowed with \"\n \"argument --workspace\")\n\n # If workspace is specified, sqlite is workspace/config.sqlite\n # and config_directory is the workspace directory.\n if arg_match(['--workspace', '-w']):\n args.config_directory = args.workspace\n args.sqlite = os.path.join(args.workspace,\n 'config.sqlite')\n setattr(args, 'dbdatadir', os.path.join(args.workspace,\n 'pgsql_data'))\n\n # Workspace should not exist as a Namespace key.\n delattr(args, 'workspace')\n\n if '<CONFIG_DIRECTORY>' in args.sqlite:\n # Replace the placeholder variable with the actual value.\n 
args.sqlite = args.sqlite.replace('<CONFIG_DIRECTORY>',\n args.config_directory)\n\n # Convert relative sqlite file path to absolute.\n if 'sqlite' in args:\n args.sqlite = os.path.abspath(args.sqlite)\n\n if 'postgresql' not in args:\n # Later called database modules need the argument to be actually\n # present, even though the default is suppressed in the optstring.\n setattr(args, 'postgresql', False)\n\n # This is not needed by the database starter as we are\n # running SQLite.\n if 'dbdatadir' in args:\n delattr(args, 'dbdatadir')\n else:\n # If --postgresql is given, --sqlite is useless.\n delattr(args, 'sqlite')\n\n # Indicate in args that we are in instance manager mode.\n if \"list\" in args or \"stop\" in args or \"stop_all\" in args:\n setattr(args, \"instance_manager\", True)\n\n # If everything is fine, do call the handler for the subcommand.\n main(args)", "title": "" }, { "docid": "51e74dcae820354799b780355a94456d", "score": "0.45987737", "text": "def load_py(cls, filename, tunables):\n # check filename\n if not os.path.isfile(filename):\n raise TRexError(\"File '{0}' does not exist\".format(filename))\n\n basedir = os.path.dirname(filename)\n sys.path.insert(0, basedir)\n\n try:\n file = os.path.basename(filename).split('.')[0]\n module = __import__(file, globals(), locals(), [], 0)\n imp.reload(module) # reload the update \n\n try:\n profile = module.register().get_profile(tunables)\n except SystemExit:\n # called \".. -t --help\", return None\n return None\n profile.meta = {'type': 'python',\n 'tunables': tunables}\n return profile\n finally:\n sys.path.remove(basedir)", "title": "" }, { "docid": "753ec173194d15c6975526a2daa37051", "score": "0.45855692", "text": "def setup(args):\n cfg = get_cfg()\n # cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "cc5033ca3f9e592e899add3b5b81e81d", "score": "0.45726007", "text": "def load_tuner(self):\n # tuner will either be a key into TUNERS_MAP or a path to\n # a file that defines a class called CustomTuner.\n if self.datarun.tuner in TUNERS_MAP:\n self.Tuner = TUNERS_MAP[self.datarun.tuner]\n else:\n path, classname = re.match(CUSTOM_CLASS_REGEX,\n self.datarun.tuner).groups()\n mod = imp.load_source('btb.tuning.custom', path)\n self.Tuner = getattr(mod, classname)\n logger.info('Tuner: %s' % self.Tuner)", "title": "" }, { "docid": "986dd105a2d33c54dca1abdd56d2453f", "score": "0.4571138", "text": "def find_driver_class(driver_name):\n driver_class = None\n for module in 'kubernetes_driver', 'openshift_template_driver':\n driver_class = getattr(\n importlib.import_module('pebbles.drivers.provisioning.%s' % module),\n driver_name,\n None\n )\n if driver_class:\n break\n\n return driver_class", "title": "" }, { "docid": "d880d95399a29882fe11e684476b43b8", "score": "0.4550021", "text": "def findPlug(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "53c04f8a55918ba043c0dab33df03efe", "score": "0.4549119", "text": "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "6d3b64006f4025ea64e6783914df623b", "score": "0.4544488", "text": "def test_main_calls_load_single_core(load_single_core_mock, argparse_mock):\n ParseArgsRetValue = namedtuple('ParseArgsRetValue', 'cpu_core duration cpu_load path_to_profile_json')\n\n args_mock = Mock()\n argparse_mock.ArgumentParser = 
Mock(return_value=args_mock)\n\n args_mock.parse_args = Mock(return_value=ParseArgsRetValue(cpu_core=0, duration=10,\n cpu_load=0.5, path_to_profile_json=\"\"))\n main()\n\n load_single_core_mock.assert_called_once()", "title": "" }, { "docid": "23c472e582aace45a89173c342e620b9", "score": "0.45443976", "text": "def setup_driver(url=BASE_URL + FREEGAME_URL_EXTENSION):\n # url = BASE_URL + \"/store/en-US/free-games\"\n data_dir = os.path.join(DIRNAME, \".config/google-chrome\")\n options = webdriver.ChromeOptions()\n options.add_argument(\n \"user-data-dir={}\".format(data_dir))\n\n ##### UNCOMMENT NEXT TWO LINES TO USE BRAVE BROWSER #####\n # brave_path = \"/usr/bin/brave-browser\"\n # options.binary_location = brave_path\n\n driver = webdriver.Chrome(\n executable_path=CHROME_DRIVER_PATH, chrome_options=options)\n driver.get(url)\n print(url)\n return driver", "title": "" }, { "docid": "0a1dcd69b409d3140438e187f03b01fa", "score": "0.4539268", "text": "def load_imks_ext(self, arg):\n import os, os.path\n ip = self.shell\n exts = arg.split(\",\")\n for ext in exts:\n if ext == \"calendars\":\n calendars.loadcalendars(ip)\n elif ext == \"geolocation\":\n import geolocation\n ip.user_ns[\"get_geolocation\"] = geolocation.get_geolocation\n ip.user_ns[\"set_geolocation\"] = geolocation.set_geolocation\n elif ext == \"constants\":\n import constants\n constants.loadconstants(engine=eval(engine, self.shell.user_ns))\n self.shell.user_ns[\"const\"] = constants.constants\n elif ext == \"currencies\":\n if ip.user_ns.has_key(\"openexchangerates_id\"):\n app_id = self.shell.user_ns[\"openexchangerates_id\"]\n else: app_id = \"\"\n currencies.currencies(app_id)\n else: print \"Unknown extension `%s'.\" % ext", "title": "" }, { "docid": "339b77360789e428a5c75be0f6741fdc", "score": "0.45391273", "text": "def load(cls, load_path, env=None):\n pass", "title": "" }, { "docid": "31962e78813888d10c492cfa3ed79edc", "score": "0.45388168", "text": "def cli(ap, main):\n cmd = load_args(ap)\n main(**vars(cmd))", "title": "" }, { "docid": "6d7a053a85a4114d6ea064b6a84baee8", "score": "0.45344275", "text": "def __init__(self, driver: str = _DEFAULT_DRIVER):\n self._driver = driver\n self._app = None", "title": "" }, { "docid": "edd36e416cf886669d17303ce66521a0", "score": "0.4529116", "text": "def _backend_from_arg(backend):\n\n if isinstance(backend, str):\n try:\n backend = _named_backends[backend]\n except KeyError as e:\n raise ValueError(f'Unknown backend {backend}') from e\n\n if backend.__ua_domain__ != 'numpy.scipy.fft':\n raise ValueError('Backend does not implement \"numpy.scipy.fft\"')\n\n return backend", "title": "" }, { "docid": "b794f33ebaba133bd256cf17ebe1dc7a", "score": "0.45204034", "text": "def experiments_cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data-root\", \"-dr\", type=str)\n parser.add_argument(\"--data-folder\", \"-df\", type=str)\n parser.add_argument(\"--python\", \"-p\", type=str)\n\n args = parser.parse_args()\n\n if args.data_root != None and args.data_folder != None:\n DATA_ARG = \"-ddr {} -ddf {}\".format(args.data_root,args.data_folder)\n else:\n DATA_ARG = \"\"\n\n return sys.executable, DATA_ARG", "title": "" }, { "docid": "75ea2e1d48f6f185bb921ed6aa11fae7", "score": "0.45141557", "text": "def test_parse_args_parsing_gpus(monkeypatch, cli_args, expected_gpu):\n monkeypatch.setattr(\"torch.cuda.device_count\", lambda: 2)\n cli_args = cli_args.split(\" \") if cli_args else []\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n parser = 
LightningArgumentParser(add_help=False, parse_as_dict=False)\n parser.add_lightning_class_args(Trainer, None)\n args = parser.parse_args()\n\n trainer = Trainer.from_argparse_args(args)\n assert trainer.data_parallel_device_ids == expected_gpu", "title": "" }, { "docid": "f0324ca712856ad2c84cfd7395a1346e", "score": "0.45110947", "text": "def _load_plugin(plugin_dir, device, cpu_extension):\n\n print(\"Initializing plugin for {} device...\".format(device))\n plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)\n if cpu_extension and 'CPU' in device:\n plugin.add_cpu_extension(cpu_extension)\n return plugin", "title": "" }, { "docid": "bcf4c80fa8f1fb0195e494487d84ff16", "score": "0.45088983", "text": "def android(self, remote_flag, desired_capabilities, app_path, app_name, appium_version,\n app_package, app_activity, username, password, device_flag):\n #Get the driver when test is run on a remote platform\n if remote_flag.lower() == 'y':\n mobile_driver = self.remote_platform_mobile(remote_flag, app_path, app_name, desired_capabilities,\n username, password, appium_version)\n\n #Get the driver when test is run on local setup\n else:\n try:\n desired_capabilities['appPackage'] = app_package\n desired_capabilities['appActivity'] = app_activity\n if device_flag.lower() == 'y':\n mobile_driver = mobile_webdriver.Remote(localhost_url, desired_capabilities)\n else:\n desired_capabilities['app'] = os.path.join(app_path, app_name)\n mobile_driver = mobile_webdriver.Remote(localhost_url, desired_capabilities)\n except Exception as exception:\n self.print_exception(exception, remote_flag)\n\n return mobile_driver", "title": "" }, { "docid": "d4ae38bbc7bd64e9e7f1ee053a235c20", "score": "0.45043698", "text": "def load_storage_impl(uri, control_mode=False, default_store=None):\n\n mode = 'control' if control_mode else 'data'\n driver_type = 'zaqar.{0}.storage'.format(mode)\n # Note(wanghao): In python3.9, urlparse will return 'localhost' as scheme\n # instead of '' in python3.8 when uri string is 'localhost:xxxxx'. 
So there\n # need to handle this change.\n storage_type = urllib_parse.urlparse(uri).scheme\n if storage_type == '' or storage_type == 'localhost':\n storage_type = default_store\n\n try:\n mgr = stevedore.DriverManager(namespace=driver_type,\n name=storage_type,\n invoke_on_load=False)\n\n return mgr.driver\n\n except Exception as exc:\n LOG.exception('Error loading storage driver')\n raise errors.InvalidDriver(exc)", "title": "" }, { "docid": "3842dcfa5b4af7962be56caf9c743358", "score": "0.4504105", "text": "def load(cls, load_path, env=None):\n raise NotImplementedError()", "title": "" }, { "docid": "d58f8582b3dfb72135e31a52c95312cc", "score": "0.45040855", "text": "def __init__(self,dbtype=None,*args) :\n\n self.dbtype = dbtype\n config = Config()\n\n if dbtype == self.DBTYPE_SQLITE :\n from dbaccess.dbsqlite3 import DbSqlite3\n\n self.filename = args[0]\n self.db = DbSqlite3(args[0])\n\n elif dbtype == self.DBTYPE_ORACLE :\n # username, password, sid\n from dbaccess.dboracle import DbOracle\n nls_lang = config.oracle['nls_lang'] if 'nls_lang' in config.oracle else None\n if nls_lang is None: \n self.db = DbOracle(args[0],args[1],args[2],args[3],args[4])\n else:\n self.db = DbOracle(args[0],args[1],args[2],args[3],args[4],nls_lang)\n\n self.connectstring = self.db.connectstring\n\n elif dbtype == self.DBTYPE_MYSQL :\n from dbaccess.dbmysql import DbMySql\n self.db = DbMySql(args[0],args[1],args[2],args[3],args[4])\n else :\n raise \"Ungueltiger Datenbanktype '%s'\" % (dbtype)", "title": "" }, { "docid": "18d5bdefb7b3fe632829e7529eb7998c", "score": "0.45013702", "text": "def load():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"SOURCE\", help='source file to import',\n type=argparse.FileType('r'))\n args = parser.parse_args()\n return load_file(args.SOURCE)", "title": "" }, { "docid": "83f04df72190853520c1976ad0ce8856", "score": "0.45006034", "text": "def load(filename, **kwargs):\r\n if os.path.exists(filename):\r\n execfile(filename)\r\n for name, obj in locals().items():\r\n if not name.startswith('_') and isinstance(obj, types.FunctionType):\r\n COMMANDS[name] = obj\r\n if not name.startswith('_'):\r\n __builtins__[name] = obj\r\n else:\r\n _fail(kwargs, \"Load failed:\\n\" + _indent(\r\n \"File not found: \" + filename))", "title": "" }, { "docid": "aaffceb420a123dc6e96cbe345dde1e2", "score": "0.44953403", "text": "def _load_plugin(plugin_dir, device, cpu_extension=None):\n\n print(\"Initializing plugin for {} device from {}...\".format(device, plugin_dir))\n plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)\n if cpu_extension and 'CPU' in device:\n print(\"Loading extension: {}\".format(cpu_extension))\n plugin.add_cpu_extension(cpu_extension)\n return plugin", "title": "" }, { "docid": "16520658c8c17c08f5f19ffe237de1f0", "score": "0.4492173", "text": "def loadName(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "f0ea654c41c94fc6184006ccf61e8453", "score": "0.44835564", "text": "def init_main():\n args = parse_arguments()\n\n if args.labels is None or not os.path.exists(args.labels):\n raise ValueError('{} labels file not found'.format(args.labels))\n\n if args.data is None or not os.path.exists(args.data):\n raise ValueError('{} data file not found'.format(args.data))\n\n # Check output filename\n if args.output is None:\n args.output = 'out_' + str(datetime.now())\n\n # Check model architecture\n if args.model not in __implemeted_models__:\n raise ValueError('model must be in {}. 
'\n 'Found: {}.'.format(__implemeted_models__, args.model))\n\n return args", "title": "" }, { "docid": "7320945727a9b9f83731c47a592f7566", "score": "0.448294", "text": "def load(fp, **kwargs):\n return __default.load(fp, **kwargs)", "title": "" }, { "docid": "fa663dbb9a1f5f1da96d78505e69f998", "score": "0.4478415", "text": "def get_driver():\n return get_component(IDriver)", "title": "" }, { "docid": "299bf5a6adcec143439114c3fde67126", "score": "0.4471507", "text": "def load(hub, sources, cli=None, dyne_name=None, loader=\"yaml\", parse_cli=True):\n hub.pop.sub.add(dyne_name=\"config\")\n hub.config.integrate.load(sources, cli, dyne_name, loader, parse_cli)", "title": "" }, { "docid": "b9708d2bbfbe31db657948723f5f83f8", "score": "0.4467571", "text": "def init(args):\n from . import init_impl\n\n init_impl.main(args)", "title": "" }, { "docid": "dbe45065a1ee9831b2a46e8bb9e0e548", "score": "0.44673055", "text": "def _load_subprocess(self, *args):\n\n cmd = self.subprocess_cmd\n if args:\n cmd += args\n\n try:\n if self.silent_errors:\n with open(os.devnull, 'w') as devnull:\n output_str = subprocess.check_output(cmd, stderr=devnull)\n else:\n output_str = subprocess.check_output(cmd)\n except subprocess.CalledProcessError as error:\n warnings.warn(\"%s returned nonzero exit code (%d)\" % (cmd, error.returncode))\n output_str = error.output\n except OSError as error:\n if error.errno == errno.ENOENT:\n raise type(error)(error.errno, \"%s command not found\" % self.subprocess_cmd[0])\n raise\n\n if isstr(output_str):\n # Python 2 - subprocess.check_output returns a string\n self.load_str(output_str)\n else:\n # Python 3 - subprocess.check_output returns encoded bytes\n self.load_str(output_str.decode())", "title": "" }, { "docid": "dbe45065a1ee9831b2a46e8bb9e0e548", "score": "0.44673055", "text": "def _load_subprocess(self, *args):\n\n cmd = self.subprocess_cmd\n if args:\n cmd += args\n\n try:\n if self.silent_errors:\n with open(os.devnull, 'w') as devnull:\n output_str = subprocess.check_output(cmd, stderr=devnull)\n else:\n output_str = subprocess.check_output(cmd)\n except subprocess.CalledProcessError as error:\n warnings.warn(\"%s returned nonzero exit code (%d)\" % (cmd, error.returncode))\n output_str = error.output\n except OSError as error:\n if error.errno == errno.ENOENT:\n raise type(error)(error.errno, \"%s command not found\" % self.subprocess_cmd[0])\n raise\n\n if isstr(output_str):\n # Python 2 - subprocess.check_output returns a string\n self.load_str(output_str)\n else:\n # Python 3 - subprocess.check_output returns encoded bytes\n self.load_str(output_str.decode())", "title": "" }, { "docid": "3f834a8d261f3687245ee11416ec3169", "score": "0.4464297", "text": "def __init__(self, *args, **kwargs):\n if len(args) == 1:\n self.load(args[0])\n\n self.from_dict(kwargs)", "title": "" }, { "docid": "96dcc3ef9b5e68ab9478079e153770fc", "score": "0.44642243", "text": "def driver_init( driver_config, driver_secrets ):\n\n global fs\n global storage_dir\n global lock\n\n # create a re-entrant lock (not a read lock)\n lock = threading.RLock()\n\n # detect a role\n rolestr = sys.argv[1]\n role = abstractfs.afsrole.DISCOVER\n if rolestr == \"read\":\n role = abstractfs.afsrole.READ\n elif rolestr == \"crawl\":\n role = abstractfs.afsrole.DISCOVER\n else:\n gateway.log_error( \"Unknown role: %s\" % rolestr )\n return False\n\n if not _initFS( driver_config, driver_secrets, role ):\n gateway.log_error( \"Unable to init filesystem\")\n return False\n\n if not fs.exists( \"/\" ):\n 
gateway.log_error( \"No such file or directory: %s\" % storage_dir )\n return False\n\n if not fs.is_dir( \"/\" ):\n gateway.log_error( \"Not a directory: %s\" % storage_dir )\n return False\n\n if role == abstractfs.afsrole.DISCOVER:\n # add initial dataset\n _resync( \"/\" )\n\n return True", "title": "" }, { "docid": "ae05d8e49bde7511cde76b4f8ded2e48", "score": "0.44639623", "text": "def choose_driver_from_os(options, caps):\n my_os = platform.system()\n if (my_os == \"Windows\"):\n return selenium.webdriver.Chrome(CHROME_DRIVER_LOCATION_WINDOWS, options=options, desired_capabilities=caps)\n elif (my_os == \"Darwin\"):\n return selenium.webdriver.Chrome(CHROME_DRIVER_LOCATION_MAC, options=options, desired_capabilities=caps)\n elif (my_os == \"Linux\"):\n return selenium.webdriver.Chrome(CHROME_DRIVER_LOCATION_LINUX, options=options, desired_capabilities=caps)\n print(\"ERROR: You're using an invalid OS -- please use Linux, Mac, or Windows.\")\n quit()", "title": "" }, { "docid": "6f9f87430aca6eb5de3e65ac90ce2803", "score": "0.44631702", "text": "def pick_gcamera():\n\n name = os.path.basename(sys.argv[0])\n if name.startswith('lcoGcamera'):\n return gcamera()\n elif name.startswith('lcoEcamera'):\n return ecamera()", "title": "" }, { "docid": "abe07692935e727ebee682986d4b869f", "score": "0.44614193", "text": "def getGdalDriver(fname, fmt):\n\n from osgeo import gdal\n\n # have to use geotiff or a similar format b/c not all formats support Create\n # if create not supported, use geotiff as an intermediate format, then copy\n driver = gdal.GetDriverByName(fmt)\n\n metadata = driver.GetMetadata()\n\n supports_create = False\n\n if metadata.has_key(gdal.DCAP_CREATE) and metadata[gdal.DCAP_CREATE] == 'YES':\n supports_create = True\n\n if not supports_create:\n return gdal.GetDriverByName(\"GTiff\"), supports_create, \".tif\"\n else:\n # already got driver for the chosen format, so no need to redefine it\n return driver, supports_create, os.path.splitext(fname)[1]", "title": "" }, { "docid": "eb14960855aca30c0a93756ec287532b", "score": "0.44604915", "text": "def load(app):\n handler.register(ArgParseHandler)", "title": "" }, { "docid": "1580aaa807dc1bd7ade15d90dba2a51c", "score": "0.44477016", "text": "def get_downloader(session, class_name, args):\r\n\r\n external = {\r\n 'wget': WgetDownloader,\r\n 'curl': CurlDownloader,\r\n 'aria2': Aria2Downloader,\r\n 'axel': AxelDownloader,\r\n }\r\n\r\n for bin, class_ in iteritems(external):\r\n if getattr(args, bin):\r\n return class_(session, bin=getattr(args, bin))\r\n\r\n return NativeDownloader(session)", "title": "" }, { "docid": "7cbc22393332cb3b18b5b6409cc921f6", "score": "0.44473374", "text": "def get_dataset(args: argparse.Namespace) -> cldfbench.Dataset:\n ds = _get(args.dataset, ep=args.entry_point)\n if ds:\n return ds\n raise ParserError(termcolor.colored(\n '\\nInvalid dataset spec: <{0}> {1}\\n'.format(args.entry_point, args.dataset), \"red\"))", "title": "" }, { "docid": "5205970b677cc77b3cfc75e72c732488", "score": "0.4445767", "text": "def get_plugin(cls, plugin, *args):\n\t\tif not issubclass(plugin, cls):\n\t\t\tif plugin not in cls.loaded_by_name:\n\t\t\t\treturn\n\t\t\tplugin = cls.loaded_by_name[plugin]\n\t\tfor instance in plugin.enabled:\n\t\t\tif instance.args == args:\n\t\t\t\treturn instance", "title": "" } ]
a8a9c2682b7df65a05e26dfb11a51644
Validate snippet and device list with user
[ { "docid": "61a0d856096cd7be8b40d900987e795c", "score": "0.6050847", "text": "def confirm_deployment_validity(snippet: str, requested_device_name_list: List[str]) -> bool:\n\n # Print snippet and ask user for confirmation\n netcat.LOGGER.info(\"Displaying snippet and asking user for confirmation\")\n print()\n print(\"******************** CONFIGURATION SNIPPET ********************\")\n print()\n print(snippet)\n print()\n print(\"***************************************************************\")\n print()\n confirmation = input(\"Type 'yes' if the above snippet is correct: \")\n print()\n\n if confirmation.lower() != \"yes\":\n netcat.LOGGER.error(\"User haven't confirmed validity of snippet\")\n return False\n\n netcat.LOGGER.info(\"User confirmed validity of snippet\")\n\n # Print device list and ask user for confirmation\n netcat.LOGGER.info(\"Displaying list of devices and asking user for confirmation\")\n print()\n print()\n print(\"************************* DEVICE LIST *************************\")\n print()\n print(\", \".join(requested_device_name_list))\n print()\n print(\"***************************************************************\")\n print()\n confirmation = input(\"Type 'yes' if the above device list is correct: \")\n print()\n\n if confirmation.lower() != \"yes\":\n netcat.LOGGER.error(\"User haven't confirmed validity of device list\")\n return False\n\n netcat.LOGGER.info(\"User confirmed validity of device list\")\n\n return True", "title": "" } ]
[ { "docid": "3b47c7e3290d05b7551f276b3bcf0573", "score": "0.5582056", "text": "def validate_device_info(self, devinfo, prefix=\"\"):\n errors = []\n\n if \"deviceType\" in devinfo:\n deviceType = devinfo[\"deviceType\"]\n if deviceType == \"network/upnp\":\n if \"upnp\" in devinfo:\n upnpinfo = devinfo[\"upnp\"]\n child_errors = self.validate_upnp_info(upnpinfo, prefix=prefix + \"/upnp\")\n errors.extend(child_errors)\n else:\n errors.append([prefix + \"upnp\", \"Device type 'network/upnp' must have a 'upnp' data member.\"])\n if deviceType == \"network/ssh\":\n if \"host\" not in devinfo:\n errors.append(\"SSH Devices must have a 'host' field.\")\n if \"credentials\" not in devinfo:\n errors.append(\"Device type 'network/ssh' must have a 'credentials' data member.\")\n else:\n errors.append([prefix + \"deviceType\", \"Device information is missing the required 'deviceType' data member.\"])\n\n return errors", "title": "" }, { "docid": "4c0c8c962a7c36924a2d93c65916c89e", "score": "0.5579996", "text": "def test_listname_validation(self):\n self.user_registration()\n result = self.user_logsin()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post(\n '/shoppinglists',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={'listname':'@@@'})\n self.assertEqual(res.status_code, 400)\n result = json.loads(res.data.decode())\n self.assertEqual(\n result['message'],\n u\"listname should contain letters, digits and with a min length of 1\")", "title": "" }, { "docid": "198ef782746393e9f5f0bd98abce6600", "score": "0.55455226", "text": "def validate_devices_list(self, devlist, prefix=\"\"): # pylint: disable=unused-argument\n errors = []\n\n for devidx, devinfo in enumerate(devlist):\n item_prefix = \"/devices[%d]\" % devidx\n child_errors = self.validate_device_info(devinfo, prefix=item_prefix)\n errors.extend(child_errors)\n\n return errors", "title": "" }, { "docid": "fd95e17eba6bd4e520c0ecda983a74e8", "score": "0.5530953", "text": "def validate():", "title": "" }, { "docid": "47618994ba78aacecce464ab3958d617", "score": "0.5447495", "text": "async def _validate_user_input(user_input: dict) -> tuple:\n errors = {}\n\n # Validate amazon forwarding email addresses\n if isinstance(user_input[CONF_AMAZON_FWDS], str):\n status, amazon_list = await _check_amazon_forwards(user_input[CONF_AMAZON_FWDS])\n if status[0] == \"ok\":\n user_input[CONF_AMAZON_FWDS] = amazon_list\n else:\n user_input[CONF_AMAZON_FWDS] = amazon_list\n errors[CONF_AMAZON_FWDS] = status[0]\n\n # Check for ffmpeg if option enabled\n if user_input[CONF_GENERATE_MP4]:\n valid = await _check_ffmpeg()\n else:\n valid = True\n\n if not valid:\n errors[CONF_GENERATE_MP4] = \"ffmpeg_not_found\"\n\n # validate custom file exists\n if user_input[CONF_CUSTOM_IMG] and CONF_CUSTOM_IMG_FILE in user_input:\n valid = path.isfile(user_input[CONF_CUSTOM_IMG_FILE])\n else:\n valid = True\n\n if not valid:\n errors[CONF_CUSTOM_IMG_FILE] = \"file_not_found\"\n\n # validate scan interval\n if user_input[CONF_SCAN_INTERVAL] < 5:\n errors[CONF_SCAN_INTERVAL] = \"scan_too_low\"\n\n # validate imap timeout\n if user_input[CONF_IMAP_TIMEOUT] < 10:\n errors[CONF_IMAP_TIMEOUT] = \"timeout_too_low\"\n\n return errors, user_input", "title": "" }, { "docid": "b04a8fb610d7c2c507b0e3a3fc6b7041", "score": "0.5424532", "text": "def validate_user(self):\n pass", "title": "" }, { "docid": "4fe8f011ee9cfffc86cebfbbcb0e316a", "score": "0.5343571", "text": "def _validate_user_options(self, profile_list):\n # `user_options` 
is allowed to be falsy as it could be via a JupyterHub\n # REST API request to spawn a server - then `user_options` can be\n # anything.\n if not self.user_options:\n return\n\n # If \"profile\" isn't declared or falsy, no further validation is done.\n profile_slug = self.user_options.get(\"profile\")\n if not profile_slug:\n return\n\n # Ensure \"profile\" is defined in profile_list by calling _get_profile\n # with a truthy profile_slug.\n profile = self._get_profile(profile_slug, profile_list)\n\n # Ensure user_options related to the profile's profile_options are valid\n for option_name, option in profile.get('profile_options', {}).items():\n unlisted_choice_key = f\"{option_name}--unlisted-choice\"\n unlisted_choice = self.user_options.get(unlisted_choice_key)\n choice = self.user_options.get(option_name)\n if not (unlisted_choice or choice):\n # no user_options was passed for this profile option, the\n # profile option's default value can be used\n continue\n if unlisted_choice:\n # we have been passed a value for the profile option's\n # unlisted_choice, it must be enabled and the provided value\n # must validate against the validation_regex if configured\n if not option.get(\"unlisted_choice\", {}).get(\"enabled\"):\n raise ValueError(\n f\"Received unlisted_choice for {option_name} without being enabled.\"\n )\n\n validation_regex = option[\"unlisted_choice\"].get(\"validation_regex\")\n if validation_regex and not re.match(validation_regex, unlisted_choice):\n raise ValueError(\n f\"Received unlisted_choice for {option_name} that failed validation regex.\"\n )", "title": "" }, { "docid": "768834dbd0dcc4358ff466483afe475d", "score": "0.52773774", "text": "async def validate_input(data):\n userid = data.get(CONF_USERNAME)\n password = data.get(CONF_PASSWORD)\n\n prefix = data[CONF_PREFIX]\n url = _make_url_from_data(data)\n requires_password = url.startswith(\"elks://\")\n\n if requires_password and (not userid or not password):\n raise InvalidAuth\n\n elk = elkm1.Elk(\n {\"url\": url, \"userid\": userid, \"password\": password, \"element_list\": [\"panel\"]}\n )\n elk.connect()\n\n if not await async_wait_for_elk_to_sync(elk, VALIDATE_TIMEOUT, url):\n raise InvalidAuth\n\n device_name = data[CONF_PREFIX] if data[CONF_PREFIX] else \"ElkM1\"\n # Return info that you want to store in the config entry.\n return {\"title\": device_name, CONF_HOST: url, CONF_PREFIX: slugify(prefix)}", "title": "" }, { "docid": "4f414f7c9a5b2681b45d7d0457dae3d6", "score": "0.5252285", "text": "def story3(self):\n self.validate()\n for device in self.valid_devices:\n print \"\\n---- Valid Device ----\"\n print device\n for device in self.invalid_devices:\n print \"\\n---- Invalid Device ----\"\n print device", "title": "" }, { "docid": "bff74c868bb4bdf33169547cdf7dede6", "score": "0.52464485", "text": "def expected_audience_for_list(user_list):\n for list_item in user_list.items.all():\n if list_item.content_type == ContentType.objects.get_for_model(Course):\n if list_item.item.platform in PROFESSIONAL_COURSE_PLATFORMS:\n return []\n elif list_item.content_type == ContentType.objects.get_for_model(Program):\n if (\n OfferedBy.micromasters.value\n not in list_item.item.offered_by.values_list(\"name\", flat=True)\n ):\n return []\n\n return [\"Open Content\"]", "title": "" }, { "docid": "1052eeff478f677f44dce2a07e7b3b63", "score": "0.5207575", "text": "def verify(self):\n super().verify()\n\n if not all(\n self.payload.get(key)\n for key in (\n \"lti_consumer_site_id\",\n \"lti_user_id\",\n )\n ):\n raise 
TokenError(_(\"Malformed LTI user token\"))", "title": "" }, { "docid": "5b8bc8446c8e286e6d1dda8e20ce6e3d", "score": "0.5205845", "text": "def check_if_list_valid(items, content_type):\n if len(items) < 1:\n return False\n else:\n return True", "title": "" }, { "docid": "fadf93785f1f0abb5a97118f73f9e09f", "score": "0.5180151", "text": "def validate(genelist):\n\n sanity = Sanity()\n sanity.check(genelist)", "title": "" }, { "docid": "68a82fb5e2dcb30f9cb01171d1d3b0f5", "score": "0.51366115", "text": "def _validate(self):", "title": "" }, { "docid": "165b8dad39e57b6c716facc6ca70bce6", "score": "0.511177", "text": "def validate_data_for_list(arguments: list) -> bool:\n result = ValidationResult()\n available_arguments = [\"customer_id\", \"full_name\", \"position\", \"name_of_the_organization\", \"email\", \"phone\"]\n\n for argument in arguments:\n if argument not in available_arguments:\n result.isSuccess = False\n result.errors.append(f\"'list' command has no argument: {argument}\")\n\n if not result.isSuccess:\n print(*result.errors)\n\n return result.isSuccess", "title": "" }, { "docid": "5333bea28a7a540447788061d6017d2a", "score": "0.5071972", "text": "def validate_input(helper, definition):\n pass", "title": "" }, { "docid": "5333bea28a7a540447788061d6017d2a", "score": "0.5071972", "text": "def validate_input(helper, definition):\n pass", "title": "" }, { "docid": "e8a15f6ddd9a4c9538a2a1bad4415dca", "score": "0.50662875", "text": "def mc_validate(self):", "title": "" }, { "docid": "ed575f1fe13454e13808f66fed832950", "score": "0.5063198", "text": "def validateSchema(self, schema):\n def _checkSiteList(list):\n if self.has_key(list) and hasattr(self,'allCMSNames'):\n for site in self[list]:\n if not site in self.allCMSNames: #self.allCMSNames needs to be initialized to allow sitelisk check\n raise RuntimeError(\"The site \" + site + \" provided in the \" + list + \" param has not been found. 
Check https://cmsweb.cern.ch/sitedb/json/index/SEtoCMSName?name= for a list of known sites\")\n\n self.requireValidateFields(fields=self.requiredFields, schema=schema, validate=False)\n\n _checkSiteList(\"SiteWhitelist\")\n _checkSiteList(\"SiteBlacklist\")\n\n #Control if the request name contain spaces\n if schema.get(\"RequestName\").count(' ') > 0:\n msg = \"RequestName cannot contain spaces\"\n self.raiseValidationException(msg = msg)\n\n return", "title": "" }, { "docid": "4c8992b93386f49b833982e6d67241ef", "score": "0.5039352", "text": "def checkRegistration(self):\n\n user = []\n\n user.append(self.firstNameEntry.get())\n user.append(self.lastNameEntry.get())\n user.append(self.userNameEntry.get())\n user.append(self.postCodeEntry.get())\n user.append(self.streetNameEntry.get())\n user.append(self.houseNumberEntry.get())\n user.append(self.emailEntry.get())\n user.append(self.emailConfirmEntry.get())\n user.append(self.passwordEntry.get())\n user.append(self.passwordConfirmationEntry.get())\n user.append(self.phoneNumberEntry.get())\n\n isCorrect = util.verifyRegistration(user)\n if isinstance(isCorrect, str):\n self.errorLabel.config(text=isCorrect)\n else:\n if isCorrect:\n self.controller.init(self.userNameEntry.get())\n self.controller.show_frame(strings.welcomeClass)\n else:\n self.errorLabel.config(text=isCorrect)", "title": "" }, { "docid": "e52f5b798e9b13c5eb324f6dc60c5047", "score": "0.50329524", "text": "def validate(self, settings, item):\n raise NotImplementedError", "title": "" }, { "docid": "0b6150693316cb5162d524947c4c247b", "score": "0.5030565", "text": "def test_check_empty_missing_items(self):\n CommonTestCases.user_token_assert_in(\n self,\n create_check_query_empty_missing_item,\n \"missing_items is required field\"\n )", "title": "" }, { "docid": "540ab505eca8d78a4af4910c5bb8d53e", "score": "0.5005028", "text": "def validate(self):\n return []", "title": "" }, { "docid": "2fd255fe2a7d59df0648d88fe40fff0b", "score": "0.50026226", "text": "def valid_boot_list(boot_list,\n valid_boot_types):\n\n for boot_name in boot_list:\n boot_name = boot_name.strip(\" \")\n error_message = gv.valid_value(boot_name,\n valid_values=valid_boot_types,\n var_name=\"boot_name\")\n if error_message != \"\":\n BuiltIn().fail(gp.sprint_error(error_message))", "title": "" }, { "docid": "3966d0fc9055c4b855f524a5ced0d9ef", "score": "0.4993743", "text": "def validate(self, attrs):\n provider = [provider.name for provider in FeedProviders if provider.value == attrs['feed_type']]\n provider_instance = get_provider(provider[0])\n # Calling the API to get user info to verify the validity of user\n user_data = provider_instance.get_user_info_data(attrs['account'].access_token)\n user_data = provider_instance.process_user_data(user_data)\n valid_user_data = attrs['detail'] == user_data['id']\n # calling the API for pages\n try:\n pages_data = provider_instance.get_pages(attrs['account'].access_token)\n except NotImplementedError:\n if not valid_user_data:\n raise serializers.ValidationError(\n \"Requested id={} is not a valid id\".format(attrs['detail'])\n )\n attrs.update({'other_data': user_data})\n return attrs\n page_data = provider_instance.process_page_data(pages_data)\n valid_page_data = [page for page in page_data if attrs['detail'] == page['id']]\n if not any([valid_page_data, valid_user_data]):\n raise serializers.ValidationError(\n \"Requested id={} is not a valid id\".format(attrs['detail'])\n )\n if valid_user_data:\n attrs.update({'other_data': user_data})\n elif valid_page_data:\n 
attrs.update({'other_data': valid_page_data[0]})\n return attrs", "title": "" }, { "docid": "83d1c7e69a717696d5182ee8be5b0219", "score": "0.49821025", "text": "def test_clean_with_valid_values(self):\n values = self.field.clean(['allow', 'block'])\n\n self.assertIsNotNone(values)\n self.assertEqual(len(values), 2)\n\n consent_data = values[0]\n self.assertEqual(consent_data.requirement_id, 'my-requirement-1')\n self.assertEqual(consent_data.source, 'https://example.com/consent/')\n self.assertTrue(consent_data.granted)\n self.assertEqual(\n consent_data.extra_data,\n {\n 'test': True,\n })\n\n consent_data = values[1]\n self.assertEqual(consent_data.requirement_id, 'my-requirement-2')\n self.assertEqual(consent_data.source, 'https://example.com/consent/')\n self.assertFalse(consent_data.granted)\n self.assertEqual(\n consent_data.extra_data,\n {\n 'test': True,\n })\n\n # Make sure the timestamps are identical.\n self.assertEqual(values[0].timestamp, values[1].timestamp)", "title": "" }, { "docid": "82f96dc202a1c913db011c1364720d0a", "score": "0.4978267", "text": "def is_list_equel(self, generated_sequence, get_list_from_user):\n # print(generated_sequence)\n # print(get_list_from_user)\n if generated_sequence != get_list_from_user:\n print(\"wrong, this is not the macthing umbers\")\n return False\n else:\n print(\"success, you selected the correct numbers\")\n return True", "title": "" }, { "docid": "c31baf35d31e78bc3d92aa8762368194", "score": "0.49758294", "text": "def verification_input(input_value):\n schema = {\n \"type\": \"object\",\n \"required\": [\"userId\",\"bankName\",\"fileName\",\"file\"],\n \"properties\": {\n \"userId\":{\"type\":\"string\",\"minLenght\":1},\n \"bankName\":{\"type\":\"string\",\"minLength\":1},\n \"fileName\":{\"type\":\"string\",\"minLength\":1},\n \"file\":{\"type\":\"string\",\"minLength\":1},\n \"password\":{\"type\":\"string\"}\n },\n }\n try:\n return validate(instance=input_value, schema=schema)\n except Exception as e:\n return e.message", "title": "" }, { "docid": "445e47ba8c076a03a0b78d630201f5f8", "score": "0.49743235", "text": "def validate_data(funcdesc, items, values):\n print('\\nValidating input values...\\n')\n try:\n\n # check the number of user inputs matches the number required\n if len(items) != len(values):\n raise ValueError('Incorrect number of input values,'\n f' {len(items)} expected')\n\n # check no 0 length values in the input\n if (len(min(values, key=len))) == 0:\n raise ValueError('All inputs must have a length > 0')\n\n # loop through each expected input and check syntax based on name\n for x in range(len(items)):\n if items[x].upper() == 'EVENT CODE':\n tmpcode = values[x] # capture the event code for use later\n elif items[x].upper() == 'DATE(DD-MM-YYYY)':\n tmpdate = values[x]\n datetime.strptime(tmpdate, '%d-%m-%Y') # ? ValueError\n # using a regex to check 2 digit day and month entered\n if not re.search(r'^\\d{2}-\\d{2}-\\d{4}$', tmpdate):\n raise ValueError('2 digit day and month required in Date')\n if (datetime.strptime(tmpdate, '%d-%m-%Y').date() <\n datetime.today().date()):\n raise ValueError('Date must be >= current date')\n elif items[x].upper() in ('CAPACITY', 'SEATS'):\n tmpnum = int(values[x]) # ? 
ValueError\n # check value is > 0\n if (tmpnum <= 0):\n raise ValueError(f'{items[x]} must have a value > 0')\n elif items[x].upper() == 'EMAIL':\n patt = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b'\n if not re.search(patt, values[x]):\n raise ValueError('Email address is not valid')\n\n # once all syntax checks have been passed, next the semantic checks/\n # business rules for the data need to run - these are specific to the\n # operation being requested\n # (the additional business rules for the CANCEL operations have been\n # left in the cancel functions as they need to attempt to find the\n # specific row to delete/update in the spreadsheet - it would be\n # inefficient to scan through the spreadsheet multiple times.)\n\n if (funcdesc == 'ADD A NEW EVENT'):\n # a. event code/date combination must be unique\n if event_exists(tmpcode, tmpdate):\n raise ValueError('Duplicate Event Code and Date found')\n\n elif (funcdesc == 'ADD A NEW BOOKING'):\n # a. event must exist in events sheet\n # b. seats requested must be <= seats available\n if not event_exists(tmpcode, tmpdate):\n raise ValueError('Booking cannot be added. '\n 'Event does not exist')\n elif (num_seats_available(tmpcode, tmpdate) < tmpnum):\n raise ValueError('Booking cannot be added. '\n 'Not enough seats available')\n\n except ValueError as e:\n print(f'Invalid data: {e}, please try again.')\n return False\n\n print('Input values are valid...\\n')\n return True", "title": "" }, { "docid": "33c0f036079a56529c2ed468946e22d9", "score": "0.49697527", "text": "def test__bad_list_option__succeeds():\n assert True", "title": "" }, { "docid": "076639ca501a51e92ffe1936505dad5a", "score": "0.49605274", "text": "def test_user_form_invalid_data(self):\n pass", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.49595165", "text": "def validate(self):", "title": "" }, { "docid": "f452a80b3605cb7bd63d4347f44b62a9", "score": "0.49487084", "text": "def validate_landscape(self, landscape_info):\n errors = []\n\n if \"pod\" in landscape_info:\n podinfo = landscape_info[\"pod\"]\n if \"devices\" in podinfo:\n devices_list = podinfo[\"devices\"]\n child_errors = self.validate_devices_list(devices_list, prefix=\"\")\n errors.extend(child_errors)\n else:\n errors.append([\"/pod/devices\", \"A pod description requires a 'devices' list data member.\"])\n else:\n errors.append([\"/pod\", \"A landscape description requires a 'pod' data member.\"])\n\n return errors", "title": "" }, { "docid": "5b4c742f1e11e4c7867fe8395af1ab45", "score": "0.49404252", "text": "def is_valid_listings(link):\n raise NotImplementedError", "title": "" }, { "docid": "6a31012a6e1863cc8ca693789ee84d93", "score": "0.49388534", "text": "def mc_post_validate(self):", "title": "" }, { "docid": "c5db45325ca827c35920a4bff9495b3c", "score": "0.49344993", "text": "def validator(self, lst, type):\n if self.req.content_type == 'application/json':\n if set(lst).issubset(self.data):\n if type == 'signup':\n err = [\n self.empty_check([self.data['username'], self.data['firstname'], self.data['contact'], self.data['email'],self.data['password']]),\n self.type_check([self.data['username'], self.data['firstname']], str),\n self.email_check(self.data['email']), \n self.password_check(self.data['password']),\n self.contact_check(self.data['contact'])\n ]\n\n if type == 'order':\n err = [\n self.empty_check([self.data['product'], self.data['description'], self.data['pickup'], self.data['destination'], self.data['weight']]),\n 
self.type_check([self.data['product'], self.data['description'], self.data['pickup'], self.data['destination']], str),\n self.type_check([self.data['weight']], int)\n ]\n\n for value in err:\n if not isinstance(value, bool):\n return value\n return True\n return jsonify(self.err(\"some field is missing\")), 400\n return jsonify(self.err('only json data is allowed')), 406", "title": "" }, { "docid": "bef2a0c8c3f8e7d2afc9e5a36cad3585", "score": "0.491759", "text": "def validate_user_data(user_data, schema):\n return schema.is_valid([user_data])", "title": "" }, { "docid": "aa0ce6fd3e569a1f01888eff3d33f79e", "score": "0.48944035", "text": "def check_data(self, values):\n # Check that data is a list\n if not isinstance(values, list):\n raise TypeError(\"Data needs to be a Python List.\")\n\n # Check length\n if not self.check_length(values):\n raise AVMListLengthError(\"List is not the correct length.\")\n\n checked_data = []\n # Check data type in list\n for value in values:\n if (isinstance(value, basestring)):\n value = value\n value = self.format_data(value)\n\n if self.check_cv(value):\n checked_data.append(value)\n else:\n raise AVMItemNotInControlledVocabularyError(\n \"Item is not in the controlled vocabulary.\")\n else:\n if value is None:\n checked_data.append(\"-\")\n else:\n raise TypeError(\n \"Elements of list need to be string or unicode.\")\n\n if len(set(checked_data)) == 1 and checked_data[0] == \"-\":\n checked_data = []\n\n return checked_data", "title": "" }, { "docid": "ea8e825fd289f6cee561159c04dd4739", "score": "0.4890953", "text": "def _validate(self) -> typing.List[builtins.str]:\n return jsii.invoke(self, \"validate\", [])", "title": "" }, { "docid": "ea2706907e189c01273545d9ec2c157c", "score": "0.48902768", "text": "def _init_validation_helper(items: Optional[List[str]]) -> List[str]:\n if items is None:\n return []\n # Make sure the items are all non-empty strings.\n for item in items:\n dbg.dassert_isinstance(item, str)\n dbg.dassert_ne(item, \"\")\n dbg.dassert_no_duplicates(items)\n return items", "title": "" }, { "docid": "c62d612545e4c82beffd2c625943a935", "score": "0.48879126", "text": "def validate(self, data: Any) -> None:", "title": "" }, { "docid": "ad36361f7361b354d30bb8756395e011", "score": "0.48820823", "text": "def validate(self, data):\n user = data['user']\n movie = data['movie']\n query = List_movie.objects.select_related().filter(user=user, movie=movie, is_active=True)\n validate_if_user_add_message(query, \"list_movie\")\n return data", "title": "" }, { "docid": "7eec41ad5a7e2c6ef3d046e7c70bff6d", "score": "0.48753455", "text": "def _validate(self, errors):\r\n pass", "title": "" }, { "docid": "8b87b258f8e8f3a71caacea434887ac7", "score": "0.48730776", "text": "def validate(self):\n # Authenticate USER\n if Form.validate(self) == False:\n return False\n if self.instructor_type.data == \"TA\":\n if TA.check_in_ta_list(self.net_id.data) == False:\n self.net_id.errors.append(\"This Net ID is not a valid TA\")\n elif TA.check_ta_registration(self.net_id.data) == True:\n self.net_id.errors.append(\"This Net ID has already been registered\")\n else:\n return True\n elif self.instructor_type.data == \"student\":\n if student.check_in_student_list(self.net_id.data) == False:\n self.net_id.errors.append(\"This NETID is not a valid Student\")\n elif student.check_student_registration(self.net_id.data) == True:\n self.net_id.errors.append(\"This Net ID has already been registered\")\n else:\n return True\n\n return False", "title": "" }, { "docid": 
"87b9e786a9ce7a398e110a16d7706d33", "score": "0.48708507", "text": "def test_suggestion_on_wrong_question(self):\n CommonTestCases.user_token_assert_in(\n self,\n make_suggestion_on_wrong_question,\n \"Provide the missing items\"\n )", "title": "" }, { "docid": "0d0f128b7ec38612759de596e314f435", "score": "0.48683926", "text": "def test_list_items(self):\n expected = [(self.superuser, 0), (self.regular_user, 2)]\n url = reverse('tokens:list')\n self.assert_element_count(expected, url, 'sodar-tk-list-item', 'class')", "title": "" }, { "docid": "24409167118d80b8db2b759127848932", "score": "0.48653084", "text": "def validate(form):", "title": "" }, { "docid": "349bf4591dc8ec8bba353b8f274471b5", "score": "0.48586845", "text": "def validate(value, context=None):", "title": "" }, { "docid": "10f5c68a0a4167d70b00072b0a1f3c7d", "score": "0.48522642", "text": "def validate_request(self, request):", "title": "" }, { "docid": "ee342485e7a7ebf426100b873b6123d5", "score": "0.48515803", "text": "def validate(self, attrs):\n attrs[\"program\"] = get_object_or_404(Program, pk=attrs[\"program_id\"])\n if not attrs[\"program\"].financial_aid_availability:\n raise ValidationError(\"Financial aid not available for this program.\")\n if not ProgramEnrollment.objects.filter(program=attrs[\"program\"], user=self.context[\"request\"].user).exists():\n raise ValidationError(\"User not in program.\")\n if not is_profile_filled_out(self.context[\"request\"].user.profile):\n raise ValidationError(\"Profile is not complete\")\n return attrs", "title": "" }, { "docid": "d6f0520a8951ac0059a3b25f4eebfca7", "score": "0.4848644", "text": "def smartarrayvalidation(self, options):\r\n client = None\r\n inputline = list()\r\n runlogin = False\r\n\r\n if options.encode and options.user and options.password:\r\n options.user = Encryption.decode_credentials(options.user)\r\n options.password = Encryption.decode_credentials(options.password)\r\n\r\n try:\r\n client = self._rdmc.app.get_current_client()\r\n if options.user and options.password:\r\n if not client.get_username():\r\n client.set_username(options.user)\r\n if not client.get_password():\r\n client.set_password(options.password)\r\n except:\r\n if options.user or options.password or options.url:\r\n if options.url:\r\n inputline.extend([options.url])\r\n if options.user:\r\n inputline.extend([\"-u\", options.user])\r\n if options.password:\r\n inputline.extend([\"-p\", options.password])\r\n else:\r\n if self._rdmc.app.config.get_url():\r\n inputline.extend([self._rdmc.app.config.get_url()])\r\n if self._rdmc.app.config.get_username():\r\n inputline.extend([\"-u\", self._rdmc.app.config.get_username()])\r\n if self._rdmc.app.config.get_password():\r\n inputline.extend([\"-p\", self._rdmc.app.config.get_password()])\r\n\r\n if inputline or not client:\r\n runlogin = True\r\n if not inputline:\r\n sys.stdout.write('Local login initiated...\\n')\r\n\r\n if runlogin:\r\n self.lobobj.loginfunction(inputline)", "title": "" }, { "docid": "0a1d582f66774cb5a11296e6a778f6a7", "score": "0.48482943", "text": "def _validate(self, errors):\n pass", "title": "" }, { "docid": "81df8bba9ba59bb6a9963ca014dce1eb", "score": "0.4847993", "text": "def test_check_no_missing_items(self):\n CommonTestCases.user_token_assert_in(\n self,\n create_check_query_no_missing_item,\n \"Provide the missing items\"\n )", "title": "" }, { "docid": "07ecc73c655c065c67358672be4c02d3", "score": "0.48458266", "text": "def test_verify_the_mdp_fields_under_policy():", "title": "" }, { "docid": 
"9ab143bf4201af28b94098309ad2bb6d", "score": "0.48397502", "text": "async def validate_input(hass: HomeAssistant, data: dict) -> dict:\n session = async_get_clientsession(hass)\n casa = CasaTunes(session, data[CONF_HOST])\n system = await casa.get_system()\n\n return {\n \"title\": system.AppName,\n \"mac_address\": format_mac(system.MACAddress),\n }", "title": "" }, { "docid": "11a4c9e4612be1178ed52af807a7e97a", "score": "0.4839656", "text": "def valid_input_devices(self):\n mics=[]\n for device in range(self.p.get_device_count()):\n if self.valid_test(device):\n mics.append(device)\n if len(mics)==0:\n print(\"no microphone devices found!\")\n else:\n print((\"found %d microphone devices: %s\"%(len(mics),mics)))\n return mics", "title": "" }, { "docid": "34a53a9e12eec02ed2ce97f5cc7095df", "score": "0.48347363", "text": "def test_validate(self):\n self.assertIsNone(cli.cli(f\"noid -V\"))", "title": "" }, { "docid": "60e27ba70ce987abbd06f39280389cc6", "score": "0.4829903", "text": "def test_listDoesNotDisplayDuplicates(self):\n authTypes = []\n options = DummyOptions()\n for cf in options._checkerFactoriesForOptHelpAuth():\n self.assertNotIn(cf.authType, authTypes)\n authTypes.append(cf.authType)", "title": "" }, { "docid": "c80e8976cfdf886def81922a2a5d7c36", "score": "0.48053366", "text": "async def validate_input(hass: core.HomeAssistant, data):\n session = async_get_clientsession(hass)\n fully = FullyKiosk(session, data[\"host\"], data[\"port\"], data[\"password\"])\n\n try:\n with timeout(10):\n deviceInfo = await fully.getDeviceInfo()\n except (FullyKioskError, ClientConnectorError):\n raise CannotConnect\n\n # If you cannot connect:\n # throw CannotConnect\n # If the authentication is wrong:\n # InvalidAuth\n\n # Return info that you want to store in the config entry.\n return {\n \"title\": f\"{deviceInfo['deviceName']} {deviceInfo['deviceID']}\",\n \"host\": data[\"host\"],\n \"port\": data[\"port\"],\n \"password\": data[\"password\"],\n }", "title": "" }, { "docid": "3f60387f7234ea16e445606a79862b67", "score": "0.4801042", "text": "def validate(self, value):", "title": "" }, { "docid": "830d52cb3a0ca1bdab78a1708151f8aa", "score": "0.48006406", "text": "def validate_data(self, data, **kwargs):\n if data['type'] == \"Personal\":\n person_identifiers = ['Orcid']\n identifiers = data.get('identifiers', {}).keys()\n if any([ident not in person_identifiers for ident in identifiers]):\n raise ValidationError(_(\"Invalid identifier for a person.\"))\n elif data['type'] == \"Organizational\":\n org_identifiers = ['ror']\n identifiers = data.get('identifiers', {}).keys()\n if any([ident not in org_identifiers for ident in identifiers]):\n raise ValidationError(\n _(\"Invalid identifier for an organization.\")\n )", "title": "" }, { "docid": "1429545e52074ec4a525a2273749dccb", "score": "0.48002997", "text": "def validate_upnp_info(self, upnpinfo, prefix=\"\"): # pylint: disable=no-self-use,unused-argument\n errors = []\n\n if \"USN\" not in upnpinfo:\n errors.append([prefix + \"USN\", \"UPnP information is missing a 'USN' data member.\"])\n if \"modelNumber\" not in upnpinfo:\n errors.append([prefix + \"modelNumber\", \"UPnP information is missing a 'modelNumber' data member.\"])\n if \"modelName\" not in upnpinfo:\n errors.append([prefix + \"modelName\", \"UPnP information is missing a 'modelName' data member.\"])\n\n return errors", "title": "" }, { "docid": "9e85b2b8e32fde502c28f9c0348cc2a2", "score": "0.47992432", "text": "def validate_input(data):\n\n genres = ['fiction', 
'non-fiction']\n\n for param in ('title', 'author', 'genre'):\n if param not in data or len(data[param].strip()) < 1 or data['genre'].lower() not in genres:\n return \"Invalid \" + param\n\n if 'book_code' not in data:\n return \"Missing book Code\"\n elif Book.get_book(data['book_code']):\n return \"Book(book_code) already in lib\"\n elif type(data['book_code']) != int or len(str(data['book_code'])) != 12:\n return \"Invalid book_code\"\n\n # Check if given ddc_code follows Dewey Decimal Classification syst.\n if 'ddc_code' not in data:\n return \"Missing ddc Code\"\n else:\n pattern = r\"^[\\d][\\d][\\d](\\.*[\\d])*$\"\n match = re.search(pattern, data['ddc_code'])\n if not match:\n return \"Invalid ddc_code. Use DDC structure.\"", "title": "" }, { "docid": "a97249589f405f40519890111bc0bc08", "score": "0.47972944", "text": "def validate_list(arg, args):\n _valid = True\n if arg[1] not in VALID_LIST_OPTIONS:\n _valid = False\n log.echo_error(\"'%s' is not a valid option\" % arg[1])\n if len(args) > 1:\n log.echo_error('Too many arguments: %s' % len(args))\n _valid = False\n elif len(args) == 1:\n for key in args:\n if key == 'region':\n if not valid_instance_region(args['region']):\n _valid = False\n log.echo_error(\"Invalid Region '\" + args['region'] + \"' , run 'list regions' to see valid options\")\n else:\n _valid = False\n return _valid", "title": "" }, { "docid": "66a0c232df15b87c326947a0e923938a", "score": "0.47899514", "text": "def test_validate_bad(self):\n with mock_input(\"app\"):\n self.assertEqual(validate_or_prompt_path(\"message\", \"// app / \"), \"app\")", "title": "" }, { "docid": "9dff759dbe96eb449352e7ce1d160b8b", "score": "0.47814828", "text": "def _single_validate(self, typ: str, value: Any, key: str):\n # if required argument\n if value is None:\n raise vol.Invalid(\n f\"Missing required option '{key}' in {self._name} ({self._slug})\"\n ) from None\n\n # Lookup secret\n if str(value).startswith(\"!secret \"):\n secret: str = value.partition(\" \")[2]\n value = self.sys_homeassistant.secrets.get(secret)\n if value is None:\n raise vol.Invalid(\n f\"Unknown secret '{secret}' in {self._name} ({self._slug})\"\n ) from None\n\n # parse extend data from type\n match = RE_SCHEMA_ELEMENT.match(typ)\n\n if not match:\n raise vol.Invalid(\n f\"Unknown type '{typ}' in {self._name} ({self._slug})\"\n ) from None\n\n # prepare range\n range_args = {}\n for group_name in _SCHEMA_LENGTH_PARTS:\n group_value = match.group(group_name)\n if group_value:\n range_args[group_name[2:]] = float(group_value)\n\n if typ.startswith(_STR) or typ.startswith(_PASSWORD):\n if typ.startswith(_PASSWORD) and value:\n self.pwned.add(hashlib.sha1(str(value).encode()).hexdigest())\n return vol.All(str(value), vol.Range(**range_args))(value)\n elif typ.startswith(_INT):\n return vol.All(vol.Coerce(int), vol.Range(**range_args))(value)\n elif typ.startswith(_FLOAT):\n return vol.All(vol.Coerce(float), vol.Range(**range_args))(value)\n elif typ.startswith(_BOOL):\n return vol.Boolean()(value)\n elif typ.startswith(_EMAIL):\n return vol.Email()(value)\n elif typ.startswith(_URL):\n return vol.Url()(value)\n elif typ.startswith(_PORT):\n return network_port(value)\n elif typ.startswith(_MATCH):\n return vol.Match(match.group(\"match\"))(str(value))\n elif typ.startswith(_LIST):\n return vol.In(match.group(\"list\").split(\"|\"))(str(value))\n elif typ.startswith(_DEVICE):\n try:\n device = self.sys_hardware.get_by_path(Path(value))\n except HardwareNotFound:\n raise vol.Invalid(\n f\"Device '{value}' does 
not exist in {self._name} ({self._slug})\"\n ) from None\n\n # Have filter\n if match.group(\"filter\"):\n str_filter = match.group(\"filter\")\n device_filter = _create_device_filter(str_filter)\n if device not in self.sys_hardware.filter_devices(**device_filter):\n raise vol.Invalid(\n f\"Device '{value}' don't match the filter {str_filter}! in {self._name} ({self._slug})\"\n )\n\n # Device valid\n self.devices.add(device)\n return str(device.path)\n\n raise vol.Invalid(\n f\"Fatal error for option '{key}' with type '{typ}' in {self._name} ({self._slug})\"\n ) from None", "title": "" }, { "docid": "f734726a74d4892e3bfa16f73af931f2", "score": "0.477588", "text": "def available(user_token):", "title": "" }, { "docid": "988aec76c754beab2e77ceb1dfd4384c", "score": "0.47719616", "text": "def validate():\n pass", "title": "" }, { "docid": "7aeade31949a6ac53d53aeef5f4ba703", "score": "0.47688648", "text": "def _validate_source_list(cls, source_list):\n msg = (\"You included a special value in your CSP, but did not wrap it in quotes.\"\n \" eg. %s should be '%s'\")\n for v in cls.unquoted_reserved_source_expressions:\n if v in source_list:\n raise ValueError(msg % (v, v))\n return source_list", "title": "" }, { "docid": "6052bc515cd5d6f070fc45b61bffca77", "score": "0.47652504", "text": "async def send_validation_warnings(self, ctx: commands.Context, app: SpotlightApp):\n try:\n user_id = extract_user_id(app.user_id)\n except discord.InvalidArgument:\n logger.warning(\"User ID format for spotlight app is invalid: '{}'\".format(app.user_id))\n await self.bot.say(\"**Warning**: User ID format is invalid: '{}'\".format(app.user_id))\n return\n\n # user not on server\n if ctx.message.server.get_member(user_id) is None:\n logger.warning(\"Spotlight app user not on server: '{}' {}\"\n .format(app.user_name_only, user_id))\n await self.bot.say(\"**Warning:** User not on server: {} {}\"\n .format(app.user_name_only, user_mention(user_id)))", "title": "" }, { "docid": "ae91f2ec9b4364acc49c4db296a09e9d", "score": "0.47651547", "text": "def validate_input(helper, definition):\n # This example accesses the modular input variable\n # aws_description_role_name = definition.parameters.get('aws_description_role_name', None)\n pass", "title": "" }, { "docid": "1acb3d5e842dfbd75003397131e7ee7e", "score": "0.47644767", "text": "def validar_usuario(user):\n\n caracteres_especiales = '!ยก@#$%._-'\n\n if len(user) > 3 and len(user) < 21:\n if user.isalnum():\n return True\n else:\n for c in user:\n if not (c.isalnum() or c in caracteres_especiales):\n sg.PopupQuick('Solo se permiten los siguientes '\n + 'caracteres: !ยก@#$%._-',\n auto_close = False)\n return False\n return True\n else:\n sg.PopupQuick('El usuario debe tener entre 4 y 20 caracteres.', \n auto_close = False)\n return False", "title": "" }, { "docid": "4da5f2167683041db23470c9754d30fe", "score": "0.4749691", "text": "def test_apps_page__validate_adding_truecommand_as_a_custom_app():", "title": "" }, { "docid": "9aa9a9d56e30857398c6d1ef7d045042", "score": "0.47477913", "text": "def _check_tags(self, attribute, value):\n no_req_tag_err = f'At least one tag required from tag list: {\", \".join(REQUIRED_TAG_LIST)}'\n\n config_data = config.ConfigFile.load()\n cfg = config.Config(config_data=config_data)\n if cfg.check_required_tags and not value:\n self.value_error(no_req_tag_err)\n\n if cfg.check_required_tags and (not any(tag in REQUIRED_TAG_LIST for tag in value)):\n self.value_error(no_req_tag_err)\n\n if not value:\n return\n\n if len(value) > 
constants.MAX_TAGS_COUNT:\n self.value_error(f\"Expecting no more than {constants.MAX_TAGS_COUNT} tags in metadata\")\n\n for tag in value:\n if not re.match(constants.NAME_REGEXP, tag):\n self.value_error(f\"'tag' has invalid format: {tag}\")\n if len(tag) > MAX_LENGTH_TAG:\n self.value_error(\n f\"Each tag in 'tags' list must not be greater than {MAX_LENGTH_TAG} characters\"\n )", "title": "" }, { "docid": "925edf81f03e1b0536a392674ddbf4f3", "score": "0.4742419", "text": "def validate_config(self, model: Module, config_list: List[Dict]):\n pass", "title": "" }, { "docid": "d86ce31c6edbbcc0abb9b41efe1d108e", "score": "0.47374156", "text": "def validate_rule(self, client):\n pass", "title": "" }, { "docid": "d64e69a70f3226e52f687867d3f2796f", "score": "0.47370744", "text": "def check_required_attributes(self):\n result_list = []\n required_attributes = ['url', 'user', 'password', 'reference']\n missing_attributes = []\n\n # Set response to empty so as to flush the response set by some earlier fucntion call\n self.response = ''\n\n # Loop to check if the required attributes are set\n for attribute in required_attributes:\n if getattr(self, attribute):\n result_list.append(True)\n else:\n # Set response/message for attributes which aren't set\n missing_attributes.append(attribute)\n result_list.append(False)\n \n # Compare result_list to get True for all True results and False for any False result \n check_attributes = all(result_list)\n \n if check_attributes:\n self.response = 'You have entered all required magics. You may now use your notebook.'\n else:\n self.response = '''You have these required magics remaining: %s. \\n''' % (\n ', '.join(map(str, missing_attributes)))\n\n # Display response to frontend before moving forward\n self.display_response(response=(self.response + '\\n'))\n\n return check_attributes", "title": "" }, { "docid": "737e40a0a8664efc7887671c63fb035f", "score": "0.47354758", "text": "def validateData(fields, data):", "title": "" }, { "docid": "ac22fc602608f6b44225bd7215e104f1", "score": "0.47349706", "text": "def test_createsList(self):\n options = DummyOptions()\n options.parseOptions(['--auth', 'memory'])\n self.assertEqual(len(options['credCheckers']), 1)\n options = DummyOptions()\n options.parseOptions(['--auth', 'memory', '--auth', 'memory'])\n self.assertEqual(len(options['credCheckers']), 2)", "title": "" }, { "docid": "2e31c2f22489d1fbd13bbc779eab6c16", "score": "0.47336", "text": "def valid_suits():\n return SUITLIST", "title": "" }, { "docid": "75f53503bf67f8dbf82ebb64098a6d72", "score": "0.47316247", "text": "async def test_user_input_device_not_found(hass, mrp_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"description_placeholders\"] == {\"devices\": \"`MRP Device (127.0.0.1)`\"}\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\"device_input\": \"none\"},\n )\n\n assert result2[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result2[\"errors\"] == {\"base\": \"no_devices_found\"}", "title": "" }, { "docid": "38c25f9c19835dd273d475b6a5a855c4", "score": "0.47176972", "text": "def validate_config(user_input: dict) -> dict:\n errors = {}\n # check file provider config\n if user_input.get(CONF_FILE_ENABLED):\n # check if music directory is valid\n music_dir = user_input.get(CONF_FILE_DIRECTORY)\n if music_dir and not os.path.isdir(music_dir):\n 
errors[CONF_FILE_DIRECTORY] = \"directory_not_exists\"\n if user_input.get(CONF_YTMUSIC_ENABLED):\n # check if user has cookie in password\n yt_pass = user_input.get(CONF_YTMUSIC_PASSWORD)\n if not CONF_YTMUSIC_PASSWORD or len(yt_pass) < 50:\n errors[CONF_YTMUSIC_PASSWORD] = \"yt_no_cookie\"\n return errors", "title": "" }, { "docid": "6cd87b0248a288b656ea956dd405a43f", "score": "0.4714048", "text": "def test_valid_ldr_values_list():\n ldr_list = [300, 487, 908, 920]\n assert i2c.is_valid_ldr_data(ldr_list) is True", "title": "" }, { "docid": "6778f5261a65757fb82d9bc96ffd7315", "score": "0.47136173", "text": "def test_validate_input_list(asset_keys):\n assert isinstance(validate_input(asset_keys), List)", "title": "" }, { "docid": "21e093f90a4e0f23b57e13db62e8b0b8", "score": "0.47127157", "text": "def validate(self, value) -> List[str]:\n errors = super().validate(value)\n if not self.allowed_values:\n return errors\n\n if value not in self.allowed_values:\n errors.append(f\"INVALID: Only {self.allowed_values} permitted (got: '{value}')\")\n\n return errors", "title": "" }, { "docid": "416ec8cc41b2ea31400cba47db296b7c", "score": "0.4710487", "text": "def _validate_register_in(self):\n registrator_apps = [app.register_in for app in self.apps if app.register_in]\n app_names = {app.name for app in self.apps}\n for registrator_app in registrator_apps:\n if registrator_app not in app_names:\n raise MalformedAppStackError('\"register_in\" field of some app points to a '\n 'nonexistent app: {}'.format(registrator_app))", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.47085688", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.47085688", "text": "def validate(self):\n pass", "title": "" }, { "docid": "e9cb911bb00165e296a721e9043d8f59", "score": "0.47058332", "text": "def test_summary_users_list(self):\n self.login()\n res = self.view_times()\n\n # Get the line of the div that the users list is in\n lines = res.data.split('\\n')\n users = [\"userone\", \"usertwo\", \"userthree\"]\n users_line = [l for l in lines if all(u in l for u in users)]\n assert users_line", "title": "" }, { "docid": "bc0a5c3d7a50cfbfd6d06f4d47e323df", "score": "0.47037026", "text": "def post_validate(self):", "title": "" }, { "docid": "d67bf3e7513984e4ac0f13418743f4f1", "score": "0.4701595", "text": "def validators(data):\r\n if iptools.ipv4.validate_ip(data['vip']) is False: return(\"Please enter valid virtual IP.\")\r\n if iptools.ipv4.validate_ip(data['mgmt_ip1']) is False: return(\"Please enter valid Rbridge 1 IP.\")\r\n if iptools.ipv4.validate_ip(data['mgmt_ip2']) is False: return(\"Please enter valid Rbridge 2 IP.\")\r\n if iptools.ipv4.validate_ip(data['nsx_cnt_ip']) is False: return(\"Please enter valid NSX Controller IP.\")\r\n if iptools.ipv4.validate_cidr(data['intf_ip']) is False: return(\"Please enter valid interface IP.\")\r\n if iptools.ipv4.validate_cidr(data['ve_ip']) is False: return(\"Please enter valid VE interface with its netmask IP.\")\r\n if iptools.ipv4.validate_cidr(data['vip_mask']) is False: return(\"Please enter valid virtual IP netmask.\")\r\n if switchportValidator(data['intf_name1'], data['rbridge_id1']) is False: return(\"Please enter valid switch port 1 of Rbridge 1.\")\r\n if switchportValidator(data['intf_name2'], data['rbridge_id2']) is False: return(\"Please enter valid switch port 2 of Rbridge 1.\")\r\n if intfValidator(data['intf_name']) is False: return(\"Please enter valid interface 
name.\")\r\n if rbridgeidValidator(data['rbridge_id1']) is False: return(\"Please enter valid Rbridge-1 id.\")\r\n if rbridgeidValidator(data['rbridge_id2']) is False: return(\"Please enter valid Rbridge-2 id.\")\r\n if data['username'] is False: return(\"Please enter valid VDX switch user name.\")\r\n if data['password'] is False: return(\"Please enter valid VDX switch password.\")\r\n if data['nsx_cnt_name'] is False: return(\"Please enter valid NSX controller name.\")\r\n if data['overlay_gateway_name'] is False: return(\"Please enter valid overlay gateway name.\")\r\n if data['vcenter_name'] is False: return(\"Please enter valid vCenter name.\")\r\n if data['vcenter_pass'] is False: return(\"Please enter valid vCenter password.\")\r\n if data['vcenter_user'] is False: return(\"Please enter valid vCenter user name.\")\r\n if data['vcenter_url'] is False: return(\"Please enter valid vCenter URL.\")\r\n if data['vcs_id'] is False: return(\"Please enter valid VCS id.\")\r\n if vlanValidator(data['ve_vlan']) is False: return(\"Please enter valid transport VLAN id.\")\r\n if vlanValidator(data['vlan']) is False: return(\"Please enter valid VLAN id.\")\r\n if portValidator(data['nsx_cnt_port']) is False: return(\"Please enter valid NSX controller port.\")\r\n if data['lswitch_name'] is False: return(\"Please enter valid logical switch name.\")\r\n if data['nsx_mgr_pass'] is False: return(\"Please enter valid NSX manager password.\")\r\n if data['nsx_mgr_user'] is False: return(\"Please enter valid NSX username.\")\r\n if portNameValidator(data['port_name']) is False: return(\"Please enter valid NSX controller port.\")\r\n if data['switch_name'] is False: return(\"Please enter valid switch name.\")\r\n if data['vtep_name'] is False: return(\"Please enter valid VTEP name.\")\r\n if data['intf_type'] is False: return(\"Please enter valid interface type.\")\r\n if iptools.ipv4.validate_ip(data['nsx_mgr_ip']) is False: return(\"Please enter valid NSX manager ip.\")\r\n \r\n return True", "title": "" }, { "docid": "6035f24f5f1478c90e29ad9060dbb114", "score": "0.470043", "text": "def validate_problem(self):", "title": "" }, { "docid": "5d4ca1d6bcf952a77bc782bd9c6a1494", "score": "0.46996865", "text": "def is_valid(cmd, list):\n if cmd[4:] in list:\n return True\n else:\n return False", "title": "" }, { "docid": "a4d36b3fed15f1d34be314cd4502ef70", "score": "0.46829766", "text": "def validate(self):\n pass # pragma: no cover", "title": "" }, { "docid": "7d506348abf2b0a5038f1a3eb0a2a97e", "score": "0.46807876", "text": "def validate_input(self):\n # VALIDATES TITLE ENTRY\n title = self.title_value.get()\n if title == '':\n cfg.ValidationError(self, text='Recipe title can\\'t be empty!', padx=10, pady=10)\n return\n # VALIDATES DESCRIPTION ENTRY\n desc = self.desc_value.get(1.0, tk.END)\n if len(desc) == 1:\n cfg.ValidationError(self, text='Recipe description can\\'t be empty!', padx=10, pady=10)\n return\n return title, desc", "title": "" }, { "docid": "9e1a9d657a71a4d80ca18c3745b095a6", "score": "0.46797657", "text": "def Validate(self, request, _):\n parser = HuntArgsParser(request)\n\n flow_runner_args, flow_args = parser.ParseFlowArgs()\n flow_runner_args.Validate()\n flow_args.Validate()\n\n hunt_runner_args = parser.ParseHuntRunnerArgs()\n hunt_runner_args.Validate()", "title": "" }, { "docid": "eed0b8acd9ec97d8f1158118cccea8c6", "score": "0.46793", "text": "def use_the_checker(self):\n self.products_list = self.checker.complete_check(self.products_list)", "title": "" }, { "docid": 
"92c2e5ea107b18ea0c3c32a47e61c988", "score": "0.46780813", "text": "def validate(self):\n pass;", "title": "" }, { "docid": "3e835ecb218e1e15f139b5f56f032a80", "score": "0.4676385", "text": "async def test_user_form_single_instance_allowed(\n hass: HomeAssistant, canary_config_flow\n) -> None:\n await init_integration(hass, skip_entry_setup=True)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER},\n data=USER_INPUT,\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"single_instance_allowed\"", "title": "" }, { "docid": "7e72ba97a2ddecc0cf433ae411cf006b", "score": "0.4676383", "text": "def _check_licenses(self, attribute, value):\n for license in value:\n if len(license) > MAX_LENGTH_LICENSE:\n self.value_error(\n f\"Each license in 'licenses' list must not be greater than \"\n f\"{MAX_LENGTH_LICENSE} characters\"\n )\n invalid_licenses = [id for id in value if not is_valid_license_id(id)]\n if invalid_licenses:\n self.value_error(\n \"Expecting 'license' to be a list of valid SPDX license \"\n \"identifiers, instead found invalid license identifiers: '{}' \"\n \"in 'license' value {}. \"\n \"For more info, visit https://spdx.org\".format(\", \".join(invalid_licenses), value)\n )", "title": "" } ]
67fbb6fcae3c99ed218d2119a82b1dfc
Search using the GeoLocation functionality of the Google API. This assumes the data it is given are in fact street signs. Intersections are marked by '&'.
[ { "docid": "e18b0729804c87403bd9a77406bd57f5", "score": "0.66954947", "text": "def _search_geolocation(g_maps, output_path, streets):\n\n combinations_to_try = __create_suffixes_combinations(streets)\n results = []\n for comb in combinations_to_try:\n geocode_results = g_maps.geocode(components={'route': comb})\n\n for res in geocode_results:\n if res not in results:\n results.append(res)\n\n # the best matches are those that google managed to match completely\n best = [res for res in results if not res.get('partial_match')]\n\n all_matches = True\n stop = False\n if len(best):\n print(\"[*] Best Matches:\")\n lats, lngs = __display_matches(best)\n\n while True: # allow user to choose his option\n inp = input(\"Type:\\tNUMBER to open map\\n\\tP to choose from Partial matches\"\n \"\\n\\tG to try to guess\\n\\tS to stop: \").upper()\n if inp == \"S\":\n stop = True\n break\n elif inp == \"P\":\n break\n elif inp == \"G\":\n all_matches = False\n break\n elif int(inp) > 0 and int(inp) - 1 <= len(best):\n # open the map\n i = int(inp) - 1\n __plot_point(lats[i], lngs[i], output_path, best[i][\"formatted_address\"])\n\n # show matches that Google matched partially (only part of given address)\n if not stop and all_matches and len(results):\n print(\"[*] Partial Matches\")\n lats, lngs = __display_matches(results)\n while True:\n inp = input(\"Type:\\tNUMBER to open map\\n\\tG to try to guess\\n\\tS to stop: \").upper()\n if inp == \"S\":\n stop = True\n break\n elif inp == \"G\":\n break\n elif int(inp) > 0 and int(inp) - 1 <= len(results):\n # open the map\n i = int(inp) - 1\n __plot_point(lats[i], lngs[i], output_path, results[i][\"formatted_address\"])\n return stop", "title": "" } ]
[ { "docid": "eceead8944cf2d534d88cf4bcd05bbf8", "score": "0.70246893", "text": "def search_location(streets, others, output_path):\n\n g_maps = googlemaps.Client(key=API_KEY)\n\n if len(streets):\n print(\"[+] Trying to match exact geo-location data...\") # Geocoding an address\n if _search_geolocation(g_maps, output_path, streets):\n return\n else:\n print(\"[X] No geo-data was found!\")\n\n print(\"[+] Guessing Area...\")\n guesses = []\n for o in others:\n complete = o.word.text + ', ' + ' & '.join([s.word.text for s in streets])\n find_results = g_maps.find_place(complete, 'textquery', fields=['geometry/location/lat',\n 'geometry/location/lng',\n 'formatted_address',\n 'name'])[\"candidates\"]\n for res in find_results:\n guesses.append(res) # will add duplicates, because that means intersection!\n\n close_guesses = []\n for pair in itertools.combinations(guesses, 2):\n if __measure_dist(pair[0][\"geometry\"][\"location\"][\"lat\"],\n pair[0][\"geometry\"][\"location\"][\"lng\"],\n pair[1][\"geometry\"][\"location\"][\"lat\"],\n pair[1][\"geometry\"][\"location\"][\"lng\"]) < 50: # less than 50 meters\n close_guesses.append(pair)\n\n if len(close_guesses):\n print(\"[+] Found some intersections!\")\n lats = [np.mean([pair[0][\"geometry\"][\"location\"][\"lat\"],\n pair[1][\"geometry\"][\"location\"][\"lat\"]]) for pair in close_guesses]\n lngs = [np.mean([pair[0][\"geometry\"][\"location\"][\"lng\"],\n pair[1][\"geometry\"][\"location\"][\"lng\"]]) for pair in close_guesses]\n for i in range(len(close_guesses)):\n print(\"\\t%d - %s and %s at (%.5fN, %.5fE)\" % (i + 1,\n close_guesses[i][0][\"formatted_address\"],\n close_guesses[i][0][\"formatted_address\"],\n lats[i], lngs[i]))\n\n # let user choose his option\n while True:\n inp = input(\"Type map NUMBER to open, or S to stop: \").upper()\n if inp == \"S\":\n return\n elif int(inp) > 0 and int(inp) - 1 <= len(close_guesses):\n # open the map\n i = int(inp) - 1\n __plot_point(lats[i], lngs[i], output_path,\n close_guesses[i][0][\"formatted_address\"] + ' and ' +\n close_guesses[i][0][\"formatted_address\"])", "title": "" }, { "docid": "8690fdc54dca14fd52ed730d5feba422", "score": "0.6524467", "text": "def search():\n\n # Remove Country Code from query (since using US locations only; erase if allowing other countries)\n query = request.args.get(\"q\").replace(\"US\", \" \")\n\n # Remove unnecessary decorators\n query = query.replace(\",\", \" \").replace(\"+\", \" \")\n\n # Split query into individual words\n query = query.split()\n\n # If query is single word, just copy it in words list\n words = []\n if len(query) == 1:\n words = query.copy()\n\n # If query is not single word, check if certain words should be concatenated\n else:\n\n # Create list and variable for storing concatenating words\n words = []\n concat = query[0]\n\n # Iterate over all the words\n for q in query[1:]:\n\n # Check whether the single word or concatenated word has any matches\n result = db.execute(\n \"SELECT COUNT(*) FROM places WHERE place_name LIKE :q OR admin_name1 LIKE :q OR admin_code1 LIKE :q OR postal_code LIKE :q\", q=concat + \" \" + q + \"%\")\n\n # If there is a match, concatenate the word\n if result[0][\"COUNT(*)\"] != 0:\n concat += \" \" + q\n\n # If adding the next word has no match, add finished concatenation and initialize new concatenation\n else:\n words.append(concat)\n concat = q\n\n # Add last word to list\n words.append(concat)\n\n # Create SQL SELECT command string for all of the words (concatenate into one single string)\n command = 
\"SELECT * FROM places WHERE\"\n\n # Append each command string for each words to find overlapping matches\n for i in range(len(words)):\n\n # Append SQL wildcard % to each word to use LIKE\n words[i] = words[i] + \"%\"\n\n # Enable overlapping by using \"AND\" (for intersection)\n if i != 0:\n command = command + \" AND \"\n\n # Append created argument to command string\n command = command + \"(place_name LIKE '\" + words[i] + \"' OR admin_name1 LIKE '\" + words[i] + \\\n \"' OR admin_code1 LIKE '\" + words[i] + \"' OR postal_code LIKE '\" + words[i] + \"')\"\n\n # Find overlapping matches of all of the words from the database (places table)\n rows = db.execute(command)\n\n # Return location data in JSON format\n return jsonify(rows)", "title": "" }, { "docid": "12a8389b234adfb1c8783d8f2078bc0d", "score": "0.64094424", "text": "def search(self, query, offset):\n \n def parse_geocode_json(site, url, query, results):\n \"\"\"Create a OpenSearch Response from Google Geoode results results.\n \n Google's Geocode search API returns results in JSON format. This function simply loads the JSON into memory and creates an equivalent representation that is OpenSearch compliant.\n \n Parameters:\n \n * site (str): search engine name\n * url (str): the url for the results that were retrieved to use as the OpenSearch link for the response\n * query (str): query search terms (n.b. not a OpenSearch Query object)\n * results (dict): results from service\n \n Returns:\n \n * puppy.model.OpenSearch.Response\n \n \"\"\"\n response = Response()\n response.version = 'json'\n response.feed.setdefault('title', \"{0}: {1}\".format(site, query))\n response.feed.setdefault('link', url)\n response.feed.setdefault('description', \"Search results for '{0}' at {1}\".format(query, site))\n response.namespaces.setdefault(\"opensearch\", \"http://a9.com/-/spec/opensearch/1.1/\")\n response.feed.setdefault(\"opensearch_startindex\", 0)\n \n for result in results:\n try:\n resultDict ={}\n resultDict['title'] = result['formatted_address']\n longTitle = ''\n for component in result['address_components']:\n longTitle += (component['long_name'] + ', ')\n resultDict['link'] = ''\n resultDict['longTitle'] = longTitle[:len(longTitle)-2]\n resultDict['lat'] = result['geometry']['location']['lat']\n resultDict['lon'] = result['geometry']['location']['lng']\n \n if 'bounds' in result['geometry']:\n resultDict['neBorderLat'] = result['geometry']['bounds']['northeast']['lat']\n resultDict['neBorderLon'] = result['geometry']['bounds']['northeast']['lng']\n resultDict['swBorderLat'] = result['geometry']['bounds']['southwest']['lat']\n resultDict['swBorderLon'] = result['geometry']['bounds']['southwest']['lng']\n resultDict['distanceAcross'] = self.calcDistance(resultDict['neBorderLat'], resultDict['swBorderLat'], resultDict['neBorderLon'], resultDict['swBorderLon'])\n resultDict['summary'] = \"{0} is found at: Latitude: {1}, Longitude: {2}. 
The area it covers is {3}km across (between the NE and SW corners).\".format(resultDict['title'], resultDict['lat'], resultDict['lon'], resultDict['distanceAcross'])\n else:\n resultDict['summary'] = \"{0} is found at: Latitude: {1}, Longitude: {2}.\".format(resultDict['title'], resultDict['lat'], resultDict['lon'])\n response.entries.append(resultDict)\n\n # If there is an arithmetic error pass on the result but note it for the user and the result in question\n except ArithmeticError, e:\n note = \"Arithmetic Error occured when calculating the distance across for a result.\"\n print \"An {0}\\nResult: {1}\\n\\n\".format(note, result)\n continue\n except Exception, e:\n print \"Skipping a result due to: {0} \\nWhen parsing a result from: {1}\\n\".format(e, url)\n continue\n\n # If the processing worked okay then set total results and items per page\n response.feed['opensearch_totalresults'] = len(response.entries)\n response.feed['opensearch_itemsperpage'] = len(response.entries)\n return response\n\n try:\n url = \"https://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor={1}\".format(urllib2.quote(query.search_terms), self.sensor) \n data = urllib2.urlopen(url).read()\n results = json.loads(data)\n return parse_geocode_json('Google Geocode', url, query.search_terms, results['results'])\n \n # urllib2 - this catches http errors due to the service being down, lack of a proxy etc\n except urllib2.URLError, e:\n raise SearchEngineError(\"Google Geocode\", e, errorType = 'urllib2', url = url)\n\n # Check for a type error for offset or a generic type error if offset is valid\n except TypeError, e:\n if isinstance(offset, int) == False:\n note = \"Please ensure that 'offset' is an integer.\"\n raise SearchEngineError(\"Google Geocode\", e, note = note, offsetType = type(offset))\n\n raise SearchEngineError(\"Google Geocode\", e, url = url)\n\t \n # Catch Attribute error which deals with unexpected none type for the objects the wrapper uses and other associated issues\n except AttributeError, e:\n raise SearchEngineError(\"Google Geocode\", e, url = url)", "title": "" }, { "docid": "db3fc4ecac9e9de878033fa470950c90", "score": "0.6338495", "text": "def search(request):\n address = request.GET.get('address')\n if not address:\n raise rest.RequestException(400, 'parameterMissing', 'Missing required paramer \"address\"')\n\n try:\n location = googlemaps.forwardGeocode(request.GET.get('address'))\n return JsonResponse({\n 'location': {\n 'latitude': location['lat'],\n 'longitude': location['lng']\n }\n })\n except googlemaps.GoogleException as e:\n raise rest.RequestException(500, 'upstreamError', e.message, e.debugMessage)", "title": "" }, { "docid": "10cdaf2f3788aa502e8a4eeccc7a7ac0", "score": "0.6302801", "text": "def search_administrative_boundaries():\n keyword = flask_request.args.get('keyword')\n\n # if kw is an id, get geometry directly\n if keyword.isdigit():\n region_id = int(keyword)\n # is a region_id\n query = f'''\n SELECT st_asgeojson(t.geom) as geojson from us_states t where state_id = {region_id}\n union\n SELECT st_asgeojson(t.geom) as geojson from us_counties t where county_id = {region_id}\n union\n SELECT st_asgeojson(t.geom) as geojson from us_cities t where city_id = {region_id}\n '''\n\n return make_response(jsonify([json.loads(geom) for geom, in Connection.sql_execute(query)]))\n\n else:\n # load abbreviation\n # default value is keyword itself if not found in dict\n keyword = us_states_abbr.get(keyword, keyword)\n\n search_state = \"SELECT st_asgeojson(t.geom) 
from us_states t where lower(state_name)=lower(%s)\"\n search_county = \"SELECT st_asgeojson(t.geom) from us_counties t where lower(county_name)=lower(%s) limit 1\"\n search_city = \"SELECT st_asgeojson(t.geom) from us_cities t where lower(city_name)=lower(%s) limit 1\"\n\n with Connection() as conn:\n cur = conn.cursor()\n results = None\n if not results:\n cur.execute(search_state, (keyword,))\n results = [json.loads(geom) for geom, in cur.fetchall()]\n if not results:\n cur.execute(search_county, (keyword,))\n results = [json.loads(geom) for geom, in cur.fetchall()]\n if not results:\n cur.execute(search_city, (keyword,))\n results = [json.loads(geom) for geom, in cur.fetchall()]\n cur.close()\n return make_response(jsonify(results))", "title": "" }, { "docid": "f1bb6a816f1c125bc323bc3f95dedc6d", "score": "0.6288417", "text": "def search(api_key, term, lat, long):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n #'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT,\n 'latitude': lat,\n 'longitude': long\n }\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "title": "" }, { "docid": "0bfae4e659a4b2b67f4b3f4cb1b5c706", "score": "0.6206883", "text": "def search(address):\n address = unquote_plus(address)\n lat, lng = get_coordinates(address)\n\n # These could be done in parallel...somehow?\n s_reps = state_reps(lat, lng)\n federal_reps = feds(lat, lng)\n return {\n 'location': (lat, lng),\n 'state': s_reps,\n 'federal': federal_reps\n }", "title": "" }, { "docid": "9e70da72c05b2df122b4b84f6189926e", "score": "0.61013454", "text": "def get_poi():\n lat = request.args.get('lat', type=float)\n lng = request.args.get('lng', type=float)\n url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n location = \"{},{}\".format(lat, lng)\n params = {\"key\": CONFIG.GOOGLE_KEY,\n \"location\": location,\n \"radius\": CONFIG.RADIUS,\n \"keyword\": CONFIG.FIND}\n # Requests encodes the comma with percent so a work around for that problem\n params_str = \"&\".join(\"{}={}\".format(k, v) for k, v in params.items())\n r = requests.get(url, params=params_str)\n return flask.jsonify(r.json()['results'])", "title": "" }, { "docid": "e54cfd1f761434e0385f740ceee87998", "score": "0.61007935", "text": "def geolocate(address_string, data):\n\n main_key = 'AIzaSyBFwN-7_erzpXeWWFe3DwMqSPKGoCjj1Hg'\n\n gmaps = googlemaps.Client(key=main_key)\n location = {}\n ed = False\n\n for feature in data['features']:\n if address_string.lower() == feature['properties']['NAME_TAG'].lower():\n location['lng'] = feature['properties']['LONGITUDE']\n location['lat'] = feature['properties']['LATITUDE']\n ed = True\n status = 1\n\n if not ed:\n # Add Ireland to search string to remove ambiguity\n address = address_string + ' Ireland'\n\n # Get results from gmaps API\n geocode_result = gmaps.geocode(address)\n\n # If a result is returned\n if len(geocode_result) > 0:\n # Set the location as the result\n location = geocode_result[0]['geometry']['location']\n status = 1\n else:\n status = 2\n\n # Check if location is within east/west boundary of Ireland\n if status is 1:\n if location['lng'] > -10.738539 and location['lng'] < -5.930445:\n pass\n else:\n status = 2\n\n # Check if location is within north/south boundary of Ireland\n if status is 1:\n if location['lat'] > 51.387652 and location['lat'] < 55.445918:\n pass\n else:\n status = 2\n\n # If a bad result, set default location\n if status is 2:\n location = {'lng': -6.2603, 'lat': 53.3498}\n\n return location, status", 
"title": "" }, { "docid": "9fcb489210cd5f3e79865dfcec819f42", "score": "0.6001553", "text": "def _geocode(self, data: list):\n self.geocoder.check_api_key()\n return self.geocoder.geocode(data)", "title": "" }, { "docid": "a7f27a34671594a5d54f424c5ffe66fc", "score": "0.58989286", "text": "def search(bounds):\n \n url_params = {\n 'bounds': bounds\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)", "title": "" }, { "docid": "bde137c478a463d512dc38b53c99c67f", "score": "0.588142", "text": "def search_by_location(self, search: AdapterLocationSearch) -> List[AdapterHotel]:", "title": "" }, { "docid": "4fe4a77a5e803c2df068dbadb61b3b51", "score": "0.58781797", "text": "def search_business(api_key, latitude, longitude, offset):\n\n url_params = {\n 'latitude': latitude, \n 'longitude': longitude,\n 'limit': SEARCH_LIMIT, \n 'offset': offset\n }\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "title": "" }, { "docid": "b510c45a798275daa1a8214db2579946", "score": "0.58542925", "text": "def places_locate():\n\n return render_template('location-search.html',\n google_api_key=google_api_key)", "title": "" }, { "docid": "6080bbd5db757c2d2ed4524828f405b1", "score": "0.5846148", "text": "def search_coordinate(self,data):\n logging.info(\"parpare to run search_coordinate\")\n\n results,image_urls=self.map_engine.search_coordinate(data)\n self.save_image_urls_to_database(image_urls)\n if len(image_urls)!=0:\n results=json.dumps((results),len(image_urls))\n return results", "title": "" }, { "docid": "0b2ba84b3c14fca35b27a07a5d390f97", "score": "0.5816496", "text": "def _construct_positional_search_query(self, x: float, y: float):\n\t\tbase_string = 'http://transport.opendata.ch/v1/locations?'\n\t\tfull_string = base_string + 'x=' + str(x) + '&y=' + str(y)\n\t\treturn full_string", "title": "" }, { "docid": "c94f59c93684232a3a9c99d7f2aef2df", "score": "0.5773805", "text": "def call_google_maps_positionnement():\n search_url = \"https://maps.googleapis.com/maps/api/place/textsearch/json\"\n search_payload = {\"key\": \"AIzaSyC4v_YJVsNLXGa0pXP6U3Lwp8WHPi1fnsc\", \"query\": \"openclassrooms\"}\n search_req = requests.get(search_url, params=search_payload)\n search_json = search_req.json()\n\n with open('gmaps_data.json', 'w') as fp:\n json.dump(search_json, fp)", "title": "" }, { "docid": "4d670eef8d474b442e7efcc3d9ad3af5", "score": "0.5744734", "text": "def search(term, location, start_lat, start_long, radius_filter, sort):\n \n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'cll': str(start_lat)+\",\"+str(start_long),\n 'limit': 10,\n 'sort': 0,\n 'radius_filter': radius_filter,\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)", "title": "" }, { "docid": "74012471aec5669513823d9762509e84", "score": "0.5734593", "text": "def googleplaces(lat,\n lng,\n radius_metres,\n search_term,\n key,\n pagetoken=None,\n nmbr_returned=0):\n location = urllib.parse.quote(\"%.5f,%.5f\" % (lat,lng))\n radius = float(radius_metres)\n name = urllib.parse.quote(str(search_term))\n \n if types:\n search_url = ('https://maps.googleapis.com/maps/api/place/' + 'nearbysearch' +\n '/json?location=%s&radius=%d&keyword=%s&type=%s&key=%s') % (location, radius, name, types, key) \n else:\n search_url = ('https://maps.googleapis.com/maps/api/place/' + 'nearbysearch' +\n '/json?location=%s&radius=%d&keyword=%s&key=%s') % (location, radius, name, key)\n if pagetoken is not None:\n search_url += '&pagetoken=%s' % pagetoken\n # SLEEP so 
that request is generated\n time.sleep(2)\n \n time.sleep(0.1)\n req_count.increment_key()\n \n google_search_request = requests.get(search_url)\n search_json_data = google_search_request.json()\n \n \n if search_json_data['status'] == 'OK':\n nmbr_returned += len(search_json_data['results'])\n for place in search_json_data['results']:\n try:\n shop = [place['name'].encode('ascii', 'ignore').decode('ascii'),\n place['vicinity'].encode('ascii', 'ignore').decode('ascii'),\n place['geometry']['location']['lat'],\n place['geometry']['location']['lng'],\n place['types'],\n place['place_id']]\n except:\n shop = [place['name'].encode('ascii', 'ignore').decode('ascii'),\n place['vicinity'].encode('ascii', 'ignore').decode('ascii'),\n place['geometry']['location']['lat'],\n place['geometry']['location']['lng'],\n place['types'],\n place['place_id']]\n if shop not in shops_list:\n shops_list.append(shop)\n # Possible to get up to 60 results\n # from one search by passing next_page_token\n try:\n next_token = search_json_data['next_page_token']\n googleplaces(lat=lat,\n lng=lng,\n radius_metres=radius_metres,\n search_term=search_term,\n key=key,\n pagetoken=next_token,\n nmbr_returned=nmbr_returned)\n return\n except KeyError:\n pass\n elif search_json_data['status'] == 'ZERO_RESULTS':\n pass\n else:\n try:\n print('Error: %s' % search_json_data['error_message'])\n except KeyError:\n print('Unknown error message - check URL')\n \n debug_list.append([lat, lng, nmbr_returned])", "title": "" }, { "docid": "15c0a61cfcb5ccd00668256eda0624fc", "score": "0.5707262", "text": "def get_geoloc_from(request,cities=False,other=False,all_mention=False): #### Extraction of geolocation information from a sentence\n \n def check_dict(request,exact_mention,real_name,geoloc_dict): #### Save in the dictionnary the real name instead of the exact mention that could be wrong\n \"\"\"This function is used save in a dictionary the real name of a city or country instead of the exact mention\n Description :\n This function will save in the dictionary gave as argument the exact mention of he geolocation information and if the argument all_mention is True it will save the number of times it appear in the request(string)\n Args :\n request (string) :\n A string to search exact_mention in\n exact_mention (string) : \n The exact mention of the entity\n real_name (string) :\n The real name of the entity\n geoloc_dict (dictionary) :\n A dictionary to store the information\n Return :\n No return\n \"\"\"\n if all_mention:\n nb_of_mention=len(re.findall(r'^'+exact_mention+'[^a-zA-Z0-9]|[^a-zA-Z0-9]'+exact_mention+'[^a-zA-Z0-9]|[^a-zA-Z0-9]'+exact_mention+'$',request))\n else:\n nb_of_mention=1\n if real_name not in geoloc_dict:\n geoloc_dict[real_name]=nb_of_mention\n else:\n geoloc_dict[real_name]+=nb_of_mention\n\n def check_in_dict(dico): #### Use to look for key in a dict and then save it if key in it\n \"\"\"This function will go through a dictionary (dico) to find if there is any ey in the request\n Description :\n Here the function will go through the dictionary pass as an argument to check if any (abreviation/iso2/iso3) are in the request (string).\n If it is, it calls check_dict() function to save the real name (value) based on the exact mention (key)\n Args : \n dico (dictionary) :\n A dictionary with a pattern as a key and real name as value\n Return :\n No return\n \"\"\"\n for abrev in dico:\n if abrev not in geoloc_met and 
re.search(r'^'+abrev+'[^a-zA-Z0-9]|[^a-zA-Z0-9]'+abrev+'[^a-zA-Z0-9]|[^a-zA-Z0-9]'+abrev+'$',request):\n check_dict(request,abrev,dico[abrev],geoloc_dict)\n geoloc_met.append(abrev)\n if not all_mention:\n return\n\n geoloc={\n \"Countries\":countries_list,\n \"Cities\":cities_list,\n \"iso2\":countries_iso2,\n \"iso3\":countries_iso3}\n geoloc_dict={}\n if cities:\n geoloc_geoname=geoloc[\"Cities\"]\n else:\n geoloc_geoname=geoloc[\"Countries\"]\n geoloc_met=[]\n geoloc_info=[]\n spacy_aff=nlp(request)\n \n for entities in spacy_aff.ents: \n if entities.label_ =='GPE' and entities.text not in geoloc_met and not re.search(r'[^0-9a-zA-Z\\s]',entities.text):\n geoloc_info.append(entities.text)\n if other and entities.text not in geoloc[\"Countries\"] and entities.text not in geoloc[\"Cities\"] and entities.text not in geoloc[\"iso2\"] and entities.text not in geoloc[\"iso3\"]:\n check_dict(request,entities.text,entities.text,geoloc_dict)\n geoloc_met.append(entities.text)\n elif not other:\n entity=entities.text[0].upper()+entities.text[1:].lower()\n if entities.text in geoloc_geoname:\n check_dict(request,entities.text,entities.text,geoloc_dict)\n geoloc_met.append(entities.text)\n elif entity in geoloc_geoname:\n check_dict(request,entities.text,entity,geoloc_dict)\n geoloc_met.append(entities.text)\n else:\n for w in entities.text.split():\n if w in geoloc_geoname:\n check_dict(request,w,w,geoloc_dict)\n geoloc_met.append(w)\n \n if not cities and not other:\n check_in_dict(abrevs)\n # check_in_dict(countries_iso3)\n # check_in_dict(countries_iso2)\n\n if cities and len(geoloc_met)<1:\n for w in re.findall(r'[\\w]+',request):\n if w in geoloc[\"Cities\"]:\n check_dict(request,w,w,geoloc_dict)\n geoloc_met.append(w)\n \n if not cities and not other:\n if len(geoloc_met)>0 and not all_mention:\n return geoloc_dict\n for info in geoloc_info:\n if info.lower().title() in countries_list:\n check_dict(request,info,info.lower().title(),geoloc_dict)\n geoloc_met.append(info)\n return geoloc_dict\n for w in re.findall(r'[\\w]+',request):\n if w in geoloc_geoname:\n check_dict(request,w,w,geoloc_dict)\n geoloc_met.append(w)\n if len(geoloc_met)>0 and not all_mention:\n return geoloc_dict\n return geoloc_dict", "title": "" }, { "docid": "ed506c2ad514f97be6396491c3a74cd5", "score": "0.5678127", "text": "def point_of_interest_search(poi):\n url = configs.get_api_url(configs.GOOGLE_CONFIGS) + \\\n configs.get_api_path(configs.GOOGLE_CONFIGS, 'placeSearch')\n params = {'key': configs.get_api_key(configs.GOOGLE_CONFIGS), \n 'input': poi.replace(' ', '%20'), \n 'inputtype': 'textquery',\n 'fields': 'place_id,formatted_address,geometry,name'}\n response = requests.get(url, params)\n\n #error check when response status != 200\n\n json = response.json()\n return place.PointOfInterest(json['candidates'][0]['name'],\n json['candidates'][0]['place_id'],\n json['candidates'][0]['geometry']['location']['lat'],\n json['candidates'][0]['geometry']['location']['lng'],\n json['candidates'][0]['formatted_address'])", "title": "" }, { "docid": "c5790e7f5b3bb81a0a67291db048d74b", "score": "0.5649655", "text": "def search(begin, end, geojson, latlon, address, path, row, clouds, sensors,\n tiers, slcoff, output):\n if geojson:\n geom = _geom_from_geojson(geojson)\n elif latlon:\n y, x = latlon\n geom = Point(x, y)\n elif address:\n geom = _geom_from_address(address)\n else:\n geom = None\n\n # If only year provided, set default month and day\n if len(begin) == 4:\n begin += '-01-01'\n if len(end) == 4:\n end += 
'-01-01'\n\n if sensors:\n sensors = [s.strip() for s in sensors.split(',')]\n if tiers:\n tiers = [t.trip() for t in tiers.split(',')]\n\n catalog = Catalog()\n scenes = catalog.search(begin, end, path, row, geom, clouds, sensors,\n tiers, slc=not slcoff)\n\n if output:\n _to_csv(scenes, output)\n else:\n for scene in scenes:\n click.echo(scene['product_id'])", "title": "" }, { "docid": "214b59c134addadf5eca873fae1a43c6", "score": "0.5641458", "text": "def scrape_google(tuple_list, lifeline_num, api_key):\n names = []\n categories = []\n lat = []\n long = []\n lifeline = []\n search_term_list=[]\n for n in tuple_list:\n for i in n[0]:\n search_term_list.append(i)\n for search_term in search_term_list:\n query_result = api_key.nearby_search(\n location='Houston, Texas', keyword=search_term,\n radius=40000)\n\n for place in query_result.places:\n names.append(place.name)\n categories.append(search_term)\n lat.append(place.geo_location['lat'])\n long.append(place.geo_location['lng'])\n lifeline.append(lifeline_num)\n\n print(f'got 20 for search term: {search_term}')\n time.sleep(2)\n def component_builder(tuple_list):\n def unpack(cell):\n for n in tuple_list:\n for i in n[0]:\n if cell == i:\n return n[1]\n return df['Category'].map(unpack)\n\n df = pd.DataFrame(columns=['Business'])\n df['Business'] = names\n df['Category'] = categories\n df['Source'] = 'Google'\n df['Latitude'] = lat\n df['Longitude'] = long\n df['Lifeline'] = lifeline\n df['Component'] = component_builder(tuple_list)\n\n return df", "title": "" }, { "docid": "804c7c22714ab2f60f252e096983b0c9", "score": "0.5630255", "text": "def basic_search(self, params):\n\t\tself.search(params)\n\t\ttry:\n\t\t\tself.extract_json_data()\n\t\texcept:\n\t\t\tself.extract_html_data()\n\t\t# the following is for logging only\n\t\tfor trip in self.trips:\n\t\t\tfor seg in trip:\n\t\t\t\tif params.buckets:\n\t\t\t\t\tseg.search_buckets(params.buckets)\n\t\t\t\ttry:\n\t\t\t\t\tprint(seg.condensed_repr())\n\t\t\t\texcept:\n\t\t\t\t\tprint(seg)\n\t\t\tprint('---')\n\t\treturn self.trips", "title": "" }, { "docid": "e3fcea4d601e021928ec82bb14523671", "score": "0.562118", "text": "def step05_search_geo(self):\n path = \"{0}?km=10&from=44.59641,-123.25022\".format(reverse(\"company-geosearch-list\"))\n _logger.info(\"Path: {}\".format(path))\n response = self.api_client.get(path)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"total\"], 1)\n self.assertEqual(response.data[\"results\"][0][\"name\"], self.companies[1][\"name\"])\n super(GetAllCompanyTest, self).assert_equal_dicts(\n response.data[\"results\"][0], self.companies[1], [\"_id\", \"created_at\", \"updated_at\", \"score\"]\n )", "title": "" }, { "docid": "2e4fb5865819444c9d078fc2a35b1a6d", "score": "0.56133574", "text": "def runsearch():\n print(\"%d Keys Remaining\" % (len(API_KEY)-1))\n for partition in coord.coordset:\n # Keys have a life-span of 1000 requests\n key_number = (req_count.keynum // 1000)\n req_count.increment_partition()\n \n googleplaces(lat=partition[0],\n lng=partition[1],\n radius_metres=RADIUS_KM*1000,\n search_term=COMPANY_SEARCH,\n key=API_KEY[key_number])\n \n # OUTPUT to CSV\n f = open(SAVE_PATH + COMPANY_SEARCH + '_python_mined.csv', 'w', newline='')\n w = csv.writer(f)\n #w.writerow(['Name', 'Partial Address', 'Latitude', 'Longitude', 'Google Place Tags', 'Google Place ID', 'Phone', 'Full Address', 'Website', 'Open Status'])\n for one_store in shops_list:\n w.writerow(one_store)\n f.close()\n \n # OUTPUT LOG to 
CSV\n f = open(SAVE_PATH + 'log_' + COMPANY_SEARCH + '_python_mined.csv', 'w', newline='')\n w = csv.writer(f)\n for debug_result in debug_list:\n w.writerow(debug_result)\n f.close()\n \n # DETAIL SEARCH\n fillindetails()", "title": "" }, { "docid": "21d3b2249d3bc21f2f3c9c587776b768", "score": "0.56131846", "text": "def bulk_search_variants_by_coordinates(sorted_queries, search_mode='any'):\n\n def is_sorted(prev_q, current_q):\n if prev_q['chr'] < current_q['chr']:\n return True\n if prev_q['chr'] > current_q['chr']:\n return False\n if prev_q['start'] < current_q['start']:\n return True\n if prev_q['start'] > current_q['start']:\n return False\n if prev_q['stop'] < current_q['stop']:\n return True\n if prev_q['stop'] > current_q['stop']:\n return False\n return True\n\n ct_pointer = 0\n query_pointer = 0\n last_query_pointer = -1\n match_start = None\n ct = MODULE.COORDINATE_TABLE\n matches = defaultdict(list)\n Match = namedtuple('Match', ct.columns)\n\n def append_match(matches_list, query, ct_row):\n matches_list[query].append(Match(**ct_row.to_dict()))\n\n while query_pointer < len(sorted_queries) and ct_pointer < len(ct):\n if last_query_pointer != query_pointer:\n q = sorted_queries[query_pointer]\n if q.build != 'GRCh37':\n raise ValueError(\"Bulk coordinate search only supports build GRCh37\")\n if match_start is not None:\n ct_pointer = match_start\n match_start = None\n last_query_pointer = query_pointer\n c = ct.iloc[ct_pointer]\n q_chr = str(q.chr)\n c_chr = c.chr\n if q_chr < c_chr:\n query_pointer += 1\n continue\n if q_chr > c_chr:\n ct_pointer += 1\n continue\n q_start = int(q.start)\n c_start = c.start\n q_stop = int(q.stop)\n c_stop = c.stop\n if q_start > c_stop:\n ct_pointer += 1\n continue\n if q_stop < c_start:\n query_pointer += 1\n continue\n if search_mode == 'any':\n append_match(matches, q, c)\n elif search_mode == 'exact' and q_start == c_start and q_stop == c_stop:\n q_alt = q.alt\n c_alt = c.alt\n q_ref = q.ref\n c_ref = c.ref\n if q_alt == '-':\n raise ValueError(\"Unexpected alt `-` in coordinate query. Did you mean `None`?\")\n if q_ref == '-':\n raise ValueError(\"Unexpected ref `-` in coordinate query. 
Did you mean `None`?\")\n if (not (q_alt != '*' and q_alt != c_alt)) and (not (q_ref != '*' and q_ref != c_ref)):\n append_match(matches, q, c)\n elif search_mode == 'query_encompassing' and q_start <= c_start and q_stop >= c_stop:\n append_match(matches, q, c)\n elif search_mode == 'record_encompassing' and c_start <= q_start and c_stop >= q_stop:\n append_match(matches, q, c)\n if match_start is None:\n match_start = ct_pointer\n ct_pointer += 1\n return dict(matches)", "title": "" }, { "docid": "6927573abbe7403aff45022dc197f6c9", "score": "0.5611515", "text": "def alert_search(self, params):\n\t\tself.search(params)\n\t\ttry:\n\t\t\tself.extract_json_data()\n\t\texcept:\n\t\t\tself.extract_html_data()\n\t\tfound_segs = []\n\t\tfor t in self.trips:\n\t\t\tif params.nonstop and len(t) > 1:\n\t\t\t\tcontinue\n\t\t\tfor seg in t:\n\t\t\t\tseg.format_deptime()\n\t\t\t\tdeptime = 100*seg.depart_datetime.hour + seg.depart_datetime.minute\n\t\t\t\tseg.search_buckets(params.buckets)\n\t\t\t\tsearch_times = params.flightno.split(':')\n\t\t\t\tif len(search_times) == 2:\n\t\t\t\t\tsearch_times[0] = search_times[0] or '0000'\n\t\t\t\t\tsearch_times[1] = search_times[1] or '2359'\n\t\t\t\tif (\n\t\t\t\t\tnot params.flightno\n\t\t\t\t) or (\n\t\t\t\t\tseg.flightno in params.flightno\n\t\t\t\t) or (\n\t\t\t\t\tlen(search_times) == 2 and \n\t\t\t\t\t(\n\t\t\t\t\t\tdeptime >= int(search_times[0]) or \n\t\t\t\t\t\tseg.depart_datetime.day > seg.search_datetime.day\n\t\t\t\t\t) and (\n\t\t\t\t\t\tdeptime <= int(search_times[1]) and\n\t\t\t\t\t\tseg.search_datetime.day == seg.depart_datetime.day\n\t\t\t\t\t)\n\t\t\t\t):\n\t\t\t\t\tfound_segs.append(seg)\n\t\tif len(found_segs)==0:\n\t\t\traise Exception('No results found for '+str(params))\n\t\treturn found_segs", "title": "" }, { "docid": "7beae665cce12e350e4fb3aa2a0d6e77", "score": "0.5608779", "text": "def get_spatial_join_search_results(es, ranges, dataset_id=None,\n query_args=None):\n\n filter_query = list()\n if dataset_id:\n filter_query.append(\n {'term': {'dataset_id': dataset_id}}\n )\n\n should_query = list()\n coverage = sum([\n (range_[1][0] - range_[0][0]) * (range_[0][1] - range_[1][1])\n for range_ in ranges])\n for i, range_ in enumerate(ranges):\n should_query.append({\n 'nested': {\n 'path': 'ranges',\n 'query': {\n 'function_score': {\n 'query': {\n 'geo_shape': {\n 'ranges.range': {\n 'shape': {\n 'type': 'envelope',\n 'coordinates': [\n [range_[0][0], range_[0][1]],\n [range_[1][0], range_[1][1]]\n ]\n },\n 'relation': 'intersects'\n }\n }\n },\n 'script_score': {\n 'script': {\n 'params': {\n 'min_lon': range_[0][0],\n 'max_lat': range_[0][1],\n 'max_lon': range_[1][0],\n 'min_lat': range_[1][1],\n 'coverage': coverage\n },\n 'source': '''\n double n_min_lon = Math.max(doc['ranges.min_lon'].value, params.min_lon);\n double n_max_lat = Math.min(doc['ranges.max_lat'].value, params.max_lat);\n double n_max_lon = Math.min(doc['ranges.max_lon'].value, params.max_lon);\n double n_min_lat = Math.max(doc['ranges.min_lat'].value, params.min_lat);\n return ((n_max_lon - n_min_lon) * (n_max_lat - n_min_lat)) / params.coverage;'''\n }\n },\n 'boost_mode': 'replace'\n }\n },\n 'inner_hits': {\n '_source': False,\n 'size': 100,\n 'name': 'range-{0}'.format(i)\n },\n 'score_mode': 'sum'\n }\n })\n\n body = {\n '_source': {\n 'excludes': [\n 'name',\n 'dataset_name',\n 'dataset_description',\n 'ranges'\n ]\n },\n 'query': {\n 'function_score': {\n 'query': {\n 'bool': {\n 'filter': filter_query,\n 'should': should_query,\n 'minimum_should_match': 
1\n }\n },\n 'functions': [] if not query_args else query_args,\n 'score_mode': 'sum',\n 'boost_mode': 'multiply'\n }\n }\n }\n\n # logger.info(\"Query (spatial): %r\", body)\n\n return es.search(\n index='datamart_spatial_coverage',\n body=body,\n size=TOP_K_SIZE\n )['hits']['hits']", "title": "" }, { "docid": "55bd9dc4eb1c1099ca07bef5d58fd117", "score": "0.5594724", "text": "def geocoding(adrs):\n geolocator = Nominatim(user_agent=\"api\")\n location = None\n while(location == None):\n print(\"location not found\")\n location = geolocator.geocode(adrs)\n print(location.latitude, location.longitude)", "title": "" }, { "docid": "c3fdc4124c326644479520a2d316bdcc", "score": "0.5594013", "text": "def search(query):", "title": "" }, { "docid": "41228baf24462c70fe3f6ef812fae058", "score": "0.5588693", "text": "def testStructuredAddress(self):\n request = clients.GeocodeRequest()\n request.country = \"United States\"\n request.state = \"California\"\n request.city = \"San Francisco\"\n request.locality = \"Sausalito\"\n request.street = \"66 Starbuck Drive, Muir Beach\"\n\n response = self._api.geocode([request])\n\n assert len(response) == 1\n assert response[0].code == \"SFO\"\n assert response[0].latitude == 37.85909\n assert response[0].longitude == -122.48525", "title": "" }, { "docid": "6ab2f1756068143f996942659de20b8a", "score": "0.5562895", "text": "def search(MY_API_KEY, term, location):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n\t\t'sort_by': 'distance',\n # 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, MY_API_KEY, url_params=url_params)", "title": "" }, { "docid": "2c5141070fe75f5d8df5fc853d333641", "score": "0.5549206", "text": "def get(self, request, latlong, format=None):\n # Define data.\n places_api_key = os.environ['RYR_COLLECTOR_GOOGLE_PLACES_API_KEY']\n\n # Prepare client.\n client = CollectorClient('google', api_key=places_api_key)\n client.authenticate()\n\n # Retrieve search results.\n search_results = client.search_place(latlong)\n return Response(search_results)", "title": "" }, { "docid": "3598b809f1c1aaa79060d866f7638bb4", "score": "0.55477417", "text": "def search(self, paths_rows=None, lat=None, lon=None, address=None, start_date=None, end_date=None, cloud_min=None,\n cloud_max=None, limit=1, geojson=False):\n\n search_string = self.query_builder(paths_rows, lat, lon, address, start_date, end_date, cloud_min, cloud_max)\n\n # Have to manually build the URI to bypass requests URI encoding\n # The api server doesn't accept encoded URIs\n\n r = requests.get('%s?search=%s&limit=%s' % (self.api_url, search_string, limit))\n\n r_dict = json.loads(r.text)\n result = {}\n\n if 'error' in r_dict:\n result['status'] = u'error'\n result['code'] = r_dict['error']['code']\n result['message'] = r_dict['error']['message']\n\n elif 'meta' in r_dict:\n if geojson:\n result = {\n 'type': 'FeatureCollection',\n 'features': []\n }\n for r in r_dict['results']:\n feature = {\n 'type': 'Feature',\n 'properties': {\n 'sceneID': r['sceneID'],\n 'row': three_digit(r['row']),\n 'path': three_digit(r['path']),\n 'thumbnail': r['browseURL'],\n 'date': r['acquisitionDate'],\n 'cloud': r['cloud_coverage']\n },\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [\n [r['upperLeftCornerLongitude'], r['upperLeftCornerLatitude']],\n [r['lowerLeftCornerLongitude'], r['lowerLeftCornerLatitude']],\n [r['lowerRightCornerLongitude'], r['lowerRightCornerLatitude']],\n [r['upperRightCornerLongitude'], 
r['upperRightCornerLatitude']],\n [r['upperLeftCornerLongitude'], r['upperLeftCornerLatitude']]\n ]\n ]\n }\n }\n\n result['features'].append(feature)\n\n else:\n result['status'] = u'SUCCESS'\n result['total'] = r_dict['meta']['found']\n result['limit'] = r_dict['meta']['limit']\n result['total_returned'] = len(r_dict['results'])\n result['results'] = [{'sceneID': i['sceneID'],\n 'sat_type': u'L8',\n 'path': three_digit(i['path']),\n 'row': three_digit(i['row']),\n 'thumbnail': i['browseURL'],\n 'date': i['acquisitionDate'],\n 'cloud': i['cloud_coverage']}\n for i in r_dict['results']]\n\n return result", "title": "" }, { "docid": "d40db15b618c2f057b3fca59fea0f3c3", "score": "0.5546621", "text": "def test_geocode():\n myplace = script.ApiGoogle(\"25320 TORPES\")\n myplace.geocode()\n assert myplace.found == True\n assert myplace.city == \"25320 Torpes, France\"\n assert myplace.lat == 47.169692\n assert myplace.lng == 5.891695899999999", "title": "" }, { "docid": "f48beac76e3ae34582c78d7b8959b74e", "score": "0.5543705", "text": "def test_search_by_address(self):\n q = self.EMPTY_SEARCH_QUERY.copy()\n q['query'] = '60 Spear Street'\n q['distance'] = 3.0\n response = self.client.post('/', q)\n self.assertContains(response, 'Nine Months', count=2, status_code=200)", "title": "" }, { "docid": "60fb32574a1bccd43a07f66ab7f9ebf8", "score": "0.55379057", "text": "def search_locations():\n\n # See if there is a query\n query = request.args.get('query')\n if query:\n # Search for the query\n locations = [i.name for i in Location.query.filter(Room.name.like('%' + query + '%'))]\n else:\n # Respond with JSON of all organizations\n locations = [i.name for i in Location.query.all()]\n\n # Return the JSON response\n return jsonify(locations)", "title": "" }, { "docid": "2ffc119d8e55256817e575965cac87e9", "score": "0.5537342", "text": "def search(term, location, num_results):\n if isinstance(term, unicode):\n term = unicode(unidecode(term))\n if isinstance(location, unicode):\n location = unicode(unidecode(location))\n \n print '----------------------------'\n print term, type(term)\n print location, type(location)\n print '----------------------------'\n url_params = {\n 'term': term,\n 'location': location,\n 'limit': num_results\n }\n\n return yelp(API_HOST, SEARCH_PATH, url_params=url_params)", "title": "" }, { "docid": "db5c23a61eaca2675aa9349ed48b6288", "score": "0.55329746", "text": "def main():\n # url = f\"{MAPQUEST_BASE_URL}?key={MAPQUEST_API_KEY}&location=Babson%20College\"\n # pprint(get_json(url))\n\n location = input(\"Please enter a place:\")\n # print(get_lat_long(location))\n # pprint(get_nearest_station(42.34461, -71.10411)) # fenway park\n\n print(find_stop_near(location))\n # print(find_stop_near(\"Boston Common\"))", "title": "" }, { "docid": "a0d4424d77a9c3bff4ef283ed95c16ef", "score": "0.55255914", "text": "def parse_geocode_json(site, url, query, results):\n response = Response()\n response.version = 'json'\n response.feed.setdefault('title', \"{0}: {1}\".format(site, query))\n response.feed.setdefault('link', url)\n response.feed.setdefault('description', \"Search results for '{0}' at {1}\".format(query, site))\n response.namespaces.setdefault(\"opensearch\", \"http://a9.com/-/spec/opensearch/1.1/\")\n response.feed.setdefault(\"opensearch_startindex\", 0)\n \n for result in results:\n try:\n resultDict ={}\n resultDict['title'] = result['formatted_address']\n longTitle = ''\n for component in result['address_components']:\n longTitle += (component['long_name'] + ', ')\n 
resultDict['link'] = ''\n resultDict['longTitle'] = longTitle[:len(longTitle)-2]\n resultDict['lat'] = result['geometry']['location']['lat']\n resultDict['lon'] = result['geometry']['location']['lng']\n \n if 'bounds' in result['geometry']:\n resultDict['neBorderLat'] = result['geometry']['bounds']['northeast']['lat']\n resultDict['neBorderLon'] = result['geometry']['bounds']['northeast']['lng']\n resultDict['swBorderLat'] = result['geometry']['bounds']['southwest']['lat']\n resultDict['swBorderLon'] = result['geometry']['bounds']['southwest']['lng']\n resultDict['distanceAcross'] = self.calcDistance(resultDict['neBorderLat'], resultDict['swBorderLat'], resultDict['neBorderLon'], resultDict['swBorderLon'])\n resultDict['summary'] = \"{0} is found at: Latitude: {1}, Longitude: {2}. The area it covers is {3}km across (between the NE and SW corners).\".format(resultDict['title'], resultDict['lat'], resultDict['lon'], resultDict['distanceAcross'])\n else:\n resultDict['summary'] = \"{0} is found at: Latitude: {1}, Longitude: {2}.\".format(resultDict['title'], resultDict['lat'], resultDict['lon'])\n response.entries.append(resultDict)\n\n # If there is an arithmetic error pass on the result but note it for the user and the result in question\n except ArithmeticError, e:\n note = \"Arithmetic Error occured when calculating the distance across for a result.\"\n print \"An {0}\\nResult: {1}\\n\\n\".format(note, result)\n continue\n except Exception, e:\n print \"Skipping a result due to: {0} \\nWhen parsing a result from: {1}\\n\".format(e, url)\n continue\n\n # If the processing worked okay then set total results and items per page\n response.feed['opensearch_totalresults'] = len(response.entries)\n response.feed['opensearch_itemsperpage'] = len(response.entries)\n return response", "title": "" }, { "docid": "e11cadda1852fb7b60f82cbcc05c776f", "score": "0.551764", "text": "def get_google_results(address, api_key=None, return_full_response=False):\r\n # Set up your Geocoding url\r\n geocode_url = \"https://maps.googleapis.com/maps/api/geocode/json?address={}\".format(address)\r\n if api_key is not None:\r\n geocode_url = geocode_url + \"&key={}\".format(api_key)\r\n\r\n # Ping google for the reuslts:\r\n results = requests.get(geocode_url)\r\n # Results will be in JSON format - convert to dict using requests functionality\r\n results = results.json()\r\n\r\n # if there's no results or an error, return empty results.\r\n if len(results['results']) == 0:\r\n output = {\r\n \"formatted_address\": None,\r\n \"latitude\": None,\r\n \"longitude\": None,\r\n \"accuracy\": None,\r\n \"google_place_id\": None,\r\n \"postcode\": None\r\n }\r\n else:\r\n answer = results['results'][0]\r\n\r\n output = {\r\n \"formatted_address\": answer.get('formatted_address'),\r\n \"latitude\": answer.get('geometry').get('location').get('lat'),\r\n \"longitude\": answer.get('geometry').get('location').get('lng'),\r\n }\r\n\r\n # Append some other details:\r\n output['input_string'] = address\r\n output['number_of_results'] = len(results['results'])\r\n output['status'] = results.get('status')\r\n if return_full_response is True:\r\n output['response'] = results\r\n\r\n return output", "title": "" }, { "docid": "a4309e5b33a1e81375954286024e2a7e", "score": "0.5516787", "text": "def get_census_results(address):\n # Set up your Geocoding url\n geocode_url = 
\"https://geocoding.geo.census.gov/geocoder/geographies/onelineaddress?address={}&benchmark=Public_AR_Census2010&vintage=Census2010_Census2010&layers=14&format=json\".format(address)\n\n\n # Ping US Census Bureau for the reuslts:\n results = requests.get(geocode_url)\n # Results will be in JSON format - convert to dict using requests functionality\n results = results.json()\n\n # if there's no results or an error, return empty results.\n if len(results['result']) == 0:\n output = {\n \"formatted_address\" : None,\n \"latitude\": None,\n \"longitude\": None,\n \"postcode\": None,\n \"state_code\" : None,\n \"county_code\" : None,\n \"tract_code\" : None\n }\n else:\n answer = results['result']['addressMatches']\n output = {\n \"formatted_address\" : answer[0].get('matchedAddress'),\n \"latitude\": answer[0].get('coordinates').get('y'),\n \"longitude\": answer[0].get('coordinates').get('x'),\n \"postcode\": answer[0].get('addressComponents').get('zip'),\n \"state_code\": answer[0].get('geographies').get('Census Blocks')[0].get('STATE'),\n \"county_code\": answer[0].get('geographies').get('Census Blocks')[0].get('COUNTY'),\n \"tract_code\":answer[0].get('geographies').get('Census Blocks')[0].get('TRACT'),\n\n }\n\n # Append some other details:\n output['input_address'] = address\n #output['fips_code'] = pd.concat(output['state_code']+output['county_code']+output['tract_code'])\n\n\n return output", "title": "" }, { "docid": "6ab6e9ed5d0fa1a91963aa4ef2643a00", "score": "0.55151737", "text": "def _make_location_filter(query_params, etag_hash=''):\n def make_rectangle(bbox):\n \"\"\"Given a bbox csv returns a geometry object for it\"\"\"\n xmin, ymin, xmax, ymax = (float(x) for x in bbox.split(','))\n return Polygon(((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)))\n\n # Filter by any boolean filters provided\n result = Q()\n for f, val in query_params.iteritems():\n if f in _location_bool_fields:\n logger.debug('Adding Filter: %s = %s' % (f, val))\n kw = {f: val == 'true'}\n etag_hash += str(kw)\n result &= Q(**kw)\n\n # Filter by any bbox if provided\n if 'bbox' in query_params:\n etag_hash = '' # Can't cache bbox queries\n bbox = make_rectangle(query_params['bbox'])\n bbox_filter = Q(geom__bboverlaps=bbox)\n result &= bbox_filter\n\n return result, etag_hash", "title": "" }, { "docid": "fbcc6132a1b61c3f0ca7b75b5a7b13cb", "score": "0.54734516", "text": "def search():\n q = request.args.get(\"q\")\n q = q.replace(\" / +/g\",\" \")\n \n if \",\" in q:\n print (\"COMMA\") #for debug\n tokens = [item.strip().lower() for item in q.split(\",\")]\n elif \" \" in q:\n print (\"SPACE\") #for debug\n tokens = [item.strip().lower() for item in q.split(\" \")]\n else:\n isInteger = False\n try:\n postalCode = int(q)\n isInteger = True\n except ValueError:\n pass\n \n if isInteger:\n print (\"NUMBER\")\n query = text(\"\"\"SELECT * FROM places WHERE postal_code LIKE :q\n GROUP BY country_code, place_name, admin_code1\n ORDER BY postal_code\n LIMIT 10\n \"\"\")\n results = conn.execute(query,q=q + \"%\")\n else:\n print (\"NOT NUMBER\")\n query = text(\"\"\"SELECT * FROM places WHERE place_name LIKE :q OR admin_name1 LIKE :q OR admin_name2 LIKE :q\n GROUP BY country_code, place_name, admin_code1\n ORDER BY place_name\n LIMIT 10\n \"\"\")\n results = conn.execute(query,q=q + \"%\")\n \n rows = [dict(result) for result in results]\n return jsonify(rows)\n \n if \"us\" in tokens:\n tokens.remove(\"us\")\n \n containsCode = False\n containsState = False\n \n for token in tokens:\n if token 
in codesList:\n containsCode = True\n code = token\n tokens.remove(token)\n value=code\n elif token in statesList:\n containsState=True\n state = token\n tokens.remove(token)\n value = state\n \n if containsCode or containsState:\n query = text(\"\"\"SELECT * FROM places WHERE admin_code1 LIKE :value AND (place_name LIKE :value OR place_name LIKE :value2 OR admin_name2 LIKE :value2)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY place_name\n LIMIT 10\n \"\"\")\n updatedQuery = \" \".join(tokens) + \"%\"\n results = conn.execute(query, value=value, value2=updatedQuery)\n if not containsCode and not containsState:\n query = text(\"\"\"SELECT * FROM places WHERE place_name LIKE :q OR admin_name1 LIKE :q OR admin_name2 LIKE :q\n GROUP BY country_code, place_name, admin_code1\n ORDER BY place_name\n LIMIT 10\n \"\"\")\n results = conn.execute(query,q=q + \"%\")\n \n rows = [dict(result) for result in results]\n return jsonify(rows)", "title": "" }, { "docid": "523ba7906db4deef56f996db3be9744a", "score": "0.54672587", "text": "def refsearch(self, *arg):\n (search, mapname) = normalize_argv(arg, 2)\n if search is None:\n self._missing_argument()\n\n search = arg[0]\n if mapname is None:\n mapname = \"all\"\n msg(\"Searching for reference to: %s in: %s ranges\" % (repr(search), mapname))\n result = peda.search_reference(search, mapname)\n\n text = peda.format_search_result(result)\n pager(text)\n\n return", "title": "" }, { "docid": "221ce6fd89ffdd4ae44a8f3f119b07f3", "score": "0.54575163", "text": "def stop_map_search(request):\n lat1 = Decimal(request.GET['lat1'])\n lat2 = Decimal(request.GET['lat2'])\n lon1 = Decimal(request.GET['lon1'])\n lon2 = Decimal(request.GET['lon2'])\n \n stops = Stop.objects.filter(latitude__range=(lat1, lat2),\n longitude__range=(lon1, lon2))\n output = []\n for stop in stops:\n output.append(stop.json())\n return HttpResponse(json.dumps({\"stops\": output}),\n mimetype='application/json')", "title": "" }, { "docid": "884a188fd8b855fd4e00142a09d4dd87", "score": "0.5451789", "text": "async def google(self, ctx, text):\r\n search_type = ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower().split(\" \")\r\n #Start of Image\r\n if search_type[0] == \"image\":\r\n search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())\r\n if search_valid == \"image\":\r\n await self.bot.say(\"Please actually search something\")\r\n else:\r\n uri = \"https://www.google.com/search?tbm=isch&q=\"\r\n quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+7:].lower())\r\n encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')\r\n await self.bot.say(uri+encode)\r\n #End of Image\r\n #Start of Maps\r\n elif search_type[0] == \"maps\":\r\n search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())\r\n if search_valid == \"maps\":\r\n await self.bot.say(\"Please actually search something\")\r\n else:\r\n uri = \"https://www.google.com/maps/search/\"\r\n quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+6:].lower())\r\n encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')\r\n await self.bot.say(uri+encode)\r\n #End of Maps\r\n #Start of generic search\r\n else:\r\n uri = \"https://www.google.com/search?q=\"\r\n quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:])\r\n encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')\r\n await self.bot.say(uri+encode)\r\n #End of generic search\r", "title": "" }, { "docid": 
"7a76a8fcbb8ca845d9cebc4b57ba56e8", "score": "0.54385805", "text": "def search(lat, lng, distance):\n\n url = SEARCH_URL.format(lat, lng, distance,\n CATEGORY_ID, CLIENT_ID, CLIENT_SECRET,\n time.strftime(\"%Y%m%d\"))\n venue_list = []\n\n try:\n data = make_request(url)\n\n for item in data['response']['groups'][0]['items']:\n venue = item['venue']\n venue_list.append(Business(venue['name'],\n venue['location']['address'],\n venue['rating'],\n venue['ratingSignals'],\n venue['stats']['checkinsCount']))\n except Exception, e:\n print e\n\n return venue_list", "title": "" }, { "docid": "3657801413d1f2a8be313a9079c7e65c", "score": "0.54373145", "text": "def multiple_restaurant_lookup(subscription_key):\n\n client = EntitySearchClient(\n endpoint=\"https://api.cognitive.microsoft.com\",\n credentials=CognitiveServicesCredentials(subscription_key)\n )\n\n try:\n restaurants = client.entities.search(query=\"seattle restaurants\")\n\n if restaurants.places.value:\n\n # get all the list items that relate to this query\n list_items = [entity for entity in restaurants.places.value\n if entity.entity_presentation_info.entity_scenario == \"ListItem\"]\n\n if list_items:\n\n suggestions = []\n for place in list_items:\n # Pythonic approach : EAFP \"Easier to ask for forgiveness than permission\"\n # see https://docs.python.org/3/glossary.html\n try:\n suggestions.append(\"{} ({})\".format(\n place.name, place.telephone))\n except AttributeError:\n print(\n \"Unexpectedly found something that isn\\'t a place named '{}'\", place.name)\n\n print(\"Ok, we found these places: \")\n print(\", \".join(suggestions))\n\n else:\n print(\"Couldn't find any relevant results for \\\"seattle restaurants\\\"\")\n\n else:\n print(\"Didn't see any data..\")\n\n except Exception as err:\n print(\"Encountered exception. 
{}\".format(err))", "title": "" }, { "docid": "3bf5b2c37037b76e4a7a80082b99fd21", "score": "0.5424755", "text": "def find_coord(obs):\n\n address = obs['Address']\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.Bank + ', '+ obs.Branch + ', ' + obs.Center\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json&type=bank'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.Bank + ', '+ obs.Branch + ', ' + obs.Center\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.Bank + ', '+ obs.Branch + ', ' + obs.District\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json&type=bank'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.Bank + ', '+ obs.Branch + ', ' + obs.District\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.Branch + ', ' + obs.Center + ', ' + obs.District + ', ' + obs.State\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.Center + ', ' + obs.District + ', ' + obs.State\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n address = obs.District + ', ' + obs.State\n url = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n response = requests.get(url).json()\n if len(response) != 0:\n for res in response:\n if float(res['lon']) > 68.11 and float(res['lon']) < 97.4:\n if float(res['lat']) > 6.46 and float(res['lat']) < 35.52:\n return obs.name,res['lat'],res['lon']\n \n return obs.name,'nan','nan'", "title": "" }, { "docid": "69ff77473c81ad30454b61dee76e1d50", "score": "0.54007214", "text": 
"def search_around_point(self, lati: float, longi: float):\n\t\tquery = self._construct_positional_search_query(lati, longi)\n\t\treturn self._get_and_clean_request(query)", "title": "" }, { "docid": "0b2091750a16c833d97d0ae0473d0cea", "score": "0.5398331", "text": "def test_ask_google_for_address():\n with pytest.raises(TypeError):\n ask_google_for_address() # listing required\n\n response, encoding = read_search_results()\n parsed_page = parse_source(response)\n listing_generator = extract_listings(parsed_page)\n listing = listing_generator.next()\n add_location(listing)\n\n goog_data = ask_google_for_address(listing)\n assert isinstance(goog_data, dict)\n assert 'results' in goog_data\n assert isinstance(goog_data['results'], list)\n first_result = goog_data['results'][0]\n assert isinstance(first_result, dict)\n\n # We are implicitly testing find_best_address() here, too\n assert 'types' in first_result\n assert 'street_address' in first_result['types']\n # I'm more or less assuming the above is the case;\n # google seems to reliably do this\n assert 'formatted_address' in first_result\n assert isinstance(first_result['formatted_address'], unicode)", "title": "" }, { "docid": "078331b5e1a26cf03bde36cfc0bd86c5", "score": "0.5397246", "text": "def census_single_query(addr):\n url = \"http://geocoding.geo.census.gov/geocoder/locations/onelineaddress?address={}&benchmark=9&format=json\".format(addr)\n response = urlopen(url)\n data = response.readall().decode('utf-8')\n data = json.loads(data)\n\n if \"result\" not in data:\n return None\n if \"addressMatches\" not in data[\"result\"]:\n return None\n\n matches = data[\"result\"][\"addressMatches\"]\n if len(matches) == 0:\n return None\n if len(matches) > 1:\n print (addr)\n for m in matches:\n print (m[\"addressComponents\"])\n print (matches)\n raise Exception(\"Too many matches\")\n\n result = matches[0][\"coordinates\"]\n result = {\"lat\": result[\"y\"], \"lon\": result[\"x\"], \"matched_address\": matches[0]['matchedAddress']}\n return pd.Series(result)", "title": "" }, { "docid": "e98e519f2c42e033471bab768c9e4f9f", "score": "0.53907895", "text": "def search():\n search_param = request.args['search']\n\n # create a list so Big Bend -> [ Big , Bend]\n descriptivename = search_param.split()\n\n parks_or_list = set()\n events_or_list = set()\n states_or_list = set()\n campgrounds_or_list = set()\n\n # keep a dictionary of each park that shows up in or search and count of other terms that show up\n # and list is found by filtering dictionary to only intances with the same count as number of search terms\n parks_dict = {}\n events_dict = {}\n states_dict = {}\n campgrounds_dict = {}\n\n # get or results\n for search in descriptivename:\n print(search)\n park_search_instance = Park.query.filter(or_(Park.name.ilike('%' + search + '%'), \n Park.latitude.ilike('%' + search + '%'), \n Park.longitude.ilike('%' + search + '%'), \n Park.address.ilike('%' + search + '%'), \n Park.phone.ilike('%' + search + '%'), \n Park.website.ilike('%' + search + '%'),\n Park.zipcode.ilike('%' + search + '%'), \n Park.photo_url.ilike('%' + search + '%'), \n Park.zipregion.ilike('%' + search + '%'), \n Park.state_fk.ilike('%' + search + '%'))).all()\n for v in park_search_instance:\n parks_or_list.add(v)\n if v in parks_dict:\n parks_dict[v] += 1\n else:\n parks_dict[v] = 1\n\n\n event_search_instance = Event.query.filter(or_(Event.latitude.ilike('%' + search + '%'), \n Event.longitude.ilike('%' + search + '%'), \n Event.topics.ilike('%' + search + '%'), \n 
Event.start_date.ilike('%' + search + '%'), \n Event.end_date.ilike('%' + search + '%'), \n Event.pic_url.ilike('%' + search + '%'),\n Event.org_name.ilike('%' + search + '%'), \n Event.contact_phone_num.ilike('%' + search + '%'), \n Event.zipregion.ilike('%' + search + '%'), \n # Event.park_fk.ilike('%' + search + '%'), \n Event.state_fk.ilike('%' + search + '%'), \n Event.zipcode.ilike('%' + search + '%'))).all()\n for v in event_search_instance:\n events_or_list.add(v)\n if v in events_dict:\n events_dict[v] += 1\n else:\n events_dict[v] = 1\n\n\n state_search_instance = State.query.filter(or_(State.name.ilike(search), \n State.description.ilike('%' + search + '%'), \n State.total_area.ilike('%' + search + '%'), \n State.population.ilike('%' + search + '%'), \n State.highest_point.ilike('%' + search + '%'))).all()\n for v in state_search_instance:\n states_or_list.add(v)\n if v in states_dict:\n states_dict[v] += 1\n else:\n states_dict[v] = 1\n\n campground_search_instance = Campground.query.filter(or_(Campground.name.ilike('%' + search + '%'), \n Campground.description.ilike('%' + search + '%'), \n Campground.latitude.ilike('%' + search + '%'), \n Campground.longitude.ilike('%' + search + '%'), \n Campground.direction.ilike('%' + search + '%'),\n Campground.phone.ilike('%' + search + '%'), \n Campground.email.ilike('%' + search + '%'), \n Campground.zipcode.ilike('%' + search + '%'))).all()\n for v in campground_search_instance:\n campgrounds_or_list.add(v)\n if v in campgrounds_dict:\n campgrounds_dict[v] += 1\n else:\n campgrounds_dict[v] = 1\n print(\"time to and\")\n parks_and_list = set()\n for key in parks_dict:\n if parks_dict[key] >= len(descriptivename):\n parks_and_list.add(key)\n\n events_and_list = set()\n for key in events_dict:\n if events_dict[key] >= len(descriptivename):\n events_and_list.add(key)\n\n states_and_list = set()\n for key in states_dict:\n if states_dict[key] >= len(descriptivename):\n states_and_list.add(key)\n\n campgrounds_and_list = set()\n for key in campgrounds_dict:\n if campgrounds_dict[key] >= len(descriptivename):\n campgrounds_and_list.add(key)\n print(\"render template\")\n return render_template('Search.html', \n parks_or_list=parks_or_list, \n events_or_list=events_or_list, \n states_or_list=states_or_list, \n campgrounds_or_list=campgrounds_or_list,\n parks_and_list=parks_and_list, \n events_and_list=events_and_list, \n states_and_list=states_and_list, \n campgrounds_and_list=campgrounds_and_list, \n search=search_param)", "title": "" }, { "docid": "b8ce09c40197637e60b5a7c4c4dd1ec2", "score": "0.5390234", "text": "def es_search(fingerprint, range_):\n words = \" \".join(fingerprints_to_words(fingerprint, range_))\n return es.search(index=es_index, q=words)", "title": "" }, { "docid": "4b171def1290f06eb311e3a9cad46c2e", "score": "0.53893244", "text": "def query_google_place(self, search):\n\n return requests.get('https://maps.googleapis.com/maps/api/place/findplacefromtext/json?'\n 'input={search}&inputtype=textquery&'\n 'fields=formatted_address,name,geometry&'\n 'key={google_api_place_key}'.\n format(search=search, google_api_place_key=self.google_api_place_key))", "title": "" }, { "docid": "f53964481ecce681fa51b94aa3ec2bdb", "score": "0.5384141", "text": "def search_in_google_place(self, search):\n\n response = self.query_google_place(search)\n\n if response.status_code == 200:\n content = response.json()\n\n if len(content['candidates']) > 0 and content['status'] == 'OK':\n return content['candidates'][0]\n\n return None", "title": "" }, 
{ "docid": "29c8bec9c2d2a8065b4efdde5be19905", "score": "0.5378378", "text": "def gmapAnswer(self, search):\n geocode_result = self.gmaps.geocode(search, language=\"fr\")\n if len(geocode_result) == 0:\n return {\n 'address': None,\n 'latitude': None,\n 'longitude': None\n }\n else:\n data = {'address': geocode_result[0]['formatted_address'],\n 'latitude': geocode_result[0]['geometry']['location']['lat'],\n 'longitude': geocode_result[0]['geometry']['location']['lng']\n }\n print(data)\n return data", "title": "" }, { "docid": "e6a461973037a2529e890792bb7ef5ff", "score": "0.53676224", "text": "def geocode_api(request):\n body = json.loads(request.body)\n address_string = body.get(\"search_string\")\n if not address_string:\n msg = {\"error_message\": \"search_string required in request\"}\n return HttpResponseBadRequest(json.dumps(msg), content_type=\"application/json\")\n\n normalized_address = address_lookup_by_string(address_string)\n\n if not normalized_address:\n msg = {\"error_message\": \"No address returned from SmartyStreets\"}\n return HttpResponseBadRequest(json.dumps(msg), content_type=\"application/json\")\n\n return JsonResponse({\"address\": normalized_address})", "title": "" }, { "docid": "54bf79da8e9c517fb65fee0030847a6d", "score": "0.5366734", "text": "def search(app_secret, location):\r\n\r\n url_params = {\r\n 'location': location.replace(' ', '+'),\r\n 'limit': search_limit\r\n }\r\n return request(API_HOST, SEARCH_PATH, app_secret, url_params=url_params)", "title": "" }, { "docid": "62fa8f487a71cf65241a7295959c5102", "score": "0.53469974", "text": "def get_places_search():\n data = request.get_json()\n if data is None or type(data) != dict:\n return make_response(\"Not a JSON\", 400)\n places = []\n states = data.get('states', [])\n cities = data.get('cities', [])\n amenities = data.get('amenities', [])\n if states == [] and cities == [] and amenities == []:\n for place in storage.all(Place).values():\n places.append(place.to_dict())\n return jsonify(places)\n if states != []:\n for state_id in data['states']:\n state = storage.get(State, state_id)\n if state is not None:\n for city in state.cities:\n for place in city.places:\n places.append(place.to_dict())\n if cities != []:\n for city_id in data['cities']:\n city = storage.get(City, city_id)\n if city is not None:\n for place in city.places:\n places.append(place.to_dict())\n return jsonify(places)", "title": "" }, { "docid": "58b01cfd29e95f842fc92a7c0cd6e6d9", "score": "0.53379357", "text": "def woeid_search(query):\n query = 'q=select * from geo.places where text=\"%s\"' % query\n body = web.get('http://query.yahooapis.com/v1/public/yql?' 
+ query,\n dont_decode=True)\n parsed = xmltodict.parse(body).get('query')\n results = parsed.get('results')\n if results is None or results.get('place') is None:\n return None\n if type(results.get('place')) is list:\n return results.get('place')[0]\n return results.get('place')", "title": "" }, { "docid": "e54dcc9220441ce684ba1558a245cd23", "score": "0.5323395", "text": "def main():\n print(get_lat_long('boston'))\n print(find_stop_near('brookline'))", "title": "" }, { "docid": "6ed927a799dcd740a002ed3ee47f3077", "score": "0.5322238", "text": "def search(api_key, term, location):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': 20\n }\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "title": "" }, { "docid": "f9d7829d0d1b1238f5eda25cd1756589", "score": "0.5321321", "text": "def address_geo(name, city):\r\n \r\n path = PATH.format(name, city, name, city)\r\n\r\n request_html = requests.get(path, headers={\"User-Agent\": AGENT})\r\n\r\n page = request_html.content\r\n \r\n soup_html = BeautifulSoup(page, \"html.parser\")\r\n \r\n propriete = soup_html.find_all(\"span\")\r\n\r\n stop = ''\r\n address = []\r\n\r\n for i in propriete:\r\n if stop == True:\r\n break\r\n if i.string == None:\r\n pass\r\n else:\r\n for j in SCRAP:\r\n finding = str(i.string).find(str(j))\r\n if finding >= 0:\r\n address.append(i.string)\r\n stop = True\r\n \r\n return address", "title": "" }, { "docid": "103c12fb72f3ef3a0661c31c23adef09", "score": "0.53209484", "text": "def search(term, location, offset, category):\n #http://api.yelp.com/v2/search/?location=San Francisco, CA&category_filter=cafes\n \n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'offset': offset,\n 'limit': SEARCH_LIMIT,\n 'category_filter': category.replace(' ', '+'),\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)", "title": "" }, { "docid": "99986f4904ea46a2eb06c3b02ff5bc6b", "score": "0.53169703", "text": "def geolocate():\n\n return render_template('geolocate.html', google_api_key=google_api_key)", "title": "" }, { "docid": "58c52d2e46b0bf8b97a11d4241085db9", "score": "0.53161347", "text": "def test_get_fields_by_address_suggestion_multi_example(client):\n address = '1651 mission street suite 1000'\n\n response = get_parcels_by_address_response(client, address, {'returnSuggestions':True})\n assert response.status_code == 200\n\n content = json.loads(response.content)\n\n assert jsend.is_success(content)\n assert len(content['data']['parcels']) > 1\n for parcel in content['data']['parcels']:\n assert parcel['attributes']['blklot']\n assert parcel['attributes']['ADDRESS']\n assert 'geometry' not in content['data']['parcels'][0]", "title": "" }, { "docid": "eaebfa6409290cfa022ceccb44f0ff8c", "score": "0.53145725", "text": "def test_get_fields_by_address_suggestion_base_example(client):\n address = '1650 mission street #100'\n\n response = get_parcels_by_address_response(client, address, {'returnSuggestions':True})\n assert response.status_code == 200\n\n content = json.loads(response.content)\n\n assert jsend.is_success(content)\n assert len(content['data']['parcels']) == 1\n assert content['data']['parcels'][0]['attributes']['blklot'] == '3512008'\n assert content['data']['parcels'][0]['attributes']['block_num'] == '3512'\n assert content['data']['parcels'][0]['attributes']['lot_num'] == '008'\n assert content['data']['parcels'][0]['attributes']['ADDRESS'] == '1650 MISSION ST'\n assert 'geometry' 
not in content['data']['parcels'][0]", "title": "" }, { "docid": "edaa6e3fab5e5665806581efd66dd5c4", "score": "0.5304089", "text": "def test_yelp_geo(self):\n places = yelp.query_geo(36.00532112,-78.9260447, 8.0, 'indian', 4)\n self.assertNotEqual(places, None)\n self.assertTrue(len(places)>=1)", "title": "" }, { "docid": "70375e582e0b2f639e048c14b8da09fb", "score": "0.52910084", "text": "def search(api_key, term, location, phone, sort_by='best_match'):\n\n try:\n term = term.replace(' ', '+')\n except AttributeError: # if not str\n term = str(term)\n\n try:\n location = location.replace(' ', '+')\n except AttributeError: # if not str\n location = str(location)\n\n try:\n phone = '+1' + phone.replace('-', '')\n except AttributeError: # if not str\n phone = str(phone)\n\n url_params = {\n 'term': term,\n 'location': location,\n 'phone': phone,\n 'limit': SEARCH_LIMIT,\n 'sort_by': sort_by\n }\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "title": "" }, { "docid": "c0d33ed4e3dab8eb90342af226cb5062", "score": "0.5288014", "text": "def process_search_by_coordinates(cls, x, y, pictures=False):\n data = ScrapperHTML.scrap_coord(x, y)\n\n root = ElementTree.fromstring(data)\n pc1 = root.find(\n \"{http://www.catastro.meh.es/}coordenadas//{http://www.catastro.meh.es/}coord//{http://www.catastro.meh.es/}pc//{http://www.catastro.meh.es/}pc1\")\n pc2 = root.find(\n \"{http://www.catastro.meh.es/}coordenadas//{http://www.catastro.meh.es/}coord//{http://www.catastro.meh.es/}pc//{http://www.catastro.meh.es/}pc2\")\n\n results = []\n if pc1 is not None and pc2 is not None:\n cadaster = ''.join([pc1.text, pc2.text])\n html_picture_tuples = ScrapperHTML.scrap_cadaster(cadaster, None, None, pictures)\n\n if not isinstance(html_picture_tuples, list):\n html_picture_tuples = [html_picture_tuples]\n\n for html_picture_tuple in html_picture_tuples:\n html, picture = html_picture_tuple\n cadaster_entry = cls.parse_html_parcela(html, x, y, picture)\n cadaster_entry.to_elasticsearch()\n results.append(cadaster_entry)\n\n return results", "title": "" }, { "docid": "1436226a5501ca1a0b2a16b44bddc0c7", "score": "0.5287751", "text": "def find_location2(ip_address):\n data = json.loads(open('censyskey', 'r').read())\n\n API_URL = \"https://www.censys.io/api/v1/search/certificates\"\n UID = data[\"API ID\"]\n SECRET = data[\"Secret\"]\n\n res = requests.post(API_URL, \n data= {\n \"query\":\"80.http.get.headers.server:Apache\",\n \"page\":2,\n \"fields\":[\"ip\", \"location.country\", \"autonomous_system.asn\"]\n },\n auth=(UID, SECRET))\n return res\n #if res.status_code == 200:\n # content = res.content.decode('utf-8')\n # loaded = json.loads(content)\n # return loaded['location']['longitude'], loaded['location']['latitude']", "title": "" }, { "docid": "69f0d7dca6cc3558443cacb292545853", "score": "0.5285919", "text": "def get_gps_GoogleMapHTMLRequest(self, address):\r\n time.sleep(2)\r\n address = address.decode('iso8859_15')\r\n address = address.encode('utf8')\r\n address = urllib2.quote(address)\r\n #add these two line especially for EXE compilation\r\n #http://stackoverflow.com/questions/21201238/twilio-python-module-errors-after-compiling/21206079#21206079\r\n r = requests.get(u'http://maps.google.com/?q='+address+'&hl=fr')\r\n text = r.text\r\n try:\r\n lat, lon = eval(re.findall(ur'\\[[-+]?\\d+\\.\\d+,[-+]?\\d+\\.\\d+\\]',r.text)[0])\r\n # I remove the \"u\" for the regex to avoid unequal comparaison by difflib.SequenceMatcher\r\n addresse = 
re.findall(ur'\\[\\[\".*?\",\"(.*?)\",\\[',r.text)[0]\r\n return lat,lon, addresse\r\n except:\r\n return None, None, None", "title": "" }, { "docid": "8529851fe41387c54475d748bd833d57", "score": "0.52815604", "text": "def search_places():\n form = request.get_json(force=True)\n place_list = []\n all_cities = []\n all_amenities = []\n all_places = []\n if len(form) == 0:\n all_places = storage.all('Place')\n for place in all_places.values():\n place_list.append(place.to_dict())\n return jsonify(place_list), 200\n if 'cities' in request.json:\n for city_id in form['cities']:\n all_cities.append(storage.get('City', 'city_id'))\n if 'states' in request.json:\n for state_id in form['states']:\n for city in storage.get('State', state_id).cities:\n if city not in all_cities:\n all_cities.append(city)\n for city in all_cities:\n for place in city.places:\n all_places.append(place)\n\n if 'amenities' in request.json and len(all_places) != 0:\n for amenity_id in form['amenities']:\n all_amenities.append(storage.get('Amenity', amenity_id))\n for amenity in all_amenities:\n for place in all_places:\n if place not in amenity.place_amenities:\n all_places.remove(place)\n if 'amenities' in request.json and len(all_places) == 0:\n for amenity_id in form['amenities']:\n all_amenities.append(storage.get('Amenity', amenity_id))\n for amenity in all_amenities:\n for place in amenity.place_amenities:\n place_list.append(place.to_dict())\n return jsonify(place_list), 200\n\n for place in all_places:\n place_list.append(place.to_dict())\n return jsonify(place_list), 200", "title": "" }, { "docid": "c3f4d774ad87eb613d27dfdb1659bb5c", "score": "0.52764565", "text": "def search(city):\n params = {'location': city, 'limit': 20}\n return request(API_HOST, SEARCH_PATH, url_params=params)", "title": "" }, { "docid": "0647637b261ed81c5fbc3490fd72453d", "score": "0.52718806", "text": "def search_maps(self):\n\n go = True\n entry = self.drop_down.currentText()\n if entry == \"Other\":\n user, ok = QInputDialog.getText(self, \"Search Entry\", \"Enter your search:\")\n if ok:\n entry = user\n else:\n go = False\n if go:\n url = 'google.com/maps/search/' + entry.lower() + '/'\n browser = webbrowser.get('google-chrome')\n if os.name == 'nt':\n sub.run(['cmd', '/c', 'start', 'chrome.exe', url])\n elif os.name == 'posix':\n sub.run(['google-chrome', url])", "title": "" }, { "docid": "f32c255a8c3d203e36bcd7aaa1dce5c7", "score": "0.525868", "text": "def get_request(request):\n request_json = request.get_json()\n nowlat = request_json['lat']\n nowlng = request_json['lng']\n genre = request_json['type']\n dis = request_json['dis']\n\n result = search_place(nowlat, nowlng, genre, dis)\n return result", "title": "" }, { "docid": "865c44291963e96c6dc727366e29701c", "score": "0.5257682", "text": "def search(self, **kwargs):\n endpoint = \"json/stations/search\"\n # lowercase tag reference since the API turned to be case-sensitive\n for paramkey in ['tag', 'tagList']:\n if paramkey in kwargs:\n kwargs[paramkey] = kwargs[paramkey].lower()\n url = self.build_url(endpoint)\n return self.client.get(url, **kwargs)", "title": "" }, { "docid": "fd17271a512b81ed7205547438c0b597", "score": "0.5252012", "text": "def location_search_params(api_key, location, **kwargs):\n # What is the url endpoint for search?\n url = 'https://api.yelp.com/v3/businesses/search'\n # How is Authentication performed?\n Bearer='Bearer '+api_key\n headers = {'Authorization' : Bearer}\n # SPACES in url is problematic. 
How should you handle location containing spaces?\n location = location.replace(\" \",\"+\")\n url_params = {'location':location}\n # Include keyword arguments in url_params\n for key,value in kwargs.items():\n url_params[key] = value \n \n return url, headers, url_params", "title": "" }, { "docid": "c81a85ca53a18163fee002c912e40761", "score": "0.524233", "text": "def main():\n \n # Random Deloitte Offices as Tests\n orig = '1725 Duke Street, Alexandria, VA 22314-3456'\n wayp1 = 'US National Arboretum in Washington, DC'\n wayp2 = '2 15th Street NW, Washington, DC 20002'\n wayp3 = 'Monticello near Charlottesville, VA'\n dest = '7900 Tysons One Place, McLean, VA 22102-5971'\n\n origs = ['100 S. Charles Street, Baltimore, MD 21201-2713',\n '6810 Deerpath Road, Elkridge, MD 21075',\n '22454 Three Notch Road, Lexington Park, MD 20653']\n dests = ['1700 Market Street, Philadelphia, PA 19103-3984',\n '5 Walnut Grove Drive, Horsham, PA 19044',\n '1 Braxton Way, Glen Mills, PA 19342']\n\n geo = Geotool()\n\n # Google Maps Test\n response_google = geo.geocode_google(orig)\n\n # Bing Maps Test\n response_bing = geo.geocode_bing(wayp1)\n\n # Geocodio Test\n response_geocodio = geo.geocode_geocodio(wayp2)\n\n # MapQuest Test\n response_mapquest = geo.geocode_mapquest(dest)\n\n # ESRI Test\n response_esri = geo.geocode_esri(wayp3)", "title": "" }, { "docid": "34ed9eb0bd579861432948f80f621ea9", "score": "0.523783", "text": "def search_shops_near(request, format=None):\n if request.method == 'GET':\n limit_product = int(request.query_params['limit']) if 'limit' in request.query_params else LIMIT_PRODUCT_BY_SHOP\n radius = float(request.query_params['radius']) if 'radius' in request.query_params else LIMIT_RADIUS\n lat = float(request.query_params['lat']) if 'lat' in request.query_params else None\n lng = float(request.query_params['lng']) if 'lng' in request.query_params else None\n if not lat or not lng:\n return Response('Lat or lng not provided', status=status.HTTP_400_BAD_REQUEST)\n shops = Shops.objects.filter(lat__range=(lat - (radius / 100), lat + (radius / 100)), lng__range=(lng - (radius / 100), lng + (radius / 100)))\n\n shops_id = shops.values_list('id')\n products = Products.objects.filter(shop_id__in=shops_id).order_by('-popularity')[0:limit_product]\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "75d3f9d21d647a6cb9462457f6767f6c", "score": "0.52332985", "text": "def geocode(sportstogeocode, hmanames):\n #create a list of the placenames for comparison purposes\n placenames = [i.lower() for i in list(hmanames[\"name\"])]\n \n #create a new geodataframe which will hold the geocoded tweets\n sportshma = gpd.GeoDataFrame()\n \n for i, row in sportstogeocode.iterrows():\n \n try:\n #search if any lemmas match the list of placenames\n placelist = [lemma.lower() for lemma in sportstogeocode.loc[i, \"lemmas\"] if(lemma.lower() in placenames)]\n \n #if there are any placenames, retrieve the point for that place and add it to the tweet information\n if len(placelist) > 0:\n x = hmanames.loc[hmanames[\"name\"]== placelist[0], \"geometry\"].values[0].x\n y = hmanames.loc[hmanames[\"name\"]== placelist[0], \"geometry\"].values[0].y\n geom = hmanames.loc[hmanames[\"name\"]== placelist[0], \"geometry\"].values[0]\n row[\"geometry\"] = geom\n row[\"lon\"] = x\n row[\"lat\"] = y\n sportshma = sportshma.append(row)\n \n except TypeError:\n print(\"Encountered a TypeError\")\n \n print(str(len(sportshma)) + \" tweets succesfully geocoded\")\n 
return sportshma", "title": "" }, { "docid": "98f529550a95fef7a740686956b61c6d", "score": "0.5231777", "text": "def match(\n self,\n query: str,\n latitude: Optional[float] = None,\n longitude: Optional[float] = None,\n ) -> List[Dict]:\n suggestions = []\n for city in self.data:\n if self._is_candidate(city, query):\n suggestions.append(\n {\n \"name\": self._get_full_city_name(city),\n \"latitude\": city.get(\"latitude\", 0.0),\n \"longitude\": city.get(\"longitude\", 0.0),\n \"score\": self._score(city, query, latitude, longitude),\n }\n )\n return sorted(suggestions, reverse=True, key=lambda x: x[\"score\"])", "title": "" }, { "docid": "543fcb3f56b34a747acca8dc52ea8f0a", "score": "0.52299374", "text": "def search(self, **kwargs):\n endpoint = \"{fmt}/stations/search\".format(fmt=self._fmt)\n url = self.build_url(endpoint, **kwargs)\n return self.client.get(url, **kwargs)", "title": "" }, { "docid": "281668e8826f93a56ba53c81756dcb4d", "score": "0.5220314", "text": "def get_places(lat, lon, radius, keyword = \"\", type_ = \"\"):\n \n url_nearby_search = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n loc = str(lat) + \",\" + str(lon)\n \n parameters = {\"key\": gm_key, \"location\": loc, \"radius\": radius, \"keyword\": keyword, \"type\": type_}\n \n response = requests.get(\n url=url_nearby_search, \n params = parameters\n )\n \n restaurants_data = response.json()\n \n return restaurants_data", "title": "" }, { "docid": "e1281fc2de943472d95f7ff9a514a1dd", "score": "0.52003455", "text": "def geocode(address: str) -> Optional[Dict[str,float]]:\n url = \"https://www.pagis.org/arcgis/rest/services/LOCATORS/CompositeAddressPtsRoadCL/GeocodeServer/findAddressCandidates\"\n params = {\n \"SingleLine\": address,\n \"f\": \"json\",\n \"outFields\": \"*\",\n \"maxLocations\": 3,\n \"outSR\": {\n \"wkid\": 102651,\n \"latestWkid\": 3433,\n },\n }\n response = requests.get(url, params=params)\n log.debug(f\"HTTP GET:\\t{response.url}\")\n data = server_ok(response, \"Geolocator\")\n if not data:\n return None\n try:\n log.debug(f\"address: {address} has {len(data['candidates'])} valid candidates\")\n location = data['candidates'][0]['location']\n if 'x' not in location.keys() and 'y' not in location.keys():\n log.warning(f\"invalid location: {location}\")\n return None\n return location\n except Exception as e:\n log.warning(f\"Valid address candidate not found for `{address}` with error: {e}\")\n return None", "title": "" }, { "docid": "4139c09f030d26a8c589c7f9c218abaf", "score": "0.51984406", "text": "def test_geo(self):\n self.params = Params()\n self.gmaps = Gmaps()\n assert self.gmaps.geo(self.params.search_1) == self.params.response_1\n assert self.gmaps.geo(self.params.search_2) == self.params.response_2\n assert self.gmaps.geo(self.params.search_3) == self.params.response_3\n # bad search who return False\n assert self.gmaps.geo(self.params.tintin_et_milou) == self.params.return_false\n assert self.gmaps.geo(self.params.bleach) == self.params.return_false\n assert self.gmaps.geo(self.params.random_search) == self.params.return_false\n assert self.gmaps.geo(self.params.punctuation) == self.params.return_false\n assert self.gmaps.geo(self.params.empty_search) == self.params.return_false", "title": "" }, { "docid": "009a25d5059d6051b6dcd25cdc261064", "score": "0.5194169", "text": "def test_get_geo_coordinate_bounds_2d(self):", "title": "" }, { "docid": "451ee609901ca85bde785b947cb62a8e", "score": "0.51833606", "text": "def google(server, user, target, message):\n query = 
message\n headers = default_headers\n opts = {'q': query, 'v': '1.0'}\n url = urllib.request.Request(google_search_url + \"?\" + urlencode(opts), headers=headers)\n page = urllib.request.urlopen(url)\n results = json.loads(page.read().decode())\n page.close()\n url = results['responseData']['cursor']['moreResultsUrl']\n if not results['responseData']['results']:\n eyercbot.send('sendMsg', server, user, target, 'I found no results for that query.')\n return\n response = '\u0002' + results['responseData']['results'][0]['titleNoFormatting'] + ':\u0002 ' + results['responseData']['results'][0]['url'] + ' | ' + results['responseData']['results'][0]['content']\n eyercbot.send('sendMsg', server, user, target, eyercbot.html2irc(response)[0:eyercbot.config[\"plugin_config\"][\"search\"][\"length\"]])\n eyercbot.send('sendMsg', server, user, target, url)", "title": "" }, { "docid": "98dd3673ec31952e2251081a79e99803", "score": "0.51807237", "text": "def getBureauxPosteArroundThisPoint (self,lat,lon,distance):\n get_postaloffice=u\"search/\"\n url_options = {\n u\"dataset\":u'laposte_poincont2',\n u\"facet\":u'caracteristique_du_site',\n u\"facet\":u'code_postal',\n u\"facet\":u'localite',\n u\"facet\":u'code_insee',\n u\"facet\":u'precision_du_geocodage',\n u\"facet\":u'precision_du_geocodage',\n u\"rows\":u'1000',\n u\"geofilter.distance\":u'%f,%f,%f' % (lat,lon,distance)\n }\n \n response = requests.get(self.LAPOSTE_URL + get_postaloffice, params=url_options)\n response_json = response.json()\n places = list()\n for record in response_json['records'] :\n \n place,created = Place.objects.get_or_create(lat=record[u'fields'][u'latitude'],lon=record[u'fields'][u'longitude'])\n #if created :\n # self.log.info(\"A new place has been created by laPosteApi\")\n place.name = record[u'fields'][u'libelle_du_site']\n '''\n substitue Boite postale et Agence Postale\n MUDAISON BP => MUDAISON \n SAUSSAN AP => SAUSSAN\n CALVISSON LES JOUETS DE LEO RP => CALVISSON LES JOUETS DE LEO\n '''\n \n place.name = re.sub('\\ BP$', '', place.name)\n place.name = re.sub('\\ AP$', '', place.name)\n place.name = re.sub('\\ RP$', '', place.name)\n place.name = re.sub('\\ LPRT$', '', place.name)\n\n place.label = place.name\n place.city = record[u'fields'][u'localite']\n \n if u'adresse' in record[u'fields'] and record[u'fields'][u'adresse'] != u\"LE BOURG\" :\n place.street = record[u'fields'][u'adresse']\n place.postalAdress = \"%s %d %s\" % (place.street, int(record[u'fields'][u'code_postal']), record[u'fields'][u'localite'])\n else :\n place.street = u''\n place.postalAdress = \"%s %s\" % (record[u'fields'][u'code_postal'], record[u'fields'][u'localite'])\n \n place.pays = record[u'fields'][u'pays']\n place.lon = record[u'fields'][u'longitude']\n place.lat = record[u'fields'][u'latitude']\n place.save()\n places.append(place)\n self.log.info(\"found %d places arround %.1f Km\" % (len(places), distance/1000)) \n #for place in places :\n #self.log.info(u\" - \" + place.name) \n return places", "title": "" }, { "docid": "addc523401d220803d40e338eff15c09", "score": "0.51788414", "text": "def search():\n global response\n\n # Get API response\n # URL and api key\n url = \"https://api.openweathermap.org/data/2.5/weather\"\n api_key = \"59a99dfbbe0ddb794763ae17dcd255ab\"\n\n # Search by the appropriate query, either city name or zip\n if search_method.get() == 1:\n querystring = {\"q\": city_entry.get(), \"appid\": api_key, 'units': 'imperial'}\n elif search_method.get() == 2:\n querystring = {\"zip\": city_entry.get(), \"appid\": 
api_key, 'units': 'imperial'}\n\n # Call API\n response = requests.request(\"GET\", url, params=querystring)\n response = response.json()\n\n\n # Example response return\n \"\"\"{'coord': {'lon': -71.0598, 'lat': 42.3584},\n 'weather': [{'id': 800, 'main': 'Clear', 'description': 'clear sky', 'icon': '01n'}], 'base': 'stations',\n 'main': {'temp': 291.71, 'feels_like': 291.08, 'temp_min': 288.72, 'temp_max': 293.4, 'pressure': 1011,\n 'humidity': 56}, 'visibility': 10000, 'wind': {'speed': 2.68, 'deg': 333, 'gust': 7.15},\n 'clouds': {'all': 0}, 'dt': 1627697442,\n 'sys': {'type': 2, 'id': 2013408, 'country': 'US', 'sunrise': 1627637694, 'sunset': 1627689974},\n 'timezone': -14400, 'id': 4930956, 'name': 'Boston', 'cod': 200}\n \"\"\"\n get_weather()\n get_icon()", "title": "" }, { "docid": "fad82b1e6f197ca3981beae880692534", "score": "0.517478", "text": "def _get_results(query: str):\n\n query = urllib.parse.quote_plus(query)\n response = _get_source(\"https://www.google.co.uk/search?q=\" + query)\n\n return response", "title": "" }, { "docid": "09f1198ac8c5058f9e234a1625a18e4f", "score": "0.5172012", "text": "def params_from_loc(location: Dict[str,float]) -> Dict[str,Any]:\n geom_query = {\n \"xmin\": location['x'],\n \"ymin\": location['y'],\n \"xmax\": location['x'] + 13,\n \"ymax\": location['y'] + 13,\n \"spatialReference\": {\n \"wkid\": 102651,\n \"latestWkid\": 3433,\n }\n }\n geom_string = ''.join(geom_query.__repr__().split()).replace(\"'\",'\"')\n params = {\n \"f\": \"json\",\n \"spatialRel\": \"esriSpatialRelIntersects\",\n \"maxAllowableOffset\": 1,\n \"geometry\": geom_string,\n \"geometryType\": \"esriGeometryEnvelope\",\n \"inSR\": 102651,\n \"outFields\": \"CALC_ACRE\",\n \"returnGeometry\": \"true\",\n }\n return params", "title": "" }, { "docid": "084f17b295d9320d25b48b80cd73c691", "score": "0.51588047", "text": "def process_locations():\n start = datetime.datetime.now()\n\n # load the already geocoded item cache:\n geocache_file = conf.runtime.geocache_file\n gmap = googlemaps.Client(key=conf.google.maps_api_key)\n\n client = conf.google.get_client()\n coffee_doc = client.open_by_key(conf.runtime.coffee_doc_id)\n wsdata = util.read_tab_as_df(coffee_doc, 'cupping dimensions', num_columns=13)\n beans = wsdata[wsdata['Coffee bean'] != ''][['Coffee bean', 'Manual Location']]\n\n if not geocache_file.exists():\n conf.logger.info(f'GeoCache file not found: {geocache_file}')\n geocache = dict()\n elif conf.runtime.force_refresh:\n conf.logger.warning(f'Forcing GeoCache refresh of: {geocache_file}')\n geocache = dict()\n else:\n infile = open(geocache_file, 'rb')\n geocache = pickle.load(infile)\n infile.close()\n\n num_geocoded = 0\n for index, sources in beans.iterrows():\n source = sources['Coffee bean']\n best_location = source\n if sources['Manual Location'] != '':\n best_location = sources['Manual Location']\n if source not in geocache:\n conf.logger.info(f'Cache did not contain {source}, so fetching it from the API.')\n num_geocoded += 1\n places = gmap.geocode(best_location)\n if len(places) == 0:\n conf.logger.warning(f'Found ZERO matches for {source}, so skipping it.')\n continue\n elif len(places) > 1:\n conf.logger.warning(f'Found {len(places)} different matches for {source}, proceeding with the first match.')\n place = places[0]\n coded = dict()\n try:\n coded['source'] = source\n place_id = place['place_id']\n coded['place_id'] = place_id\n coded['place_url'] = f'https://www.google.com/maps/place/?q=place_id:{place_id}'\n conf.logger.debug(f'Found {place_id}')\n 
coded['raw'] = place\n coded['updated'] = datetime.datetime.now()\n coded['country'] = ''\n coded['country_code'] = ''\n coded['state'] = ''\n coded['region'] = ''\n coded['place'] = ''\n coded['latitude'] = ''\n coded['longitude'] = ''\n coded['elevation_m'] = ''\n\n country = [item['long_name'] for item in place['address_components'] if 'country' in item['types']]\n if len(country) > 0:\n coded['country'] = country[0]\n country_code = [item['short_name'] for item in place['address_components'] if 'country' in item['types']]\n if len(country_code) > 0:\n coded['country_code'] = country_code[0]\n state = [item['long_name'] for item in place['address_components'] if 'administrative_area_level_1' in item['types']]\n if len(state) > 0:\n coded['state'] = state[0]\n region = [item['long_name'] for item in place['address_components'] if 'administrative_area_level_2' in item['types']]\n if len(region) > 0:\n coded['region'] = region[0]\n if 'locality' in place['address_components'][0]:\n coded['place'] = place['address_components'][0]['long_name']\n else:\n coded['place'] = place['formatted_address']\n if 'location' in place['geometry']:\n coded['latitude'] = place['geometry']['location']['lat']\n coded['longitude'] = place['geometry']['location']['lng']\n coded['elevation_m'] = f\"{gmap.elevation((coded['latitude'], coded['longitude']))[0]['elevation']:.3f}\"\n except Exception as e:\n conf.logger.warning(f'Couldn\\'t find the plus code in {source} - error was {e}, got as far as {coded} items')\n\n geocache[source] = coded\n else:\n conf.logger.info(f'Found {source} in cache, so using the cached version.')\n\n # update the cache\n outfile = open(geocache_file, 'wb')\n pickle.dump(geocache, outfile)\n outfile.close()\n\n # save the locations in the Config object\n conf.runtime.geocache = geocache\n\n conf.logger.info(f'Geocoded {num_geocoded} items in {(datetime.datetime.now() - start).total_seconds():.2f}s')\n return geocache", "title": "" }, { "docid": "d958957bc44e58ea62a899c111a260c5", "score": "0.5152954", "text": "def search(self):", "title": "" }, { "docid": "d958957bc44e58ea62a899c111a260c5", "score": "0.5152954", "text": "def search(self):", "title": "" }, { "docid": "699564a854745c88dcb8bef429842fcb", "score": "0.51512855", "text": "def build_query(args):\n\n query = {'key': API_KEY}\n\n radius = args.radius if args.radius != None else DEFAULT_RADIUS\n query['radius'] = radius\n\n if args.keyword != None:\n query['keyword'] = args.keyword\n\n location = LATITUDE + ',' + LONGITUDE\n query['location'] = location\n\n return query", "title": "" }, { "docid": "b1044ccc9cd2bbd858a54c336e3077ce", "score": "0.51500934", "text": "def search_variants_by_coordinates(coordinate_query, search_mode='any'):\n get_all_variants()\n if coordinate_query.build == 'GRCh37':\n ct = COORDINATE_TABLE\n start_idx = COORDINATE_TABLE_START\n stop_idx = COORDINATE_TABLE_STOP\n chr_idx = COORDINATE_TABLE_CHR\n start = int(coordinate_query.start)\n stop = int(coordinate_query.stop)\n chromosome = str(coordinate_query.chr)\n # overlapping = (start <= ct.stop) & (stop >= ct.start)\n left_idx = chr_idx.searchsorted(chromosome)\n right_idx = chr_idx.searchsorted(chromosome, side='right')\n chr_ct_idx = chr_idx[left_idx:right_idx].index\n right_idx = start_idx.searchsorted(stop, side='right')\n start_ct_idx = start_idx[:right_idx].index\n left_idx = stop_idx.searchsorted(start)\n stop_ct_idx = stop_idx[left_idx:].index\n match_idx = chr_ct_idx & start_ct_idx & stop_ct_idx\n m_df = ct.loc[match_idx, ]\n if search_mode == 
'any':\n var_digests = m_df.v_hash.to_list()\n return [CACHE[v] for v in var_digests]\n elif search_mode == 'query_encompassing':\n match_idx = (start <= m_df.start) & (stop >= m_df.stop)\n elif search_mode == 'variant_encompassing':\n match_idx = (start >= m_df.start) & (stop <= m_df.stop)\n elif search_mode == 'exact':\n match_idx = (start == m_df.start) & (stop == m_df.stop)\n if coordinate_query.alt is not None and coordinate_query.alt != '*':\n if coordinate_query.alt == '-':\n raise ValueError(\"Unexpected alt `-` in coordinate query. Did you mean `None`?\")\n match_idx = match_idx & (coordinate_query.alt == m_df.alt)\n elif coordinate_query.alt is None:\n match_idx = match_idx & pd.isnull(m_df.alt)\n if (coordinate_query.ref is not None and coordinate_query.ref != '*'):\n if coordinate_query.ref == '-':\n raise ValueError(\"Unexpected ref `-` in coordinate query. Did you mean `None`?\")\n match_idx = match_idx & (coordinate_query.ref == m_df.ref)\n elif coordinate_query.ref is None:\n match_idx = match_idx & pd.isnull(m_df.ref)\n else:\n raise ValueError(\"unexpected search mode\")\n var_digests = m_df.loc[match_idx,].v_hash.to_list()\n return [CACHE[v] for v in var_digests]\n else:\n if search_mode == 'exact':\n if coordinate_query.alt or coordinate_query.ref:\n if coordinate_query.alt == '*' or coordinate_query.ref == '*':\n raise ValueError(\"Can't use wildcard when searching for non-GRCh37 coordinates\")\n if coordinate_query.alt == '-':\n raise ValueError(\"Unexpected alt `-` in coordinate query. Did you mean `None`?\")\n if coordinate_query.ref == '-':\n raise ValueError(\"Unexpected ref `-` in coordinate query. Did you mean `None`?\")\n hgvs = _construct_hgvs_for_coordinate_query(coordinate_query)\n if hgvs is not None:\n s = requests.Session()\n retry = Retry(\n total=5,\n read=5,\n connect=5,\n backoff_factor=0.3,\n status_forcelist=(500, 502, 504),\n )\n adapter = requests.adapters.HTTPAdapter(max_retries=retry)\n s.mount('http://', adapter)\n r = s.get(url=_allele_registry_url(), params={'hgvs': hgvs})\n data = r.json()\n if '@id' in data:\n allele_registry_id = data['@id'].split('/')[-1]\n if not allele_registry_id == '_:CA':\n return search_variants_by_allele_registry_id(allele_registry_id)\n else:\n raise ValueError(\"alt or ref required for non-GRCh37 coordinate queries\")\n else:\n raise ValueError(\"Only exact search mode is supported for non-GRCh37 coordinate queries\")", "title": "" } ]
1e982dbea5d8b95f5c90b7f7b40a3e67
Sets the current tool to edit shape coordinates. Returns None
[ { "docid": "b90c2abb6c7e2c051b1f9f45e888119c", "score": "0.879929", "text": "def set_current_tool_to_edit_shape_coords(self):\n\n self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL\n self.variables.current_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL", "title": "" } ]
[ { "docid": "745d7a5ceebf910f6ee3d6fd4a96a1ef", "score": "0.8333395", "text": "def set_current_tool_to_edit_shape(self):\n\n self.variables.active_tool = TOOLS.EDIT_SHAPE_TOOL\n self.variables.current_tool = TOOLS.EDIT_SHAPE_TOOL", "title": "" }, { "docid": "1828cf63c01a3eca58d7bd05aeb5104b", "score": "0.7301687", "text": "def set_current_tool_to_translate_shape(self):\n\n self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL\n self.variables.current_tool = TOOLS.TRANSLATE_SHAPE_TOOL", "title": "" }, { "docid": "34e86011e050dc346000ac57cae132c1", "score": "0.70456964", "text": "def setTool(self):\n self.canvas().setMapTool(self)", "title": "" }, { "docid": "580ec64012f9c7dbb33b5175120798fc", "score": "0.6916515", "text": "def set_current_tool_to_select_closest_shape(self):\n\n self.variables.active_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL\n self.variables.current_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL", "title": "" }, { "docid": "30be643db467f3eb72c46a88ae9213c0", "score": "0.68199146", "text": "def set_current_tool_to_selection_tool(self):\n\n self.variables.current_shape_id = self.variables.select_rect_id\n self.variables.active_tool = TOOLS.SELECT_TOOL\n self.variables.current_tool = TOOLS.SELECT_TOOL", "title": "" }, { "docid": "b7917bd041c6b952be3f525e01bc582c", "score": "0.6580228", "text": "def set_current_tool_to_draw_point(self, point_id=None):\n\n self.variables.current_shape_id = point_id\n self.show_shape(point_id)\n self.variables.active_tool = TOOLS.DRAW_POINT_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_POINT_BY_CLICKING", "title": "" }, { "docid": "af31e62ac936d66b13322ccf0fef8f0c", "score": "0.65256655", "text": "def set_current_tool_to_draw_ellipse(self, ellipse_id=None):\n\n self.variables.current_shape_id = ellipse_id\n self.show_shape(ellipse_id)\n self.variables.active_tool = TOOLS.DRAW_ELLIPSE_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_ELLIPSE_BY_DRAGGING", "title": "" }, { "docid": "2082c67c06090698339695c531d82f6b", "score": "0.65002966", "text": "def set_current_tool_to_zoom_in(self):\n\n self.variables.current_shape_id = self.variables.zoom_rect_id\n self.variables.active_tool = TOOLS.ZOOM_IN_TOOL\n self.variables.current_tool = TOOLS.ZOOM_IN_TOOL", "title": "" }, { "docid": "1ee5e47f1d6dcdd8d86c0423caae81e8", "score": "0.64482874", "text": "def Set(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_Set(self, *args)", "title": "" }, { "docid": "0329ba44bb2a9de1cc9c77030711bb73", "score": "0.62589395", "text": "def set_current_tool_to_zoom_out(self):\n\n self.variables.current_shape_id = self.variables.zoom_rect_id\n self.variables.active_tool = TOOLS.ZOOM_OUT_TOOL\n self.variables.current_tool = TOOLS.ZOOM_OUT_TOOL", "title": "" }, { "docid": "fa842e703e91cee772975a0e85aab8a4", "score": "0.6236207", "text": "def set_current_tool_to_draw_rect(self, rect_id=None):\n\n self.variables.current_shape_id = rect_id\n self.show_shape(rect_id)\n self.variables.active_tool = TOOLS.DRAW_RECT_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_RECT_BY_DRAGGING", "title": "" }, { "docid": "80209faeb75e82b3b9c6eac48a2f94f8", "score": "0.6178693", "text": "def edit(self, p):\n self.poses[self.selected_point].model = p\n self.calibration_changed()", "title": "" }, { "docid": "76269440ce871f2f61c7370e3fc03122", "score": "0.6105037", "text": "def set_new_location(self, xPos, yPos):", "title": "" }, { "docid": "e4b7caa62222b9f43a7d63c95f849767", "score": "0.6078754", "text": "def set_current_tool_to_pan(self):\n\n self.variables.active_tool = 
TOOLS.PAN_TOOL\n self.variables.current_tool = TOOLS.PAN_TOOL", "title": "" }, { "docid": "85bed7b254e99eb3e28bb0a2c7e0e02b", "score": "0.60748833", "text": "def callback_handle_left_mouse_click(self, event):\n\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n self.variables.pan_anchor_point_xy = event.x, event.y\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n closest_coord_index = self.find_closest_shape_coord(self.variables.current_shape_id, event.x, event.y)\n self.variables.tmp_closest_coord_index = closest_coord_index\n elif self.variables.active_tool == TOOLS.SELECT_CLOSEST_SHAPE_TOOL:\n closest_shape_id = self.find_closest_shape(event.x, event.y)\n self.variables.current_shape_id = closest_shape_id\n self.highlight_existing_shape(self.variables.current_shape_id)\n else:\n start_x = self.canvasx(event.x)\n start_y = self.canvasy(event.y)\n\n self.variables.current_shape_canvas_anchor_point_xy = (start_x, start_y)\n if self.variables.current_shape_id not in self.variables.shape_ids:\n coords = (start_x, start_y, start_x + 1, start_y + 1)\n if self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.create_new_line(coords)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.create_new_line(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.create_new_arrow(coords)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.create_new_arrow(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.create_new_rect(coords)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n self.create_new_rect(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.create_new_ellipse(coords)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.create_new_point((start_x, start_y))\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.create_new_polygon(coords)\n self.variables.actively_drawing_shape = True\n else:\n print(\"no tool selected\")\n else:\n if self.variables.current_shape_id in self.variables.shape_ids:\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n (start_x, start_y))\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.event_click_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.event_click_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.event_click_polygon(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n if self.variables.actively_drawing_shape:\n self.variables.actively_drawing_shape = False\n else:\n self.variables.actively_drawing_shape = True", "title": "" }, { "docid": "784cf160fd81ab6a381d27c846fd3e9b", "score": "0.60274136", "text": "def XCAFDoc_ShapeTool_Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_Set(*args)", "title": "" }, { "docid": "b69bed68ff1a33f216fb262f311a28e3", "score": "0.6010801", "text": "def set_current_tool_to_draw_rect_by_clicking(self, rect_id=None):\n\n 
self.variables.current_shape_id = rect_id\n self.show_shape(rect_id)\n self.variables.active_tool = TOOLS.DRAW_RECT_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_RECT_BY_CLICKING", "title": "" }, { "docid": "349680d0ced94e1dd595f9aab36ba404", "score": "0.59694207", "text": "def run(self):\n self.iface.mapCanvas().setMapTool(self.tool)", "title": "" }, { "docid": "de5f1200fa9d640da7d135017bc9dbb8", "score": "0.5964246", "text": "def Point_Pick(self):\n self.vtkWidget.iren.AddObserver('RightButtonPressEvent', self.pick_loc)\n self.renWin.Render()", "title": "" }, { "docid": "8622f2e52b0edcb6983db30d9bd391e6", "score": "0.59161854", "text": "def Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_Set(*args)", "title": "" }, { "docid": "2795a8feae74452b519532bbe5eaef07", "score": "0.58885044", "text": "def set_current_tool_to_draw_arrow_by_clicking(self, arrow_id=None):\n\n self.variables.current_shape_id = arrow_id\n self.show_shape(arrow_id)\n self.variables.active_tool = TOOLS.DRAW_ARROW_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_ARROW_BY_CLICKING", "title": "" }, { "docid": "f7caac265eaa0f603ed9996de386b630", "score": "0.58580947", "text": "def set_current_tool_to_draw_polygon_by_clicking(self, polygon_id=None):\n\n self.variables.current_shape_id = polygon_id\n self.show_shape(polygon_id)\n self.variables.active_tool = TOOLS.DRAW_POLYGON_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_POLYGON_BY_CLICKING", "title": "" }, { "docid": "4dad33f23966c1ded6c4fc3e4939be45", "score": "0.5856651", "text": "def setLocation(self, p):\n super(PolygonTool, self).setLocation(p.point)\n _x, _y = self.getLocation().getCoords()\n _count = self.__nsides\n _inc = self.__increment\n if self.__external:\n _offset = _inc/2.0\n else:\n _offset = 0.0\n _cx, _cy = self.__center.point.getCoords()\n _xsep = _x - _cx\n _ysep = _y - _cy\n _angle = math.atan2(_ysep, _xsep) + _offset\n _rad = math.hypot(_xsep, _ysep)/math.cos(_offset)\n _xp = self.__xpts\n _yp = self.__ypts\n for _i in range(_count):\n _xp[_i] = _cx + (_rad * math.cos(_angle))\n _yp[_i] = _cy + (_rad * math.sin(_angle))\n _angle = _angle + _inc", "title": "" }, { "docid": "aa2a49555f9b44c2fc7a2db408d3c628", "score": "0.5836568", "text": "def set_current_tool_to_draw_line_by_clicking(self, line_id=None):\n\n self.variables.current_shape_id = line_id\n self.show_shape(line_id)\n self.variables.active_tool = TOOLS.DRAW_LINE_BY_CLICKING\n self.variables.current_tool = TOOLS.DRAW_LINE_BY_CLICKING", "title": "" }, { "docid": "a9b4ba6d988f20c677fa37b0788c4128", "score": "0.5813637", "text": "def set_current_tool_to_draw_arrow_by_dragging(self, arrow_id=None):\n\n self.variables.current_shape_id = arrow_id\n self.show_shape(arrow_id)\n self.variables.active_tool = TOOLS.DRAW_ARROW_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_ARROW_BY_DRAGGING", "title": "" }, { "docid": "db0a66f5811d5d3f5991b2b7d814fb78", "score": "0.5780619", "text": "def update_to_coord(self, point):\r\n if self._index_of_sel_point != -1 and self._index_of_sel_point <= len(self.points)-1:\r\n self._command_stack.do(model.structure.UpdatePoint(\r\n self._structure, self._index_of_sel_point, round(point[0]), round(point[1])))\r\n elif self._index_of_sel_point == len(self.points) or not self.points:\r\n self._command_stack.do(model.structure.AddPoint(\r\n self._structure, self._index_of_sel_point+1, round(point[0]), round(point[1])))\r\n if self._index_of_sel_point+1 >= len(self.points):\r\n self.winfo_toplevel().update()\r\n self._index_of_sel_point = 
len(self.points)\r\n else:\r\n self._set_selection(self._index_of_sel_point+1)\r\n self.winfo_toplevel().update()", "title": "" }, { "docid": "e38a7c47f976315c6f089efe44f1bced", "score": "0.57471514", "text": "def ShapeTool(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_ShapeTool(self, *args)", "title": "" }, { "docid": "4d519ea82d7bcb92bd2106cdf8541708", "score": "0.57393825", "text": "def set_current_tool_to_draw_line_by_dragging(self, line_id=None):\n\n self.variables.current_shape_id = line_id\n self.show_shape(line_id)\n self.variables.active_tool = TOOLS.DRAW_LINE_BY_DRAGGING\n self.variables.current_tool = TOOLS.DRAW_LINE_BY_DRAGGING", "title": "" }, { "docid": "c71ca664ca14880722eae4654ce2a67d", "score": "0.5733437", "text": "def setToolTo(*args, **kwargs)->None:\n pass", "title": "" }, { "docid": "b54eab42081701a54955acfaf866f855", "score": "0.5731767", "text": "def set_active_tool(self, tool=None):\n self.active_tool = tool", "title": "" }, { "docid": "7b163eb5a3bbd355cd45909e5822b5ef", "score": "0.5721405", "text": "def toggle_geom(self,event):\n geom=self.master.winfo_geometry()\n self.master.geometry(self._geom)\n self._geom=geom", "title": "" }, { "docid": "835ccb60c6f77fe735b36ef1a89cf487", "score": "0.5714223", "text": "def set_editable_point(self, point_index):\r\n self._point_index = point_index\r\n #flags: the stringvar will change because the point is selected\r\n #checked in the callback to only modify the structure if the user edits the fields\r\n self.inhibit_callbacks = True\r\n\r\n self._point_index_var.set(f\"Vertex {self._point_index}\")\r\n self.editable_x.set(round(self._structure.points[point_index][0], 1))\r\n self.editable_y.set(round(self._structure.points[point_index][1], 1))\r\n\r\n self.inhibit_callbacks = False", "title": "" }, { "docid": "1a7d51566d5c2985b3b0ff139656309b", "score": "0.5650728", "text": "def ShapeTool(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_ShapeTool(self, *args)", "title": "" }, { "docid": "83f99813ae02e8900b3d44b0d26dc9fc", "score": "0.56424576", "text": "def sketchAction(self):\n gsvMessage = \"Click on map to draw geo sketches\"\n self.iface.mainWindow().statusBar().showMessage(gsvMessage)\n self.dumLayer.setCrs(self.iface.mapCanvas().mapSettings().destinationCrs())\n self.canvas.setMapTool(self)\n self.canvasAction = \"sketch\"", "title": "" }, { "docid": "0e0c39de7cf0a454f62637d4c462dd34", "score": "0.5611367", "text": "def ToggleDrawingTools(self, event):\n pass", "title": "" }, { "docid": "f9d2641305d2f4871938501874155afc", "score": "0.5610368", "text": "def Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_Set(*args)", "title": "" }, { "docid": "28a09afb6882a2eeb4b8bc3e08236895", "score": "0.55987394", "text": "def set_coordinates(self, x, y):\n self.x = x\n self.y = y", "title": "" }, { "docid": "714f43e8f7e8c7539c106d76b635b4f8", "score": "0.5596969", "text": "def _on_point_selected(self, _event):\r\n selected_iid = self._tree.selection()\r\n self._index_of_sel_point = self._tree.index(selected_iid)\r\n self._edit_zone.set_editable_point(self._tree.item(selected_iid)[\"values\"][0])\r\n self._notify(\"focus\", {})", "title": "" }, { "docid": "d3446c374b9d88995c46acc462613443", "score": "0.5574843", "text": "def setPreviousMapTool(self):\n if self.iface.mapCanvas().mapTool() != self._currentMapTool:\n self.iface.mapCanvas().setMapTool(self._currentMapTool)", "title": "" }, { "docid": "e7ae38f5125fb033b857c4a23784c22f", "score": "0.5564051", "text": "def XCAFDoc_ShapeMapTool_Set(*args):\n return 
_XCAFDoc.XCAFDoc_ShapeMapTool_Set(*args)", "title": "" }, { "docid": "9a59cd25a677f2f8db4c17b93f426a2c", "score": "0.55529666", "text": "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "title": "" }, { "docid": "276e68225dc55c5f85a3691e6f4dfc5b", "score": "0.5547851", "text": "def set_location(self, x, y):\n self.scene.set_location(x, y)\n self.redraw()", "title": "" }, { "docid": "33b1c53df23b1d088f2d546fe8fb33a9", "score": "0.55383915", "text": "def toggle_geom(self,event):\n \n geom=self.winfo_geometry()\n print(geom,self._geom)\n self.geometry(self._geom)\n self._geom=geom", "title": "" }, { "docid": "2092bc5ff89b84b8d31ef27cbcad4e3c", "score": "0.55371565", "text": "def SetShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_SetShape(self, *args)", "title": "" }, { "docid": "847409d4ce99b9cea1813ce59ccb3662", "score": "0.5527454", "text": "def on_VI_XY_set_clicked(self):\n # TODO: not implemented yet\n disp_coord()\n if qmdz_const.Auto_Range == 0:\n xmin = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'x_min'))\n xmax = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'x_max'))\n ymin = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'y_min'))\n ymax = int(read_config(qmdz_const.SYS_CONF_PATH, 'COORD', 'y_max'))\n self.VI_MPL.change_xy(xmin, xmax, ymin, ymax)", "title": "" }, { "docid": "01f567b771ab9d54ee1064e1149681dd", "score": "0.55153215", "text": "def modify_existing_shape_using_canvas_coords(self, shape_id, new_coords, update_pixel_coords=True):\n vector_object = self.get_vector_object(shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n point_size = vector_object.point_size\n x1, y1 = (new_coords[0] - point_size), (new_coords[1] - point_size)\n x2, y2 = (new_coords[0] + point_size), (new_coords[1] + point_size)\n canvas_drawing_coords = (x1, y1, x2, y2)\n else:\n canvas_drawing_coords = tuple(new_coords)\n self.coords(shape_id, canvas_drawing_coords)\n if update_pixel_coords:\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, new_coords)", "title": "" }, { "docid": "7f1211cb71d1e274d92b61e9c02c5a3c", "score": "0.5510964", "text": "def set_mouse_button_tool(self, button, tool) :\n \n event_name = \"%sButton\"%button\n \n if event_name in self._mouse_tools : \n self._mouse_tools[event_name].deselect()\n \n if tool :\n tool.select()\n self._mouse_tools[event_name] = tool\n elif event_name in self._mouse_tools :\n del self._mouse_tools[event_name]", "title": "" }, { "docid": "aeb4d6221583b94d3207c6ad2d7b1cbb", "score": "0.55077213", "text": "def m_location_set(self, x: int, y: int):\n pass", "title": "" }, { "docid": "ffe31952d3a9ce61c7ca97510e25e6e5", "score": "0.54978585", "text": "def startMoveAddressTool(self):\n\n self.iface.mapCanvas().setMapTool(self._moveaddtool)\n self._moveaddtool.setEnabled(True)", "title": "" }, { "docid": "5a73480eaeb8b63d9a9f9b83732f1baa", "score": "0.54970324", "text": "def mapToolChanged(self):\n \n if (isinstance(self.iface.mapCanvas().mapTool(), GetRcl) == False and\n isinstance(self.iface.mapCanvas().mapTool(), UpdateReviewPosition) == False): \n self._currentMapTool = self.iface.mapCanvas().mapTool()\n #self.highlighter.hideAll()\n # logging \n uilog.info('*** TOOL CHANGE *** {0} started'.format(self.iface.mapCanvas().mapTool()))", "title": "" }, { "docid": "c753fda4880f3e2739cc21c065af9185", "score": "0.5493256", "text": "def start_edit(self):\n txt = self.model.get_current_line()\n self._line.original_widget = self._line_edit\n self._line_edit.set_edit_text(txt)\n 
self._line_edit.set_edit_pos(len(txt))\n self._top.set_focus(2)", "title": "" }, { "docid": "dbe028d75d3c6315dabd8e9b98ba9c62", "score": "0.5488601", "text": "def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())", "title": "" }, { "docid": "1a19ca1ca7b59ade6fe2d4c08db06883", "score": "0.54719144", "text": "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "title": "" }, { "docid": "1c6ddcde0dac42ebd509c2854c0f35b2", "score": "0.5467421", "text": "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = \"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "title": "" }, { "docid": "8745b8a1759c5fba4c9367d591ff9c43", "score": "0.5457866", "text": "def SetPoint(self, pt):\r\n \r\n self._pointDrag = pt", "title": "" }, { "docid": "37be3b6f793a15da73c9c02cc9d130a9", "score": "0.5451839", "text": "def set_mode_point():\n global DRAW_MODE\n DRAW_MODE=\"point\"", "title": "" }, { "docid": "f3fe0db4f9c99b0b9861877a27d03b5e", "score": "0.5432437", "text": "def SetToolBitmap(self, tool_id, bitmap):\r\n \r\n tool = self.FindTool(tool_id)\r\n if tool:\r\n tool.bitmap = bitmap", "title": "" }, { "docid": "4160dc3fe4b4e2dfeda32ea15b453aeb", "score": "0.54221994", "text": "def setEditCursor(self, event):\n self.editMode = True\n self.updateCursor(\"X_cursor\")\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if not (0 <= x < self.rows and 0 <= y < self.cols):\n return\n if not self.gridBusy[x][y]:\n return\n assert len(self.history) >= self.gridBusy[x][y]\n self.lastChanged = self.gridBusy[x][y]\n self.changeColor(self.lastChanged, self.colors['pent_edit'])", "title": "" }, { "docid": "c3995defbd3810c85b73a4393514d567", "score": "0.5412216", "text": "def SetShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_SetShape(self, *args)", "title": "" }, { "docid": "184468e8f2d33139dfc9188d9d030e34", "score": "0.54023683", "text": "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "title": "" }, { "docid": "7fdc9dfbe1bf41c50b5cecb874e4721f", "score": "0.5389474", "text": "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "title": "" }, { "docid": "199194a5fbedf542296f7a8eade9c5a6", "score": "0.53769135", "text": "def ShapeTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ShapeTool(*args)", "title": "" }, { "docid": 
"d36669c5ba5fc974455b4e301b3c4814", "score": "0.5364598", "text": "def setPlotFocus(pt): \n if pt not in [None]+plotFocusList:\n return None\n simuConfig[\"PLOT.FOCUS\"] = pt", "title": "" }, { "docid": "3cc87ab730cd89330c79fc8ea4315af5", "score": "0.5360223", "text": "def _point_edited(self, _var_name, _list_index, _operation):\r\n #update the point only if:\r\n #- the user edited the var (not just a point selection)\r\n #- the input string can be parsed to ints\r\n if (not self.inhibit_callbacks and\r\n is_float(self.editable_x.get()) and is_float(self.editable_y.get())):\r\n self.command_stack.do(model.structure.UpdatePoint(self._structure,\r\n self._point_index,\r\n float(self.editable_x.get()),\r\n float(self.editable_y.get())))", "title": "" }, { "docid": "7b5c347ac15b022e84104dfdcd68d85f", "score": "0.53417534", "text": "def _moveTo(self, pt):\n self._handleAnchor()\n t = \"M%s\" % (pointToString(pt))\n self._commands.append(t)\n self._lastCommand = \"M\"\n self._lastX, self._lastY = pt", "title": "" }, { "docid": "116704bd43b0126d2d03f732735fb9b3", "score": "0.5341653", "text": "def onSelPoint(self, evt=None, opt='__', relative_e0=False, win=None):\n if opt not in self.wids:\n return None\n\n _x, _y = last_cursor_pos(win=win, _larch=self.larch)\n\n if _x is not None:\n if relative_e0 and 'e0' in self.wids:\n _x -= self.wids['e0'].GetValue()\n self.wids[opt].SetValue(_x)", "title": "" }, { "docid": "78b34a524d99dd7c19ca6935b001d099", "score": "0.53082114", "text": "def setcoordsys(self, csys):\n return _image.image_setcoordsys(self, csys)", "title": "" }, { "docid": "1e324c13084c8c9e806bf6efdef00fb1", "score": "0.529727", "text": "def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0", "title": "" }, { "docid": "1e324c13084c8c9e806bf6efdef00fb1", "score": "0.529727", "text": "def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0", "title": "" }, { "docid": "3612d1dea60eaccff99300c2c5395029", "score": "0.52950686", "text": "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "title": "" }, { "docid": "c48c86221def32756d1f5f18c27d0710", "score": "0.5283977", "text": "def new_position_edit(self, p):\n\n DBG(\"new edit position\")\n if self.mode != 'view':\n self.edit_widget.new_text(p.b)", "title": "" }, { "docid": "2868906903cf86217fc67a28ffccb62b", "score": "0.5280011", "text": "def _change_shape(self,x,y,w,h):\n top = y \n left = x\n right = x + w\n bottom = y + h\n return top,right,bottom,left", "title": "" }, { "docid": "f519b4308730b71a962a98e1629b641e", "score": "0.5272074", "text": "def doOptPoint(opnt):\n s.setScriptBool(odi.INDX_BOOL_DO_OPT_POINT, opnt)\n s.setScriptBool(odi.INDX_BOOL_DO_OPT_POINT_SET, True)", "title": "" }, { "docid": "78b359f49d93aaab54ee4a9ad6920d59", "score": "0.5263257", "text": "def set_canvas_coords(self, visual_x, visual_y, visual_r):\n self.visual_y = visual_y\n self.visual_x = visual_x\n self.visual_r = visual_r", "title": "" }, { "docid": "aca4e725c5b55f83f566c56b17e4c4ad", "score": "0.5262514", "text": "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "title": "" }, { "docid": "73017b0fd3a5e00521ba878080b8bb2a", "score": "0.5249553", "text": "def set_xy(self, x, y, val):\r\n\t\tself.grid[y, x] = val", "title": "" }, { "docid": "3db9b9c5f6e8e42e0c1054c0945b1366", "score": "0.52366436", "text": "def set(self, x, y):\n self.x = x\n self.y = y", "title": "" }, { "docid": 
"9ae67281c8c249b11553c012913b51f9", "score": "0.5235385", "text": "def set_plot(self, plot):\n self.pw = plot\n self._setup_widget()", "title": "" }, { "docid": "ef69c92f63e43f48a1edd55d48b0a292", "score": "0.52345103", "text": "def modifyPoint(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "26b7ebe4597757c357c849e1608917ef", "score": "0.52330786", "text": "def set_point(self, x, y):\n self._x = x\n self._y = y", "title": "" }, { "docid": "cd826a1f777364ecc9efba69c21a3e7f", "score": "0.5232795", "text": "def set_current_tool_to_none(self):\n\n self.variables.active_tool = None\n self.variables.current_tool = None", "title": "" }, { "docid": "b48b91acad758b914309a41dfebff38e", "score": "0.5223926", "text": "def SetGuideShape(self, event=None):\r\n\r\n self.SetShape(self.region) \r\n \r\n if event is not None:\r\n # Skip the event on wxGTK\r\n event.Skip()\r\n wx.CallAfter(wx.SafeYield, self, True)", "title": "" }, { "docid": "cc4d32d9cd328ea9c0ef7334cd997116", "score": "0.5220837", "text": "def editBuildPoints(self, data=None, event=None, axes=None):\n\n if event.button == 1:\n suc = self.addBuildPoint(data=data, event=event, axes=axes)\n elif event.button == 3:\n suc = self.deleteBuildPoint(data=data, event=event)\n else:\n return False\n\n # redraw the corrected canvas (new positions ans new numbers)\n if len(data.buildP):\n y, x = zip(*data.buildP)\n else:\n y = x = []\n self.pointsBuild.set_data(x, y)\n for i, _ in enumerate(data.buildP):\n self.pointsBuildAnnotate[i].set_text('{0:2d}'.format(i))\n self.drawHemisphere()\n return suc", "title": "" }, { "docid": "7e3e77284528338331b4edcafecb640b", "score": "0.52184117", "text": "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "title": "" }, { "docid": "a6a0c41f10846254965301244b1f24be", "score": "0.5218308", "text": "def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0", "title": "" }, { "docid": "a6a0c41f10846254965301244b1f24be", "score": "0.5218308", "text": "def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0", "title": "" }, { "docid": "a6a0c41f10846254965301244b1f24be", "score": "0.5218308", "text": "def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0", "title": "" }, { "docid": "34acb6a2f1ed7110e311c50e91bafc54", "score": "0.5217162", "text": "def ShapeTool(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_ShapeTool(self, *args)", "title": "" }, { "docid": "84f8b3b3e81f540be57e531cd633317b", "score": "0.521645", "text": "def SetShape(self, region):\r\n \r\n if wx.Platform == '__WXMAC__':\r\n # HACK so we don't crash when SetShape is called\r\n return\r\n else:\r\n super(AuiDockingHintWindow, self).SetShape(region)", "title": "" }, { "docid": "b22b85487dced5a335299f106fae578a", "score": "0.5207037", "text": "def open_attr_editing(self, element) -> None:\n if self.attr_editing_window is not None:\n self.close_attr_editing()\n else:\n position = wx.GetMousePosition()\n self.attr_editing_window = AttributeEditingFrame(self, wx.ID_ANY,\n position=position,\n element=element)\n figure_element = self.graph_to_figure[element]\n figure_element.annotation = self.annotate_element(figure_element)", "title": "" }, { "docid": "62484c145fe80ec2bd80112395c24cb3", "score": "0.52063084", "text": "def setreferencelocation(self, *args, **kwargs):\n return _coordsys.coordsys_setreferencelocation(self, *args, **kwargs)", "title": "" }, { "docid": 
"94bf906f107bbc084acb95ab6cccf7d7", "score": "0.5205065", "text": "def SetToolId(self, id):\r\n\r\n self.tool_id = id", "title": "" }, { "docid": "51670d325ea9186552ff2ba1385d7541", "score": "0.520336", "text": "def setOriginLines(val=\"xy\"):\n if val == \"x\":\n dislin.xaxgit()\n elif val == \"y\":\n dislin.yaxgit()\n elif val == \"cross\":\n dislin.cross()\n else:\n dislin.axgit()", "title": "" }, { "docid": "c7212f055a18d9fd69d83b4fdb25eb72", "score": "0.5202053", "text": "def setPoint(self, set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0", "title": "" }, { "docid": "cc8db9634c3e48d9a0a3334e1e0b7ce4", "score": "0.51998043", "text": "def set_geometry(self, selection_name, geometry):", "title": "" }, { "docid": "d2da8e7c2701f55a8eae6e8f5395e356", "score": "0.51980567", "text": "def SetGuideShape(self, event=None):\r\n\r\n self.SetShape(self.region) \r\n\r\n if event is not None:\r\n # Skip the event on wxGTK\r\n event.Skip()\r\n wx.CallAfter(wx.SafeYield, self, True)", "title": "" }, { "docid": "8d97e129cd1b2fda413031194f84f685", "score": "0.51974916", "text": "def XCAFDoc_DocumentTool_ShapeTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ShapeTool(*args)", "title": "" }, { "docid": "0bccacff91cea87f3859ab5f58f40e4c", "score": "0.51964843", "text": "def setOperationMode(self):\n\n if self.ui.checkEditNone.isChecked():\n self.operationMode = 'normal'\n elif self.ui.checkEditBuildPoints.isChecked():\n self.operationMode = 'build'\n elif self.ui.checkEditHorizonMask.isChecked():\n self.operationMode = 'horizon'\n elif self.ui.checkPolarAlignment.isChecked():\n self.operationMode = 'star'\n\n self.drawHemisphere()\n\n return True", "title": "" }, { "docid": "305ec53455f658a2e49cf8896fee17b1", "score": "0.51878893", "text": "def startUpdateAddressTool(self):\n\n self.iface.mapCanvas().setMapTool(self._updateaddtool)\n self._updateaddtool.setEnabled(True)", "title": "" }, { "docid": "9542922e0574d8778cd804489489217e", "score": "0.5179393", "text": "def set_stroke_move(self, use_stroke=True):\r\n self.board.set_stroke_move(use_stroke)", "title": "" } ]
b0e2c74cf887e7bfcc8bd8b5160d7677
Checks whether the currently logged in user can create or edit a task, when the specified role is required.
[ { "docid": "d3bcab63cb65b2d503d97fe659bfb779", "score": "0.77631235", "text": "def canCreateTaskWithRequiredRole(self, required_role):\n if required_role == 'mentor':\n valid_org_keys = [o.key() for o in self.data.mentor_for]\n elif required_role == 'org_admin':\n valid_org_keys = [o.key() for o in self.data.org_admin_for]\n else:\n raise ValueError('Invalid required_role argument ' + str(required_role))\n\n if self.data.organization.key() not in valid_org_keys:\n raise exception.Forbidden(message=DEF_NO_TASK_CREATE_PRIV % (\n self.data.organization.name))\n\n if (time.isBefore(self.data.timeline.orgsAnnouncedOn()) \\\n or self.data.timeline.tasksClaimEnded()):\n raise exception.Forbidden(message=access_checker.DEF_PAGE_INACTIVE)", "title": "" } ]
[ { "docid": "f488f928e748390b66273f859a86562a", "score": "0.7407585", "text": "def canCreateTask(self):\n return self.canCreateTaskWithRequiredRole('mentor')", "title": "" }, { "docid": "92dd1cad5bff8325c5e3ca5ccdb34116", "score": "0.7034055", "text": "def checkCanUserEditTask(self):\n assert access_checker.isSet(self.data.task)\n\n if not self.canUserEditTask():\n raise exception.Forbidden(\n message=DEF_NO_TASK_EDIT_PRIV % (self.data.task.org.name))", "title": "" }, { "docid": "56cfa593d3bb80df284874381ce18dae", "score": "0.6649711", "text": "def has_permission(self, request, view=None):\n return \"A\" in request.user.roles", "title": "" }, { "docid": "d60791a0c88e73f2aa7f9169f6d80852", "score": "0.6619122", "text": "def canUserEditTask(self):\n return self.data.mentorFor(self.data.task.org.key())", "title": "" }, { "docid": "ecdf78709c15caeefb680b4139026583", "score": "0.65575725", "text": "def fullfills_role(self, role):\n if self.role == role:\n return True\n return self.role == \"admin\" and role == \"user\"", "title": "" }, { "docid": "782eaafa6fe0233dc1a7bcdd675b9cf9", "score": "0.64934784", "text": "def has_permission(self, request, view=None):\n return \"U\" in request.user.roles", "title": "" }, { "docid": "3d1ea657cd4f33c5ce68383e2b3c2f65", "score": "0.6471111", "text": "def canBulkCreateTask(self):\n return self.canCreateTaskWithRequiredRole('org_admin')", "title": "" }, { "docid": "eb0a918ab07dd986f4e035af7b0b49a0", "score": "0.6466357", "text": "def has_role(roles, object=None):", "title": "" }, { "docid": "76b15f1cb7b36d77ddf9caa66d158d06", "score": "0.6359312", "text": "def check_permission(self, user, event):\n if event.project is not None:\n if event.classified:\n return user.has_perm(\n 'timeline.view_classified_event', event.project\n )\n else:\n return user.has_perm('timeline.view_timeline', event.project)\n else:\n if event.classified:\n return user.has_perm('timeline.view_classified_site_event')\n else:\n return user.has_perm('timeline.view_site_timeline')", "title": "" }, { "docid": "b42ab9f2626da73a3013bb4716ec113a", "score": "0.62994164", "text": "def has_permission(self, request, view):\n\n project = Project.objects.get(pk=view.kwargs['project_pk'])\n\n project_rules = {\n 'GET': ['guest', 'member', 'admin'],\n 'POST': ['member', 'admin'],\n 'PUT': ['member', 'admin'],\n 'DELETE': ['admin']\n }\n\n is_authorized = False\n if request.method in project_rules:\n is_authorized = keylib.keycloak_verify_user_role(request, project.slug, project_rules[request.method])\n\n print('Is authorized: {}'.format(is_authorized))\n return is_authorized", "title": "" }, { "docid": "a64613aaf7c7b50058396f73ef67d9a3", "score": "0.6126023", "text": "def requires_roles(*roles):\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n if getCurrentUserRole() not in roles:\n return privilegeError()\n return f(*args, **kwargs)\n return wrapped\n return wrapper", "title": "" }, { "docid": "9e7436909c2d7664d0812f6a20ec9f35", "score": "0.60872024", "text": "def student_test_func(self):\n\n if self.request.user.role == User.STUDENT:\n return True\n return False", "title": "" }, { "docid": "74d793dd86121b3447b5723c7cc89fc2", "score": "0.60815305", "text": "def checkHasTaskEditableStatus(self):\n if not task_logic.hasTaskEditableStatus(self.data.task):\n raise exception.Forbidden(message=DEF_TASK_UNEDITABLE_STATUS)", "title": "" }, { "docid": "fde59820f60535e25a2552869ec882b7", "score": "0.60704356", "text": "def timelineAllowsTaskEditing(self):\n return not 
(time.isBefore(self.data.timeline.orgsAnnouncedOn()) or\n self.data.timeline.tasksClaimEnded())", "title": "" }, { "docid": "2a7e6ada7222c52adbc878cc2eca3cb8", "score": "0.59609294", "text": "def has_perm(self, user):\n return self.flow_task.can_execute(user, self.task)", "title": "" }, { "docid": "946efe4a0c263352019664b04ff558be", "score": "0.5906411", "text": "def permitted( allowed_roles, user_roles ):\n\n for role in user_roles:\n if role in allowed_roles:\n return True;\n return False;", "title": "" }, { "docid": "fa893301d77b27d5f7e3311b5131f981", "score": "0.590549", "text": "def test_allowed_if_in_task(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n request = self.factory.get(\"/\")\n request.META[_TASK_NAME_HEADER] = \"test\"\n\n with sleuth.fake(\"djangae.tasks.decorators.is_in_task\", True):\n response = view(request)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "53f730ba06ba86c33e27e369ad61a86d", "score": "0.58872634", "text": "def can_edit(self, user):\n return user in self.organisers or user.is_admin()", "title": "" }, { "docid": "2763a9833378d71b08d667a91dbfd3ce", "score": "0.5885625", "text": "def has_permission(self, request, view):\n return request.method == 'POST' or \\\n (request.user and request.user.is_staff)", "title": "" }, { "docid": "94a82e8b5d74a5ed05c4d3e63c4c7a50", "score": "0.58759505", "text": "def test_func(self):\n if not self.pos.team.permission_set:\n raise PermissionDenied(\"No permissions set defined for this team\")\n if not self.request.user.has_perm(\"camps.backoffice_permission\"):\n raise PermissionDenied(\"User has no backoffice permission\")\n\n if not self.request.user.has_perm(\n \"camps.orgateam_permission\",\n ) and not self.request.user.has_perm(\"camps.\" + self.pos.team.permission_set):\n raise PermissionDenied(\"User has no permission for this Pos\")\n return True", "title": "" }, { "docid": "a91eb644364192f9e23f59bd2ef5e987", "score": "0.5858682", "text": "def test_can_user_perform_not_create(cursor, make_user_roles, getfcn):\n newfcn, obj_type = getfcn\n info = make_user_roles('create', obj_type, True)\n authid = info['user']['auth0_id']\n obj = newfcn(org=info['org'])\n cursor.execute('SELECT can_user_perform_action(%s, %s, %s)',\n (authid, obj['id'], 'create'))\n assert cursor.fetchone()[0] == 0", "title": "" }, { "docid": "23070531210e9653b7c3ac715a6597ec", "score": "0.58564657", "text": "def test_assign_role_to_none_existing_user(self):\n CommonTestCases.admin_token_assert_in(\n self,\n assign_role_to_non_existing_user_mutation,\n \"User not found\"\n )", "title": "" }, { "docid": "012043af738d6324210ae1a3d1cd54ff", "score": "0.58311975", "text": "async def is_allowed(ctx: commands.Context, roles: List[int]) -> bool:\n\n perms = ctx.channel.permissions_for(ctx.author)\n\n if perms.administrator:\n return True\n\n for rid in roles:\n if rid in [role.id for role in ctx.author.roles]:\n return True\n\n else:\n return False", "title": "" }, { "docid": "8b6178b793ed80356099b411ca5a830f", "score": "0.58218074", "text": "def has_role(roles):\n\n def decorator(decorated_function):\n\n @wraps(decorated_function)\n def wrapper(*args, **kwds):\n if current_user.role in roles:\n return decorated_function(*args, **kwds)\n flash('You do not have permission to perform this action.')\n return redirect(url_for('.index'))\n\n return wrapper\n\n return decorator", "title": "" }, { "docid": "14781758bf07cb304f4e0ab2549b637a", "score": "0.58147204", "text": "def 
has_permission_to_edit_restaurant(user, restaurant_id):\r\n if user.is_superuser:\r\n return True\r\n elif is_restaurant_manager(user, restaurant_id):\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "1675749cda9b709b7813bea3c8520137", "score": "0.5791716", "text": "def has_permission(self, request, view):\n if view.kwargs['user_id'] in ['me', str(request.user.id)]:\n return True\n return False", "title": "" }, { "docid": "50f6fa008f15de7a08b263b9c42e65c3", "score": "0.5789846", "text": "def test_mismatch(self, client, anonymous_role, role, view_permission):\n obj = TestModel.objects.get_or_create(name=\"bla\")[0]\n\n RolePermission.assign(obj, role, view_permission)\n\n assert not anonymous_role.has_access(obj, view_permission)", "title": "" }, { "docid": "940581660fc254a97e6e3fe68d58e330", "score": "0.57897365", "text": "def has_perm(self, user):\n return self.flow_task.can_execute(user)", "title": "" }, { "docid": "5ec4793ab29f70984e758c63a76f2704", "score": "0.57861423", "text": "def test_user_can_create_multiple_permissions(cursor, make_user_roles,\n new_role, new_permission):\n info = make_user_roles('create', 'forecasts')\n org = info['org']\n user = info['user']\n cursor.execute(\n 'SELECT user_can_create(%s, %s)',\n (user['auth0_id'], 'forecasts'))\n assert cursor.fetchone()[0] == 1\n role = new_role(org=org)\n perm = new_permission('create', 'forecasts', True, org=org)\n cursor.execute(\n 'INSERT INTO role_permission_mapping (role_id, permission_id) '\n 'VALUES (%s, %s)', (role['id'], perm['id']))\n cursor.execute(\n 'INSERT INTO user_role_mapping (user_id, role_id) VALUES (%s, %s)',\n (user['id'], role['id']))\n cursor.execute(\n 'SELECT user_can_create(%s, %s)',\n (user['auth0_id'], 'forecasts'))\n assert cursor.fetchone()[0] == 1", "title": "" }, { "docid": "7a9a9ad0c155c8cc4f37d738556b6937", "score": "0.57825303", "text": "def assignable_to_role(self) -> bool:\n return pulumi.get(self, \"assignable_to_role\")", "title": "" }, { "docid": "3944d8c743e1dce86785ec2db75d689a", "score": "0.5766206", "text": "def can_edit(self, auth=None, user=None):\n if not auth and not user:\n raise ValueError('Must pass either `auth` or `user`')\n if auth and user:\n raise ValueError('Cannot pass both `auth` and `user`')\n user = user or auth.user\n\n return (\n user and ((self.has_permission(user, WRITE) and self.has_submitted_preprint) or self.has_permission(user, ADMIN))\n )", "title": "" }, { "docid": "7c38d86faa6e79f112dc065cb36e18bf", "score": "0.5760112", "text": "def test_user_can_edit_obj_false_without_permission(self):\n self.assertFalse(self.permission_helper.user_can_edit_obj(self.user, self.form))", "title": "" }, { "docid": "15ceed6152c7998d6409c74a5ccb7dc6", "score": "0.5740985", "text": "def has_permission(self, request, view):\n return request.user", "title": "" }, { "docid": "8353271c928243d323e78f0988cf1127", "score": "0.57408154", "text": "def has_permission(self, request, view):\n return view.action in ('list', 'retrieve') or request.user.is_authenticated", "title": "" }, { "docid": "42ecbaf86989e1116f60ef8c4db54fea", "score": "0.5739328", "text": "def get_editable_privilege(self, user):\n return user.get('role') in ('admin', 'manager', 'engineer')", "title": "" }, { "docid": "df6c9bbe269a3d9a01114c5e14bfd91a", "score": "0.573695", "text": "async def check_allowed_role(self, ctx, role):\n guild_roles = ctx.guild.roles\n me = ctx.guild.me\n role_str = role[3:-1]\n role_index = math.inf\n for i in range(len(guild_roles)):\n if role_str == 
str(guild_roles[i].id):\n role_index = i\n break\n highest_role_index = guild_roles.index(me.top_role)\n return highest_role_index >= role_index", "title": "" }, { "docid": "120c4a1290d15fd92d3a74666034367c", "score": "0.5728156", "text": "def checkTimelineAllowsTaskEditing(self):\n if not self.timelineAllowsTaskEditing():\n raise exception.Forbidden(message=access_checker.DEF_PAGE_INACTIVE)", "title": "" }, { "docid": "87dc6582e3b3a0ec540d47c0f5aee082", "score": "0.5710522", "text": "def check_project_role(self,conn):\n query = \"SELECT * FROM project_role where projectID = \" + str(self.project_id) + \" AND ID = \" + str(self.role_invite) + \" ;\"\n result = conn.execute(query)\n if result.rowcount > 0:\n return True\n return False", "title": "" }, { "docid": "c3035f025272e49b86e8848de9a324ed", "score": "0.57057935", "text": "def canApplyNonStudent(self, role, edit_url):\n self.isLoggedIn()\n\n if self.data.profile and not self.data.profile.student_info:\n raise RedirectRequest(edit_url)\n\n if not self.data.profile:\n return\n\n raise AccessViolation(DEF_ALREADY_PARTICIPATING_AS_STUDENT_MSG % (\n role, self.data.program.name))", "title": "" }, { "docid": "733a478766ac7a97bd7ef4d9eb28b830", "score": "0.5679277", "text": "def check_role(self, identifiers, role_s, logical_operator):", "title": "" }, { "docid": "db7f3a94c0c21a29f0699761457a90c6", "score": "0.56792504", "text": "def test_role (self):\n self._test_typed(self.create_role())", "title": "" }, { "docid": "9f85bf5fd768266abdfd633ab6bfe2ab", "score": "0.56607956", "text": "def test_user_can_create(cursor, make_user_roles, getfcn):\n newfcn, obj_type = getfcn\n info = make_user_roles('create', obj_type, True)\n authid = info['user']['auth0_id']\n newfcn(org=info['org'])\n cursor.execute('SELECT user_can_create(%s, %s)',\n (authid, obj_type))\n assert cursor.fetchone()[0] == 1", "title": "" }, { "docid": "4a1cfb70d851111e1c2c524100a26347", "score": "0.5660615", "text": "def _is_task_visible(context, task):\n # Is admin == task visible\n if context.is_admin:\n return True\n\n # No owner == task visible\n if task['owner'] is None:\n return True\n\n # Perform tests based on whether we have an owner\n if context.owner is not None:\n if context.owner == task['owner']:\n return True\n\n return False", "title": "" }, { "docid": "6869ac5e72de36575644a21eba572de9", "score": "0.565413", "text": "def has_permission(self):\n return self.request.user.first_name == 'Bob'", "title": "" }, { "docid": "e83720a4090fe97744f6b8fc3cd233fd", "score": "0.56504536", "text": "def test_create_role(self):\n pass", "title": "" }, { "docid": "37aaca365f8a5dbe99c0ab6e8e095a07", "score": "0.56453943", "text": "def test_user_can_edit_obj_true(self):\n self.user.user_permissions.add(self.change_permission)\n self.assertTrue(self.permission_helper.user_can_edit_obj(self.user, self.form))", "title": "" }, { "docid": "f0e30730900f7c80f96c0889b6937cf3", "score": "0.5643977", "text": "async def can_use(self,\n ctx: commands.Context) -> bool:\n\n def _(perm: str):\n roles = [r for r in self.bot.config[\"roles\"].items() if perm in r[1]]\n \n for role, actions in roles:\n if role == \"everyone\" and perm in actions:\n return True\n\n if role in (r.id for r in ctx.author.roles) and perm in actions:\n return True\n\n ctx.can_use = _\n return True", "title": "" }, { "docid": "39adca8b42fd6470d08317ace4604cdc", "score": "0.5634641", "text": "def test_set_role(self):\n pass", "title": "" }, { "docid": "5d8e3b7501144b34a1acc436e7e2cb92", "score": "0.5634189", "text": "def 
has_role(self, user, role):\r\n roles = self.get_roles(user)\r\n if role in roles:\r\n return True\r\n return False", "title": "" }, { "docid": "f8cdcb8c8579c5c12158960a1044b9c5", "score": "0.5628957", "text": "def test_not_admin_create_role(self):\n warnings.simplefilter(\"ignore\")\n request_token = self.client().get('/api/login/',\n headers={'Content-Type': 'application/json',\n 'Authorization': _basic_auth_str(self.user_not_admin.get('username'),\n self.user_not_admin.get('password'))})\n json_data = json.loads(request_token.data)\n self.user_not_admin['token'] = json_data.get('token')\n json_role = {'name': string_generator()+'role_test_name', 'comment': string_generator()+'role_test_comment'}\n request_role_not_admin_create = self.client().post('/api/role/', headers={'Content-Type': 'application/json',\n 'x-access-token': self.user_not_admin['token']},\n data=json.dumps(json_role))\n json_data = json.loads(request_role_not_admin_create.data)\n self.assertTrue(json_data.get('message'))\n self.assertEqual(json_data.get('message'), 'Cannot perform that function!')\n self.assertEqual(request_role_not_admin_create.status_code, 200)", "title": "" }, { "docid": "c1805ec7b2de99f4d8ff8c96c53c2fec", "score": "0.56196016", "text": "async def permits(self, identity: str, permission: UserRole, context=None):\n log.debug(\"context: %s\", context)\n\n if identity is None or permission is None:\n return False\n\n async with self.engine.acquire() as conn:\n query = users.select().where(\n sa.and_(users.c.email == identity,\n users.c.status != UserStatus.BANNED)\n )\n ret = await conn.execute(query)\n user = await ret.fetchone()\n if user is not None:\n return permission <= user['role']\n return False", "title": "" }, { "docid": "a1572aeae84c21fde797f4c40bbc0f48", "score": "0.56117076", "text": "def participant(self):\n log = self._params.get('log', self._discard)\n context = self._context_build(pending=True)\n conf = self._config_pending\n\n if conf.get('control') == 'off':\n log.debug(\"Excluding task '%s' -- control is off\", self._name)\n return False\n\n # If role-set is None (but not the empty set)\n # then role processing is inhibited.\n #\n active_roles = self._legion.get_roles()\n if active_roles is None:\n log.debug(\"Including task '%s' -- role processing is inhibited\", self._name)\n return True\n\n # If roles are present, at least one has to match the role-set.\n # If none are present, the task is always included.\n #\n roles = self._get_list(conf.get('roles'), context=context)\n\n # If a task has no roles listed, then it particpates\n # in all roles:\n #\n if not roles:\n log.debug(\"Including task '%s' -- no explicit roles\", self._name)\n return True\n\n for role in roles:\n if role in active_roles:\n log.debug(\"Including task '%s' -- has role '%s'\", self._name, role)\n return True\n log.debug(\"Excluding task %r -- no role matches %s\", self._name, active_roles)\n return False", "title": "" }, { "docid": "0b7c25b6170009ee6d49a2a3098591ff", "score": "0.56095564", "text": "def test_roles_and_permissions(self):\n u1 = User(email='[email protected]', password='cat')\n u2 = User(email='[email protected]', password='1234')\n # test normal user\n self.assertTrue(u1.can(Permission.WRITE_ARTICLES))\n self.assertFalse(u1.can(Permission.MODERATE_COMMENTS))\n # test the admin user\n self.assertTrue(u2.can(Permission.WRITE_ARTICLES))\n self.assertTrue(u2.can(Permission.MODERATE_COMMENTS))", "title": "" }, { "docid": "b9909f6474e67a6e7ee767adc16e4657", "score": "0.56042427", "text": "def 
test_can_user_perform_action_multiple_permissions(\n cursor, make_user_roles, new_permission, new_forecast):\n info = make_user_roles('read', 'forecasts', False)\n org = info['org']\n fx = new_forecast(org=org)\n perm = new_permission('read', 'forecasts', False, org=org)\n cursor.execute(\n 'INSERT INTO role_permission_mapping (role_id, permission_id) VALUES '\n '(%s, %s)', (info['role']['id'], perm['id']))\n cursor.executemany(\n 'INSERT INTO permission_object_mapping (permission_id, object_id)'\n ' VALUES (%s, %s)', [(perm['id'], fx['id']),\n (info['permission']['id'], fx['id'])])\n cursor.execute('SELECT can_user_perform_action(%s, %s, %s)',\n (info['user']['auth0_id'], fx['id'], 'read'))\n assert cursor.fetchone()[0] == 1", "title": "" }, { "docid": "f1b5a3e6a5e38d2cafcfc79990f7efaa", "score": "0.56015617", "text": "def IsAllowed(self, action, unused_user):\n if action in (crud_model.Actions.READ, crud_model.Actions.QUERY):\n return True\n return users.is_current_user_admin()", "title": "" }, { "docid": "3137fe86669e7879c6faa1b56f4710f0", "score": "0.55967635", "text": "def has_admin(self):\n return self.has_role(self.me(), 'admin')", "title": "" }, { "docid": "841fe7c6e39b35b8931d30050877e380", "score": "0.55778354", "text": "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "981a18de9290c4d7b01bfea4cd554bfb", "score": "0.55765575", "text": "def has_role(self, user, role):\n roles = self.get_roles(user)\n if role in roles:\n return True\n return False", "title": "" }, { "docid": "56feb981880c9f24f72145b069c1e0d3", "score": "0.5566783", "text": "def create_with_create_view_privilege_granted_directly_or_via_role(self, node=None):\n user_name = f\"user_{getuid()}\"\n role_name = f\"role_{getuid()}\"\n\n if node is None:\n node = self.context.node\n with user(node, f\"{user_name}\"):\n Scenario(test=create_with_create_view_privilege,\n name=\"create with create view privilege granted directly\")(grant_target_name=user_name, user_name=user_name)\n\n with user(node, f\"{user_name}\"), role(node, f\"{role_name}\"):\n with When(\"I grant the role to the user\"):\n node.query(f\"GRANT {role_name} TO {user_name}\")\n Scenario(test=create_with_create_view_privilege,\n name=\"create with create view privilege granted through a role\")(grant_target_name=role_name, user_name=user_name)", "title": "" }, { "docid": "a8614040060dbb74d3f8e94cd408b9d0", "score": "0.55229455", "text": "def _check_perm(self):\n for perm in self.require_permissions:\n if not self.request.user.has_perm(perm):\n return False\n allow = True\n if self.require_permissions_or:\n allow = False\n for perm in self.require_permissions_or:\n if self.request.user.has_perm(perm):\n allow = True\n return allow", "title": "" }, { "docid": "63195e701ca0857ee338f980180f691b", "score": "0.55165917", "text": "def is_allowed_to(self, user, right):\n try:\n perm = Permission.objects.get(\n organization=self,\n user=user)\n except Permission.DoesNotExist:\n return False\n else:\n return getattr(perm, right)", "title": "" }, { "docid": "87b24b95d8ddc02f64cc20a0ebd91b5e", "score": "0.55109304", "text": "def has_permission(self, request, view):\n if 
request.user.is_authenticated and hasattr(request.user, 'profile'):\n return True\n return False", "title": "" }, { "docid": "9b367cc026cbb4d89dd90bd2ab8f138a", "score": "0.55106694", "text": "def has_permission(self):\n try:\n MiembroProyecto.objects.get(user=self.request.user, proyecto__pk=self.kwargs[self.proyecto_param])\n return True\n except MiembroProyecto.DoesNotExist:\n return False", "title": "" }, { "docid": "23b384027c53c5b2a63c73d555ba9005", "score": "0.55001765", "text": "def validate(self, attrs):\n request = self.context.get(\"request\")\n user = getattr(request, \"user\", None)\n role = attrs.get(\"role\")\n\n # Update\n if self.instance:\n can_set_role_to = self.instance.get_abilities(user)[\"set_role_to\"]\n\n if role and role not in can_set_role_to:\n message = (\n _(\n f\"You are only allowed to set role to {', '.join(can_set_role_to)}\"\n )\n if can_set_role_to\n else _(\"You are not allowed to set this role for this course.\")\n )\n raise exceptions.PermissionDenied(message)\n\n # Create\n else:\n try:\n organization_id = self.context[\"organization_id\"]\n except KeyError as exc:\n raise exceptions.ValidationError(\n _(\n \"You must set a organization ID in context to create a new \"\n \"organization access.\"\n )\n ) from exc\n\n if not models.OrganizationAccess.objects.filter(\n organization=organization_id,\n user=user,\n role__in=[enums.OWNER, enums.ADMIN],\n ).exists():\n raise exceptions.PermissionDenied(\n _(\"You are not allowed to manage accesses for this organization.\")\n )\n\n if (\n role == enums.OWNER\n and not models.OrganizationAccess.objects.filter(\n organization=organization_id,\n user=user,\n role=enums.OWNER,\n ).exists()\n ):\n raise exceptions.PermissionDenied(\n \"Only owners of an organization can assign other users as owners.\"\n )\n\n attrs[\"organization_id\"] = self.context[\"organization_id\"]\n return attrs", "title": "" }, { "docid": "8ed22651820a6f600f1d647127b54483", "score": "0.54945415", "text": "def can_update(self, org):\n user = account.get_current_user()\n return user and (user.is_admin() or org.is_member(user))", "title": "" }, { "docid": "0f683512174698e3ea97070b34890acf", "score": "0.54925346", "text": "def user_has_access(self, user):\n if not user: return False\n query = db.Query(TaskListMember)\n query.filter('task_list =', self)\n query.filter('user =', user)\n return query.get()", "title": "" }, { "docid": "2eaf6d6eab5745586cfe2d561dc5c5d3", "score": "0.5491366", "text": "def test_changing_role_of_none_existing_user(self):\n CommonTestCases.admin_token_assert_in(\n self,\n change_role_of_non_existing_user_mutation,\n \"User not found\"\n )", "title": "" }, { "docid": "c67b346a084d143ecce6419a8fa3d992", "score": "0.5483716", "text": "def test_permissions(self):\n\n user = self.activity.organizer.user\n self.assertTrue(user.has_perm('activities.change_activity', self.activity))", "title": "" }, { "docid": "f877c711863c5e4506c9d3139f2c2615", "score": "0.5481929", "text": "def has_permission(self, request, view):\n return True", "title": "" }, { "docid": "f877c711863c5e4506c9d3139f2c2615", "score": "0.5481929", "text": "def has_permission(self, request, view):\n return True", "title": "" }, { "docid": "57c8aa8a8fff50bfccd3e6cc40a3a4e9", "score": "0.5479093", "text": "def test_has_perm_thread_edit(self):\n user = UserFactory()\n self.group.user_set.add(user)\n self.assertTrue(has_perm(user, \"forums.edit_forum_thread\", self.forum_1))\n self.assertFalse(has_perm(user, \"forums.edit_forum_thread\", self.forum_2))", "title": 
"" }, { "docid": "d5e693f6dc25e60f60603abc051b93ae", "score": "0.547332", "text": "def isTaskVisible(self):\n assert access_checker.isSet(self.data.task)\n\n # TODO(nathaniel): Yep, this is weird.\n can_edit = False\n try:\n self.checkCanUserEditTask()\n self.checkHasTaskEditableStatus()\n self.checkTimelineAllowsTaskEditing()\n can_edit = True\n except exception.UserError:\n pass\n\n if not self.data.timeline.tasksPubliclyVisible():\n if can_edit:\n return False\n period = self.data.timeline.tasksPubliclyVisibleOn()\n raise exception.Forbidden(\n message=access_checker.DEF_PAGE_INACTIVE_BEFORE % period)\n\n if not self.data.task.isAvailable():\n if can_edit:\n return False\n raise exception.Forbidden(message=access_checker.DEF_PAGE_INACTIVE)\n\n return True", "title": "" }, { "docid": "4595ee7cf32ec36188fd28ce5993e9cd", "score": "0.5467968", "text": "def checkAccess(self):\n self.check.isOrgAdmin()", "title": "" }, { "docid": "4d9f81bfd5d19764469da8b1f0580246", "score": "0.54677916", "text": "def test_permissions(self):\n\n user = self.calendar.activity.organizer.user\n\n self.assertTrue(user.has_perm('activities.change_calendar', self.calendar))\n self.assertTrue(user.has_perm('activities.delete_calendar', self.calendar))", "title": "" }, { "docid": "4f60f073253e039830bdfe2f3ed1abf9", "score": "0.5465252", "text": "def has_object_permission(self, request, view, obj):\n\n #si es superadmin o el usuario autenticado intentahacer GET, PUT o DELETE sobre su mismo perfil\n return request.user.is_superuser or request.user == obj", "title": "" }, { "docid": "0b47e859c629b4d24e049cda075e68a6", "score": "0.5461056", "text": "def mr_Perm():\n async def predicate(ctx):\n return ctx.guild is not None and ctx.author.guild_permissions.manage_roles and ctx.me.guild_permissions.manage_roles\n return commands.check(predicate)", "title": "" }, { "docid": "f54c35b6287f984cef4555f7bdefff89", "score": "0.5450945", "text": "def _check_task(self, task_list_dict):\n if task_list_dict is None:\n return False\n\n for field in self.man_fields:\n if field not in task_list_dict:\n return False\n\n return True", "title": "" }, { "docid": "b2abea28ce96097b2816798b9ee3cfae", "score": "0.5450063", "text": "def allow_action(self, roles, action, item):\n # SM: Note that calling get_item_actions will emit a query.\n item_actions = self.get_item_actions(action, item)\n\n if not item_actions:\n return action.model == 'restrict'\n ret_val = False\n # For DATASET_ACCESS only, user must have ALL associated roles\n if action == self.permitted_actions.DATASET_ACCESS:\n for item_action in item_actions:\n if item_action.role not in roles:\n break\n else:\n ret_val = True\n # For remaining actions, user must have any associated role\n else:\n for item_action in item_actions:\n if item_action.role in roles:\n ret_val = True\n break\n return ret_val", "title": "" }, { "docid": "c18696294563b4343dffc9b96a2e6c0b", "score": "0.5448278", "text": "def is_eligible_for(self, task_type: str) -> bool:\n assert task_type, 'Empty parameter `task_type` passed'\n\n return self.admin \\\n or task_type in chain(*(g.allowed_types for g in self.groups))", "title": "" }, { "docid": "25139f0d1c32202715647951d52cd78e", "score": "0.54458225", "text": "def default_can_edit(request, *args, **kwargs):\n return request.user.is_staff or request.user.is_superuser", "title": "" }, { "docid": "1c5d5eadfae5ec0523da4f3eeea9d5cc", "score": "0.5445631", "text": "def has_object_permission(self, request, view, obj):\n return view.action == 'retrieve' or obj.owner == 
request.user or request.user.is_superuser", "title": "" }, { "docid": "0d8c7d2238f2f655b30fc67be708efc0", "score": "0.54446983", "text": "def test_grant_user_role(self):\n with mock.patch(\n 'tower_cli.resources.role.Resource.role_write') as mock_write:\n kwargs = dict(user=1, type='read', project=3)\n self.res.grant(**kwargs)\n mock_write.assert_called_once_with(fail_on_found=False, **kwargs)", "title": "" }, { "docid": "4a47315f5e87027b3f83c960cbd8cd22", "score": "0.5444494", "text": "def allow_action( self, roles, action, item ):\n # SM: Note that calling get_item_actions will emit a query.\n item_actions = self.get_item_actions( action, item )\n\n if not item_actions:\n return action.model == 'restrict'\n ret_val = False\n # For DATASET_ACCESS only, user must have ALL associated roles\n if action == self.permitted_actions.DATASET_ACCESS:\n for item_action in item_actions:\n if item_action.role not in roles:\n break\n else:\n ret_val = True\n # For remaining actions, user must have any associated role\n else:\n for item_action in item_actions:\n if item_action.role in roles:\n ret_val = True\n break\n return ret_val", "title": "" }, { "docid": "044b6f099c96f3c82ac7af50ae9a57c7", "score": "0.5436252", "text": "def test_can_user_perform_action(cursor, make_user_roles, action, getfcn,\n other):\n newfcn, obj_type = getfcn\n info = make_user_roles(action, obj_type, True)\n authid = info['user']['auth0_id']\n obj = newfcn(org=info['org'])\n cursor.execute('SELECT can_user_perform_action(%s, %s, %s)',\n (authid, obj['id'], other))\n if action == other:\n assert cursor.fetchone()[0] == 1\n else:\n assert cursor.fetchone()[0] == 0", "title": "" }, { "docid": "67e9b48dd1d882e62d40776138b787d7", "score": "0.543467", "text": "def test_func(self):\n return not self.request.user.is_authenticated", "title": "" }, { "docid": "bbfd398f945c77be68bdf5ef0770f716", "score": "0.54344684", "text": "def check_permission(self, user_id, task_id):\n try:\n return task_storage.check_permission(user_id=user_id, task_id=task_id)\n\n except ObjectNotFound:\n logger.error(errs.TaskNotExistError().name)\n raise errs.TaskNotExistError()", "title": "" }, { "docid": "e50c47d453368d207e9d88ea8845471d", "score": "0.54328007", "text": "def has_role(self, identifiers, role_s):", "title": "" }, { "docid": "3f0734df809ff1328038fd1a3a3774e0", "score": "0.54320014", "text": "def validate(self, attrs):\n request = self.context.get(\"request\")\n user = getattr(request, \"user\", None)\n role = attrs.get(\"role\")\n\n # Update\n if self.instance:\n can_set_role_to = self.instance.get_abilities(user)[\"set_role_to\"]\n\n if role and role not in can_set_role_to:\n message = (\n _(\n f\"You are only allowed to set role to {', '.join(can_set_role_to)}\"\n )\n if can_set_role_to\n else _(\"You are not allowed to set this role for this course.\")\n )\n raise exceptions.PermissionDenied(message)\n\n # Create\n else:\n try:\n course_id = self.context[\"course_id\"]\n except KeyError as exc:\n raise exceptions.ValidationError(\n _(\n \"You must set a course ID in context to create a new course access.\"\n )\n ) from exc\n\n if not models.CourseAccess.objects.filter(\n course=course_id,\n user=user,\n role__in=[enums.OWNER, enums.ADMIN],\n ).exists():\n raise exceptions.PermissionDenied(\n _(\"You are not allowed to manage accesses for this course.\")\n )\n\n if (\n role == enums.OWNER\n and not models.CourseAccess.objects.filter(\n course=course_id,\n user=user,\n role=enums.OWNER,\n ).exists()\n ):\n raise exceptions.PermissionDenied(\n 
\"Only owners of a course can assign other users as owners.\"\n )\n\n attrs[\"course_id\"] = self.context[\"course_id\"]\n return attrs", "title": "" }, { "docid": "09ef3df548ab44f5c69c07b4dbf64601", "score": "0.5425371", "text": "def test_allowed_if_in_task(self):\n\n @task_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n request = self.factory.get(\"/\")\n with sleuth.fake(\"djangae.tasks.decorators.is_in_task\", True):\n response = view(request)\n\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "9697163cad37ea8679dfb03cba98236b", "score": "0.54238665", "text": "def can_edit_ticket(self, req, ticket_or_type):\n decision = False\n if ticket_or_type and req:\n resource = t_type = None\n if isinstance(ticket_or_type, AgiloTicket):\n resource = ticket_or_type.resource\n else:\n t_type = ticket_or_type\n policy = AgiloPolicy(self.env)\n decision = policy.check_ticket_edit(req.authname, resource, \n req.perm, t_type=t_type)\n return decision", "title": "" }, { "docid": "e603e79c1597b316ce478a569ed35d93", "score": "0.5422578", "text": "def test_func(self):\n transaction_to_view = self.get_object()\n try:\n related_to_self = self.request.user.pk == transaction_to_view.member.pk\n except AttributeError: # Transaction need not have a member\n related_to_self = False\n is_staffer = self.request.user.has_permission(\"core.view_all_transactions\")\n return is_staffer or related_to_self", "title": "" }, { "docid": "536824ac2809c6e4675641f925d9ab9a", "score": "0.54200953", "text": "def has_role(self, user, name):\n role = security.datastore.find_or_create_role(name)\n return user.has_role(role)", "title": "" }, { "docid": "d28b49d360237c23c97af2048a2a1104", "score": "0.54194367", "text": "def satisfies_requirement(self, required_roles):\n check_required_roles(required_roles, \"code\")\n for rr in required_roles:\n if not isinstance(self, rr):\n return False\n return True", "title": "" }, { "docid": "1ae701a10bea3b5cf46a54c7986b1103", "score": "0.5417459", "text": "async def permits(\n self,\n identity: str,\n permission: Union[str, tuple],\n context: Optional[dict] = None,\n ) -> bool:\n if identity is None or permission is None:\n log.debug(\n \"Invalid %s of %s. 
Denying access.\",\n f\"{identity=}\",\n f\"{permission=}\",\n )\n return False\n\n user = await self._get_active_user_with(identity)\n if user:\n role = user.get(\"role\")\n return await check_access(self.access_model, role, permission, context)\n\n return False", "title": "" }, { "docid": "c34dc718b8e7b617a485e5749dc5c69a", "score": "0.5417284", "text": "def is_allowed_to_edit(self, user):\n if self.owner_type == \"user\":\n if self.user == user:\n # The user is the owner of the petition\n return True\n else:\n return False\n else:\n # But it is an org petition\n try:\n perm = Permission.objects.get(\n organization=self.org,\n user=user\n )\n except Permission.DoesNotExist:\n # No such permission, denied\n return False\n else:\n return perm.can_modify_petitions", "title": "" }, { "docid": "c5afa1850502f0c5f6a828ef0715eb9a", "score": "0.5416518", "text": "def test_user_role_list_requires_auth(self):\n # values here don't matter because we should 401 before they're checked\n path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {\n 'tenant_id': uuid.uuid4().hex,\n 'user_id': uuid.uuid4().hex,\n }\n\n r = self.admin_request(path=path, expected_status=401)\n self.assertValidErrorResponse(r)", "title": "" }, { "docid": "6d68b9bc7c92cce7bfd9f11be69002e6", "score": "0.54098105", "text": "def has_permission(user, api_event):\n try:\n db_event = UserCalendarEvent.objects.get(event_id=api_event['id'])\n return user.username == db_event.edx_user or is_staff(\n user, db_event.user_calendar.username)\n except (ObjectDoesNotExist, KeyError) as e:\n log.warn(e)\n return False", "title": "" }, { "docid": "c5ee9062872090629f4525dc6a91b645", "score": "0.5407413", "text": "def user_can_edit_document(user, **kwargs):\n return user.is_staff or user.has_perm('document_catalogue.change_document')", "title": "" }, { "docid": "2dea63c9f2d9c1a42b9f94ba54436f6d", "score": "0.5402014", "text": "def roles_accepted(*roles):\n def wrapper(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n perm = Permission(*[RoleNeed(role) for role in roles])\n if not perm.can():\n abort(HTTPStatus.FORBIDDEN)\n return fn(*args, **kwargs)\n return decorated_view\n return wrapper", "title": "" }, { "docid": "6b0c24373469bccbf2c744b4b9970df5", "score": "0.53971523", "text": "def _canAccessRequestEntity(self, entity, user, org):\n # check if the entity is addressed to the current user\n if user.key() != self.data.user.key():\n # check if the current user is an org admin for the organization\n self.isOrgAdmin()", "title": "" } ]
1bde121e390fc36fc99ed0b7fd77c9d2
Add product to the cart and update its quantity
[ { "docid": "3cb7c65e6952b839dff8153624c77b7c", "score": "0.76845706", "text": "def add_cart(request, product_id):\n product = Product.objects.get(id=product_id)\n \n try:\n cart = Cart.objects.get(cart_id=__cart_id(request))\n except Cart.DoesNotExist:\n cart = Cart.objects.create(cart_id=__cart_id(request))\n cart.save()\n \n try:\n cart_item = CartItem.objects.get(product=product, cart=cart)\n \n if cart_item.quantity < cart_item.product.stock:\n cart_item.quantity += 1\n \n cart_item.save()\n except CartItem.DoesNotExist:\n cart_item = CartItem.objects.create(product=product, quantity=1,cart=cart)\n cart_item.save()\n \n return redirect('shop')", "title": "" } ]
[ { "docid": "038040f6e0eb53f4d3d4004bd145c921", "score": "0.862083", "text": "def add(self, product, quantity=1, update_quantity=False): # get product,quantity,update_quantity from cart/views.py\n product_id = str(product.id) # if the item didn't in the cart, initialize it\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': 0,\n 'price': str(product.price)}\n if update_quantity: # if it should be updated,we set specific product quantity\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "2919b89cc1ec9a2e57ee3aefc5e9234e", "score": "0.86157006", "text": "def add_to_cart(self, product, quantity=1):\n\n if product in self.current_products:\n self.current_products[product]['quantity'] += quantity\n else:\n self.current_products[product] = {\n 'quantity': quantity\n }", "title": "" }, { "docid": "1592fbec8cf55a7e3f57b6079c1b7f5c", "score": "0.8459186", "text": "def add(self, product, quantity=1, initial_update_quantity=False):\n product_id = str(product.id)\n if product.id not in self.cart:\n self.cart[product_id] = {\n 'quantity': 0,\n 'price': str(product.price)\n }\n\n if initial_update_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n\n self.save()", "title": "" }, { "docid": "df85cdf2ef42a2be978629686fcb5e08", "score": "0.8425215", "text": "def add(self, product, quantity=1):\n product_id = str(product.id)\n if product_id not in self.cart:\n price = product.discount_price if product.discount_price else product.price\n self.cart[product_id] = {'quantity': 0, 'price': str(price)}\n self.cart[product_id]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "c26a50093c8fb0ec1bbb94273cd63899", "score": "0.82972884", "text": "def add(self, product, quantity=1, override_quantity=False):\n product_id = str(product.id)\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': 0,\n 'price': str(product.price)}\n if override_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "f3607fa13e648ff8650408d622414165", "score": "0.82525384", "text": "def add_to_cart(self, request, pk=None):\n cart = self.get_object()\n try:\n product = Product.objects.get(pk=request.data[\"product_id\"])\n quantity = int(request.data[\"quantity\"])\n except Exception as e:\n return Response(e, status=status.HTTP_404_NOT_FOUND)\n\n existing_cart_item = CartItem.objects.filter(cart=cart, product=product).first()\n # before creating a new cart item check if it is in the cart already\n # and if yes increase the quantity of that item\n if existing_cart_item:\n existing_cart_item.quantity += quantity\n existing_cart_item.save()\n else:\n new_cart_item = CartItem(cart=cart, product=product, quantity=quantity)\n new_cart_item.save()\n\n # return the updated cart to indicate success\n serializer = CartSerializer(cart)\n return Response(serializer.data)", "title": "" }, { "docid": "f256196600682af1956991ce8cbf796e", "score": "0.82387084", "text": "def add(self, product, quantity=1, override_quantity=False):\n cart_item = self.cart.items.filter(product = product).first()\n if not cart_item:\n cart_item = CartItem.objects.create(cart = self.cart, product = product, price = product.price, quantity = quantity)\n else:\n if override_quantity:\n cart_item.quantity = quantity\n else:\n cart_item.quantity += quantity\n 
cart_item.save()", "title": "" }, { "docid": "1dc0b98a2698d1d50bc3e16738a723e5", "score": "0.8228277", "text": "def add(self, product, qty):\n product_id = str(product.id) # has to be a string to check session data\n\n if product_id in self.cart:\n self.cart[product_id][\"qty\"] = qty # if product already in the basket, just update quantity\n else:\n self.cart[product_id] = {\"price\": str(product.price), \"qty\": int(qty)} # else add item to cart and save it\n\n # self.session.modified = True # tells django explicitly that the session has been modified\n self.save() # does the same thing, but through a function for less repeating", "title": "" }, { "docid": "0428a3b59cc3a09a5ce8964f5a6cbf51", "score": "0.81954014", "text": "def add(self, product, quantity=1, override_quantity=False):\n# ----------------------------------------------------------\n# You use the product ID as a key in the cart's content dictionary. You convert the\n# product ID into a string because Django uses JSON to serialize session data, and\n# JSON only allows string key names. The product ID is the key, and the value\n# that you persist is a dictionary with quantity and price figures for the product\n product_id = str(product.id)\n# The product's price is converted from decimal into a string in order to serialize it.\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': 0,\n 'price': str(product.price)}\n if override_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "e97c332db10b5e0a4b3c4a27ee2200c7", "score": "0.81535506", "text": "def add_to_cart(self, request):\n cart = Cart.objects.filter(user=request.user).first()\n if cart is None:\n cart = Cart(user=request.user)\n cart.save()\n\n try:\n product = Product.objects.get(\n pk=request.data['product_id']\n )\n quantity = int(request.data['quantity'])\n except Exception as e:\n return Response({'status': 'fail'})\n\n # Disallow adding to cart if available inventory is not enough\n if product.stock <= 0 or product.stock - quantity < 0:\n print (\"There is no more product available\")\n return Response({'status': 'fail'})\n\n existing_cart_item = CartItem.objects.filter(cart=cart,product=product).first()\n # before creating a new cart item check if it is in the cart already\n # and if yes increase the quantity of that item\n if existing_cart_item:\n existing_cart_item.quantity += quantity\n existing_cart_item.save()\n else:\n new_cart_item = CartItem(cart=cart, product=product, quantity=quantity)\n new_cart_item.save()\n \n # return the updated cart to indicate success\n cartItem = CartItem.objects.filter(cart=cart)\n serializer = CartItemSerializer(cartItem, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "b47370ff4dc0dc3b265cc68f48fa71b6", "score": "0.7969012", "text": "def update(self, product, qty):\n product_id = str(product)\n if product_id in self.cart:\n self.cart[product_id]['qty'] = qty\n self.save()", "title": "" }, { "docid": "65bfc1433544c06ad27d8d690c3f3b84", "score": "0.7946699", "text": "def add(self,produto,quantidade=1,update_quantidade = False):\n produto_id = str(produto.id)\n if produto_id not in self.cart:\n self.cart[produto_id] = {'quantidade':0,'preco':str(produto.preco)}\n if update_quantidade:\n self.cart[produto_id]['quantidade'] = quantidade\n else:\n self.cart[produto_id]['quantidade'] += quantidade\n self.save()", "title": "" }, { "docid": 
"95df1578d02585c14993e812ae1c1f5a", "score": "0.79306847", "text": "def add_to_cart(self, product):\n # your code goes here!", "title": "" }, { "docid": "36dcec323e8ea2336eb26f902c4fd2a6", "score": "0.7894479", "text": "def add(self, product, quantity):\n product_id = str(product.id)\n if product_id in self.basket:\n self.basket[product_id]['quantity'] = quantity\n else:\n self.basket[product_id] = {'price': float(\n product.regular_price), 'quantity': int(quantity)}\n self.session.modified = True", "title": "" }, { "docid": "a079e96298bbd9deacf08a5ef162ba19", "score": "0.78733397", "text": "def add_product_quantity(self, name, quantity):\n\n\t\tif self.is_product_available(name):\n\t\t\ttemp_product = list(filter(lambda x: x.name == name, self.get_product_list()))\n\t\t\tself.cursor.execute(\"UPDATE `introse`.`inventory` SET `quantity`='\" + str((temp_product[0].quantity + quantity)) + \"', `lastupdated`='\" + str(datetime.datetime.now()) + \"' WHERE `productName`='\" + str(name) + \"';\")\n\t\t\tself.connect.begin()\n\t\telse:\n\t\t\tprint('product does not exist!')", "title": "" }, { "docid": "035ae599350a6858c7e9455e61c68a79", "score": "0.78083444", "text": "def add_to_cart(self, item_name, quantity):\r\n try:\r\n if item_name in self.cart:\r\n self.cart[item_name] += quantity\r\n else:\r\n self.cart[item_name] = quantity\r\n except:\r\n print \"Oops, something went wrong picking that item :(\"", "title": "" }, { "docid": "b2df013819d5138764c096c1ee831496", "score": "0.77977604", "text": "def update_product_quantity(self):\n #get of the product\n product = self.database.get_product_by_id(id=self.product[0])\n #get of the quantity to add\n add_quantity = int(self.ids.add_quantity.text)\n #checking if the add quantity is less than the product quantity\n if ( product[4] + add_quantity >= 0):\n #update of the quantity in the database\n self.database.update_product_quantity(id=product[0], increment=add_quantity)\n #get of the current modified product\n product = self.database.get_product_by_id(id=self.product[0])\n #change of the product quantity with the new quantity on the screen (detail screen)\n self.ids.product_quantity.text = str(product[4])\n #reset of the add quantity\n self.ids.add_quantity.text = '0'\n print(product)", "title": "" }, { "docid": "d51d0a92011c3ed58fb562aa6ffd3d37", "score": "0.76875263", "text": "def add_to_cart(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get(\"quantity\"))\n redirect_url = request.POST.get(\"redirect_url\")\n size = None\n if \"product_size\" in request.POST:\n size = request.POST[\"product_size\"]\n cart = request.session.get(\"cart\", {})\n\n if size:\n if item_id in list(cart.keys()):\n if size in cart[item_id][\"items_by_size\"].keys():\n cart[item_id][\"items_by_size\"][size] += quantity\n messages.success(\n request,\n f'You updated size {size.upper()} {product.name} quantity to {cart[item_id][\"items_by_size\"][size]}',\n )\n else:\n cart[item_id][\"items_by_size\"][size] = quantity\n messages.success(\n request,\n f\"You added size {size.upper()} {product.name} to your shopping cart\",\n )\n else:\n cart[item_id] = {\"items_by_size\": {size: quantity}}\n messages.success(\n request,\n f\"You added size {size.upper()} {product.name} to your shopping cart\",\n )\n else:\n if item_id in list(cart.keys()):\n cart[item_id] += quantity\n messages.success(\n request, f\"You updated {product.name} quantity to {cart[item_id]}\"\n )\n else:\n cart[item_id] = quantity\n messages.success(\n 
request, f\"You added {product.name} to your shopping cart.\"\n )\n\n request.session[\"cart\"] = cart\n return redirect(redirect_url)", "title": "" }, { "docid": "99dc49aecbc4af076fe925a50453234a", "score": "0.76856756", "text": "def add_item_to_cart(request, product_id):\n # get cart session if avaialble, initiate otherwise\n qty = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n cart = request.session.get('cart', {})\n\n if qty > 10:\n messages.error(request, 'Sorry big spender, only 10 '\n 'items per person :)')\n return redirect(redirect_url)\n elif qty < 1:\n messages.error(request, 'Please make sure you are '\n 'not adding 0 or negative amount.')\n return redirect(redirect_url)\n\n # Check if product is already in the cart\n if product_id in list(cart.keys()):\n # If in the cart, increase quantity\n cart[product_id] += qty\n messages.info(request, ' ')\n else:\n # Add to the cart if not in it already\n cart[product_id] = qty\n messages.info(request, ' ')\n\n # assign values to cart\n request.session['cart'] = cart\n return redirect(redirect_url)", "title": "" }, { "docid": "ce5d604d9f515ca3017705465fd23e1a", "score": "0.767833", "text": "def update(self, product_id, product_qty):\n product_id = str(product_id)\n if product_id in self.basket:\n self.basket[product_id]['quantity'] = int(product_qty)\n self.session.modified = True", "title": "" }, { "docid": "c122d0ee67ad10c116ee9e3f357b39ed", "score": "0.7577501", "text": "def add_product_to_cart(self, product: 'Product', cart: 'Cart', quantity: int = 1) -> None:\n product_data = {\n 'data': {\n 'quantity': quantity,\n 'type': 'cart_item',\n 'id': product.id,\n },\n }\n self._session.post(\n f'{self._url}/{cart.reference}/items',\n json=product_data,\n )", "title": "" }, { "docid": "900ecd197c528f357614f360a8ea7065", "score": "0.75427", "text": "def add_product_to_cart():\n print(\"Work is in progress\")\n CustomerActions.customer_action()", "title": "" }, { "docid": "0d6cd3c57b29abf0dd5e8952719e3114", "score": "0.75386655", "text": "def add_product_to_cart(self):\n sleep(1)\n cart_total = self.methods.presence_of_element_located(self.ADDED_TO_CART).text\n self.methods.wait_for_element(self.ADD_TO_CART).click()\n if self.methods.element_exists(self.ADDED_TO_CART):\n try:\n sleep(1)\n successfully_added = self.methods.presence_of_element_located(self.ADDED_TO_CART).text\n assert cart_total != successfully_added # change assertion if any bug appears\n except AssertionError:\n self.add_product_to_cart()\n else:\n self.add_product_to_cart()", "title": "" }, { "docid": "3d73e2969dfa877117c32da71ce7889c", "score": "0.7536604", "text": "def add_to_cart(request, item_id):\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n size = None\n number = None\n\n if 'product_size' or 'product_number' in request.POST:\n size = request.POST.get('product_size', None)\n number = request.POST.get('product_number', None)\n\n cart = request.session.get('cart', {})\n\n if size:\n if item_id in list(cart.keys()):\n\n if size in cart[item_id]['items_by_size'].keys():\n cart[item_id]['items_by_size'][size] += quantity\n messages.success(request,\n f'Update size {size.upper()} {product.name}'\n f' quantity to'\n f'{cart[item_id][\"items_by_size\"][size]}')\n else:\n cart[item_id]['items_by_size'][size] = quantity\n messages.success(request,\n f'Added size {size.upper()} {product.name}'\n f'to the cart!')\n\n else:\n cart[item_id] 
= {'items_by_size': {size: quantity}}\n messages.success(request,\n f'Added size {size.upper()} {product.name}'\n f'to the cart!')\n\n elif number:\n if item_id in list(cart.keys()):\n\n if number in cart[item_id]['items_by_number'].keys():\n cart[item_id]['items_by_number'][number] += quantity\n messages.success(request,\n f'Update number {number} {product.name}'\n f' quantity to '\n f' {cart[item_id][\"items_by_number\"]}')\n\n else:\n cart[item_id]['items_by_number'][number] = quantity\n messages.success(request,\n f'Added number {number} {product.name}'\n f' to the cart!')\n\n else:\n cart[item_id] = {'items_by_number': {number: quantity}}\n messages.success(request,\n f'Added number {number} {product.name}'\n f' to the cart!')\n\n else:\n if item_id in list(cart.keys()):\n cart[item_id] += quantity\n messages.success(request,\n f'Updated {product.name}'\n f' quantity to {cart[item_id]}')\n\n else:\n cart[item_id] = quantity\n messages.success(request, f'Added {product.name} to the cart!')\n\n request.session['cart'] = cart\n return redirect(redirect_url)", "title": "" }, { "docid": "6c266aa97660bf7250035f4412f1b530", "score": "0.75298953", "text": "def increase_quantity_product(product, quantity=1):\n product.amount = F(\"amount\") + quantity\n product.save(update_fields=[\"amount\"])", "title": "" }, { "docid": "39f568cb61aeba2f6e33e5d31049ff75", "score": "0.7515883", "text": "def _add_to_cart(self, variation, quantity):\n field_names = [f.name for f in ProductVariation.option_fields()]\n data = dict(list(zip(field_names, variation.options())))\n data[\"quantity\"] = quantity\n self.client.post(variation.product.get_absolute_url(), data)", "title": "" }, { "docid": "4640e687dabd91ba36502069b945e0a4", "score": "0.7512118", "text": "def set_quantity(self, product, quantity):\n quantity = int(quantity)\n if quantity < 0:\n raise ValueError('Quantity must be positive when updating cart')\n if product in self.products:\n self._items_dict[product.pk].quantity = quantity\n if self._items_dict[product.pk].quantity < 1:\n del self._items_dict[product.pk]\n self.update_session()", "title": "" }, { "docid": "8eb38be62f74d12142336426ef79e593", "score": "0.74917185", "text": "def _add_to_cart(self, variation, quantity):\r\n field_names = [f.name for f in ProductVariation.option_fields()]\r\n data = dict(list(zip(field_names, variation.options())))\r\n data[\"quantity\"] = quantity\r\n self.client.post(variation.product.get_absolute_url(), data)", "title": "" }, { "docid": "8f47953b1933be75d7bcb11df1877838", "score": "0.74521613", "text": "def update(self, id, quantity):\n cart_item = self.get(id)\n\n cart_item.quantity = quantity\n\n return cart_item.save()", "title": "" }, { "docid": "6cff0d2235289ddf0dd421e2ee6cc3f8", "score": "0.7451113", "text": "def set_quantity(self, product, quantity):\n quantity = int(quantity)\n if quantity < 0:\n raise ValueError('Quantity must be positive when updating cart')\n if product in self.products:\n self._items_dict[product.id].quantity = quantity\n if self._items_dict[product.id].quantity < 1:\n del self._items_dict[product.id]\n self.update_session()", "title": "" }, { "docid": "a586e2883967dfdb9da74c9afe923e8e", "score": "0.7445297", "text": "def addStock(self,quantity):\n updateQuantity(self.sellerId,self.productId,quantity)\n self.quantity += quantity", "title": "" }, { "docid": "41df47a003a047532412b91b7dfd15c5", "score": "0.74428105", "text": "def add_cart(request, product_id):\n product = Product.objects.get(id=product_id)\n try:\n cart = 
Cart.objects.get(cart_id=_cart_id(request))\n except Cart.DoesNotExist: # If there is no cart, we create a new one\n cart = Cart.objects.create(\n cart_id=_cart_id(request)\n )\n cart.save()\n try: # Here we attempt to get cart item information\n cart_item = CartItem.objects.get(product=product, cart=cart)\n if cart_item.quantity < cart_item.product.stock:\n cart_item.quantity += 1\n cart_item.save()\n except CartItem.DoesNotExist: # If the cart item doesn't exist, we create a new one for the session\n cart_item = CartItem.objects.create(\n product=product,\n quantity=1,\n cart=cart,\n )\n cart_item.save()\n return redirect('cart_detail')", "title": "" }, { "docid": "474ab123168f71d20a166a80e89d28b6", "score": "0.74226165", "text": "def add_to_cart(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n cart = request.session.get('cart', {})\n\n if item_id in list(cart.keys()):\n cart[item_id] += quantity\n messages.success(\n request, f'{cart[item_id]} {product.name} added to your cart')\n else:\n cart[item_id] = quantity\n messages.success(request, f'{product.name} added to your cart')\n\n request.session['cart'] = cart\n return redirect(redirect_url)", "title": "" }, { "docid": "366e47e98ef05591692a0e237ed85275", "score": "0.74218774", "text": "def add_item_to_bag(request, product_id):\n\n # incase object isn't found\n shop_product = get_object_or_404(Product, pk=product_id)\n qty = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n\n shopping_bag = request.session.get('bag', {})\n\n if product_id in list(shopping_bag.keys()):\n shopping_bag[product_id] += qty\n messages.success(\n request, f'Updated {shop_product.name} quantity to {shopping_bag[product_id]}.')\n else:\n shopping_bag[product_id] = qty\n messages.success(\n request, f'Added {qty} x {shop_product.name} to your bag.')\n\n request.session['bag'] = shopping_bag\n return redirect(redirect_url)", "title": "" }, { "docid": "b70aa8dd767d952315b893557e89dca8", "score": "0.73943657", "text": "def AddToCart(self):\n units = self.fieldUnits.text()\n productCode = self.fieldCode.text()\n if productCode == '' or productCode == '0':\n showMessage(self, 'Input Error', 'Please select a product')\n return\n if units == '0':\n showMessage(self, 'Input Error', 'Please set the units required')\n return\n if self.crt.isExistingProduct(int(productCode)):\n showMessage(self, 'Input Error', 'Product already Exist in cart!')\n return\n if int(self.crt.getProductDetails(productCode)[4]) < int(units):\n showMessage(self, 'Input Error', units + ' units of product is not available!')\n return\n units = int(units)\n productCode = int(productCode)\n self.crt.newProduct(productCode, units)\n self.clear()\n self.setupCartTable()\n self.updateTotalCost()", "title": "" }, { "docid": "b2b6e3da7ef58056d5088b3ce12890bc", "score": "0.73803973", "text": "def add_product(self, product_name, quantity) : \r\n \r\n if product_name not in self._products :\r\n self._products[product_name] = quantity #add new product and quantity \r\n # { product_name : quantity }\r\n \r\n else :\r\n print(f\"Warning : product {product_name} already exists!\")", "title": "" }, { "docid": "43c8562d8734304d2320a373796025de", "score": "0.7377596", "text": "def update_cart(self, name: str, newqty: int):\r\n print('Updating: {} quantity in cart to {}'.format(name, newqty))\r\n if newqty < 1:\r\n raise Exception(\"Quantity must be greater than 
0\")\r\n for item in self.cart:\r\n if item['name'] == name:\r\n # the item is in the cart\r\n item['qty'] = newqty", "title": "" }, { "docid": "1162b3a117f9dcffdda9212cbd3ee39f", "score": "0.7367979", "text": "def update_item(self, product_id, quantity):\n self.items.filter(product_id=product_id).update(quantity=quantity)", "title": "" }, { "docid": "5895bdda71f90206a456af9102b20664", "score": "0.73679525", "text": "def add(self, product, price=None, quantity=1):\n quantity = int(quantity)\n if quantity < 1:\n raise ValueError('Quantity must be at least 1 when adding to cart')\n if product in self.products:\n self._items_dict[product.id].quantity += quantity\n else:\n if price == None:\n raise ValueError('Missing price when adding to cart')\n self._items_dict[product.id] = CartItem(product, quantity, price)\n self.update_session()", "title": "" }, { "docid": "b9f6aea0a48c73ebc40c1bcdb7ca9bb1", "score": "0.7359938", "text": "def add_product_to_cart(self, product_id):\r\n self._cart.append(product_id)", "title": "" }, { "docid": "2b12e75606a47c811f7b9898a7ec32ee", "score": "0.7353209", "text": "def add(self, product, add_ons, removed, price=None, quantity=1):\n quantity = int(quantity)\n if quantity < 1:\n raise ValueError('Quantity must be at least 1 when adding to cart')\n\n if price == None:\n raise ValueError('Missing price when adding to cart')\n self._items_dict[self.item_no] = ModifiedCartItem(product, quantity, price, add_ons, removed, self.item_no)\n self.item_no += 1\n self.update_session()", "title": "" }, { "docid": "316de3d436f2097fbf97b0c83fa3fdff", "score": "0.73445296", "text": "def addtoCart(request):\n if not request.user.is_authenticated:\n return render(request, 'user/login.html')\n\n user_id = request.user.id\n if request.method == 'POST':\n post_dict = request.POST\n product_id = int(post_dict.get(\"product_id\", \"0\"))\n size_type = post_dict.get(\"size_type\", \"small\")\n print(\"size_type\", size_type)\n product = Item.objects.get(id=product_id)\n cart_item, created = CartItem.objects.get_or_create(user_id=user_id, product=product, size=size_type,\n defaults={\n # 'name': product.name,\n # 'desc': product.description,\n # 'price': product.price,\n # 'product_id': product_id,\n 'user_id': user_id,\n # 'pic_address': product.pic_address,\n 'quantity': 0})\n CartItem.objects.filter(id=cart_item.id).update(quantity=F('quantity') + 1)\n\n product.size_set.filter(size_type=size_type).update(stock=F('stock') - 1)\n print('stock_sum', product.size_set.aggregate(Sum('stock')))\n if product.size_set.aggregate(Sum('stock'))['stock__sum'] == 0:\n product.sold_out = True\n product.save()\n\n return redirect('/cart/fetch_user_cart')", "title": "" }, { "docid": "fd122f468668896a73b0de6136d029ea", "score": "0.7337254", "text": "def plus_cart(request):\n\n cart_item = Cart_item.objects.get(pk=request.GET.get('product_id'))\n cart_item.qty += 1\n cart_item.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "title": "" }, { "docid": "12b1c34085c0f1c762dc0d6ef75a1802", "score": "0.73280394", "text": "def add_item(self, product_id, quantity, user):\n if self.items.filter(product_id=product_id):\n item = self.items.get(product_id=product_id)\n item.quantity += quantity\n item.save()\n return item\n\n item = self.items.create(product_id=product_id,\n quantity=quantity,\n updated_by=str(user),\n created_by=str(user))\n\n return item", "title": "" }, { "docid": "b6b38b9e124726f919a8e9b85b897193", "score": "0.730652", "text": "def add_to_cart(self):\n shopping_site = 
IShoppingSite(self.context)\n form = self.request.form\n add_to_cart = form.pop('form.buttons.AddToCart', None)\n subarticle = form.pop('subarticle', None)\n\n uuid = None\n quantity = '1'\n\n if subarticle is not None:\n\n uuids = subarticle\n parent_uuid = add_to_cart\n\n if not isinstance(uuids, list):\n uuids = [subarticle]\n\n for subarticle_uuid in uuids:\n parent = aq_parent(aq_inner(shopping_site.get_object(UID=subarticle_uuid)))\n if parent_uuid == IUUID(parent):\n uuid = subarticle_uuid\n\n quantity = form.get(parent_uuid)\n\n uuid = uuid or add_to_cart\n\n if uuid is not None:\n\n quantity = form.get('quantity') or form.get(uuid) or quantity\n validate = validation.validatorFor('isInt')\n url = self.context.restrictedTraverse('@@plone_context_state').current_base_url()\n message = None\n\n if quantity is not None and validate(quantity) == 1:\n quantity = int(quantity)\n obj = shopping_site.get_object(UID=uuid)\n if obj:\n item = IArticleAdapter(obj)\n if quantity > item.quantity_max():\n quantity = item.quantity_max()\n if quantity > 0:\n size = ISize(obj)\n gross = item.gross()\n kwargs = {\n 'depth': size.depth,\n 'gross': gross,\n 'height': size.height,\n 'net': item.get_net(gross),\n 'quantity': quantity,\n 'title': item.title(),\n 'sku': obj.sku,\n 'vat': item.get_vat(gross),\n 'vat_rate': item.context.vat_rate,\n 'weight': size.weight,\n 'width': size.width,\n }\n item.add_to_cart(**kwargs)\n notify(ArticleAddedToCartEvent(item, self.request))\n else:\n message = _(u'Input positive integer value to add to cart.')\n else:\n message = _(u\"Not available to add to cart.\")\n else:\n message = _(u\"Input integer value to add to cart.\")\n\n if message:\n IStatusMessage(self.request).addStatusMessage(message, type='warn')\n\n return self.request.response.redirect(url)", "title": "" }, { "docid": "03f7aea327a9d327b348c4db592901cd", "score": "0.72754735", "text": "def add_to_cart(request, item_id):\n amount = int(request.POST.get('amount'))\n redirect_url = request.POST.get('redirect_url')\n size = None\n if 'product_size' in request.POST:\n size = request.POST['product_size']\n cart = request.session.get('cart', {})\n\n if size:\n if item_id in list(cart.keys()):\n if size in cart[item_id]['items_by_size'].keys():\n cart[item_id]['items_by_size'][size] += amount\n else:\n cart[item_id]['items_by_size'][size] = amount\n else:\n cart[item_id] = {'items_by_size': {size: amount}}\n else:\n if item_id in list(cart.keys()):\n cart[item_id] += amount\n else:\n cart[item_id] = amount\n\n request.session['cart'] = cart\n return redirect(redirect_url)", "title": "" }, { "docid": "e1a0de56daf94ef4fa254149a61d63a0", "score": "0.7268269", "text": "def add(self, product, quantity=1):\n added_quantity = product.remove_quantity(quantity)\n if added_quantity:\n self.products.append(product)\n self.quantity += added_quantity\n self.subtotal += product.price * added_quantity\n return self", "title": "" }, { "docid": "45b696a21154216feeda5e2f8ac3af44", "score": "0.7264676", "text": "def update_quantity(self, data):\n\t\tcontext = self.env.context.copy() or {}\n\t\tcontext['prestashop'] = 'prestashop'\n\t\trec_id = data.get('product_id')\n\t\tassert rec_id, _('Active ID is not set in Context')\n\t\tif int(data.get('new_quantity')) < 0:\n\t\t\traise UserError(_('Quantity cannot be negative.'))\n\t\tif int(data.get('new_quantity')) == 0:\n\t\t\treturn True\n\t\tinventory_obj = self.env['stock.inventory']\n\t\tinventory_line_obj = self.env['stock.inventory.line']\n\t\tprod_obj_pool = 
self.env['product.product']\n\t\tres_original = prod_obj_pool.with_context(context).browse(rec_id)\n\t\tif int(data.get('new_quantity')) == int(res_original.qty_available):\n\t\t\treturn True\n\t\tconfig_id=self.env['prestashop.configure'].search([('active','=',True)])\n\t\tif config_id:\n\t\t\tlocation_id = config_id[0].pob_default_stock_location.id\n\t\telse:\n\t\t\tlocation_id = self.env['stock.location'].search([('name','=','Stock')])\n\t\t\tlocation_id = location_id[0].id\n\t\tif location_id:\n\t\t\tth_qty = res_original.qty_available\n\t\t\tinventory_id = inventory_obj.with_context(context).create({\n\t\t\t 'name': _('INV: %s') % tools.ustr(res_original.name),\n\t\t\t 'product_id': rec_id,\n\t\t\t 'location_id': location_id,\n\t\t\t 'filter':'product'\n\t\t })\n\t\t\tline_data = {\n\t\t 'inventory_id': inventory_id.id,\n\t\t 'product_qty': data.get('new_quantity'),\n\t\t 'location_id': location_id,\n\t\t 'product_id': rec_id,\n\t\t 'product_uom_id': res_original.uom_id.id,\n\t\t 'theoretical_qty': th_qty\n\t\t\t}\n\t\t\tinventory_line_obj.with_context(context).create(line_data)\n\t\t\tinventory_id.with_context(context).action_done()\n\t\telse:\n\t\t\treturn \"Sorry, Default Stock Location not found!!!\"\n\t\treturn True", "title": "" }, { "docid": "5c020a5737d1fbc5684dc39a29b646a3", "score": "0.72619694", "text": "def test_add_to_cart_with_quantity_set(self):\n product = Product(\n {\n 'name': 'testname',\n 'description': 'test description',\n 'price': 20,\n 'discount': 10,\n 'stock': 3\n }\n )\n product_id = product.pk\n response = self.client.post(\n '/add_to_cart/quantity=3', product_id)\n quantity = response.get(\"quantity\")\n self.assertTrue((quantity, 3), True)", "title": "" }, { "docid": "090f655e96ca2cda3043674118098448", "score": "0.72565484", "text": "def add_to_cart(self):\n self.functions.wait_for_element(self.ADD_TO_CART_BTN).click()\n product_added_to_cart = self.functions.wait_for_element(self.PRODUCT_ADDED_TO_CART)\n assert product_added_to_cart.text == self.text, \"didn't added to cart product!\"", "title": "" }, { "docid": "73b3bb02777c652ea505b26acccbadfa", "score": "0.7241613", "text": "def add(self, furniture, quantity=1):\n furniture_id = str(furniture.id)\n if furniture_id not in self.cart:\n self.cart[furniture_id] = {'quantity': 1, 'price': str(furniture.price)}\n else:\n self.cart[furniture_id]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "239814fc4a22527d42db907dddf71697", "score": "0.72125", "text": "def add(self, product, action=None):\n id = product.id\n newItem = True\n size = self.request.GET.get(\"size\")\n # print(size,'iko')\n # if qty:\n # qty=int(qty)\n # else:\n # qty=1\n\n # else:\n # size = 5\n x= product.sizes.all()\n maxsize=x.aggregate(Max('size'))['size__max']\n minsize=x.aggregate(Min('size'))['size__min']\n size = size if size else minsize\n if size:\n size=int(size)\n # print(product.price)\n # print(size)\n if str(product.id) not in self.cart.keys():\n\n self.cart[product.id] = {\n 'userid': self.request.session.session_key,\n 'product_id': id,\n 'product_slug':product.slug,\n 'name': product.title,\n 'price': int(product.price*size),\n 'image': product.image1.url,\n 'size': size,\n }\n else:\n newItem = True\n\n for key, value in self.cart.items():\n if key == str(product.id):\n if value['size']<maxsize:\n value['size'] = (value['size'] + 5) if size is None else size\n newItem = False\n self.save()\n break\n if newItem == True:\n\n self.cart[product.id] = {\n 'userid': self.request.session.session_key,\n 
'product_id': product.id,\n 'name': product.title,\n 'size': size,\n 'price': int(product.price),\n 'image': product.image1.url,\n 'size':size\n }\n\n self.save()", "title": "" }, { "docid": "da18dab34b8a9d92af923078ce1582f4", "score": "0.7207095", "text": "def addCartItem():\n pass", "title": "" }, { "docid": "fa37c166e8728184335455a4dfd2f8a6", "score": "0.7190672", "text": "def add_item(self, product, price):\r\n if not product in self.items_in_cart:\r\n self.items_in_cart[product] = price\r\n print (product + \" added.\")\r\n else:\r\n print (product + \" is already in the cart.\")", "title": "" }, { "docid": "178cd5457a41c59f7ee2d4f1a80faf52", "score": "0.71777326", "text": "def add_cart(self):\n\n self.logger.info(\"Enter add cart page\")\n Element(\"Good_details\", \"add\").click()\n\n if Element(\"add_cart\", \"add_cart\").does_exist():\n\n if self.size == \"1\":\n self.logger.info(\"Select size\")\n Element(\"add_cart\", \"size\").clicks(0)\n\n if self.qty == \"0\":\n self.logger.info(\"Input qty\")\n Element(\"add_cart\", \"qty\").get().clear()\n\n Element(\"add_cart\", \"buy\").click()", "title": "" }, { "docid": "3307b4cc9f9f225738a4912bf7018043", "score": "0.7176878", "text": "def add_to_cart(request, id):\n if request.user.is_authenticated:\n\n if request.POST.get('quantity') and int(request.POST.get('quantity')) > 0: \n quantity = int(request.POST.get('quantity'))\n #SKU combines the product-id with different sizes. Model from Discouts App \n sku_id = (request.POST.get('sku')) \n cart = request.session.get('cart', {})\n cart[sku_id] = int(cart.get(sku_id, 0)) + quantity\n request.session['cart'] = cart\n messages.warning(request, 'The product is added to your cart.')\n else:\n # Warning if the amount of products is not specified\n messages.warning(request, 'You have to specify how many products you want to purchase.')\n else:\n # Warning if the user is not logged in\n messages.warning(request, 'You have to log in / register first, before you can purchase our products.')\n \n return redirect(reverse('all_products'))", "title": "" }, { "docid": "7d044415e686426512aef915d391f828", "score": "0.71691495", "text": "def increment_cart_item(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_item = OrderedItems.objects.get(\n item=item,\n user=request.user,\n ordered=False,\n )\n order_item.quantity += 1\n order_item.save()\n return redirect('core:order-summary')", "title": "" }, { "docid": "4d6faec9629ca262cca8ba42a74b659f", "score": "0.7160844", "text": "def add_to_basket(request, item_id):\n\n get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n basket = request.session.get('basket', {})\n\n if item_id in list(basket.keys()):\n basket[item_id] += quantity\n else:\n basket[item_id] = quantity\n\n sweetify.success(request, 'Nice', text='Successfully added to basket',\n icon='success')\n\n request.session['basket'] = basket\n return redirect(redirect_url)", "title": "" }, { "docid": "e2a2bbbd5f182eb8e380bb6cb980b9ce", "score": "0.7155233", "text": "def add_product_to_cart(driver, size, color, quantity):\n logging.info(f\"Set product size: [{size}], color: [{color}], quantity: [{quantity}] and add to cart\")\n product_detail_page = ProductDetailPage(driver)\n product_detail_page.select_size(size)\n product_detail_page.select_color(color)\n product_detail_page.input_quantity(quantity)\n product_detail_page.add_to_cart()", "title": "" }, { "docid": "b1049bb25b8a01f5a56c3f74f18d355f", 
"score": "0.71460396", "text": "def add_item(self, product, price):\r\n if not product in self.items_in_cart:\r\n self.items_in_cart[product] = price\r\n print product + \" added.\"\r\n else:\r\n print product + \" is already in the cart.\"", "title": "" }, { "docid": "97ee639789f454403c693d6efb7993ce", "score": "0.7145523", "text": "def test_adding_a_product_that_already_exists(self):\n shopcart = Shopcart(7, [{\"pid\": 1, \"quantity\": 5}])\n shopcart.save()\n\n shopcart.add_product(1,5)\n\n self.assertEqual(shopcart.products[0].quantity, 10)", "title": "" }, { "docid": "48ea90da1a044cf89c1953b04ef4ed7e", "score": "0.7143351", "text": "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print product + \" added.\"\n else:\n print product + \" is already in the cart.\"", "title": "" }, { "docid": "48ea90da1a044cf89c1953b04ef4ed7e", "score": "0.7143351", "text": "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print product + \" added.\"\n else:\n print product + \" is already in the cart.\"", "title": "" }, { "docid": "a19c3fb5fe7999e22ad1c43b511c8453", "score": "0.7137215", "text": "def add_to_cart(self, cart_id, product):\n if not product in self.products:\n return False\n\n if not cart_id in self.carts:\n self.carts[cart_id] = []\n\n if self.products[product]:\n producer = self.products[product][0]\n self.producers[producer] -= 1\n self.products[product].remove(producer)\n self.carts[cart_id].append([product, producer])\n return True\n\n return False", "title": "" }, { "docid": "600b77e093d0729e9f8ad45dcc865f54", "score": "0.7123447", "text": "def add_item(self, product_code: str, quantity: int):\n try:\n price = (\n self._session.query(self._market_class)\n .filter(self._market_class.name == product_code)\n .first()\n .price\n )\n cart = self._cart_class(name=product_code, quantity=quantity, price=price)\n self._session.add(cart)\n self._session.commit()\n\n except Exception as e:\n raise Exception(f\"Their was an issue adding your new product: {e}\")", "title": "" }, { "docid": "534ca9d252f56f9cf343a08a1def5314", "score": "0.7115879", "text": "def add_to_cart(request, item_id):\n product = Album.objects.get(pk=item_id)\n added_item = request.session.get('added_item', {})\n request.session['added_item'] = {}\n quantity = int(request.POST.get('quantity'))\n cart = request.session.get('cart', {})\n redirect_url = request.POST.get('redirect_url')\n\n if item_id in list(cart.keys()):\n cart[item_id] += quantity\n messages.success(request, f'Added another copy of {product.artist} - {product.title} to your cart')\n else:\n cart[item_id] = quantity\n messages.success(request, f'Added {product.artist} - {product.title} to your cart')\n\n request.session['added_item'] = item_id\n request.session['cart'] = cart\n return redirect(redirect_url)", "title": "" }, { "docid": "cd999b1ab19177cf0e2d32963fa2b97a", "score": "0.71148604", "text": "def update_stock(self, quantity):\r\n if self.num_in_stock is not None:\r\n self.num_in_stock += quantity\r\n self.save()\r\n if self.default:\r\n self.product.num_in_stock = self.num_in_stock\r\n self.product.save()", "title": "" }, { "docid": "07d637064d1796383b4ef3c57e8d4311", "score": "0.7109366", "text": "def increaseInventory(productID: int, quantity: int) -> bool:\n pass", "title": "" }, { "docid": "d66ec0c485444b2aec7c898dc37a913f", "score": "0.7099053", "text": "def cart_update(self, product_id, add_qty=1, set_qty=0, 
**kw):\n sale_order = request.website.sale_get_order(force_create=True)\n if sale_order.state != 'draft':\n request.session['sale_order_id'] = None\n sale_order = request.website.sale_get_order(force_create=True)\n\n product_custom_attribute_values = None\n if kw.get('product_custom_attribute_values'):\n product_custom_attribute_values = json.loads(kw.get('product_custom_attribute_values'))\n\n no_variant_attribute_values = None\n if kw.get('no_variant_attribute_values'):\n no_variant_attribute_values = json.loads(kw.get('no_variant_attribute_values'))\n\n sale_order._cart_update(\n product_id=int(product_id),\n add_qty=add_qty,\n set_qty=set_qty,\n product_custom_attribute_values=product_custom_attribute_values,\n no_variant_attribute_values=no_variant_attribute_values\n )\n\n if kw.get('express'):\n return request.redirect(\"/shop/checkout?express=1\")\n\n # return request.redirect(\"/shop/cart\")\n product_id = request.env[\"product.product\"].browse(int(product_id))\n if product_id.is_booking:\n return request.redirect(f\"/shop/{product_id.id}/booking-type\")\n else:\n return request.redirect(\"/shop/cart\")", "title": "" }, { "docid": "14ff850041dca21f0d9faa2f168a0937", "score": "0.70940065", "text": "def _add_to_cart(cls, customer_id, product_id, product_price, product_name):\n return requests.post('%s/shopcarts/%s' % (cls.SHOPCART_SERV_URL, customer_id), json={\n 'product_id': product_id,\n 'customer_id': customer_id,\n 'quantity': 1,\n 'price': product_price,\n 'text': product_name,\n })", "title": "" }, { "docid": "ac3d322e50083ed38b5236bf975512c6", "score": "0.7081541", "text": "def add_product(self):\n self._operations.click_element_css(self.__ADD_TO_CART_CSS)", "title": "" }, { "docid": "c93049f23b47055e95135b0c82b97806", "score": "0.70718175", "text": "def add_basket_item(product_id,amount=1):\n product = mongo.db.products.find_one({\"_id\": ObjectId(product_id)})\n basket = get_basket()\n\n index = indexOf(basket,\"id\",str(product[\"_id\"]))\n if index >= 0:\n # Product is already in basket, lets update the quantity\n basket[index][\"amount\"] += amount\n else:\n # Product missing in basket, let's add it\n item = {\n \"id\": str(product[\"_id\"]),\n \"name\": product[\"name\"],\n \"amount\": amount,\n \"price\":product[\"price\"],\n \"image_url\":product[\"image_url\"]\n }\n basket.append(item)\n\n session[\"basket\"] = basket\n storeBasket()\n flash(\"Basket item added\")", "title": "" }, { "docid": "6aada61c5661fbe66bbb4dd4bb4bc738", "score": "0.70613307", "text": "def add_to_shopping_bag(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n size = None\n if 'product_size' in request.POST:\n size = request.POST['product_size']\n shopping_bag = request.session.get('shopping_bag', {})\n\n if size:\n if item_id in list(shopping_bag.keys()):\n if size in shopping_bag[item_id]['items_by_size'].keys():\n shopping_bag[item_id]['items_by_size'][size] += quantity\n messages.success(request, f'Updated size {size.upper()} {product.name} quantity to {shopping_bag[item_id][\"items_by_size\"][size]}')\n else:\n shopping_bag[item_id]['items_by_size'][size] = quantity\n messages.success(request, f'Added size {size.upper()}\\\n {product.name} to your shopping bag')\n else:\n shopping_bag[item_id] = {'items_by_size': {size: quantity}}\n messages.success(request, f'Added size {size.upper()}\\\n {product.name} to your shopping bag')\n else:\n if item_id in 
list(shopping_bag.keys()):\n shopping_bag[item_id] += quantity\n messages.success(request, f'Updated {product.name} quantity to\\\n {shopping_bag[item_id]}')\n else:\n shopping_bag[item_id] = quantity\n messages.success(request, f'Added {product.name} to your shopping\\\n bag')\n\n request.session['shopping_bag'] = shopping_bag\n return redirect(redirect_url)", "title": "" }, { "docid": "ffbb1ce34ad41d92ad318b6bd4a8f880", "score": "0.70565134", "text": "def add_product(self, product):\r\n self._inventory.append(product)", "title": "" }, { "docid": "5ecac6df8689e66ab329a05bb2841dda", "score": "0.70268244", "text": "def adjust_cart(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n cart = request.session.get('cart', {})\n\n if quantity > 0:\n cart[item_id] = quantity\n messages.success(\n request,\n f'There is now {cart[item_id]} {product.name} in your cart')\n else:\n cart.pop(item_id)\n messages.success(request, f'{product.name} removed from your cart')\n\n request.session['cart'] = cart\n return redirect(reverse('view_cart'))", "title": "" }, { "docid": "ffa659fa554ccc0db42e10d4a819f9ee", "score": "0.70166767", "text": "def add_to_cart(self, cart_id, product):\n with self.consumer_add_mutex:\n if product in self.available_prod:\n # Remove the product from producer queue and available products\n self.available_prod.remove(product)\n\n for (producer, p_list) in self.producer_queues.items():\n # Remove the product from the producer's queue\n if product in p_list:\n self.producer_queues[producer].remove(product)\n self.products[product] = producer\n break\n\n # Add the product to the cart\n self.carts[cart_id].append(product)\n return True\n\n return False", "title": "" }, { "docid": "a823920b4209bc83be1ba1c5c7fdb5ac", "score": "0.7010377", "text": "def add(self, product):\r\n lines = self.lines.filter(product=product)\r\n if len(lines) == 0:\r\n self.lines.create(\r\n product=product, title=product.get_title())\r\n else:\r\n line = lines[0]\r\n line.quantity += 1\r\n line.save()", "title": "" }, { "docid": "75df569fa66ebd126fc2f581211cb116", "score": "0.700846", "text": "def add_product(self, product):\n # your code goes here!", "title": "" }, { "docid": "d425474f42267b94e9b2298a0410a1c1", "score": "0.69655097", "text": "def add_to_bag(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n bag = request.session.get('bag', {})\n\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(\n request, f'Updated quantity of {product.name} to {bag[item_id]}.')\n else:\n bag[item_id] = quantity\n messages.success(\n request, f'Successfully added {product.name} to shopping bag.')\n\n request.session['bag'] = bag\n return redirect(redirect_url)", "title": "" }, { "docid": "2f48b5a6eea7804f57e348570b4bbcf3", "score": "0.69650203", "text": "def increment_item(request, cart_id):\n # Retrieve the cart item from Cart table\n cart_item = get_object_or_404(Cart, id=cart_id)\n\n # Validate if the request's method is POST\n if request.method == 'POST':\n\n # Increment the quantity and save the changes to the object\n # then redirect the user to cart page\n cart_item.quantity += 1\n cart_item.save()\n return redirect('cart')", "title": "" }, { "docid": "acdbbd65590052ac32c2caf1b143f925", "score": "0.6953576", "text": "def add_to_cart(self, item: MenuItem) -> None:\n cart_item = 
self._get_equivalent_item(item)\n if cart_item:\n cart_item.set_amount(cart_item.get_amount() + item.get_amount())\n else:\n self._cart.append(item)", "title": "" }, { "docid": "9a9821f652d8c0a24159d9f8f6b943c4", "score": "0.69501305", "text": "def add_to_cart_quick(request, id):\n quantity=1\n prev = request.POST.get('prev')\n \n cart = request.session.get('cart', {})\n if id in cart:\n cart[id] = int(cart[id]) + quantity\n else:\n cart[id] = cart.get(id, quantity)\n \n request.session['cart'] = cart\n return HttpResponseRedirect(prev)", "title": "" }, { "docid": "b8aa5f41580d317a8463d0fe9b9dae55", "score": "0.6943064", "text": "def add_cart_multi(self, **product_quantity_map):\n\t\tfail_check = jsend.FailCheck()\n\t\t\n\t\tif not hasattr(product_quantity_map, '__iter__'):\n\t\t\tfail_check.add('product_quantity_map', 'product_quantity_map should be a json object mapping product IDs to quantities to add to the cart')\n\t\t\n\t\tfor product_id, quantity in product_quantity_map.items():\n\t\t\t# data validation\n\t\t\tif not tools.isnumeric(product_id):\n\t\t\t\tfail_check.add('product_id', 'Product IDS must be numeric')\n\t\t\tif not tools.isnumeric(quantity):\n\t\t\t\tfail_check.add('quantity', 'Quantity must be numeric') \n\n\t\t\tif fail_check.failed():\n\t\t\t\treturn fail_check.fail()\n\t\t\t\n\t\t\tproduct_id = int(product_id)\n\t\t\tquantity = float(quantity)\n\t\t\t\n\t\t\tnew_quantity = request.registry['website']._ecommerce_add_product_to_cart(request.cr, request.uid,\n\t\t\t\tproduct_id=product_id,\n\t\t\t\tnumber=quantity,\n\t\t\t\tcontext=request.context)\n\t\t\t# bug in _ecommerce_add_product_to_cart means that if adding a new product to the cart, \n\t\t\t# the quantity is always set to 1. Handle this by checking returned qty against specified\n\t\t\t# quantity and adjust it accordingly\n\t\t\tif new_quantity < quantity:\n\t\t\t\trequest.registry['website']._ecommerce_add_product_to_cart(request.cr, request.uid,\n\t\t\t\tproduct_id=int(product_id),\n\t\t\t\tnumber=quantity - new_quantity,\n\t\t\t\tcontext=request.context)\n\t\t\t\t\n\t\treturn self.get_cart_info()", "title": "" }, { "docid": "de0c751343d8ac65d459f65111384bd6", "score": "0.6941453", "text": "def cart_update(self, product_id, add_qty=1, set_qty=0, **kw):\n super(WebsiteSale, self).cart_update(product_id, add_qty, set_qty, **kw)\n if 'product_template_id' in kw and 'my_cart' in kw:\n return request.redirect(\"/shop\")\n\n else:\n return request.redirect('/shop/cart')", "title": "" }, { "docid": "77b23e4471b3326fab507f3bb1831ba9", "score": "0.69410396", "text": "def adjust_cart(request, item_id):\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n size = None\n number = None\n\n if 'product_size' or 'product_number' in request.POST:\n size = request.POST.get('product_size', None)\n number = request.POST.get('product_number', None)\n cart = request.session.get('cart', {})\n\n if size:\n if quantity > 0:\n cart[item_id]['items_by_size'][size] = quantity\n messages.success(request,\n f'Updated size'\n f' {size.upper()} {product.name}'\n f' quantity to'\n f' {cart[item_id][\"items_by_size\"][size]}')\n\n else:\n del cart[item_id]['items_by_size'][size]\n if not cart[item_id]['items_by_size']:\n cart.pop(item_id)\n messages.success(request,\n f'Removed size {size.upper()} {product.name}'\n f' from the cart!')\n\n elif number:\n if quantity > 0:\n cart[item_id]['items_by_number'][number] = quantity\n messages.success(request,\n f'Update number {number} {product.name}'\n 
f' quantity to '\n f' {cart[item_id][\"items_by_number\"][number]}')\n\n else:\n del cart[item_id]['items_by_number'][number]\n if not cart[item_id]['items_by_number']:\n cart.pop(item_id)\n messages.success(request,\n f'Removed number {number} {product.name}'\n f' from the cart!')\n\n else:\n if quantity > 0:\n cart[item_id] = quantity\n messages.success(request,\n f'Updated {product.name} to {cart[item_id]}')\n\n else:\n cart.pop(item_id)\n messages.success(request, f'Removed {product.name} to the cart!')\n\n request.session['cart'] = cart\n return redirect(reverse('view_cart'))", "title": "" }, { "docid": "310d44a5e6767f6a23b3e2f0d818adf2", "score": "0.69337815", "text": "def add_to_cart(self, cart_id, product):\n with self.add_to_cart_lock:\n for key in self.products_dict:\n for prod in self.products_dict[key]:\n if prod == product:\n self.products_dict[key].remove(product)\n self.carts[cart_id].append(product)\n self.reserved[product] = key\n return True\n return False", "title": "" }, { "docid": "36116adac856c14e499f4e5fdf612f7c", "score": "0.69298357", "text": "def increment(self, quantity=None):\n\n data = {}\n if quantity:\n data[\"quantity\"] = self._normalize_quantity(quantity)\n\n xml = self.request(\n \"/customers/add-item-quantity\",\n code=self.subscription.customer.code,\n item_code=self.code,\n **data\n )\n\n item_xpath = '//subscription/items/item[@id=\"%s\"]' % self.id\n (item_xml,) = xml.xpath(item_xpath)\n self._load_from_xml(item_xml)\n\n return self", "title": "" }, { "docid": "2047cc8a4d1c26e7ea7a819a5bbfbfdd", "score": "0.6920818", "text": "def add_item_to_cart(item, cart):\n cart.items.append(item)\n return cart", "title": "" }, { "docid": "6d58f93a2f2adba8b840a82fe6ac66cb", "score": "0.6917637", "text": "def add_item():\r\n # had some difficulty parsing the jquery data\r\n x = request.form\r\n y = x.to_dict(flat=False)\r\n user_input = y[\"userInput\"][0]\r\n\r\n found = False\r\n\r\n for i in session[\"my_kart\"]:\r\n # if in cart, increase quantity, rather than make a new line item.\r\n if i[\"item\"] == user_input:\r\n i[\"quantity\"] = i[\"quantity\"] + 1\r\n session.modified = True\r\n found = True\r\n break\r\n\r\n if found == False:\r\n # append new item to cart.\r\n session[\"my_kart\"].append({\r\n \"item\": user_input,\r\n \"quantity\": 1\r\n })\r\n session.modified = True\r\n\r\n # adds this new item to the list of suggested matches.\r\n suggs.append(user_input)\r\n\r\n # return the updated cart which will be used to update state\r\n return get_kart()", "title": "" }, { "docid": "11d50f5751eebb5ac5ea5dc17a9578f5", "score": "0.6913174", "text": "def add_to_cart(listing_id):\n listing = Listing.query.filter_by(id=listing_id, available=True).first()\n if not listing:\n abort(404)\n if not request.json:\n abort(400)\n if ('quantity' not in request.json or\n type(request.json['quantity']) is not int):\n abort(400)\n\n cart_item = CartItem.query.filter_by(\n merchant_id=current_user.id,\n listing_id=listing_id\n ).first()\n\n new_quantity = request.json['quantity']\n is_currently_incart = cart_item is not None\n\n if new_quantity == 0 and is_currently_incart:\n db.session.delete(cart_item)\n elif new_quantity != 0 and is_currently_incart:\n cart_item.quantity = new_quantity\n elif new_quantity != 0 and not is_currently_incart:\n db.session.add(\n CartItem(\n merchant_id=current_user.id,\n listing_id=listing_id,\n quantity=new_quantity\n )\n )\n db.session.commit()\n name = Listing.query.filter_by(id=listing_id).first().name\n return jsonify({'quantity': 
new_quantity, 'name': name})", "title": "" }, { "docid": "18da1d1c60df20a4ef48a61bec597b29", "score": "0.6905621", "text": "def add_item(self, variation, quantity):\r\n kwargs = {\"sku\": variation.sku, \"unit_price\": variation.price()}\r\n item, created = self.items.get_or_create(**kwargs)\r\n if created:\r\n item.description = force_text(variation)\r\n item.unit_price = variation.price()\r\n item.url = variation.product.get_absolute_url()\r\n image = variation.image\r\n if image is not None:\r\n item.image = force_text(image.file)\r\n variation.product.actions.added_to_cart()\r\n item.quantity += quantity\r\n item.save()", "title": "" }, { "docid": "aed842f754612b759a0457223f3e7075", "score": "0.68994355", "text": "def add_to_cart(request, id):\n quantity=int(request.POST.get('quantity'))\n \n cart = request.session.get('cart', {})\n if id in cart:\n cart[id] = int(cart[id]) + quantity\n else:\n cart[id] = cart.get(id, quantity)\n \n request.session['cart'] = cart\n return redirect(reverse('index'))", "title": "" }, { "docid": "d6048270c531697580bfd8a378179bd9", "score": "0.6898533", "text": "def update_cart(request, item_id):\n\n product = get_object_or_404(Album, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n cart = request.session.get('cart', {})\n\n if quantity > 0:\n cart[item_id] = quantity\n else:\n cart.pop(item_id)\n\n request.session['cart'] = cart\n return redirect(reverse('view_cart'))", "title": "" }, { "docid": "daa1fd1bd95dfd5378b701c656e98d41", "score": "0.6890488", "text": "def change_cart_item_quantity(cart, app_id, new_quantity):\n for item in cart.items:\n if int(item.app_id) == app_id:\n item.quantity = new_quantity\n break\n return cart", "title": "" }, { "docid": "fb6127fd4a17876aab785ed9880ac9ed", "score": "0.6886492", "text": "def add_to_bag(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n size = None\n if 'product_size' in request.POST:\n size = request.POST['product_size']\n bag = request.session.get('bag', {})\n\n if size:\n if item_id in list(bag.keys()):\n if size in bag[item_id]['items_by_size'].keys():\n bag[item_id]['items_by_size'][size] += quantity\n messages.success(request, f'Updated size {size.upper()} \\\n {product.name} quantity to \\\n {bag[item_id][\"items_by_size\"][size]}')\n else:\n bag[item_id]['items_by_size'][size] = quantity\n messages.success(request, f'Added size {size.upper()} \\\n {product.name} to your bag')\n else:\n bag[item_id] = {'items_by_size': {size: quantity}}\n messages.success(request, f'Added size {size.upper()} \\\n {product.name} to your bag')\n else:\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(request, f'Updated {product.name} quantity \\\n to {bag[item_id]}')\n else:\n bag[item_id] = quantity\n messages.success(request, f'Added {product.name} to your bag')\n\n request.session['bag'] = bag\n return redirect(redirect_url)", "title": "" }, { "docid": "e64c2f6da971eb5a2c07cf3b2b835969", "score": "0.6881767", "text": "def add_product(self, new_prod):\n self.list_of_products.append(new_prod)", "title": "" }, { "docid": "5e8a9f290798a0a438a1b6ddf0403866", "score": "0.68753886", "text": "def add_to_cart(self, cart_id, product):\n for i, queue in enumerate(self.producers_queues):\n # queue = self.producers_queues[i]\n self.queues_locks[i].acquire()\n for queue_product in queue:\n # if we found the product, remove it from the market and add it to the cart\n if 
queue_product == product:\n self.carts_products[cart_id].append((product, i))\n queue.remove(queue_product)\n self.queues_locks[i].release()\n return True\n\n self.queues_locks[i].release()\n return False", "title": "" }, { "docid": "7d31ccd9d68b0e56d8d058c642125ea7", "score": "0.6871063", "text": "def add_to_cart():\n if not session:\n session[\"cart\"] = []\n\n product_id = request.form[\"product_id\"]\n product_name = request.form[\"product_name\"]\n\n session[\"cart\"].append(product_id)\n session.modified = True\n\n flash(f\"{product_name} has been added to your cart.\")\n return redirect(\"/\")", "title": "" }, { "docid": "7ef5f405897d784d066aea1207e13ec9", "score": "0.68660337", "text": "def adjust_cart(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get(\"quantity\"))\n size = None\n if \"product_size\" in request.POST:\n size = request.POST[\"product_size\"]\n cart = request.session.get(\"cart\", {})\n\n if size:\n if quantity > 0:\n cart[item_id][\"items_by_size\"][size] = quantity\n messages.success(\n request,\n f'You updated size {size.upper()} {product.name} quantity to {cart[item_id][\"items_by_size\"][size]}',\n )\n else:\n del cart[item_id][\"items_by_size\"][size]\n if not cart[item_id][\"items_by_size\"]:\n cart.pop(item_id)\n messages.success(\n request,\n f\"You removed size {size.upper()} {product.name} from your cart\",\n )\n else:\n if quantity > 0:\n cart[item_id] = quantity\n messages.success(\n request, f\"You updated {product.name} quantity to {cart[item_id]}\"\n )\n else:\n cart.pop(item_id)\n messages.success(request, f\"You removed {product.name} from your cart\")\n\n request.session[\"cart\"] = cart\n return redirect(reverse(\"view_cart\"))", "title": "" }, { "docid": "8de7335bc10f3eb4c855a5d5ea5121e5", "score": "0.6864846", "text": "def add_item(self, item, quantity):\n self.goods[item.name] = self.goods.get(item.name, 0) + quantity", "title": "" } ]
cb9c1c4fdb48eddde66afef13b7904af
Allow use of `hash` so that a |Particle| instance may be used as a key in a `dict`.
[ { "docid": "a801f58462299836602c20628475aa20", "score": "0.55745435", "text": "def __hash__(self) -> int:\n return hash(self.__repr__())", "title": "" } ]
[ { "docid": "82db20635cd06c7647f81eb18910a756", "score": "0.6146902", "text": "def __hash__(self):\n return hash(self._mean) + hash(self._variance)", "title": "" }, { "docid": "1ee89e699ad8265b4902c38050171d5d", "score": "0.61150706", "text": "def __hash__():", "title": "" }, { "docid": "6c85007b357139bc2799ef2f714bb8e7", "score": "0.60447496", "text": "def __hash__(self):\n return hash(self.hash_key())", "title": "" }, { "docid": "0feb0e6bdf51f1d361979335580c86c2", "score": "0.6039706", "text": "def __hash__(self):\n\t\treturn hash(tuple(sorted(self.__dict__.items())))", "title": "" }, { "docid": "00040d712b146ae4c6853001da39e3bf", "score": "0.60204315", "text": "def __hash__(self) -> int:\n return hash((self.name,))", "title": "" }, { "docid": "138399faf77f1b1b8f56ad5164cfc363", "score": "0.60084665", "text": "def __hash__(self):\n # TODO\n return hash('{0}:{1}'.format(self.x, self.y))", "title": "" }, { "docid": "4076891e3970b943ca06a7761f0700fa", "score": "0.59948367", "text": "def __hash__(self) -> int:\r\n return hash((self.x, self.y))", "title": "" }, { "docid": "89aec1b94fde2b65f8d160d9cb637127", "score": "0.5989241", "text": "def __hash__(self):\n return hash((sum([(i + 1) * hash(s) for i, s in enumerate(self.symbols)]), (sum([(i + 1) * hash(s) for i, s in enumerate(self.relations)]))))", "title": "" }, { "docid": "f96b458fec1bc69da74b39f16ba905b4", "score": "0.5936868", "text": "def __hash__(self):\n return hash( (self.x, self.y) )", "title": "" }, { "docid": "39f69315b8507b99116cf5bc77bd4d69", "score": "0.59366775", "text": "def __hash__(self):\n return hash(self.__key__())", "title": "" }, { "docid": "8fab6070a42d3d5806a450aac1ba03eb", "score": "0.59262", "text": "def __hash__(self):\n return hash(self.__key())", "title": "" }, { "docid": "f6c501775e67aac9e0c9301ab425b1ea", "score": "0.59241545", "text": "def __hash__(self):\n return hash((self.x, self.y))", "title": "" }, { "docid": "306a410145081902cb7632afee0b1a52", "score": "0.5920948", "text": "def __hash__(self):\n return hash((self.__class__, self._hash))", "title": "" }, { "docid": "b81ad9af0a63a26f64958bd422d860ca", "score": "0.5920848", "text": "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "title": "" }, { "docid": "9056b195b3d8d8abf3fd3e4ab54f7346", "score": "0.5914276", "text": "def __hash__(self):", "title": "" }, { "docid": "9056b195b3d8d8abf3fd3e4ab54f7346", "score": "0.5914276", "text": "def __hash__(self):", "title": "" }, { "docid": "32d7508c7f2c14c9e356ee4c74887bd4", "score": "0.5910639", "text": "def __hash__(self):\r\n return hash(tuple(sorted(self.__dict__.items())))", "title": "" }, { "docid": "65da791a8a923f2e854dc9f9c16dfbf7", "score": "0.59100556", "text": "def _store_particle_identity(self) -> NoReturn:\n self._validate_inputs()\n argument, mass_numb, Z = self.__inputs\n symbol = _parsing.dealias_particle_aliases(argument)\n if symbol in _special_particles.data_about_special_particles:\n self._attributes[\"symbol\"] = symbol\n else:\n self._store_identity_of_atom(argument)", "title": "" }, { "docid": "9147337d2d27ad7aaf6046d7c48ad4cf", "score": "0.58922446", "text": "def __hash__(self):\n pass", "title": "" }, { "docid": "68011eb7b18784240420375f46b9f49b", "score": "0.5890164", "text": "def __hash__(self) -> int:\n ... 
# For subclasses to implement.", "title": "" }, { "docid": "7caaef3a690e319f32671e513fdca9f0", "score": "0.58603567", "text": "def json_dict(self) -> dict:\n particle_dictionary = super().json_dict\n particle_dictionary[\"plasmapy_particle\"][\"__init__\"][\"args\"] = (self.symbol,)\n return particle_dictionary", "title": "" }, { "docid": "f044cc6340a35334748fa19c279cc558", "score": "0.5858067", "text": "def __hash__(self):\n return hash(self.key)", "title": "" }, { "docid": "a87dcbe32ce7eab5e4775d00db729e8c", "score": "0.5852584", "text": "def __hash__(self):\n if self._hash is None:\n hash_ = 0\n for pair in self.items():\n hash_ ^= hash(pair)\n self._hash = hash_\n return self._hash", "title": "" }, { "docid": "92746b6fc28c26794b5980f55e14c401", "score": "0.5841632", "text": "def __hash__(self) -> hash:\n return hash(self._name)", "title": "" }, { "docid": "4991e262f270471f1dd476c84dc17d14", "score": "0.5834565", "text": "def __hash__(self) -> int:\n return hash((self.identifier, self.info))", "title": "" }, { "docid": "c904b221936ac9eff423f67b0d5a616d", "score": "0.5834178", "text": "def __hash__(self):\n return hash((self.name, self.alias))", "title": "" }, { "docid": "a6d98534551c751d3c1c94ff7bf5079a", "score": "0.5824006", "text": "def __hash__(self):\n # fixme mm 05/19/2018\n # The python standard library reference says https://docs.python.org/3/reference/datamodel.html#object.__hash__\n # that one should not implement a __hash__ function for mutable objects\n # The existence of the set_key method suggests that BibtexEntry objects are mutable\n # so we run the risk of finding the wrong object\n # If we want set functionalyty we need hashability so we should probably sacrifice mutability \n return hash(self.key)", "title": "" }, { "docid": "44353eae79f358cd4d902ae5af64862c", "score": "0.58218914", "text": "def __hash__(self):\n # TODO(dpranke) - implement this\n raise NotImplementedError(\"hash() not implemented\")", "title": "" }, { "docid": "a5075d5a017b745940ddb0d8d34d4f57", "score": "0.58186364", "text": "def test_hash(self):\n #WARNING: if you change the Stem after putting it in a dict, all bets\n #are off as to behavior. 
Don't do it!\n s = Stem(1, 5, 2)\n t = Stem(1, 5, 2)\n u = Stem(2, 4, 6)\n v = Stem(2, 4, 6)\n w = Stem(2, 4, 4)\n d = {}\n\n assert s is not t\n \n for i in (s, t, u, v, w):\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\n\n self.assertEqual(len(d), 3)\n self.assertEqual(d[Stem(1, 5, 2)], 2)\n self.assertEqual(d[Stem(2, 4, 6)], 2)\n self.assertEqual(d[Stem(2, 4, 4)], 1)\n assert Stem(1,5) not in d", "title": "" }, { "docid": "f50baa5497a83947970555596c36c1e2", "score": "0.581502", "text": "def __hash__(self) -> int:\n return hash(Hole)", "title": "" }, { "docid": "126ac9245495645cfb019891df67e1f2", "score": "0.58139765", "text": "def __hash__(self):\n return hash(self.identifier)", "title": "" }, { "docid": "66eaeef543ffed0b2a97d40cb9e63bf8", "score": "0.5811639", "text": "def __hash__(self):\r\n return hash(self.value)", "title": "" }, { "docid": "da48f4b9f6ab1e1ac608c4137ae5dcbf", "score": "0.5811506", "text": "def __hash__(self):\n ...", "title": "" }, { "docid": "a0043f7bfd39a933ee6d5a09ec67ed0a", "score": "0.58103675", "text": "def __hash__(self) -> int:\n return hash(self.name)", "title": "" }, { "docid": "3314eeea4ec04b9a4af0dbb1f9fa02d1", "score": "0.5806919", "text": "def __hash__(self):\n return hash(self.name)", "title": "" }, { "docid": "3314eeea4ec04b9a4af0dbb1f9fa02d1", "score": "0.5806919", "text": "def __hash__(self):\n return hash(self.name)", "title": "" }, { "docid": "7260f9bc02df0749a5fd55dfe7d835f3", "score": "0.5793961", "text": "def __hash__(self):\n # return hash(tuple(sorted(self.__dict__.items())))\n return hash(str(self))", "title": "" }, { "docid": "3bd85c96664631ecaaf32ce5cb5fd40e", "score": "0.5790856", "text": "def hash(self,value):", "title": "" }, { "docid": "644c573bec98d3cb301b69b7f7b04653", "score": "0.57893425", "text": "def __hash__(self):\n return hash((self.value))", "title": "" }, { "docid": "659f1a463de31c8936313eb9f552a5fe", "score": "0.57882214", "text": "def __hash__(self) -> int:\n return hash(self.__key())", "title": "" }, { "docid": "d59b37ef9c49b83f8b92bf114fcb0dfb", "score": "0.57495254", "text": "def __hash__(self):\n hashstr = ''.join(self.walk(lambda x: x.hashstr(x)))\n return hash(hashstr)", "title": "" }, { "docid": "81e95d5a5e25c37ec3b8d24084d77003", "score": "0.5745507", "text": "def __hash__(self):\n return (self.x ** self.y) + (self.x * self.y)", "title": "" }, { "docid": "3722b986b622e7a6031904205f80af95", "score": "0.5744062", "text": "def __hash__(self):\n return object.__hash__(self)", "title": "" }, { "docid": "ca36b92d6d7d1a85b8de3014fee4e551", "score": "0.5735484", "text": "def __hash__(self):\n return hash(self.pos) + hash(frozenset(self.goals))", "title": "" }, { "docid": "71328622388944d94ff6aafdd20e7256", "score": "0.57170254", "text": "def __hash__(self):\n return hash((self.__class__, str(self)))", "title": "" }, { "docid": "b7248a197b52edddcac11ce3e97d5ce0", "score": "0.57113653", "text": "def __hash__(self):\n return super().__hash__()", "title": "" }, { "docid": "b7248a197b52edddcac11ce3e97d5ce0", "score": "0.57113653", "text": "def __hash__(self):\n return super().__hash__()", "title": "" }, { "docid": "46f550a62b2f2790a5a7ff6a4c43a403", "score": "0.57061994", "text": "def __hash__(self):\n h = 5\n h = 43 * h + id(self.edge_face)\n h = 43 * h + id(self.intersecting_face)\n return h", "title": "" }, { "docid": "292bde1fcead09d1dd3acbcc1fde45fa", "score": "0.5700397", "text": "def param_hash(p) -> int:\n return hash(tuple(_hash_field(getattr(p, f.name)) for f in fields(p)))", "title": "" }, { "docid": 
"359f06a443f5605dc001d77fb29fd5d4", "score": "0.56999046", "text": "def __hash__(self):\n return id(self)", "title": "" }, { "docid": "359f06a443f5605dc001d77fb29fd5d4", "score": "0.56999046", "text": "def __hash__(self):\n return id(self)", "title": "" }, { "docid": "98c79cfef97231c1608076e609d48125", "score": "0.5698696", "text": "def __hash__(self):\n return hash(self.__repr__())", "title": "" }, { "docid": "98c79cfef97231c1608076e609d48125", "score": "0.5698696", "text": "def __hash__(self):\n return hash(self.__repr__())", "title": "" }, { "docid": "98c79cfef97231c1608076e609d48125", "score": "0.5698696", "text": "def __hash__(self):\n return hash(self.__repr__())", "title": "" }, { "docid": "19d63e86aed2add1f43c8cebcb1da3f0", "score": "0.5690802", "text": "def __hash__(self):\n return hash(self._name)", "title": "" }, { "docid": "6c9cea40d5695715d87b96bc74143d2d", "score": "0.5690779", "text": "def __hash__(self) -> int:\n return object.__hash__(self)", "title": "" }, { "docid": "c36e4623058a18b75088eccaa827ec7d", "score": "0.5688689", "text": "def __hash__(self):\n return hash(self.n)", "title": "" }, { "docid": "44b734d6e7b66bec560b96e6c32b187f", "score": "0.56825715", "text": "def hash(self):", "title": "" }, { "docid": "a00b9d626dcf12bced8a7bfd1ddd51e2", "score": "0.5678852", "text": "def __hash__(self) -> int:\n return hash(self._name)", "title": "" }, { "docid": "fe5deebd0ced2aba28e929c22cf1dd7c", "score": "0.5676207", "text": "def __hash__(self):\n return hash(self.node)", "title": "" }, { "docid": "289f50f0f22ae3ecc463e85850d3a75d", "score": "0.56717587", "text": "def __hash__(self) -> int:\n return hash(self._inner_value)", "title": "" }, { "docid": "289f50f0f22ae3ecc463e85850d3a75d", "score": "0.56717587", "text": "def __hash__(self) -> int:\n return hash(self._inner_value)", "title": "" }, { "docid": "3b6c231804d778631eb35503e2069e22", "score": "0.5666749", "text": "def __hash__(self):\n if self.id == -1:\n return object.__hash__(self)\n return hash((self.engine, self.id))", "title": "" }, { "docid": "3670bd9f2552902da240f3ccf333ccbf", "score": "0.5665311", "text": "def __hash__(self):\n return hash(self.id)", "title": "" }, { "docid": "3670bd9f2552902da240f3ccf333ccbf", "score": "0.5665311", "text": "def __hash__(self):\n return hash(self.id)", "title": "" }, { "docid": "670dad5f31fb360105c7ee32e7d5f864", "score": "0.56527317", "text": "def __hash__(self):\n return self.name.__hash__()", "title": "" }, { "docid": "95463846bfd1211042327183b8b71b79", "score": "0.5648496", "text": "def __hash__(self):\n return hash(self.id_)", "title": "" }, { "docid": "95463846bfd1211042327183b8b71b79", "score": "0.5648496", "text": "def __hash__(self):\n return hash(self.id_)", "title": "" }, { "docid": "9ef82610b3f21fb9ad5f0676e9343840", "score": "0.5633171", "text": "def __hash__(self):\n return hash(str(self))", "title": "" }, { "docid": "9e2f70923e2882581318c18061a9269a", "score": "0.56319606", "text": "def __hash__(self) -> int:\n return hash(tuple(self.vector))", "title": "" }, { "docid": "c8370f1ceec92c40a7d0f9a3a9646aa5", "score": "0.56292623", "text": "def __hash__(self) -> int:\n return hash(self.value)", "title": "" }, { "docid": "c8370f1ceec92c40a7d0f9a3a9646aa5", "score": "0.56292623", "text": "def __hash__(self) -> int:\n return hash(self.value)", "title": "" }, { "docid": "c8370f1ceec92c40a7d0f9a3a9646aa5", "score": "0.56292623", "text": "def __hash__(self) -> int:\n return hash(self.value)", "title": "" }, { "docid": "c8370f1ceec92c40a7d0f9a3a9646aa5", "score": 
"0.56292623", "text": "def __hash__(self) -> int:\n return hash(self.value)", "title": "" }, { "docid": "c8370f1ceec92c40a7d0f9a3a9646aa5", "score": "0.56292623", "text": "def __hash__(self) -> int:\n return hash(self.value)", "title": "" }, { "docid": "1d4f14ace271ae46a2512eee740a7e66", "score": "0.56280535", "text": "def __hash__(self):\n return hash(self.__str__())", "title": "" }, { "docid": "b3f9320178e595df9b603695588d94f4", "score": "0.56264895", "text": "def __hash__(self):\n return hash(self.__title)", "title": "" }, { "docid": "b037862d64eb1435c0bf7e7b1408b683", "score": "0.561732", "text": "def hash_func(self, v):\n pass", "title": "" }, { "docid": "ce96cc5c6d828be5f3f418b33a43616d", "score": "0.5604686", "text": "def __hash__(self):\n return hash(repr(self))", "title": "" }, { "docid": "c46d338ba52d708b27fac2d0b156e6d4", "score": "0.560081", "text": "def __hash__(self):\n return 0", "title": "" }, { "docid": "c46d338ba52d708b27fac2d0b156e6d4", "score": "0.560081", "text": "def __hash__(self):\n return 0", "title": "" }, { "docid": "1c9ba95aeb3aa9d0201e8d00db092bc8", "score": "0.5600305", "text": "def __hash__(self):\n return hash(self.data)", "title": "" }, { "docid": "1c9ba95aeb3aa9d0201e8d00db092bc8", "score": "0.5600305", "text": "def __hash__(self):\n return hash(self.data)", "title": "" }, { "docid": "99ac353218c17a8af7294a10191bd940", "score": "0.5595226", "text": "def __hash__(self):\n self._hash = self._hash or hash(tuplize((self.observation, self.configuration)))\n # self._hash = self._hash or self.board.tobytes()\n return self._hash", "title": "" }, { "docid": "97106b802390c459dffe74d5dd5c91fd", "score": "0.5591019", "text": "def __hash__(self) -> int:\n return hash(self.id)", "title": "" }, { "docid": "97106b802390c459dffe74d5dd5c91fd", "score": "0.5591019", "text": "def __hash__(self) -> int:\n return hash(self.id)", "title": "" }, { "docid": "97106b802390c459dffe74d5dd5c91fd", "score": "0.5591019", "text": "def __hash__(self) -> int:\n return hash(self.id)", "title": "" }, { "docid": "101bf17fc032f1c0d8082b59592a89e2", "score": "0.55833584", "text": "def __hash__(self):\n return hash(self.parent()) ^ hash(self._ogf)", "title": "" }, { "docid": "97c8cf01054555f0e5ff1cfc83972ba8", "score": "0.5579406", "text": "def __hash__(self):\n return hash(self.id_str)", "title": "" }, { "docid": "08a0caf5739b300fbd0ee3319bc09514", "score": "0.55716723", "text": "def __hash__(self):\n return hash(self._value)", "title": "" }, { "docid": "f24dc5a04347d31bb8ba703b1c3242e4", "score": "0.557108", "text": "def __hash__(self):\n return hash((self.identifier, self.context))", "title": "" }, { "docid": "07e271f1704166feec3095d7549fb558", "score": "0.557069", "text": "def __hash__(self):\n return hash((self.valid, tuple(self.substitution.keys()), tuple(self.substitution.values())))", "title": "" }, { "docid": "5c0b5c22b8b3496820b261b215ed2221", "score": "0.556783", "text": "def __hash__(self):\n\n return hash(self.get_id())", "title": "" }, { "docid": "1df123da7eef5d83a230ca67227756e0", "score": "0.5562815", "text": "def __hash__(self):\n return hash((self.lat, self.lon))", "title": "" }, { "docid": "5aa6c8ec0f61d28c33cf9a9e2be34647", "score": "0.5558346", "text": "def __hash__(self):\n #TODO: hash poker hand the same as HT lookup value.\n return hash", "title": "" }, { "docid": "654c4fb19da0700b75c6a8cb29100526", "score": "0.5550524", "text": "def __hash__(self):\n hash_value = 0\n \n shift = 0\n \n for field_value in self._iter_entity_containers():\n if (field_value is not None):\n 
hash_value ^= len(field_value) << shift\n \n for entity_id in field_value.keys():\n hash_value ^= entity_id\n \n shift += 4\n \n return hash_value", "title": "" }, { "docid": "cc5b68fb02e33066ab5bbfb7e6223454", "score": "0.55488044", "text": "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes", "title": "" }, { "docid": "c17f63eecd1e29c82fc4ddcc5637e109", "score": "0.5540075", "text": "def __hash__(self):\n return hash((self.name, self.context))", "title": "" }, { "docid": "758d7044665484841e35d513a84cb21f", "score": "0.5534353", "text": "def __hash__(self):\n return hash(self.exponents)", "title": "" } ]
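The record above asks for a __hash__ implementation so that an instance can be used as a dict key, and its passages repeatedly pair __hash__ with an equality definition over the same underlying data. The sketch below shows that pairing on a hypothetical, simplified particle-like class; it is not the library's actual implementation, although it hashes the repr in the same way as the positive passage.

class SimpleParticle:
    """Hypothetical stand-in: equality and hashing both derive from the symbol."""

    def __init__(self, symbol):
        self.symbol = symbol

    def __repr__(self):
        return f"SimpleParticle({self.symbol!r})"

    def __eq__(self, other):
        return isinstance(other, SimpleParticle) and self.symbol == other.symbol

    def __hash__(self):
        # Hash the repr, as the positive passage does, so equal instances hash equally.
        return hash(self.__repr__())


masses = {SimpleParticle("p+"): 1.67e-27}
print(masses[SimpleParticle("p+")])  # a new but equal instance finds the same key

Defining __eq__ without __hash__ would leave the class unhashable in Python 3, which is why the two methods are written together here.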
86ac939b5df955ee10bb53581ead4b69
Reformat gridder evaluated on the nu grid returned by make_evaluation_grids into the sampled C function which has an index for the closest gridpoint and an index for the fractional distance from that gridpoint
[ { "docid": "c823a3cba264c1f5bea7da3f273e4960", "score": "0.59278005", "text": "def convertDistanceFromPartyGrid(d, index):\n return d*PartyGlobals.PartyGridUnitLength[index] + PartyGlobals.PartyGridToPandaOffset[index] + PartyGlobals.PartyGridUnitLength[index]/2.0", "title": "" }, { "docid": "91f7b336da2ef9d3d3871f5e3dc3fced", "score": "0.57802725", "text": "def interp_grid(self):\n return tuple(self.to_ncube(theta) for theta in self.theta.T)", "title": "" }, { "docid": "7c8b91d257efe0f2c777693599feefc8", "score": "0.57781726", "text": "def convertDistanceToPartyGrid(d, index):\n return int((d - PartyGlobals.PartyGridToPandaOffset[index])/PartyGlobals.PartyGridUnitLength[index])", "title": "" }, { "docid": "e7dcf7f2e767408ea68a68b8362b280f", "score": "0.5732989", "text": "def lininterp_4d(u_grid, v_grid, w_grid, x_grid, vals, s):\n\n\n d = 4\n smin = (u_grid[0], v_grid[0], w_grid[0], x_grid[0])\n smax = (u_grid[-1], v_grid[-1], w_grid[-1], x_grid[-1])\n\n order_0 = len(u_grid)\n order_1 = len(v_grid)\n order_2 = len(w_grid)\n order_3 = len(x_grid)\n\n # (s_1, ..., s_d) : evaluation point\n s_0 = s[0]\n s_1 = s[1]\n s_2 = s[2]\n s_3 = s[3]\n\n # (s_1, ..., sn_d) : normalized evaluation point (in [0,1] inside the grid)\n s_0 = (s_0-smin[0])/(smax[0]-smin[0])\n s_1 = (s_1-smin[1])/(smax[1]-smin[1])\n s_2 = (s_2-smin[2])/(smax[2]-smin[2])\n s_3 = (s_3-smin[3])/(smax[3]-smin[3])\n\n # q_k : index of the interval \"containing\" s_k\n q_0 = max( min( int(s_0 *(order_0-1)), (order_0-2) ), 0 )\n q_1 = max( min( int(s_1 *(order_1-1)), (order_1-2) ), 0 )\n q_2 = max( min( int(s_2 *(order_2-1)), (order_2-2) ), 0 )\n q_3 = max( min( int(s_3 *(order_3-1)), (order_3-2) ), 0 )\n\n # lam_k : barycentric coordinate in interval k\n lam_0 = s_0*(order_0-1) - q_0\n lam_1 = s_1*(order_1-1) - q_1\n lam_2 = s_2*(order_2-1) - q_2\n lam_3 = s_3*(order_3-1) - q_3\n\n # v_ij: values on vertices of hypercube \"containing\" the point\n v_0000 = vals[(q_0), (q_1), (q_2), (q_3)]\n v_0001 = vals[(q_0), (q_1), (q_2), (q_3+1)]\n v_0010 = vals[(q_0), (q_1), (q_2+1), (q_3)]\n v_0011 = vals[(q_0), (q_1), (q_2+1), (q_3+1)]\n v_0100 = vals[(q_0), (q_1+1), (q_2), (q_3)]\n v_0101 = vals[(q_0), (q_1+1), (q_2), (q_3+1)]\n v_0110 = vals[(q_0), (q_1+1), (q_2+1), (q_3)]\n v_0111 = vals[(q_0), (q_1+1), (q_2+1), (q_3+1)]\n v_1000 = vals[(q_0+1), (q_1), (q_2), (q_3)]\n v_1001 = vals[(q_0+1), (q_1), (q_2), (q_3+1)]\n v_1010 = vals[(q_0+1), (q_1), (q_2+1), (q_3)]\n v_1011 = vals[(q_0+1), (q_1), (q_2+1), (q_3+1)]\n v_1100 = vals[(q_0+1), (q_1+1), (q_2), (q_3)]\n v_1101 = vals[(q_0+1), (q_1+1), (q_2), (q_3+1)]\n v_1110 = vals[(q_0+1), (q_1+1), (q_2+1), (q_3)]\n v_1111 = vals[(q_0+1), (q_1+1), (q_2+1), (q_3+1)]\n\n # interpolated/extrapolated value\n output = (1-lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_0000) + (lam_3)*(v_0001)) + (lam_2)*((1-lam_3)*(v_0010) + (lam_3)*(v_0011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_0100) + (lam_3)*(v_0101)) + (lam_2)*((1-lam_3)*(v_0110) + (lam_3)*(v_0111)))) + (lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_1000) + (lam_3)*(v_1001)) + (lam_2)*((1-lam_3)*(v_1010) + (lam_3)*(v_1011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_1100) + (lam_3)*(v_1101)) + (lam_2)*((1-lam_3)*(v_1110) + (lam_3)*(v_1111))))\n\n return output", "title": "" }, { "docid": "6e6b666b55dc49544073c693fb9f9e25", "score": "0.56776935", "text": "def grid(func, points, values, grid_lon, grid_lat):\n grid_var = np.ones(grid_lon.shape) * np.nan\n for i in range(grid_var.shape[0]):\n for j in range(grid_var.shape[1]):\n #print(i,j)\n lon = grid_lon[i,j]\n 
lat = grid_lat[i,j]\n var_tot = values\n distances = np.sqrt((lon-points[:,0])**2 + (lat-points[:,1])**2)\n weights = [func(i) for i in distances]\n if np.sum(weights) != 0:\n grid_var[i,j] = np.average(values, weights=weights)\n return grid_var", "title": "" }, { "docid": "485aea44774e35c02e2f562f0c6a1477", "score": "0.56373274", "text": "def regrid_simple(Nens, X, X_coords, ind_lat, ind_lon, ntrunc):\n\n # truncate to a lower resolution grid (triangular truncation)\n ifix = np.remainder(ntrunc, 2.0).astype(int)\n nlat_new = ntrunc + ifix\n nlon_new = int(nlat_new * 1.5)\n\n # create new lat,lon grid arrays\n # Note: AP - According to github.com/jswhit/pyspharm documentation the\n # latitudes will not include the equator or poles when nlats is even.\n # TODO: Decide whether this should be tied to spherical regridding grids\n if nlat_new % 2 == 0:\n include_poles = False\n else:\n include_poles = True\n\n lat_new, lon_new, _, _ = generate_latlon(nlat_new, nlon_new,\n include_endpts=include_poles)\n\n # cartesian coords of target grid\n xt, yt, zt = lon_lat_to_cartesian(lon_new.flatten(), lat_new.flatten())\n\n # cartesian coords of source grid\n lats = X_coords[:, ind_lat]\n lons = X_coords[:, ind_lon]\n xs, ys, zs = lon_lat_to_cartesian(lons, lats)\n\n # cKDtree object of source grid\n tree = cKDTree(list(zip(xs, ys, zs)))\n\n # inverse distance weighting (N pts)\n N = 20\n fracvalid = 0.7\n d, inds = tree.query(list(zip(xt, yt, zt)), k=N)\n L = 200.\n w = np.exp(-np.square(d) / np.square(L))\n\n # transform each ensemble member, one at a time\n X_new = np.zeros([nlat_new * nlon_new, Nens])\n X_new[:] = np.nan\n for k in range(Nens):\n tmp = np.ma.masked_invalid(X[:, k][inds])\n mask = tmp.mask\n\n # apply tmp mask to weights array\n w = np.ma.masked_where(np.ma.getmask(tmp), w)\n\n # compute weighted-average of surrounding data\n datagrid = np.sum(w * tmp, axis=1) / np.sum(w, axis=1)\n\n # keep track of valid data involved in averges\n nbvalid = np.sum(~mask, axis=1)\n nonvalid = np.where(nbvalid < int(fracvalid * N))[0]\n\n # make sure to mask grid points where too few valid data were used\n datagrid[nonvalid] = np.nan\n X_new[:, k] = datagrid\n\n # make sure a masked array is returned, if at\n # least one invalid data is found\n if np.isnan(X_new).any():\n X_new = np.ma.masked_invalid(X_new)\n\n return X_new, lat_new, lon_new", "title": "" }, { "docid": "f54e69fd432a7cf791457a6c4cb805d6", "score": "0.5619136", "text": "def build_grid(df_new, step_m):\n lat1=min(df_new.lat)\n lat2=max(df_new.lat)\n lon1=min(df_new.lon)\n lon2=max(df_new.lon)\n step_lon=step_m/(40000*math.cos((lat1+lat2)*math.pi/360)/360)\n step_lat=step_m/(40000./360.)\n # Define the repartition functions...\n to_bin_lon = lambda x: np.floor(x / step_lon) * step_lon\n to_bin_lat = lambda x: np.floor(x / step_lat) * step_lat\n return lat1,lat2,step_lat,lon1,lon2,step_lon,to_bin_lon,to_bin_lat", "title": "" }, { "docid": "14ab65224196e1918bdbdec098ceadc2", "score": "0.56070125", "text": "def get_sc_grid(grid,nsnr:int,snrs:np.ndarray, calc_psz:bool=False):\n \n\n # holds cumulative and differential source counts\n cpsnrs=np.zeros([nsnr])\n psnrs=np.zeros([nsnr-1])\n \n if not calc_psz:\n nother=grid.dmvals.size\n else:\n # holds DM-dependent source counts\n nother=grid.zvals.size\n\n # Generate the grids\n cpgrid=np.zeros([nsnr,nother])\n pgrid=np.zeros([nsnr-1,nother])\n \n backup1=np.copy(grid.thresholds)\n \n # modifies grid to simplify beamshape\n grid.beam_b=np.array([grid.beam_b[-1]])\n 
grid.beam_o=np.array([grid.beam_o[-1]])\n grid.b_fractions=None\n \n for i,s in enumerate(snrs):\n \n grid.thresholds=backup1*s\n grid.calc_pdv()\n grid.calc_rates()\n rates=grid.rates\n if calc_psz:\n cpgrid[i,:]=np.sum(rates,axis=1)\n else:\n cpgrid[i,:]=np.sum(rates,axis=0)\n cpsnrs[i]=np.sum(cpgrid[i,:])\n \n # the last one contains cumulative values\n for i,s in enumerate(snrs):\n if i==0:\n continue\n psnrs[i-1]=cpsnrs[i-1]-cpsnrs[i]\n pgrid[i-1,:]=cpgrid[i-1,:]-cpgrid[i,:]\n\n # Reset grid\n grid.thresholds = backup1\n grid.calc_pdv()\n grid.calc_rates()\n\n #\n return psnrs, pgrid", "title": "" }, { "docid": "e6ead3b1376434eb4a0935b138d073fe", "score": "0.55527514", "text": "def fixedgrid(nrows=50, ncols=50, \n source_rate=4.67e21, source_row=25, source_col=25,\n dx=1000, dy=1000, dt=50,\n tstart=0, tend=3600,\n O3_init=8.61e09,\n wind_init=(5.0, -15.0),\n diff_init=100):\n my_cols = ncols\n my_rows = nrows / NPE\n if RANK == (NPE-1):\n my_rows += (nrows % NPE)\n print '%d: %d rows' % (RANK, my_rows)\n my_row0 = (nrows / NPE) * RANK\n \n ncells = my_rows * my_cols\n shape = (my_rows, my_cols)\n source = (source_row, source_col)\n conc = np.zeros((my_rows, my_cols, chem_param.NSPEC), dtype=np.float32)\n wind_u = np.full(shape, wind_init[0])\n wind_v = np.full(shape, wind_init[1])\n diff = np.full(shape, diff_init)\n time = tstart\n \n # Absolute integration tolerances for variable species \n abstol = np.full(5, chem_param.ATOLS, dtype=np.float32)\n # Relative integration tolerances for variable species \n reltol = np.full(5, chem_param.RTOLS, dtype=np.float32)\n\n # Integer integration in/out parameters \n idata = np.zeros(20, dtype=np.int32)\n # Real value integration in/out parameters \n rdata = np.zeros(20, dtype=np.float32)\n # Last timestamp in each grid cell \n lastH = np.zeros(ncells, dtype=np.float32)\n for i in xrange(my_rows):\n for j in xrange(my_cols):\n chem_init.Initialize(conc[i,j,:],conc[i,j,chem_param.NVAR:]) \n\n # Rosenbrock default parameters \n idata[0] = 0 # System is non-autonomous: F = F(t,y) \n idata[1] = 0 # Use vector tolerances \n idata[2] = 100000 # Maximum number of integration steps \n idata[3] = 5 # Rodas4 Rosenbrock method \n idata[4] = 0 # Tolerance vectors will not be checked \n\n rdata[0] = 0 # Integration step size lower bound: 0 recommended \n rdata[1] = 0 # Integration step size upper bound: 0 recommended \n rdata[2] = dt # Starting integration step size \n rdata[3] = 0.2 # Lower bound on step decrease factor \n rdata[4] = 6 # Upper bound on step increase factor \n rdata[5] = 0.1 # Step decrease factor after step rejection \n rdata[6] = 0.9 # Safety factor in the computation of new step size \n \n if (source_row > my_row0) and (source_row < (my_row0+my_rows)):\n print 'Rank %d added plume at %s' % (RANK, str(source))\n conc[source_row-my_row0,source_col,chem_param.IND_O3] += source_rate / (dx * dy)\n\n if RANK == 0:\n print 'TIME: %g' % time\n while time < tend:\n discretize_rows(dx, dt/2.0, conc, wind_u, diff)\n discretize_cols(dy, dt, conc, wind_v, diff)\n discretize_rows(dx, dt/2.0, conc, wind_u, diff)\n tsretval = chem_integrate.GridIntegrate(my_rows*my_cols, conc, time, time + dt, \n abstol, reltol, idata, rdata, lastH)\n time += dt\n if RANK == 0:\n print 'TIME: %g' % time\n if RANK == 0:\n print 'done'", "title": "" }, { "docid": "c8dacbec4a13c73625fdf22a783e1af6", "score": "0.55432874", "text": "def space_grid_data (grid_lon_size, grid_lat_size, data, lon_indexes, lat_indexes ) :\n \n if data.size > 0 :\n print (\"data range: \" + 
str(data.min()) + \" \" + str(data.max()))\n \n space_grid_shape = (grid_lon_size, grid_lat_size) # TODO, is this the correct order?\n \n # create the density map and figure out how dense the data will be\n # FUTURE, I do not like this looping solution, figure out how to do this in native numpy ops\n density_map = numpy.zeros(space_grid_shape)\n nobs_map = numpy.zeros(space_grid_shape)\n for index in range(data.size) :\n nobs_map[lon_indexes[index], lat_indexes[index]] += 1\n if numpy.isfinite(data[index]) :\n density_map[lon_indexes[index], lat_indexes[index]] += 1\n max_depth = numpy.max(density_map)\n \n print (\"max depth: \" + str(max_depth))\n \n # create the space grids for this variable\n space_grid = numpy.ones((max_depth, grid_lon_size, grid_lat_size), dtype=numpy.float32) * numpy.nan #TODO, dtype\n temp_depth = numpy.zeros(space_grid_shape)\n \n # put the variable data into the space grid\n # FUTURE, I do not like this looping solution, figure out how to do this in native numpy ops\n for index in range(data.size) :\n if numpy.isfinite(data[index]) :\n depth = temp_depth[lon_indexes[index], lat_indexes[index]]\n space_grid[depth, lon_indexes[index], lat_indexes[index]] = data[index]\n temp_depth[ lon_indexes[index], lat_indexes[index]] += 1\n \n if space_grid.size > 0 :\n print (\"grid range: \"), numpy.nanmin(space_grid), numpy.nanmax(space_grid)\n \n return space_grid, density_map, nobs_map, max_depth", "title": "" }, { "docid": "ff9745ab2c51626349db0d9a3648abe2", "score": "0.55040675", "text": "def new_xy_grid(xy,z,dx,dz,pline=None,fit_to_xy=True):\n # reverse dz if necessary\n z_is_negative = np.less(nanmean(z),0)\n if z_is_negative == (dz < 0):\n my_dz = dz\n else:\n my_dz = -dz\n z_new = np.arange(z[0],z[-1],my_dz)\n\n xd,yd,dd,pline = find_projection_distances(xy,pline=pline)\n # find gridding dimensions\n x0 = pline[0,0]\n y0 = pline[0,1]\n x00 = pline[1,0]-pline[0,0]\n y00 = pline[1,1]-pline[0,1]\n pline_distance = np.sqrt(x00**2 + y00**2)\n xy_new_range = np.arange(0,pline_distance,np.abs(dx))\n grid_angle = np.arctan2(y00,x00)\n xy_new = np.zeros((np.size(xy_new_range),2),dtype=np.float64)\n xy_new[:,0] = xy_new_range*np.cos(grid_angle) + x0 # back to projection x - might be offset by up to dx\n xy_new[:,1] = xy_new_range*np.sin(grid_angle) + y0 # back to projection y - might be offset by up to dy\n tmpx, tmpy, xy_new_range,pline = find_projection_distances(xy_new,pline=pline)\n if fit_to_xy:\n # remove cells beyond dd\n dd_start = np.min(dd)\n dd_end = np.max(dd)\n fitted = np.ones(np.size(xy_new_range),np.bool)\n for i in range(np.size(xy_new_range)):\n if xy_new_range[i] < dd_start or xy_new_range[i] > dd_end:\n fitted[i] = False\n return (dd,xy_new_range[fitted],xy_new[fitted,:],z_new)\n else:\n return (dd,xy_new_range,xy_new,z_new)", "title": "" }, { "docid": "5d4d27bd056e496d1f7f6f45588d0844", "score": "0.55008644", "text": "def _interpolate(self, grid, roundup=False):\n # there's no point in working with the whole of the data array if it's\n # masked.\n useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))\n assert (len(useful_chunk) == 1)\n my_xdim, my_ydim = self.data[useful_chunk[0]].shape\n\n if MEDIAN_FILTER:\n f_grid = ndimage.median_filter(grid, MEDIAN_FILTER)\n if MF_THRESHOLD:\n grid = numpy.where(\n numpy.fabs(f_grid - grid) > MF_THRESHOLD, f_grid, grid\n )\n else:\n grid = f_grid\n\n # Bicubic spline interpolation\n xratio = float(my_xdim) / self.back_size_x\n yratio = float(my_ydim) / self.back_size_y\n\n my_map = 
numpy.ma.MaskedArray(numpy.zeros(self.data.shape),\n mask=self.data.mask)\n\n # Remove the MaskedArrayFutureWarning warning and keep old numpy < 1.11\n # behavior\n my_map.unshare_mask()\n\n # Inspired by https://stackoverflow.com/questions/13242382/resampling-a-numpy-array-representing-an-image\n # Should be much faster than scipy.ndimage.map_coordinates.\n # scipy.ndimage.zoom should also be an option for speedup, but zoom dit not let me produce the exact\n # same output as map_coordinates. My bad.\n # I checked, using fitsdiff, that it gives the exact same output as the original code\n # up to and including --relative-tolerance=1e-15 for INTERPOLATE_ORDER=1.\n # It was actually quite a hassle to get the same output and the fill_value is essential\n # in interp1d. However, for some unit tests, grid.shape=(1,1) and then it will break\n # with \"ValueError: x and y arrays must have at least 2 entries\". So in that case\n # map_coordinates should be used.\n\n if INTERPOLATE_ORDER==1 and grid.shape[0]>1 and grid.shape[1]>1:\n x_initial = numpy.linspace(0., grid.shape[0]-1, grid.shape[0], endpoint=True)\n y_initial = numpy.linspace(0., grid.shape[1]-1, grid.shape[1], endpoint=True)\n x_sought = numpy.linspace(-0.5, -0.5 + xratio, my_xdim, endpoint=True)\n y_sought = numpy.linspace(-0.5, -0.5 + yratio, my_ydim, endpoint=True)\n\n primary_interpolation = interp1d(y_initial, grid, kind='slinear', assume_sorted=True,\n axis=1, copy=False, bounds_error=False,\n fill_value=(grid[:, 0], grid[:, -1]))\n transposed = primary_interpolation(y_sought).T\n\n perpendicular_interpolation = interp1d(x_initial, transposed, kind='slinear', assume_sorted=True,\n axis=1, copy=False, bounds_error=False,\n fill_value=(transposed[:, 0], transposed[:, -1]))\n my_map[useful_chunk[0]] = perpendicular_interpolation(x_sought).T\n else:\n slicex = slice(-0.5, -0.5 + xratio, 1j * my_xdim)\n slicey = slice(-0.5, -0.5 + yratio, 1j * my_ydim)\n my_map[useful_chunk[0]] = ndimage.map_coordinates(\n grid, numpy.mgrid[slicex, slicey],\n mode='nearest', order=INTERPOLATE_ORDER)\n\n # If the input grid was entirely masked, then the output map must\n # also be masked: there's no useful data here. We don't search for\n # sources on a masked background/RMS, so this data will be cleanly\n # skipped by the rest of the sourcefinder\n if numpy.ma.getmask(grid).all():\n my_map.mask = True\n elif roundup:\n # In some cases, the spline interpolation may produce values\n # lower than the minimum value in the map. If required, these\n # can be trimmed off. 
No point doing this if the map is already\n # fully masked, though.\n my_map = numpy.ma.MaskedArray(\n data=numpy.where(\n my_map >= numpy.min(grid), my_map, numpy.min(grid)),\n mask=my_map.mask\n )\n return my_map", "title": "" }, { "docid": "b929a9a52b0bf659da6f954da80e9131", "score": "0.5456978", "text": "def interpolate_cells(self, startvec, endvec, interpol_points):\n alphas = np.linspace(0,1,interpol_points+1)\n \n startvec = pd.DataFrame(startvec.reshape(-1, len(startvec))).append([startvec]*self.batch_size,ignore_index=True)\n endvec = pd.DataFrame(endvec.reshape(-1, len(endvec))).append([endvec]*self.batch_size,ignore_index=True)\n \n gen_ind = session.run(self.x_generated, {self.z : startvec})\n gen_ind = pd.DataFrame(gen_ind)\n gen_ind = gen_ind.iloc[gen_ind.index==1]\n df_sim = pd.DataFrame(gen_ind.values)\n for index in alphas: \n \n noise_current = index*endvec + (1-index)*startvec\n \n gen_ind = session.run(self.x_generated, {self.z : noise_current})\n gen_ind = pd.DataFrame(gen_ind)\n gen_ind = gen_ind.iloc[gen_ind.index==1]\n \n df_sim = df_sim.append(gen_ind)\n #true_ind = true_ind.append(pandas.DataFrame((index*endvec + (1-index)*startvec)).T)\n return(df_sim)", "title": "" }, { "docid": "94be5a5767708cabcb69b3341f4cdc07", "score": "0.54538405", "text": "def Interpolation(uc):\n [depth, xdim, ydim] = uc.shape\n \n # Initialise a next fine grid\n xnodes = 2*xdim-1\n ynodes = 2*ydim-1\n uf = np.zeros((depth, xnodes,ynodes))\n \n \n # For even ordered i and j on fine grid\n for k in range(depth):\n for i in range(xdim):\n for j in range (ydim):\n uf[k, 2*i, 2*j]=uc[k, i,j]\n \n\n # For even ordered j on fine grid on fine grid\n for k in range(depth):\n for i in range(0, ynodes, 2):\n for j in range(1, xnodes-1, 2):\n uf[k,i,j]=0.5*(uf[k,i,j-1]+uf[k,i,j+1])\n\n \n # For even ordered i on fine grid on fine grid\n for k in range(depth):\n for i in range(1, xnodes-1, 2):\n for j in range (0, ynodes, 2):\n uf[k,i,j]=0.5*(uf[k,i-1,j]+uf[k,i+1,j])\n \n # For odd ordered i and j on fine grid on fine grid\n for k in range(depth):\n for i in range (1, xnodes-1, 2):\n for j in range (1, ynodes-1, 2):\n uf[k,i,j]=0.25*(uf[k,i-1,j]+uf[k,i+1,j]+uf[k,i,j-1]+uf[k,i,j+1])# \n\n \n \n return uf", "title": "" }, { "docid": "94be5a5767708cabcb69b3341f4cdc07", "score": "0.54538405", "text": "def Interpolation(uc):\n [depth, xdim, ydim] = uc.shape\n \n # Initialise a next fine grid\n xnodes = 2*xdim-1\n ynodes = 2*ydim-1\n uf = np.zeros((depth, xnodes,ynodes))\n \n \n # For even ordered i and j on fine grid\n for k in range(depth):\n for i in range(xdim):\n for j in range (ydim):\n uf[k, 2*i, 2*j]=uc[k, i,j]\n \n\n # For even ordered j on fine grid on fine grid\n for k in range(depth):\n for i in range(0, ynodes, 2):\n for j in range(1, xnodes-1, 2):\n uf[k,i,j]=0.5*(uf[k,i,j-1]+uf[k,i,j+1])\n\n \n # For even ordered i on fine grid on fine grid\n for k in range(depth):\n for i in range(1, xnodes-1, 2):\n for j in range (0, ynodes, 2):\n uf[k,i,j]=0.5*(uf[k,i-1,j]+uf[k,i+1,j])\n \n # For odd ordered i and j on fine grid on fine grid\n for k in range(depth):\n for i in range (1, xnodes-1, 2):\n for j in range (1, ynodes-1, 2):\n uf[k,i,j]=0.25*(uf[k,i-1,j]+uf[k,i+1,j]+uf[k,i,j-1]+uf[k,i,j+1])# \n\n \n \n return uf", "title": "" }, { "docid": "27e152c3c23b8c915b081d4bf9cbfebc", "score": "0.54430366", "text": "def discretise(sample):\n return list((np.digitise(s, g) for s, g in zip(sample, grid)))", "title": "" }, { "docid": "8fbe5eb9a5aa1b8363ae97c71709508b", "score": "0.54220027", "text": "def 
newer_new_xy_grid(xy,z,dx,dz,pline=None):\n # reverse dz if necessary\n z_is_negative = np.less(nanmean(z),0)\n if z_is_negative == (dz < 0):\n my_dz = dz\n else:\n my_dz = -dz\n xd,yd,dd,pline = find_projection_distances(xy,pline=pline)\n # reverse dx if necessary\n dd_start = np.min(dd)\n dd_end = np.max(dd)\n dd_increasing = np.less(yd[np.argmin(dd)],yd[np.argmax(dd)])\n if dd_increasing == (dx > 0):\n my_dx = dx\n else:\n my_dx = -dx\n tmp = dd_end\n dd_end = dd_start\n dd_start = tmp \n # find gridding dimensions\n xy_new_range = np.arange(dd_start,dd_end,my_dx)\n z_new = np.arange(z[0],z[-1],my_dz) # find z1\n # find x/y ppojected locations of new grid\n min_dd = np.argmin(dd)\n max_dd = np.argmax(dd)\n x0 = xy[min_dd,0]\n y0 = xy[min_dd,1]\n x00 = xy[max_dd,0] - x0\n y00 = xy[max_dd,1] - y0\n grid_angle = np.arctan2(y00,x00)\n xy_new = np.zeros((np.size(xy_new_range),2),dtype=np.float64)\n xy_new[:,0] = xy_new_range*np.cos(grid_angle) + x0 # back to projection x - might be offset by up to dx\n xy_new[:,1] = xy_new_range*np.sin(grid_angle) + y0 # back to projection y - might be offset by up to dy\n \n return (dd,xy_new_range,xy_new,z_new)", "title": "" }, { "docid": "3de59bba3d5ebcbe236266cd57149156", "score": "0.54193264", "text": "def cExGrid(x):\r\n return interpolate.griddata(coord, cEx, x, fill_value=0.0)", "title": "" }, { "docid": "49088b5031104d91daa2635ea9178b85", "score": "0.54047316", "text": "def new_xy_grid_old(xy,z,dx,dz,pline=None,fit_to_xy=True):\n # reverse dz if necessary\n z_is_negative = np.less(nanmean(z),0)\n if z_is_negative == (dz < 0):\n my_dz = dz\n else:\n my_dz = -dz\n xd,yd,dd,xy_line = find_projection_distances(xy,pline=pline)\n # reverse dx if necessary\n dd_start = np.min(dd)\n dd_end = np.max(dd)\n dd_increasing = np.less(yd[np.argmin(dd)],yd[np.argmax(dd)])\n if dd_increasing == (dx > 0):\n my_dx = dx\n else:\n my_dx = -dx\n tmp = dd_end\n dd_end = dd_start\n dd_start = tmp \n # find gridding dimensions\n if pline is not None and not fit_to_xy:\n x0 = pline[0,0]\n y0 = pline[0,1]\n x00 = pline[1,0]-pline[0,0]\n y00 = pline[1,1]-pline[0,1]\n pline_distance = np.sqrt(x00**2 + y00**2)\n #if dx < 0:\n # xy_new_range = np.arrange(0,pline_distance,abs(dx))\n xy_new_range = np.arange(0,pline_distance,np.abs(dx))\n grid_angle = np.arctan2(y00,x00)\n xy_new = np.zeros((np.size(xy_new_range),2),dtype=np.float64)\n xy_new[:,0] = xy_new_range*np.cos(grid_angle) + x0 # back to projection x - might be offset by up to dx\n xy_new[:,1] = xy_new_range*np.sin(grid_angle) + y0 # back to projection y - might be offset by up to dy\n poo1, poo2, xy_new_range,pline = find_projection_distances(xy_new,pline=pline)\n else:\n xy_new_range = np.arange(dd_start,dd_end,my_dx)\n # find x/y ppojected locations of new grid\n min_dd = np.argmin(dd)\n max_dd = np.argmax(dd)\n x0 = xy[min_dd,0]\n y0 = xy[min_dd,1]\n x00 = xy[max_dd,0] - x0\n y00 = xy[max_dd,1] - y0\n grid_angle = np.arctan2(y00,x00)\n xy_new = np.zeros((np.size(xy_new_range),2),dtype=np.float64)\n xy_new[:,0] = xy_new_range*np.cos(grid_angle) + x0 # back to projection x - might be offset by up to dx\n xy_new[:,1] = xy_new_range*np.sin(grid_angle) + y0 # back to projection y - might be offset by up to dy\n z_new = np.arange(z[0],z[-1],my_dz) # find z1\n \n return (dd,xy_new_range,xy_new,z_new)", "title": "" }, { "docid": "3e9e681b98624e23c7ba4e890def71d9", "score": "0.53670645", "text": "def interpolate_curvatures(fr):\n\tglobal raw,fine\n\tinterp = 
scipy.interpolate.griddata(raw[fr][:,:2],raw[fr][:,2],\n\t\t(fine[fr].T[0],fine[fr].T[1]),method='cubic')\n\t#---! eliminate nan from the edges\n\tinterp[np.isnan(interp)] = 0.0\n\treturn interp", "title": "" }, { "docid": "13512919b335707922f1b793ddbfcbea", "score": "0.5320866", "text": "def get_all_distances_rounded(station_locations, grid_points, decimal_round) -> list:\n\n gp_dists = []\n for gp in grid_points:\n dists = get_distances(list_of_locs=station_locations, point=gp)\n # round to deciamsl to reduce number of required synth spectra\n dists = np.round(dists, decimals=decimal_round)\n gp_dists.append(dists)\n \n return gp_dists", "title": "" }, { "docid": "476f18a689f4b1ae06fcd574cf771b21", "score": "0.5283771", "text": "def regular_grid(field, lons, lats, grid_lon, grid_lat):\n points = [lons, lats]\n points = np.transpose(np.array(points))\n return griddata(points, field, (grid_lon, grid_lat), method='linear')", "title": "" }, { "docid": "38fabf15b275a95d541912d6a480fdc3", "score": "0.52734077", "text": "def xy_regrid(nparray,xy,xy_new,z=None,z_new=None,pre_calcs=None,\n kind='bin average',sd_drop=0): \n if kind == 'bin average':\n # this fuction optionally returns a tuple, we only want 1st element which is means\n return xy_bin_average(nparray,xy,xy_new,z,z_new,pre_calcs,\n return_stats=False,sd_drop=sd_drop)[0]\n else:\n return xy_interpolate(nparray,xy,xy_new,z,z_new,pre_calcs,kind)", "title": "" }, { "docid": "98199ce11565989d044d52dc1ccbddc5", "score": "0.527132", "text": "def rectangular_grid_sample(self, size_ab, size_bc, space_to_sample=\"DP\"):\n size = size_ab * size_bc\n mgrid = np.lib.index_tricks.nd_grid()\n if space_to_sample == \"linDP\":\n vab = (\n mgrid[0:size_ab, 0:size_bc][0]\n * (math.sqrt(self.maxab) - math.sqrt(self.minab))\n / float(size_ab)\n + math.sqrt(self.minab)\n ) ** 2.0\n vbc = (\n mgrid[0:size_ab, 0:size_bc][1]\n * (math.sqrt(self.maxbc) - math.sqrt(self.minbc))\n / float(size_bc)\n + math.sqrt(self.minbc)\n ) ** 2.0\n v = [vab.reshape(size).astype(\"d\"), vbc.reshape(size).astype(\"d\")]\n dlz = tf.stack(v, axis=1)\n elif space_to_sample == \"sqDP\":\n x = np.linspace(self.min_mprimeac, self.max_mprimeac, size_ab)\n y = np.linspace(self.min_thprimeac, self.max_thprimeac, size_bc)\n # Remove corners of sqDP as they lie outside phsp\n xnew = x[\n (x > self.min_mprimeac)\n & (x < self.max_mprimeac)\n & (y > self.min_thprimeac)\n & (y < self.max_thprimeac)\n ]\n ynew = y[\n (x > self.min_mprimeac)\n & (x < self.max_mprimeac)\n & (y > self.min_thprimeac)\n & (y < self.max_thprimeac)\n ]\n mprimeac, thprimeac = np.meshgrid(xnew, ynew)\n dlz = self.from_square_dalitz_plot(\n mprimeac.flatten().astype(\"d\"), thprimeac.flatten().astype(\"d\")\n )\n else:\n vab = (\n mgrid[0:size_ab, 0:size_bc][0]\n * (self.maxab - self.minab)\n / float(size_ab)\n + self.minab\n )\n vbc = (\n mgrid[0:size_ab, 0:size_bc][1]\n * (self.maxbc - self.minbc)\n / float(size_bc)\n + self.minbc\n )\n v = [vab.reshape(size).astype(\"d\"), vbc.reshape(size).astype(\"d\")]\n dlz = tf.stack(v, axis=1)\n\n return self.filter(dlz)", "title": "" }, { "docid": "b11921c5cda0b484598b0bbcf0f39865", "score": "0.5258103", "text": "def integration_grid(low, high, target_spacing):\n\n range_diff = high - low\n spacing = range_diff / int(math.ceil(range_diff / target_spacing))\n grid = np.arange(low + 0.5 * spacing, high, spacing)\n\n return grid, spacing", "title": "" }, { "docid": "f0a79c65a7ea01863de30d2af5cbc9b9", "score": "0.52557266", "text": "def _generate_evaluation_grid(self, 
pass_depth, turbine_depth):\n\n # Initialize yaw angles to evaluate, 'Ny' times the wind rose\n Ny = self.Ny_passes[pass_depth]\n evaluation_grid = np.tile(self._yaw_angles_opt_subset, (Ny, 1, 1, 1))\n\n # Get a list of the turbines in order of x and sort front to back\n for iw in range(self._nwinddirections_subset):\n turbid = self.turbines_ordered_array_subset[iw, turbine_depth] # Turbine to manipulate\n\n # # Check if this turbine needs to be optimized. If not, continue\n # if not self._turbs_to_opt_subset[iw, 0, turbid]:\n # continue\n\n # # Remove turbines that need not be optimized\n # turbines_ordered = [ti for ti in turbines_ordered if ti in self.turbs_to_opt]\n\n # Grab yaw bounds from self\n yaw_lb = self._yaw_lbs[iw, :, turbid]\n yaw_ub = self._yaw_ubs[iw, :, turbid]\n\n # Saturate to allowable yaw limits\n yaw_lb = np.clip(\n yaw_lb,\n self.minimum_yaw_angle[iw, :, turbid],\n self.maximum_yaw_angle[iw, :, turbid]\n )\n yaw_ub = np.clip(\n yaw_ub,\n self.minimum_yaw_angle[iw, :, turbid],\n self.maximum_yaw_angle[iw, :, turbid]\n )\n\n if pass_depth == 0:\n # Evaluate all possible coordinates\n yaw_angles_subset = np.linspace(yaw_lb, yaw_ub, Ny)\n else:\n # Remove middle point: was evaluated in previous iteration\n c = int(Ny / 2) # Central point (to remove)\n ids = [*list(range(0, c)), *list(range(c + 1, Ny + 1))]\n yaw_angles_subset = np.linspace(yaw_lb, yaw_ub, Ny + 1)[ids]\n\n evaluation_grid[:, iw, :, turbid] = yaw_angles_subset\n\n self._yaw_evaluation_grid = evaluation_grid\n return evaluation_grid", "title": "" }, { "docid": "888478555af352f7ff174dd3e3d8107f", "score": "0.52517", "text": "def norm_idx(self, zs, rows, cols):\n rr, zz, cc = np.meshgrid(rows, zs, cols)\n rr = (rr / self.measurement_size)[..., np.newaxis] - 0.5\n cc = (cc / self.measurement_size)[..., np.newaxis] - 0.5\n zz = (zz / self.num_layers)[..., np.newaxis] - 0.5\n mesh_grid = np.concatenate((rr, cc, zz), axis=-1) * 2\n return mesh_grid", "title": "" }, { "docid": "7338a380e9510e2d6da5d5ad6ed3a64e", "score": "0.52452606", "text": "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n ]\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n \n # print(east_max , east_min)\n # print(north_max , north_min)\n return grid, int(north_min), int(east_min)", "title": "" }, { "docid": "21f91e455bd8fed83804c009daac5358", "score": "0.5244419", "text": "def test3_srcGridHasNoBounds(self):\n\n toolsAndMethods = {\n 'libcf': ['linear',],\n 
'esmf': ['conserve', 'patch', 'linear'],\n }\n \n f = cdms2.open(cdat_info.get_prefix() + '/sample_data/swan.four.nc')\n vari = f('HS')\n gridi = vari.getGrid()\n lati = vari.getLatitude()\n loni = vari.getLongitude()\n f.close()\n\n nyo, nxo = 100, 200\n ymin, ymax = lati.min(), lati.max()\n xmin, xmax = loni.min(), loni.max()\n dy, dx = (ymax - ymin)/float(nyo), (xmax - xmin)/float(nxo)\n yo = numpy.array([ymin + dy*(j + 0.5) for j in range(nyo)])\n xo = numpy.array([xmin + dx*(i + 0.5) for i in range(nxo)])\n lato = cdms2.createAxis(yo)\n lato.designateLatitude() ; lato.units = 'degrees_north'\n lono = cdms2.createAxis(xo)\n lono.designateLongitude() ; lono.units= 'degrees_east'\n grido = cdms2.createRectGrid(lato, lono) \n\n for tool in toolsAndMethods:\n for met in toolsAndMethods[tool]:\n t0 = time()\n print tool.upper(), met, ':' \n diag = {'numDstPoints': None, 'numValid': None}\n # although the user may choose esmf, we expect to interpolate\n # using libcf as the source grid has no bounds in this case\n varo = vari.regrid(grido, \n regridMethod = met, regridTool = tool,\n coordSys = 'cart',\n nitermax = 10, tolpos = 0.01,\n diag = diag)\n # make sure libcf was selected\n self.assertEqual(diag['regridTool'], 'libcf')\n self.assertEqual(diag['regridMethod'], 'linear')", "title": "" }, { "docid": "e58fcc81dc93f5c98b1fcb1d0d18d295", "score": "0.52437073", "text": "def gen_dist_param_integrate_points_A(dist_param):\n\n '''Single element'''\n data__A_dict = dist_param['data__A']\n mu_val = [v for k, v in data__A_dict['params'].items() if 'mu' in k]\n sd_val = [v for k, v in data__A_dict['params'].items() if 'sd' in k]\n\n # if (dist_type_mu is None):\n dist_type_mu = mu_val[0]\n # if (dist_type_sd is None):\n dist_type_sd = sd_val[0]\n\n dist_type_integrate_method = data__A_dict['integrate']['method']\n dist_type_integrate_points = data__A_dict['integrate']['params']['points']\n\n if (dist_type_integrate_method == 'grid'):\n min_quantile = 0.00\n max_quantile = 1.00\n\n quantile_brackets = np.linspace(min_quantile,\n max_quantile,\n dist_type_integrate_points + 1)\n\n param_value_weights = (quantile_brackets[1:] - quantile_brackets[:-1])\n quantile_brackets_points = (quantile_brackets[1:] + quantile_brackets[:-1]) / 2\n\n param_value_points = norm.ppf(quantile_brackets_points,\n loc=dist_type_mu,\n scale=dist_type_sd)\n\n # copied over from param_loops.py line 247\n min_v = np.min(param_value_points)\n param_save_suffixs = []\n param_descs = []\n for A in param_value_points:\n if (A < 0):\n str_rep = str(int(np.abs(A - min_v) * 10000))\n else:\n str_rep = str(int(np.abs(A - min_v) * 10000))\n param_save_suffixs.append(str_rep)\n param_descs.append('A=' + str(A))\n\n dist_param_integrate_points = \\\n {'title_integrate_loop': 'Productivity Loop',\n 'file_save_suffix_integrate_loop': '_A',\n 'combo_desc_integrate_loop': 'Productivity',\n 'param_types': ['data_type'],\n 'param_names': ['A'],\n 'param_values_keys': ['data_type_A'],\n 'param_descs': param_descs,\n 'param_save_suffixs': param_save_suffixs,\n 'param_weights': param_value_weights,\n 'param_values': {'data_type_A': param_value_points}}\n\n return dist_param_integrate_points", "title": "" }, { "docid": "7029b416d590bc1759dbbef906aac668", "score": "0.5235493", "text": "def update_grid(self):\n\t\tcoords = self.cell_centers.view(numpy.float32).reshape((self.max_cells, 4))\n\n\t\tx_coords = coords[:,0]\n\t\tmin_x_coord = x_coords.min()\n\t\tmax_x_coord = x_coords.max()\n\t\tself.grid_x_min = int(math.floor(min_x_coord / 
self.grid_spacing))\n\t\tself.grid_x_max = int(math.ceil(max_x_coord / self.grid_spacing))\n\t\tif self.grid_x_min == self.grid_x_max:\n\t\t\tself.grid_x_max += 1\n\n\t\ty_coords = coords[:,1]\n\t\tmin_y_coord = y_coords.min()\n\t\tmax_y_coord = y_coords.max()\n\t\tself.grid_y_min = int(math.floor(min_y_coord / self.grid_spacing))\n\t\tself.grid_y_max = int(math.ceil(max_y_coord / self.grid_spacing))\n\t\tif self.grid_y_min == self.grid_y_max:\n\t\t\tself.grid_y_max += 1\n\n\t\tself.n_sqs = (self.grid_x_max-self.grid_x_min)*(self.grid_y_max-self.grid_y_min)", "title": "" }, { "docid": "53e8f21b9d724acda617e9ed217b456a", "score": "0.5224125", "text": "def get_gc_dist_grid(df, grdx, grdy):\r\n ny, nx = grdy.shape[0], grdx.shape[1]\r\n lat, lon = xy2latlon(grdx, grdy)\r\n grd_dist = np.zeros((nx, ny, len(df)))\r\n for i in range(len(df)):\r\n grd_dist[:, :, i] = gc_dist(*df.iloc[i][[\"lon\", \"lat\"]], lon, lat)\r\n return grd_dist", "title": "" }, { "docid": "a51deaf1978e2dbe6c19d197ab426a16", "score": "0.52011085", "text": "def grid_surface_integral(grid_lats,grid_azis,grid_values,sphere_radius,aziunit):\n\n if np.any(np.not_equal(grid_lats[:,0],grid_lats[:,1])):\n raise ValueError(('Latitudes are not the same in columns 0 and 1'\n +'this function expects the grid arrays to vary'\n +'in latitude along dimension 0'))\n if np.any(np.not_equal(grid_azis[0,:],grid_azis[1,:])):\n raise ValueError(('Azimuths/longitudes are not the same in rows 0 and 1'\n +'this function expects the grid arrays to vary'\n +'in longitude along dimension 1'))\n\n lats = grid_lats[:,0]\n dlats = np.diff(lats)\n dlat = np.abs(np.nanmedian(dlats))\n\n azis = grid_azis[0,:]\n dazis = angle_difference(azis[:-1],azis[1:],aziunit)\n dazis = np.mod(dazis,2*np.pi/_azifac(aziunit)) #Ensure > 0\n dazi = np.nanmedian(dazis)\n\n gridcell_bottom_lats = grid_lats-dlat/2.\n gridcell_top_lats = grid_lats+dlat/2.\n gridcell_left_azis = angle_difference(dazi/2.,grid_azis,aziunit)\n gridcell_right_azis = grid_azis+dazi/2.\n \n gridcell_areas = great_circle_rectangle_area(gridcell_bottom_lats,\n gridcell_top_lats,\n gridcell_left_azis,\n gridcell_right_azis,\n sphere_radius,\n aziunit)\n\n integrated_values = np.nansum(gridcell_areas*grid_values)\n return integrated_values", "title": "" }, { "docid": "d106b1a84033e9aec2f26da39293326f", "score": "0.5195131", "text": "def grid_data(ds, city='toronto', res=0.05):\n\n # Lat/lon max/min\n lonmn, lonmx, latmn, latmx = poi.get_plot_limits(city=city, extent=1, res=res)\n\n\n # Create a uniform lat/lon grid\n lat_bnds = np.arange(latmn, latmx, res)\n lon_bnds = np.arange(lonmn, lonmx, res)\n\n # arr will accumulate the values within each grid entry\n no2_arr = np.zeros([lat_bnds.size, lon_bnds.size])\n u_arr = np.zeros([lat_bnds.size, lon_bnds.size])\n v_arr = np.zeros([lat_bnds.size, lon_bnds.size])\n # dens_arr will count the number of observations that occur within that grid entry\n dens_arr = np.zeros([lat_bnds.size, lon_bnds.size], dtype=np.int32)\n\n # Load datasets\n no2 = ds.nitrogendioxide_tropospheric_column.values\n u = ds.u.values\n v = ds.v.values\n lat = ds.latitude.values\n lon = ds.longitude.values\n\n # Check if the lat and lon values are found within lat/lon bounds\n lat_flt = (lat > latmn) * (lat < latmx)\n lon_flt = (lon > lonmn) * (lon < lonmx)\n\n # Create array to filter data points found within lat/lon bounds\n filter_arr = lat_flt * lon_flt\n\n # Keep no2 values that are within the bounded lat/lon\n no2 = no2[filter_arr]\n u = u[filter_arr]\n v = v[filter_arr]\n\n # 
Filter lat/lon mn/mx values for each grid square\n vlonmn = np.minimum(ds['longitude_bounds'][0].values,\n ds['longitude_bounds'][1].values)[filter_arr]\n vlonmx = np.maximum(ds['longitude_bounds'][2].values,\n ds['longitude_bounds'][3].values)[filter_arr]\n vlatmn = np.minimum(ds['latitude_bounds'][0].values,\n ds['latitude_bounds'][1].values)[filter_arr]\n vlatmx = np.maximum(ds['latitude_bounds'][2].values,\n ds['latitude_bounds'][3].values)[filter_arr]\n\n for i in range(no2.size):\n # Find the indices in the lat/lon_bnds grid at which the\n # max/min lat/lon would fit (i.e. finding the grid squares that the data\n # point has values for)\n lat_inds = np.searchsorted(\n lat_bnds, np.array([vlatmn[i], vlatmx[i]]))\n lon_inds = np.searchsorted(\n lon_bnds, np.array([vlonmn[i], vlonmx[i]]))\n\n # Obtain the lat/lon indices that will be used to slice val_arr\n lat_slice = slice(lat_inds[0], lat_inds[1]+1)\n lon_slice = slice(lon_inds[0], lon_inds[1]+1)\n\n # Add the NO2 values that fit in those lat/lon grid squares to val_arr and\n # add 1 to dens_arr to increase the count of observations found in that\n # grid square\n no2_arr[lat_slice, lon_slice] += no2[i]\n u_arr[lat_slice, lon_slice] += u[i]\n v_arr[lat_slice, lon_slice] += v[i]\n dens_arr[lat_slice, lon_slice] += 1\n\n # Divide val_arr by dens_arr; if dividing by 0, return 0 in that entry\n # no2_arr = no2_arr.clip(min=0)\n\n no2_arr_mean = np.divide(no2_arr, dens_arr, out=(\n np.zeros_like(no2_arr)), where=(dens_arr != 0))\n u_arr_mean = np.divide(u_arr, dens_arr, out=(\n np.zeros_like(u_arr)), where=(dens_arr != 0))\n v_arr_mean = np.divide(v_arr, dens_arr, out=(\n np.zeros_like(v_arr)), where=(dens_arr != 0))\n\n # CREATE NEW DATASET WITH NO2, WS, AND BEARING FOR EACH LAT, LON\n new_ds = xr.Dataset({\n 'no2': xr.DataArray(\n data=np.array([no2_arr_mean]), # enter data here\n dims=['time', 'latitude', 'longitude'],\n coords={'latitude': ('latitude', lat_bnds),\n 'longitude': ('longitude', lon_bnds),\n 'time': np.array([ds.measurement_time.values])},\n attrs={'units': 'mol m-2'}),\n 'u': xr.DataArray(\n data=np.array([u_arr_mean]), # enter data here\n dims=['time', 'latitude', 'longitude'],\n coords={'latitude': ('latitude', lat_bnds),\n 'longitude': ('longitude', lon_bnds),\n 'time': np.array([ds.measurement_time.values])},\n attrs={'units': 'm/s'}),\n 'v': xr.DataArray(\n data=np.array([v_arr_mean]), # enter data here\n dims=['time', 'latitude', 'longitude'],\n coords={'latitude': ('latitude', lat_bnds),\n 'longitude': ('longitude', lon_bnds),\n 'time': np.array([ds.measurement_time.values])},\n attrs={'units': 'm/s'})},\n attrs={'description': 'dataset for NO2 TVCD, wind speed, and bearing'})\n\n return new_ds", "title": "" }, { "docid": "d38149aeaaef4989fa635929a12097ff", "score": "0.517274", "text": "def _interp(self, grid, pts):\n lat, weights, xloc = regular_grid_interpolation.get_interp_coefficients(\n grid,\n pts,\n min_grid_value=self.min_grid_value,\n max_grid_value=self.max_grid_value)\n xloc *= self.x_location_max\n\n return lat, weights, xloc", "title": "" }, { "docid": "c2bba823603fc5347e6c7aaf607bc2e3", "score": "0.51525396", "text": "def interpolate_wind_components_from_rpn_files(data_dir: Path = \"\", out_dir: Path = \"\", target_grid_config=None,\n wind_level=1., wind_level_kind=level_kinds.HYBRID):\n\n # sort files to be in the chronological order\n files_sorted = list(sorted((mfile for mfile in data_dir.iterdir()), key=file_sort_key))\n\n out_file_name = \"erai0.75_interpolated_uu_vv_knots.nc\"\n\n 
n_records_written = 0\n\n uu_var = None\n vv_var = None\n time_var = None\n\n indices_in_source_field = None\n lon_t = None\n\n with Dataset(out_dir.joinpath(out_file_name), \"w\") as ds:\n\n assert isinstance(ds, Dataset)\n\n for in_file in files_sorted:\n\n print(\"Processing {}\".format(in_file))\n\n with RPN(str(in_file)) as r:\n assert isinstance(r, RPN)\n uu = r.get_all_time_records_for_name_and_level(\"UU\", level=wind_level, level_kind=wind_level_kind)\n vv = r.get_all_time_records_for_name_and_level(\"VV\", level=wind_level, level_kind=wind_level_kind)\n\n # create dimensions, initialize variables and coordiates\n if uu_var is None:\n lons, lats = r.get_longitudes_and_latitudes_for_the_last_read_rec()\n\n xs, ys, zs = lat_lon.lon_lat_to_cartesian(lons.flatten(), lats.flatten())\n ktree = KDTree(list(zip(xs, ys, zs)))\n\n #\n lon_t, lat_t = target_grid_config.get_lons_and_lats_of_gridpoint_centers()\n xt, yt, zt = lat_lon.lon_lat_to_cartesian(lon_t.flatten(), lat_t.flatten())\n\n # nearest neighbour interpolation\n dists, indices_in_source_field = ktree.query(list(zip(xt, yt, zt)), k=1)\n\n ds.createDimension(\"time\")\n ds.createDimension(\"x\", lon_t.shape[0])\n ds.createDimension(\"y\", lon_t.shape[1])\n\n lon_var = ds.createVariable(\"lon\", \"f4\", dimensions=(\"x\", \"y\"))\n lat_var = ds.createVariable(\"lat\", \"f4\", dimensions=(\"x\", \"y\"))\n\n lon_var[:] = lon_t\n lat_var[:] = lat_t\n\n start_date = file_sort_key(in_file)\n\n time_var = ds.createVariable(\"time\", \"i4\", dimensions=(\"time\",))\n time_var.units = \"hours since {:%Y-%m-%d %H:%M:%S}\".format(start_date)\n\n uu_var = ds.createVariable(\"UU\", \"f4\", dimensions=(\"time\", \"x\", \"y\"),\n zlib=True,\n least_significant_digit=3)\n uu_var.units = \"knots\"\n uu_var.coordinates = \"lon lat\"\n\n vv_var = ds.createVariable(\"VV\", \"f4\",\n dimensions=(\"time\", \"x\", \"y\"),\n zlib=True,\n least_significant_digit=3)\n vv_var.units = \"knots\"\n vv_var.coordinates = \"lon lat\"\n\n t_vals = list(sorted(uu))\n\n uu_vals = [uu[t].flatten()[indices_in_source_field].reshape(lon_t.shape) for t in t_vals]\n vv_vals = [vv[t].flatten()[indices_in_source_field].reshape(lon_t.shape) for t in t_vals]\n\n uu_var[n_records_written:, :, :] = uu_vals\n vv_var[n_records_written:, :, :] = vv_vals\n time_var[n_records_written:] = date2num(t_vals, time_var.units)\n\n n_records_written += len(t_vals)", "title": "" }, { "docid": "c894725981161064f614c005be93bba3", "score": "0.51521105", "text": "def lininterp_3d(x_grid, y_grid, z_grid, vals, s):\n\n\n d = 3\n smin = (x_grid[0], y_grid[0], z_grid[0])\n smax = (x_grid[-1], y_grid[-1], z_grid[-1])\n\n order_0 = len(x_grid)\n order_1 = len(y_grid)\n order_2 = len(z_grid)\n\n # (s_1, ..., s_d) : evaluation point\n s_0 = s[0]\n s_1 = s[1]\n s_2 = s[2]\n\n # normalized evaluation point (in [0,1] inside the grid)\n s_0 = (s_0-smin[0])/(smax[0]-smin[0])\n s_1 = (s_1-smin[1])/(smax[1]-smin[1])\n s_2 = (s_2-smin[2])/(smax[2]-smin[2])\n\n # q_k : index of the interval \"containing\" s_k\n q_0 = max( min( int(s_0 *(order_0-1)), (order_0-2) ), 0 )\n q_1 = max( min( int(s_1 *(order_1-1)), (order_1-2) ), 0 )\n q_2 = max( min( int(s_2 *(order_2-1)), (order_2-2) ), 0 )\n\n # lam_k : barycentric coordinate in interval k\n lam_0 = s_0*(order_0-1) - q_0\n lam_1 = s_1*(order_1-1) - q_1\n lam_2 = s_2*(order_2-1) - q_2\n\n # v_ij: values on vertices of hypercube \"containing\" the point\n v_000 = vals[(q_0), (q_1), (q_2)]\n v_001 = vals[(q_0), (q_1), (q_2+1)]\n v_010 = vals[(q_0), (q_1+1), (q_2)]\n 
v_011 = vals[(q_0), (q_1+1), (q_2+1)]\n v_100 = vals[(q_0+1), (q_1), (q_2)]\n v_101 = vals[(q_0+1), (q_1), (q_2+1)]\n v_110 = vals[(q_0+1), (q_1+1), (q_2)]\n v_111 = vals[(q_0+1), (q_1+1), (q_2+1)]\n\n # interpolated/extrapolated value\n output = (1-lam_0)*((1-lam_1)*((1-lam_2)*(v_000) + (lam_2)*(v_001)) + (lam_1)*((1-lam_2)*(v_010) + (lam_2)*(v_011))) + (lam_0)*((1-lam_1)*((1-lam_2)*(v_100) + (lam_2)*(v_101)) + (lam_1)*((1-lam_2)*(v_110) + (lam_2)*(v_111)))\n return output", "title": "" }, { "docid": "53c85a53221c89cf8d12f6d87dd2ca5c", "score": "0.5127787", "text": "def _interpolate(self, grid: np.ndarray) -> np.ndarray:\n raise NotImplementedError", "title": "" }, { "docid": "e3fd3ea7d8a76069ac8f044e16bbbc52", "score": "0.5118991", "text": "def xyz2grd(point_lons,point_lats,point_zvals,grid_lons,grid_lats):\n # https://stackoverflow.com/questions/30655749/how-to-set-a-maximum-distance-between-points-for-interpolation-when-using-scipy\n \n # TODO change this to the pygmt implementation?\n\n if grid_lons.ndim == 1:\n grid_lons, grid_lats = np.meshgrid(grid_lons, grid_lats)\n\n grid_sampling = np.abs(grid_lats[1]-grid_lats[0])\n xy = np.vstack((point_lons, point_lats)).T\n\n # Construct kd-tree, functionality copied from scipy.interpolate\n tree = cKDTree(xy)\n xi = _ndim_coords_from_arrays((grid_lons, grid_lats), ndim=xy.shape[1])\n dists, indexes = tree.query(xi)\n\n grid_interp = spi.griddata(xy, point_zvals,\n (grid_lons, grid_lats),\n method='nearest')\n\n # Copy original result but mask missing values with NaNs\n result = grid_interp[:]\n result[dists > grid_sampling/2.] = np.nan\n\n return result", "title": "" }, { "docid": "c524d661f1fed310e8c6030899672012", "score": "0.51161677", "text": "def grid():\n # A = [A0, A1, A2, A3, A4, A5, A6, A7, A8, A9]\n # A9 = [0,18]\n # ......\n # A1 = [0,2]\n # A0 = [0,0]\n ##########################################################################################################################\n A = np.array([ [0,0] , [0,2] , [0,4] , [0,6] , [0,8] , [0,10] ])\n \n B = np.array([ [2,10] , [2,8] , [2,6] , [2,4] , [2,2] , [2,0] ])\n \n C = np.array([ [4,0] , [4,2] , [4,4] , [4,6] , [4,8] , [4,10] ])\n\n D = np.array([ [6,10] , [6,8] , [6,6] , [6,4] , [6,2] , [6,0] ])\n\n E = np.array([ [8,0] , [8,2] , [8,4] , [8,6] , [8,8] , [8,10] ])\n\n F = np.array([ [10,10] , [10,8] , [10,6] , [10,4] , [10,2] , [10,0] ])\n\n return [A, B, C, D, E, F];", "title": "" }, { "docid": "0e543dc78dcf0fcbac78bb675e751999", "score": "0.51114905", "text": "def _sample_grid(\n self,\n xy: Tuple[Iterable[Number], Iterable[Number]],\n kx: int = 1,\n ky: int = 1,\n s: Number = 0,\n ) -> np.ndarray:\n x, y = xy\n signs = np.sign(self.d).astype(int)\n # HACK: scipy.interpolate.RectBivariateSpline does not support NAN\n Zmin = np.nanmin(self.array)\n is_nan = np.isnan(self.array)\n self.array[is_nan] = helpers.numpy_dtype_minmax(self.array.dtype)[0]\n fun = scipy.interpolate.RectBivariateSpline(\n self.y[:: signs[1]],\n self.x[:: signs[0]],\n self.array[:: signs[1], :: signs[0]],\n bbox=(min(self.ylim), max(self.ylim), min(self.xlim), max(self.xlim)),\n kx=kx,\n ky=ky,\n s=s,\n )\n xdir = 1 if (len(x) < 2) or x[1] > x[0] else -1\n ydir = 1 if (len(y) < 2) or y[1] > y[0] else -1\n samples = fun(y[::ydir], x[::xdir], grid=True)[::ydir, ::xdir]\n samples[samples < Zmin] = np.nan\n self.array[is_nan] = np.nan\n return samples", "title": "" }, { "docid": "00d3c1033598cd306a883741927a79b5", "score": "0.510666", "text": "def interp_rho_z_t(energy, num=1):\n\n loc_left = 
np.searchsorted(energy_list, energy, side='right')-1\n loc_right = np.searchsorted(energy_list, energy)\n en1 = energy_list[loc_left]\n en2 = energy_list[loc_right]\n\n\n if __name__ == '__main__':\n p = Pool(2)\n results = p.starmap(sample_at_data_energy, [(en1, num), (en2, num)])\n\n samplearr1 = results[0]\n samplearr2 = results[1]\n i = 0\n list_out_rho_z = []\n while i < np.shape(samplearr1)[0]:\n new_data1 = samplearr1[i]\n new_data2 = samplearr2[i]\n values_to_interp = np.vstack([new_data1, new_data2])#.transpose()\n list_out_rho_z.append(griddata([en1, en2], values_to_interp, energy, method='linear', rescale=True))\n i+=1\n\n df_out_rho_z = pd.DataFrame(np.row_stack(list_out_rho_z), columns=['rho', 'z', 't'])\n\n return df_out_rho_z", "title": "" }, { "docid": "1001b1b16715929a2c8a07b4c9c5ff92", "score": "0.51065004", "text": "def create_temp_interp_gridded(time_common_grid, depth_common_grid, temp_values, time_values, depth_values): \n n_file = len(temp_values)\n n_depth = len(depth_common_grid)\n n_time = len(time_common_grid)\n # initialise with nan\n temp_gridded = np.full((n_depth, n_time), np.nan)\n \n temp_binned_array = []\n depth_binned_array = []\n \n time_delta = (time_common_grid[1]- time_common_grid[0])\n \n time_bins_start = []\n for j in range(n_time):\n time_bins_start.append(time_common_grid[j] - time_delta/2) # time_1d_interp sits in the centre of the bin\n\n time_bins_start.append(time_common_grid[j] + time_delta/2) # add last value\n\n # histogram doesn't work with datetime so we need to use timestamps in seconds since a reference date\n unit_in_seconds_since_arbitrary_date = 'seconds since 1950-01-01 00:00:00 UTC'\n arbitrary_calendar = 'gregorian'\n timestamp_bins_start = date2num(time_bins_start, unit_in_seconds_since_arbitrary_date, arbitrary_calendar)\n \n # temporal binning per dataset\n for i_file in range(n_file): \n timestamp_values = date2num(time_values[i_file], unit_in_seconds_since_arbitrary_date, arbitrary_calendar)\n \n time_hist = np.histogram(timestamp_values, timestamp_bins_start)[0]\n \n # sometimes there is no data in a bin so time_hist == 0\n # 0 divided by 0 yields a NaN which is what we want so we can safely ignore the warning\n old_settings = np.seterr(divide='ignore', invalid='ignore')\n temp_binned = np.histogram(timestamp_values, timestamp_bins_start, weights=temp_values[i_file]) [0] / time_hist\n depth_binned = np.histogram(timestamp_values, timestamp_bins_start, weights=depth_values[i_file])[0] / time_hist\n np.seterr(**old_settings)\n \n temp_binned_array.append(temp_binned)\n depth_binned_array.append(depth_binned)\n\n # vertical interpolation per time stamp\n for j in range(n_time):\n temp_binned = np.array([row[j] for row in temp_binned_array])\n depth_binned = np.array([row[j] for row in depth_binned_array])\n \n temp_binned = temp_binned [~np.isnan(temp_binned)]\n depth_binned = depth_binned[~np.isnan(depth_binned)]\n \n if not temp_binned.size or not depth_binned.size:\n # array empty due to all NaN\n continue\n \n # we need to sort temp and depth by increasing depths before we can interpolate\n ii = np.argsort(depth_binned)\n depth_binned = depth_binned[ii]\n temp_binned = temp_binned [ii]\n \n # we only want to interpolate what's between the depth_binned range, what is below or above is nan\n temp_gridded[:,j] = np.interp(depth_common_grid, depth_binned, temp_binned, left=np.nan, right=np.nan)\n \n return temp_gridded", "title": "" }, { "docid": "44202469ca976a6e382506753a20c6c7", "score": "0.51014197", "text": "def 
sample_grid(f_locs, function_domain, num_pts):\n lows, highs = zip(*function_domain)\n num_funcs = len(f_locs)\n f_locs = np.tile(np.asarray(f_locs), num_pts).reshape(-1, len(f_locs[0]))\n f_pts = np.random.uniform(lows, highs, (num_funcs * num_pts, len(lows)))\n return np.hstack([f_locs, f_pts])", "title": "" }, { "docid": "2be379606c7858fd57282c1e4b35ccd3", "score": "0.5096038", "text": "def normalization(grid, satellite, country):\n num_bands = grid.shape[0]\n means = MEANS[satellite][country]\n stds = STDS[satellite][country]\n grid = (grid-means[:num_bands].reshape(num_bands, 1, 1, 1))/stds[:num_bands].reshape(num_bands, 1, 1, 1)\n \n if satellite not in ['s1', 's2', 'planet']:\n raise ValueError(\"Incorrect normalization parameters\")\n return grid", "title": "" }, { "docid": "f4e595826e7e41ce6541769e07fed129", "score": "0.50874186", "text": "def test0(self):\n\n toolsAndMethods = {\n 'libcf': ['linear'],\n 'esmf': ['linear', 'patch', 'conserve'],\n }\n \n f = cdms2.open(cdat_info.get_prefix() + '/sample_data/swan.four.nc')\n vari = f('HS')\n f.close()\n gridi = vari.getGrid()\n\n # add bounds to input grid\n lati = vari.getLatitude()\n loni = vari.getLongitude()\n xib, yib = bounds2d(loni, lati)\n loni.setBounds(xib)\n lati.setBounds(yib)\n self.assertNotEqual(gridi.getLatitude().getBounds(), None)\n self.assertNotEqual(gridi.getLongitude().getBounds(), None)\n \n # output grid\n nyo, nxo = 100, 200\n ymin, ymax = lati.min(), lati.max()\n xmin, xmax = loni.min(), loni.max()\n dy, dx = (ymax - ymin)/float(nyo), (xmax - xmin)/float(nxo)\n yo = numpy.array([ymin + dy*(j + 0.5) for j in range(nyo)])\n xo = numpy.array([xmin + dx*(i + 0.5) for i in range(nxo)])\n lato = cdms2.createAxis(yo)\n lato.designateLatitude() ; lato.units = 'degrees_north'\n lono = cdms2.createAxis(xo)\n lono.designateLongitude() ; lono.units= 'degrees_east'\n grido = cdms2.createRectGrid(lato, lono)\n self.assertNotEqual(grido.getLatitude().getBounds(), None)\n self.assertNotEqual(grido.getLongitude().getBounds(), None)\n \n for tool in toolsAndMethods:\n for met in toolsAndMethods[tool]:\n t0 = time()\n print tool.upper(), met, ':' \n diag = {}\n varo = vari.regrid(grido,\n regridMethod = met, regridTool = tool,\n coordSys = 'cart', nitermax = 10, \n diag = diag)\n print 'diag = ', diag\n met2 = diag['regridMethod']\n tool2 = diag['regridTool']\n self.assertEqual(met, met2)\n self.assertEqual(tool2, tool)\n self.assertGreater(varo.min(), -0.01)\n dt = time() - t0\n print tool.upper(), met, ':', dt, 'seconds'\n\n if PLOT:\n pylab.figure(figsize=(12, 6))\n pylab.subplots_adjust(right=0.9)\n pylab.subplot(121)\n pylab.pcolor(loni[:], lati[:], vari[0].asma(), vmin = 0, vmax = 2.5)\n pylab.axis([xmin, xmax, ymin, ymax])\n pylab.colorbar()\n pylab.title('Original')\n pylab.subplot(122)\n pylab.pcolor(lono[:], lato[:], varo[0].asma(), vmin = 0, vmax = 2.5)\n pylab.axis([xmin, xmax, ymin, ymax])\n pylab.title(tool.upper()+' / '+met.upper())\n pylab.colorbar()#cax=pylab.axes([0.92, 0.3, 0.02, 0.6]))\n pylab.savefig('testRaynaud.%(tool)s.%(met)s.png'%vars())", "title": "" }, { "docid": "0479e83edd8ce577fb1c8781f48228d1", "score": "0.5079891", "text": "def smooth(x, y, xgrid):\n samples = np.random.choice(len(x), len(x), replace=True)\n y_s = y[samples]\n x_s = x[samples]\n y_sm = localreg(x_s, y_s, x0=None, degree=1, kernel=triangular, width=19.08094)\n y_grid = scipy.interpolate.interp1d(x_s, y_sm, fill_value='extrapolate')(xgrid)\n\n return y_grid", "title": "" }, { "docid": "3eadb291cce18555ec5d0be545ebb2d0", "score": 
"0.50710887", "text": "def resample(self, cellsize, method='nearest'):\n xllcenter = self.aschdr['xllcenter']\n yllcenter = self.aschdr['yllcenter']\n xurcenter = xllcenter + self.aschdr['cellsize'] * self.aschdr['ncols']\n yurcenter = yllcenter + self.aschdr['cellsize'] * self.aschdr['nrows']\n nx = int((xurcenter - xllcenter) // cellsize)\n ny = int((yurcenter - yllcenter) // cellsize)\n dimratio = cellsize / self.aschdr['cellsize']\n\n if method == 'nearest':\n JJ, II = np.meshgrid(np.arange(nx), np.arange(ny))\n srcII = np.around(II * dimratio) \\\n .astype(int) \\\n .clip(0, self.aschdr['nrows'] - 1)\n srcJJ = np.around(JJ * dimratio) \\\n .astype(int).\\\n clip(0, self.aschdr['ncols'] - 1)\n self.data = self.data[srcII, srcJJ]\n else:\n raise NotImplementedError('method \"{0}\" not '\n 'implemented'.format(method))\n\n self.aschdr['cellsize'] = float(cellsize)\n self.aschdr['nrows'] = ny\n self.aschdr['ncols'] = nx\n self.aschdr['xllcorner'] = self.aschdr['xllcenter'] - (0.5 * cellsize)\n self.aschdr['yllcorner'] = self.aschdr['yllcenter'] - (0.5 * cellsize)\n self._enforce_hdr_consistency()\n return", "title": "" }, { "docid": "fe7500eb0706dfd9f25b07cbbb057c84", "score": "0.5070662", "text": "def _interpolate_across(\n grid,\n outfile,\n resolution,\n limits,\n intpolparams,\n basepath=\"grid/\",\n intpol_freqs=False,\n along_var=\"xcen\",\n outbasename=\"\",\n debug=False,\n verbose=False,\n):\n print(\"\\n********************\\nAcross interpolation\\n********************\")\n # Parameters possibly in header\n headvars = [\n \"tracks\",\n \"isochs\",\n \"massini\",\n \"age\",\n \"FeHini\",\n \"MeHini\",\n \"yini\",\n \"alphaMLT\",\n \"ove\",\n \"gcut\",\n \"eta\",\n \"alphaFe\",\n \"dif\",\n ]\n\n # Determine whether the grid is iscohrones or tracks\n if \"track\" in grid[\"header/library_type\"][()]:\n isomode = False\n modestr = \"track\"\n dname = \"dage\"\n\n # Determine the number to assign the new tracks\n tracklist = list(grid[basepath + \"tracks\"].items())\n newnum = max([int(f[0].split(\"track\")[-1]) for f in tracklist]) + 1\n numfmt = len(tracklist[0][0].split(\"track\")[-1])\n\n # Form basis of varied parameters\n bpars = [par.decode(\"UTF-8\") for par in grid[\"header/active_weights\"]]\n baseparams = {par: resolution[par] for par in bpars}\n const_vars = {}\n for par in headvars:\n if (par not in bpars) and (par in grid[\"header\"]):\n const_vars[par] = grid[os.path.join(\"header\", par)][0]\n\n # Collect the headvars, as they are constant along the track\n headvars = list(np.unique(list(bpars) + list(const_vars)))\n sobol = _check_sobol(grid, resolution)\n\n elif \"isochrone\" in grid[\"header/library_type\"][()]:\n isomode = True\n modestr = \"isochrone\"\n dname = \"dmass\"\n newnum = 0\n\n # Parameters for forming basis\n bpars = [par.decode(\"UTF-8\") for par in grid[\"header/active_weights\"]]\n baseparams = {par: resolution[par] for par in bpars}\n const_vars = {}\n isochhead = os.path.join(\"header\", basepath)\n for par in headvars:\n if (par not in bpars) and (par in grid[isochhead]):\n const_vars[par] = grid[os.path.join(isochhead, par)][0]\n # Only propagate the present parameters\n headvars = list(np.unique(list(bpars) + list(const_vars)))\n sobol = _check_sobol(grid, resolution)\n\n # Check frequency limits\n if \"freqs\" in limits:\n freqlims = limits[\"freqs\"]\n del limits[\"freqs\"]\n\n # Extract tracks/isochrones within user-specified limits\n print(\"Locating limits and restricting sub-grid ... 
\", flush=True)\n selectedmodels = ih.get_selectedmodels(grid, basepath, limits, cut=False)\n\n # If Cartesian method, save tracks/isochrones within limits to new grid\n fail = False\n if grid != outfile and not sobol:\n for name, index in selectedmodels:\n if not isomode:\n index2d = np.array(np.transpose([index, index]))\n if not (any(index) and sum(index) > 2):\n outfile[os.path.join(name, \"FeHini_weight\")] = -1\n else:\n # Write everything from the old grid to the new in the region\n for key in grid[name].keys():\n keypath = os.path.join(name, key)\n if \"_weight\" in key:\n outfile[keypath] = grid[keypath][()]\n elif \"osc\" in key:\n if intpol_freqs:\n outfile[keypath] = grid[keypath][index2d]\n else:\n outfile[keypath] = grid[keypath][index]\n\n # Form the base array for interpolation\n base = np.zeros((len(selectedmodels), len(baseparams)))\n for i, name in enumerate(selectedmodels):\n for j, bpar in enumerate(baseparams):\n parm = grid[basepath + name][bpar][0]\n base[i, j] = parm\n\n # Determine the base params for new tracks\n print(\"\\nBuilding triangulation ... \", end=\"\", flush=True)\n triangulation = spatial.Delaunay(base)\n new_points, trindex = _calc_across_points(\n base, baseparams, triangulation, sobol, outbasename, debug, verbose\n )\n print(\"done!\")\n\n # List of tracknames for accessing grid\n tracknames = list(selectedmodels)\n # List to sort out failed tracks/isochrones at the end\n success = np.ones(len(new_points[:, 0]), dtype=bool)\n\n #############\n # Main loop #\n #############\n numnew = len(new_points)\n print(\"Interpolating {0} tracks/isochrones ... \".format(numnew))\n\n # Use a progress bar (with the package tqdm; will write to stderr)\n pbar = tqdm(total=numnew, desc=\"--> Progress\", ascii=True)\n\n # Use tqdm for progress bar\n for tracknum, (point, tind) in enumerate(zip(new_points, trindex)):\n # Update progress bar in the start of the loop to count skipped tracks\n pbar.update(1)\n\n # Directory of the track/isochrone\n if not isomode:\n libname = (\n basepath + \"tracks/track\" + str(int(newnum + tracknum)).zfill(numfmt)\n )\n else:\n FeH = point[bpars.index(\"FeHini\")]\n age = point[bpars.index(\"age\")]\n libname = basepath + \"FeH={0:.4f}/age={1:.4f}\".format(FeH, age)\n\n # Form the basis of interpolation, and collect minmax of the along track variable\n ind = triangulation.simplices[tind]\n count = sum([sum(selectedmodels[tracknames[i]]) for i in ind])\n intbase = np.zeros((count, len(bpars) + 1))\n y = np.zeros((count))\n minmax = np.zeros((len(ind), 3))\n ir = 0\n\n # Loop over the enveloping tracks\n for j, i in enumerate(ind):\n track = tracknames[i]\n bvar = grid[basepath + track][along_var][selectedmodels[track]]\n minmax[j, :] = [min(bvar), max(bvar), abs(np.median(np.diff(bvar)))]\n for k, a in enumerate(list(bvar)):\n intbase[k + ir, : len(base[i])] = base[i]\n intbase[k + ir, -1] = a\n ir += len(bvar)\n minmax = [max(minmax[:, 0]), min(minmax[:, 1]), np.mean(minmax[:, 2])]\n if minmax[0] > minmax[1]:\n warstr = \"Warning: Interpolating {0} {1} \".format(\n modestr, newnum + tracknum\n )\n warstr += \"was aborted due to no overlap in {0}\".format(along_var)\n warstr += \" of the enveloping {0}!\".format(modestr)\n print(warstr)\n success[tracknum] = False\n outfile[os.path.join(libname, \"FeHini_weight\")] = -1\n continue\n\n # Assume equal spacing, but approximately the same number of points\n try:\n Npoints = abs(int(np.ceil((minmax[1] - minmax[0]) / minmax[2])))\n except:\n prtstr = \"Choice of base parameter '{:s}' 
resulted\".format(along_var)\n prtstr += \" in an error when determining it's variance along the \"\n prtstr += \"{:s}, consider choosing another.\".format(modestr)\n raise ValueError(prtstr)\n # The base along the new track\n newbvar = np.linspace(minmax[0], minmax[1], Npoints)\n newbase = np.ones((len(newbvar), len(bpars) + 1))\n for i, p in enumerate(point):\n newbase[:, i] *= p\n newbase[:, -1] = newbvar\n sub_triangle = spatial.Delaunay(intbase)\n\n try:\n # Interpolate and write each individual parameter, apart from oscillations\n for key in intpolparams:\n keypath = os.path.join(libname, key)\n # Weights are given a placeholder value\n if \"_weight\" in key:\n outfile[keypath] = 1.0\n elif key == along_var:\n outfile[keypath] = newbase[:, -1]\n elif key == dname:\n outfile[keypath] = ih.bay_weights(newbase[:, -1])\n elif \"name\" in key:\n outfile[keypath] = len(newbase[:, -1]) * [b\"interpolated-entry\"]\n elif (\"osc\" in key) or (key in const_vars):\n continue\n else:\n ir = 0\n for j, i in enumerate(ind):\n track = tracknames[i]\n yind = selectedmodels[track]\n y[ir : ir + sum(yind)] = grid[basepath + track][key][yind]\n ir += sum(yind)\n intpol = interpolate.LinearNDInterpolator(sub_triangle, y)\n newparam = intpol(newbase)\n if any(np.isnan(newparam)):\n nan = \"{0} {1} had NaN value(s)!\".format(\n modestr, newnum + tracknum\n )\n raise ValueError(nan)\n outfile[keypath] = newparam\n\n # Dealing with oscillations\n if intpol_freqs:\n osc = []\n osckey = []\n sections = [0]\n for i in ind:\n # Extract the oscillation fequencies and id's\n track = tracknames[i]\n for model in np.where(selectedmodels[track])[0]:\n osc.append(grid[basepath + track][\"osc\"][model])\n osckey.append(grid[basepath + track][\"osckey\"][model])\n sections.append(len(osc))\n newosckey, newosc = ih.interpolate_frequencies(\n fullosc=osc,\n fullosckey=osckey,\n agevec=intbase,\n newagevec=newbase,\n sections=sections,\n freqlims=freqlims,\n debug=debug,\n trackid=newnum + tracknum,\n )\n Npoints = len(newosc)\n # Writing variable length arrays to an HDF5 file is a bit tricky,\n # but can be done using datasets with a special datatype.\n # --> Here we follow the approach from BASTA/make_tracks\n dsetosc = outfile.create_dataset(\n name=os.path.join(libname, \"osc\"),\n shape=(Npoints, 2),\n dtype=h5py.special_dtype(vlen=np.float),\n )\n dsetosckey = outfile.create_dataset(\n name=os.path.join(libname, \"osckey\"),\n shape=(Npoints, 2),\n dtype=h5py.special_dtype(vlen=np.int),\n )\n for i in range(Npoints):\n dsetosc[i] = newosc[i]\n dsetosckey[i] = newosckey[i]\n\n # Dealing with constants of the track\n outfile[os.path.join(libname, \"FeHini_weight\")] = 1.0\n for par, parval in zip(baseparams, point):\n keypath = os.path.join(libname, par)\n try:\n outfile[keypath]\n except:\n outfile[keypath] = np.ones(len(newbase[:, -1])) * parval\n for par in const_vars:\n keypath = os.path.join(libname, par)\n if par in [\"tracks\", \"isochs\"]:\n continue\n try:\n outfile[keypath]\n except:\n outfile[keypath] = np.ones(len(newbase[:, -1])) * const_vars[par]\n except KeyboardInterrupt:\n print(\"BASTA interpolation stopped manually. 
Goodbye!\")\n sys.exit()\n except:\n # If it fails, delete progress for the track, and just mark it as failed\n try:\n del outfile[libname]\n except:\n None\n success[tracknum] = False\n print(\"Error:\", sys.exc_info()[1])\n outfile[os.path.join(libname, \"FeHini_weight\")] = -1\n print(\"Interpolation failed for {0}\".format(libname))\n if debug:\n print(\"Point at:\")\n [print(name, value, \", \") for name, value in zip(bpars, point)]\n print(\"Simplex formed by the {0}s:\".format(modestr))\n print(\", \".join([tracknames[i] for i in ind]))\n\n ####################\n # End of main loop #\n ####################\n pbar.close()\n\n # Plot the new resulting base\n plotted = ip.base_corner(\n baseparams, base, new_points[success], triangulation, sobol, outbasename\n )\n if plotted:\n print(\"Across interpolation base has been plotted in\", \"figure\", outbasename)\n\n # Remove all previous tracks, to conserve sobol homogeniety\n if grid == outfile and sobol:\n for name in tracknames:\n namepath = os.path.join(basepath, name)\n del outfile[namepath]\n\n # Re-add frequency limits for combined approaches\n if intpol_freqs:\n limits[\"freqs\"] = freqlims\n\n # Write the new tracks to the header, and recalculate the weights\n outfile = ih._extend_header(outfile, basepath, headvars)\n outfile = ih._recalculate_weights(outfile, basepath, headvars)\n return grid, outfile, fail", "title": "" }, { "docid": "ed4e7e231e0fc3f81923827340fd6209", "score": "0.5044165", "text": "def generate_grid(self, bbox, lon_step=1, lat_step=1, output='cd'):\n\n\t\tbearingEast = 90\n\t\tbearingNorth = 0\n\t\tminLatitude, maxLatitude, minLongitude, maxLongitude = bbox\n\t\tp_sw = Point(minLatitude, minLongitude)\n\t\tp_se = Point(minLatitude, maxLongitude)\n\t\tp_ne = Point(maxLatitude, maxLongitude)\n\t\tlon_span_km = geopy_distance(p_sw, p_se).kilometers\n\t\tlat_span_km = geopy_distance(p_ne, p_se).kilometers\n\t\tlat_chunks = range(int(lat_span_km / lon_step) + 1)\n\t\tlon_chunks = range(int(lon_span_km / lat_step) + 1)\n\n\t\t# return center cells\n\t\tif output == 'cc':\n\t\t\tlst_lat = [(VincentyDistance(kilometers=lon_step * x + lon_step / 2).destination(p_sw, bearingNorth)).latitude for x in lat_chunks]\n\t\t\tlst_lon = [(VincentyDistance(kilometers=lat_step * y + lat_step / 2).destination(p_sw, bearingEast)).longitude for y in lon_chunks]\n\t\t\tself.INDEX = {'lats': lst_lat, 'lons': lst_lon}\n\n\t\t\treturn [(x, y) for x in lst_lat[:-1] for y in lst_lon[:-1]]\n\n\t\t# return list of cells\n\t\telif output == 'c':\n\t\t\tlst_lat = [(VincentyDistance(kilometers=lon_step * x + lon_step).destination(p_sw, bearingNorth)).latitude for x in lat_chunks]\n\t\t\tlst_lon = [(VincentyDistance(kilometers=lat_step * y + lat_step).destination(p_sw, bearingEast)).longitude for y in lon_chunks]\n\t\t\tcells = self.generate_cells(lst_lat, lst_lon)\n\t\t\tself.INDEX = {'lats': lst_lat, 'lons': lst_lon}\n\t\t\treturn cells\n\n\t\t# return cells matrix\n\t\telif output == 'cm':\n\t\t\tlst_lat = [minLatitude]+[(VincentyDistance(kilometers=lat_step * y + lat_step).destination(p_sw, bearingNorth)).latitude for y in lat_chunks]\n\t\t\tlst_lon = [minLongitude]+[(VincentyDistance(kilometers=lon_step * x + lon_step).destination(p_sw, bearingEast)).longitude for x in lon_chunks]\n\t\t\tself.INDEX = {'lats': lst_lat, 'lons': lst_lon}\n\t\t\tcells = self.generate_cells_matrix(lst_lat, lst_lon)\n\t\t\treturn cells\n\n\t\t# return cells dictionary:\n\t\telif output == 'cd':\n\t\t\tlst_lat = [minLatitude]+[(VincentyDistance(kilometers=lat_step 
* y + lat_step).destination(p_sw, bearingNorth)).latitude for y in lat_chunks]\n\t\t\tlst_lon = [minLongitude]+[(VincentyDistance(kilometers=lon_step * x + lon_step).destination(p_sw, bearingEast)).longitude for x in lon_chunks]\n\t\t\tself.INDEX = {'lats': lst_lat, 'lons': lst_lon}\n\t\t\tcells = self.generate_cells_dictionary(lst_lat, lst_lon)\n\t\t\treturn cells", "title": "" }, { "docid": "4d01b579acfff0d23e5c2fc15d20037c", "score": "0.5039861", "text": "def _call_nearest(self, x_new):\n\n # 2. Find where in the averaged data the values to interpolate\n # would be inserted.\n # Note: use side='left' (right) to searchsorted() to define the\n # halfway point to be nearest to the left (right) neighbour\n x_new_indices = searchsorted(self.x_bds, x_new, side='left')\n\n # 3. Clip x_new_indices so that they are within the range of x indices.\n x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)\n\n # 4. Calculate the actual value for each entry in x_new.\n y_new = self._y[x_new_indices]\n\n return y_new", "title": "" }, { "docid": "d999a91aa5a75a3b488202bd43da51af", "score": "0.5038738", "text": "def calc_grg_grid(nlon_nhrg, lat_nhrg):\n\n ##### Global lat from south to north\n if lat_nhrg[1] - lat_nhrg[0] > 0: # lat_nhrg is from south to north\n lat_grg = np.concatenate( [-lat_nhrg[::-1], lat_nhrg] )\n is_input_s2n = True\n else: # lat_nhrg is from north to south\n lat_grg = np.concatenate( [-lat_nhrg, lat_nhrg[::-1]] )\n is_input_s2n = False\n\n ##### Global lon from west to east, 0 is about in the middle\n ##### lon: a list of arrays\n # Combine the NH and SH\n if is_input_s2n:\n nlon_grg = np.concatenate( [nlon_nhrg[::-1], nlon_nhrg] )\n else:\n nlon_grg = np.concatenate( [nlon_nhrg, nlon_nhrg[::-1]] )\n\n # Initiate\n lon_grg = []\n\n # Loop for each lat\n for i, n in enumerate(nlon_grg):\n dl = 360.0/n # lon interval for each lat\n if n % 2 == 1: # n is odd\n l0 = -180.0 + dl/2.0\n l1 = 180.0 - dl/2.0\n n0 = int((n-1)/2)\n else: # n is even\n l0 = -180.0 + dl\n l1 = 180.0\n n0 = int((n-2)/2)\n n1 = n - n0\n \n # 0 degree must be included in the reduced Gaussian grid\n lon_temp = np.concatenate([np.linspace(l0, 0, n0, endpoint=False), np.linspace(0, l1, n1)])\n\n # Add the lon array to the lon_grg list\n lon_grg.append(lon_temp)\n\n return lon_grg, lat_grg", "title": "" }, { "docid": "2ddeb14e4194e795a35d5c492bdaab30", "score": "0.50378644", "text": "def nearest_regrid(distances: ndarray, indexes: ndarray, in_values: ndarray) -> ndarray:\n min_index = np.argmin(distances, axis=1)\n index0 = np.arange(min_index.shape[0])\n index_in = indexes[index0, min_index]\n output = in_values[index_in]\n return output", "title": "" }, { "docid": "c549c87ecd6eb7a4823e6a3df9e22279", "score": "0.50297666", "text": "def get_grids():\n kxgrid=np.arange((par['nkx0']))\n kxgrid=kxgrid*par['kxmin']\n kygrid=np.empty(par['nky0'])\n kzgrid=np.empty(par['nkz0'])\n herm_grid=np.arange(par['nv0'])\n herm_grid=1.0*herm_grid\n for i in range(par['nky0']/2):\n kygrid[par['nky0']-1-i]=-float(i+1)*par['kymin']\n kygrid[i]=float(i)*par['kymin']\n kygrid[par['nky0']/2]=par['nky0']/2*par['kymin']\n for i in range(par['nkz0']/2):\n kzgrid[par['nkz0']-1-i]=-float(i+1)*par['kzmin']\n kzgrid[i]=float(i)*par['kzmin']\n kzgrid[par['nkz0']/2]=par['nkz0']/2*par['kzmin']\n return kxgrid,kygrid,kzgrid,herm_grid", "title": "" }, { "docid": "86b491673aa1a37d437cc7e3464ec095", "score": "0.50256175", "text": "def _adjust2grid(pos):\n return cf.GRID_PRECISION*int(pos/cf.GRID_PRECISION)", "title": "" }, { "docid": 
"b2ecc77bec5d2f212fd9ae9f1146d272", "score": "0.5015397", "text": "def grid(bounds=(35,-5,36,-4),step=.001):\n ra = []\n dec = []\n\n for y in N.arange(bounds[1],bounds[3],step):\n x = N.arange(bounds[0],bounds[2],step)\n ra.append(x)\n dec.append(y*N.ones(len(x)))\n\n return N.concatenate(ra),N.concatenate(dec)", "title": "" }, { "docid": "faac7d84b767b1cce28b90fdc12b73ff", "score": "0.501424", "text": "def createGrid(self):\n self.grid = numpy.linspace(\n self.x1, self.x2, num=int((self.x2 - self.x1) // self.dX + 1))\n self.idx = 0", "title": "" }, { "docid": "bbd65e12616c0ad6280c4628d2966614", "score": "0.5013587", "text": "def _get_interpolation_grid(self, grid_spec: InterpGridSpecType) -> np.ndarray:\n\n if isinstance(grid_spec, InterpolationGrid):\n grid = grid_spec(self.curve)\n elif isinstance(grid_spec, int):\n grid = UniformInterpolationGrid(grid_spec)(self.curve)\n elif isinstance(grid_spec, (np.ndarray, abc.Sequence)):\n grid = np.array(grid_spec, dtype=np.float64)\n else:\n raise ValueError(f'Invalid type {type(grid_spec)} of interpolation grid')\n\n if grid.ndim != 1:\n raise ValueError(\n 'The interpolation grid should be 1xM array where M is number of points in interpolated curve')\n if not np.issubdtype(grid.dtype, np.number):\n raise ValueError(f'Invalid dtype {grid.dtype} of interpolation grid')\n\n dt = np.diff(grid)\n\n if np.any(dt < 0) or np.any(np.isclose(dt, 0)):\n raise ValueError(\n 'The values in the interpolation grid must be strictly increasing ordered.')\n\n t_start, t_end = self.curve.t[0], self.curve.t[-1]\n\n if np.min(grid) > t_start or np.max(grid) < t_end:\n warnings.warn(\n f'The interpolation grid in range [{np.min(grid)}, {np.max(grid)}]. '\n f'It does not cover the whole curve parametrization range [{t_start}, {t_end}].',\n InterpolationWarning)\n\n return grid", "title": "" }, { "docid": "9b43c059ed529942e78f144c0117015d", "score": "0.5012254", "text": "def gridWave() :\n return [10.**(logw0_chip[0]+np.arange(nw_chip[0])*dlogw),\n 10.**(logw0_chip[1]+np.arange(nw_chip[1])*dlogw),\n 10.**(logw0_chip[2]+np.arange(nw_chip[2])*dlogw)]", "title": "" }, { "docid": "d3859456efe6cec6988764af5924167b", "score": "0.50121766", "text": "def approx_integrate(func: callable, x_lb: float, x_ub: float, n_division=20) -> float:\n grid, step = np.linspace(x_lb, x_ub, n_division, retstep=True)\n # print(grid)\n # print(step)\n # print(func(grid)[0:-1].sum()*step)\n # print(func(grid)[1:].sum()*step)\n predictions = func(grid)\n return (0.5*(predictions[0]+predictions[-1]) + predictions[1:-1])*step\n # return (func(grid)[0:-1].sum() + func(grid)[1:].sum())*0.5*step", "title": "" }, { "docid": "637c30ed943a3000c5c8b9bbeb730b93", "score": "0.5009107", "text": "def map_grids_to_points(self):\n\t\t# TODO Look for an effective way to load up the points into their Grids\n\t\t# BUG Check why lower values of n_grids returns only a single grid Graph\n\t\tself.get_grids()\n\t\tpoints = self._points.copy()\n\t\t# convert points to list so that we can replace instances of points with Point\n\t\tself._points = list(self._points)\n\t\tfor i, point in enumerate(points):\n\t\t\tfor grid in self._grids:\n\t\t\t\tif grid.contains(Point(point)):\n\t\t\t\t\t# Replace Point with the Point Object \n\t\t\t\t\tself._points[i] = Point(point)\n\t\t\t\t\tgrid.add_point(self._points[i])\n\t\t\t\t\tbreak\n\t\tself._points = np.array(self._points)\n\t\t# Add Starting point to grid\n\t\tfor grid in self.grids:\n\t\t\tif 
grid.contains(self.start_pos):\n\t\t\t\tgrid.add_point(self.start_pos)\n\t\t\t\tbreak\n\t\tself._grids_for_calc = self._grids\n\t\t# Compute grid centric points and omniscient_reference point\n\t\tself.compute_cost()", "title": "" }, { "docid": "268e8efb4f2f44c1e6317bb9c8c7681d", "score": "0.49964663", "text": "def create_grid(data, drone_altitude, safety_distance):\n\n    # minimum and maximum north coordinates\n    north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n    north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n    # minimum and maximum east coordinates\n    east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n    east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n    # given the minimum and maximum coordinates we can\n    # calculate the size of the grid.\n    north_size = int(np.ceil(north_max - north_min))\n    east_size = int(np.ceil(east_max - east_min))\n\n    # Initialize an empty grid\n    grid = np.zeros((north_size, east_size))\n\n    # Populate the grid with obstacles\n    for i in range(data.shape[0]):\n        north, east, alt, d_north, d_east, d_alt = data[i, :]\n        if alt + d_alt + safety_distance > drone_altitude:\n            obstacle = [\n                int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n                int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n                int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n                int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n            ]\n            grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n\n    return grid, int(north_min), int(east_min)", "title": "" }, { "docid": "5f8f001895548517e761a03628364bec", "score": "0.49960718", "text": "def _interpolate(precompute_array, freq):\n    points = np.zeros(\n        (precompute_array.shape[0] * precompute_array.shape[1], 2)\n    )\n    for n in range(precompute_array.shape[0]):\n        for p in range(precompute_array.shape[1]):\n            points[n * precompute_array.shape[1] + p] = n, p\n    return griddata(points, precompute_array.flatten(), freq, method='linear')", "title": "" }, { "docid": "1299e03f5f161781b5dce5d7c5d6a7ed", "score": "0.49955395", "text": "def get_grid(im, expected_spacing=(105, 105), grid_shape=(16, 24),\n             x_data=None, y_data=None,\n             
expected_center=(100, 100), run_dev=False, dev_reduce_grid_data_fraction=None,\n validate_parameters=False, grid_correction=None):\n\n adjusted_values = True\n center = expected_center\n spacings = expected_spacing\n\n adaptive_threshold = get_adaptive_threshold(im, threshold_filter=None, segments=100, sigma=30)\n\n im_filtered = get_denoise_segments(im < adaptive_threshold, iterations=3)\n del adaptive_threshold\n\n if expected_spacing is None:\n expected_spacing = tuple(float(a)/b for a, b in zip(im.shape, grid_shape))\n\n get_segments_by_size(\n im_filtered, min_size=40,\n max_size=expected_spacing[0] * expected_spacing[1], inplace=True)\n\n get_segments_by_shape(im_filtered, expected_spacing, inplace=True)\n\n labeled, labels = ndimage.label(im_filtered)\n if x_data is None or y_data is None:\n if labels > 0:\n centra = ndimage.center_of_mass(im_filtered,\n labeled, range(1, labels + 1))\n x_data, y_data = np.array(centra).T\n adjusted_values = False\n\n del labeled\n\n if dev_reduce_grid_data_fraction is not None:\n filter_grid_data = np.random.random(x_data.shape) < dev_reduce_grid_data_fraction\n x_data = x_data[filter_grid_data]\n y_data = y_data[filter_grid_data]\n\n if adjusted_values is False:\n\n if run_dev:\n\n center, spacings = get_grid_parameters(\n x_data, y_data, grid_shape, spacings=expected_spacing)\n\n if validate_parameters:\n center, spacings, adjusted_values = get_valid_parameters(\n center, spacings, expected_center, expected_spacing)\n else:\n adjusted_values = False\n\n else:\n\n center, spacings = get_grid_parameters(\n x_data, y_data, grid_shape, spacings=expected_spacing)\n\n if grid_correction is not None:\n center = tuple(a + b for a, b in\n zip(center, [i * j for i, j in\n zip(grid_correction, spacings)]))\n\n adjusted_values = True\n\n if validate_parameters:\n center, spacings, adjusted_values = get_valid_parameters(\n center, spacings, expected_center, expected_spacing)\n\n dx, dy = spacings\n\n grid = build_grid_from_center(x_data, y_data, center, dx, dy, grid_shape)\n\n return grid, x_data, y_data, center, spacings, adjusted_values", "title": "" }, { "docid": "91f4534d168d9bb0d52f9d2c59dab96f", "score": "0.49868563", "text": "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size - 1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size - 1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size - 1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size - 1)),\n ]\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\n\n return grid, int(north_min), int(east_min)", "title": "" }, { "docid": "ea01aed2f5d4a213af477921d6e45d61", "score": "0.49861374", 
"text": "def create_grid(data, drone_altitude, safety_distance):\r\n\r\n # minimum and maximum north coordinates\r\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\r\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\r\n\r\n # minimum and maximum east coordinates\r\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\r\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\r\n\r\n # given the minimum and maximum coordinates we can\r\n # calculate the size of the grid.\r\n north_size = int(np.ceil(north_max - north_min))\r\n east_size = int(np.ceil(east_max - east_min))\r\n\r\n # Initialize an empty grid\r\n grid = np.zeros((north_size, east_size))\r\n\r\n # Populate the grid with obstacles\r\n for i in range(data.shape[0]):\r\n north, east, alt, d_north, d_east, d_alt = data[i, :]\r\n if alt + d_alt + safety_distance > drone_altitude:\r\n obstacle = [\r\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\r\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\r\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\r\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\r\n ]\r\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\r\n\r\n return grid, int(north_min), int(east_min)", "title": "" }, { "docid": "35bc6fac9a29740816de0eb1760c823a", "score": "0.49806225", "text": "def grid_visibility_to_griddata(vis, griddata, cf):\n\n assert isinstance(vis, Visibility), vis\n\n assert vis.polarisation_frame == griddata.polarisation_frame\n\n nchan, npol, nz, oversampling, _, support, _ = cf.shape\n sumwt = numpy.zeros([nchan, npol])\n pu_grid, pu_offset, pv_grid, pv_offset, pwg_grid, pwg_fraction, pwc_grid, pwc_fraction, pfreq_grid = \\\n convolution_mapping_visibility(vis, griddata, vis.frequency, cf)\n _, _, _, _, _, gv, gu = cf.shape\n coords = zip(vis.vis * vis.flagged_imaging_weight, vis.flagged_imaging_weight,\n pfreq_grid, pu_grid,\n pu_offset, pv_grid, pv_offset, pwg_grid, pwc_grid)\n griddata.data[...] = 0.0\n\n # Do this in place to avoid creating a new copy. 
Doing the conjugation outside the loop\n # reduces run time immensely\n cf.data = numpy.conjugate(cf.data)\n\n du = gu // 2\n dv = gv // 2\n for v, vwt, chan, uu, uuf, vv, vvf, zzg, zzc in coords:\n griddata.data[chan, :, zzg, (vv - dv):(vv + dv), (uu - du):(uu + du)] += \\\n cf.data[chan, :, zzc, vvf, uuf, :, :] * v[:, numpy.newaxis, numpy.newaxis]\n sumwt[chan, :] += vwt\n\n cf.data = numpy.conjugate(cf.data)\n return griddata, sumwt", "title": "" }, { "docid": "107f6accb427128f4d8710c31cee2ecd", "score": "0.49795026", "text": "def wavenumber_grid4xs (vLow, vHigh, gammaMean, sampling=5.0, gridRatio=1, nGrids=1, verbose=0):\n\t# grid point spacing\n\tdv = gammaMean/sampling\n\t# number of grid point intervals\n\tnv = int(round( (vHigh-vLow)/dv ))\n\t\n\tif nGrids<1: raise SystemExit, 'ERROR --- wavenumber_grid4xs: nonpositive number of grids!'\n\tif not gridRatio in [1,2,4,8]: raise SystemExit, 'ERROR --- wavenumber_grid4xs: invalid grid ratio!'\n\n\t# adjust number of grid point intervals to an integer multiple of gridRatio^(nGrids-1)\n\tmm = gridRatio**(nGrids-1)\n #fgs120310 nv = mm*int(nv/mm)\n\tnv = mm*max(int(nv/mm),1) # make sure that there are at least mm grid points\t\n\n\t# adjust spacing to provide an integer number of intervals\n\tdv = (vHigh-vLow)/nv\n\t# set up array of grid points\n\tv = np.arange(vLow,vHigh+0.9*dv,dv)\n\tif verbose: print '%s %8i%s %8f %8f %8f %s %8f %8f %s %g%s %i' % \\\n\t ('waveumber grid: ', nv, '+1 points: ', v[0], v[1], v[2], ' ... ', v[-2], v[-1], ' (delta ', dv,')',mm)\n\treturn v", "title": "" }, { "docid": "cd6df162c8f3695c70f08c0975465a42", "score": "0.49783674", "text": "def create_grid(data, drone_altitude, safety_distance):\r\n\r\n # minimum and maximum north coordinates\r\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\r\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\r\n\r\n # minimum and maximum east coordinates\r\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\r\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\r\n\r\n # given the minimum and maximum coordinates we can\r\n # calculate the size of the grid.\r\n north_size = int(np.ceil(north_max - north_min))\r\n east_size = int(np.ceil(east_max - east_min))\r\n\r\n # Initialize an empty grid\r\n grid = np.zeros((north_size, east_size))\r\n\r\n # Populate the grid with obstacles\r\n for i in range(data.shape[0]):\r\n north, east, alt, d_north, d_east, d_alt = data[i, :]\r\n if alt + d_alt + safety_distance > drone_altitude:\r\n obstacle = [\r\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size - 1)),\r\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size - 1)),\r\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size - 1)),\r\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size - 1)),\r\n ]\r\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\r\n\r\n return grid, int(north_min), int(east_min)", "title": "" }, { "docid": "718dc538f8fcded9aece77cea7179428", "score": "0.49773607", "text": "def calc_gxx_grid(xbeg, xend, dlon, ybeg, yend, dlat, mode='c'):\n if mode == 'c':\n nlon = int(round( (xend - xbeg)/dlon ))\n nlat = int(round( (yend - ybeg)/dlat ))\n\n lon_gxx = np.linspace(xbeg+dlon/2, xend-dlon/2, nlon)\n lat_gxx = np.linspace(ybeg+dlat/2, yend-dlat/2, nlat)\n elif mode == 'b':\n nlon = int(round( (xend - xbeg)/dlon )) + 1\n nlat = int(round( (yend - ybeg)/dlat )) + 1\n\n lon_gxx = np.linspace(xbeg, xend, nlon)\n lat_gxx = np.linspace(ybeg, yend, nlat)\n\n return 
lon_gxx, lat_gxx", "title": "" }, { "docid": "de0a3e0757da809c6ffd943e1d41c1ea", "score": "0.49697042", "text": "def _interpolate(self, xi):\n # cache latest evaluation point for gradient method's use later\n self._xi = xi\n\n if not self.extrapolate:\n for i, p in enumerate(xi.T):\n if np.isnan(p).any():\n raise OutOfBoundsError(\"One of the requested xi contains a NaN\",\n i, np.NaN, self.grid[i][0], self.grid[i][-1])\n\n eps = 1e-14 * self.grid[i][-1]\n if not np.logical_and(np.all(self.grid[i][0] <= p + eps),\n np.all(p - eps <= self.grid[i][-1])):\n p1 = np.where(self.grid[i][0] > p)[0]\n p2 = np.where(p > self.grid[i][-1])[0]\n # First violating entry is enough to direct the user.\n violated_idx = set(p1).union(p2).pop()\n value = p[violated_idx]\n raise OutOfBoundsError(\"One of the requested xi is out of bounds\",\n i, value, self.grid[i][0], self.grid[i][-1])\n\n if self._compute_d_dvalues:\n # If the table grid or values are component inputs, then we need to create a new table\n # each iteration.\n interp = self._interp\n self.table = interp(self.grid, self.values, interp, **self._interp_options)\n self.table._compute_d_dvalues = True\n\n table = self.table\n if table._vectorized:\n result, derivs_x, derivs_val, derivs_grid = table.evaluate_vectorized(xi)\n\n else:\n xi = np.atleast_2d(xi)\n n_nodes, nx = xi.shape\n result = np.empty((n_nodes, ), dtype=xi.dtype)\n derivs_x = np.empty((n_nodes, nx), dtype=xi.dtype)\n derivs_val = None\n\n # TODO: it might be possible to vectorize over n_nodes.\n for j in range(n_nodes):\n val, d_x, d_values, d_grid = table.evaluate(xi[j, :])\n result[j] = val\n derivs_x[j, :] = d_x.flatten()\n if d_values is not None:\n if derivs_val is None:\n dv_shape = [n_nodes]\n dv_shape.extend(self.values.shape)\n derivs_val = np.zeros(dv_shape, dtype=xi.dtype)\n in_slice = table._full_slice\n full_slice = [slice(j, j + 1)]\n full_slice.extend(in_slice)\n shape = derivs_val[tuple(full_slice)].shape\n derivs_val[tuple(full_slice)] = d_values.reshape(shape)\n\n # Cache derivatives\n self._d_dx = derivs_x\n self._d_dvalues = derivs_val\n\n return result", "title": "" }, { "docid": "c924c606cf17b36d6f67f66745a50d93", "score": "0.4966183", "text": "def deflections_from_grid(\n self, grid, bypass_decorator=False\n ):\n deflection_y = -np.multiply(self.magnitude, grid[:, 0])\n deflection_x = np.multiply(self.magnitude, grid[:, 1])\n return self.rotate_grid_from_profile(np.vstack((deflection_y, deflection_x)).T)", "title": "" }, { "docid": "25c1f9eb9c4ee5ada1cd17310ac5b482", "score": "0.49659547", "text": "def map_to_grid(x,y,r,m,rho,a,h,bounds,cutoff):\n\n nx = x.size\n ny = y.size\n# X,Y = pylab.meshgrid(x,y)\n Z = numpy.zeros((nx,ny))\n nlists = []\n ngridpts = x.size*y.size\n xmin,xmax,ymin,ymax = bounds\n\n# instead of building a neighbour list for each pixel, why not build\n# a neighbour list as\n# (x,y) index\n# where (x,y) is the pixel coordinates and index is the neighbour.\n# will this be faster?\n \n for i in range(nx):\n for j in range(ny):\n nlist = neighbours.Nlist(ngridpts,cutoff)\n point = numpy.array([x[i],y[j]])\n nlist.build(point,r,h,periodic=True,xmax=xmax,ymax=ymax)\n nlist.assign_properties(m,rho,a,h)\n # compute the sph interpolant\n v = numpy.zeros(len(nlist.ds))\n v = nlist.m/nlist.rho\n # r is the list of distances\n # x is the list of properties\n # vol is the list of volumes\n Z[i,j] = smooth_point(nlist.ds,nlist.dr,nlist.a,v,nlist.h,\"lucy\")\n\n return Z", "title": "" }, { "docid": "663fc3ae2255b6c86781697d9b741fe8", "score": 
"0.49650142", "text": "def grid2_contour_get_output_grid(scalar_field_grid: Grid) -> Grid:\n return Grid(\n scalar_field_grid.cell_ndcount - 1,\n scalar_field_grid.origin + scalar_field_grid.cell_sides_length * 0.5,\n scalar_field_grid.cell_sides_length)", "title": "" }, { "docid": "d47e968bd61f36ef46e165c1f7a5f6b9", "score": "0.49633732", "text": "def __init__(self, ngrid=175, rmin=0.05*kpc, rmax=5*Mpc):\n\n self.ngrid = ngrid\n self.rmax = rmax\n self.rmin = rmin\n\n # radial direction: log spaced grid from xmin=ln(rmin) to xmax=ln(rmax)\n # xin and xout are the inner and outer edges of each grid cell\n xmax = np.log(rmax)\n xmin = np.log(rmin)\n self.dx = (xmax-xmin)/ngrid\n self.x = xmin + (np.arange(self.ngrid, dtype=np.float64)+0.5)*self.dx\n self.xout = xmin + (np.arange(self.ngrid, dtype=np.float64)+1)*self.dx\n self.xin = xmin + np.arange(self.ngrid, dtype=np.float64)*self.dx\n self.r = np.exp(self.x)\n self.rout = np.exp(self.xout)\n self.rin = np.exp(self.xin)\n\n # theta direction: evenly spaced from 0 to pi; number of points is nth\n self.nth = 101\n self.disc_ind = self.nth//2\n self.dth = np.pi/self.nth\n self.th = (np.arange(self.nth, dtype=np.float64)+0.5)*self.dth\n self.thin = np.arange(self.nth, dtype=np.float64)*self.dth\n self.thout = (np.arange(self.nth, dtype=np.float64)+1)*self.dth\n self.grid_shape = (self.ngrid, self.nth)\n\n # set up grid structure; any object, e.g. rgrid, is a ngrid x nth array\n self.rgrid, self.thgrid = np.meshgrid(self.r, self.th, indexing='ij')\n self.ringrid, self.thingrid = np.meshgrid(self.rin, self.thin,\n indexing='ij')\n self.routgrid, self.thoutgrid = np.meshgrid(self.rout, self.thout,\n indexing='ij')\n self.sthgrid = np.sin(self.thgrid)\n self.sthingrid = np.sin(self.thingrid)\n self.sthoutgrid = np.sin(self.thoutgrid)\n\n # coefficient of each term in the discretised Laplacian\n self.__coeff1 = self.routgrid/(self.dx**2*self.rgrid**3)\n self.__coeff2 = self.ringrid/(self.dx**2*self.rgrid**3)\n self.__coeff3 = self.sthoutgrid/((self.dth*self.rgrid)**2*self.sthgrid)\n self.__coeff4 = self.sthingrid/((self.dth*self.rgrid)**2*self.sthgrid)\n\n # dLdu_const gives the final constant term in the Newton Raphson\n # expression for dL/du\n d1 = (self.rgrid**3*self.dx**2)\n d2 = (self.rgrid**2*self.sthgrid*self.dth**2)\n self.__dLdu_const = ((self.ringrid+self.routgrid)/d1 +\n (self.sthingrid+self.sthoutgrid)/d2)\n\n # dvol is cell volume, full vol is volume of whole sphere\n self.dvol = 2*np.pi*self.rgrid**3*np.sin(self.thgrid)*self.dx*self.dth\n self.fullvol = 4.0/3.0*np.pi*(self.rmax**3 - self.rmin**3)\n\n # user change to True if setting a guess\n self.GuessFlag = False\n\n return", "title": "" }, { "docid": "cb0eb5080218a8684ed834107063135f", "score": "0.49633497", "text": "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.amin(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.amax(data[:, 0] + data[:, 3]))\n print(0, north_max - north_min)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.amin(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.amax(data[:, 1] + data[:, 4]))\n print(0,east_max-east_min)\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center 
= np.min(data[:, 1])\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n # TODO: Determine which cells contain obstacles\n # and set them to 1.\n #\n # Example:\n #\n # grid[north_coordinate, east_coordinate] = 1\n nc = int(north - north_min)\n ec = int(east - east_min)\n dn = int(d_north)\n de = int(d_east)\n sd = int(safety_distance)\n x0 = int(ec - (de + sd))\n y0 = int(nc - (dn + sd))\n xm = int(ec + (de + sd))\n ym = int(nc + (dn + sd))\n nm = north_max - north_min\n em = east_max - east_min\n print(drone_altitude,alt,d_alt)\n for e in range(x0,xm):\n for n in range(y0,ym):\n # skip out of range conditions\n if e < 0: \n continue\n if e >= em:\n continue\n if n < 0:\n continue\n if n >= nm:\n continue\n # check if drone is above obstacle altitude\n if alt + d_alt + safety_distance > drone_altitude:\n continue\n # plot it\n grid[n][e] = 1\n \n return grid", "title": "" }, { "docid": "1b87d80fdd03b7999497bab28001dd3b", "score": "0.49597648", "text": "def _get_function_vals(self, grid_size=100):\n train_x = self.get_train_inputs()\n test_x = self.get_test_inputs()\n\n mu = self._mean\n\n dx = max(np.abs(train_x - mu[None, :]).max(),\n np.abs(test_x - mu[None, :]).max())\n dx = 1.05 * dx\n\n x1 = np.linspace(start=mu[0]-dx, stop=mu[0]+dx, num=grid_size)\n x2 = np.linspace(start=mu[1]-dx, stop=mu[1]+dx, num=grid_size)\n\n X1, X2 = np.meshgrid(x1, x2)\n\n X = np.vstack([X1.ravel(), X2.ravel()]).T\n\n Y = self._map(X).reshape(X1.shape)\n\n return X1, X2, Y", "title": "" }, { "docid": "b336eb938601f34508e5be6beb41ac81", "score": "0.49592617", "text": "def calc_psnr_2D(grid,survey,pset,slist,doplot=None,xlim=[1,100],ylim=[0.01,1]):\n \n ######## Calculates p(DM,z | FRB) ########\n # i.e. the probability of a given z,DM assuming\n # an FRB has been observed. The normalisation\n # below is proportional to the total rate (ish)\n \n rates=grid.rates\n zvals=grid.zvals\n dmvals=grid.dmvals\n \n #DMobs=survey.DMEGs\n DMobs=np.sort(survey.DMEGs)\n idmsort=np.argsort(survey.DMEGs)\n Zobs=survey.Zs[idmsort]\n \n #Zobs=survey.Zs\n \n \n \n #if survey.meta[\"TOBS\"] is not None:\n #\tTotalRate=np.sum(rates)*survey.meta[\"TOBS\"]\n # this is in units of number per MPc^3 at Emin\n \n # normalise to total probability of 1\n norm=np.sum(rates) # gets multiplied by event size later\n \n \n # get indices in dm space\n ddm=dmvals[1]-dmvals[0]\n kdms=DMobs/ddm\n idms1=kdms.astype('int')\n idms2=idms1+1\n dkdms=kdms-idms1\n \n # get indices in z space\n dz=zvals[1]-zvals[0]\n kzs=Zobs/dz\n izs1=kzs.astype('int')\n izs2=izs1+1\n dkzs=kzs-izs1\n \n pvals = rates[izs1,idms1]*(1.-dkdms)*(1-dkzs)\n pvals += rates[izs2,idms1]*(1.-dkdms)*dkzs\n pvals += rates[izs1,idms2]*dkdms*(1-dkzs)\n pvals += rates[izs2,idms2]*dkdms*dkzs\n \n bad=np.array(np.where(pvals <= 0.))\n if bad.size > 0:\n pvals[bad]=1e-20 # hopefully small but not infinitely so\n llsum=np.sum(np.log10(pvals))#-norm\n llsum -= np.log10(norm)*Zobs.size # once per event\n \n \n \n ###### Calculates p(E | z,DM) ########\n # i.e. the probability of observing an FRB\n # with energy E given redshift and DM\n # this calculation ignores beam values\n # this is the derivative of the cumulative distribution\n # function from Eth to Emax\n # this does NOT account for the probability of\n # observing something at a relative sensitivty of b, i.e. assumes you do NOT know localisation in your beam...\n # to do that, one would calculate this for the exact value of b for that event. 
The detection\n # probability has already been integrated over the full beam pattern, so it would be trivial to\n # calculate this in one go. Or in other words, one could simple add in survey.Bs, representing\n # the local sensitivity to the event [keeping in mind that Eths has already been calculated\n # taking into account the burst width and DM, albeit for a mean FRB]\n # Note this would be even simpler than the procedure described here - we just\n # use b! Huzzah! (for the beam)\n # IF:\n # - we want to make FRB width analogous to beam, THEN\n # - we need an analogous 'beam' (i.e. width) distribution to integrate over,\n # which gives the normalisation\n \n NS=slist.size\n psnrs=np.zeros([survey.Ss.size,NS]) # generates plot for each individual FRB\n for k,s in enumerate(slist):\n # NOTE: to break this into a p(SNR|b) p(b) term, we first take\n # the relative likelihood of the threshold b value compare\n # to the entire lot, and then we calculate the local\n # psnr for that beam only. But this requires a much more\n # refined view of 'b', rather than the crude standatd \n # parameterisation\n \n # calculate vector of grid thresholds\n Emax=grid.Emax\n Emin=grid.Emin\n gamma=grid.gamma\n #Eths has dimensions of width likelihoods and nobs\n # i.e. later, the loop over j,w uses the first index\n Eths = grid.thresholds[:,izs1,idms1]*(1.-dkdms)*(1-dkzs)\n Eths += grid.thresholds[:,izs2,idms1]*(1.-dkdms)*dkzs\n Eths += grid.thresholds[:,izs1,idms2]*dkdms*(1-dkzs)\n Eths += grid.thresholds[:,izs2,idms2]*dkdms*dkzs\n \n FtoE = grid.FtoE[izs1]*(1.-dkzs)\n FtoE += grid.FtoE[izs2]*dkzs\n \n beam_norm=np.sum(survey.beam_o)\n \n # now do this in one go\n # We integrate p(snr|b,w) p(b,w) db dw. I have no idea how this could be multidimensional\n psnr=np.zeros(Eths.shape[1:])\n for i,b in enumerate(survey.beam_b):\n bEths=Eths/b # array of shape NFRB, 1/b\n #bEobs=bEths*survey.Ss\n bEobs=bEths*s\n for j,w in enumerate(grid.eff_weights):\n temp=grid.array_diff_lf(bEobs[j,:],Emin,Emax,gamma) * FtoE #one dim in beamshape, one dim in FRB\n \n psnr += temp.T*survey.beam_o[i]*w #multiplies by beam factors and weight\n \n # at this stage, we have the amplitude from diff power law summed over beam and weight\n \n # we only alculate the following sg and V factors to get units to be\n # comparable to the 1D case - otherwise it is superfluous\n sg = grid.sfr_smear[izs1,idms1]*(1.-dkdms)*(1-dkzs)\n sg += grid.sfr_smear[izs2,idms1]*(1.-dkdms)*dkzs\n sg += grid.sfr_smear[izs1,idms2]*dkdms*(1-dkzs)\n sg += grid.sfr_smear[izs2,idms2]*dkdms*dkzs\n dV = grid.dV[izs1]*(1-dkzs) + grid.dV[izs2]*dkzs\n # at this stage, sg and dV account for the DM distribution and SFR;\n # dV is the volume elements\n # we just multiply these together\n sgV = sg*dV\n wzpsnr = psnr.T*sgV\n \n # this step weights psnr by the volumetric values\n \n ######## NORMALISATION DISCUSSION ######\n # we want to calculate p(snr) dpsnr\n # this must be \\int p(snr | w,b) p(w,b) dw,b\n # \\int p(snr | detection) p(det|w,b) p(w,b) dw,b\n # to make it an indpeendent factor, and not double-count it, means calculating\n # \\int p(snr | detection) dsnr p(det|w,b) p(w,b) dw,b / \\int p(det|w,b) p(w,b) dw,b\n # array_diff_power_law simply calculates p(snr), which is the probability amplitude\n # -(gamma*Eth**(gamma-1)) / (Emin**gamma-Emax**gamma )\n # this includes the probability; hence need to account for this\n \n # it is essential that this normalisation occurs for a normalised pvals\n # this normalisation essentially undoes the absolute calculation of the 
rate, i.e. we are normalising by the total distribution\n # hence we *really* ought to be adding the normalisation to this...\n # the idea here is that p(snr,det)/p(det) * p(det)/pnorm. Hence pvals - which contains\n # the normalisation - should be the un-normalised values.\n \n wzpsnr /= pvals\n \n psnrs[:,k]=wzpsnr\n \n # checks to ensure all frbs have a chance of being detected\n bad=np.array(np.where(wzpsnr == 0.))\n if bad.size > 0:\n snrll = float('NaN') # none of this is possible! [somehow...]\n else:\n snrll = np.sum(np.log10(wzpsnr))\n \n llsum += snrll\n if doplot is not None:\n plt.figure()\n ax=plt.gca()\n ax.set_aspect('auto')\n \n dlogs=np.log(slist[1])-np.log(slist[0])\n wpsnrs = psnrs*(slist)\n norm=np.sum(wpsnrs,axis=1)\n plotpsnrs = (psnrs.T/norm).T #correctly normalised now\n print(\"For plot \",doplot,\" norms were \",norm)\n #now multiply by slist to some power\n plotpsnrs*=slist\n \n xmin=xlim[0]\n xmax=xlim[1]\n ymin=ylim[0]\n ymax=ylim[1]\n \n plt.ylim(ymin,ymax)\n plt.xlim(xmin,xmax)\n plt.yscale('log')\n plt.xscale('log')\n \n linestyles=['-',':','--','-.']\n markerstyles=['o','^','x']\n ylist=[]\n DMs=survey.DMs[idmsort]\n Ss=survey.Ss[idmsort]\n for j,DM in enumerate(DMs):\n ls=linestyles[int(j/10)]\n ms=markerstyles[int(j/10)]\n plt.plot(slist,plotpsnrs[j],linestyle=ls,zorder=1)\n # adds in observed plot\n sobs=Ss[j]\n i2=np.where(slist>sobs)[0][0]\n i1=i2-1\n k=(sobs-slist[i1])/(slist[i2]-slist[i1])\n y=(k*plotpsnrs[j,i2]+(1.-k)*plotpsnrs[j,i1])\n plt.scatter(Ss[j],y,color=plt.gca().lines[-1].get_color(),s=70,marker=ms,zorder=2)\n plt.plot(slist,-plotpsnrs[j],label=str(int(round(DM,0))),linestyle=ls,zorder=1,marker=ms,color=plt.gca().lines[-1].get_color())\n \n #plt.plot(DMobs,pvals,'ro')\n plt.xlabel('$s$')\n plt.ylabel('$s \\\\, p_s(s)$')\n from matplotlib.ticker import ScalarFormatter\n ax=plt.gca()\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n \n import matplotlib.ticker as ticker\n\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))\n ax.xaxis.set_minor_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))\n \n plt.legend(loc='upper right',ncol=4,fontsize=10)\n plt.tight_layout()\n ax.set_aspect('auto')\n plt.savefig(doplot)\n plt.close()\n return slist,psnrs", "title": "" }, { "docid": "de88cfa295c296a6bdf10b84f39746f4", "score": "0.4955698", "text": "def make_grid3():\n\n def f(x, y):\n return -(1 - x)**2 - 100 * (y - x**2)**2\n\n # Set [x-min, x-max] x [y-min, y-max] for grid3.\n a, b, c, d = -2, 2, -1, 3\n\n return np.array([\n [f(a + i * (b - a) / 99., c + j * (d - c) / 99.) 
for j in range(100)]\n for i in range(100)])", "title": "" }, { "docid": "bc0d9e4e27ef28007b8d619e1238871c", "score": "0.4947885", "text": "def interpolateToGrid(self,geodict,method='linear'):\n raise NotImplementedError('interpolateToGrid method not implemented in base class')", "title": "" }, { "docid": "3ec074b88c73de996e9d44b4eaaa7bbf", "score": "0.49426052", "text": "def snap_to_grid(x: number_t, tick: number_t, offset: number_t=0, nearest=True) -> number_t:\n t = x.__class__\n if nearest:\n return t(round((x - offset) / tick)) * tick + offset\n else:\n return t(int((x - offset) / tick)) * tick + offset", "title": "" }, { "docid": "9d274b6f337272c64e878a6e44625635", "score": "0.4942427", "text": "def refine_grid(self, new_grid, axis=0, fun=np.array, **kwargs):\n if len(self.grids[axis]) > 1:\n f = interpolate.interp1d(fun(self.grids[axis]), self.data,\n axis=axis, **kwargs)\n self.grids[axis] = new_grid\n self.data = f(fun(new_grid))\n else: # if the intention is to create a useful TensorX\n self.data = self.data.repeat(len(new_grid), axis=axis)\n self.grids[axis] = new_grid\n\n self.check_dimension()\n\n return self", "title": "" }, { "docid": "be8a937396e9f888f2a28c9e611448c6", "score": "0.4931954", "text": "def grid_chunk(lon, lat, data, dst_crs, transform, src_crs=CRS_WGS):\n # transform coordinates\n xs, ys = map(np.asarray, rasterio.warp.transform(\n src_crs=src_crs,\n dst_crs=dst_crs,\n xs=lon,\n ys=lat\n ))\n\n # flooring the transformed (pixel center) point coords gives nearest\n # (0-based) grid cell center index\n idx = np.vstack([v.astype('int64') for v in (~transform * (xs, ys))])\n\n uidx, inverse = np.unique(idx, axis=1, return_inverse=True)\n datareduced = map_reduce_nanmean(data, inverse, num_unique=uidx.shape[1])\n\n return uidx, datareduced", "title": "" }, { "docid": "97629ba9a7e03d5fdbbe0ddc0a8749f5", "score": "0.4929845", "text": "def _calculate_distances_land_grid(base_point_vector_path, base_raster_path,\n target_dist_raster_path, work_dir):\n LOGGER.info('Starting _calculate_distances_land_grid.')\n temp_dir = tempfile.mkdtemp(dir=work_dir, prefix='calc-dist-land')\n\n # Open the point shapefile and get the layer\n base_point_vector = gdal.OpenEx(base_point_vector_path, gdal.OF_VECTOR)\n base_point_layer = base_point_vector.GetLayer()\n # A list to hold the land to grid distances in order for each point\n # features 'L2G' field\n l2g_dist = []\n # A list to hold the individual distance transform path's in order\n land_point_dist_raster_path_list = []\n\n # Get the original layer definition which holds needed attribute values\n base_layer_defn = base_point_layer.GetLayerDefn()\n file_ext, driver_name = _get_file_ext_and_driver_name(\n base_point_vector_path)\n output_driver = ogr.GetDriverByName(driver_name)\n single_feature_vector_path = os.path.join(\n temp_dir, 'single_feature' + file_ext)\n target_vector = output_driver.CreateDataSource(single_feature_vector_path)\n\n # Create the new layer for target_vector using same name and\n # geometry type from base_vector as well as spatial reference\n target_layer = target_vector.CreateLayer(base_layer_defn.GetName(),\n base_point_layer.GetSpatialRef(),\n base_layer_defn.GetGeomType())\n\n # Get the number of fields in original_layer\n base_field_count = base_layer_defn.GetFieldCount()\n\n # For every field, create a duplicate field and add it to the new\n # shapefiles layer\n for fld_index in range(base_field_count):\n base_field = base_layer_defn.GetFieldDefn(fld_index)\n target_field = 
ogr.FieldDefn(base_field.GetName(),\n base_field.GetType())\n # NOT setting the WIDTH or PRECISION because that seems to be\n # unneeded and causes interesting OGR conflicts\n target_layer.CreateField(target_field)\n\n # Create a new shapefile with only one feature to burn onto a raster\n # in order to get the distance transform based on that one feature\n for feature_index, point_feature in enumerate(base_point_layer):\n # Get the point features land to grid value and add it to the list\n field_index = point_feature.GetFieldIndex('L2G')\n l2g_dist.append(float(point_feature.GetField(field_index)))\n\n # Copy original_datasource's feature and set as new shapes feature\n output_feature = ogr.Feature(feature_def=target_layer.GetLayerDefn())\n\n # Since the original feature is of interest add its fields and\n # Values to the new feature from the intersecting geometries\n # The False in SetFrom() signifies that the fields must match\n # exactly\n output_feature.SetFrom(point_feature, False)\n target_layer.CreateFeature(output_feature)\n target_vector.SyncToDisk()\n target_layer.DeleteFeature(point_feature.GetFID())\n\n dist_raster_path = os.path.join(temp_dir,\n 'dist_%s.tif' % feature_index)\n _create_distance_raster(base_raster_path, single_feature_vector_path,\n dist_raster_path, work_dir)\n # Add each features distance transform result to list\n land_point_dist_raster_path_list.append(dist_raster_path)\n\n target_layer = None\n target_vector = None\n base_point_layer = None\n base_point_vector = None\n l2g_dist_array = numpy.array(l2g_dist)\n\n def _min_land_ocean_dist(*grid_distances):\n \"\"\"Aggregate each features distance transform output and create one\n distance output that has the shortest distances combined with each\n features land to grid distance\n\n Args:\n *grid_distances (numpy.ndarray): a variable number of numpy.ndarray\n\n Returns:\n a numpy.ndarray of the shortest distances\n\n \"\"\"\n # Get the shape of the incoming numpy arrays\n # Initialize with land to grid distances from the first array\n min_distances = numpy.min(grid_distances, axis=0)\n min_land_grid_dist = l2g_dist_array[numpy.argmin(grid_distances, axis=0)]\n return min_distances + min_land_grid_dist\n\n pygeoprocessing.raster_calculator(\n [(path, 1)\n for path in land_point_dist_raster_path_list], _min_land_ocean_dist,\n target_dist_raster_path, _TARGET_DATA_TYPE, _TARGET_NODATA)\n\n shutil.rmtree(temp_dir, ignore_errors=True)\n\n LOGGER.info('Finished _calculate_distances_land_grid.')", "title": "" }, { "docid": "6239b16b32f4acce7dd86006c8d39e99", "score": "0.49272096", "text": "def _normalizeThroughCDF(self, data, params):\n normed = self._sampleCDF(data, params)\n normed = self.normEngine.ppf(normed)\n return normed", "title": "" }, { "docid": "27d12f76ae4685c685dcb35a0ef2e896", "score": "0.49208406", "text": "def evaluate_grid(self, **params):\n def recurse_grid(fixed_params, grid_params):\n if len(grid_params) == 0:\n result = fixed_params\n result['metric'] = self.evaluate(fixed_params)\n result['average_metric'] = np.mean(result['metric'], axis=0)\n return [result]\n\n curr_param, curr_values = list(grid_params.items())[0]\n new_grid_params = dict(grid_params)\n del new_grid_params[curr_param]\n results = []\n for value in curr_values:\n results += recurse_grid({**fixed_params, curr_param: value}, new_grid_params)\n return results\n\n results = recurse_grid({}, params)\n results = pd.DataFrame(results)\n if self.data_dir is not None:\n self.save(results, params)\n self.num_evaluations += 1\n return 
results", "title": "" }, { "docid": "939b4f31ef0c40ca182a4cc691781335", "score": "0.49187538", "text": "def get_surrounding_grid(\n band: gdal.Dataset, x_index: int, y_index: int) -> (List[int], List[float]):\n\n # Read scanlines of the raster, build up the four points and corresponding values:\n scanline_one = band.ReadRaster(xoff=x_index, yoff=y_index, xsize=2, ysize=1,\n buf_xsize=2, buf_ysize=1, buf_type=gdal.GDT_Float32)\n row_one = struct.unpack('f' * 2, scanline_one)\n values = []\n values.extend(row_one)\n scanline_two = band.ReadRaster(xoff=x_index, yoff=y_index+1, xsize=2, ysize=1,\n buf_xsize=2, buf_ysize=1, buf_type=gdal.GDT_Float32)\n row_two = struct.unpack('f' * 2, scanline_two)\n values.append(row_two[1])\n values.append(row_two[0])\n\n points = [[x_index, y_index], [x_index+1, y_index],\n [x_index+1, y_index+1], [x_index, y_index+1]]\n\n return points, values", "title": "" }, { "docid": "86fd87b15daab331649c117518d9feb0", "score": "0.49186057", "text": "def interpolate(self, target_point):\n\n # interpolate ppfs to target point, interpolate quantiles step of [1]\n interpolated_ppfs = griddata(self.grid_points, self.ppfs, target_point)\n\n # compute pdf values for all bins, evaluate interpolant PDF values step of [1], drop the earlier\n # introduced extra bin\n interpolated_pdfs = pdf_from_ppf(\n self.bin_edges, interpolated_ppfs, self.quantiles\n )[..., 1:]\n\n # Renormalize pdf to sum of 1\n normed_interpolated_pdfs = norm_pdf(interpolated_pdfs, self.bin_edges[1:], self.normalization)\n\n # Re-swap axes and set all nans to zero\n return np.nan_to_num(normed_interpolated_pdfs).reshape(1, *self.input_shape[1:])", "title": "" }, { "docid": "20aa2da817c9c0c5e9fa8a3a08730c81", "score": "0.49096227", "text": "def compute_diffdisp(grid_perf,grid_disl,t_mag,a0,cutoff,modulo,plot):\n\n disps = [[atom2[2]-atom1[2],atom2[3]-atom1[3],min_image_t(atom1[4],atom2[4],t_mag,a0)-atom1[4]] for atom1,atom2 in zip(grid_perf,grid_disl)]\n\n ddlist = []\n for atom1_i,atom2_i,disp_i in zip(grid_perf,grid_disl,disps):\n for atom1_j,atom2_j,disp_j in zip(grid_perf,grid_disl,disps):\n if atom1_i[0] > atom1_j[0]:\n ## only consider diff. disp. 
between 1st nn within the slab\n if np.linalg.norm([atom2_j[2]-atom2_i[2],atom2_j[3]-atom2_i[3],min_image_t(atom2_i[4],atom2_j[4],t_mag,a0)-atom2_i[4]]) < cutoff:\n diffdisp = [disp_j[0]-disp_i[0],disp_j[1]-disp_i[1],(disp_j[2]-disp_i[2])%modulo]\n if plot == 'screw':\n ## take care of apparent discontinuity in the screw component across the (negative) cut plane\n if diffdisp[2] > modulo/2: diffdisp[2] -= modulo\n elif diffdisp[2] < -modulo/2: diffdisp[2] += modulo\n ddlist.append([atom2_i[2:5],atom2_j[2:5],diffdisp])\n\n return ddlist", "title": "" }, { "docid": "11b5e9004dbc83bd373f3f75f29777db", "score": "0.490096", "text": "def lininterp_1d(grid, vals, x):\n\n a, b, G = np.min(grid), np.max(grid), len(grid)\n\n s = (x - a) / (b - a)\n\n q_0 = max(min(int(s * (G - 1)), (G - 2)), 0)\n v_0 = vals[q_0]\n v_1 = vals[q_0 + 1]\n\n ฮป = s * (G - 1) - q_0\n\n return (1 - ฮป) * v_0 + ฮป * v_1", "title": "" }, { "docid": "852a7d6e1333fcb74920c7efb4ab5a91", "score": "0.4898185", "text": "def grid_visibility_weight_to_griddata(vis, griddata: GridData, cf):\n assert isinstance(vis, Visibility), vis\n assert vis.polarisation_frame == griddata.polarisation_frame\n\n nchan, npol, nz, ny, nx = griddata.shape\n sumwt = numpy.zeros([nchan, npol])\n pu_grid, pu_offset, pv_grid, pv_offset, pwg_grid, pwg_fraction, pwc_grid, pwc_fraction, pfreq_grid = \\\n convolution_mapping_visibility(vis, griddata, vis.frequency, cf)\n _, _, _, _, _, gv, gu = cf.shape\n coords = zip(vis.flagged_imaging_weight, pfreq_grid, pu_grid, pv_grid, pwg_grid)\n griddata.data[...] = 0.0\n\n real_gd = numpy.real(griddata.data)\n for vwt, chan, xx, yy, zzg in coords:\n real_gd[chan, :, zzg, yy, xx] += vwt\n sumwt[chan, :] += vwt\n\n griddata.data = real_gd.astype(\"complex\")\n\n return griddata, sumwt", "title": "" }, { "docid": "013a3830e2bfc242519537a78428e521", "score": "0.4897707", "text": "def warp_double_range(x, flo, arange_spatial_x, arange_spatial_y, padding_mode=\"zeros\"):\n B, C, H, W = x.size()\n\n # for speed\n xx = arange_spatial_x.view(1, -1).repeat(H, 1) # [H, W]\n yy = arange_spatial_y.view(-1, 1).repeat(1, W) # [H, W]\n\n xx = xx.view(1, 1, H, W)\n yy = yy.view(1, 1, H, W)\n grid = torch.cat((xx, yy), 1).float() # [1, 2, H, W]\n grid = grid.repeat(B, 1, 1, 1) # [B, 2, H, W]\n\n\n if x.is_cuda:\n grid = grid.cuda()\n vgrid = grid + flo\n\n vgridx = vgrid[:, 0, :, :].clone().unsqueeze(1)\n vgridy = vgrid[:, 1, :, :].clone().unsqueeze(1)\n vgridx = 2.0 * vgridx / max(W - 1, 1) - 1.0\n vgridy = 2.0 * vgridy / max(H - 1, 1) - 1.0\n\n vgrid = torch.cat([vgridx, vgridy], dim=1)\n vgrid = vgrid.permute(0, 2, 3, 1).contiguous()\n output = nn.functional.grid_sample(x, vgrid, mode='bilinear', padding_mode=padding_mode)\n\n return output", "title": "" }, { "docid": "62c4facf2ca6234accb555b1c86c0337", "score": "0.48976883", "text": "def radial_sample(self, azimuth):\n angle = 90 - azimuth # alpha = 90 - theta\n\n # finding grid corners (and transforming origin to (0,0))\n x_max = self.df['Easting'].max() - self.vent.x\n x_min = self.df['Easting'].min() - self.vent.x\n y_max = self.df['Northing'].max() - self.vent.y\n y_min = self.df['Northing'].min() - self.vent.y\n\n def pythagoras(x, y):\n return np.sqrt(x**2 + y**2)\n\n # finding longest possible distance from vent on grid\n # i.e. 
distance from vent to furthest corner\n r = max([pythagoras(x_max, y_max), pythagoras(x_max, y_min),\n pythagoras(x_min, y_min), pythagoras(x_min, y_max)])\n\n # creating array of distances along radial sampling line\n # TODO: Allow user specification of amount of samples\n # Possibly by clipping line first, then creating this linspace.\n rr = np.linspace(0, r, 100)\n\n # calculating coordinates at each point along sampling line\n coords = []\n for r in rr:\n coords.append((self.vent.x + r * np.cos(angle * (np.pi / 180)),\n self.vent.y + r * np.sin(angle * (np.pi / 180))))\n\n # Creating shapely LinString object to allow clipping\n line = LineString(coords)\n xl, yl = line.xy\n\n # Creating rectangular representation of grid boundaries\n grid_poly = Polygon([(x_max + self.vent.x, y_max + self.vent.y),\n (x_max + self.vent.x, y_min + self.vent.y),\n (x_min + self.vent.x, y_min + self.vent.y),\n (x_min + self.vent.x, y_max + self.vent.y)])\n\n xp, yp = grid_poly.exterior.xy\n\n # Intersecting radial line and grid to clip line to grid\n grid_line = line.intersection(grid_poly)\n xi, yi = grid_line.xy\n\n # Iterating along line, and finding nearest data point to each point\n near_points = []\n for point in grid_line.coords:\n near = shapely.ops.nearest_points(\n Point(point), MultiPoint(self.df.geometry.values))\n near_points.append(near[1])\n\n # Selecting each datapoint from dataframe\n sample = self.df.query(\"geometry in @near_points\")\n\n sample_points = LineString(near_points)\n\n xg, yg = sample_points.xy\n\n # plot_grid(self.df, self.vent)\n plt.plot(xp, yp, 'b', label='Grid Boundaries')\n plt.plot(xg, yg, 'g', marker='o', ms=3, label='Radial Samples')\n plt.plot(xl, yl, 'r--', label='Sampling line')\n plt.legend(loc='upper left')\n plt.title(r'Radial Sampling Algorithm with Azimuth $\\alpha$ = %d' % azimuth)\n plt.xlabel(\"Easting (m)\")\n plt.ylabel(\"Northing (m)\")\n plt.tight_layout()\n plt.savefig(\"rad_samp.eps\", dpi=200, format='eps')\n return sample", "title": "" }, { "docid": "aa237af44a076b0770486fb808678251", "score": "0.48925152", "text": "def test_fractional_coord_to_oversampled_index_math():\n\n ##NB Example edge case:\n oversampling = 7\n subpix_offset = 0.5\n # Returns index value greater than ``oversampling//2`` :\n assert np.around(subpix_offset * oversampling).astype(int) == 4\n assert (calculate_oversampled_kernel_indices(\n subpixel_coord=subpix_offset, oversampling=oversampling) == 3).all()\n\n # OK, now demonstrate values with oversampling of 0.5, which has easy\n # numbers to calculate since 1/5 = 0.2\n oversampling = 5\n io_pairs = np.array([\n [-0.5, -2],\n [-0.4999, -2],\n [-0.4, -2],\n [-0.35, -2],\n [-0.3, -2], # <-- numpy.around favours even roundings\n [-0.2999, -1],\n [-0.2, -1],\n [-0.11, -1],\n [-0.1, 0], # <-- numpy.around favours even roundings\n [-0.09, 0],\n [-0.01, 0],\n [0.0, 0],\n ])\n\n outputs = calculate_oversampled_kernel_indices(io_pairs[:, 0], oversampling)\n assert (io_pairs[:, 1] == outputs).all()\n # symmetry:\n io_pairs *= -1.\n outputs = calculate_oversampled_kernel_indices(io_pairs[:, 0], oversampling)\n assert (io_pairs[:, 1] == outputs).all()\n\n ## Check it works as expected when the inputs are co-ordinate pairs:\n inputs = np.array([(0.3, 0.3), ])\n outputs = np.array([(2, 2), ])\n assert (calculate_oversampled_kernel_indices(inputs, oversampling) ==\n outputs).all()", "title": "" }, { "docid": "2b857510bacca722fa7c92e204893adc", "score": "0.48875526", "text": "def regrid_esmpy_grid_object(target_nlat, target_nlon,\n grid_obj, 
interp_method='bilinear'):\n\n print('Regridding using ESMPy....')\n print((' interpolation method: {}'.format(interp_method)))\n print((' target grid: nlat={}, nlon={}'.format(target_nlat,\n target_nlon)))\n\n if grid_obj.lat_grid is None or grid_obj.lon_grid is None:\n lon_grid, lat_grid = np.meshgrid(grid_obj.lon, grid_obj.lat)\n else:\n lon_grid = grid_obj.lon_grid\n lat_grid = grid_obj.lat_grid\n\n if grid_obj.climo is not None:\n new_climo, lat, lon = regrid_esmpy(target_nlat, target_nlon,\n 1,\n grid_obj.climo,\n lat_grid,\n lon_grid,\n len(grid_obj.lat),\n len(grid_obj.lon),\n method=interp_method)\n else:\n new_climo = None\n\n new_data, new_lat, new_lon = regrid_esmpy(target_nlat, target_nlon,\n grid_obj.nsamples,\n grid_obj.data,\n lat_grid,\n lon_grid,\n len(grid_obj.lat),\n len(grid_obj.lon),\n method=interp_method)\n return new_data, new_lat, new_lon, new_climo", "title": "" }, { "docid": "bf464f2fd74ef56c87bd89065e0b54f7", "score": "0.48848587", "text": "def calc_psnr_1D(grid,survey,pset,slist,doplot=None,xlim=[1,100],ylim=[0.01,1]):\n rates=grid.rates\n dmvals=grid.dmvals\n zvals=grid.zvals\n DMobs=survey.DMEGs\n #DMobs=np.sort(DMobs)\n DMobs=np.sort(survey.DMEGs)\n idmsort=np.argsort(survey.DMEGs)\n \n # start by collapsing over z\n pdm=np.sum(rates,axis=0)\n \n ddm=dmvals[1]-dmvals[0]\n kdms=DMobs/ddm\n idms1=kdms.astype('int')\n idms2=idms1+1\n dkdms=kdms-idms1\n pvals=pdm[idms1]*(1.-dkdms) + pdm[idms2]*dkdms\n \n global_norm=np.sum(pdm)\n log_global_norm=np.log10(global_norm)\n \n \n llsum=np.sum(np.log10(pvals))-log_global_norm*DMobs.size\n \n NS=slist.size\n psnrs=np.zeros([survey.Ss.size,NS]) # generates plot for each individual FRB\n for k,s in enumerate(slist):\n # NOTE: to break this into a p(SNR|b) p(b) term, we first take\n # the relative likelihood of the threshold b value compared\n # to the entire lot, and then we calculate the local\n # psnr for that beam only. But this requires a much more\n # refined view of 'b', rather than the crude standatd \n # parameterisation\n \n # calculate vector of grid thresholds\n Emax=grid.Emax\n Emin=grid.Emin\n gamma=grid.gamma\n psnr=np.zeros([survey.Ss.size])\n \n # get vector of thresholds as function of z and threshold/weight list\n # note that the dimensions are, nthresh (weights), z, DM\n Eths = grid.thresholds[:,:,idms1]*(1.-dkdms)+ grid.thresholds[:,:,idms2]*dkdms\n \n ##### IGNORE THIS, PVALS NOW CONTAINS CORRECT NORMALISATION ######\n # we have previously calculated p(DM), normalised by the global sum over all DM (i.e. given 1 FRB detection)\n # what we need to do now is calculate this normalised by p(DM),\n # i.e. 
psnr is the probability of snr given DM, and hence the total is\n # p(snr,DM)/p(DM) * p(DM)/b(burst)\n # get a vector of rates as a function of z\n #rs = rates[:,idms1[j]]*(1.-dkdms[j])+ rates[:,idms2[j]]*dkdms[j]\n rs = rates[:,idms1]*(1.-dkdms)+ rates[:,idms2]*dkdms\t\n #norms=np.sum(rs,axis=0)/global_norm\n norms=pvals\n \n zpsnr=np.zeros(Eths.shape[1:])\n beam_norm=np.sum(survey.beam_o)\n #in theory, we might want to normalise by the sum of the omeba_b weights, although it does not matter here\n \n \n for i,b in enumerate(survey.beam_b):\n #iterate over the grid of weights\n bEths=Eths/b #this is the only bit that depends on j, but OK also!\n #now wbEths is the same 2D grid\n #wbEths=bEths #this is the only bit that depends on j, but OK also!\n \n #bEobs=bEths*survey.Ss #should correctky multiply the last dimensions\n # we simply now replace survey.Ss with the value of s\n bEobs=bEths*s\n \n for j,w in enumerate(grid.eff_weights):\n temp=(grid.array_diff_lf(bEobs[j,:,:],Emin,Emax,gamma).T*grid.FtoE).T\n zpsnr += temp*survey.beam_o[i]*w #weights this be beam solid angle and efficiency\n \n \n # we have now effectively calculated the local probabilities in the source-counts histogram for a given DM\n # we have to weight this by the sfr_smear factors, and the volumetric probabilities\n # this are the grid smearing factors incorporating pcosmic and the host contributions\n sg = grid.sfr_smear[:,idms1]*(1.-dkdms)+ grid.sfr_smear[:,idms2]*dkdms\n sgV = (sg.T*grid.dV.T).T\n wzpsnr = zpsnr * sgV\n \n \n #THIS HAS NOT YET BEEN NORMALISED!!!!!!!!\n # at this point, wzpsnr should look exactly like the grid.rates, albeit\n # A: differential, and \n # B: slightly modified according to observed and not threshold fluence\n \n # normalises for total probability of DM occurring in the first place.\n # We need to do this. This effectively cancels however the Emin-Emax factor.\n # sums down the z-axis\n psnr=np.sum(wzpsnr,axis=0)\n psnr /= norms #normalises according to the per-DM probability\n \n \n psnrs[:,k]=psnr\n \n \n # checks to ensure all frbs have a chance of being detected\n bad=np.array(np.where(psnr == 0.))\n if bad.size > 0:\n snrll = float('NaN') # none of this is possible! 
[somehow...]\n else:\n snrll = np.sum(np.log10(psnr))\n \n llsum += snrll\n \n if doplot is not None:\n plt.figure()\n ax=plt.gca()\n ax.set_aspect('auto')\n \n dlogs=np.log(slist[1])-np.log(slist[0])\n wpsnrs = psnrs*(slist)\n norm=np.sum(wpsnrs,axis=1)\n plotpsnrs = (psnrs.T/norm).T #correctly normalised now\n print(\"For plot \",doplot,\" norms were \",norm)\n #now multiply by slist to some power\n plotpsnrs*=slist\n \n xmin=xlim[0]\n xmax=xlim[1]\n ymin=ylim[0]\n ymax=ylim[1]\n \n plt.ylim(ymin,ymax)\n plt.xlim(xmin,xmax)\n plt.yscale('log')\n plt.xscale('log')\n \n linestyles=['-',':','--','-.']\n markerstyles=['o','^','x']\n ylist=[]\n DMs=survey.DMs[idmsort]\n Ss=survey.Ss[idmsort]\n for j,DM in enumerate(DMs):\n ls=linestyles[int(j/10)]\n ms=markerstyles[int(j/10)]\n plt.plot(slist,plotpsnrs[j],linestyle=ls,zorder=1)\n # adds in observed plot\n sobs=Ss[j]\n \n i2=np.where(slist>sobs)[0][0]\n i1=i2-1\n k=(sobs-slist[i1])/(slist[i2]-slist[i1])\n y=(k*plotpsnrs[j,i2]+(1.-k)*plotpsnrs[j,i1])\n plt.scatter(Ss[j],y,color=plt.gca().lines[-1].get_color(),s=70,marker=ms,zorder=2)\n plt.plot(slist,-plotpsnrs[j],label=str(int(round(DM,0))),linestyle=ls,zorder=1,marker=ms,color=plt.gca().lines[-1].get_color())\n \n #plt.plot(DMobs,pvals,'ro')\n plt.xlabel('$s$')\n plt.ylabel('$s \\\\, p_s(s)$')\n \n if xmax == 10:\n from matplotlib.ticker import ScalarFormatter\n ax=plt.gca()\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n \n import matplotlib.ticker as ticker\n \n ax.xaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))\n ax.xaxis.set_minor_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))\n \n plt.legend(loc='upper right',ncol=4,fontsize=10)\n plt.tight_layout()\n ax.set_aspect('auto')\n plt.savefig(doplot)\n plt.close()\n return slist,psnrs", "title": "" }, { "docid": "91518eb9ec06a23a95877f7f11033649", "score": "0.48840034", "text": "def interpolate_ice_fractions(input_file_path: Path=None, out_dir: Path=None, target_grid_config: GridConfig=None):\n out_file_name = \"cis_nic_glerl_interpolated_lc_fix.nc\"\n\n\n ds_in = Dataset(input_file_path)\n lon_s, lat_s = [ds_in.variables[k][:] for k in [\"lon\", \"lat\"]]\n\n time_var_in = ds_in.variables[\"time\"]\n time_var_in_data = time_var_in[:]\n\n ice_cover_var_in = ds_in.variables[\"ice_cover\"]\n\n\n\n with Dataset(out_dir.joinpath(out_file_name), \"w\") as ds_out:\n\n lon_t, lat_t = target_grid_config.get_lons_and_lats_of_gridpoint_centers()\n # layout of the output netcdf file\n ds_out.createDimension(\"time\")\n ds_out.createDimension(\"x\", lon_t.shape[0])\n ds_out.createDimension(\"y\", lon_t.shape[1])\n\n lon_var = ds_out.createVariable(\"lon\", \"f4\", dimensions=(\"x\", \"y\"))\n lat_var = ds_out.createVariable(\"lat\", \"f4\", dimensions=(\"x\", \"y\"))\n\n lon_var[:] = lon_t\n lat_var[:] = lat_t\n\n\n time_var = ds_out.createVariable(\"time\", \"i4\", dimensions=(\"time\",))\n time_var.units = time_var_in.units\n time_var[:] = time_var_in_data\n\n lc_var = ds_out.createVariable(\"LC\", \"f4\", dimensions=(\"time\", \"x\", \"y\"),\n zlib=True,\n least_significant_digit=3)\n lc_var.units = \"-\"\n lc_var.coordinates = \"lon lat\"\n\n\n\n # compare areas in degrees\n\n area_s = get_mean_gridcell_area(lon_s, lat_s)\n area_t = get_mean_gridcell_area(lon_t, lat_t)\n\n\n radius_of_influence = max(area_s, area_t) ** 0.5 * (np.pi / 180.0) * lat_lon.EARTH_RADIUS_METERS\n\n print(\"area_s={}, area_t={}\".format(area_s, area_t))\n\n # spatial interpolation\n n_neighbours 
= max(int(area_t / area_s + 0.5), 1)\n print(\"nneighbours = {}\".format(n_neighbours))\n\n\n xs, ys, zs = lat_lon.lon_lat_to_cartesian(lon_s.flatten(), lat_s.flatten())\n ktree = KDTree(list(zip(xs, ys, zs)))\n\n\n xt, yt, zt = lat_lon.lon_lat_to_cartesian(lon_t.flatten(), lat_t.flatten())\n\n dists, inds = ktree.query(list(zip(xt, yt, zt)), k=n_neighbours)\n\n for i in range(len(time_var_in_data)):\n the_field = ice_cover_var_in[i, :, :]\n\n\n if np.any(~the_field.mask):\n\n the_field[the_field.mask] = np.nan\n\n if n_neighbours > 1:\n lc_interpolated = np.mean(the_field.flatten()[inds], axis=1)\n else:\n lc_interpolated = the_field.flatten()[inds]\n\n\n lc_interpolated = np.ma.masked_where(np.isnan(lc_interpolated), lc_interpolated)\n\n # reshape back to the 2d array\n lc_interpolated.shape = lon_t.shape\n else:\n lc_interpolated = np.ma.masked_all_like(lon_t)\n\n lc_var[i, :, :] = lc_interpolated\n\n\n # close the input dataset\n ds_in.close()", "title": "" }, { "docid": "91f1a58afc522d9178ee6b24af1ab338", "score": "0.48806185", "text": "def ws2d_sgrid(self, p=None):\n\n with h5py.File(self.rawfile, 'r') as rawh5, h5py.File(self.outname.as_posix(), 'r+') as smth5:\n raw_ds = rawh5.get('data')\n raw_dates_all = [x.decode() for x in rawh5.get('dates')[...]]\n rtres = raw_ds.attrs['temporalresolution'].item()\n rtres = raw_ds.attrs['temporalresolution'].item()\n smt_ds = smth5.get('data')\n smt_dates = smth5.get('dates')\n smt_sgrid = smth5.get('sgrid')\n rawshape = raw_ds.shape\n rawchunks = raw_ds.chunks\n smoothshape = smt_ds.shape\n smoothchunks = smt_ds.chunks\n nodata = raw_ds.attrs['nodata'].item()\n self.temporalresolution = smt_ds.attrs['temporalresolution'].item()\n tshift = raw_ds.attrs['tshift'].item()\n\n # Store run parameters for infotool\n smt_ds.attrs['processingtimestamp'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n\n if p:\n smt_ds.attrs['lastrun'] = 'fixed s from grid with p = {}'.format(p)\n smt_ds.attrs['pvalue'] = p\n else:\n smt_ds.attrs['lastrun'] = 'fixed s from grid'\n try:\n del smt_ds.attrs['pvalue']\n except KeyError:\n pass\n\n\n dates = DateHelper(rawdates=raw_dates_all,\n rtres=rtres,\n stres=self.temporalresolution,\n start=self.startdate)\n\n\n dix = dates.getDIX()[-self.nupdate:]\n\n # Resize if date list is bigger than shape of smoothed data\n if len(dates.target) > smoothshape[1]:\n smt_dates.resize((dates.target_length,))\n smt_ds.resize((smoothshape[0], dates.target_length))\n smt_dates[...] = np.array(dates.target, dtype='S8')\n smoothshape = smt_ds.shape\n\n # calculate offsets\n rawoffset = raw_dates_all.index(self.rawdates_nsmooth[0])\n\n # if dataset is smaller or equal then nupdate, take index 0\n try:\n smoothoffset = [x.decode() for x in smt_dates[...]].index(dates.target[-self.nupdate])\n except IndexError:\n smoothoffset = [x.decode() for x in smt_dates[...]].index(dates.target[0])\n\n new_dim = smoothshape[1] - smoothoffset\n\n if self.nworkers > 1:\n if self.tinterpolate:\n shared_array_smooth = init_shared(smoothchunks[0] * new_dim)\n arr_smooth = tonumpyarray(shared_array_smooth)\n arr_smooth.shape = (smoothchunks[0], new_dim)\n arr_smooth[...] 
= nodata\n vector_daily = dates.getDV(nodata)\n\n # Shift for interpolation\n for rdate in self.rawdates_nsmooth:\n vector_daily[dates.daily.index((fromjulian(rdate) + timedelta(tshift)).strftime('%Y%j'))] = -1\n else:\n vector_daily = None\n shared_array_smooth = None\n arr_smooth = None\n shared_array_raw = init_shared(rawchunks[0] * len(self.rawdates_nsmooth))\n\n parameters = init_parameters(rdim=(rawchunks[0], len(self.rawdates_nsmooth)),\n sdim=(smoothchunks[0], new_dim),\n nd=nodata,\n shared_array_smooth=shared_array_smooth,\n vec_dly=vector_daily,\n dix=dix,\n p=p)\n\n parameters['shared_array_sgrid'] = init_shared(rawchunks[0])\n arr_raw = tonumpyarray(shared_array_raw)\n arr_raw.shape = (rawchunks[0], len(self.rawdates_nsmooth))\n arr_sgrid = tonumpyarray(parameters['shared_array_sgrid'])\n\n pool = mp.Pool(processes=self.nworkers, initializer=init_worker, initargs=(shared_array_raw, parameters))\n # load raw data\n for br in range(0, rawshape[0], rawchunks[0]):\n for bc in range(0, arr_raw.shape[1], rawchunks[1]):\n bco = bc + rawoffset\n arr_raw[:, bc:bc+rawchunks[1]] = raw_ds[br:br+rawchunks[0], bco:bco+rawchunks[1]]\n\n ndix = np.sum(arr_raw != nodata, 1) >= (arr_raw.shape[1] * 0.2) # 20%+ data\n map_index = np.where(ndix)[0]\n if map_index.size == 0:\n continue #no data points, skipping to next block\n\n arr_sgrid[...] = smt_sgrid[br:br+rawchunks[0]]\n _ = pool.map(execute_ws2d_sgrid, map_index)\n\n # write back data\n if self.tinterpolate:\n for bcs, bcr in zip(range(smoothoffset, smoothshape[1], smoothchunks[1]), range(0, arr_smooth.shape[1], smoothchunks[1])):\n smt_ds[br:br+smoothchunks[0], bcs:bcs+smoothchunks[1]] = arr_smooth[:, bcr:bcr+smoothchunks[1]]\n arr_smooth[...] = nodata\n else:\n for bcs, bcr in zip(range(smoothoffset, smoothshape[1], smoothchunks[1]), range(self.array_offset, arr_raw.shape[1], smoothchunks[1])):\n smt_ds[br:br+smoothchunks[0], bcs:bcs+smoothchunks[1]] = arr_raw[:, bcr:bcr+smoothchunks[1]]\n\n # close pool\n pool.close()\n pool.join()\n\n else:\n arr_raw = np.zeros((rawchunks[0], len(self.rawdates_nsmooth)), dtype='double')\n arr_sgrid = np.zeros((rawchunks[0],), dtype='double')\n\n # Create weights array\n wts = arr_raw.copy()\n if self.tinterpolate:\n arr_smooth = np.zeros((smoothchunks[0], new_dim), dtype='double')\n vector_daily = dates.getDV(nodata)\n\n # Shift for interpolation\n for rdate in self.rawdates_nsmooth:\n vector_daily[dates.daily.index((fromjulian(rdate) + timedelta(tshift)).strftime('%Y%j'))] = -1\n else:\n arr_smooth = None\n\n for br in range(0, rawshape[0], rawchunks[0]):\n try:\n arr_smooth[...] = nodata\n except TypeError:\n pass\n wts[...] = 0\n\n for bc in range(0, arr_raw.shape[1], rawchunks[1]):\n bco = bc + rawoffset\n arr_raw[:, bc:bc+rawchunks[1]] = raw_ds[br:br+rawchunks[0], bco:bco+rawchunks[1]]\n wts[...] = (arr_raw != nodata)*1\n ndix = np.sum(wts, 1) >= (arr_raw.shape[1] * 0.2) # 20%+ data\n map_index = np.where(ndix)[0]\n\n if map_index.size == 0:\n continue #no data points, skipping to next block\n arr_sgrid[...] = smt_sgrid[br:br+rawchunks[0]]\n\n for ix in map_index:\n if not p:\n arr_raw[ix, :] = ws2d(y=arr_raw[ix, :], lmda=10**arr_sgrid[ix], w=wts[ix, :])\n else:\n arr_raw[ix, :] = ws2dp(y=arr_raw[ix, :], lmda=10**arr_sgrid[ix], w=wts[ix, :], p=p)\n if self.tinterpolate:\n z2 = vector_daily.copy()\n z2[z2 != nodata] = arr_raw[ix, :]\n z2[...] 
= ws2d(y=z2, lmda=0.0001, w=np.array((z2 != nodata)*1, dtype='double'))\n arr_smooth[ix, :] = z2[dix]\n else:\n pass\n\n # write back data\n if self.tinterpolate:\n for bcs, bcr in zip(range(smoothoffset, smoothshape[1], smoothchunks[1]), range(0, arr_smooth.shape[1], smoothchunks[1])):\n smt_ds[br:br+smoothchunks[0], bcs:bcs+smoothchunks[1]] = arr_smooth[:, bcr:bcr+smoothchunks[1]]\n arr_smooth[...] = nodata\n else:\n for bcs, bcr in zip(range(smoothoffset, smoothshape[1], smoothchunks[1]), range(self.array_offset, arr_raw.shape[1], smoothchunks[1])):\n smt_ds[br:br+rawchunks[0], bcs:bcs+smoothchunks[1]] = arr_raw[:, bcr:bcr+smoothchunks[1]]", "title": "" }, { "docid": "b6dd00b303cd3a11e32f393e6c923440", "score": "0.4878902", "text": "def build_params_grid(self, res, alpha):\n grid = []\n for i in range(-res, res):\n row = []\n for j in range(-res, res):\n w_new = (\n self.optim_point.cpu()\n + i * alpha * self.dir0\n + j * alpha * self.dir1\n )\n row.append(w_new)\n grid.append(row)\n assert (grid[res][res] == self.optim_point.cpu()).all()\n return grid", "title": "" }, { "docid": "171bb6361afd15c0301f598b3ebae38e", "score": "0.4876526", "text": "def test_interpolation_wrapper(self):\r\n\r\n # Create test data\r\n lon_ul = 100 # Longitude of upper left corner\r\n lat_ul = 10 # Latitude of upper left corner\r\n numlon = 8 # Number of longitudes\r\n numlat = 5 # Number of latitudes\r\n\r\n # Define array where latitudes are rows and longitude columns\r\n A = numpy.zeros((numlat, numlon))\r\n\r\n # Establish coordinates for lower left corner\r\n lat_ll = lat_ul - numlat\r\n lon_ll = lon_ul\r\n\r\n # Define pixel centers along each direction\r\n longitudes = numpy.linspace(lon_ll + 0.5,\r\n lon_ll + numlon - 0.5, numlon)\r\n latitudes = numpy.linspace(lat_ll + 0.5,\r\n lat_ll + numlat - 0.5, numlat)\r\n\r\n # Define raster with latitudes going bottom-up (south to north).\r\n # Longitudes go left-right (west to east)\r\n for i in range(numlat):\r\n for j in range(numlon):\r\n A[numlat - 1 - i, j] = linear_function(longitudes[j],\r\n latitudes[i])\r\n\r\n # Test first that original points are reproduced correctly\r\n for i, eta in enumerate(latitudes):\r\n for j, xi in enumerate(longitudes):\r\n\r\n val = interpolate_raster(longitudes, latitudes, A,\r\n [(xi, eta)], mode='linear')[0]\r\n assert numpy.allclose(val,\r\n linear_function(xi, eta),\r\n rtol=1e-12, atol=1e-12)\r\n\r\n # Then test that genuinly interpolated points are correct\r\n xis = numpy.linspace(lon_ll + 1, lon_ll + numlon - 1, 10 * numlon)\r\n etas = numpy.linspace(lat_ll + 1, lat_ll + numlat - 1, 10 * numlat)\r\n for xi in xis:\r\n for eta in etas:\r\n val = interpolate_raster(longitudes, latitudes, A,\r\n [(xi, eta)], mode='linear')[0]\r\n assert numpy.allclose(val,\r\n linear_function(xi, eta),\r\n rtol=1e-12, atol=1e-12)", "title": "" } ]
dc8ca5ea91bb92119eca9c9fce7484a5
Set lase state to either True or False.
[ { "docid": "650f72297f6ef32dd426f087087f11b2", "score": "0.73475146", "text": "def lase(self, state: bool):\r\n self._lase = state\r\n self.lase_hist.append(state)\r\n\r\n # if the new option is the same as before don't send changes to labjack\r\n if (self.lase_hist[-2] != state) and self._daq:\r\n msg = Message(\"lase\", state, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)", "title": "" } ]
[ { "docid": "f39497edbf7019fa15424233840455d3", "score": "0.6832224", "text": "def set_living(self, state):\n if isinstance(state, bool):\n self.living = state\n else:\n raise TypeError('state must be boolean.')", "title": "" }, { "docid": "a8ee0c74ec5169945e1d6aa415b37f4a", "score": "0.6506362", "text": "def change_light(self):\n self._light_status = not self._light_status", "title": "" }, { "docid": "af1bdf79444ddcb4fe2b3c3cff05ebd5", "score": "0.6251901", "text": "def set_state( self ):", "title": "" }, { "docid": "ee4aa2f5f070d5817bb130f5ce603ab4", "score": "0.6223033", "text": "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "title": "" }, { "docid": "b5a33ba47d1c62a98b939b024920eb61", "score": "0.6182172", "text": "def set_light_on(self):\r\n self._light = \"ON\"", "title": "" }, { "docid": "1c04d38ce8136026d43b8f526b918f1a", "score": "0.6160645", "text": "def turn_on(self, **kwargs):\n self._is_on = True", "title": "" }, { "docid": "a904d0d2c1289fbf74385f7f608f0ba1", "score": "0.6153134", "text": "def turnOn(self):\n self.off = False\n self.turnOnAnimation()", "title": "" }, { "docid": "7a02b7d2ab0328727df87ddd5f8ada0b", "score": "0.61531186", "text": "def setLightSwitch(self, _state=False):\n if _state == True:\n render.setLight(self.lightNP)\n elif _state == False:\n render.clearLight(self.lightNP)", "title": "" }, { "docid": "5f635818882d5a51f20e22549d4d2e94", "score": "0.6141362", "text": "def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"", "title": "" }, { "docid": "adafe116ec0d301dc5ac7c075f3198b8", "score": "0.6112937", "text": "def set_lanzar(self):\n self.lanzar = True", "title": "" }, { "docid": "7281132bd18074be4c42c68f9a8166cb", "score": "0.61011404", "text": "def on(self):\n self._set_state(on=True)", "title": "" }, { "docid": "ddbb86a098db7fffb80527a5f0fab92a", "score": "0.6083419", "text": "def laser_state():\n global laser\n check_laser()\n req_data = request.get_json()\n if req_data != None:\n on_off_state = bool(req_data[\"on_off\"])\n intensity = int(req_data[\"intensity\"])\n if on_off_state: #if value is set to on, intensity jumps to 100%\n laser.on()\n try:\n print (\"laser to \" + str (intensity))\n laser.power = intensity\n except ValueError:\n print (\"Bad laser power received.\")\n if not on_off_state: #state switches to on when value changed.\n laser.off()\n lstate = {\"on_off\": laser.state(), \"intensity\": laser.power }\n return jsonify(lstate)", "title": "" }, { "docid": "db0ead947dea9053259249eb08fae114", "score": "0.6075969", "text": "def set_force(state):\n global _FORCE\n _FORCE = bool(state)", "title": "" }, { "docid": "4b799f83f9d3987b0e00ec24d2b8e2e5", "score": "0.6068745", "text": "def toggle(self, **kwargs):\n self.on = False if self.on else True", "title": "" }, { "docid": "25c9be56ce5e2877d2d4074910e7c0b4", "score": "0.60344124", "text": "def set_light_on(self):\n self._light = \"ON\"", "title": "" }, { "docid": "ac2f603c1abcd6112eae7d51d96c525d", "score": "0.60305893", "text": "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "title": "" }, { "docid": "df9bb5a244e431d3f1b80dd831b6e073", "score": "0.60222214", "text": "def setModifyState(self, bool):\n self._canShowModRect = bool\n if bool == False:\n self._modRect.hide()", "title": "" }, { "docid": "1b73c9e803f28825abfed7a0fba766c2", "score": "0.60218036", "text": "def changeState(self):\n if self._state:\n 
self._state = False\n else:\n self._state = True\n return self._state", "title": "" }, { "docid": "9174530dceee9b70a2f1b3ffd0f1770c", "score": "0.6017718", "text": "def toggle(self):\n if self._state in [STATE_OFF, STATE_IDLE, STATE_STANDBY]:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF", "title": "" }, { "docid": "01a692fa0acd67cfdd18c4178269c424", "score": "0.6013204", "text": "def turn_on(self, **kwargs):\n self._state = True\n\n # Make initial update\n self.update_switch(self._initial_transition)\n\n self.schedule_update_ha_state()", "title": "" }, { "docid": "a2f55fd2c6a0311c51f6fc897e61d6b5", "score": "0.59977084", "text": "def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)", "title": "" }, { "docid": "a82c4b831be24e4081010313ffce5586", "score": "0.5975641", "text": "def set_is_watering(valve: Valve, value: bool) -> None:\n valve.is_watering = value", "title": "" }, { "docid": "f8d1171a0487f45886c970659d675dc7", "score": "0.5955317", "text": "def lightning_turnon(self):\n self.turnOn()", "title": "" }, { "docid": "746b5564201bed66fb9b65c0af171271", "score": "0.59478045", "text": "def set_light_mode(self, is_lid):\n raise NotImplementedError()", "title": "" }, { "docid": "8dd3603d7fe9a136b495f25a968e7303", "score": "0.59350723", "text": "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "title": "" }, { "docid": "97f2ce1489ac32ee21df29fe2bcef063", "score": "0.5907982", "text": "def set_light_status(self, new_light_status):\n if type(new_light_status) != bool:\n self._logger.write(\"Error! new_light_status should be of type bool\")\n try:\n self._light_status = new_light_status\n except Exception as e:\n self._logger.write(\"Error! could not set light status\")", "title": "" }, { "docid": "821f7f56c021b64f62552f2ac7590eef", "score": "0.5875432", "text": "def turnLightingSystemOn():\n dislin.light('ON')", "title": "" }, { "docid": "d169a47448560d8880dca03c1433f59f", "score": "0.5869379", "text": "def SetToggle(self, flag):\n\n self.up = not flag\n self.Refresh()", "title": "" }, { "docid": "b3f72a9a83a2877a34fc74bcfc7e6161", "score": "0.5864173", "text": "def set_state(self, state: int):", "title": "" }, { "docid": "c60e19a92e99d81fad420686cddd7d3c", "score": "0.58335334", "text": "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "title": "" }, { "docid": "79411e0fcfd4005599328bd8180c0cdc", "score": "0.5832325", "text": "def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True", "title": "" }, { "docid": "a2742c94860a2201933f1e16335f0523", "score": "0.5831255", "text": "def _set_light(self, new_state):\n try:\n self._device.lights = new_state\n except requests.Timeout:\n _LOGGER.error(\"Time out setting %s light to %s\", self.entity_id, new_state)\n return\n\n self._light_on = new_state == ON_STATE\n self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY\n self.async_write_ha_state()", "title": "" }, { "docid": "9ef2196f8fc289a3fb520f4f0c76dceb", "score": "0.5826154", "text": "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state = False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = 
json_data['state']\n\n self._dimmer = 0\n\n self._state = state == 'on'", "title": "" }, { "docid": "859eb227d5c6f2c219a0ac6d41c30247", "score": "0.5820012", "text": "def lase_on_power_up(self):\r\n return self._lase_on_power_up", "title": "" }, { "docid": "364b27e8a83d7da1ad6a46f119b7f258", "score": "0.58160895", "text": "def set_light_off(self):\r\n self._light = \"OFF\"", "title": "" }, { "docid": "87f18372701c17a1ce667ff12ab1be18", "score": "0.5800379", "text": "def set_status(self, learning=False, testing=False):\n self.learning = learning\n self.testing = testing", "title": "" }, { "docid": "a544f0e0558bac69c7093f5af9f937c9", "score": "0.5790671", "text": "def set_flag(self, new):\n self.flag = new", "title": "" }, { "docid": "3212ac7c1ff2013248f20a0f827f264f", "score": "0.5787344", "text": "def set_state(self):\n self.able = not self.able\n self.save()", "title": "" }, { "docid": "8c27b51248d58a5cc0a4b9ec2befc3cf", "score": "0.57796675", "text": "def on(self):\n if self._hidden:\n self.off()\n return\n if self._state != True:\n self.log_state_change('+')\n self._state = True", "title": "" }, { "docid": "94d4a87456c5c316c26193d8a97b78c8", "score": "0.57641834", "text": "def setLanding(self, land: bool):\n\t\tself._landing = land", "title": "" }, { "docid": "119a95ba14f15ad2d5587f97c233dec2", "score": "0.5755371", "text": "def toggle(self):\r\n self._variable.set(not self._variable.get()) \r\n self._activate()", "title": "" }, { "docid": "b2116f85f7059282595aeec12019451b", "score": "0.57505953", "text": "def standby(self, state):\r\n def toggle_gui(state):\r\n \"\"\"Toggles GUI components when standby is pressed\"\"\"\r\n self.mainWidget.standbyPushButton.setHidden(state)\r\n self.mainWidget.autoRecordPushButton.setHidden(state)\r\n self.mainWidget.recordPushButton.setVisible(state)\r\n self.mainWidget.recordPushButton.setEnabled(state)\r\n self.mainWidget.pauseToolButton.setVisible(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)\r\n\r\n if (state): # Prepare the pipelines\r\n if self.load_backend():\r\n toggle_gui(True)\r\n self.controller.pause()\r\n self.mainWidget.statusLabel.setText(\"{} {} --- {} \".format(self.freeSpaceString,\r\n get_free_space(self.config.videodir),\r\n self.readyString))\r\n else:\r\n toggle_gui(False)\r\n self.mainWidget.standbyPushButton.setChecked(False)\r\n else:\r\n toggle_gui(False)\r\n self.mainWidget.standbyPushButton.setChecked(False)\r\n\r\n self.mainWidget.playPushButton.setVisible(False)\r\n self.mainWidget.playPushButton.setEnabled(False)", "title": "" }, { "docid": "1b9890c937c567ea38b8438113f608cf", "score": "0.5746374", "text": "def set(self, boolean):\n self._val = boolean", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", 
"score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "d369289dca0a6aa8526666bdc9030610", "score": "0.5711489", "text": "def setEnabled(*args):", "title": "" }, { "docid": "fe28050a6970162ca1c1d6495e1f181e", "score": "0.5691372", "text": "def affection_status_switch_on(self):\n self._affection_status_switch = False", "title": "" }, { "docid": "7a3136ae04a258ba016975e91a93298b", "score": "0.5672242", "text": "async def async_turn_on_off(self, state: bool) -> None:\n await self._cluster_handler.write_attributes_safe(\n {self._zcl_attribute: not state if self.inverted else state}\n )\n self.async_write_ha_state()", "title": "" }, { "docid": "b9ad20e0ccac412c221c3d189dc0f104", "score": "0.56558186", "text": "def clean_on(self, entity, attribute, old, new, kwargs):\n self.call_service(\"light/turn_on\", entity_id=\"group.all_lights\")\n self.sensor_living.set_state_boolean(False)\n self.sensor_bedroom.set_state_boolean(False)\n self.sensor_spare.set_state_boolean(False)", "title": "" }, { "docid": "81a81f27fcb42f2f2ee63e9f9b0b0a1e", "score": "0.565451", "text": "def standby(self):\n self._state = STATE_STANDBY", "title": "" }, { "docid": "81a81f27fcb42f2f2ee63e9f9b0b0a1e", "score": "0.565451", "text": "def standby(self):\n self._state = STATE_STANDBY", "title": "" }, { "docid": "ec250978a8b78080cf999d2ce8729f67", "score": "0.5648565", "text": "def setEnabled(self, boo):\n if boo:\n self.mousePressEvent = self.mousePressEventEnabled\n self.mouseMoveEvent = self.mouseMoveEventEnabled\n self.mouseReleaseEvent = self.mouseReleaseEventEnabled\n else:\n self.mousePressEvent = self.notEnabledDummy\n self.mouseMoveEvent = self.notEnabledDummy\n self.mouseReleaseEvent = self.notEnabledDummy", "title": "" }, { "docid": "01598590c653402aa52457a2121365b1", "score": "0.5644633", "text": "def set_slam_type(self, mode):\n if mode == LandmarkMode.RANSAC:\n self.slam.naive = False\n self.slam.slam_mode = SlamMode.LANDMARKS\n self.slam.landmark_mode = LandmarkMode.RANSAC\n elif mode == LandmarkMode.HOUGH:\n self.slam.naive = False\n self.slam.slam_mode = SlamMode.LANDMARKS\n self.slam.landmark_mode = LandmarkMode.HOUGH\n elif mode == SlamMode.SCAN_MATCHING:\n self.slam.naive = False\n self.slam.slam_mode = SlamMode.SCAN_MATCHING\n else:\n self.slam.naive = True", "title": "" }, { "docid": "e03d6a785fa5f738c887fd05f860c141", "score": "0.563548", "text": "def lamps(self, **lamp_parameters):\n self._set_state(lamp_parameters.items())", "title": "" }, { "docid": "44e5630acea8f2483fc4b9547be2492c", "score": "0.5625767", "text": "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "title": "" }, { "docid": "299a57662ea811f01289b484b163bfd4", "score": "0.5622658", "text": "def SetStandbyLPMode(self):\n handler = self.get_command_object(\"SetStandbyLPMode\")\n handler()", "title": "" }, { "docid": "25d120c9e9149b590eaf99c8c223011a", "score": "0.5618152", "text": 
"def set_bool_value(self, event):\n\n self.undo_add()\n\n key_list = list(self.patch.engine.misc_data.keys())\n key = key_list[self.selected_index]\n data = self.patch.engine.misc_data[key]\n\n if self.ValueEnabled.GetValue():\n self.patch.misc[key] = data['on']\n else:\n self.patch.misc[key] = data['off']\n\n self.is_modified(True)\n self.misclist_update_row(self.selected_index)", "title": "" }, { "docid": "abc4a641af4da378c6d835f148b1971a", "score": "0.56139493", "text": "def toggle(self):\n self._variable.set(not self._variable.get())\n self._activate()", "title": "" }, { "docid": "b5e8b8bc7f165da92de8d647f2918bc1", "score": "0.56089425", "text": "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "title": "" }, { "docid": "b30163807db0075dbab4d6f996ea9cb6", "score": "0.5608629", "text": "def set_state(self, state=0):\r\n return self._arm.set_state(state=state)", "title": "" }, { "docid": "2e90601ac06f1ded7687c8679336ae01", "score": "0.56029075", "text": "def set_light_off(self):\n self._light = \"OFF\"", "title": "" }, { "docid": "561939c7b3eeb46ceca7867e2deae899", "score": "0.55861795", "text": "def set_automatic(self, mode):\n self.slam.controlled = not mode\n if mode:\n self.slam.resume()", "title": "" }, { "docid": "294d9bf66eb5fad28ba94192def1d2dc", "score": "0.55838716", "text": "def set_learning_phase(self, train):\n import keras.backend as k\n\n if isinstance(train, bool):\n self._learning_phase = train\n k.set_learning_phase(int(train))", "title": "" }, { "docid": "d0bb386dd14f8c1e6e9096bbba63ccd3", "score": "0.5573867", "text": "def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()", "title": "" }, { "docid": "ec6c1d33ac07f42283433d87a807f808", "score": "0.55726", "text": "def in_fire(self):\n Fire=False\n if self.state>0 and self.state<=5:\n Fire=True\n return Fire", "title": "" }, { "docid": "801b16af667fd45b02b51818da86e6bb", "score": "0.55695194", "text": "def stop_watching(self, entity, attribute, old, new, kwargs):\n self.sensor_living.set_state_boolean(True)", "title": "" }, { "docid": "cd8b1697934b7f7f1994c2a944b83741", "score": "0.5566946", "text": "def fullLatticeCheckChanged(self, val):\n if val == QtCore.Qt.Unchecked:\n self.writeFullLattice = False\n else:\n self.writeFullLattice = True", "title": "" }, { "docid": "8c18ca1ca50a98720e8e2afb89b6b647", "score": "0.5566768", "text": "def _boolean_callback(self, *args):\n\t\tnew_value = args[1].get_boolean()\n\n\t\targs[0].set_state(GLib.Variant.new_boolean(new_value))\n\t\tself.window.set_picture_title()\n\t\tself.get_active_pane().hide_options_menu()", "title": "" }, { "docid": "301ebbbcf3af11838e2454e49204f025", "score": "0.55604386", "text": "def turn_on(self, **kwargs):\n self._brightness = 100\n self._state = 'on'\n #self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n #self._light.turn_on()\n _LOGGER.info(\"turn_on() is called\")", "title": "" }, { "docid": "cf28826cd55f5ec7d95473b225ac680a", "score": "0.5558132", "text": "def toggle_dropable(self,new_bool):\n self.dropablee = new_bool", "title": "" }, { "docid": "031c04b0fe93c56919a035115b7ce4c4", "score": "0.55578405", "text": "def light_set(secret):\n if not access(request.url, secret): return \"No.\"\n\n Light.refresh()\n user = access(request.url, secret)\n if not user: return \"No.\"\n tell_sophie(f\"{user} sรฅ status pรฅ lys\")\n\n target_light = request.args.get(\"target_light\")\n target_status = str(request.args.get(\"target_status\")).lower() in [\"on\", \"true\"]\n light = 
Light.find_light(target_light)\n\n if light:\n tell_sophie(f\"{user}: satt {target_light} til {target_status}\")\n light.set_state(target_status)\n \n return \"Skrudde lyset pรฅ\" if target_status else \"Skrudde lyset av\"\n \n return \"Fant ikke lyset.\"", "title": "" }, { "docid": "444a8735b863cf0e8786e5da10795c2b", "score": "0.5556416", "text": "def __setstate__(self, state):\n return None", "title": "" }, { "docid": "a06ce15136ea8297cee28aa975b913c1", "score": "0.5550863", "text": "def setTrue(self):\n self.cond = CT.TRUE\n self.left = self.right = None\n self.z3 = BoolSort().cast(True)\n self.cleaned = self.Z3Simplified = self.customSimplified = self.checked = True\n self.customSimplifiedValue = CE.TRUE", "title": "" }, { "docid": "3ff82e5ceaf9755b1d4e729213534806", "score": "0.554706", "text": "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "title": "" }, { "docid": "3352c5a7e5b7003c661930860f1d7d39", "score": "0.5545557", "text": "def set_stateless(stateless):\n if stateless is None:\n raise TypeError(\"stateless is null!\")\n if str(stateless) == \"True\":\n AceQLHttpApi.set_stateless(True)\n else:\n AceQLHttpApi.set_stateless(False)", "title": "" }, { "docid": "dd0c311b39e8ebfaf73a2cdd6b878652", "score": "0.55419946", "text": "def manualState(self, tfid, state):\n self.trafficLights.get(int(tfid)).setState(state)\n self.trafficLights.get(int(tfid)).updateState()", "title": "" }, { "docid": "acd8ad255c4aa460038e56aff6520176", "score": "0.55415475", "text": "def set_state(self, state):\n self.state = state", "title": "" }, { "docid": "dcf044e27e211e25dbb3dbe662c882c5", "score": "0.5532123", "text": "def restoreState(self, state):\n self.setVoltage(state['voltage'])\n if state['output'] == True:\n self.turnOn()\n else:\n self.turnOff()", "title": "" }, { "docid": "aa8bb242c886b4b8e89b02c476284649", "score": "0.55223405", "text": "def autoExposureChk(self, state):\n if state == Qt.Checked and self.kinect.kinectConnected == True:\n self.kinect.toggleExposure(True)\n else:\n self.kinect.toggleExposure(False)", "title": "" }, { "docid": "de2aa87c047e0fff30f221ccec735be2", "score": "0.55178416", "text": "def turn_on(self) -> None:\n self._state = self._player.turn_on()", "title": "" }, { "docid": "7c37fc7f84b53c00bf8866b159a17941", "score": "0.55169743", "text": "def set_test_mode(self, on_off):\n if type(on_off) != bool:\n print(\"test mode must be a bool\")\n return\n self.test_mode = on_off", "title": "" }, { "docid": "83b80316749d70bfd6f74dd66f97ef41", "score": "0.55133456", "text": "def _set_villain(self):\n\t\tself.villain_one = donkey.Donkey(100 , constants.THREE_Y,0,500)\n\t\tself.active_sprite_list.add(self.villain_one)", "title": "" }, { "docid": "529edae2ef2c2e6181a74e8ee97dd697", "score": "0.5503255", "text": "def turn_eht_on(self):\n raise NotImplementedError", "title": "" }, { "docid": "019988ab46009f659d5a7cfb17058ff9", "score": "0.55011344", "text": "def toggle(self, env, pos):\n return False", "title": "" }, { "docid": "58222001785357a75a0b8c07832dbf70", "score": "0.54997957", "text": "def cambiar_celeste(self):\r\n self.celeste.setDisabled(True)", "title": "" }, { "docid": "7d548de1e9f2b917ca10b468aff6d8ec", "score": "0.549965", "text": "def __setstate__(self, state):\n l, bl = state\n self.layers = l\n self.best_loss = bl", "title": "" }, { "docid": "f216161a9f1e72528c4f5b53a4f6fffa", "score": "0.54974633", "text": "def action_rapel(self):\n self.state = 'rapel'\n self.state_rapel = '1'", "title": "" }, { "docid": 
"ec99f92646792faa7febb95026f4deb0", "score": "0.54931945", "text": "def setIdle(self, flag):\n if( flag ) :\n if( self.idleWork == -1) :\n self.idleWork = gobject.idle_add( self.idle_callback )\n else :\n if( self.idleWork != -1) :\n gobject.source_remove( self.idleWork )\n self.idleWork = -1", "title": "" }, { "docid": "864dca7d3878095c070746584dc88b98", "score": "0.54823655", "text": "def light(self, value: bool | int, /) -> None:", "title": "" }, { "docid": "58de0b82ef89017bbf123026202ef84f", "score": "0.54809093", "text": "def toggle(self) -> None:\n ...", "title": "" }, { "docid": "fb577402874e1e42b6e7c6cb3c3338bc", "score": "0.5469964", "text": "def _activate(self):\n # Increase the speed of the ball(s) slightly now the player has the\n # advantage of the laser.\n self.game.paddle.transition(LaserState(self.game.paddle, self.game))\n for ball in self.game.balls:\n ball.base_speed += 1", "title": "" }, { "docid": "80bb89bdf2bde35ab7739aaf44be4812", "score": "0.54677314", "text": "def Set3State(self, allow):\r\n\r\n if self._type != 1:\r\n return False\r\n\r\n self._is3State = allow\r\n return True", "title": "" } ]
0a3f8726bab73ebb368b400097bed051
Initialize the networks and Ops. Assume discrete space for dqn, so action dimension will always be action_space.n
[ { "docid": "c29ac42e89f0c747d00e3c9f5352047d", "score": "0.0", "text": "def init_opt(self):\n action_dim = self.env_spec.action_space.n\n obs_dim = self.env_spec.observation_space.flat_dim\n\n self.episode_rewards = []\n self.episode_qf_losses = []\n\n with tf.name_scope(self.name, \"input\"):\n action_t_ph = tf.compat.v1.placeholder(\n tf.int32, None, name='action')\n reward_t_ph = tf.compat.v1.placeholder(\n tf.float32, None, name='reward')\n done_t_ph = tf.compat.v1.placeholder(tf.float32, None, name='done')\n action = tf.one_hot(action_t_ph, action_dim)\n next_obs = tf.compat.v1.placeholder(\n tf.float32, (None, obs_dim), name='next_observations')\n\n jole_obs = tf.compat.v1.placeholder(\n tf.float32, (None, obs_dim), name='jole_input_observations')\n jole_actions_discrete = tf.compat.v1.placeholder(\n tf.int32, None, name='jole_input_action')\n jole_actions = tf.one_hot(jole_actions_discrete, action_dim)\n jole_clip_return_min = tf.compat.v1.placeholder(\n tf.float32, shape=(), name=\"jole_clip_return_min\")\n jole_clip_return_max = tf.compat.v1.placeholder(\n tf.float32, shape=(), name=\"jole_clip_return_max\")\n use_jole = tf.compat.v1.placeholder(\n tf.float32, shape=(), name=\"use_jole\")\n obs = self.qf.input\n\n # set up jole\n with tf.name_scope(self.name, \"jole\"):\n #get Q(s,a)\n jole_qval = tf.reduce_sum(self.qf.get_qval_sym(jole_obs, name='jole_q_value') * jole_actions, axis=1)\n # get predicted next observations and actions\n jole_predicted_next_obs = tf.reshape(tf.reduce_sum(tf.reshape(self.obs_model.get_fval_sym(jole_obs, name='jole_obs_value'),\n shape=(-1, action_dim, obs_dim)) * tf.expand_dims(jole_actions,-1), axis=1),shape=(-1, obs_dim))\n jole_predicted_reward = tf.reduce_sum(self.reward_model.get_fval_sym(jole_obs, name='jole_reward_value')*jole_actions, axis=1)\n jole_predicted_terminal = self.get_terminal_status(jole_predicted_next_obs)\n \n #jole_predicted_terminal = 0\n #jole_predicted_terminal = tf.argmax(self.terminal_model.get_fval_sym(jole_predicted_next_obs, name='jole_terminal_value'), axis=-1)\n\n # r + Q'(s', argmax_a(Q(s', _)) - Q(s, a)\n if self.double_q:\n jole_target_qval_with_online_q = self.qf.get_qval_sym(\n jole_predicted_next_obs, name=\"jole_next_obs_value\")\n jole_future_best_q_val_action = tf.argmax(\n jole_target_qval_with_online_q, 1)\n jole_future_best_q_val = tf.reduce_sum(\n self.target_qf.get_qval_sym(jole_predicted_next_obs, name=\"jole_next_obs_value\") * tf.one_hot(\n jole_future_best_q_val_action, action_dim),\n axis=1)\n else:\n # r + max_a(Q'(s', _)) - Q(s, a)\n jole_future_best_q_val = tf.reduce_max(\n self.target_qf.get_qval_sym(jole_predicted_next_obs, name=\"jole_next_obs_value\"), axis=1)\n #jole_done_t_ph = tf.condition\n jole_q_best_masked = (1.0 - tf.cast(jole_predicted_terminal, tf.float32)) * jole_future_best_q_val\n #jole_q_best_masked = jole_future_best_q_val\n # if done, it's just reward\n # else reward + discount * future_best_q_val\n jole_target_q_values_before_clip = (jole_predicted_reward + self.discount * jole_q_best_masked)\n jole_target_q_values = jole_target_q_values_before_clip#tf.clip_by_value(jole_target_q_values_before_clip, jole_clip_return_min, jole_clip_return_max)\n\n jole_loss = tf.reduce_mean(\n tf.compat.v1.squared_difference(jole_qval, jole_target_q_values))\n \n self.f_cal_jole_loss = tensor_utils.compile_function(\n inputs=[jole_obs, jole_actions_discrete, jole_clip_return_min, jole_clip_return_max, use_jole],\n outputs=[jole_loss, jole_qval, jole_target_q_values, 
jole_target_q_values_before_clip])\n\n #train the env model\n with tf.name_scope(self.name, \"env_model\"):\n\n predicted_next_obs = tf.reduce_sum(tf.reshape(self.obs_model.get_fval_sym(obs, name='obs_value'),\n shape=(-1, action_dim, obs_dim)) * tf.expand_dims(action, -1), axis=1)\n predicted_reward = tf.reduce_sum(self.reward_model.get_fval_sym(obs, name='reward_value')*action, axis=1)\n\n #change to predict the delta of s\n original_obs_model_loss = tf.reduce_mean(\n tf.compat.v1.squared_difference(next_obs, predicted_next_obs))\n obs_model_loss = original_obs_model_loss + use_jole * 0.0001 * jole_loss\n original_reward_model_loss = tf.reduce_mean(\n tf.compat.v1.squared_difference(reward_t_ph, predicted_reward))\n reward_model_loss = original_reward_model_loss + use_jole * 0.0001 * jole_loss\n\n predicted_terminal = self.terminal_model.get_fval_sym(next_obs, name=\"terminal_value\")\n terminal_model_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=predicted_terminal, labels=tf.cast(tf.squeeze(done_t_ph), dtype=tf.int32))\n\n terminal_model_accurate = tf.reduce_sum(1 - tf.abs(tf.argmax(predicted_terminal, axis=-1) - tf.cast(tf.squeeze(done_t_ph), dtype=tf.int64)))\n\n with tf.name_scope('minimize_obs_model_loss'):\n obs_train_op = self.obs_model_optimizer(\n self.obs_model_lr, name='ObsModelOptimizer').minimize(\n obs_model_loss, var_list=self.obs_model.get_trainable_vars())\n reward_train_op = self.reward_model_optimizer(\n self.reward_model_lr, name='RewardModelOptimizer').minimize(\n reward_model_loss, var_list=self.reward_model.get_trainable_vars())\n terminal_train_op = self.terminal_model_optimizer(\n self.terminal_model_lr, name='TerminalModelOptimizer').minimize(\n terminal_model_loss, var_list=self.terminal_model.get_trainable_vars())\n\n self.f_train_obs_model = tensor_utils.compile_function(\n inputs=[next_obs, obs, action_t_ph, jole_obs, jole_actions_discrete, jole_clip_return_min, jole_clip_return_max, use_jole],\n outputs=[obs_train_op, obs_model_loss, original_obs_model_loss])\n self.f_train_reward_model = tensor_utils.compile_function(\n inputs=[reward_t_ph, obs, action_t_ph, jole_obs, jole_actions_discrete, jole_clip_return_min, jole_clip_return_max, use_jole],\n outputs=[reward_train_op, reward_model_loss, original_reward_model_loss])\n self.f_train_terminal_model = tensor_utils.compile_function(\n inputs=[next_obs, done_t_ph],\n outputs=[terminal_train_op, terminal_model_loss, terminal_model_accurate])\n self.f_obs_model_predict = tensor_utils.compile_function(\n inputs=[obs, action_t_ph],\n outputs=[predicted_next_obs-obs, predicted_next_obs])\n self.f_reward_model_predict = tensor_utils.compile_function(\n inputs=[obs, action_t_ph],\n outputs=[predicted_reward])\n self.f_terminal_model_predict = tensor_utils.compile_function(\n inputs=[next_obs],\n outputs=[predicted_terminal, tf.argmax(predicted_terminal, axis=-1)])\n\n sepe_predicted_next_obs = tf.reduce_sum(tf.reshape(self.sepe_obs_model.get_fval_sym(obs, name='obs_value'),\n shape=(-1, action_dim, obs_dim)) * tf.expand_dims(action, -1), axis=1)\n sepe_predicted_reward = tf.reduce_sum(self.sepe_reward_model.get_fval_sym(obs, name='reward_value')*action, axis=1)\n #change to predict the delta of s\n sepe_obs_model_loss = tf.reduce_mean(\n tf.compat.v1.squared_difference(next_obs, sepe_predicted_next_obs))\n sepe_reward_model_loss = tf.reduce_mean(\n tf.compat.v1.squared_difference(reward_t_ph, sepe_predicted_reward))\n\n with tf.name_scope('minimize_sepe_obs_model_loss'):\n sepe_obs_train_op = 
self.obs_model_optimizer(\n self.obs_model_lr, name='SepeObsModelOptimizer').minimize(\n sepe_obs_model_loss, var_list=self.sepe_obs_model.get_trainable_vars())\n sepe_reward_train_op = self.reward_model_optimizer(\n self.reward_model_lr, name='SepeRewardModelOptimizer').minimize(\n sepe_reward_model_loss, var_list=self.sepe_reward_model.get_trainable_vars())\n\n f_train_sepe_obs_model = tensor_utils.compile_function(\n inputs=[next_obs, obs, action_t_ph],\n outputs=[sepe_obs_train_op, sepe_obs_model_loss])\n f_train_sepe_reward_model = tensor_utils.compile_function(\n inputs=[reward_t_ph, obs, action_t_ph],\n outputs=[sepe_reward_train_op, sepe_reward_model_loss])\n\n self.f_train_sepe_obs_model = f_train_sepe_obs_model\n self.f_train_sepe_reward_model = f_train_sepe_reward_model\n\n # Copy the parameter of seperate env models when necessary\n with tf.name_scope('copy_sepe_env_models'):\n copy_sepe_obs_model_ops = tensor_utils.get_target_ops(\n self.sepe_obs_model.get_global_vars(),\n self.obs_model.get_global_vars())\n\n copy_sepe_reward_model_ops = tensor_utils.get_target_ops(\n self.sepe_reward_model.get_global_vars(),\n self.reward_model.get_global_vars())\n\n self.f_copy_sepe_obs_model = tensor_utils.compile_function(\n inputs=[], outputs=copy_sepe_obs_model_ops)\n self.f_copy_sepe_reward_model = tensor_utils.compile_function(\n inputs=[], outputs=copy_sepe_reward_model_ops)\n\n # build q networks\n with tf.name_scope(self.name, 'DQN'):\n with tf.name_scope('update_ops'):\n target_update_op = tensor_utils.get_target_ops(\n self.qf.get_global_vars(),\n self.target_qf.get_global_vars())\n\n self._qf_update_ops = tensor_utils.compile_function(\n inputs=[], outputs=target_update_op)\n\n with tf.name_scope('td_error'):\n # Q-value of the selected action\n q_selected = tf.reduce_sum(\n self.qf.q_vals * action, # yapf: disable\n axis=1)\n\n # r + Q'(s', argmax_a(Q(s', _)) - Q(s, a)\n if self.double_q:\n target_qval_with_online_q = self.qf.get_qval_sym(\n self.target_qf.input, self.qf.name)\n future_best_q_val_action = tf.argmax(\n target_qval_with_online_q, 1)\n future_best_q_val = tf.reduce_sum(\n self.target_qf.q_vals * tf.one_hot(\n future_best_q_val_action, action_dim),\n axis=1)\n else:\n # r + max_a(Q'(s', _)) - Q(s, a)\n future_best_q_val = tf.reduce_max(\n self.target_qf.q_vals, axis=1)\n\n q_best_masked = (1.0 - done_t_ph) * future_best_q_val\n # if done, it's just reward\n # else reward + discount * future_best_q_val\n target_q_values = (reward_t_ph + self.discount * q_best_masked)\n\n # td_error = q_selected - tf.stop_gradient(target_q_values)\n loss = tf.reduce_mean(\n tf.compat.v1.squared_difference(tf.stop_gradient(target_q_values), q_selected))\n #loss = tf.compat.v1.losses.huber_loss(\n # q_selected, tf.stop_gradient(target_q_values))\n #loss = tf.reduce_mean(loss)\n loss += use_jole * 0.2 * jole_loss\n\n with tf.name_scope('optimize_ops'):\n optimizer = self.qf_optimizer(self.qf_lr)\n if self.grad_norm_clipping is not None:\n gradients = optimizer.compute_gradients(\n loss, var_list=self.qf.get_trainable_vars())\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(\n grad, self.grad_norm_clipping), var)\n optimize_loss = optimizer.apply_gradients(gradients)\n else:\n optimize_loss = optimizer.minimize(\n loss, var_list=self.qf.get_trainable_vars())\n\n self._train_qf = tensor_utils.compile_function(\n inputs=[\n self.qf.input, action_t_ph, reward_t_ph, done_t_ph,\n self.target_qf.input, jole_obs, jole_actions_discrete, 
use_jole, jole_clip_return_max, jole_clip_return_min\n ],\n outputs=[loss, optimize_loss, q_selected, target_q_values])\n\n for variable in tf.trainable_variables():\n print(variable)", "title": "" } ]
[ { "docid": "1e53c3d8b3fa5a4ff22e11b3435270ca", "score": "0.7468827", "text": "def init(self, net_dim, state_dim, action_dim):", "title": "" }, { "docid": "bb193d6ac445ca2d0ed5820ee42a2a9a", "score": "0.68246925", "text": "def initialize_models(self):\n # this information might be useful\n state_shape = list(self.env.observation_space.shape)\n img_height, img_width, n_channels = state_shape\n num_actions = self.env.action_space.n\n\n ##############################################################\n ################ YOUR CODE HERE (2 lines) ##################\n self.q_network = nn.Linear(img_height*img_width*n_channels*self.config.state_history, num_actions) # Check what other inputs are needed to the linear layer\n self.target_network = nn.Linear(img_height*img_width*n_channels*self.config.state_history, num_actions)\n # self.target_network = copy.deepcopy(self.q_network)\n #########################################################WWWWWWWWWWW#####\n ######################## END YOUR CODE #######################", "title": "" }, { "docid": "53ac0fe59e45f839cc5f62afc7e7c5dc", "score": "0.6817743", "text": "def set_up_discrete_action_space(self):\n self.action_list = [[self.torque] *\n self.action_dim, [0.0] * self.action_dim]\n self.action_space = gym.spaces.Discrete(len(self.action_list))\n self.setup_keys_to_action()", "title": "" }, { "docid": "ad34e0fcf0a8641532413c1d4ec8ac3a", "score": "0.67542005", "text": "def __init__(self,\n observation_space,\n action_space,\n hiddens=[16, 16],\n seed=None,\n lr=5e-4,\n gamma=1.0,\n batch_size=None,\n **kwargs):\n BaseAgent.__init__(**locals())\n\n # Declaring for readibility\n batch_size = self._batch_size\n obs_shape = self._observation_space.shape\n obs_dtype = self._observation_space.dtype\n\n act_shape = self._action_space.shape\n act_dtype = self._action_space.dtype\n num_actions = self._action_space.n\n\n # ================================================================\n # Input nodes of the graph, obervations, actions\n # and hyperparameters, aka tf.placeholders\n # ================================================================\n\n with tf.variable_scope('dqn_vars', reuse=None):\n self.obs_input_node = tf.placeholder(shape=(batch_size, ) + obs_shape,\n dtype=obs_dtype,\n name=\"observation_input\")\n\n self.obs_input_node_target_net = tf.placeholder(shape=(batch_size, ) + obs_shape,\n dtype=obs_dtype,\n name=\"observation_input_target_net\")\n\n #Tensorflow shapes XDDDDDDD\n # https://stackoverflow.com/questions/46940857/what-is-the-difference-between-none-none-and-for-the-shape-of-a-placeh\n self.action = tf.placeholder(shape=[None], dtype=tf.int64, name=\"action_input\")\n self.reward = tf.placeholder(shape=[None], dtype=tf.float32, name=\"reward_input\")\n\n self.done = tf.placeholder(tf.float32, [None], name=\"done\")\n self.importance_sample_weights = tf.placeholder(tf.float32, [None], name=\"weights\")\n\n # ================================================================\n # Here we construct our action-value function Q\n # this will be an MLP, no CNN needed\n # ================================================================\n\n self.q_values = q_mlp(hiddens,\n self.obs_input_node,\n num_actions,\n scope='action_value_function')\n\n self.q_mlp_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope=tf.get_variable_scope().name +\n \"/action_value_function\")\n\n # ================================================================\n # Here we construct our target action-value function Q\n # 
================================================================\n\n self.q_values_target = q_mlp(hiddens,\n self.obs_input_node_target_net,\n num_actions,\n scope='action_value_function_target')\n\n self.q_mlp_target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope=tf.get_variable_scope().name +\n \"/action_value_function_target\")\n\n # ================================================================\n # Bellman equation\n # old estimate\n # Q_old(s_t,a_t)\n # new estimate\n # Q_new(s_t,a_t) = R(s,a_t) + gamma * max_a(Q(s_{t+1},a_t))\n # Objective is to minimize the squared error of the difference\n # between the old and new estimates\n # the difference also mentioned in the literature as td_error(0)\n # the target q_function has 2 connotations, one is the target in\n # supervised learning, the second is the TD target to update the value\n # function for the old state (The TD target)\n # https://en.wikipedia.org/wiki/Temporal_difference_learning\n # ================================================================\n\n # old estimate\n # Q_old(s_t,a_t)\n self.q_value_old = tf.reduce_sum(self.q_values * tf.one_hot(self.action, num_actions),\n 1)\n\n # new estimate\n # Q_new(s_t,a_t) = R(s,a_t) + max_a(Q(s_{t+1},a_t))\n\n # max_a(Q(s_{t+1},a_t)\n self.q_target_max = tf.reduce_max(self.q_values_target, 1)\n self.q_target_max = (1.0 - self.done) * self.q_target_max\n # Q_new(s_t,a_t) = R(s,a_t) + max_a(Q(s_{t+1},a_t))\n self.q_value_new = self.reward + self._gamma * self.q_target_max\n\n # td_error TD(0) = Q_old - Q_new\n self.td_error = self.q_value_old - tf.stop_gradient(self.q_value_new)\n self.errors = huber_loss(self.td_error)\n # self.errors = 0.5 * tf.square(self.td_error)\n # mean squared td_erors = (1/2) * (TD(0))\n\n #TODO: we could use huber_loss\n # we minimize the mean of these weights, unless weights are assigned\n # to this errors, for now, will not weight samples...\n # self.weighted_error = tf.reduce_mean(\n # self.importance_sample_weights * self.errors)\n\n self.weighted_error = tf.reduce_mean(self.errors)\n\n #TODO: gradient normalization is left as an additional exercise\n optimizer = tf.train.AdamOptimizer(learning_rate=self._lr)\n self.optimize = optimizer.minimize(self.weighted_error, var_list=self.q_mlp_vars)\n\n # ================================================================\n # Pointer update q_mlp_target_vars with q_mlp_vars\n # ================================================================\n\n self.q_update_target_vars = q_target_update(self.q_mlp_vars, self.q_mlp_target_vars)\n # ================================================================\n # Action and exploration nodes\n # ================================================================\n # deterministic actions\n # yes, there is a difference between () and [None], [None] is for\n # 1-D arrays, () is for a single scalar value.\n # https://stackoverflow.com/questions/46940857/what-is-the-difference-between-none-none-and-for-the-shape-of-a-placeh\n # yes this is actually interesting\n self.argmax_q_values = tf.argmax(self.q_values, axis=1)\n self.stochastic = tf.placeholder(tf.bool, (), name=\"stochastic\")\n self.new_epsilon = tf.placeholder(tf.float32, (), name=\"n_epsilon\")\n self.epsilon = tf.get_variable(\"epsilon\", (), initializer=tf.constant_initializer(0))\n self.size_obs_batch = tf.shape(self.obs_input_node)[0]\n\n self.random_actions = tf.random_uniform(tf.stack([self.size_obs_batch]),\n minval=0,\n maxval=num_actions,\n dtype=tf.int64)\n self.chose_random = 
tf.random_uniform(\n tf.stack([self.size_obs_batch\n ]), minval=0, maxval=1, dtype=tf.float32) < self.epsilon\n self.output_actions = tf.where(self.chose_random, self.random_actions,\n self.argmax_q_values)\n self.update_new_epsilon = self.epsilon.assign(\n tf.cond(self.new_epsilon >= 0, lambda: self.new_epsilon, lambda: self.epsilon))\n\n # ================================================================\n # Finalize graph and initiate all variables\n # ================================================================\n self.initializer = tf.initializers.global_variables()\n\n get_session().graph.finalize()\n get_session().run(self.initializer)\n print(\"### agent graph finalized and ready to use!!! ###\")", "title": "" }, { "docid": "03e26025ddcebfa574544c32972faecd", "score": "0.6714426", "text": "def init(self, net_dim, state_dim, action_dim):\r\n self.action_dim = action_dim\r\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n self.cri = QNetTwinDuel(net_dim, state_dim, action_dim).to(self.device)\r\n self.cri_target = deepcopy(self.cri)\r\n self.act = self.cri\r\n\r\n self.criterion = torch.nn.SmoothL1Loss()\r\n self.cri_optimizer = torch.optim.Adam(self.act.parameters(), lr=self.learning_rate)", "title": "" }, { "docid": "674f9b02ccf9dd1a81b14431f8814a38", "score": "0.6548735", "text": "def __init__(self, obs_space_dim, act_space_dim, nodes):\n\n # Input, state, output weights\n self.obs_space_dim = obs_space_dim\n self.act_space_dim = act_space_dim\n self.nodes = nodes\n\n self.sess = tf.InteractiveSession()\n\n x = self.make_rnn()\n\n self.inputs, self.state, self.output, self.next_state, self.W1, \\\n self.W2, self.W3 = x\n\n # Initialize all vars\n self._init_vars()", "title": "" }, { "docid": "69396a7ab501be5ee0f3b6fd15713298", "score": "0.6471708", "text": "def __init__(self, nS, nA, P, isd):\n self.action_space = spaces.Discrete(nA)\n self.observation_space = spaces.Discrete(nS)\n self.nA = nA\n self.P = P\n self.isd = isd\n self.lastaction=None # for rendering", "title": "" }, { "docid": "0d71cdc5c3637802f3bbf0e7acc75714", "score": "0.64206606", "text": "def __init__(self, state_space, action_space, train_device='cpu'):\n super().__init__()\n self.train_device = train_device\n self.state_space = state_space\n self.name = 'ActorNN'\n # bp()\n self.flat_state_space = self.state_space[0] * self.state_space[1] * self.state_space[2]\n self.action_space = action_space\n self.hidden = 100\n self.state_value = 0\n self.fc_0 = torch.nn.Linear(in_features=self.flat_state_space, out_features=self.hidden)\n self.fc_1 = torch.nn.Linear(in_features=self.hidden, out_features=self.action_space)\n self.softmax = torch.nn.Softmax(dim=1)\n self.init_weights()", "title": "" }, { "docid": "c57f3f5e81a3794f8cccff8d41f6e356", "score": "0.6415781", "text": "def _initialize(self):\n common.soft_variables_update(\n self._q_network_1.variables,\n self._target_q_network_1.variables,\n tau=1.0)\n common.soft_variables_update(\n self._q_network_2.variables,\n self._target_q_network_2.variables,\n tau=1.0)\n common.soft_variables_update(\n self._actor_network.variables,\n self._target_actor_network.variables,\n tau=1.0)\n\n ##TODO: override _check_trajectory_dimensions", "title": "" }, { "docid": "f2096f7c4541e34d5afa87e7b68fc9c9", "score": "0.6409854", "text": "def __init__(self, action_space):\n BaseAgent.__init__(self, action_space=action_space)\n\n self.action_space = action_space\n\n self.actions = []\n actions_vec = 
np.load(\"./saved_files/top1000_actions.npz\")[\"actions\"]\n for i in range(actions_vec.shape[0]):\n act = action_space.from_vect(actions_vec[i])\n self.actions.append(act)\n\n self.actions = self.actions[:1000]\n self.act_num = len(self.actions)\n self.sub_ids = np.load('./saved_files/sub_id_info.npz')['sub_ids']\n self.do_nothing_action = action_space({})\n self.origin_ids = range(len(self.actions))\n\n offset = action_space.n_line\n self.action_to_sub_topo = {}\n for sub_id, sub_elem_num in enumerate(action_space.sub_info):\n self.action_to_sub_topo[sub_id] = (offset, offset + sub_elem_num)\n offset += sub_elem_num\n self.step = 0\n self.powernet_model = PowerNetModel()\n self.to_print_data = []\n\n self.last_disconnect_step = -100\n self.last_diconnect_line = None\n self.simulation_times = 0", "title": "" }, { "docid": "512c6a27cd8308d9173a4a89353996ed", "score": "0.6383129", "text": "def __init__(self, sess, observation_space, action_space, name, batch_size):\n self.obs_space = observation_space\n self.act_space = action_space\n self.n_act = reduce(lambda x, y: x * y, tuple([self.act_space]))\n self.name = name\n self.sess = sess\n self.batch_size = batch_size\n\n # others\n self.global_scope = None", "title": "" }, { "docid": "ed795adbe2a56835fb0400b12fd77b15", "score": "0.63583887", "text": "def __init__(self, state_space, action_space):\n super(DQN_1, self).__init__()\n # input channel: 1, length is 6\n self.state_space = state_space\n self.action_space = action_space\n self.conv1 = nn.Conv1d(in_channels = 1, out_channels = 2, kernel_size = 2,\n dilation = 1, bias = True)\n self.bn1 = nn.BatchNorm1d(2)\n self.dp1 = nn.Dropout(0.25)\n\n self.conv2 = nn.Conv1d(in_channels = 2, out_channels = 4, kernel_size = 2,\n dilation = 2, bias = True)\n self.bn2 = nn.BatchNorm1d(4)\n self.dp2 = nn.Dropout(0.25)\n\n self.conv3 = nn.Conv1d(in_channels = 4, out_channels = 8, kernel_size = 2,\n dilation = 4, bias = True)\n self.bn3 = nn.BatchNorm1d(8)\n self.dp3 = nn.Dropout(0.25)\n\n self.conv4 = nn.Conv1d(in_channels = 8, out_channels = 16, kernel_size = 2,\n dilation = 8, bias = True)\n self.bn4 = nn.BatchNorm1d(16)\n self.dp4 = nn.Dropout(0.25)\n\n self.conv5 = nn.Conv1d(in_channels = 16, out_channels = 32, kernel_size = 2,\n dilation = 16, bias = True)\n self.bn5 = nn.BatchNorm1d(32)\n self.dp5 = nn.Dropout(0.25)\n\n # 35 * 59\n self.dense1= nn.Linear(1888, 1024)\n self.dp6 = nn.Dropout(0.25)\n self.dense2= nn.Linear(1024, 512)\n self.dp7 = nn.Dropout(0.25)\n self.dense3= nn.Linear(512, self.action_space)\n self.dp8 = nn.Dropout(0.25)", "title": "" }, { "docid": "5115f92e2cef6b9ace567501eca3eade", "score": "0.63552684", "text": "def _initialize(self):\n common.soft_variables_update(\n self._critic_network_1.variables,\n self._target_critic_network_1.variables,\n tau=1.0,\n )\n common.soft_variables_update(\n self._critic_network_2.variables,\n self._target_critic_network_2.variables,\n tau=1.0,\n )\n common.soft_variables_update(\n self._actor_network.variables,\n self._target_actor_network.variables,\n tau=1.0,\n )", "title": "" }, { "docid": "534a79f26506bf25ccbbfaa700105a7b", "score": "0.6331956", "text": "def __init__(self, state_space, action_space, hidden_fc1 = 128, hidden_fc2 = 64):\n super(DQN, self).__init__()\n\n self.state_space = state_space\n self.action_space = action_space\n self.hidden_fc1 = hidden_fc1\n self.hidden_fc2 = hidden_fc2\n\n self.layer1 = nn.Linear(state_space, hidden_fc1)\n self.layer2 = nn.Linear(hidden_fc1, hidden_fc2)\n self.output = nn.Linear(hidden_fc2, 
action_space)", "title": "" }, { "docid": "93cf64109bf8b35a122d7f97bac0d57b", "score": "0.6283489", "text": "def init_dqn(env, nb_actions):\n # Next, we build a very simple model.\n model = Sequential()\n model.add(Flatten(input_shape=(1,) + env.observation_space.shape))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(nb_actions))\n model.add(Activation('linear'))\n print(model.summary())\n\n # compile agent\n memory = SequentialMemory(limit=50000, window_length=1)\n policy = BoltzmannQPolicy()\n dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,\n target_model_update=1e-2, policy=policy)\n dqn.model_name = f\"DQN\"\n dqn.compile(Adam(lr=1e-3), metrics=['mae'])\n return dqn", "title": "" }, { "docid": "52d9a657321481a8b6353abad0086aff", "score": "0.62671673", "text": "def __init__(self, action_dim=1):\n self.policy_net = PolicyNet(action_dim=action_dim)\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n self.gamma = 0.99", "title": "" }, { "docid": "3c2669d1745fffdcbaf63d3a686aa707", "score": "0.6229563", "text": "def __init__(\n self,\n action_space: gym.Space,\n observation_space: gym.Space,\n learning_rate: float,\n hidden_size: Iterable[int],\n target_update_freq: int,\n batch_size: int,\n gamma: float,\n use_lunar_scheduler: bool,\n **kwargs,\n ):\n super().__init__(action_space, observation_space)\n\n STATE_SIZE = observation_space.shape[0]\n ACTION_SIZE = action_space.n\n\n # ######################################### #\n # BUILD YOUR NETWORKS AND OPTIMIZERS HERE #\n # ######################################### #\n self.critics_net = FCNetwork(\n (STATE_SIZE, *hidden_size, ACTION_SIZE), output_activation=None\n )\n\n self.critics_target = deepcopy(self.critics_net)\n\n self.critics_optim = Adam(\n self.critics_net.parameters(), lr=learning_rate, eps=1e-3\n )\n\n # ############################################# #\n # WRITE ANY HYPERPARAMETERS YOU MIGHT NEED HERE #\n # ############################################# #\n self.learning_rate = learning_rate\n self.update_counter = 0\n self.target_update_freq = target_update_freq\n self.batch_size = batch_size\n self.gamma = gamma\n self.use_lunar_scheduler = use_lunar_scheduler\n\n # ######################################### #\n\n self.saveables.update(\n {\n \"critics_net\": self.critics_net,\n \"critics_target\": self.critics_target,\n \"critic_optim\": self.critics_optim,\n }\n )", "title": "" }, { "docid": "fdcf72fe62def30b838d320b0b1f2499", "score": "0.62248355", "text": "def __init__(self, state_size, action_size, hidden_dims, random_seed):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n\n # Actor Network (w/ Target Network)\n self.actor_local = DDPG_Net(state_size, action_size, hidden_dims).to(device)\n self.actor_target = DDPG_Net(state_size, action_size, hidden_dims).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=1e-4)\n \n # Make sure the Actor Target Network has the same weight values as the Local Network\n for target, local in zip(self.actor_target.parameters(), self.actor_local.parameters()):\n target.data.copy_(local.data)\n\n # Critic Network (w/ Target Network)\n self.critic_local = DDPG_Net(state_size+action_size, 1, hidden_dims).to(device)\n self.critic_target = DDPG_Net(state_size+action_size, 1, hidden_dims).to(device)\n self.critic_optimizer = 
optim.Adam(self.critic_local.parameters(), lr=1e-3)\n \n # Make sure the Critic Target Network has the same weight values as the Local Network\n for target, local in zip(self.critic_target.parameters(), self.critic_local.parameters()):\n target.data.copy_(local.data)\n\n # Noise process\n self.noise = OUNoise(action_size, random_seed)\n\n # Replay memory\n self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)", "title": "" }, { "docid": "1db45d66303e710bfd0fb8492fae10ae", "score": "0.62016773", "text": "def build_network(self):\n n_act = len(self.environmentActions)\n \n g = tf.Graph()\n\n with g.as_default(): \n #Builds both trainable and target networks\n self.inputs, self.soft_max,self.q,self.q_best_act = \\\n self.build_layers(trainable = True, prefix = \"net/\")\n self.inputs_target, self.soft_max_target,self.q_target,self.q_best_act_target = \\\n self.build_layers(trainable = False, prefix = \"target/\")\n \n\n # builds the operation to update the target network\n self.update_target_op = []\n trainable_variables = tf.trainable_variables()\n all_variables = tf.global_variables()\n for i in range(0, len(trainable_variables)):\n #print(trainable_variables[i].name + \"->\" +all_variables[len(trainable_variables) + i].name)\n self.update_target_op.append(all_variables[len(trainable_variables) + i].assign(trainable_variables[i]))\n\n \n #actions executed\n self.actions = tf.placeholder('int64', [None], name='action_train')\n self.rewards = tf.placeholder(tf.float32, [None], name='reward_train')\n #Actions suggested by\n self.next_acts = tf.placeholder('int64', [None], name='n_action_train') \n self.isTerminal = tf.placeholder(tf.float32, [None], name='terminal_train')\n \n next_acts = tf.one_hot(self.next_acts, n_act, 1.0, 0.0, name='next_action_one_hot')\n target_max_q = tf.reduce_sum(self.q_target * next_acts, reduction_indices=1, name='qt_next')#tf.max(self.q_target)\n \n gamma = tf.convert_to_tensor(self.gamma)\n action_one_hot = tf.one_hot(self.actions, n_act, 1.0, 0.0, name='action_one_hot')\n self.q_acted = tf.reduce_sum(self.q * action_one_hot, reduction_indices=1, name='q_acted')\n target_q_t = self.rewards + (tf.convert_to_tensor(1.0)-self.isTerminal)*gamma*target_max_q\n \n delta = target_q_t - self.q_acted\n #cost[i] = tf.Print(cost[i], [cost[i]], \"cost\")\n #self.cost = tf.reduce_mean(tf.square(delta), name='loss')#tf.reduce_mean(self.clipped_error(delta), name='loss')\n self.cost = tf.reduce_mean(self.clipped_error(delta), name='loss')\n # add an optimizer\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.alpha).minimize(self.cost)\n \n #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\n gpu_options = tf.GPUOptions(allow_growth=True)\n self.session = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))\n self.session.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.update_target()", "title": "" }, { "docid": "83ebae24d577f1bc1412a096124bea29", "score": "0.61932397", "text": "def __init__(self, state_dim, n_actions, gamma=0.99, lmbda=1.0, eps=1e-3, itr_target_update=1e1, device=\"cuda\"):\n self.q_net = Net(state_dim, n_actions).to(device)\n self.q_net_opt = optim.Adam(self.q_net.parameters(), lr=0.001)\n self.target_q_net = Net(state_dim, n_actions).to(device)\n\n self.itr_target_update = itr_target_update\n self.lmbda = lmbda\n self.count = 0\n self.gamma = gamma\n self.eps = eps\n self.device = device\n self.loss_func = nn.MSELoss()\n self.memory = ReplayBuffer(1e4, 64)", "title": "" }, { 
"docid": "991ae8830e72ca2035c07b9670ceadbf", "score": "0.6179215", "text": "def __init__(self, obs_space_dim, act_space_dim, nodes, dt,\n init_weights=True):\n\n # Input, state, output weights\n self.obs_space_dim = obs_space_dim\n self.act_space_dim = act_space_dim\n self.nodes = nodes\n if init_weights:\n self.weights = tf.Variable(\n set_weights([obs_space_dim + act_space_dim + nodes,\n obs_space_dim + act_space_dim + nodes]),\n dtype=tf.float32, name=\"node_weights\",\n trainable=False)\n else:\n self.weights = tf.get_variable(\"node_weights\",\n (self.state_size, self.state_size),\n dtype=tf.float32,\n trainable=False)\n\n self.bias = tf.zeros([obs_space_dim + act_space_dim + nodes, 1],\n dtype=tf.float32)\n self.dt = dt\n self.sess = tf.InteractiveSession()\n x = self.make_rnn()\n self.inputs, self.state, self.output, self.next_state = x\n\n # Initialize all vars\n self._init_vars()", "title": "" }, { "docid": "feba244b4860f4c5015c1ce0edb11a01", "score": "0.61737394", "text": "def __init__(\n self, observation_space, action_size, model_dict\n ):\n super(ICMNetwork, self).__init__()\n self.state_rep_size = model_dict.state_representation[-1]\n\n input_dim = prod(observation_space.shape)\n\n # state representation\n self.state_rep = build_sequential(input_dim, model_dict.state_representation)\n\n # inverse model\n inverse_model_hiddens = model_dict.inverse_model\n inverse_model_hiddens.append(action_size)\n self.inverse_model = build_sequential(self.state_rep_size * 2, inverse_model_hiddens)\n\n # forward model\n forward_model_hiddens = model_dict.forward_model\n forward_model_hiddens.append(self.state_rep_size)\n self.forward_model = build_sequential(self.state_rep_size + action_size, forward_model_hiddens)", "title": "" }, { "docid": "e5015d82e61cba02bcf35e7b96694481", "score": "0.6163331", "text": "def __init__(self, state_dim, n_actions, gamma=0.99, lmbda=1.0, eps=1e-3, itr_target_update=1e1, device=\"cuda\"):\n self.dqn1 = DoubleQNet(state_dim, n_actions, gamma, lmbda, eps, itr_target_update, device)\n self.dqn2 = DoubleQNet(state_dim, n_actions, gamma, lmbda, eps, itr_target_update, device)\n self.device = device\n self.lmbda = lmbda\n self.eps = eps", "title": "" }, { "docid": "4b8de0b61bb7c0af53754d98716bae32", "score": "0.6139436", "text": "def _initialize(self):\n common.soft_variables_update(\n self._critic_network_1.variables,\n self._target_critic_network_1.variables,\n tau=1.0)\n common.soft_variables_update(\n self._critic_network_2.variables,\n self._target_critic_network_2.variables,\n tau=1.0)", "title": "" }, { "docid": "84a19c1bcb9bd2a5b5acecd312bf2cdb", "score": "0.6135519", "text": "def __init__(self, n_actions):\n super(DQN, self).__init__()\n ###########################\n # YOUR IMPLEMENTATION HERE #\n self.input_shape = [4,84,84]\n self.conv = nn.Sequential(\n nn.BatchNorm2d(self.input_shape[0]), \n nn.Conv2d(self.input_shape[0], 32, kernel_size=8, stride=4),\n # nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n # nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n # nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Flatten()\n )\n conv_out_size = self._get_conv_out(self.input_shape)\n self.fc = nn.Sequential(\n nn.Linear(conv_out_size, 512),\n nn.ReLU(),\n nn.Linear(512, n_actions),\n # nn.ReLU()\n )", "title": "" }, { "docid": "cf856cdbc07223ec13afbc12f9abfcde", "score": "0.61324555", "text": "def build_main_network(self):\n \n # Computate Q(s_t, .)\n self.Q_distrib = build_critic(self.state_ph,\n 
trainable=True, scope='main_network')\n\n # Select only the Q-distribution of the action given in the experience,\n # i.e. compute Q(s_t, a_t)\n ind = tf.stack((tf.range(self.batch_size), self.action_ph), axis=1)\n self.Q_distrib_taken_action = tf.gather_nd(self.Q_distrib, ind)", "title": "" }, { "docid": "eb2e180263a4bd2b56f74c4dd2e0b91d", "score": "0.61295474", "text": "def __init__(self,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n device: torch.device,\n settings: dict) -> None:\n self.device = device\n action_size = action_spec.num_values\n state_size = np.prod(observation_spec.shape)\n self.action_size = action_size\n self.state_size = state_size\n self.batch_size = settings['batch_size']\n self.noisy_nets = settings['qnet_settings']['noisy_nets']\n self.distributional = settings[\"qnet_settings\"][\"distributional\"]\n\n if self.distributional:\n # Currently the distributional agent always uses Dueling DQN\n self.qnet = DistributionalDuelDQN(state_size, action_size, settings['qnet_settings'], device).to(device)\n self.q_target = DistributionalDuelDQN(state_size, action_size, settings['qnet_settings'], device).to(device)\n vmin, vmax = settings[\"qnet_settings\"][\"vmin\"], settings[\"qnet_settings\"][\"vmax\"]\n number_atoms = settings[\"qnet_settings\"][\"number_atoms\"]\n self.distribution_updater = DistributionUpdater(vmin, vmax, number_atoms)\n else:\n if settings[\"duelling_dqn\"]:\n self.qnet = DuelDQN(state_size, action_size, settings['qnet_settings']).to(device)\n self.q_target = DuelDQN(state_size, action_size, settings['qnet_settings']).to(device)\n else:\n self.qnet = Dqn(state_size, action_size, settings['qnet_settings']).to(device)\n self.q_target = Dqn(state_size, action_size, settings['qnet_settings']).to(device)\n\n self.q_target.load_state_dict(self.qnet.state_dict())\n self.optimizer = optim.Adam(self.qnet.parameters(), lr=settings['lr'])\n\n self.epsilon = settings[\"epsilon_start\"]\n self.decay = settings[\"epsilon_decay\"]\n self.epsilon_min = settings[\"epsilon_min\"]\n self.gamma = settings['gamma']\n\n self.start_optimization = settings[\"start_optimization\"]\n self.update_qnet_every = settings[\"update_qnet_every\"]\n self.update_target_every = settings[\"update_target_every\"]\n self.number_steps = 0\n self.ddqn = settings[\"ddqn\"]\n\n # Initialize replay memory\n self.prioritized_replay = settings[\"prioritized_buffer\"]\n if self.prioritized_replay:\n self.memory = PrioritizedReplayMemory(device, settings[\"buffer_size\"], self.gamma, settings[\"n_steps\"],\n settings[\"alpha\"], settings[\"beta0\"], settings[\"beta_increment\"])\n else:\n self.memory = ReplayMemory(device, settings[\"buffer_size\"], self.gamma, settings[\"n_steps\"])\n return", "title": "" }, { "docid": "e041c86dabfc270eda8907c86623e342", "score": "0.6114778", "text": "def __init__(self, input_shape, number_of_actions):\n super(DQN, self).__init__()\n ###########################\n self.conv = nn.Sequential(\n nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=4, stride=2),\n nn.ReLU()\n )\n\n conv_out_size = self._get_conv_out(input_shape)\n\n self.fc = nn.Sequential(\n nn.Linear(conv_out_size, 512),\n nn.ReLU(),\n nn.Linear(512, number_of_actions)\n )", "title": "" }, { "docid": "b9a0d6a0f2406560ba324db1dbbc64e4", "score": "0.61011684", "text": "def __init__(self, state_size, action_size, fc1_units=100, \n fc2_units=100, 
fc3_units=100):\n super(Network, self).__init__()\n set_seed()\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, fc3_units)\n self.fc4 = nn.Linear(fc3_units, action_size)", "title": "" }, { "docid": "9da9404c57961a0ae17bebc5b2301554", "score": "0.6074239", "text": "def __init__(self, env):\n self.env = env\n self.observations = self.env.observation_space.shape[0]\n self.actions = self.env.action_space.n\n self.model = self.get_model()", "title": "" }, { "docid": "dee0fdec65e2acaf6ede85e7e8f7809c", "score": "0.60711765", "text": "def initialize_network(self, n_agents: int, n_actions: int):\n model = Sequential()\n model.add(Dense(int(self.hidden_nodes), input_dim=n_agents, activation=\"relu\"))\n model.add(Dense(int(self.hidden_nodes), activation=\"relu\"))\n model.add(Dense(int(self.hidden_nodes), activation=\"relu\"))\n model.add(Dense(n_actions, activation=\"linear\"))\n model.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n return model", "title": "" }, { "docid": "1a64f75cc4853a5b5cef6795e8562544", "score": "0.60662425", "text": "def initialize_network(self, n_agents: int, n_actions: int):\n model = Sequential()\n model.add(Dense(int(self.hidden_nodes), input_dim=n_agents, activation=\"relu\"))\n model.add(Dense(int(self.hidden_nodes), activation=\"relu\"))\n model.add(Dense(n_actions, activation=\"linear\"))\n model.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n return model", "title": "" }, { "docid": "ad7cec6092b3a7bb29c16f54565286d1", "score": "0.60560584", "text": "def __init__(self, WIDTH, HEIGHT, possible_actions):\n\n super(DQNetwork, self).__init__()\n print(\"INIT\")\n\n self.CN1_params = {\n 'channels_in':2,\n 'channels_out':128,\n 'kernel':3,\n 'stride':1,\n 'padding':(1,1),\n 'dilation':1,\n 'input_width':WIDTH,\n 'input_height':HEIGHT,\n 'output_width': None,\n 'output_height': None\n }\n self.set_output_dims(self.CN1_params)\n\n self.CN2_params = {\n 'channels_in':self.CN1_params['channels_out'],\n 'channels_out':64,\n 'kernel':3,\n 'stride':3,\n 'padding':(1,1),\n 'dilation':1,\n 'input_width':int(self.CN1_params['output_width']/2), # Maxpool\n 'input_height':int(self.CN1_params['output_height']/2),\n 'output_width': None,\n 'output_height': None\n }\n self.set_output_dims(self.CN2_params)\n\n # input is 25x10x2\n self.conv1 = nn.Conv2d(self.CN1_params['channels_in'], self.CN1_params['channels_out'],\n self.CN1_params['kernel'], stride=self.CN1_params['stride'], padding=self.CN1_params['padding'])\n # elu -> max_pool2d(2, 2)\n \n self.conv2 = nn.Conv2d(self.CN1_params['channels_out'], self.CN2_params['channels_out'],\n self.CN2_params['kernel'], stride=self.CN2_params['stride'], padding=self.CN2_params['padding'])\n # elu -> max_pool2d(2, 2)\n\n self.conv_net_output = int(self.CN2_params['channels_out']*self.CN2_params['output_width']*self.CN2_params['output_height']//4 ) # Max pool\n\n # 5 additional scalar inputs from \n fc1_dim = 5 + self.conv_net_output\n print(\"Fully connected input dimensions:\", fc1_dim)\n self.fc1 = nn.Linear(fc1_dim, 128) # FIX INPUTS\n self.fc2 = nn.Linear(128, possible_actions)\n # -> Q(s, a)\n\n self.loss = nn.MSELoss", "title": "" }, { "docid": "25efef424d1689ebd268463042f62bbd", "score": "0.6043453", "text": "def __init__(self, state_space, action_space):\n super(DQN_2, self).__init__()\n # input channel: 1, length is 6\n self.state_space = state_space\n self.action_space = action_space\n self.conv1 = nn.Conv1d(in_channels = 1, 
out_channels = 2, kernel_size = 2,\n dilation = 1, bias = True)\n self.bn1 = nn.BatchNorm1d(2)\n self.dp1 = nn.Dropout(0.25)\n\n self.conv2 = nn.Conv1d(in_channels = 2, out_channels = 4, kernel_size = 2,\n dilation = 2, bias = True)\n self.bn2 = nn.BatchNorm1d(4)\n self.dp2 = nn.Dropout(0.25)\n\n self.conv3 = nn.Conv1d(in_channels = 4, out_channels = 8, kernel_size = 2,\n dilation = 4, bias = True)\n self.bn3 = nn.BatchNorm1d(8)\n self.dp3 = nn.Dropout(0.25)\n\n self.conv4 = nn.Conv1d(in_channels = 8, out_channels = 16, kernel_size = 2,\n dilation = 8, bias = True)\n self.bn4 = nn.BatchNorm1d(16)\n self.dp4 = nn.Dropout(0.25)\n\n self.conv5 = nn.Conv1d(in_channels = 16, out_channels = 32, kernel_size = 2,\n dilation = 16, bias = True)\n self.bn5 = nn.BatchNorm1d(32)\n self.dp5 = nn.Dropout(0.25)\n\n # 35 * 59 state_value\n self.dense1= nn.Linear(1888, 1024)\n self.dp6 = nn.Dropout(0.25)\n self.dense2= nn.Linear(1024, 512)\n self.dp7 = nn.Dropout(0.25)\n self.dense3= nn.Linear(512, 1)\n self.dp8 = nn.Dropout(0.25)\n\n # advantage value\n self.dense4 = nn.Linear(1888, 1024)\n self.dp9 = nn.Dropout(0.25)\n self.dense5 = nn.Linear(1024, 512)\n self.dp10 = nn.Dropout(0.25)\n self.dense6 = nn.Linear(512, self.action_space)\n self.dp11 = nn.Dropout(0.25)", "title": "" }, { "docid": "2bb913f4b3a1e1f33f87c010fad52f5a", "score": "0.60431284", "text": "def initialize_network(self):\n self.build_random_layers(self.initial_input_node)\n self.get_semantics_initial_nodes()\n self.semantics = self.output_node.semantics\n self.fitness = self._evaluate()\n self.mutation_level += 1", "title": "" }, { "docid": "c0a8fca49f2e1155557dc388f96e20d2", "score": "0.60410255", "text": "def _configure_observation_space(self):\n self.observation_space.spaces[\"gait_phases\"] = gym.spaces.Box(\n np.array([-1.0] * 4), np.array([1.0] * 4))\n self.observation_space.spaces[\"feet_states\"] = gym.spaces.Box(\n np.array([-2.0] * 4), np.array([2.0] * 4))\n self.observation_space.spaces[\"need_new_swing_target\"] = gym.spaces.Box(\n np.array([0.0] * 4), np.array([1.0] * 4))\n self.observation_space.spaces[\"estimated_base_speed\"] = gym.spaces.Box(\n np.array([-1.0, -1.0, -1.0]), np.array([1.0, 1.0, 1.0]))\n self.observation_space.spaces[\"estimated_body_height\"] = gym.spaces.Box(\n np.array([0.35]), np.array([0.5]))\n self.observation_space.spaces[\"heuristics_com_velocity\"] = gym.spaces.Box(\n np.array([-1.0] * 2), np.array([1.0] * 2))\n self.observation_space.spaces[\"current_toe_target\"] = gym.spaces.Box(\n np.array([-1.0, -1.0, -1.0] * _NUM_LEGS),\n np.array([1.0, 1.0, 1.0] * _NUM_LEGS))\n\n # Needed so that LastActionSensor uses the correct action space.\n for s in self.all_sensors():\n s.on_reset(self)\n for sensor in self.all_sensors():\n if sensor.get_name() not in self._gym_config.ignored_sensor_list:\n if hasattr(sensor, \"observation_space\"):\n self.observation_space.spaces[\n sensor.get_name()] = sensor.observation_space\n\n self.task.reset(self)\n if hasattr(self.task, \"observation_space\"):\n self.observation_space.spaces[\n self.task.get_name()] = self.task.observation_space", "title": "" }, { "docid": "353a96c8b1feeaa60e565bdc7ebd8eb6", "score": "0.60368574", "text": "def init_network(self):\n tf_map_generator = self._hyperparams['network_model']\n #print('dO, dU = ', self._dO, self._dU)\n #print('self._hyperparams = ', self._hyperparams)\n tf_map, self.solver_op, self.summary_op, self.avg_tower_loss, self.act_4prob = tf_map_generator(dim_input=self._dO, dim_output=self._dU, batch_size=self.batch_size, 
network_config=self._hyperparams['network_params'])\n self.obs_tensor = tf_map.get_input_tensor()\n self.action_tensor = tf_map.get_target_output_tensor()\n self.precision_tensor = tf_map.get_precision_tensor()\n self.act_op = tf_map.get_output_op()\n self.loss_scalar = tf_map.get_loss_op()", "title": "" }, { "docid": "6e878bd6bc70645b8e7b09d2f355e6a0", "score": "0.60305786", "text": "def create_q_network(self, state_dim, action_dim):\n a_layer1_size = 100\n a_layer2_size = 50\n s_layer1_size = 100\n s_layer2_size = 50\n combined_layer1_size = 100\n combined_layer2_size = 100\n\n state_input = tf.placeholder(\"float\", [None, state_dim])\n action_input = tf.placeholder(\"float\", [None, action_dim])\n a_W1 = self.variable([action_dim, a_layer1_size], action_dim)\n a_b1 = self.variable([a_layer1_size], action_dim)\n a_W2 = self.variable([a_layer1_size, a_layer2_size], a_layer1_size)\n a_b2 = self.variable([a_layer2_size], a_layer1_size)\n s_W1 = self.variable([state_dim + action_dim, s_layer1_size], state_dim + action_dim)\n s_b1 = self.variable([s_layer1_size], state_dim + action_dim)\n # s_W2 = tf.Variable(tf.random_uniform([s_layer1_size, s_layer2_size], -3e-5, 3e-5))\n # s_b2 = tf.Variable(tf.random_uniform([s_layer2_size], -3e-5))\n\n W1_action = tf.Variable(tf.eye(a_layer2_size, num_columns=combined_layer1_size))\n W1_state = tf.Variable(tf.zeros([s_layer1_size, combined_layer1_size]))\n\n b1 = tf.Variable(tf.zeros([combined_layer1_size]))\n W2 = tf.Variable(tf.eye(combined_layer1_size, num_columns=combined_layer2_size))\n b2 = tf.Variable(tf.zeros([combined_layer2_size]))\n W3 = tf.Variable(tf.random_uniform([combined_layer2_size, 1], -3e-3, 3e-3))\n b3 = tf.Variable(tf.random_uniform([1], -3e-3, 3e-3))\n\n a_layer1 = tf.nn.relu(tf.matmul(action_input, a_W1) + a_b1)\n a_layer2 = tf.nn.relu(tf.matmul(a_layer1, a_W2) + a_b2)\n s_layer1 = tf.nn.relu(tf.matmul(tf.concat([action_input, state_input], axis=1), s_W1) + s_b1)\n # s_layer2 = tf.nn.relu(tf.matmul(s_layer1, s_W2) + s_b2)\n combined_layer1 = tf.nn.relu(tf.matmul(a_layer2, W1_action) + tf.matmul(s_layer1, W1_state) + b1)\n combined_layer2 = tf.nn.relu(tf.matmul(combined_layer1, W2) + b2)\n q_value_output = tf.identity(tf.matmul(combined_layer2, W3) + b3)\n\n return state_input, action_input, q_value_output, [a_W1, a_b1, a_W2, a_b2, s_W1, s_b1,\n W1_action, W1_state, b1, W2, b2, W3, b3]", "title": "" }, { "docid": "2047ff35dd6e5ee91f3860289e1e7df0", "score": "0.601918", "text": "def _build_network(self):\n # self._observation = tf.placeholder(shape=[None, *self._dim_obs], dtype=tf.uint8, name=\"observation\")\n # self._observation = tf.to_float(self._observation) / 255.0\n # self._observation = tf.placeholder(shape=[None, *self._dim_obs], dtype=tf.float32, name=\"observation\")\n self._observation = self._obs_fn()\n self._action = tf.placeholder(dtype=tf.int32, shape=[None], name=\"action\")\n self._reward = tf.placeholder(dtype=tf.float32, shape=[None], name=\"reward\")\n self._done = tf.placeholder(dtype=tf.float32, shape=[None], name=\"done\")\n # self._next_observation = tf.placeholder(dtype=tf.uint8, shape=[None, *self._dim_obs], name=\"next_observation\")\n # self._next_observation = tf.to_float(self._next_observation) / 255.0\n # self._next_observation = tf.placeholder(shape=[None, *self._dim_obs], dtype=tf.float32, name=\"next_observation\")\n self._next_observation = self._obs_fn()\n\n with tf.variable_scope(\"main/qnet\"):\n # self._qvals = self._dense(self._observation)\n self._qvals = 
self._value_fn(self._observation)\n\n with tf.variable_scope(\"target/qnet\"):\n # self._target_qvals = self._dense(self._next_observation)\n self._target_qvals = self._value_fn(self._next_observation)", "title": "" }, { "docid": "07a4915c8378f598b9839417216b104b", "score": "0.60049677", "text": "def action_space(self):\n return gym.spaces.Discrete(3)", "title": "" }, { "docid": "6520db6a824ee7565744d29546efe33c", "score": "0.6000687", "text": "def __init__(self, state_dim, n_actions):\n super().__init__()\n self.first = nn.Linear(*state_dim, 128)\n self.common_mid = nn.Linear(128, 128)\n self.critic_mid = nn.Linear(128, 64)\n self.actor_mid = nn.Linear(128, 64)\n self.critic = nn.Linear(64, 1)\n self.actor = nn.Linear(64, n_actions)", "title": "" }, { "docid": "00c7213659fad8ffc6efe71ab0f751be", "score": "0.59996307", "text": "def __init__(self, env_spec, nn_module):\n self._env_spec = env_spec\n self._nn_module = nn_module\n self._obs_dim = env_spec.observation_space.flat_dim\n self._action_dim = env_spec.action_space.flat_dim\n self._action_bound = env_spec.action_space.high", "title": "" }, { "docid": "472a305ea6611b9474191acfe29b4218", "score": "0.5985646", "text": "def __init__(self, state_dim, action_dim, hidden_layers, activations, batch_norm=True):\n super(EnvironmentModelSeparateReward, self).__init__()\n layers_dynamics = [state_dim+action_dim]+hidden_layers+[state_dim]\n layers_reward = [state_dim+action_dim]+hidden_layers+[1]\n self.model_dynamics = NN(layers_dynamics, activations, batch_norm)\n self.model_reward = NN(layers_reward, activations, batch_norm)", "title": "" }, { "docid": "70073ae426eadb0ba0776b5bd3628f8c", "score": "0.5984046", "text": "def __init__(self):\n self.action_space = [[i,j] for i in range(0,m) for j in range(0,m) if i != j]\n self.state_space = list(np.zeros((m+t+d)))\n self.state_init = list(np.zeros((m+t+d)))\n\n # Start the first round\n self.reset()", "title": "" }, { "docid": "454c3c3685b1810d5e93fd617b5a3710", "score": "0.59817994", "text": "def __init__(self, action_space: gym.Space, observation_space: gym.Space):\n self.action_space = action_space\n self.observation_space = observation_space\n\n self.saveables = {}", "title": "" }, { "docid": "1f8d18e326278ba5cbf56c8820d10da3", "score": "0.59752774", "text": "def __init__(self, state_size, action_size, num_agents):\n self.state_size = state_size\n self.action_size = action_size \n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size).to(DEVICE)\n self.actor_target = Actor(state_size, action_size).to(DEVICE)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR, weight_decay=WEIGHT_DECAY_actor)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(num_agents*state_size, num_agents*action_size).to(DEVICE)\n self.critic_target = Critic(num_agents*state_size, num_agents*action_size).to(DEVICE)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY_critic)\n\n # Noise process\n self.noise = OUNoise(action_size) #single agent only\n self.noise_scale = NOISE_START\n \n # Make sure target is initialized with the same weight as the source (makes a big difference)\n self.hard_update(self.actor_target, self.actor_local)\n self.hard_update(self.critic_target, self.critic_local)", "title": "" }, { "docid": "8e2da41b9c156a0a0c6569f0aeace4ba", "score": "0.59651244", "text": "def __init__(self, state_size, action_size, seed, fc1_units, fc2_units, out_size):\n super(Network, 
self).__init__()\n self.seed = torch.manual_seed(seed)\n \n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, out_size)\n self.reset_parameters()", "title": "" }, { "docid": "da3ea7d9186de9abc35783ef9f296910", "score": "0.59596187", "text": "def __init__(self, state_size, action_size, num_units, seed):\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n # Add fully-connected layers.\n for idx, units in enumerate(num_units):\n if idx == 0:\n modules = [nn.Linear(state_size, units)]\n else:\n modules.append(nn.Linear(num_units[idx-1], units))\n modules.append(nn.ReLU())\n # Add final layer with output equal to action space size.\n modules.append(nn.Linear(num_units[-1], action_size))\n self.model = nn.Sequential(*modules)", "title": "" }, { "docid": "5ec20912ef9b66a4c1f28774d1a52f24", "score": "0.5945939", "text": "def __init__(self, action_size: int, seed: int):\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n\n self.layer1 = nn.Sequential(\n nn.Conv3d(in_channels=3, out_channels=256, kernel_size=(1,3,3), stride=(1,3,3)), \\\n nn.BatchNorm3d(256), \\\n nn.ReLU())\n\n self.layer2 = nn.Sequential( \\\n nn.Conv3d(in_channels=256, out_channels=512, kernel_size=(1,3,3), stride=(1,3,3)), \\\n nn.BatchNorm3d(512), \\\n nn.ReLU())\n\n self.layer3 = nn.Sequential( \\\n nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(4, 3, 3), stride=(1, 3, 3)), \\\n nn.BatchNorm3d(512), \\\n nn.ReLU())\n\n self.layer4 = nn.Sequential(\n nn.Linear(in_features=4608, out_features=1024), nn.ReLU())\n self.layer6 = nn.Linear(in_features=1024, out_features=action_size)", "title": "" }, { "docid": "13ce5d0dc51f902579ebbfd0af4772ac", "score": "0.59370804", "text": "def initialize_models(self):\n state_shape = list(self.env.observation_space.shape)\n img_height, img_width, n_channels = state_shape\n num_actions = self.env.action_space.n\n strides = np.array([4, 2, 1]) # The stride size for every conv2d layer\n filter_sizes = np.array([8, 4, 3]) # The filter size for every conv2d layer\n numb_filters = np.array([32, 64, 64]) # number of filters for every conv2d layer\n ##############################################################\n ################ YOUR CODE HERE - 25-30 lines lines ################\n padding_0 = ((strides[0] - 1) * img_height - strides[0] + filter_sizes[0]) // 2\n padding_1 = ((strides[1] - 1) * img_height - strides[1] + filter_sizes[1]) // 2\n padding_2 = ((strides[2] - 1) * img_height - strides[2] + filter_sizes[2]) // 2\n self.q_network = nn.Sequential(\n nn.Conv2d(n_channels * self.config.state_history, 32, kernel_size=8, stride=4, padding=padding_0),\n nn.ReLU(),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=padding_1),\n nn.ReLU(),\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=padding_2),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(4096, 512),\n nn.ReLU(),\n nn.Linear(512, num_actions)\n )\n self.target_network = nn.Sequential(\n nn.Conv2d(n_channels * self.config.state_history, 32, kernel_size=8, stride=4, padding=padding_0),\n nn.ReLU(),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=padding_1),\n nn.ReLU(),\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=padding_2),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(4096, 512),\n nn.ReLU(),\n nn.Linear(512, num_actions)\n )\n ##############################################################\n 
######################## END YOUR CODE #######################", "title": "" }, { "docid": "541393fcd6e5146198f004c70583f6e3", "score": "0.5934222", "text": "def set_up_continuous_action_space(self):\n self.action_space = gym.spaces.Box(shape=(self.action_dim,),\n low=-1.0,\n high=1.0,\n dtype=np.float32)\n self.action_high = self.torque * np.ones([self.action_dim])\n self.action_low = -self.action_high", "title": "" }, { "docid": "7c34b3eb09ac49e8091735da3d25f17d", "score": "0.59318215", "text": "def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n config: AlgorithmConfigDict,\n ):\n self.observation_space: gym.Space = observation_space\n self.action_space: gym.Space = action_space\n # the policy id in the global context.\n self.__policy_id = config.get(\"__policy_id\")\n # The base struct of the observation/action spaces.\n # E.g. action-space = gym.spaces.Dict({\"a\": Discrete(2)}) ->\n # action_space_struct = {\"a\": Discrete(2)}\n self.observation_space_struct = get_base_struct_from_space(observation_space)\n self.action_space_struct = get_base_struct_from_space(action_space)\n\n self.config: AlgorithmConfigDict = config\n self.framework = self.config.get(\"framework\")\n\n # Create the callbacks object to use for handling custom callbacks.\n from ray.rllib.algorithms.callbacks import DefaultCallbacks\n\n callbacks = self.config.get(\"callbacks\")\n if isinstance(callbacks, DefaultCallbacks):\n self.callbacks = callbacks()\n elif isinstance(callbacks, (str, type)):\n try:\n self.callbacks: \"DefaultCallbacks\" = deserialize_type(\n self.config.get(\"callbacks\")\n )()\n except Exception:\n pass # TEST\n else:\n self.callbacks: \"DefaultCallbacks\" = DefaultCallbacks()\n\n # The global timestep, broadcast down from time to time from the\n # local worker to all remote workers.\n self.global_timestep: int = 0\n # The number of gradient updates this policy has undergone.\n self.num_grad_updates: int = 0\n\n # The action distribution class to use for action sampling, if any.\n # Child classes may set this.\n self.dist_class: Optional[Type] = None\n\n # Initialize view requirements.\n self.init_view_requirements()\n\n # Whether the Model's initial state (method) has been added\n # automatically based on the given view requirements of the model.\n self._model_init_state_automatically_added = False\n\n # Connectors.\n self.agent_connectors = None\n self.action_connectors = None", "title": "" }, { "docid": "26b6ce520f412cb2b1fa2f3f880b5794", "score": "0.5926083", "text": "def __init__(self, num_inputs, num_actions):\n super().__init__()\n self.conv_layers = nn.Sequential(\n nn.Conv2d(num_inputs, 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU(),\n )\n self.fc_layers = nn.Sequential(\n nn.Linear(3136, 512),\n nn.ReLU(),\n nn.Linear(512, num_actions)\n )", "title": "" }, { "docid": "1ada49c973ccc08b5027064bef4ec713", "score": "0.5925615", "text": "def __init__(self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n actor_hidden_activation=\"relu\",\n actor_hiddens=(400, 300),\n critic_hidden_activation=\"relu\",\n critic_hiddens=(400, 300),\n parameter_noise=False,\n twin_q=False,\n exploration_ou_sigma=0.2):\n\n super(DDPGModel, self).__init__(obs_space, action_space, num_outputs,\n model_config, name)\n self.exploration_ou_sigma = exploration_ou_sigma\n\n self.action_dim = np.product(action_space.shape)\n self.model_out = 
tf.keras.layers.Input(\n shape=(num_outputs, ), name=\"model_out\")\n self.actions = tf.keras.layers.Input(\n shape=(self.action_dim, ), name=\"actions\")\n\n def build_action_net(action_out):\n assert action_out.dtype == tf.float32\n activation = getattr(tf.nn, actor_hidden_activation)\n i = 0\n for hidden in actor_hiddens:\n if parameter_noise:\n import tensorflow.contrib.layers as layers\n action_out = layers.fully_connected(\n action_out,\n num_outputs=hidden,\n activation_fn=activation,\n normalizer_fn=layers.layer_norm)\n else:\n action_out = tf.layers.dense(\n action_out,\n units=hidden,\n activation=activation,\n name=\"action_hidden_{}\".format(i))\n i += 1\n return tf.layers.dense(\n action_out,\n units=self.action_dim,\n activation=None,\n name=\"action_out\")\n\n action_scope = name + \"/action_net\"\n # Save the scope object, since in eager we will execute this\n # path repeatedly and there is no guarantee it will always be run\n # in the same original scope.\n with tf.variable_scope(action_scope) as action_scope_handle:\n pass\n\n # TODO(ekl) use keras layers instead of variable scopes\n if tf.executing_eagerly():\n # Have to use a variable store to reuse variables in eager mode\n import tensorflow.contrib as tfc\n store = tfc.eager.EagerVariableStore()\n\n def build_action_net_scope(model_out):\n with store.as_default():\n with tf.variable_scope(\n action_scope_handle, reuse=tf.AUTO_REUSE):\n return build_action_net(model_out)\n else:\n\n def build_action_net_scope(model_out):\n with tf.variable_scope(\n action_scope_handle, reuse=tf.AUTO_REUSE):\n return build_action_net(model_out)\n\n pi_out = tf.keras.layers.Lambda(build_action_net_scope)(self.model_out)\n self.action_net = tf.keras.Model(self.model_out, pi_out)\n self.register_variables(self.action_net.variables)\n\n # Noise vars for P network except for layer normalization vars\n if parameter_noise:\n assert not tf.executing_eagerly(), \"eager p noise not implemented\"\n with tf.variable_scope(action_scope_handle, reuse=tf.AUTO_REUSE):\n self._build_parameter_noise([\n var for var in self.action_net.variables\n if \"LayerNorm\" not in var.name\n ])\n\n def build_q_net(name, model_out, actions):\n q_out = tf.keras.layers.Concatenate(axis=1)([model_out, actions])\n activation = getattr(tf.nn, critic_hidden_activation)\n for i, n in enumerate(critic_hiddens):\n q_out = tf.keras.layers.Dense(\n n,\n name=\"{}_hidden_{}\".format(name, i),\n activation=activation)(q_out)\n q_out = tf.keras.layers.Dense(\n 1, activation=None, name=\"{}_out\".format(name))(q_out)\n return tf.keras.Model([model_out, actions], q_out)\n\n self.q_net = build_q_net(\"q\", self.model_out, self.actions)\n self.register_variables(self.q_net.variables)\n\n if twin_q:\n self.twin_q_net = build_q_net(\"twin_q\", self.model_out,\n self.actions)\n self.register_variables(self.twin_q_net.variables)\n else:\n self.twin_q_net = None", "title": "" }, { "docid": "87e1a5bc47aef11520f0ad71d4fbbfbd", "score": "0.59187615", "text": "def __init__(self, state_size, action_size, fc1_units, fc2_units):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = torch.manual_seed(SEED)\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, fc1_units, fc2_units).to(device)\n self.actor_target = Actor(state_size, action_size, fc1_units, fc2_units).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(state_size, 
action_size, fc1_units, fc2_units).to(device)\n self.critic_target = Critic(state_size, action_size, fc1_units, fc2_units).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)\n\n # Noise process\n self.noise = OrnsteinUhlenbeck(action_size, SEED)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, SEED, device)", "title": "" }, { "docid": "69af39351db50a7eecb43ad9d65bb52b", "score": "0.5914262", "text": "def __init__(\n self, state_size, action_size, learning_rate, hidden_nuerons,\n gamma, epsilon_decay, memory_batch_size, memory_size,\n tensorflow_session):\n self.action_size = action_size\n self.brain = DQNetwork(\n state_size, action_size, learning_rate, hidden_nuerons)\n self.memory = Memory(memory_size)\n self.session = tensorflow_session\n self.memory_batch_size = memory_batch_size\n self.gamma = gamma\n self.epsilon = 1 # The chance to take a random action.\n self.epsilon_decay = epsilon_decay", "title": "" }, { "docid": "7fc7074a6cb77488a33cc05bd8ef8a4c", "score": "0.59059334", "text": "def __init__(self, state_size, action_size, state_size_full, action_size_full, random_seed):\n self.state_size = state_size\n self.action_size = action_size\n self.state_size_full = state_size_full\n self.action_size_full = action_size_full\n self.seed = random.seed(random_seed)\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, random_seed).to(hyperparameters.device)\n self.actor_target = Actor(state_size, action_size, random_seed).to(hyperparameters.device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=hyperparameters.LR_ACTOR)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(state_size_full, action_size_full, random_seed).to(hyperparameters.device)\n self.critic_target = Critic(state_size_full, action_size_full, random_seed).to(hyperparameters.device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=hyperparameters.LR_CRITIC, weight_decay=hyperparameters.WEIGHT_DECAY)\n\n # Noise process\n self.noise = OUNoise(action_size, random_seed)", "title": "" }, { "docid": "39eebffca9d1cc6f1cfd870f107a3325", "score": "0.5897909", "text": "def __init__(self, action_dim, **kwargs):\r\n super(Actor, self).__init__(kwargs)\r\n self.layer_1 = layers.Dense(32, activation='relu', name='actor_1')\r\n self.layer_2 = layers.Dense(32, activation='relu', name='actor_2')\r\n self.layer_3 = layers.Dense(action_dim, name='linear_action_pred')\r\n self.action_probabilities = layers.Softmax()", "title": "" }, { "docid": "772af47fe835b623813da08269395fea", "score": "0.5896815", "text": "def initialize(\n self,\n net: Union[BaseNetwork, None] = None,\n target_net: Union[BaseNetwork, None] = None,\n num_actions: int = -1,\n lr: float = 5e-5,\n replay_memory_size: int = int(1e4),\n epsilon_start: float = 0.95,\n epsilon_end: float = 0.01,\n epsilon_steps: int = int(1e5),\n tau: float = 0.01,\n gamma: float = 0.99,\n batch_size: int = 64,\n num_envs: int = 16,\n ) -> None:\n if num_actions == -1:\n raise ValueError(\n 'Please provide valid input value for num_actions (positive integer). Currently set to -1.'\n )\n self.gamma = gamma\n self.tau = tau\n self.batch_size = batch_size\n self.num_actions = num_actions\n self.num_envs = num_envs\n\n if net is None or target_net is None:\n raise ValueError(\n 'Please provide input value for net and target_net. 
Currently set to None.'\n )\n self.net = net\n self.target_net = target_net\n hard_update(self.net, self.target_net)\n self.nets = {'net': self.net, 'target': self.target_net}\n\n self.lr = lr\n # Optimizer\n self.opt = Adam(self.net.parameters(), lr=self.lr)\n\n self.replay_memory_size = replay_memory_size\n # Replay memory\n self.memory = ReplayMemory(self.replay_memory_size)\n\n # Epsilon used for selecting actions\n self.epsilon_start = epsilon_start\n self.epsilon = epsilon_start\n self.epsilon_step = (epsilon_start - epsilon_end) / epsilon_steps\n self.epsilon_end = epsilon_end", "title": "" }, { "docid": "b83990f780af1f4d9dbf6da666b9f515", "score": "0.5894921", "text": "def init_net(self, n_inputs: int) -> None:\n pass", "title": "" }, { "docid": "73b15e245acfdcd226ab4368d73e9e2b", "score": "0.5890328", "text": "def __init__(self, state_dim, action_dim, name=\"NN\", training_set_size=4000):\n self.sess = None\n self.log_prob = None\n self.prob = None\n self.dtype = get_default_tf_dtype()\n self.name = name\n self.x_range = 4.8\n self.theta_range = 180\n self.XData = None\n self.YData = None\n self.state_dim = state_dim\n self.x_dim = state_dim + 2 + action_dim\n self.action_dim = action_dim\n self.param_dim = 2\n self.gp_list = []\n self.training_set_size = training_set_size\n self.global_step = 0\n self.folder = \"../\" + self.name + \"NNData\" + \"/\"\n self.min_omega = 0\n self.max_omega = 1", "title": "" }, { "docid": "21fbe8182770ee9aaa836c553aba7527", "score": "0.5886872", "text": "def __init__(\n\t\tself, state_size, action_size, seed=0,\n\t\tbuffer_size = 1_000_000, batch_size=64, gamma=0.99,\n\t\ttau=1e-3, lr=5e-4, update_every=4, network=\"dqn\"\n\t):\n\t\t\n\t\t# Params\n\t\tself.state_size = state_size\n\t\tself.action_size = action_size\n\t\tself.seed = random.seed(seed)\n\t\t\n\t\t# Hyperparameters\n\t\tself.gamma = gamma\n\t\tself.tau = tau\n\t\tself.update_every = update_every\n\t\t\n\t\t# Policy Network\n\t\tself.policy_network = DQN(state_size, action_size, seed).to(device)\n\t\n\t\t# Target Network\n\t\tself.target_network = DQN(state_size, action_size, seed).to(device)\n\t\t# Optimizer\n\t\tself.optimizer = optim.Adam(self.policy_network.parameters(), lr=lr)\n\t\t\n\t\t# Experience replay\n\t\tself.memory = Memory(action_size, seed, buffer_size, batch_size)\n\t\t\n\t\tself.time_step = 0\n\t\tself.loss = 0", "title": "" }, { "docid": "e82740f121f04efffaa2807086fe4bc2", "score": "0.5883267", "text": "def __init__(self, state_space, action_space, train_device='cpu',\n in_channels=1):\n super().__init__()\n self.train_device = train_device\n self.state_space = state_space\n # bp()\n self.name = 'ActorCNN'\n self.flat_state_space = self.state_space[0] * self.state_space[1] * self.state_space[2]\n self.action_space = action_space\n self.hidden = 100\n self.state_value = 0\n self.cnn_0 = torch.nn.Conv2d(in_channels=in_channels, out_channels=10, kernel_size=3)\n new_size = self.state_space[1:] - self.cnn_0.kernel_size + 1\n # size: kernel_size + 1\n self.cnn_1 = torch.nn.Conv2d(in_channels=10, out_channels=5, kernel_size=3)\n new_size = new_size - self.cnn_1.kernel_size + 1\n # size: kernel_size + 1\n # 28 x 28\n flat_size = new_size[0] * new_size[1] * self.cnn_1.out_channels\n self.fc_0 = torch.nn.Linear(in_features=flat_size, out_features=self.hidden)\n self.fc_1 = torch.nn.Linear(in_features=self.hidden, out_features=self.action_space)\n self.softmax = torch.nn.Softmax(dim=1)", "title": "" }, { "docid": "866f74952376e847291d3f5c0a74a00f", "score": "0.58739775", 
"text": "def __init__(self, state_space_size: int, action_space_size: int, **kwargs: Any):\n self.quality_table = np.zeros((state_space_size, action_space_size))\n self.learning_rate = kwargs.get(\"learning_rate\", 0.9)\n self.gamma = kwargs.get(\"gamma\", 0.9)", "title": "" }, { "docid": "86cca404ef0f66cb8aeddc2e1a86b18d", "score": "0.5869792", "text": "def init_network(self):\n self.net = network.Network([784, 30, 10])\n return(\"made a network yo\")", "title": "" }, { "docid": "04e39bede6351bcd697a9ad4ffc509c3", "score": "0.5867355", "text": "def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):\n super(GeneralizedQNetwork, self).__init__(state_size, action_size, seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, action_size)\n self.optimizer = optim.Adam(self.parameters(), lr=LR)\n self.loss = nn.MSELoss()\n self.fc1.weight.data.uniform_(*initialize_weights(self.fc1))\n self.fc2.weight.data.uniform_(*initialize_weights(self.fc2))\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n self.to(device)", "title": "" }, { "docid": "b0280e1c3b8ec6daa19973dae281c74f", "score": "0.58562523", "text": "def initialise(self):\n #*** Reinitialise the neurons:\n self.layer1 = NeuronLayer(self.logger, self.input_neurons, self.input_variables)\n self.layer2 = NeuronLayer(self.logger, 1, self.input_neurons)\n self.neural_network = NeuralNetwork(self.logger, self.layer1, self.layer2)", "title": "" }, { "docid": "b46a18e693d1b76eecc314e2c1390714", "score": "0.58535457", "text": "def __init__(\n self,\n value_network,\n action_space,\n eps,\n eps_decay,\n batch_size,\n learning_rate,\n discount_factor,\n polyak_tau=1.,\n double=False,\n cuda=False\n ):\n self.value_network = value_network\n self.target_network = copy.deepcopy(value_network)\n self.eps = eps\n self.eps_decay = eps_decay\n self.batch_size = batch_size\n self.discount_factor = discount_factor\n self.tau = polyak_tau\n self.double = double\n self.cuda = cuda\n\n assert isinstance(action_space, Discrete), \\\n \"Action space has to be discrete\"\n\n self.action_space = action_space\n\n if cuda:\n self.value_network.cuda()\n self.target_network.cuda()\n\n ##########################################################################\n ######## TASK 2 ########\n ##########################################################################\n # Define a loss (Huber loss is preferred) and Adam optimizer: #\n self.criterion = None\n\n self.optimizer = None\n ##########################################################################\n ######## TASK 2 ########\n ##########################################################################", "title": "" }, { "docid": "fc92139e8eaac1bb61bc370a1563d7a9", "score": "0.5840321", "text": "def _build_network(self):\n # self._observation = tf.placeholder(shape=[None, *self._dim_obs], dtype=tf.uint8, name=\"observation\")\n # self._observation = tf.to_float(self._observation) / 255.0\n # self._observation = tf.placeholder(dtype=tf.float32, shape=[None, *self._dim_obs], name=\"observation\")\n self._observation = self._obs_fn()\n self._action = tf.placeholder(dtype=tf.int32, shape=[None], name=\"action\")\n self._reward = tf.placeholder(dtype=tf.float32, shape=[None], name=\"reward\")\n self._done = tf.placeholder(dtype=tf.float32, shape=[None], name=\"done\")\n # self._next_observation = tf.placeholder(dtype=tf.uint8, shape=[None, *self._dim_obs], name=\"next_observation\")\n # self._next_observation = 
tf.to_float(self._next_observation) / 255.0\n # self._next_observation = tf.placeholder(dtype=tf.float32, shape=[None, *self._dim_obs], name=\"next_observation\")\n self._next_observation = self._obs_fn()\n\n with tf.variable_scope(\"main/qnet\"):\n self._qvals = self._value_fn(self._observation)\n\n self._target_qvals = []\n for i in range(self._n_net):\n with tf.variable_scope(f\"target_{i}/qnet\"):\n self._target_qvals.append(self._value_fn(self._next_observation))", "title": "" }, { "docid": "0b91379f4d81884d6c3b97accd13501e", "score": "0.5803112", "text": "def __init__(self, state_shape, n_actions, epsilon=0):\r\n nn.Module.__init__(self)\r\n self.epsilon = epsilon\r\n self.n_actions = n_actions\r\n img_c, img_w, img_h = state_shape\r\n\r\n self.conv1 = nn.Conv2d(4, 16, kernel_size=3, stride=2)\r\n self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)\r\n self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)\r\n self.fc1 = nn.Linear(64 * 7 * 7, 256)\r\n self.fc2 = nn.Linear(256, self.n_actions)", "title": "" }, { "docid": "866e7a9e34b104d659adf567bd3c3e4c", "score": "0.5802942", "text": "def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):\n super(DuelingDQNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n\n self.fc3_value = nn.Linear(fc2_units, 1)\n\n self.fc3_advantage = nn.Linear(fc2_units, action_size)", "title": "" }, { "docid": "f385f2fe821cf13c104cb3f6cd894f53", "score": "0.5799775", "text": "def configure_gym(self):\n\n self.action_space = self.env.action_space\n self.observation_space = self.env.observation_space\n\n # print(\"Observation Space: \", self.env.observation_space)\n # print(\"Action Space: \", self.env.action_space)\n\n\n # think later about adding random seed, pros vs cons\n # self.env.seed(random_seed)", "title": "" }, { "docid": "da98243ec07e0a005edb444bc3aebbfe", "score": "0.5799338", "text": "def __init__(self, num_param, action_dim):\r\n self.num_param = num_param\r\n self.action_dim = action_dim", "title": "" }, { "docid": "19a1d718ee4540615ae6f3b9f634f304", "score": "0.57912964", "text": "def init_run(self):\n \"\"\"\n State dimension: \n * (x,y) for snake's head position => 2\n * {S,W,N,E} for snake's head direction => 1 (converted to integer)\n * X*Y for board positions\n \"\"\"\n global NUM_ACTIONS\n state_dim = 3 + int(np.prod(self.board_size))\n self.augment_data = True\n self._device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.agent = Agent(in_dim=state_dim, out_dim=NUM_ACTIONS, gamma=self.gamma,\n replay_memory_capacity=self.max_capacity,\n learning_rate=self.learning_rate,\n policy=make_q_values_policy(),\n NNModel=LinearModel,\n device=self._device)", "title": "" }, { "docid": "08326cb682396251b6bdffdc486ec536", "score": "0.57764477", "text": "def __init__(self,\n num_actions,\n input_type='vector',\n input_feature=None,\n input_img_size=None):\n super(QNet, self).__init__()\n self.num_actions = num_actions\n self.input_type = input_type\n self.input_feature = input_feature\n self.input_img_size = input_img_size\n\n self.createLayers()", "title": "" }, { "docid": "3ade50299a3c74d95fbe68de30f01688", "score": "0.5776066", "text": "def __init__(self, discrete_dim, discrete_range, environment):\n n = len(discrete_dim)\n discrete_space = []\n for i in range(n):\n high, low = discrete_range[i] # get upper and lower bounds\n dim = discrete_dim[i] # output dimension\n interval = (high-low)/dim # calculate 
interval\n # create a list of upper thresholds for each discretized value\n space = [low + (j+1)*interval for j in range(dim)] \n discrete_space.append(space)\n \n self.n_var = n\n self.discrete_space = discrete_space\n self.discrete_dim = discrete_dim\n self.env = environment\n self.n_A = environment.action_space.shape[0]\n self.Q = np.zeros(discrete_dim+(self.n_A,))", "title": "" }, { "docid": "0c755f8ec5c3bb427b3d3fe259714add", "score": "0.57745653", "text": "def __init__(self, state_size, action_size, num_agents, num_all_agents, seed,\n batch_size, buffer_size=int(1e6), gamma=0.99, tau=1e-3, lr_actor=4e-4,\n lr_critic=4e-4, weight_decay=0, discrete_actions=False):\n random.seed(seed)\n\n self.state_size = state_size\n self.action_size = action_size\n self.num_agents = num_agents\n self.num_all_agents = num_all_agents\n self.batch_size = batch_size\n self.gamma = gamma\n self.tau = tau\n\n self.noise = OUNoise(action_size, seed)\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, seed, use_batch_norm_layers=False).to(device)\n self.actor_target = Actor(state_size, action_size, seed, use_batch_norm_layers=False).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_actor)\n\n # Critic Network (w/ Target Network)\n if discrete_actions:\n action_size = 1\n self.critic_local = Critic(state_size * num_all_agents, action_size * num_all_agents, seed).to(device)\n self.critic_target = Critic(state_size * num_all_agents, action_size * num_all_agents, seed).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_critic, weight_decay=weight_decay)", "title": "" }, { "docid": "54d194cfa3dcd9766655f71db429523e", "score": "0.57720256", "text": "def __init__(self, state_size:int, action_size:int):\n super(QNetwork, self).__init__()\n # self.seed = torch.manual_seed(seed)\n self.l = nn.Sequential(\n nn.Linear(state_size, 64),\n nn.ReLU(True),\n nn.Linear(64, 64),\n nn.ReLU(True),\n nn.Linear(64, action_size)\n )", "title": "" }, { "docid": "58ea1fbe555dcc52b486f6cf2449f46b", "score": "0.5770407", "text": "def __init__(self, state_size, action_size, fc_units=None):\n super(Critic, self).__init__()\n if fc_units is None:\n fc_units = [512, 256]\n self.fc1 = nn.Linear(state_size, fc_units[0])\n self.fc2 = nn.Linear(fc_units[0] + action_size, fc_units[1])\n self.fc3 = nn.Linear(fc_units[1], 1)\n self.reset_parameters()", "title": "" }, { "docid": "43b918e6182d4ad35a2ac7df67ee10f4", "score": "0.5754926", "text": "def __init__(self, state_size, action_size, action_low, action_high, SRS_DropoutFactor, SRS_NnDensity):\n self.state_size = state_size\n self.action_size = action_size\n self.action_low = action_low\n self.action_high = action_high\n self.action_range = self.action_high - self.action_low\n\n # Initialize any other variables here\n self.DropoutFactor = SRS_DropoutFactor\n self.NnDensity = int(SRS_NnDensity)\n \n self.build_model()", "title": "" }, { "docid": "a5f108af1b840d12f2881401a5645e34", "score": "0.5754366", "text": "def __init__(self, n_actions, state_size):\n raise NotImplementedError(\"Environment Constructor not implemented!\")", "title": "" }, { "docid": "57018c203ff0e30382aebbb955c181b6", "score": "0.5752365", "text": "def __init__(self, state_size, action_size, max_action, random_seed):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n\n # Actor Network (w/ Target Network)\n self.actor = Actor(state_size, action_size, max_action, 
random_seed).to(device)\n self.actor_target = Actor(state_size, action_size, max_action, random_seed).to(device)\n self.actor_target.load_state_dict(self.actor.state_dict())\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=LR_ACTOR)\n\n # Critic Network (w/ Target Network)\n self.critic = Critic(state_size, action_size, random_seed).to(device)\n self.critic_target = Critic(state_size, action_size, random_seed).to(device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)\n\n self.max_action = max_action\n\n # # Noise process\n # self.noise = OUNoise(action_size, random_seed)\n #\n # # Replay memory\n # self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)", "title": "" }, { "docid": "6bd45596f2d351ae9508936715538da7", "score": "0.5750745", "text": "def __init__(self):\n self.action_space = list(permutations([i for i in range(m)], 2)) + [(0,0)]\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n self.state_init = random.choice(self.state_space)\n self.action_init = random.choice(self.action_space)\n\n # Start the first round\n self.reset()", "title": "" }, { "docid": "0d9dac7b3ddf2d115c67861dbc555b9f", "score": "0.57414794", "text": "def __init__(self, state_size, action_size, random_seed, number_of_agents):\n\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n self.number_of_agents = number_of_agents\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, random_seed).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=CONFIG['LR_ACTOR'])\n self.actor_target = Actor(state_size, action_size, random_seed).to(device)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(self.state_size * self.number_of_agents, self.action_size * self.number_of_agents,\n random_seed).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=CONFIG['LR_CRITIC'],\n weight_decay=CONFIG['WEIGHT_DECAY'])\n self.critic_target = Critic(self.state_size * self.number_of_agents, self.action_size * self.number_of_agents,\n random_seed).to(device)", "title": "" }, { "docid": "b2fd7ea0bb560170f9b4a86e2089627e", "score": "0.5739545", "text": "def __init__(self, in_shape, num_actions, learn_rate):\n self.batch_size = 32\n self.discount = 0.99\n self.exp_rate = 1.0\n self.min_exp_rate = 0.001\n self.decay_steps = 20000\n self.decay_delta = (self.exp_rate - self.min_exp_rate) / self.decay_steps\n self.state_shape = in_shape\n self.num_actions = num_actions\n self.learn_rate = learn_rate\n self.sync_rate = 50\n\n self.mem = deque()\n self.max_mem = 50000\n\n self.q = dqn(self.state_shape, self.num_actions, self.learn_rate)\n self.target = dqn(self.state_shape, self.num_actions, self.learn_rate)", "title": "" }, { "docid": "25ec74b4d88d40a5ea3ceb8e4a7cfb11", "score": "0.57388043", "text": "def __init__(self, state_size, action_size, random_seed, hidden_layers=[64, 64]):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n\n\n # Q-Network\n model_params = [state_size, action_size, random_seed, hidden_layers]\n self.qnetwork_local = QNetwork(*model_params).to(device)\n self.qnetwork_target = QNetwork(*model_params).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(),\n lr=LR)\n\n # Replay memory\n self.memory = 
ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed, device)\n # Initialize time step (for updating every self.update_every steps)\n self.t_step = 0", "title": "" }, { "docid": "3ea92adb2f8671a0d31963cb067786fc", "score": "0.5736158", "text": "def __init__(self, state_size, action_size, seed,\n fc1_units=32,\n fc2_units=64,\n fc3_units=128,\n fc4_units=64,\n fc5_units=32):\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc1_bn = nn.BatchNorm1d(fc1_units)\n self.fc1_dropout = nn.Dropout(0.2)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc2_bn = nn.BatchNorm1d(fc2_units)\n self.fc2_dropout = nn.Dropout(0.2)\n self.fc3 = nn.Linear(fc2_units, fc3_units)\n self.fc3_bn = nn.BatchNorm1d(fc3_units)\n self.fc3_dropout = nn.Dropout(0.2)\n self.fc4 = nn.Linear(fc3_units, fc4_units)\n self.fc4_bn = nn.BatchNorm1d(fc4_units)\n self.fc4_dropout = nn.Dropout(0.3)\n self.fc5 = nn.Linear(fc4_units, fc5_units)\n self.fc5_bn = nn.BatchNorm1d(fc5_units)\n self.fc5_dropout = nn.Dropout(0.3)\n self.fc6 = nn.Linear(fc5_units, action_size)", "title": "" }, { "docid": "80abf1a966977a2c5ca81bd940e0f879", "score": "0.5733312", "text": "def __init__(self, num_inputs, num_actions):\n super().__init__()\n self.layers = nn.Sequential(\n nn.Linear(num_inputs, 128),\n nn.ReLU(),\n nn.Linear(128, 128),\n nn.ReLU(),\n nn.Linear(128, num_actions)\n )", "title": "" }, { "docid": "e4d6e9dfafab8ced6cc9ee12eff1d586", "score": "0.5729422", "text": "def initialize(self, env_spaces, share_memory=False,\n global_B=1, env_ranks=None):\n super().initialize(env_spaces, share_memory,\n global_B=global_B, env_ranks=env_ranks)\n self.q_model = self.QModelCls(**self.env_model_kwargs,\n **self.q_model_kwargs)\n if self.initial_q_model_state_dict is not None:\n self.q_model.load_state_dict(self.initial_q_model_state_dict)\n self.target_model = self.ModelCls(**self.env_model_kwargs,\n **self.model_kwargs)\n self.target_q_model = self.QModelCls(**self.env_model_kwargs,\n **self.q_model_kwargs)\n self.target_q_model.load_state_dict(self.q_model.state_dict())\n assert len(env_spaces.action.shape) == 1\n self.distribution = Gaussian(\n dim=env_spaces.action.shape[0],\n std=self.action_std,\n noise_clip=self.action_noise_clip,\n clip=env_spaces.action.high[0], # Assume symmetric low=-high.\n )", "title": "" }, { "docid": "d5a1ef883ffd46cf2637ceb76b288d78", "score": "0.5727327", "text": "def build_graph(num_actions):\n s, q_values, st, target_q_values, reset_target_network_params, a, y, grad_update = build_models(num_actions)\n\n graph_ops = {\"s\": s,\n \"q_values\": q_values,\n \"st\": st,\n \"target_q_values\": target_q_values,\n \"reset_target_network_params\": reset_target_network_params,\n \"a\": a,\n \"y\": y,\n \"grad_update\": grad_update\n }\n\n return graph_ops", "title": "" }, { "docid": "2b27b585f198d42602da647639258626", "score": "0.57264316", "text": "def __init__(self, state_size:int, action_size:int):\n super(QNetworkD, self).__init__()\n self.l = nn.Sequential(\n nn.Linear(state_size, 64),\n nn.ReLU(True),\n nn.Linear(64, 128),\n nn.ReLU(True),\n nn.Linear(128, 64),\n nn.ReLU(True)\n )\n self.stream_state = nn.Linear(32, 1)\n self.stream_advantage = nn.Linear(32, action_size)", "title": "" }, { "docid": "41fe381ce721fc0541f05e04d6d3bf78", "score": "0.57223666", "text": "def _init_graph(self):\n build_dqn_params = {\n \"n_layers\": self.n_layers,\n \"n_neurons\": self.n_neurons,\n \"activation\": self.activation,\n \"dueling\": 
self.dueling_dqn,\n \"value_neurons\": self.value_neurons,\n \"advantage_neurons\": self.advantage_neurons\n }\n\n self.session = tf.Session()\n with tf.variable_scope(self.name):\n\n self.states_ph = tf.placeholder(tf.float64, shape=(None,) + self.obs_shape, name=\"states_ph\")\n self.actions_ph = tf.placeholder(tf.int64, shape=(None,) + self.action_shape, name=\"actions_ph\")\n self.rewards_ph = tf.placeholder(tf.float64, shape=(None, 1), name=\"rewards_ph\")\n self.next_states_ph = tf.placeholder(tf.float64, shape=(None,) + self.obs_shape, name=\"next_states_ph\")\n self.dones_ph = tf.placeholder(tf.bool, shape=(None, 1), name=\"dones_ph\")\n\n with tf.variable_scope(\"online\"):\n self.online_qvalues = build_dqn(self.states_ph, self.n_actions, **build_dqn_params)\n\n if self.use_target_network:\n with tf.variable_scope(\"target\"):\n self.target_qvalues = build_dqn(self.states_ph, self.n_actions, **build_dqn_params)\n\n with tf.variable_scope(\"target\", reuse=True):\n self.next_target_qvalues = build_dqn(self.next_states_ph, self.n_actions, **build_dqn_params)\n\n if self.double_dqn:\n # when using DDQN, use the action selected by online network, but its corresponding\n # value predicted by the target network\n with tf.variable_scope(\"online\", reuse=True):\n self.next_online_qvalues = build_dqn(self.next_states_ph, self.n_actions, **build_dqn_params)\n\n self.next_action = tf.argmax(self.next_online_qvalues, axis=1)\n self.next_action_one_hot = tf.one_hot(self.next_action, self.n_actions, dtype=self.next_target_qvalues.dtype)\n self.next_action_qvalue = tf.reduce_sum(self.next_target_qvalues * self.next_action_one_hot, axis=1, keepdims=True)\n\n else: # no double dqn, use only the target network to predict next state value\n self.next_action_qvalue = tf.reduce_max(self.next_target_qvalues, keepdims=True, axis=1)\n\n else: # no target network, do everything with the online network\n with tf.variable_scope(\"online\", reuse=True):\n self.next_online_qvalues = build_dqn(self.next_states_ph, self.n_actions, **build_dqn_params)\n self.next_action_qvalue = tf.reduce_max(self.next_online_qvalues, axis=1, keepdims=True, name=\"next_action_qvalue\")\n # self.next_online_qvalues = tf.identity(self.next_online_qvalues, name=\"next_online_qvalues\")\n\n self.targets = self.rewards_ph + self.gamma * self.next_action_qvalue\n self.target = tf.stop_gradient(tf.where(self.dones_ph, x=self.rewards_ph, y=self.targets), name=\"target\")\n # select the predicted q-values for the taken actions\n self.action_one_hot = tf.one_hot(self.actions_ph, self.n_actions, dtype=tf.float64, name=\"action_one_hot\")\n self.qvalues_for_actions = tf.reduce_sum(self.online_qvalues * self.action_one_hot, axis=1, keepdims=True, name=\"qvalues_for_actions\")\n # compute loss\n self.loss = 0.5 * tf.reduce_mean(tf.squeeze(tf.square(self.qvalues_for_actions - self.target)), name=\"loss\")\n\n # update operations of target model (soft or hard)\n self.online_vars = tf.trainable_variables(scope=self.name + \"/online\")\n\n if self.use_target_network:\n self.target_vars = tf.trainable_variables(scope=self.name + \"/target\")\n\n self.hard_update_ops = [\n tf.assign(target_var, online_var)\n for target_var, online_var in zip(self.target_vars, self.online_vars)\n ]\n\n if self.tau < 1:\n self.soft_update_ops = [\n tf.assign(target_var, self.tau * online_var + (1 - self.tau) * target_var)\n for target_var, online_var in zip(self.target_vars, self.online_vars)\n ]\n\n # minimize the loss, possibly after clipping gradients\n 
self.optimizer = self.tf_optimizers[self.optimization](learning_rate=self.learning_rate)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss, self.online_vars)\n if self.gradient_clip is not None:\n a_min, a_max = self.gradient_clip\n self.grads_and_vars = [(tf.clip_by_value(grad, a_min, a_max), var) for grad, var in self.grads_and_vars]\n self.train_op = self.optimizer.apply_gradients(self.grads_and_vars)\n\n # write to tensorboard\n if self.log:\n self.total_reward = tf.placeholder(tf.float64, shape=())\n self.mean_reward = tf.placeholder(tf.float64, shape=())\n total_reward_summary = tf.summary.scalar(\"total_episode_reward\", self.total_reward)\n mean_reward_summary = tf.summary.scalar(\"mean_episode_reward\", self.mean_reward)\n self.episode_summary = tf.summary.merge([total_reward_summary, mean_reward_summary])\n loss_summary = tf.summary.scalar(\"loss\", self.loss)\n qvalue_summary = tf.summary.histogram(\"qvalues\", self.online_qvalues)\n action_summary = tf.summary.scalar(\"prop_no_relocation\",\n tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(self.actions_ph), 0), tf.float64))\n )\n self.step_summary = tf.summary.merge([loss_summary, qvalue_summary, action_summary])\n self.summary_writer = tf.summary.FileWriter(self.logdir, self.session.graph)\n\n self.session.run(tf.global_variables_initializer())", "title": "" }, { "docid": "a35a8af576dfc1e22d60e3ef9524862d", "score": "0.5721131", "text": "def __init__(self, input_dimension):\r\n self.input_dimensions = input_dimension\r\n self.weights = list()\r\n self.bias = list()\r\n self.layers_activation = list()\r\n self.no_of_nodes= list()", "title": "" }, { "docid": "275b774081c54ea95649d6ba9cb1e573", "score": "0.57099533", "text": "def init_variables(self):\n self.imgs = tf.placeholder(tf.float32,self.batch_shape,name='input_imgs')\n self.gt_3d = tf.placeholder(tf.float32,[self.batch_size,self.n_joints*3],name='ground_truth_joints')\n self.dropout_prob = tf.placeholder(tf.float32,name='drouput_keep_probability')\n self.conv_layers,self.fc_layers = [],[]", "title": "" }, { "docid": "c29971b77ea16d21cfadd194d9c2de8e", "score": "0.57088417", "text": "def __init__(self, state_size, action_size, lr, network):\n self.state_size = state_size\n self.action_size = action_size\n self.lr = lr\n self.network = network\n self.build_model()", "title": "" }, { "docid": "be7ac103163422974ee81ad26efa4655", "score": "0.5698926", "text": "def connect_env(self,environment,agentIndex,allAgents):\n super(DQNAgent, self).connect_env(environment,agentIndex,allAgents)\n self.environmentActions = environment.possible_actions()\n self.countReplayActions = np.zeros(len(self.environmentActions))\n #self.graph = tf.Graph()\n #self.session = keras.backend.get_session()\n #with self.session.as_default():\n # with self.graph.as_default():\n self.build_network()\n\n self.update_target()\n\n if self.loadWeights:\n self.load_weights(self.loadStep)", "title": "" }, { "docid": "9594464292ceae4aee5d3dbe693254c7", "score": "0.5694376", "text": "def __init__(self, num_param, action_dim):\n self.num_param = num_param\n self.action_dim = action_dim\n self.param_dim = int(num_param*action_dim)", "title": "" }, { "docid": "8572e4adfb10d6cd01d2a14eb19e751b", "score": "0.5685396", "text": "def __init__(self, state_size, action_size, seed, fc1_units=254, fc2_units=128,fc3_units=64,fc4_units=32,fc5_units=16):\n\n super(QNetwork, self).__init__()\n\n self.seed = torch.manual_seed(seed)\n\n self.fc1 = nn.Linear(state_size, fc1_units)\n\n self.fc2 = nn.Linear(fc1_units, 
fc2_units)\n\n #self.fc3 = nn.Linear(fc2_units,fc3_units)\n\n #self.fc4 = nn.Linear(fc3_units, fc4_units)\n\n #self.fc5 = nn.Linear(fc4_units, fc5_units)\n\n #self.fc6 = nn.Linear(fc5_units, action_size)\n self.fc3 = nn.Linear(fc2_units, action_size)", "title": "" }, { "docid": "9bce355cd0ea4cc4579beda254628909", "score": "0.56845057", "text": "def __init__(self, state_size, action_size, fc_units=None):\n super(Actor, self).__init__()\n if fc_units is None:\n fc_units = [512, 256]\n self.fc1 = nn.Linear(state_size, fc_units[0])\n self.fc2 = nn.Linear(fc_units[0], fc_units[1])\n self.fc3 = nn.Linear(fc_units[1], action_size)\n self.reset_parameters()", "title": "" }, { "docid": "b50b56d2700a573a2cccf9853b511db1", "score": "0.56830025", "text": "def __init__(self):\n self.space = []\n\n self.access_variables = []\n self.global_variables = []\n self.variables = []\n\n self.access_constraints = []\n self.global_constraints = []\n self.constraints = []", "title": "" }, { "docid": "5895e196de0ef1356a5b2456429ca74a", "score": "0.5682005", "text": "def init_opt(self):\n observation_dim = self.policy.observation_space.flat_dim\n action_dim = self.policy.action_space.flat_dim\n with tf.name_scope('inputs'):\n self._observation = tf.compat.v1.placeholder(\n tf.float32, shape=[None, observation_dim], name='observation')\n self._action = tf.compat.v1.placeholder(tf.float32,\n shape=[None, action_dim],\n name='action')\n self._returns = tf.compat.v1.placeholder(tf.float32,\n shape=[None],\n name='return')\n policy_dist = self.policy.build(self._observation, name='policy').dist\n with tf.name_scope('loss'):\n ll = policy_dist.log_prob(self._action, name='log_likelihood')\n loss = -tf.reduce_mean(ll * self._returns)\n with tf.name_scope('train'):\n self._train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(\n loss)", "title": "" } ]
64077ca2c30dd59cbbb080a41f79cfe4
Reload the index table. Useful if the file is being written to while being read.
[ { "docid": "dd998432bd98573535cc5d0c24862dd6", "score": "0.6675373", "text": "def reload(self):\n self._scan_file(self.filesize, self.n_entries, self.fpos)", "title": "" } ]
[ { "docid": "56cff3ec649cd56bb356b38988b314b4", "score": "0.7886104", "text": "def reload_index(self, index_dir):\n print(\"Reloading Index...\")\n self.index.loadIndex(index_dir, load_data=True)", "title": "" }, { "docid": "604659cd191b4a69894922483f31180e", "score": "0.7648562", "text": "def reindex(self):\n self._open_db()\n self.db.reindex()", "title": "" }, { "docid": "d901e9323636cde818da6d122fbb7a27", "score": "0.7347745", "text": "def reindex(self):\n self._index()", "title": "" }, { "docid": "ae0a71925f9973936534814105089447", "score": "0.72026277", "text": "def reindex(self):\n pass", "title": "" }, { "docid": "27d591b40f74889d1c05b1a3dfa27a1e", "score": "0.70192176", "text": "def update_index(self):\n logging.debug('Start reading the index from the hdf file')\n with h5py.File(self.filename, 'r') as db:\n self._index = {}\n for name, dataset in db.iteritems():\n if dataset.attrs.get('deleted', False):\n continue\n args = json.loads(dataset.attrs['args'])\n kwargs = json.loads(dataset.attrs['kwargs'])\n key = self.get_key(args, kwargs)\n if key in self._index:\n logging.warn('Database contains key `%s` more than once.',\n key)\n self._index[key] = name\n logging.debug('Found %d items in the hdf file', len(self))", "title": "" }, { "docid": "1f1197fa71db18c33e0319a3c7ca33e2", "score": "0.68565935", "text": "def reload(self):\n self.__ix_searcher = self.__ix_searcher.refresh() # no need to obtain new one with self.indexer.searcher()\n self.__ix_writer = None", "title": "" }, { "docid": "8675be157fe14059d36fbe5b3dd699d7", "score": "0.6745704", "text": "def reindex_and_refresh(self):\n from kitsune.search.es_utils import es_reindex_cmd\n es_reindex_cmd()\n self.refresh(run_tasks=False)", "title": "" }, { "docid": "5e1e6ac4f5c095cc710f5338994b8035", "score": "0.66744506", "text": "def reindex(self):\n return self.db.reindex(self.layout)", "title": "" }, { "docid": "c3ea7a7944234847d0e5437b21ae0f89", "score": "0.6661895", "text": "def reindex(indexes=None):", "title": "" }, { "docid": "656b5383898c3b841b06f864d8aa5f1d", "score": "0.6603855", "text": "def reload(self):\n print(\"Reloading fast text model and index...\")\n self.reload_model(self.fasttext_model_path)\n self.reload_index(self.index_path)\n print(\"Reloading complete.\")", "title": "" }, { "docid": "ea43de67921084dbe9642ea02b5cad7c", "score": "0.6584024", "text": "def update_index(self):", "title": "" }, { "docid": "8ab29617adf35f78c36fb57b6c90ab2e", "score": "0.6556676", "text": "def refresh_index():\n get_conn().indices.refresh(index=settings.ELASTICSEARCH_INDEX)", "title": "" }, { "docid": "78b7ac45afa482c3becc44063bf782ac", "score": "0.6524983", "text": "def flush(self):\r\n if hasattr(self._index, 'flush'):\r\n getattr(self._index, 'flush')()\r\n elif self._stype == 'rtree' and \\\r\n self._filename:\r\n self._index.close()\r\n self._index = RIndex(self._filename)\r\n else:\r\n return False\r\n return True", "title": "" }, { "docid": "97cd61ac655c947595796e14c76e0914", "score": "0.6501427", "text": "def reload(self):\n msg = ''\n for table_name in self._table_names:\n table = self.__dict__[table_name]\n table.reload()\n msg += \"%d %s, \" % (len(table), table_name)\n log.debug(\"%s reloaded\" % msg)", "title": "" }, { "docid": "6292526ccf7c2b9dbc11cf3c077adfe0", "score": "0.6497824", "text": "def reindex(self): \n self.mds_dict = self.make_index(self.mds_basedir)", "title": "" }, { "docid": "5984c7213cd4fad729874f2ed816ac8c", "score": "0.6424305", "text": "def maintain(self):\n for table in self.store.keys():\n 
self.store.create_table_index(table, optlevel=9, kind='full')", "title": "" }, { "docid": "4d60e55ed7aaba996697a60c83588bed", "score": "0.6392108", "text": "def reindex():\n from .models import whooshee\n\n whooshee.reindex()\n click.echo(\"Index created for models.\")", "title": "" }, { "docid": "5df6daf0f6c8d3f57255271e5fba9059", "score": "0.63417375", "text": "async def reindex(self) -> None:\n if isinstance(self.config.database, PostgresDatabaseConfig):\n exclude_expression = ''\n if self.config.database.immune_tables:\n immune_tables = [f\"'{t}'\" for t in self.config.database.immune_tables]\n exclude_expression = f' AND tablename NOT IN ({\",\".join(immune_tables)})'\n\n async with in_transaction() as conn:\n await conn.execute_script(\n f'''\n DO $$ DECLARE\n r RECORD;\n BEGIN\n FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = current_schema(){exclude_expression}) LOOP\n EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE';\n END LOOP;\n END $$;\n '''\n )\n else:\n await Tortoise._drop_databases()\n await self.restart()", "title": "" }, { "docid": "5a6adef4dd3dcf6356a7641727e80d79", "score": "0.628733", "text": "def reload_from_file(self):\n msg, self._saved_tables = fh.read_saved_tables_array()\n return msg", "title": "" }, { "docid": "1d0d3e8cf23d2c546e728e6e00bb252f", "score": "0.6258193", "text": "def ReloadFile(self):\r\n pass", "title": "" }, { "docid": "5183f6ff2dca8c2a8516c55c91acb205", "score": "0.62543225", "text": "def reset_file_index_cache() -> None:\n create_file_index_for_dwd_server.cache_clear()", "title": "" }, { "docid": "f29df5fe6ec56f70547850fb6a3ed17f", "score": "0.6233192", "text": "def load(self, index_file):\n self.__load_index(index_file=index_file)", "title": "" }, { "docid": "3c6c33b917580bffc3b3857f428cc916", "score": "0.61953944", "text": "def reload(self):\n self._reader.reload()", "title": "" }, { "docid": "1a6f91f5e8c33ac0e68d431ce082c6a8", "score": "0.61648446", "text": "def recreate_index():\n clear_index()\n index_program_enrolled_users(ProgramEnrollment.objects.iterator())", "title": "" }, { "docid": "e61c8ab70aa306539a3ff1dfbd3eb61a", "score": "0.61570114", "text": "def load_index(self, fn):\n self._indexer.load_index(fn)", "title": "" }, { "docid": "116d8715473b6ab6c65a0c705dbfe910", "score": "0.61549854", "text": "def refresh(self):\n self.client.indices.refresh(index=self.model.search_objects.mapping.index)", "title": "" }, { "docid": "09a685259def8496b85bbd73fca8a558", "score": "0.6154814", "text": "def reset_index(self):\r\n\r\n self._index.reset_index()", "title": "" }, { "docid": "ba23b8d2650e0cb3d2395b29df199a05", "score": "0.6150005", "text": "def index(index_filename=\"output/index\",\n database_filename=\"output/database/database_index.csv\",\n intermediate=\"output/intermediate\"):\n\n # 1. Load the JSON file that contains the wrfout files that we are indexing\n import json\n import xarray\n import pandas as pd\n from tqdm import tqdm\n\n\n filepaths = json.load(open(index_filename, \"r\"))\n old_df = _index_one_file(None)\n old_mtime = 0\n\n if os.path.exists(database_filename):\n old_df = pd.read_csv(database_filename, index_col=0)\n old_mtime = os.path.getmtime(database_filename)\n result = []\n # counter=0\n # 2. 
Loop through the wrfout files\n for filename in tqdm(filepaths.get(\"wrfout\", [])):\n ls_filename = os.path.join(intermediate,os.path.basename(filename)+\".ls\")\n if old_mtime > os.path.getmtime(ls_filename) and ls_filename in old_df[\"Filename\"]:\n print(\"Found unmodified data\")\n continue\n # 2.a Load the file\n myr = _index_one_file(ls_filename, source_filename=filename)\n result.append(myr)\n df = pd.concat(result)\n \n # 3. Check for the old database file\n # 4. Compare the two database files\n update_file = True\n\n if len(old_df):\n new_df = pd.concat([old_df, df])\n new_df = new_df.loc[~new_df.index.duplicated(keep=\"first\")]\n else:\n new_df = df.copy()\n\n if old_df is not None and new_df.index.equals(old_df.index):\n update_file = False\n\n # 5. If the data is modified, write a new file\n\n if update_file:\n print(\"Updating database index file\")\n directory = os.path.dirname(database_filename)\n if directory is not \"\" and not os.path.exists(directory):\n os.makedirs(directory, exist_ok=True)\n new_df.to_csv(database_filename, index_label=\"Times\")", "title": "" }, { "docid": "dadeeb07ea5ebc1caa3f02de2a1968c5", "score": "0.6148135", "text": "def _clear_file_table(self) -> None:\n self.loaded_files_table.setRowCount(0)\n self.loaded_files_table.setColumnCount(1)", "title": "" }, { "docid": "4638a3f499579bf242588653c3418185", "score": "0.61467594", "text": "def _update_index(self):\n raise NotImplementedError", "title": "" }, { "docid": "e08af37badaf8c6127b878340db98e20", "score": "0.61425626", "text": "def _update_table(self):\n if not os.path.isfile(self._path):\n return\n\n lock = ExclusiveLock(self._path)\n\n with open(self._path, \"r\") as file:\n table = file.readlines()\n\n # Remove trailing empty lines if any\n for item in table[::-1]:\n if len(item.rstrip()) > 0:\n break\n else:\n table.remove(item)\n\n # Get number of table rows\n row_count = len(table)\n\n # Check if table has the same dimentions\n if row_count != self._row_count:\n raise DashboardError(f\"Table read from the existing file '{self._path}' has different number of rows: \"\n f\"{row_count} instead of {self._row_count}.\")\n\n # Update table read from the file with the current table data\n for row in range(self._row_count):\n row_cells = table[row].split()\n\n cell_count = len(row_cells)\n\n if cell_count != self._col_count:\n raise DashboardError(f\"Number of cells mismatch in row={row}, {cell_count} != {self._col_count}\")\n\n for col in range(self._col_count):\n if self._dashboard[row][col] is None:\n self._dashboard[row][col] = row_cells[col].strip()\n\n del lock", "title": "" }, { "docid": "75108e881f62c821005377a9f3186fc7", "score": "0.6107531", "text": "def after_successful_edit(self):\n self.get_laf().regenerate_indexes()", "title": "" }, { "docid": "99c265e8f3fb32a6e07a45349d4a182e", "score": "0.61066204", "text": "def resetindex():\n delete_index()\n upgrade_system()", "title": "" }, { "docid": "8d4e5a284e7ae0833ad2068887272fdc", "score": "0.60664296", "text": "def reindex(self, session=None, **kwargs):\n cmd = SON([(\"reIndex\", self.__name)])\n cmd.update(kwargs)\n with self._socket_for_writes(session) as sock_info:\n return self._command(\n sock_info, cmd, read_preference=ReadPreference.PRIMARY,\n session=session)", "title": "" }, { "docid": "f99de07fea59254b18f7d4a42cfc09b7", "score": "0.60528857", "text": "def load_index(self):\n if not self.index_path:\n return\n if not (self.index and hasattr(self.index, 'from_file')):\n return\n logger.info(\"Loading index from file %s\", 
self.index_path,)\n try:\n index_fh = open(self.index_path, 'rt')\n except Exception as ex:\n logger.error(\"Error reading index file %s - %s\",\n self.index_path, ex)\n return\n try:\n index = NodeTreeIndex.from_file(index_fh)\n except Exception as ex:\n logger.error(\"Error loading index file - %s\", ex)\n return\n finally:\n index_fh.close()\n self.index = index\n logger.info(\"Loaded index from disk\")", "title": "" }, { "docid": "8b4e2f87c96aa537a7ac7a53c931e276", "score": "0.6052008", "text": "def reload():", "title": "" }, { "docid": "2d9d3b9cf9e52f39b99610c856ac0210", "score": "0.60473067", "text": "def refresh(self):\n pass\n # storage_rev = self.storage.repo_revision()\n # index_rev = self.index.get_last_revision()\n # if storage_rev < index_rev:\n # self.storage.reopen()", "title": "" }, { "docid": "e6ba367f3ef9d9e3cf6d09c05c2f58d6", "score": "0.6029717", "text": "def rebuild_database_index():\n db = get_db()\n \n return \"NOT YET IMPLEMENTED\", 501", "title": "" }, { "docid": "33cb7fc363ed3684d038ff13a8c1cab0", "score": "0.6029223", "text": "def reload(self):\n with self._lock_db, self._lock_io:\n with open(self._filename, newline='') as csvfile:\n reader = csv.reader(csvfile)\n fieldnames = next(reader)\n assert fieldnames == ['Filename', 'Label', 'Count'], (\n 'Label database column names must be \"Filename,Label,Count\"')\n\n self._database = collections.OrderedDict()\n self._by_count = collections.defaultdict(set)\n for imgfile, label, count in reader:\n count = int(count)\n self._database[imgfile] = (label, count)\n self._by_count[count].add(imgfile)", "title": "" }, { "docid": "39177647c8ae9bfe3e2a3cf83f71e325", "score": "0.60244477", "text": "def rindex():", "title": "" }, { "docid": "c6e202305dd20c8fd6e272c627948618", "score": "0.6017492", "text": "def reload_data(self):\n self.load_files()", "title": "" }, { "docid": "5f2d2e828455b5adf8fefcd46a40dacb", "score": "0.6006732", "text": "def refresh_file_table(self) -> None:\n self._clear_file_table()\n col_index = 0\n self.loaded_files_table.setRowCount(len(self.model_collection))\n for row_index, (model_id, filename) in enumerate(\n self.model_collection.items()):\n # display base name\n item = QtWidgets.QTableWidgetItem(os.path.basename(filename))\n # store model_id in data and set full file path as tooltip\n item.setData(QtCore.Qt.UserRole, model_id)\n item.setToolTip(filename)\n self.loaded_files_table.setItem(row_index, col_index, item)\n self.loaded_files_table.resizeRowsToContents()\n self.loaded_files_table.resizeColumnsToContents()\n self.loaded_files_table.show()", "title": "" }, { "docid": "eae62e3a5a1c0d75576e737b9393c67b", "score": "0.5991655", "text": "def invalidate_index(self):\n self.search_index.invalidate()", "title": "" }, { "docid": "ca31a766e84a77ef273b3c22cbd11b3e", "score": "0.59856087", "text": "def refresh_entries(self):\n for index, entry in enumerate(self._lines):\n # print('{}'.format(entry.path, index))\n entry.index = index\n self.write_backend()\n self.list_entries()", "title": "" }, { "docid": "e00e16f8c43afe7f728b140dd936097e", "score": "0.5983941", "text": "def reload(self):", "title": "" }, { "docid": "e0372c71275db3a431e9190b8172f9ec", "score": "0.59780496", "text": "def indexingEnd(self):\n self.__ix_writer.commit(optimize = True)\n # self.indexer.optimize() # no need for this with optimize in previous line\n self.reload()", "title": "" }, { "docid": "1499759b30d60de6e5723e3aa531cd22", "score": "0.59766686", "text": "def reload(self):\n self.load_raw_data(self.metadata[\"fname\"],\n 
*self.metadata[\"load_raw_data_args\"],\n **self.metadata[\"load_raw_data_kwargs\"])", "title": "" }, { "docid": "82087f6840ad9af8d01bff0ca8cf0b3d", "score": "0.5976538", "text": "def save(self):\n self.file.seek(0, SEEK_SET)\n if self.file.write(self._dataview) != MMB_INDEX_SIZE:\n raise IOError(\"%s: failed to write index\" % self.filename)\n self.clear_modified()", "title": "" }, { "docid": "75460ab96956b6cf67239121652924ac", "score": "0.5958122", "text": "def load(self, filename):\n\n try:\n return cPickle.load(open(filename))\n except Exception, e:\n print 'Error loading index file',filename,'=>',e\n print 'Index possibly corrupt, deleting it'\n os.remove(filename)", "title": "" }, { "docid": "7303024024534d0ed05d90a9dfe69724", "score": "0.59486353", "text": "def es_index():\n reindex_test_es_data()\n yield\n delete_indices()", "title": "" }, { "docid": "b9fe372a38020fbb3d68d4bfe71a9fb9", "score": "0.59473115", "text": "def reload(self):\n\t\tpass", "title": "" }, { "docid": "0a706af6c659a8e6281bf2a30e2ae2c6", "score": "0.59401274", "text": "def do_reindex():\n from zothero import app\n # TODO: Decide whether to force a full update here.\n #\n # Only entries and attachments have a \"modified\" date.\n # If any entries or attachments have changed, only they\n # will be updated if force == False. Entries whose notes\n # have changed are not picked up.\n #\n # Changed notes are only be picked up if *no* entries\n # or attachments were changed (a full update is forced when\n # the library notices that the database has changed,\n # but there are no updated entries or attachments).\n #\n # Setting `force=True` reloads everything on every update.\n app.update_index(force=False)", "title": "" }, { "docid": "d5761aa8a5212d3a271ada56654d32ac", "score": "0.59265435", "text": "def reindex_casefiles(flush=False):\n for collection in Collection.all_casefiles():\n _reindex_collection(collection, flush=flush)", "title": "" }, { "docid": "8dd06479fc776ab1b2934d5d825663d1", "score": "0.5912192", "text": "def reindex(self):\n if not self.need_reindex():\n return\n start = time.time()\n self.env.log.debug('Indexing repository (either repository or indexing criteria have changed)')\n self._open_storage('c')\n new_files = set()\n for node in TracRepoSearchPlugin(self.env).walk_repo(self.repo):\n if node.kind != Node.DIRECTORY:\n # Node has changed?\n if int(self.revs.get(node.path.encode('utf-8'), -1)) != node.rev:\n self.env.log.debug(\"Reindexing %s\" % node.path)\n self._invalidate_file(node.path)\n self._reindex_node(node)\n new_files.add(node.path)\n \n # All files that don't match the new filter criteria must be purged\n # from the index\n invalidated_files = set(self.files.keys())\n invalidated_files.difference_update(new_files)\n for invalid in invalidated_files:\n self._invalidate_file(invalid)\n\n self.sync()\n self._open_storage('r')\n self.env.log.debug('Index finished in %.2f seconds' % (time.time() - start))", "title": "" }, { "docid": "4f1a7ac2b88468d893154dbaa273b342", "score": "0.59031826", "text": "def reload(self):\n pass", "title": "" }, { "docid": "4f1a7ac2b88468d893154dbaa273b342", "score": "0.59031826", "text": "def reload(self):\n pass", "title": "" }, { "docid": "aa976253a654ff0f69005c1bda75e376", "score": "0.5891639", "text": "def update_index():\n print(\"Starting Updating Index\")\n client = algoliasearch.Client(\"YOUR_KEY\", \"YOUR_VALUE\")\n index = client.init_index(\"your_INDEX\")\n print(\"Clearing index\")\n index.clear_index()\n print(\"Loading index\")\n batch = 
json.load(open('../index.json'))\n index.add_objects(batch)", "title": "" }, { "docid": "97f2799d04dbc060d70f2e9e7e096425", "score": "0.5877284", "text": "def update_index(self):\n self.delete_index()\n self._refresh_materialized_views()\n self.client.indices.create(self.index_name, self.template)\n create_aliases(self.client, self.index_name, True)\n self._add_contents()", "title": "" }, { "docid": "e65a90e00fcfe4c85d0fda39ef550d96", "score": "0.5863353", "text": "def reload(self) -> None:\n self._sections = self._backend.read()", "title": "" }, { "docid": "98694a3bdbdf73240dca4da6b68998f9", "score": "0.5856008", "text": "def do_update(self):\n es_client.indices.refresh(index=self.index)", "title": "" }, { "docid": "c1cc11649ec25b1cf2a5382ed4097987", "score": "0.5855239", "text": "def _load_index(self, index_filename):\n with open(index_filename,'rb') as dict_file:\n self.index = cPickle.load(dict_file)\n dict_file.close()", "title": "" }, { "docid": "181385a3041a430f5ec7f607c4f56e07", "score": "0.58464754", "text": "def update_indexer(self):\n from .recordbuilder import fts_record_for\n if self.__class__.__name__ not in get_indexed_model_names():\n return\n indexer = get_indexer()\n indexer.delete_record(self.id, self.__class__.__name__, False)\n indexer.create_record(fts_record_for(self), False)", "title": "" }, { "docid": "3513fe86ec44d93e7fed246b231fdd0d", "score": "0.5846306", "text": "def reindex(cls):\n for obj in cls.query:\n add_to_index(cls.__tablename__, obj)", "title": "" }, { "docid": "3513fe86ec44d93e7fed246b231fdd0d", "score": "0.5846306", "text": "def reindex(cls):\n for obj in cls.query:\n add_to_index(cls.__tablename__, obj)", "title": "" }, { "docid": "1e8b6866169b3a1f16b9a27afdc3d714", "score": "0.5829134", "text": "def updateIndex(self):\n s = BloxDB()\n with s:\n s.updateIndex(self)\n return True", "title": "" }, { "docid": "ea3459b49316707a00b725e6310bd09a", "score": "0.5825587", "text": "def loadIndex( self, indexFileName ):\n if self.indexIsLoaded():\n return\n\n t0 = time.time()\n self._openSqlDb( indexFileName )\n tables = [ x[0] for x in self.sqlConnection.execute( 'SELECT name FROM sqlite_master WHERE type=\"table\"' ) ]\n versions = None\n try:\n rows = self.sqlConnection.execute( 'SELECT * FROM versions;' )\n versions = {}\n for row in rows:\n versions[row[0]] = ( row[2], row[3], row[4] )\n except:\n pass\n\n try:\n # Check indexes created with bugged bz2 decoder (bug existed when I did not store versions yet)\n if 'bzip2blocks' in tables and 'versions' not in tables:\n raise Exception( \"The indexes created with version 0.3.0 through 0.3.3 for bzip2 compressed archives \"\n \"are very likely to be wrong because of a bzip2 decoder bug.\\n\"\n \"Please delete the index or call ratarmount with the --recreate-index option!\" )\n\n # Check for empty or incomplete indexes\n if 'files' not in tables:\n raise Exception( \"SQLite index is empty\" )\n\n if 'filestmp' in tables or 'parentfolders' in tables:\n raise Exception( \"SQLite index is incomplete\" )\n\n # Check for pre-sparse support indexes\n if 'versions' not in tables or 'index' not in versions or versions['index'][1] < 2:\n print( \"[Warning] The found outdated index does not contain any sparse file information.\" )\n print( \"[Warning] The index will also miss data about multiple versions of a file.\" )\n print( \"[Warning] Please recreate the index if you have problems with those.\" )\n\n if 'metadata' in tables:\n values = dict( list( self.sqlConnection.execute( 'SELECT * FROM metadata;' ) ) )\n if 
'tarstats' in values:\n values = json.loads( values['tarstats'] )\n tarStats = os.stat( self.tarFileName )\n\n if hasattr( tarStats, \"st_size\" ) and 'st_size' in values \\\n and tarStats.st_size != values['st_size']:\n raise Exception( \"TAR file for this SQLite index has changed size from\",\n values['st_size'], \"to\", tarStats.st_size)\n\n if self.verifyModificationTime \\\n and hasattr( tarStats, \"st_mtime\" ) \\\n and 'st_mtime' in values \\\n and tarStats.st_mtime != values['st_mtime']:\n raise Exception( \"The modification date for the TAR file\", values['st_mtime'],\n \"to this SQLite index has changed (\" + str( tarStats.st_mtime ) + \")\" )\n\n except Exception as e:\n # indexIsLoaded checks self.sqlConnection, so close it before returning because it was found to be faulty\n try:\n self.sqlConnection.close()\n except:\n pass\n self.sqlConnection = None\n\n raise e\n\n if printDebug >= 1:\n # Legacy output for automated tests\n print( \"Loading offset dictionary from\", indexFileName, \"took {:.2f}s\".format( time.time() - t0 ) )", "title": "" }, { "docid": "dd89f017c3fcb0e7a56d51e8d12c1903", "score": "0.58181405", "text": "def updateTable(self) -> None:\n ...", "title": "" }, { "docid": "ead0d74d38bbe9dce1ac1b52a363b1d9", "score": "0.58144283", "text": "def UpdateHtmlIndex(self):\n index_html = '%s/index.html' % QATOPDIR\n f = open(index_html, 'r')\n lines = f.readlines()\n f.close()\n base = os.path.basename(self.htmlout_file)\n dname = os.path.dirname(self.htmlout_file).split('/')[-1]\n ref_path = './%s/%s' % (dname, base)\n\n# Insert link to new data.\n tree = ElementTree()\n tree.parse(self.xcdfile)\n for el in tree.getiterator():\n if 'scandate' in el.attrib.get('name',''):\n scandate = el.text\n elif 'scantime' in el.attrib.get('name',''):\n scantime = el.text\n linkname = '%s at %s' % (scandate, scantime)\n\n# Rewrite the disk file.\n lines_out = []\n for line in lines:\n if 'Put New Results Here' in line:\n newline = '<li>Data acquired on <a href=%s>%s</a></li>\\n' % (ref_path, linkname)\n lines_out.append(newline)\n lines_out.append(line)\n\n f = open(index_html, 'w')\n f.writelines(lines_out)\n f.close()", "title": "" }, { "docid": "0ab3ba71d6fd82c53b4eeecaf908984d", "score": "0.57934654", "text": "def save_index_off(self):\n self.save_index = False", "title": "" }, { "docid": "179d7ac455457db599dacf1a79c5c58a", "score": "0.5793003", "text": "def refresh(self, reindex=True):\n logger.info(\"Refreshing {}\".format(self))\n self.dataset_list.delete(*self.dataset_list.get_children())\n if reindex:\n self.root.dataset_list_model.reindex()\n for props in self.root.dataset_list_model.yield_properties():\n values = [props[c] for c in self.columns]\n self.dataset_list.insert(\"\", \"end\", values=values)\n logger.info(\"Loaded dataset: {}\".format(props))\n\n dataset_uri = self.root.dataset_list_model.get_active_uri()\n if dataset_uri is not None:\n self.root.load_dataset(dataset_uri)\n else:\n self.root.dataset_model.clear()", "title": "" }, { "docid": "ed6cda056ca523a039c95f9793bad784", "score": "0.57924646", "text": "def loadIndex(self,fileName=\"indexs/index.p\",typeToLoad='index'):\n import pickle\n with open(fileName, 'rb') as fp:\n if(typeToLoad=='index'):\n self.index=pickle.load(fp)\n elif(typeToLoad=='inverse'):\n self.inverse=pickle.load(fp)\n else:\n self.ponderer=pickle.load(fp)", "title": "" }, { "docid": "a9c2d17f66cf287b2b42ef01640a77ab", "score": "0.5777228", "text": "def reindex():\n log_file = current_app.config['PPT_LOG_FILE']\n logger = 
PPT_Indexer(log_file=log_file)\n source_folder = PurePath(current_app.config['PPT_SOURCE_FOLDER'])\n paths = [str((source_folder)/PurePath(i.path))+\".pptx\" for i in PPT.query.all()]\n messages=[]\n for path in paths:\n messages.append(f'Sync <{path}>')\n if os.path.exists(path):\n try:\n logger.create(path)\n messages.append(f'Redindexed <{path}>')\n except Exception as e:\n messages.append(f'Redindex Error <{path}> - <{e}>')\n else:\n try:\n logger.delete(path)\n messages.append(f'Deleted <{path}>')\n except Exception as e:\n messages.append(f'Deletion Error <{path}> - <{e}>')\n return messages", "title": "" }, { "docid": "5b32ad049036bcb1bb851454514a6d3c", "score": "0.5772073", "text": "def test_index_is_not_rebuilt_at_restart(self):\n\n cluster = self.cluster\n cluster.populate(1).start()\n node = cluster.nodelist()[0]\n\n session = self.patient_cql_connection(node)\n create_ks(session, 'k', 1)\n session.execute(\"CREATE TABLE k.t (k int PRIMARY KEY, v int)\")\n session.execute(\"INSERT INTO k.t(k, v) VALUES (0, 1)\")\n\n logger.debug(\"Create the index\")\n session.execute(\"CREATE INDEX idx ON k.t(v)\")\n block_until_index_is_built(node, session, 'k', 't', 'idx')\n before_files = self._index_sstables_files(node, 'k', 't', 'idx')\n\n logger.debug(\"Verify the index is marked as built and it can be queried\")\n assert_one(session, \"\"\"SELECT table_name, index_name FROM system.\"IndexInfo\" WHERE table_name='k'\"\"\", ['k', 'idx'])\n assert_one(session, \"SELECT * FROM k.t WHERE v = 1\", [0, 1])\n\n logger.debug(\"Restart the node and verify the index build is not submitted\")\n node.stop()\n node.start(wait_for_binary_proto=True)\n after_files = self._index_sstables_files(node, 'k', 't', 'idx')\n assert before_files == after_files\n\n logger.debug(\"Verify the index is still marked as built and it can be queried\")\n session = self.patient_cql_connection(node)\n assert_one(session, \"\"\"SELECT table_name, index_name FROM system.\"IndexInfo\" WHERE table_name='k'\"\"\", ['k', 'idx'])\n assert_one(session, \"SELECT * FROM k.t WHERE v = 1\", [0, 1])", "title": "" }, { "docid": "c095ff60b86cdd657810c7ccb014167d", "score": "0.5769207", "text": "def reset_table_index(self, table):\n query = f'ALTER TABLE {table} ALTER COLUMN id COUNTER(1,1);'\n try:\n self.cur.execute(query)\n except Exception as e:\n print(f'[x] Falha ao resetar o indice da tabela [x]: {e}')\n else:\n self.con.commit()\n print('\\n[!] 
Indice resetado com sucesso [!]')", "title": "" }, { "docid": "1106aa73fb72829baab9482fd6aa064d", "score": "0.5761425", "text": "def _refresh_index(self):\n new_df = self._reindex((self._row_query,self._col_query))\n\n self._row_index = new_df._row_index\n self._col_index = new_df._col_index\n self._row_counts = new_df._row_counts\n self._col_counts = new_df._col_counts", "title": "" }, { "docid": "4d243774e2673c5eb880958d48c60bf3", "score": "0.5760899", "text": "def rebuild_index():\n local(\"./manage.py rebuild_index --noinput\")", "title": "" }, { "docid": "5201caed7622d33135ae75774f700b09", "score": "0.5750793", "text": "def test_index_updates():\n text_file_name, num_list = create_num_file(100)\n with IndexedOpen(text_file_name) as f:\n first_index_updated_time = f.index.index_path.stat().st_mtime\n\n # no changes, not modified\n with IndexedOpen(text_file_name) as f:\n assert first_index_updated_time == f.index.index_path.stat().st_mtime\n\n time.sleep(0.01)\n\n # change text file\n with open(text_file_name, \"w\") as text_file:\n for i in range(10):\n text_file.write(\"%i\\n\" % i)\n\n # modified time on index changes to match modified file\n with IndexedOpen(text_file_name) as f:\n assert first_index_updated_time != f.index.index_path.stat().st_mtime", "title": "" }, { "docid": "8fc9001a5a2a2ab886a6ef8b3c74462e", "score": "0.5745435", "text": "def load(self, fname, **kwargs):\n fname_ = get_tempf(fname, tempdir=data_dir)\n self.index.loadIndex(fname_, **kwargs)", "title": "" }, { "docid": "006bb1a5691335c4804efd49989305f1", "score": "0.5712444", "text": "def update():\n update_index()\n upgrade()", "title": "" }, { "docid": "6501f174112ab067223899b06fcdd09b", "score": "0.5709443", "text": "def resetTable(self):", "title": "" }, { "docid": "910fb08d17c48a28605de82089b2feb5", "score": "0.5708548", "text": "def recreate_index():\n index = Index(name=get_index_name(), using='default')\n\n for name, doc_type in get_doctypes().items():\n index.doc_type(doc_type)\n\n # Delete the index if it exists.\n try:\n index.delete()\n except NotFoundError:\n pass\n\n # Note: There should be no mapping-conflict race here since the\n # index doesn't exist. 
Live indexing should just fail.\n\n # Create the index with the mappings all at once.\n index.create()", "title": "" }, { "docid": "b202bfdce6517515663f90b0ffe15ee8", "score": "0.5708262", "text": "def load_crates_from_index(self, force=False):\n\n self._cursor.execute(\"SELECT count(id) FROM crate;\")\n if not force and self._cursor.fetchone()[0] != 0:\n # info already loaded\n return\n\n for root, dirs, files in os.walk(self._index_dir):\n if '.git' in dirs:\n dirs.remove('.git')\n\n lmap(self.load_crate, [os.path.join(root, f) for f in files if f != 'config.json'])", "title": "" }, { "docid": "8d0662e3c892f23c631eb0b245d88213", "score": "0.5700798", "text": "def reload():\n con.populate_db()\n print(\"\\033[1;32m\" + \"Data reloaded\" + \"\\033[0m\")", "title": "" }, { "docid": "2dc48969bac853cbea54e487eaf02587", "score": "0.5697612", "text": "def clear_index(self) -> None:\n self.__index = {}", "title": "" }, { "docid": "a2839269a7a245da76be58529d6b5138", "score": "0.56787175", "text": "def _loadUnindexedTable(filename, **kwargs):\n return pd.read_table(filename, **kwargs)", "title": "" }, { "docid": "cd03759bc10ae9ad7bd8ba8d23c2ec88", "score": "0.5671855", "text": "def reload(self) -> None:\n self.reset()\n self.load()", "title": "" }, { "docid": "ea74c6db0e350d03440e1bfdbd691bd2", "score": "0.56706077", "text": "def reset(self):\n\n table_idxs = 'SELECT * FROM pg_indexes WHERE tablename = \\'%s\\''\n \n for table in tpch_tables: # TODO self.tables would suffice\n \n with self.tpch_cxn.cursor() as curs:\n curs.execute(table_idxs % table)\n idxs = curs.fetchall()\n \n for idx in idxs:\n if '_42' in idx[2]:\n curs.execute('DROP INDEX %s' % idx[2])\n\n self.index_set.clear()", "title": "" }, { "docid": "092f58b258b7b59f856e3bda5b9428c1", "score": "0.5667002", "text": "def reload(self):\n self.load()", "title": "" }, { "docid": "092f58b258b7b59f856e3bda5b9428c1", "score": "0.5667002", "text": "def reload(self):\n self.load()", "title": "" }, { "docid": "58f8b5602bbd7018466485c02885d2bf", "score": "0.5666899", "text": "def compressIndex( self ):\n self._loadIndex( compress=True )", "title": "" }, { "docid": "01ba5fb6204bee4d129d4054733df45d", "score": "0.56631875", "text": "def indexDirect(self):\n\n with open(\"./\" + self.name + \"_index\", \"wb\") as ifile:\n ifcur = 0\n\n # Pour chaque document\n self.parser.initFile(self.source)\n d = self.parser.nextDocument()\n\n # Logging\n log_size = self.parser.countDocument()\n log_accu = 0\n\n while (d):\n\n # Lecture document\n id = d.getId()\n st = self.textRepresenter.getTextRepresentation(d.getText())\n\n # ร‰criture table DocFrom\n dfp, dfb, dfl = d.get(\"from\").split(\";\")\n self.docFrom[id] = (dfp, int(dfb), int(dfl))\n\n # Initialisation stems\n for s, v in st.items():\n log.debug(\"Stem : \" + s)\n spv = self.stems.get(s, (0, 0))[1]\n self.stems[s] = (-1, spv + len(id + str(v)) + 2)\n\n # ร‰criture index\n ifile.write(self.writeDict(st))\n ifile.write(\"\\n\".encode())\n nfcur = ifile.tell()\n self.docs[id] = (ifcur, nfcur - ifcur)\n\n # ร‰criture hyperliens\n links = document.get('links').split(';')[:-1]\n if links:\n lfile.write(docid + \":\" + \";\".join(links) + \"\\n\")\n\n # Itรฉration\n ifcur = nfcur\n d = self.parser.nextDocument()\n\n # Live version (deprecated)\n if self.keep_alive:\n self.index[id] = st\n\n # Logging\n log_accu += 1\n log_perc = log_accu/log_size\n log.info(\"\\rIndexation normale [\" + \"โ–ˆ\"*int(50*log_perc) + \" \"*(50-int(50*log_perc)) + \"] \" + str(int(100*log_perc)) + \"%\")\n log.debug(\"Document 
: \" + id)\n\n log.info(\"\\b\" * 4 + \"\\033[1;32mTerminรฉ\\033[0m\\n\")", "title": "" }, { "docid": "f9986f3e655aff056599947fcb151cac", "score": "0.5659033", "text": "def rebuild_index(config):\n s = Session()\n s.login()\n base_url = s.url\n s = requests.session()\n r = s.post(\n base_url + 'SearchIndex',\n data={\n 'rebuild': 'Rebuild'\n }\n )\n #Find the confirmation - Preparing to rebuild the search index.\n if r.content.find('the search index') == -1:\n raise Exception('Rebuilding search index failed.\\\n Check Vivo log and admin pages.')\n s.logout()\n return True", "title": "" }, { "docid": "9f0b3eb1d9482ebd92e726d63fed97a3", "score": "0.5656752", "text": "def read_index(self):\n self.document_ids = []\n with open(os.path.join(self.store_path, 'index.json')) as f:\n self.document_ids = f.readlines()", "title": "" }, { "docid": "429fe0723b667426bb3ed16140e72e0a", "score": "0.5649196", "text": "def clean_index(self):\n pass", "title": "" }, { "docid": "8dc052c54ca578df4ba46d28bb34ff93", "score": "0.5648638", "text": "def load_index(self, fn):\n inverted_idx = self._indexer.load_index(fn)\n return inverted_idx", "title": "" }, { "docid": "43ceb1da03be2cf3ac51b551221d562b", "score": "0.5646416", "text": "def _reload(self, filename):\n Debug('files', \"TRIGGER TSS RELOAD\")\n TSS.reload(filename)", "title": "" }, { "docid": "be673727ccf93c16865a3b9a0973da7b", "score": "0.56447476", "text": "def reindex_search(self):\n from djax.models import AxilentContentRecord\n record = AxilentContentRecord.objects.get_record(self)\n record.reindex()", "title": "" }, { "docid": "f55f021d78b223996bc507807258a121", "score": "0.563769", "text": "def update_index(idx: Idx, table_index: int = 1) -> pd.DataFrame:\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/39.0.2171.95 Safari/537.36'}\n response = requests.get(idx.ic_historical_data_url(), headers=headers)\n if response.status_code != 200:\n raise Exception('Unexpected response status: {}'.format(response.status_code))\n # old index\n dfo = _read_date_indexed_data(idx.filename())\n # new index\n dfn = pd.read_html(response.text, index_col=0)[table_index]\n dfn.index = pd.to_datetime(dfn.index)\n dfn = dfn.sort_index()\n # concat on last day\n lastday = dfn.index[0] + pd.DateOffset(days=-1)\n dfi = pd.concat([dfo[:lastday], dfn], join='inner')\n dfi.to_csv(idx.filename())\n _log.info('Updated {}'.format(idx.describe()))\n return dfi", "title": "" }, { "docid": "fb818b2d9bbd3012c97bb74b63d8ef9a", "score": "0.5635225", "text": "def test_save_load_index(self):\n mytrack = self.t\n ref = 'gene1'\n intervals = []\n for i in xrange(100):\n start = np.random.randint(0, 1150)\n end = start + np.random.randint(1, 50)\n interval = Interval(ref, start, end, i)\n intervals.append(interval)\n mytrack.add(interval)\n # create and save index\n mytrack.index(persist=True)\n # save the index table\n tblcopy = mytrack.indexes['gene1'].tree.tbl.copy()\n tblroot = mytrack.indexes['gene1'].tree.root_id\n # close and reopen the file\n self.tf.close()\n self.tf = TrackFactory(self.filename, 'r', refs=self.refs)\n mytrack = self.tf.get_track('intervals1')\n # compare indexes\n self.assertTrue(np.all(tblcopy == mytrack.indexes['gene1'].tree.tbl))\n self.assertEqual(tblroot, mytrack.indexes['gene1'].tree.root_id)", "title": "" }, { "docid": "00e8f783c5da0c46f334f2fb6393476b", "score": "0.56339073", "text": "def load_index(self, dataset):\n raise NotImplementedError()", "title": "" } ]
e4c501b5a15d5938a40f9b6d7a9e3f64
SQL query for comparison
[ { "docid": "555e45c55f904218e0d7f5311567405f", "score": "0.0", "text": "def test_clean_all(self):\n print(\"TEST NETTOYAGE INTร‰GRALE\")\n CleaningDB.cleaning_all_products()\n for t in TABLES:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM %s;\" %(t)\n cursor.execute(sql, ())\n clean_test = cursor.fetchall()\n\n if clean_test == ():\n print(f\"{t} est bien vide, test rรฉussi !\")\n else:\n print(f\"{t} n'est pas vide, test รฉchouรฉ !\")\n\n connection.commit()\n\n if self.assertEqual(clean_test, ()):\n print(\"\\n\\n >>> 3 <<< ---------- TEST DU NETTOYAGE DE LA BASE DE DONNEES OK ---------- >>> 3 <<< \\n\\n\")\n\n time.sleep(2)\n print(\"\\n\"*50)", "title": "" } ]
[ { "docid": "e6b845161c19b8f140eda6940b5bcaf5", "score": "0.60658246", "text": "def compare_with_old_data_query(self):\n raise NotImplementedError", "title": "" }, { "docid": "6bf6e08760ea97956415d68ce530b8c7", "score": "0.59604883", "text": "def test_linear_conditional_query(self):\n pass", "title": "" }, { "docid": "f059b9e1f3c3ef6b6e17d666d65c5692", "score": "0.5918001", "text": "def compare(self):", "title": "" }, { "docid": "8b5654f6444f01f8b06b3a4753007241", "score": "0.59039503", "text": "def compare_data(self, db_x, db_y):\n # TODO: Improve method\n pass", "title": "" }, { "docid": "6392383420d5a81b8363de78726ffa74", "score": "0.5698731", "text": "def checkForRematch(p1, p2):\n connection = connect()\n cursor = connection.cursor()\n cursor.execute(\"select number_of_matchup, (select min(number_of_matchup) \" +\n \"from view_player_versus where id = %(p1)s) as min from \" +\n \"view_player_versus where id = %(p1)s and opponent = %(p2)s\",\n {\"p1\": p1, \"p2\": p2})\n row = cursor.fetchone()\n connection.close()\n if row[0] > row[1]:\n return False\n else:\n return True", "title": "" }, { "docid": "ea6cc643768682b661c3e7237eed9a21", "score": "0.5672358", "text": "def run_query(select, expected):\n expected = expected.split()\n out = mldb.query('SELECT {} FROM {}'.format(select, 'd1'))\n cols = sorted(out[0][1:])\n if cols != expected:\n mldb.log('{} != {}'.format(cols, expected))\n mldb.log('output was')\n mldb.log(out)\n assert False\n return out", "title": "" }, { "docid": "a88764547179b21d3c20fc3bb5ca0325", "score": "0.5647028", "text": "def comparison_detail ():\n\n with current_app.config.dba.engine.begin () as conn:\n ms1 = Manuscript (conn, request.args.get ('ms1') or 'A')\n ms2 = Manuscript (conn, request.args.get ('ms2') or 'A')\n range_ = request.args.get ('range') or 'All'\n\n res = execute (conn, \"\"\"\n SELECT p.pass_id, p.begadr, p.endadr, v1.labez_clique, v1.lesart,\n v2.labez_clique, v2.lesart,\n is_p_older (p.pass_id, v1.labez, v1.clique, v2.labez, v2.clique) AS older,\n is_p_older (p.pass_id, v2.labez, v2.clique, v1.labez, v1.clique) AS newer,\n is_p_unclear (p.pass_id, v1.labez, v1.clique) OR\n is_p_unclear (p.pass_id, v2.labez, v2.clique) AS unclear\n FROM (SELECT * FROM ranges WHERE range = :range_) r\n JOIN passages p ON (r.passage @> p.passage )\n JOIN apparatus_cliques_view v1 USING (pass_id)\n JOIN apparatus_cliques_view v2 USING (pass_id)\n WHERE v1.ms_id = :ms1 AND v2.ms_id = :ms2\n AND v1.labez != v2.labez AND v1.labez !~ '^z' AND v2.labez !~ '^z'\n AND v1.cbgm AND v2.cbgm\n ORDER BY p.pass_id\n \"\"\", dict (parameters, ms1 = ms1.ms_id, ms2 = ms2.ms_id, range_ = range_))\n\n return list (map (_ComparisonDetailRowCalcFields._make, res))", "title": "" }, { "docid": "4810a81001d36881e3d371aa4717204c", "score": "0.56178385", "text": "def test_unionComparison(self):\n s = Store()\n query = ItemQuery(s, A, AND(A.reftoc == B.storeID,\n B.cref == C.storeID))\n sql, args = query._sqlAndArgs('SELECT', '*')\n self.assertEquals(\n sql,\n 'SELECT * FROM %s, %s, %s '\n 'WHERE ((%s.[reftoc] = %s.oid) AND '\n '(%s.[cref] = %s.oid))' % (\n A.getTableName(s),\n B.getTableName(s),\n C.getTableName(s),\n A.getTableName(s),\n B.getTableName(s),\n B.getTableName(s),\n C.getTableName(s)))\n self.assertEquals(args, [])", "title": "" }, { "docid": "468bd93652813c7225d46f1f08efa630", "score": "0.5571128", "text": "def compare(self, tables_obj):\n def table_vals(sqltable):\n \"\"\" dict {colname, [values]} \"\"\"\n res = {}\n for col_name, col in sqltable.sql_column_names:\n if 
len(col.values):\n res[col_name] = col.values\n return res\n\n for table_name in self.tables:\n sqltable = self.tables[table_name]\n if table_name not in tables_obj.tables and \\\n table_vals(self.tables[table_name]) == {}:\n # table w/o values and absent tables are both empty\n continue\n sqltable2 = tables_obj.tables[table_name]\n if sqltable.sql_column_names != sqltable2.sql_column_names:\n msg_fmt = \"not equal: Table %s has different columns %s and %s\"\n getLogger(__name__).info(msg_fmt % (table_name,\n sqltable.sql_column_names,\n sqltable2.sql_column_names))\n return False\n for colname in sqltable.sql_column_names:\n sqlcol = sqltable.sql_columns[colname]\n sqlcol2 = sqltable2.sql_columns[colname]\n if len(sqlcol.values) != len(sqlcol2.values):\n msg_fmt = \"not equal: Column %s.%s has different \\\nrows count %d and %d\"\n getLogger(__name__).info(msg_fmt % (table_name, sqlcol.name,\n len(sqlcol.values), \n len(sqlcol2.values)))\n getLogger(__name__).debug('Different columns are: ' +\n \"colvals1 = \" + str(sqlcol.values) +\n \"colvals2 = \" + str(sqlcol2.values))\n return False\n\n if not Tables.cmp_values(sqlcol.values, sqlcol2.values):\n msg_fmt = \"not equal: %s.%s column values val=%s, val2=%s\"\n str_values = []\n str_values2 = []\n try:\n str_values = [str(i) for i in sqlcol.values ]\n str_values2 = [str(i) for i in sqlcol2.values ]\n except:\n if not str_values:\n str_values = ['!!!!Exeption!!!!']\n if not str_values2:\n str_values2 = ['!!!!Exeption!!!!']\n getLogger(__name__).info(msg_fmt % (table_name,\n sqlcol.name,\n str_values,\n str_values2))\n return False\n return True", "title": "" }, { "docid": "dff3e6f15198d8023d260f88a8b795a5", "score": "0.55607283", "text": "def test_oneOfColumnQueryQueryGeneration(self):\n subselect = self.store.query(A).getColumn('type')\n comparison = C.name.oneOf(subselect)\n self.failUnless(IComparison.providedBy(comparison))\n self.assertEquals(\n comparison.getQuery(self.store),\n '%s IN (SELECT %s FROM %s)' % (\n C.name.getColumnName(self.store),\n A.type.getColumnName(self.store),\n A.getTableName(self.store)))\n self.assertEquals(\n comparison.getArgs(self.store),\n [])", "title": "" }, { "docid": "7b19cc9170c8f77e1fb2862fcec31038", "score": "0.55573225", "text": "def test_placeholderComparisonQuery(self):\n s = Store()\n p = Placeholder(PlaceholderTestItem)\n\n # Explicitly call this here, since we're not going through ItemQuery or\n # another more reasonable codepath, which would have called it for us.\n p.getTableAlias(s, ())\n\n value = 0\n comparison = (p.attr > value)\n self.assertEquals(\n comparison.getQuery(s),\n '(placeholder_0.[attr] > ?)')\n self.assertEquals(\n comparison.getArgs(s),\n [value])", "title": "" }, { "docid": "312a3200169179a8bf15e7275f9fb8a2", "score": "0.5526739", "text": "def _equivalence_query(self, conj_machine):\n raise NotImplementedError(\n 'Equivalence Query method is not implemented')", "title": "" }, { "docid": "0d3c29fd0e107ea2cb72ead03806e826", "score": "0.54810417", "text": "def get_compare_identity_query(self) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "998915c40e99d972eff78d8b48782873", "score": "0.54744995", "text": "def test_ingredient_cmp(self):\n ingra = Ingredient(id=\"b\", columns=[self.basic_table.c.first])\n ingrb = Ingredient(id=\"a\", columns=[self.basic_table.c.last])\n assert ingrb < ingra", "title": "" }, { "docid": "703e3d119ee39b4174977dbac06a450d", "score": "0.5468953", "text": "def matches(self, relation):", "title": "" }, { "docid": 
"c38c07ce3303b80e127421c16d5b3140", "score": "0.5390999", "text": "def compare_records(azara_record, rui_record):\n\n pass", "title": "" }, { "docid": "b7c84628c827871f82ecd3d222427a26", "score": "0.5384409", "text": "def cmpResult(self, res1, res2):\n try:\n if len(res1) != len(res2):\n return False\n \n for row in res1:\n if row not in res2:\n return False\n except:\n return False\n \n return True", "title": "" }, { "docid": "d95e08e7d4cc72510be71d6e8cdc08b5", "score": "0.53456116", "text": "def test_oneOfColumnQueryQueryGenerationWithArguments(self):\n value = '10'\n subselect = self.store.query(\n D,\n AND(D.id == value,\n D.four == C.name)).getColumn('one')\n\n comparison = C.name.oneOf(subselect)\n self.failUnless(IComparison.providedBy(comparison))\n self.assertEquals(\n comparison.getQuery(self.store),\n '%s IN (SELECT %s FROM %s, %s WHERE ((%s = ?) AND (%s = %s)))' % (\n C.name.getColumnName(self.store),\n D.one.getColumnName(self.store),\n C.getTableName(self.store),\n D.getTableName(self.store),\n D.id.getColumnName(self.store),\n D.four.getColumnName(self.store),\n C.name.getColumnName(self.store)))\n self.assertEquals(\n map(str, comparison.getArgs(self.store)),\n [value])", "title": "" }, { "docid": "4f8101eea6f6946d52852642d1aaf8f3", "score": "0.53136945", "text": "def compare_with_sample(self, sample):\n res = True\n if self.table_name not in sample:\n res = False\n getLogger(__name__).error(\"not equal: Couldn't locate table %s\" % \\\n self.table_name)\n else:\n sample_columns = sample[self.table_name]\n for column_name, data in sample_columns.iteritems():\n try:\n sqlcolumn = self.sql_columns[column_name]\n except:\n getLogger(__name__).error(sys.exc_info())\n getLogger(__name__).error(\"Available columns are: %s\" % \\\n self.sql_columns.keys())\n res = False\n if Tables.cmp_values(sqlcolumn.values, data):\n getLogger(__name__).info(\"cmp %s ok\" % column_name)\n else:\n res = False\n str_values = [str(i) for i in sqlcolumn.values ]\n str_values2 = [str(i) for i in data ]\n getLogger(__name__).error(\n \"not equal: %s.%s column values=%s, sample=%s\" % \\\n (self.table_name, column_name, str_values, str_values2) )\n return res", "title": "" }, { "docid": "ffcd91593ada4f2786eab667ba987c75", "score": "0.5281846", "text": "def eq_data(self, other):\n # Check that there are the same number of instances in each result\n # Then check that they are the same instances\n if len(self.instance_indices) != len(other.instance_indices) \\\n or (self.instance_indices != other.instance_indices).any():\n return False\n conditions = [ (self.goldstandard == other.goldstandard).all()\n , (self.classifications == other.classifications).all()\n ]\n return all(conditions)", "title": "" }, { "docid": "e99430060a221862dac9e36fcee0b817", "score": "0.5279995", "text": "def test_simpleIntegerComparison(self):\n s = Store()\n query = ItemQuery(s, E, E.amount == 0)\n sql, args = query._sqlAndArgs('SELECT', '*')\n self.assertEquals(\n sql,\n 'SELECT * FROM %s WHERE (%s.[amount] = ?)' % (\n E.getTableName(s),\n E.getTableName(s)))\n self.assertEquals(args, [0])", "title": "" }, { "docid": "999864748d84db57615ce1244f5d2c2d", "score": "0.5272093", "text": "def _matches(self, quals, row):\n for qual in quals:\n op = QUAL_OPERATOR_MAP.get(qual.operator)\n if op is None:\n log_to_postgres(\n 'Unknown operator {} in the {} qual. 
Row will be returned.'.format(qual.operator, qual),\n WARNING,\n hint='Implement that operator in the ambryfdw wrapper.')\n continue\n\n elem_index = self.columns.index(qual.field_name)\n if not op(row[elem_index], qual.value):\n return False\n return True", "title": "" }, { "docid": "814e73bb3df2c0fc75a8e3c6ba16d67b", "score": "0.5247706", "text": "def test_comparisons(self):\n ingr = Ingredient(columns=[self.basic_table.c.first], id=1)\n ingr2 = Ingredient(columns=[self.basic_table.c.first], id=2)\n ingr2copy = Ingredient(columns=[self.basic_table.c.first], id=2)\n dim = Dimension(self.basic_table.c.first, id=3)\n met = Metric(func.sum(self.basic_table.c.first), id=4)\n met2 = Metric(func.sum(self.basic_table.c.first), id=2)\n filt = Filter(self.basic_table.c.first < \"h\", id=92)\n hav = Having(func.sum(self.basic_table.c.first) < 3, id=2)\n\n items = [filt, hav, met2, met, ingr, dim, ingr2]\n self.assertNotEqual(ingr, ingr2)\n self.assertEqual(ingr2, ingr2copy)\n self.assertLess(dim, met)\n self.assertLess(met, filt)\n self.assertLess(filt, hav)\n self.assertLess(dim, hav)\n self.assertEqual(sorted(items), [dim, met2, met, filt, hav, ingr, ingr2])", "title": "" }, { "docid": "8e30255b12af50878e890b805feb1162", "score": "0.52276254", "text": "def comparison_summary ():\n\n with current_app.config.dba.engine.begin () as conn:\n ms1 = Manuscript (conn, request.args.get ('ms1') or 'A')\n ms2 = Manuscript (conn, request.args.get ('ms2') or 'A')\n\n res = execute (conn, \"\"\"\n (WITH ranks AS (\n SELECT ms_id1, ms_id2, rg_id, rank () OVER (PARTITION BY rg_id ORDER BY affinity DESC) AS rank, affinity\n FROM affinity aff\n WHERE ms_id1 = :ms_id1\n AND {prefix}newer > {prefix}older\n ORDER BY affinity DESC\n )\n\n SELECT a.rg_id, a.range, a.common, a.equal,\n a.older, a.newer, a.unclear, a.affinity, r.rank, ms1_length, ms2_length\n FROM {view} a\n JOIN ranks r USING (rg_id, ms_id1, ms_id2)\n WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = :ms_id2\n )\n\n UNION\n\n (WITH ranks2 AS (\n SELECT ms_id1, ms_id2, rg_id, rank () OVER (PARTITION BY rg_id ORDER BY affinity DESC) AS rank, affinity\n FROM affinity aff\n WHERE ms_id2 = :ms_id2\n AND {prefix}newer < {prefix}older\n ORDER BY affinity DESC\n )\n\n SELECT a.rg_id, a.range, a.common, a.equal,\n a.older, a.newer, a.unclear, a.affinity, r.rank, ms1_length, ms2_length\n FROM {view} a\n JOIN ranks2 r USING (rg_id, ms_id1, ms_id2)\n WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = :ms_id2\n )\n\n UNION\n\n SELECT a.rg_id, a.range, a.common, a.equal,\n a.older, a.newer, a.unclear, a.affinity, NULL, ms1_length, ms2_length\n FROM {view} a\n WHERE a.ms_id1 = :ms_id1 AND a.ms_id2 = :ms_id2 AND a.newer = a.older\n\n ORDER BY rg_id\n \"\"\", dict (parameters, ms_id1 = ms1.ms_id, ms_id2 = ms2.ms_id,\n view = 'affinity_p_view', prefix = 'p_'))\n\n return list (map (_ComparisonRowCalcFields._make, res))", "title": "" }, { "docid": "89ff2106457df80f5134689b88d9fe9c", "score": "0.5213489", "text": "def __eq__(self, other):\n return self._query == other._query", "title": "" }, { "docid": "3f8e79c308a22712f44f3114da07faec", "score": "0.5207302", "text": "def _is_row_calc(self, sql):\n table_units = sql['from']['table_units']\n ## ่กŒ่ฎก็ฎ—็š„ SQL from ้ƒจๅˆ†ๅŒ…ๅซ 2 ไธชๅญ SQL\n if not (len(table_units) == 2 and table_units[0][0] == 'sql' and table_units[1][0] == 'sql'):\n return False\n\n select = sql['select']\n # select[0]: (agg_id, val_unit)\n # val_unit: (calc_op, col_unit1, col_unit2)\n if not (len(select) == 1 and select[0][1][0] > 0 and select[0][1][1] == 
select[0][1][2]):\n            return False\n        return True", "title": "" }, { "docid": "21a9ed9557efd3fb788cf79700fd23db", "score": "0.51824635", "text": "def testComparison(self):\n\t\tpass", "title": "" }, { "docid": "d0ba0f320a69ae1efee7f190d63a18da", "score": "0.5178775", "text": "def __eq__(self, other: \"DBTuple\") -> bool:\n        # Can only check equality between tuples with the same set of columns.\n        assert set(self.keys()) == set(other.keys())\n        return And([self[c] == other[c] for c in self])", "title": "" }, { "docid": "b82cb7ae1aae94716f91fae713a2350e", "score": "0.51474106", "text": "def __ge__(self, rhs: Any) -> QueryInstance:\n        return self._generate_test(\n            lambda value: value >= rhs,\n            ('>=', self._path, rhs)\n        )", "title": "" }, { "docid": "1fbdcfa0efe955bc08bce149e28bb07f", "score": "0.51422906", "text": "def operators():\n\n    # How to equate two columns to each other\n    print users.c.id == addresses.c.user_id #Returns: users.id=addresses.user_id\n\n    #Using a literal value (not a SQLAlchemy clause object),\n    # we get a bind parameter\n    print users.c.id == 7 # Returns: users.id = id_1\n\n    # The 7 literal is embedded in the resulting ColumnElement;\n    # we can use the same trick we did with Insert object to see it\n    print (users.c.id == 7).compile().params # Returns: {u'id_1':7}\n\n    # Most Python operators produce a SQL expression here, like, equals, etc.\n    print users.c.id != 7 #Returns: users.id != :id_1\n    print users.c.name == None #None means IS NULL, Returns: users.name IS NULL\n    print 'fred' > users.c.name #Returns: users.name < :name_1\n\n    # 
Adding two integer columns together, we get an additional expression\n print users.c.id + addresses.c.id #Returns: users.id + addresses.id\n\n # Addind two string columns together, we get something different\n print users.c.name + users.c.fullname #Returns: users.name || users.fullname\n\n # If you come across an operator which really isn't available, you can use\n # the ColumnOperators.op() method to generate whatever operator you need\n print users.c.name.op('tiddlywinks')('foo')\n # Returns: users.name tiddlywinks :name_1", "title": "" }, { "docid": "ff6fa824545f9f403ea5042bf321bfeb", "score": "0.51121616", "text": "def selecteq(table, field, value, complement=False):\n\n return selectop(table, field, value, operator.eq, complement=complement)", "title": "" }, { "docid": "fee8130d970a39bd76720cc544b3ddbf", "score": "0.51011693", "text": "def displayAffected(query, q):", "title": "" }, { "docid": "1a50f22884f46fbcbcc900c84ad1cdec", "score": "0.5098048", "text": "def equivalence_query(self, hypothesis):\n raise NotImplementedError('Equivalence Query method is not implemented')", "title": "" }, { "docid": "c3f7dc8ece6314273f7d06f2ee6bbcf9", "score": "0.50854665", "text": "def test_oneOfValueQueryGeneration(self):\n values = [u'a', u'b', u'c']\n comparison = C.name.oneOf(values)\n self.failUnless(IComparison.providedBy(comparison))\n self.assertEquals(\n comparison.getQuery(self.store),\n '%s IN (?, ?, ?)' % (\n C.name.getColumnName(self.store),))\n self.assertEquals(\n comparison.getArgs(self.store),\n values)", "title": "" }, { "docid": "336eb7990b3c700ae7e9b4873a9fee59", "score": "0.5074794", "text": "def verify_DF_satisfied(df):\t\t\t\n\t\t\n\tcursor = config.connection.cursor()\n\t#reads all the data from columns present in DF\n\tstr=\"SELECT \"\n\tfor i in range (len(df.lhs)):\n\t\tstr=str+df.lhs[i]+\", \"\n\tstr=str+df.rhs+\" FROM \"+df.table_name\n\ttry:\n\t\tcursor.execute(str)\n\t\ttuples=cursor.fetchall()\t\n\texcept sqlite3.OperationalError:\n\t\t#returns False if attributes or tables were not found\n\t\treturn False\n\t\n\t#contains associations between lhs and rhs. 
For one lhs only one rhs is acceptable\n\t#If for all tuples with the same lhs, rhs remains the same, the DF is met\n\tassoc=[]\n\t\n\tfor i in range(len(tuples)):\n\t\tval=search_in_array(assoc,tuples[i][:-1])\n\t\tif(val==None):\n\t\t\t#if lhs was not found in the association table program adds it\n\t\t\tassoc.append(tuples[i])\n\t\telif(val!=tuples[i][-1]):\n\t\t\t#if lhs was found in association table \n\t\t\t#and the rhs there is different than the rhs in the tuple that is being processed\n\t\t\t#the DF is not satisfied\n\t\t\treturn False\t\n\treturn True", "title": "" }, { "docid": "ecf2a4d791446ce8e3ecd033e98443fc", "score": "0.50742686", "text": "def _sql_where(self, cursor, aliases=None, aggregate=False):\n assert False, \"subclass responsibility\"", "title": "" }, { "docid": "c2891662d63a7ecda37061734624277f", "score": "0.50645906", "text": "def test_basic_query_gt(self):\n date = datetime(2015, 5, 18)\n programs = self._get_first_result_set(\n self._make_query_dict(\"Program\",\n expression=[\"effective date\", \">\",\n date.strftime(DATE_FORMAT_REQUEST)]),\n \"Program\",\n )\n\n self.assertEqual(programs[\"count\"], 13)\n self.assertEqual(len(programs[\"values\"]), programs[\"count\"])\n self.assertTrue(\n all(datetime.strptime(program[\"start_date\"],\n DATE_FORMAT_RESPONSE) > date\n for program in programs[\"values\"]),\n )", "title": "" }, { "docid": "d11cd15a4fe0f98f800be18341cd2b84", "score": "0.5061477", "text": "def match (self,example):\n val = example[self.col]\n if isnumeric(val):\n return (val >= self.val)\n else:\n return (val == self.val)", "title": "" }, { "docid": "9929d4d5a9948d5055073ac297679b29", "score": "0.5058216", "text": "def compter(self,table,connexion,where=''):\n connexion.set_isolation_level(0)\n curs = connexion.cursor()\n if(where==''):\n curs.execute(\"SELECT * FROM \\\"\"+table+\"\\\" \")\n result = curs.fetchall()\n else:\n curs.execute(\"SELECT * FROM \\\"\"+table+\"\\\" WHERE \"+where+\" \")\n result = curs.fetchall()\n return len(result)", "title": "" }, { "docid": "842c8ab4496ec7d0d3f35df63fc87310", "score": "0.50564915", "text": "def load_true_matches(ds1, ds2, id_col='id'):\n dfa = pd.read_csv(ds1)\n dfb = pd.read_csv(ds2)\n a = pd.DataFrame({'ida': dfa.index,\n 'link': dfa[id_col]})\n b = pd.DataFrame({'idb': dfb.index,\n 'link': dfb[id_col]})\n dfj = a.merge(b, on='link', how='inner').drop(columns=['link'])\n the_truth = set()\n for row in dfj.itertuples(index=False):\n the_truth.add((row[0], row[1]))\n return the_truth", "title": "" }, { "docid": "86ee564a02b5869e8d1d028803178d76", "score": "0.5050322", "text": "def _get_clause(self):\n params = [\n (\n primary_key,\n sql.bindparam(\"pk_%d\" % idx, type_=primary_key.type),\n )\n for idx, primary_key in enumerate(self.primary_key, 1)\n ]\n return (\n sql.and_(*[k == v for (k, v) in params]),\n util.column_dict(params),\n )", "title": "" }, { "docid": "b306bd422e0ec165c405d088152b37b9", "score": "0.5049966", "text": "def test_placeholderComparison(self):\n s = Store()\n p = Placeholder(PlaceholderTestItem)\n query = ItemQuery(\n s,\n PlaceholderTestItem,\n PlaceholderTestItem.attr == p.attr)\n sql, args = query._sqlAndArgs('SELECT', '*')\n self.assertEquals(\n sql,\n 'SELECT * '\n 'FROM %s, %s AS placeholder_0 '\n 'WHERE (%s.[attr] = placeholder_0.[attr])' % (\n PlaceholderTestItem.getTableName(s),\n PlaceholderTestItem.getTableName(s),\n PlaceholderTestItem.getTableName(s)))\n self.assertEquals(args, [])", "title": "" }, { "docid": "17f54fdb741bf1f397aaf454b8dd0b24", "score": 
"0.5049017", "text": "def _get_ec2_equal(column_name, values, delimiter=',', quotes='\"'):\n value_list = values.split(delimiter)\n\n phrases = []\n for value in value_list:\n phrases.append('%s=%s' % (column_name, '%s%s%s' % (quotes, value, quotes)))\n\n return 'and (%s)' % ' or '.join(phrases)", "title": "" }, { "docid": "69ceb77c8c48c0fc5d4f97dc24e42be2", "score": "0.50487417", "text": "def test_simpleReferenceComparison(self):\n s = Store()\n query = ItemQuery(s, A, A.reftoc == A.storeID)\n sql, args = query._sqlAndArgs('SELECT', '*')\n self.assertEquals(\n sql,\n 'SELECT * FROM %s WHERE (%s.[reftoc] = %s.oid)' % (\n A.getTableName(s),\n A.getTableName(s),\n A.getTableName(s)))\n self.assertEquals(args, [])", "title": "" }, { "docid": "893554b4a8c32110a1853352e39b0cde", "score": "0.5012722", "text": "def compare_load_results(self):", "title": "" }, { "docid": "9aea3f8f7189885ce787eb969fdb91e0", "score": "0.50109833", "text": "def testFilter(self):\n df = self.spark.createDataFrame([(2,'Alice'), (5, 'Bob')], ['age', 'name'])\n df_result = Filter(sql_condition='age>3').transform(df)\n\n df_expected = self.spark.createDataFrame([(5, 'Bob')], ['age', 'name'])\n\n self.assertEqual(df_result.collect(), df_expected.collect())", "title": "" }, { "docid": "6f9fe2a7efb0d179e86c5a30120b457e", "score": "0.5007736", "text": "def check_match_param(query_upper_j,query_lower_j,query_lower_parity,query_upper_parity,query_frequency,query_lower_QN,\n query_upper_QN,dataset):\n\n counter = 0\n for row in dataset:\n #use hitran functions here to obtain parameters\n ref_lower_j, ref_upper_j = get_J_value_hitran(row)\n ref_lower_parity = get_lower_parity_hitran(row)\n ref_upper_parity = get_upper_parity_hitran(ref_lower_parity,row)\n ref_frequency = get_freq_hitran(row)\n ref_upper_qn, ref_lower_qn = get_quantum_number_n_hitran(row)\n if ref_lower_j==query_lower_j and ref_upper_j==query_upper_j:\n if ref_lower_parity == query_lower_parity and ref_upper_parity == query_upper_parity:\n if check_frequency(ref_frequency,query_frequency) == True:\n if ref_upper_qn == query_upper_QN and ref_lower_qn==query_lower_QN:\n return row, counter\n counter = counter + 1\n\n return False, counter", "title": "" }, { "docid": "dc44759e38b026d733a33938cf1e2c1b", "score": "0.5006427", "text": "def __eq__(self, other: Any) -> bool:\n return (\n isinstance(other, Query)\n and self.outcomes == other.outcomes\n and self.treatments == other.treatments\n and self.conditions == other.conditions\n )", "title": "" }, { "docid": "ad35468a96b8e8bc354e6eb018ce0a7a", "score": "0.50034165", "text": "def _equal(annotation_from_es, annotation_from_db):\n return (\n annotation_from_es[\"updated\"] == annotation_from_db.updated\n and annotation_from_es[\"user\"] == annotation_from_db.userid\n )", "title": "" }, { "docid": "063c379a0d92a75b197d1bb833b9acbf", "score": "0.4999385", "text": "def test_sql_query(self):\n q = SqlQuery(\"my_table\", COLS, DTYPES, \"alias\")\n self.assertEqual(q.order, [\"alias\"])\n self.assertEqual(q.tables, {\"alias\": \"my_table\"})\n self.assertEqual(q._columns, {\"alias\": EXPANDED_COLS})\n self.assertEqual(q.datatypes, {\"alias\": EXPANDED_DTYPES})\n self.assertEqual(q.condition, None)\n\n # test WHERE\n c1 = EqualsCondition(\"my_table\", \"1\", VALS[0], DTYPES[\"1\"])\n c2 = EqualsCondition(\"my_table\", \"2\", VALS[1], DTYPES[\"2\"])\n c21 = EqualsCondition(\n \"my_table\", \"2___0\", VALS[1][0], EXPANDED_DTYPES[\"2___0\"]\n )\n c22 = EqualsCondition(\n \"my_table\", \"2___1\", VALS[1][1], 
EXPANDED_DTYPES[\"2___1\"]\n )\n c3 = EqualsCondition(\"my_table\", \"3\", VALS[2], DTYPES[\"2\"])\n c3 = AndCondition(c1, c2, c3)\n q.where(c1)\n self.assertEqual(q.condition, c1)\n q.condition = None\n q.where(c2)\n self.assertIsInstance(q.condition, AndCondition)\n self.assertEqual(len(q.condition.conditions), 2)\n self.assertEqual(q.condition, AndCondition(c21, c22))\n q.condition = None\n q.where(c1)\n q.where(c2)\n self.assertIsInstance(q.condition, AndCondition)\n self.assertEqual(len(q.condition.conditions), 3)\n self.assertEqual(q.condition, AndCondition(c1, c21, c22))\n q.condition = None\n q.where(c2)\n q.where(c1)\n self.assertIsInstance(q.condition, AndCondition)\n self.assertEqual(len(q.condition.conditions), 3)\n self.assertEqual(q.condition, AndCondition(c1, c21, c22))\n q.condition = None\n q.where(c1)\n q.where(c1)\n self.assertIsInstance(q.condition, AndCondition)\n self.assertEqual(len(q.condition.conditions), 1)\n self.assertEqual(q.condition, AndCondition(c1))\n\n # test JOIN\n c4 = JoinCondition(\"alias\", \"1\", \"alias2\", \"1\")\n q.join(\"2ndTable\", COLS, DTYPES, alias=\"alias2\")\n self.assertEqual(q.order, [\"alias\", \"alias2\"])\n self.assertEqual(q.tables, {\"alias\": \"my_table\", \"alias2\": \"2ndTable\"})\n self.assertEqual(\n q._columns, {\"alias\": EXPANDED_COLS, \"alias2\": EXPANDED_COLS}\n )\n self.assertEqual(\n q.datatypes, {\"alias\": EXPANDED_DTYPES, \"alias2\": EXPANDED_DTYPES}\n )\n self.assertEqual(q.condition, AndCondition(c1))\n q.where(c4)\n self.assertEqual(q.condition, AndCondition(c1, c4))", "title": "" }, { "docid": "57aef1a4478dfaf3712cdb885bbd55df", "score": "0.49938014", "text": "def test_oneOfColumnQueryGeneration(self):\n values = A.type\n comparison = C.name.oneOf(values)\n self.failUnless(IComparison.providedBy(comparison))\n self.assertEquals(\n comparison.getQuery(self.store),\n '%s IN (%s)' % (\n C.name.getColumnName(self.store),\n A.type.getColumnName(self.store)))\n self.assertEquals(\n comparison.getArgs(self.store),\n [])", "title": "" }, { "docid": "7bc613e9cc7f2a1a60d9cd542ed5ea15", "score": "0.49918944", "text": "def get_compare_definition_query(self) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "7042d82f94765d8d96c34c73cb8d4174", "score": "0.49756563", "text": "def test_basic_query_eq(self):\n title = \"Cat ipsum 1\"\n programs = self._get_first_result_set(\n self._make_query_dict(\"Program\", expression=[\"title\", \"=\", title]),\n \"Program\",\n )\n\n self.assertEqual(programs[\"count\"], 1)\n self.assertEqual(len(programs[\"values\"]), programs[\"count\"])\n self.assertEqual(programs[\"values\"][0][\"title\"], title)", "title": "" }, { "docid": "2d420aa68ebd46ea648f55abe84a6478", "score": "0.49742445", "text": "def similarity(self, record1, record2):\n pass", "title": "" }, { "docid": "fc78340782bb3ed17ffa31cf7ce428d3", "score": "0.49641874", "text": "def test_equality(self):\n brs = axelrod.ResultSetFromFile(self.filename, progress_bar=False)\n rs = axelrod.ResultSet(self.players, self.interactions, progress_bar=False)\n self.assertEqual(rs, brs)", "title": "" }, { "docid": "9a6eb642a0cc072a3cd9947d90057f0f", "score": "0.49568665", "text": "def __contains__(self,arg):\r\n queryresult=self.query_by_record_object(arg)\r\n if not queryresult: return False\r\n else: return True", "title": "" }, { "docid": "2a152024b5f45876d92a90e5ab501529", "score": "0.49516338", "text": "def where(self, v):", "title": "" }, { "docid": "8a3863284774f480a8e81ed9ef507bf8", "score": "0.49495605", "text": "def 
check_match_param_relaxed(query_upper_j,query_lower_j,query_lower_parity,query_upper_parity,query_frequency,query_lower_QN,\n query_upper_QN,dataset):\n\n counter = 0\n for row in dataset:\n #use hitran functions here to obtain parameters\n ref_lower_j, ref_upper_j = get_J_value_hitran(row)\n ref_lower_parity = get_lower_parity_hitran(row)\n ref_upper_parity = get_upper_parity_hitran(ref_lower_parity,row)\n ref_frequency = get_freq_hitran(row)\n ref_upper_qn, ref_lower_qn = get_quantum_number_n_hitran(row)\n if ref_lower_j==query_lower_j and ref_upper_j==query_upper_j:\n if ref_lower_parity == query_lower_parity and ref_upper_parity == query_upper_parity:\n if check_frequency_oneDp(ref_frequency,query_frequency) == True:\n if ref_upper_qn == query_upper_QN and ref_lower_qn==query_lower_QN:\n return row, counter\n counter = counter + 1\n\n return False, counter", "title": "" }, { "docid": "5c1e12d2ef0ce433476fadff492ad20b", "score": "0.49479973", "text": "def compare():\n _compare_result()\n _compare_time()", "title": "" }, { "docid": "7bdee9ac3927d73f6d10cbff878a0048", "score": "0.49479", "text": "def compare(self, a,b):\n\t\tpass", "title": "" }, { "docid": "7bdee9ac3927d73f6d10cbff878a0048", "score": "0.49479", "text": "def compare(self, a,b):\n\t\tpass", "title": "" }, { "docid": "878bf67c00f3d17601fea9e7fcbc4a26", "score": "0.49434602", "text": "def comp_eq(a, b):\n return a.getValue() == b.getValue() \\\n and a.getPartName() == b.getPartName() \\\n and a.getFootprint() == b.getFootprint() \\\n and a.getField(\"Tolerance\") == b.getField(\"Tolerance\") \\\n and a.getField(\"Voltage\") == b.getField(\"Voltage\")", "title": "" }, { "docid": "afec74d3a9b42fe1d864f274988f10ce", "score": "0.4940713", "text": "def where(condition, a, b): # pragma: no cover", "title": "" }, { "docid": "9f63d286912258737cfd91682aa9b2a6", "score": "0.4937024", "text": "def test_multiplePlaceholderComparisons(self):\n s = Store()\n p1 = Placeholder(PlaceholderTestItem)\n p2 = Placeholder(PlaceholderTestItem)\n\n query = ItemQuery(\n s,\n PlaceholderTestItem,\n AND(PlaceholderTestItem.attr == p1.attr,\n PlaceholderTestItem.other == p1.other,\n PlaceholderTestItem.attr == p2.attr,\n PlaceholderTestItem.characters == p2.characters))\n sql, args = query._sqlAndArgs('SELECT', '*')\n self.assertEquals(\n sql,\n 'SELECT * '\n 'FROM %s, %s AS placeholder_0, %s AS placeholder_1 '\n 'WHERE ((%s = placeholder_0.[attr]) AND '\n '(%s = placeholder_0.[other]) AND '\n '(%s = placeholder_1.[attr]) AND '\n '(%s = placeholder_1.[characters]))' % (\n PlaceholderTestItem.getTableName(s),\n PlaceholderTestItem.getTableName(s),\n PlaceholderTestItem.getTableName(s),\n PlaceholderTestItem.attr.getColumnName(s),\n PlaceholderTestItem.other.getColumnName(s),\n PlaceholderTestItem.attr.getColumnName(s),\n PlaceholderTestItem.characters.getColumnName(s)))\n self.assertEquals(args, [])", "title": "" }, { "docid": "a22300bb18c9849365828dd16e38c3cb", "score": "0.49365568", "text": "def generate_query(unique_ids, unique_field_name, wrap_values_in_quotes=False):\n if unique_ids:\n if wrap_values_in_quotes:\n query = \"{} in (-1,{})\".format(unique_field_name, \",\".join(\"'{0}'\".format(w) for w in unique_ids))\n else:\n query = \"{} in (-1,{})\".format(unique_field_name, \",\".join(unique_ids))\n else:\n query = \"{} in (-1)\".format(unique_field_name)\n return query", "title": "" }, { "docid": "690a0beacb7890b0cb26e67c06054fa5", "score": "0.49347508", "text": "def test__filter_big(self):\n threshold1 = self.high / 2\n threshold2 = self.high 
/ 4\n\n pyarrow_con1 = [self.pyarrow_table.col1 > threshold1]\n pandas_con1 = [self.pandas_table.col1 > threshold1]\n pyarrow_result1 = self.pyarrow_table.filter(pyarrow_con1).execute()\n pandas_result1 = self.pandas_table.filter(pandas_con1).execute()\n self.assertTrue(pyarrow_result1.to_pandas().equals(pandas_result1))\n\n pyarrow_con2 = [self.pyarrow_table.col1 < threshold1]\n pandas_con2 = [self.pandas_table.col1 < threshold1]\n pyarrow_result2 = self.pyarrow_table.filter(pyarrow_con2).execute()\n pandas_result2 = self.pandas_table.filter(pandas_con2).execute()\n self.assertTrue(pyarrow_result2.to_pandas().equals(pandas_result2))\n\n pyarrow_con3 = [(self.pyarrow_table.col1 < threshold1),\n (self.pyarrow_table.col1 != threshold2)]\n pandas_con3 = [(self.pandas_table.col1 < threshold1),\n (self.pandas_table.col1 != threshold2)]\n pyarrow_result3 = self.pyarrow_table.filter(pyarrow_con3).execute()\n pandas_result3 = self.pandas_table.filter(pandas_con3).execute()\n self.assertTrue(pyarrow_result3.to_pandas().equals(pandas_result3))\n\n pyarrow_con4 = [(self.pyarrow_table.col1 > threshold1)\n & (self.pyarrow_table.col2 <= threshold2)]\n pandas_con4 = [(self.pandas_table.col1 > threshold1)\n & (self.pandas_table.col2 <= threshold2)]\n pyarrow_result4 = self.pyarrow_table.filter(pyarrow_con4).execute()\n pandas_result4 = self.pandas_table.filter(pandas_con4).execute()\n self.assertTrue(pyarrow_result4.to_pandas().equals(pandas_result4))\n\n pyarrow_con5 = [(self.pyarrow_table.col1 > threshold1)\n | (self.pyarrow_table.col1 < threshold2)]\n pandas_con5 = [(self.pandas_table.col1 > threshold1)\n | (self.pandas_table.col1 < threshold2)]\n pyarrow_result5 = self.pyarrow_table.filter(pyarrow_con5).execute()\n pandas_result5 = self.pandas_table.filter(pandas_con5).execute()\n self.assertTrue(pyarrow_result5.to_pandas().equals(pandas_result5))", "title": "" }, { "docid": "5ad05514ce138afaf3b74d3fb6244e8b", "score": "0.49270868", "text": "def assertQueryEqual(self, qs, sql):\n result = self._clean_sql(str(qs.query))\n expected = self._clean_sql(sql)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "b519fb366f3c0c7f98cbfad884481793", "score": "0.4926189", "text": "def isin(self, \n\t\t\t val: dict):\n\t\tcheck_types([(\"val\", val, [dict], False)])\n\t\tcolumns_check([elem for elem in val], self)\n\t\tn = len(val[list(val.keys())[0]])\n\t\tisin = []\n\t\tfor i in range(n):\n\t\t\ttmp_query = []\n\t\t\tfor column in val:\n\t\t\t\tif (val[column][i] == None):\n\t\t\t\t\ttmp_query += [str_column(column) + \" IS NULL\"]\n\t\t\t\telse:\n\t\t\t\t\ttmp_query += [str_column(column) + \" = '{}'\".format(str(val[column][i]).replace(\"'\", \"''\"))]\n\t\t\tquery = \"SELECT * FROM {} WHERE {} LIMIT 1\".format(self.__genSQL__(), \" AND \".join(tmp_query))\n\t\t\tself._VERTICAPY_VARIABLES_[\"cursor\"].execute(query)\n\t\t\tisin += [self._VERTICAPY_VARIABLES_[\"cursor\"].fetchone() != None] \n\t\treturn (isin)", "title": "" }, { "docid": "1f455dee7fdd6ece3822aeea1f47f07a", "score": "0.49227047", "text": "def check_result(self, query, expected, test_logical=False,\n skip_json=False, output=\"OUTPUT\",\n algebra=MyriaLeftDeepTreeAlgebra):\n actual = self.execute_query(query, test_logical=test_logical,\n skip_json=skip_json, output=output,\n algebra=algebra)\n self.assertEquals(actual, expected)", "title": "" }, { "docid": "1d5fb7eff81b42b1e7033df512493c95", "score": "0.49226305", "text": "def __lt__(self, other):\n return self._query < other._query", "title": "" }, { "docid": 
"49149b1275a2e3aeb20143a7f345023b", "score": "0.49137047", "text": "def eq_scan(results, cursor, elements):\n eq_(results[0], cursor)\n eq_(results[1], elements)", "title": "" }, { "docid": "35cdb9fb1b580cc2fcc1ebe0be0830c5", "score": "0.49106658", "text": "def __eq__(self, mtrx):\n return self.rows == mtrx.rows", "title": "" }, { "docid": "d4ed362a574462feb191e32184caf9da", "score": "0.490949", "text": "def compare_system_and_attributes_database(self):\n\n # compare - systems\n self.assertTrue(System.objects.filter(system_name='system_csv_08_001').exists())\n self.assertTrue(System.objects.filter(system_name='system_csv_08_002').exists())\n self.assertTrue(System.objects.filter(system_name='system_csv_08_003').exists())\n self.assertTrue(System.objects.filter(system_name='system_csv_08_004').exists())\n # compare - relations\n self.assertTrue(System.objects.get(system_name='system_csv_08_001').case.filter(case_name='case_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_002').case.filter(case_name='case_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_003').case.filter(case_name='case_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_004').case.filter(case_name='case_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_001').company.filter(company_name='company_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_002').company.filter(company_name='company_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_003').company.filter(company_name='company_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_004').company.filter(company_name='company_db_1').exists())\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').dnsname, Dnsname.objects.get(dnsname_name='dnsname_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').dnsname, Dnsname.objects.get(dnsname_name='dnsname_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').dnsname, Dnsname.objects.get(dnsname_name='dnsname_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').dnsname, Dnsname.objects.get(dnsname_name='dnsname_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').domain, Domain.objects.get(domain_name='domain_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').domain, Domain.objects.get(domain_name='domain_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').domain, Domain.objects.get(domain_name='domain_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').domain, Domain.objects.get(domain_name='domain_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').location, Location.objects.get(location_name='location_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').location, Location.objects.get(location_name='location_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').location, Location.objects.get(location_name='location_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').location, Location.objects.get(location_name='location_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').os, Os.objects.get(os_name='os_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').os, 
Os.objects.get(os_name='os_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').os, Os.objects.get(os_name='os_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').os, Os.objects.get(os_name='os_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').reason, Reason.objects.get(reason_name='reason_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').reason, Reason.objects.get(reason_name='reason_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').reason, Reason.objects.get(reason_name='reason_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').reason, Reason.objects.get(reason_name='reason_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').recommendation, Recommendation.objects.get(recommendation_name='recommendation_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').recommendation, Recommendation.objects.get(recommendation_name='recommendation_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').recommendation, Recommendation.objects.get(recommendation_name='recommendation_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').recommendation, Recommendation.objects.get(recommendation_name='recommendation_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').serviceprovider, Serviceprovider.objects.get(serviceprovider_name='serviceprovider_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').serviceprovider, Serviceprovider.objects.get(serviceprovider_name='serviceprovider_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').serviceprovider, Serviceprovider.objects.get(serviceprovider_name='serviceprovider_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').serviceprovider, Serviceprovider.objects.get(serviceprovider_name='serviceprovider_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_001').systemtype, Systemtype.objects.get(systemtype_name='systemtype_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_002').systemtype, Systemtype.objects.get(systemtype_name='systemtype_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_003').systemtype, Systemtype.objects.get(systemtype_name='systemtype_db_1'))\n self.assertEqual(System.objects.get(system_name='system_csv_08_004').systemtype, Systemtype.objects.get(systemtype_name='systemtype_db_1'))\n self.assertTrue(System.objects.get(system_name='system_csv_08_001').tag.filter(tag_name='tag_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_002').tag.filter(tag_name='tag_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_003').tag.filter(tag_name='tag_db_1').exists())\n self.assertTrue(System.objects.get(system_name='system_csv_08_004').tag.filter(tag_name='tag_db_1').exists())\n\n # return to test function\n return self", "title": "" }, { "docid": "5b1cd0a873a168342e84c452f4dade66", "score": "0.4907669", "text": "def make_query(query):", "title": "" }, { "docid": "0f931062fe12fd1d87d8d66d54d3fca7", "score": "0.49059194", "text": "def check_query_result(expect, _session, _sql):\n session_data_set = _session.execute_query_statement(_sql)\n session_data_set.set_fetch_size(1)\n idx = 0\n while session_data_set.has_next():\n line = session_data_set.next()\n assert (\n 
str(line) == expect[idx]\n ), f\"line {idx}: actual {str(line)} != expect ({expect[idx]})\"\n idx += 1\n assert idx == len(expect), f\"result rows: actual ({idx}) != expect ({len(expect)})\"\n session_data_set.close_operation_handle()", "title": "" }, { "docid": "b1b16e16473964b2aa05381296ec628f", "score": "0.49029794", "text": "def sort_items(self, query, comparisons):\r\n\r\n c = self.cursor\r\n c.execute(query, comparisons)\r\n return c.fetchall() #returns a list ([] if no matches)\r", "title": "" }, { "docid": "2f4051aa7d7d205e1d68d304a216884a", "score": "0.48946694", "text": "def cql(query):\n return connection.execute(query)", "title": "" }, { "docid": "440651ca97145aee1bc5fb168390e9d5", "score": "0.488791", "text": "def select(self, individuals, not_same_as=None):", "title": "" }, { "docid": "7345c29374db3e4df11d5018976d49e6", "score": "0.48744667", "text": "def test_basic_query_lt(self):\n date = datetime(2015, 5, 18)\n programs = self._get_first_result_set(\n self._make_query_dict(\"Program\",\n expression=[\"effective date\", \"<\",\n date.strftime(DATE_FORMAT_REQUEST)]),\n \"Program\",\n )\n\n self.assertEqual(programs[\"count\"], 9)\n self.assertEqual(len(programs[\"values\"]), programs[\"count\"])\n self.assertTrue(\n all(datetime.strptime(program[\"start_date\"],\n DATE_FORMAT_RESPONSE) < date\n for program in programs[\"values\"]),\n )", "title": "" }, { "docid": "72b6d92d16ce32fa83d9e209ab86d35b", "score": "0.48717645", "text": "def get_identity_comparable(self, connection) -> Tuple:\n # Create other in a dummy schema\n cls = self.__class__\n adj_self = cls(\"alembic_utils\", self.signature, self.definition)\n identity_query = adj_self.get_compare_identity_query()\n with simulate_entity(connection, adj_self):\n # Collect the definition_comparable for dummy schema self\n row = (self.schema,) + tuple(connection.execute(identity_query).fetchone())\n return row", "title": "" }, { "docid": "cc008d830250c4e75e215a4f820969bc", "score": "0.48716184", "text": "def indexed_with_eq_test(self):\r\n cursor = self.prepare()\r\n\r\n # Create\r\n cursor.execute(\"\"\"\r\n CREATE TABLE users (\r\n userid uuid PRIMARY KEY,\r\n firstname text,\r\n lastname text,\r\n age int\r\n );\r\n \"\"\")\r\n\r\n cursor.execute(\"CREATE INDEX byAge ON users(age)\")\r\n\r\n # Inserts\r\n cursor.execute(\"INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)\")\r\n cursor.execute(\"UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479\")\r\n\r\n # Queries\r\n cursor.execute(\"SELECT firstname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND age = 33\")\r\n res = cursor.fetchall()\r\n assert res == [], res\r\n\r\n cursor.execute(\"SELECT firstname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND age = 33\")\r\n res = cursor.fetchall()\r\n assert res == [[ 'Samwise' ]], res", "title": "" }, { "docid": "b1522514f7d736fb106aec893c0a3d14", "score": "0.48715279", "text": "def match(self, example):\n val = example[self.col]\n if(is_numeric(val)):\n return val>=self.val\n else:\n return val == self.val", "title": "" }, { "docid": "c6d6820a2e9e59262ef1402b0e922a81", "score": "0.48711112", "text": "def test_cmp(self):\n self.assertLess(DbRef(\"abc\"), DbRef(\"xyz\"))\n self.assertEqual(DbRef(\"abc\"), DbRef(\"abc\"))\n self.assertGreater(DbRef(\"123\"), DbRef(\"14\"))\n self.assertLess(DbRef(\"123\"), DbRef(\"abc\"))\n # check that it ignores 
other attributes\n self.assertEqual(DbRef(\"x\", \"y\", \"z\", \"a\", \"b\"), DbRef(\"x\"))", "title": "" }, { "docid": "3afb46f587477de582cd419985b01f70", "score": "0.4867387", "text": "def selectge(table, field, value, complement=False):\n\n value = Comparable(value)\n return selectop(table, field, value, operator.ge, complement=complement)", "title": "" }, { "docid": "63662e18bce4566dd2e3af8c907db1d3", "score": "0.48666567", "text": "def issubquery(self):\n pass", "title": "" }, { "docid": "e526116f2e75599dcd930e6df64abd6e", "score": "0.4863575", "text": "def test__raw_sql(self):\n self.assertEqual(\n TestModel.objects._raw_sql(((1, 2), (3, 4))),\n 'SELECT CASE tests_testmodel.\"id\" WHEN 1 THEN 2 WHEN 3 THEN 4 ELSE 0 END',\n )", "title": "" } ]
3001344b6fc182cc3564ae5a8062821a
Add an OpenAPI extension for marshmallow_enum.EnumField instances
[ { "docid": "9514b932c402f6c2f21dc187ca92ba11", "score": "0.67637134", "text": "def enum_to_properties(self, field, **kwargs):\n import marshmallow_enum\n\n if isinstance(field, marshmallow_enum.EnumField):\n return {\"type\": \"string\", \"enum\": [m.name for m in field.enum]}\n return {}", "title": "" } ]
[ { "docid": "381e3a39199513c60cbc001503cc590d", "score": "0.6585628", "text": "def validate_enum(validator, enums, instance, schema):\n\n if isinstance(instance, tagged.Tagged):\n instance = instance.base\n\n yield from mvalidators.Draft4Validator.VALIDATORS[\"enum\"](validator, enums, instance, schema)", "title": "" }, { "docid": "2817f4dacd1a277da85a79bdc48cba4f", "score": "0.61875755", "text": "def add_enum(self, key: str, name: str, **kwargs):\n args = {\n 'name': name,\n }\n\n args = {**kwargs, **args}\n self.props[key] = EnumProperty(**args)\n self.meta[key] = ('enum', name, kwargs.get('description', ''))", "title": "" }, { "docid": "39ad3157034350be8fd2fb096200240c", "score": "0.6026432", "text": "def _provide_enum_element_for_function(self, enum_name, element_name):\n if enum_name not in self._types:\n raise ParseError(\"Enum '\" + enum_name +\n \"' is not initialized\")\n\n enum = self._types[enum_name]\n\n if not isinstance(enum, Model.Enum):\n raise ParseError(\"'\" + enum_name + \"' is not an enum\")\n\n if element_name not in enum.elements:\n enum.elements[element_name] = Model.EnumElement(name=element_name)\n\n return enum.elements[element_name]", "title": "" }, { "docid": "1dde79117ac550ed3585ccc53c40839a", "score": "0.59806657", "text": "def test_working_on_enum(self):\n pass", "title": "" }, { "docid": "8352033472da8b4c51cae89b2614333b", "score": "0.59471005", "text": "def Enum(**enums):\n enums[\"items\"] = enums.items()\n return type(\"Enum\", (), enums)", "title": "" }, { "docid": "723c918d8b0dec7e20fb5e59a2f75b8e", "score": "0.5946185", "text": "def enum(**enums):\n return type('Enum', (), enums)", "title": "" }, { "docid": "723c918d8b0dec7e20fb5e59a2f75b8e", "score": "0.5946185", "text": "def enum(**enums):\n return type('Enum', (), enums)", "title": "" }, { "docid": "723c918d8b0dec7e20fb5e59a2f75b8e", "score": "0.5946185", "text": "def enum(**enums):\n return type('Enum', (), enums)", "title": "" }, { "docid": "0a2d194b90198e6cb1d3b43dcabbf54c", "score": "0.5785567", "text": "def _get_enum_fields(self) -> [Field]:\n return list(\n filter(\n lambda field: \"enum\" in field.constraints.keys()\n or \"enum_mapping\" in field.descriptor.keys(),\n self.table_schema.fields,\n )\n )", "title": "" }, { "docid": "4f9e1bb4cece27d2879643aab22ddfb9", "score": "0.5772378", "text": "def enum(api, model, obj, **kwargs):\n res = get_list(api, model, obj)\n print_json(res)\n return res", "title": "" }, { "docid": "e17bf08572b51957bfaf4c74bb4ffdd3", "score": "0.5709799", "text": "def Enum(**enums):\n obj = type('Enum', (), enums)\n obj.named_value = dict([(a, v) for a, v in vars(obj).items() if not a.startswith('__')])\n obj.value_named = dict([(v, a) for a, v in obj.named_value.items()])\n return obj", "title": "" }, { "docid": "e3cfb7c6434ba025abfb17ea8d8905cd", "score": "0.570771", "text": "def enum(**enums):\n enums['reverse'] = dict((value, key) for key, value in six.iteritems(enums))\n return type('Enum', (), enums)", "title": "" }, { "docid": "5b310a247b474a5cd48a6100bceba4a4", "score": "0.5703061", "text": "def _enum(self, ln, ast, pb, ns=None):\n assert isinstance(pb, ast_pb2.Decl), repr(pb)\n pb.line_number = ln\n pb.decltype = pb.ENUM\n e = pb.enum\n _set_name(e.name, ast.name, ns)\n pyname = e.name.native\n self.check_known_name(pyname)\n self._typetable.set_type(pyname, e.name.cpp_name)\n if len(ast) > 3:\n for rename in ast[-1]:\n _set_name(e.members.add(), rename)\n return pyname", "title": "" }, { "docid": "dd3a17e46bb5409ea41b1b3481fb1a55", "score": "0.56426984", 
"text": "def newEnumValue(self, **attrlinks):\n return EnumValue(self, **attrlinks)", "title": "" }, { "docid": "40a645c02b484c1030591f405a96478a", "score": "0.5642141", "text": "def bitfield_add(field, enum):\n field |= 2 ** int(enum)\n return field", "title": "" }, { "docid": "2eb11f121dcee699619fe609f13e4e73", "score": "0.56171185", "text": "def create_enums_pre_validator(enum_cls: type[enum.Enum]):\n\n def _validator(value: Any):\n if value and not isinstance(value, enum_cls) and isinstance(value, enum.Enum):\n return value.value\n return value\n\n return _validator", "title": "" }, { "docid": "e9661821ab327dd1983d4b7dd7ae5e32", "score": "0.554629", "text": "def __init__(self, enum_type, **kwargs):\n # default is the first value\n default = next(iter(enum_type))\n super().__init__(type_=enum_type, default=default, **kwargs)\n self.enum_type = enum_type", "title": "" }, { "docid": "d53610021a3822febc3efbb85ec1c3e1", "score": "0.5517064", "text": "def visit_enum_definition(self, node: \"ASTEnumDefinitionNode\"):\n return self.visit_children(node)", "title": "" }, { "docid": "81b4a7025aeec73e6c6f00cadb1d7f1f", "score": "0.54862934", "text": "def read_enum(self):\n for enum_type in self.obj_info[ENUMS]:\n enum_group = self.obj_info[ENUMS][enum_type]\n for enum, value in enum_group.items():\n self.enums[enum] = value #add to enum dictionary", "title": "" }, { "docid": "3a1422372fbd661c4dddffbb26b184c4", "score": "0.54859954", "text": "def enum_hook(obj):\n if \"__enum__\" in obj:\n # object is marked as enum\n name, member = obj[\"__enum__\"].split(\".\")\n if name == \"StatusType\":\n return getattr(globals()[name], member)\n return obj", "title": "" }, { "docid": "c8d1e3c047f23c9838c293c12cf41ab2", "score": "0.548171", "text": "def def_enum(dct, name):\n return type(name, (Enum,), dct)", "title": "" }, { "docid": "2f84960479c658f213940409b5f7af1f", "score": "0.54636294", "text": "def generate_enum(self):\n with self.l('if {variable} not in {enum}:'):\n self.l('raise JsonSchemaException(\"{name} must be one of {enum}\")')", "title": "" }, { "docid": "6fa59f1b78c3437ad15abdd3e9eb5661", "score": "0.54318726", "text": "def registry(cls) -> 'DefaultDict[ET, Type[EnumSchema]]':\n return cls.__enum__", "title": "" }, { "docid": "7cf9c4468f81bafd049f987ac4cdd363", "score": "0.5420841", "text": "def add_enum(self, name, description, entry_strings, default=None):\n\n entry_strings = [str(e) for e in entry_strings] # Make sure we only get strings\n if default is None:\n default = 0\n else:\n default = entry_strings.index(default)\n self.add(name=name, paramtype=\"int\", description=description, edit_method=name, default=default,\n configurable=True)\n for e in entry_strings:\n self.add(name=name + \"_\" + e, paramtype=\"int\", description=\"Constant for enum {}\".format(name),\n default=entry_strings.index(e), constant=True)\n self.enums.append({'name': name, 'description': description, 'values': entry_strings})", "title": "" }, { "docid": "aab8764d752367c9fba99a063b6c49e1", "score": "0.5415502", "text": "def test_set_enum(self):\n obj = UselessThing(useless_enum=UselessEnum.enval1)\n DBSession.add(obj)\n DBSession.flush()", "title": "" }, { "docid": "a86271973a4fd77b78f3b5bb1ea63e8e", "score": "0.5411199", "text": "def contribute_to_class(self, cls, name, **kwargs): # type: ignore[no-untyped-def]\n super().contribute_to_class(cls, name, **kwargs)\n if \"constraints\" not in cls._meta.original_attrs:\n cls._meta.original_attrs[\"constraints\"] = []\n\n # Note that we cannot use the constraint name 
interpolation syntax\n # because it's too late at this point. It's the metaclass that actually\n # interpolates the values.\n #\n # Fortunately, we can create a name dynamically.\n cls._meta.constraints.append(\n EnumConstraint(\n members=self.enum._member_names_, # type: ignore[arg-type]\n field_name=name,\n name=f\"{cls._meta.db_table}_{name}_enum\",\n )\n )", "title": "" }, { "docid": "9ca0aad10227b7a98aa1092ff82cef85", "score": "0.5379797", "text": "def oalGetEnum(enum):\n al_enum = oalGetALEnum(enum)\n alc_enum = oalGetALCEnum(enum)\n\n return _format_enum(al_enums.get(enum, []) + alc_enums.get(enum, []))", "title": "" }, { "docid": "09e89c1aa4a67eefa70f32b5719e99f3", "score": "0.53778297", "text": "def parse_enum_options(enum):\n params = []\n for param in enum['values']:\n params.append({\n 'name': param['name'],\n 'value': param['number'],\n 'description': parse_description(param['description']),\n })\n return params", "title": "" }, { "docid": "c456759c52c535e61baa74fcb2cee104", "score": "0.5366475", "text": "def genenum(self,out,e,enumname,prefix,members,enumerable=True):\n pass", "title": "" }, { "docid": "ea1649e9cbc583937abf6bec0015c72e", "score": "0.5354881", "text": "def p_enum_type(self):\n c_result = self._eval_field(_Entity_c_type(), _enum_literal_decl_p_enum_type)\n result = AdaNode._wrap(c_result)\n return result", "title": "" }, { "docid": "a9eb7713d3c3d8435e2d3dcb02ad35a8", "score": "0.53365296", "text": "def enumify(TheModel, name_field='name', val_field='id'):\n fields = getattr(TheModel, name_field), getattr(TheModel, val_field)\n data = list((name.replace(' ', '_').lower(), v) for (name, v) in TheModel.select(*fields).tuples())\n return Enum(TheModel.__name__, data)", "title": "" }, { "docid": "cd5f36ed7caffd22a66e3c1196623a89", "score": "0.5326811", "text": "def _get_enums(self, bitfield: Any, excel_row: int, excel_row_cnt: int) -> None:\n if excel_row_cnt <= 1:\n # There is no enums\n return\n\n enumname_cr = cell_utils.coordinate_from_string(self.header_cells[\"Enum Name\"])\n desc_cr = cell_utils.coordinate_from_string(self.header_cells[\"Description\"])\n value_cr = cell_utils.coordinate_from_string(self.header_cells[\"Value\"])\n\n excel_row += 1\n excel_row_cnt -= 1\n\n for r in range(excel_row, excel_row + excel_row_cnt):\n cell = enumname_cr[0] + str(r)\n if isinstance(self.ws[cell].value, str):\n enum_name = self.ws[cell].value\n enum_descr = self.ws[desc_cr[0] + str(r)].value\n enum_value: str = self.ws[value_cr[0] + str(r)].value\n if enum_value is None:\n print(f\"Warning: The Enum {enum_name} is missing and it will be skipped.\")\n else:\n bitfield.add_enum(RegsEnum(enum_name, enum_value, enum_descr, bitfield.width))", "title": "" }, { "docid": "410580f4013f32cc832c97761bc4992f", "score": "0.5296576", "text": "def get_enum_type(self):\n\n return self", "title": "" }, { "docid": "ce94d1007d9cfe54058746d212875a10", "score": "0.5261371", "text": "def _parse_enum_element(self, element):\n params, subelements, attributes = self._parse_base_item(element, \"\")\n\n if len(subelements) != 0:\n raise ParseError(\"Unexpected subelements in enum element\")\n\n self._ignore_attribute(attributes, \"hexvalue\")\n self._ignore_attribute(attributes, \"scope\")\n self._ignore_attribute(attributes, \"rootscreen\")\n\n internal_name = None\n value = None\n for attribute in attributes:\n if attribute == \"internal_name\":\n internal_name = attributes[attribute]\n elif attribute == \"value\":\n try:\n value = int(attributes[attribute])\n except:\n raise 
ParseError(\"Invalid value for enum element: '\" +\n attributes[attribute] + \"'\")\n params[\"internal_name\"] = internal_name\n params[\"value\"] = value\n\n # Magic usage is correct\n # pylint: disable=W0142\n return Model.EnumElement(**params)", "title": "" }, { "docid": "56c3bfd7748f7040694d71423701427b", "score": "0.5248424", "text": "def build_standard_field(self, field_name, model_field):\n\n if isinstance(model_field, ModelEnumChoiceField):\n # These are kwargs, generated by `get_field_kwargs`\n # but are not needed for our field.\n # `model_field` is used only in children of DRF's `ModelField`\n # `choices` is not used because we use `field.enum_class` to validate the choice\n # `max_length` is generated from the model field's max_length and we don't use it\n dump_kwargs = ('model_field', 'choices', 'max_length', 'allow_blank')\n\n initial_kwargs = {\n 'enum_class': model_field.enum_class,\n 'choice_builder': model_field.choice_builder,\n **get_field_kwargs(field_name, model_field)\n }\n finalized_kwargs = {\n key: value for key, value in initial_kwargs.items()\n if key not in dump_kwargs\n }\n\n return EnumChoiceField, finalized_kwargs\n\n return super().build_standard_field(field_name, model_field)", "title": "" }, { "docid": "1855253807f3b755495b889223c22b00", "score": "0.524803", "text": "def register_enum(self, permission_enum):\n self.enums[permission_enum.__key__] = permission_enum", "title": "" }, { "docid": "f000e3ace8de558fdab9e986fbe534cc", "score": "0.521793", "text": "def format_enum_value(type_context, value):\n if protoxform_options.has_hide_option(value.options):\n return ''\n leading_comment, trailing_comment = format_type_context_comments(type_context)\n formatted_annotations = format_options(value.options)\n return '%s%s = %d%s;\\n%s' % (\n leading_comment, value.name, value.number, formatted_annotations, trailing_comment)", "title": "" }, { "docid": "f2a5ac0580f249728f8ca747ab8780ea", "score": "0.5191749", "text": "def enum(self) -> Tuple[str, ...]:\n return self.__enum", "title": "" }, { "docid": "f2a5ac0580f249728f8ca747ab8780ea", "score": "0.5191749", "text": "def enum(self) -> Tuple[str, ...]:\n return self.__enum", "title": "" }, { "docid": "f2a5ac0580f249728f8ca747ab8780ea", "score": "0.5191749", "text": "def enum(self) -> Tuple[str, ...]:\n return self.__enum", "title": "" }, { "docid": "f2a5ac0580f249728f8ca747ab8780ea", "score": "0.5191749", "text": "def enum(self) -> Tuple[str, ...]:\n return self.__enum", "title": "" }, { "docid": "f2a5ac0580f249728f8ca747ab8780ea", "score": "0.5191749", "text": "def enum(self) -> Tuple[str, ...]:\n return self.__enum", "title": "" }, { "docid": "96afdddded876c513e963eb9b3944196", "score": "0.5178464", "text": "def parse_enums(self, src):\n parser = header_parsing.ENUM_DECL\n for tokens, _, _ in parser.scanString(src):\n for enum in tokens:\n members = codegen_util.UniqueOrderedDict()\n value = 0\n for member in enum.members:\n # Leftward bitshift\n if member.bit_lshift_a:\n value = int(member.bit_lshift_a) << int(member.bit_lshift_b)\n # Assignment\n elif member.value:\n value = int(member.value)\n # Implicit count\n else:\n value += 1\n members.update({member.name: value})\n self.enums_dict.update({enum.name: members})", "title": "" }, { "docid": "768b5df24ac045daafa5e34b9d85eda8", "score": "0.51711327", "text": "def to_enum(enum_cls, *, type_name=None, **options) -> graphene.Enum:\n\n # note this won't work until\n # https://github.com/graphql-python/graphene/issues/956 is fixed\n deprecation_reason = 
getattr(enum_cls, \"__deprecation_reason__\", None)\n if deprecation_reason:\n options.setdefault(\"deprecation_reason\", deprecation_reason)\n\n type_name = type_name or (enum_cls.__name__ + \"Enum\")\n enum_data = [(str_to_enum(code.upper()), code) for code, name in enum_cls.CHOICES]\n return graphene.Enum(type_name, enum_data, **options)", "title": "" }, { "docid": "98e83d7589f04402ac90b2042e960422", "score": "0.51643735", "text": "def has_enum(self) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "d6586c7fa7d1b6dc2791dafbf0c3a837", "score": "0.51477224", "text": "def list(cls) -> List[ExtendedEnum]:\r\n return list(map(lambda c: c, cls))", "title": "" }, { "docid": "db3d4c18c3934db3b2807196c7409e67", "score": "0.5105745", "text": "def test_not_working_on_not_enum(self):\n pass", "title": "" }, { "docid": "60d7344707b5d23b952c056a1df18ca2", "score": "0.5081747", "text": "def _parse_enum(self, element, prefix):\n params, subelements, attributes = \\\n self._parse_base_item(element, prefix)\n\n internal_scope = None\n scope = None\n for attribute in attributes:\n if attribute == \"internal_scope\":\n internal_scope = attributes[attribute]\n elif attribute == \"scope\":\n scope = attributes[attribute]\n else:\n raise ParseError(\"Unexpected attribute '\" + attribute +\n \"' in enum '\" + params[\"name\"] + \"'\")\n params[\"internal_scope\"] = internal_scope\n params[\"scope\"] = scope\n\n elements = collections.OrderedDict()\n for subelement in subelements:\n if subelement.tag == \"element\":\n self._add_item(elements, self._parse_enum_element(subelement))\n else:\n raise ParseError(\"Unexpected element '\" + subelement.tag +\n \"' in enum '\" + params[\"name\"] + \"'\")\n params[\"elements\"] = elements\n\n # Magic usage is correct\n # pylint: disable=W0142\n return Model.Enum(**params)", "title": "" }, { "docid": "d83643d977fc3b0d0dba5c1f989d49b1", "score": "0.5078972", "text": "def __init__(self, *enums: object, **kw: Any):\n self._enum_init(enums, kw)", "title": "" }, { "docid": "c5a32016cf7ea78c00886e5e37a58c7c", "score": "0.5078058", "text": "def gen_version_enum(out):\n out.write(\"\"\"\n/**\n * Enumeration of OpenFlow versions\n *\n * The wire protocol numbers are currently used for values of the corresponding\n * version identifiers.\n */\ntypedef enum of_version_e {\n OF_VERSION_UNKNOWN = 0,\n\"\"\")\n\n is_first = True\n max = 0\n for v in of_g.wire_ver_map:\n if is_first:\n is_first = False\n else:\n out.write(\",\\n\")\n if v > max:\n max = v\n out.write(\" %s = %d\" % (of_g.wire_ver_map[v], v))\n\n out.write(\"\"\"\n} of_version_t;\n\n/**\n * @brief Use this when declaring arrays indexed by wire version\n */\n#define OF_VERSION_ARRAY_MAX %d\n\"\"\" % (max + 1))", "title": "" }, { "docid": "ddd2f5197b59a75f57e4eb2c191188e8", "score": "0.50622296", "text": "def _generic_enum_validator(self, value, concept_details, enum):\n errors = []\n if (value not in enum):\n errors.append(\"Value '{}' is not found in enum list for type '{}'.\"\n .format(value, concept_details.type_name))\n return value, errors", "title": "" }, { "docid": "871a839ff55e1f7b461973944d57a6b4", "score": "0.5032432", "text": "def enumeration_column(name, title,\n item_reference_attr=None, # parent item attribute, for enum \n enum_value_attr=None, # enum attribute, for desired value\n ):\n if enum_value_attr is None:\n # then assume that value-attr on enum is same as enum-attr on parent\n enum_value_attr = item_reference_attr\n assert item_reference_attr is not None\n assert enum_value_attr is 
not None\n def getter(item, formatter):\n enum_obj = getattr(item, item_reference_attr)\n enum_obj = translation.translate_obj(enum_obj)\n return getattr(enum_obj, enum_value_attr)\n return column.GetterColumn(title, getter)", "title": "" }, { "docid": "dff924c17ff317abdde04d3050f183c0", "score": "0.501087", "text": "def test_filter_enum_field_schema_type(schema):\n schema_str = str(schema)\n\n assert (\n '''type ArticleType implements Node {\n \"\"\"The ID of the object\"\"\"\n id: ID!\n headline: String!\n pubDate: Date!\n pubDateTime: DateTime!\n reporter: ReporterType!\n editor: ReporterType!\n\n \"\"\"Language\"\"\"\n lang: TestsArticleLangChoices!\n importance: TestsArticleImportanceChoices\n}'''\n in schema_str\n )\n\n filters = {\n \"offset\": \"Int\",\n \"before\": \"String\",\n \"after\": \"String\",\n \"first\": \"Int\",\n \"last\": \"Int\",\n \"lang\": \"TestsArticleLangChoices\",\n \"lang_In\": \"[TestsArticleLangChoices]\",\n \"reporter_AChoice\": \"TestsReporterAChoiceChoices\",\n \"reporter_AChoice_In\": \"[TestsReporterAChoiceChoices]\",\n }\n filters_str = \", \".join(\n [f\"{filter_field}: {gql_type}\" for filter_field, gql_type in filters.items()]\n )\n assert f\" allArticles({filters_str}): ArticleTypeConnection\\n\" in schema_str", "title": "" }, { "docid": "73295523b1e02abe2704e8d7461d1ac7", "score": "0.50021935", "text": "def getDefaultEnum(self) -> object:\n ...", "title": "" }, { "docid": "52e68b4155883ad8ce94deb17cb922f5", "score": "0.50010544", "text": "def enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)", "title": "" }, { "docid": "09b202dd0973e67c7972406f745c89bc", "score": "0.4999532", "text": "def enum_to_json(enum_obj) -> dict:\n # Populate the dictionary with object meta data\n json_obj = {\n \"__enum__\": str(enum_obj),\n \"__module__\": enum_obj.__module__\n }\n # Populate the dictionary with object properties\n return json_obj", "title": "" }, { "docid": "f99f99f4d0f9f239e57abf0a745fcc5c", "score": "0.49880967", "text": "def get(cls, enum):\n return cls.enum(enum)", "title": "" }, { "docid": "f99f99f4d0f9f239e57abf0a745fcc5c", "score": "0.49880967", "text": "def get(cls, enum):\n return cls.enum(enum)", "title": "" }, { "docid": "f99f99f4d0f9f239e57abf0a745fcc5c", "score": "0.49880967", "text": "def get(cls, enum):\n return cls.enum(enum)", "title": "" }, { "docid": "f99f99f4d0f9f239e57abf0a745fcc5c", "score": "0.49880967", "text": "def get(cls, enum):\n return cls.enum(enum)", "title": "" }, { "docid": "f99f99f4d0f9f239e57abf0a745fcc5c", "score": "0.49880967", "text": "def get(cls, enum):\n return cls.enum(enum)", "title": "" }, { "docid": "581f169bae576638b0c9cfa3f9641bcc", "score": "0.4987897", "text": "def get_enum_definitions() -> list:\n # Get the file from GitHub\n file_content = get_github_file(\n \"internal_communication\",\n \"master\",\n \"code/headers/frame_enums.hpp\"\n )\n # Seperate file in seperate enum strings\n enum_strings = get_enum_strings(file_content)\n # Loop over all enum strings\n for enum_string in enum_strings:\n yield get_enum_definition(enum_string)", "title": "" }, { "docid": "e3192962e66abef4efeda17264c094ea", "score": "0.4975181", "text": "def __init__(self, name):\n super(EnumDeclaration, 
self).__init__()\n self.constants = collections.OrderedDict()\n self.name = name", "title": "" }, { "docid": "154c726d211e4ce882be9a7840d76293", "score": "0.4974158", "text": "def test_enums_documented(self):\n\n def error_msg(schema_name, value, enum):\n return (\n \"Could not find a level-3 header (###) entry for \"\n \"{}:'{}' in {}.md.\\n\\n\".format(schema_name, value, enum)\n )\n\n exceptions = [\n \"country_code\", # TL;DR\n \"currency_code\", # TL;DR\n \"nace_code\", # TL;DR\n \"interest_repayment_frequency\", # Same as repayment_frequency\n \"reporting_relationship\", # same as relationship\n ]\n for schema_name in SCHEMA_NAMES:\n for enum, values in schema_enum_registry(schema_name).items():\n if enum in exceptions:\n continue\n\n property_docs = property_doc_name(enum)\n\n if not property_docs:\n raise FileNotFoundError(\n f\"Could not find documentation for: {schema_name} :: {enum}\"\n ) # noqa\n\n _file = os.path.join(DOCS_DIR, property_docs)\n\n for v in values:\n with open(_file) as enum_doc:\n assert \"### {}\".format(v) in enum_doc.read(), error_msg(\n schema_name, v, enum\n ) # noqa", "title": "" }, { "docid": "0c070fbde954b8d554b7177733864839", "score": "0.49677312", "text": "def __repr__(self):\n\n return \"EnumValue: %s(name=%s, value=%d)\" % (self.__class__.__name__, self.name, self.value)", "title": "" }, { "docid": "d6e2ea6d52209b6251c490907f00ae72", "score": "0.496337", "text": "def load_enum(update_def):\n enum = {}\n for path in update_def['column_defs'].values():\n for step in path:\n if 'object' in step and 'enum' in step['object']:\n enum_name = step['object']['enum']\n if enum_name not in enum:\n enum[enum_name] = {}\n enum[enum_name]['get'] = {}\n enum[enum_name]['update'] = {}\n enum_data = read_csv(enum_name, delimiter='\\t')\n for enum_datum in enum_data.values():\n try:\n enum[enum_name]['get'][enum_datum['vivo']] = enum_datum['short']\n except KeyError:\n logger.error(\n u\"Enumeration {} does not have required columns named short and vivo\".format(enum_name))\n raise KeyError\n enum[enum_name]['update'][enum_datum['short']] = enum_datum['vivo']\n return enum", "title": "" }, { "docid": "17a502fc5a16b3fa12af5f50750bb4e7", "score": "0.495965", "text": "def enum_type(self):\n raise Exception(\"Subclass responsibility\")", "title": "" }, { "docid": "bf8eb162621e7e22c5347d0fb98a6b90", "score": "0.49551547", "text": "def test_enum_def(f2003_create):\n code = \"ENUM, BIND(C)\\n\" \" ENUMERATOR :: a = 1\\n\" \"END ENUM\"\n reader = get_reader(code)\n result = Declaration_Construct(reader)\n assert str(result) == code\n assert \"Enum_Def\" in repr(result)", "title": "" }, { "docid": "4ebc0e664aaa170e92b06e59ee5f13ae", "score": "0.49535888", "text": "def isenumvalue(obj):\n\n return hasattr(obj, \"_enumtype\") and obj._enumtype == _ENUM_VALUE", "title": "" }, { "docid": "dbc6e11fdfe293f192366d8694f86a42", "score": "0.49494675", "text": "def _initialize_enums(self):\n return collections.OrderedDict(\n [(\"FunctionID\", Model.Enum(name=\"FunctionID\")),\n (\"messageType\", Model.Enum(name=\"messageType\"))])", "title": "" }, { "docid": "af0edae26415469b0d670fd5c9ec3ad2", "score": "0.49451044", "text": "def _get_enum(_type):\n\tenums = ['analysis_file_types','analysis_types','case_control','dataset_types','experiment_types','file_types','genders','instrument_models','library_selections','library_sources','library_strategies',\n\t'reference_chromosomes','reference_genomes','study_types']\n\t\n\tif not _type in enums:\n\t\traise ValueError(\"Invalid enum: \"+', 
'.join(enums))\n\n\treturn _result_from_response(requests.get(_api_access_endpoint('/enums/'+_type)))", "title": "" }, { "docid": "ebfeeffc56537b3bf4e257b35ff1f76c", "score": "0.49436685", "text": "def from_value(cls, value):\n return cls.enum(value)", "title": "" }, { "docid": "b5ff9cffa1107c53847feb36771c6d65", "score": "0.494362", "text": "def __reduce__(self):\n\n d = OrderedDict(zip(self._fields, self))\n e = d[self._fields[0]]\n for k, v in d.items():\n d[k] = v.value\n return (_enum_factory, (d, e.value, self._name))", "title": "" }, { "docid": "b9f3dd36de1ef7f94041bbab687de45e", "score": "0.49235365", "text": "def _generate_struct_union_enum(self, n, name):\n if name in ('struct', 'union'):\n members = n.decls\n body_function = self._generate_struct_union_body\n else:\n assert name == 'enum'\n members = None if n.values is None else n.values.enumerators\n body_function = self._generate_enum_body\n s = name + ' ' + (n.name or '')\n if members is not None:\n # None means no members\n # Empty sequence means an empty list of members\n s += '\\n'\n s += self._make_indent()\n self.indent_level += 2\n s += '{\\n'\n s += body_function(members)\n self.indent_level -= 2\n s += self._make_indent() + '}'\n return s", "title": "" }, { "docid": "0a718e793b5ca4b5bc9770f4b45f2c6f", "score": "0.49148607", "text": "def _repr_enum(dumper: yaml.Dumper, data: Enum):\n return dumper.represent_str(data.value)", "title": "" }, { "docid": "8ae62322529bd418c20edd5ac8192f81", "score": "0.49033588", "text": "def test_set_enum_from_string(self):\n obj = UselessThing(useless_enum=\"enval2\")\n DBSession.add(obj)\n DBSession.flush()", "title": "" }, { "docid": "38cbe32530759c10fbf734c9f4749f20", "score": "0.48943394", "text": "def pretty_name(self):\n return 'enum_{}'.format(self.name)", "title": "" }, { "docid": "7f560e0b082e07a493de4975fb53c1fe", "score": "0.4893187", "text": "def rome_enum(name, mapping):\n if hasattr(mapping, 'items'):\n values = { k: v for k,v in mapping.items() }\n else:\n values = { k: i for i,k in enumerate(mapping) }\n # reuse already created type if possible\n if name in types:\n typ = types[name]\n if typ.values != values:\n raise ValueError(\"duplicate enum type with different values: %s\" % name)\n return types[name]\n fields = {\n 'name': name,\n 'values': values,\n 'keys': { v: k for k,v in values.items() }\n }\n return type(name, (EnumType,), fields)", "title": "" }, { "docid": "ebbc2bc84962a18141529830f33eb8f8", "score": "0.48767275", "text": "def type_enum(self) -> TxnExpr:\n return self.makeTxnExpr(TxnField.type_enum)", "title": "" }, { "docid": "936e9280ed1179647c91fc94813cfc99", "score": "0.48750526", "text": "def enum(*values, **kwargs):\n \n if ('name' not in kwargs) or kwargs['name'] is None:\n # Create a probably-unique name. 
It doesn't really have to be\n # unique, but getting distinct names each time helps with\n # identification in debugging.\n name = 'Enumeration' + hex(random.randint(0,0xfffffff))[2:].upper()\n else:\n name = kwargs['name']\n \n if len(values) == 1:\n # If there's only one value, we have a couple of alternate calling\n # styles.\n if isinstance(values[0], basestring) or hasattr(values[0], '__iter__'):\n values = values[0]\n \n return type(name, (Enumerated,), {'values': values})", "title": "" }, { "docid": "18b5e2d0ac1a2b0e48d21cfd66f6cc3c", "score": "0.48623276", "text": "def accept(self, visitor):\n visitor.visitEnumeration(self)", "title": "" }, { "docid": "ad7cc01c47502d452a517b30c3a42dd5", "score": "0.48616537", "text": "def _get_enum_name(self):\n # check if the debug info is set to get the enum names from\n # there.\n ret_string = \"\"\n if self.debuginfo is not None:\n enum_value_name = \"\"\n error_msg = \"\"\n try:\n # get the enum type name which is after the \"enum \" word\n # 1- is used as an indexer to avoid index error which\n # in this case is favourable\n enum_type_name = self.type_name.split(\"enum \")[-1]\n enum_value_name = self.debuginfo.get_enum(\n enum_type_name,\n self.value\n )\n except InvalidDebuginfoEnum: # The enum type is missing\n # This can happen if an enum type is typdefed. Something like:\n # typedef enum_type new_enum_type; try to dereference\n try:\n # get the enum type\n enum_type_id = self.debuginfo.get_type_info(\n enum_type_name\n )[1]\n # get the referenced type from the enum type.\n enum_type = self.debuginfo.types[enum_type_id]\n enum_type = self.debuginfo.types[enum_type.ref_type_id]\n enum_type_name = enum_type.name\n # Finally, get the enum value name.\n enum_value_name = self.debuginfo.get_enum(\n enum_type.name,\n self.value\n )\n except (InvalidDebuginfoType, InvalidDebuginfoEnum):\n error_msg += (\n \"(enum type \\\"\" + enum_type_name +\n \"\\\" not found for \\\"\" + self.base_name + \"\\\" member)\"\n )\n except KeyError: # the enum is missing the values\n error_msg += (\n \"(enum \\\"\" + enum_type_name +\n \"\\\" has no value \" + cu.hex(self.value) + \")\"\n )\n except KeyError: # the enum is missing the values\n error_msg += (\n \"(enum \\\"\" + enum_type_name +\n \"\\\" has no value \" + cu.hex(self.value) + \")\"\n )\n\n if enum_value_name == \"\":\n enum_value_name = cu.hex(self.value) + \" \" + error_msg\n else:\n if len(enum_value_name) > 1:\n # There are multiple matches for the values. 
Display all\n # of them one after the other.\n temp_name = \"\"\n for value_name in enum_value_name:\n if temp_name == \"\":\n temp_name += \"( \"\n elif value_name == enum_value_name[-1]:\n temp_name += \" and \"\n else:\n temp_name += \", \"\n temp_name += value_name\n temp_name += \" have value \" + cu.hex(self.value)\n temp_name += \" in \" + enum_type_name + \")\"\n enum_value_name = cu.hex(self.value) + \" \" + temp_name\n else:\n # get_enum panics if there are no matches so we are\n # sure that enum_value_name has at lest one element.\n temp_name = enum_value_name[0]\n enum_value_name = temp_name + \" \" + cu.hex(self.value)\n # concatenate the return string.\n ret_string += self.base_name + \": \" + enum_value_name + \"\\n\"\n elif self.base_name != \"\":\n # Just display the base name and value.\n ret_string += self.base_name + \": \" + cu.hex(self.value) + \"\\n\"\n else:\n # Display the value and name.\n ret_string += self.name + \": \" + cu.hex(self.value) + \"\\n\"\n return ret_string", "title": "" }, { "docid": "75e6d257380eb88c46d86432d62503e2", "score": "0.48559955", "text": "def type(self, type):\n allowed_values = [\"PUBLIC\", \"PRIVATE\"]\n if not value_allowed_none_or_none_sentinel(type, allowed_values):\n type = 'UNKNOWN_ENUM_VALUE'\n self._type = type", "title": "" }, { "docid": "a58f72776ab1c430f3861ffc244d867d", "score": "0.48375952", "text": "def get_enum_type(self):\n\n return None", "title": "" }, { "docid": "caf1170454622e9ad05834e68e2af1bf", "score": "0.4810078", "text": "def get(cls, enum):\n return cls.enum(enum)", "title": "" }, { "docid": "91237d7cbbe3a1fc61469d89bb73144c", "score": "0.4809424", "text": "def _initialize_enums():\n return OrderedDict()", "title": "" }, { "docid": "2cba9c9359d23c08e64219cca963e09a", "score": "0.4803536", "text": "def freeze_enum_props(cls: Type[EnumT]) -> Type[EnumT]:\n ns = vars(cls)\n for name, value in list(ns.items()):\n if not isinstance(value, property) or value.fset is not None or value.fdel is not None:\n continue\n data = {}\n data_exc: dict[EnumT, tuple[Type[BaseException], tuple]] = {}\n\n exc: Exception\n enum: EnumT\n tb: types.TracebackType | None\n for enum in cls:\n # Put the class into the globals, so it can refer to itself.\n try:\n value.fget.__globals__[cls.__name__] = cls\n except AttributeError:\n pass\n try:\n res = value.fget(enum)\n except (ValueError, TypeError) as exc:\n # These exceptions can be recreated later by passing *args. That's not possible\n # for arbitrary exceptions, ensure we only do it for known ones.\n data_exc[enum] = type(exc), exc.args\n except Exception as exc:\n # Something else, need to validate it can be recreated.\n raise ValueError(f'{cls}.{name} raised exception! 
Add this to the above clause!') from exc\n else:\n data[enum] = res\n if data_exc:\n func = _exc_freeze(data, data_exc)\n else: # If we don't raise, we can use this C function directly.\n func = data.get\n setattr(cls, name, property(fget=func, doc=value.__doc__))\n return cls", "title": "" }, { "docid": "d97d61b53cf53b765672d3e355b67f8d", "score": "0.4784635", "text": "def _complete_enum(enum_hint: type,\n incomplete_value: str):\n matching_names = match_string(incomplete_value, (val.name for val in list(enum_hint)))\n matching_vals = (enum_hint[name] for name in matching_names)\n yield from (Completion(val.name,\n start_position=-len(incomplete_value),\n display_meta=str(val.value))\n for val in matching_vals)", "title": "" }, { "docid": "19598d67ad0047384e5d2816ba4f2db3", "score": "0.47713646", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "title": "" }, { "docid": "19598d67ad0047384e5d2816ba4f2db3", "score": "0.47713646", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "title": "" }, { "docid": "19598d67ad0047384e5d2816ba4f2db3", "score": "0.47713646", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "title": "" }, { "docid": "19598d67ad0047384e5d2816ba4f2db3", "score": "0.47713646", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n 
continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "title": "" }, { "docid": "19598d67ad0047384e5d2816ba4f2db3", "score": "0.47713646", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "title": "" }, { "docid": "19598d67ad0047384e5d2816ba4f2db3", "score": "0.47713646", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "title": "" }, { "docid": "1d83a3fa9900378f3de5d964b1e8edb0", "score": "0.4767459", "text": "def _add_deprecation_version(self, field_or_evalue, deprecation_tag, disallowed_tag):\n if field_or_evalue.options.deprecated and not self._frozen_proto and \\\n not protoxform_options.has_hide_option(field_or_evalue.options):\n # If the field or enum value has annotation from deprecation.proto, need to import it.\n self._requires_deprecation_annotation_import = (\n self._requires_deprecation_annotation_import\n or field_or_evalue.options.HasExtension(deprecation_tag)\n or field_or_evalue.options.HasExtension(disallowed_tag))\n if field_or_evalue.name != ENVOY_DEPRECATED_UNAVIALABLE_NAME:\n # If there's a deprecated version annotation, ensure it is valid.\n if field_or_evalue.options.HasExtension(deprecation_tag):\n if not api_version_utils.is_deprecated_annotation_version(\n field_or_evalue.options.Extensions[deprecation_tag]):\n raise ProtoPrintError(\n 'Error while parsing \"deprecated_at_minor_version_enum\" annotation \"%s\" value for enum value %s.'\n % (\n field_or_evalue.options.Extensions[deprecation_tag],\n field_or_evalue.name))\n else:\n # Add the current version as a 
deprecated version annotation.\n self._requires_deprecation_annotation_import = True\n field_or_evalue.options.Extensions[\n deprecation_tag] = self._deprecated_annotation_version_value", "title": "" }, { "docid": "86a08c5c7a3f7dab617677db163e9509", "score": "0.4762553", "text": "def _parse_value_to_enum_member(self, enumeration, description, value):\n if value not in enumeration.__members__.values():\n fmt = \"Invalid {description} {value!r} for {code} chunk\"\n raise PNGSyntaxError(fmt.format(\n description=description,\n value=value,\n code=self.chunk_type.code.decode('ascii')\n ))\n return enumeration(value)", "title": "" }, { "docid": "b0d9758ec2f8eaa659b1610cbb5631d4", "score": "0.47520968", "text": "def get_enum_names(attr, node=None):\n attr_path, node, attr = compose_attr(attr, node=node)\n enum_name = cmds.attributeQuery(attr, node=node, listEnum=True)[0]\n # split by :\n enum_name_split = enum_name.split(':')\n\n enum_name_list = []\n enum_index_list = []\n\n index_current = 0\n for part in enum_name_split:\n # maya saving enum format is name=index, so we split = to get index\n part_split = part.split('=')\n name = part_split[0]\n # because maya only save the index if it's not continuously, like 'attr1=1:attr2=5',\n # otherwise will be 'attr1=1:attr2', so we need to check if it has index or not, if not, use current one\n if len(part_split) > 1:\n index = int(part_split[1])\n else:\n index = index_current\n enum_name_list.append(name)\n enum_index_list.append(index)\n\n index_current = index + 1 # add 1 for the current index, so the next enum attr will use if next to it\n\n return enum_name, enum_name_list, enum_index_list", "title": "" }, { "docid": "f50e7768edf3cb93cb4ef5eb24981435", "score": "0.47508657", "text": "def test_enums(self):\n def snake_error(schema, enum, value):\n return \"{} > {} > {} isn't snake_case!\".format(\n schema, enum, value)\n\n def len_error(schema, enum, value, length):\n return \"{} > {} > {} longer than 22 chars! 
({})\".format(\n schema, enum, value, length)\n\n exceptions = [\n \"country_code\", # ISO 3166\n \"currency_code\", # ISO 4217\n \"base_currency_code\", # ISO 4217\n \"quote_currency_code\", # ISO 4217\n \"underlying_index_tenor\", # day convention\n \"base_rate\", # Bloomberg tickers,\n ]\n long_names = [\n \"independent_collateral_amount\",\n \"interest_repayment_frequency\",\n \"buy_to_let_house_purchase\",\n \"buy_to_let_further_advance\",\n \"cancelled_payout_agreed\",\n \"firm_operating_expenses\",\n ]\n\n snake_pattern = re.compile(\"[a-z][a-z0-9]*(_[a-z0-9]*)*\")\n for schema in SCHEMA_FILES:\n enums = schema_enum_registry(schema)\n for enum, values in enums.items():\n match = re.match(snake_pattern, enum)\n self.assertIsNotNone(match, snake_error(schema, enum, \"\"))\n self.assertEqual(\n 0, match.start(), snake_error(schema, enum, \"\"))\n self.assertEqual(\n len(enum), match.end(), snake_error(schema, enum, \"\"))\n\n if enum not in long_names:\n self.assertLessEqual(\n len(enum), 22,\n len_error(schema, enum, \"\", len(enum))\n )\n if enum in exceptions:\n continue\n for v in values:\n match = re.match(snake_pattern, v)\n self.assertIsNotNone(match, snake_error(schema, enum, v))\n self.assertEqual(\n 0, match.start(), snake_error(schema, enum, v))\n self.assertEqual(\n len(v), match.end(), snake_error(schema, enum, v))\n\n if v not in long_names:\n self.assertLessEqual(\n len(v), 22,\n len_error(schema, enum, v, len(v))\n )", "title": "" }, { "docid": "473765bbb4574b7eb454ef7ebec14239", "score": "0.47501934", "text": "def oalGetALEnum(enum):\n return _format_enum(al_enums.get(enum, []))", "title": "" }, { "docid": "f6132d32d186dcb99f171c7bbfa36902", "score": "0.47404787", "text": "def validate_ENUM(in_value, restriction):\n value = _get_val(in_value)\n if type(value) is list:\n for subval in value:\n if type(subval) is tuple:\n subval = subval[1]\n validate_ENUM(subval, restriction)\n else:\n if value not in restriction:\n raise ValidationError(\"ENUM : %s\"%(restriction))", "title": "" } ]
61352d110ea023b6cacf94292173435c
Median number of shares or units of a given asset traded over a 21 day period.
[ { "docid": "0d2284f3fe9a982a63f69431d5e090dd", "score": "0.0", "text": "def adv22_day_pct(self) -> float:\n return self.__adv22_day_pct", "title": "" } ]
[ { "docid": "8ba43e7a940ff8dc1b498a4bbbc7fc6b", "score": "0.6867634", "text": "def median(wm):\n return statistics.median(wm)", "title": "" }, { "docid": "3febeacdf96db1146d2eda694f2c3be4", "score": "0.6539522", "text": "def mad(x):\n\n return median(abs(x - median(x)))", "title": "" }, { "docid": "b9b6a1f3d756e6ebf7e6dd14c3083220", "score": "0.6519997", "text": "def mad(data: List[Numeric]) -> float:\n m = median(data)\n return median([abs(xi - m) for xi in data])", "title": "" }, { "docid": "5ae9109b10cac0585ee09eeff4d48e53", "score": "0.6483199", "text": "def median(self):\n numbers = [e.cost for e in self.expenses]\n numbers = sorted(numbers)\n center = len(numbers) / 2\n if len(numbers) % 2 == 0:\n # Return the average of the middle two numbers.\n value = sum(numbers[center - 1:center + 1]) / 2.0\n return round(value, 2)\n else:\n return numbers[center]", "title": "" }, { "docid": "f2bbf8e11aaef1927ca1219fdc877d78", "score": "0.6479245", "text": "def calc_median(numbers: list) -> float:\n length = len(numbers)\n numbers = sorted(numbers)\n if length % 2 == 1:\n return numbers[length//2]\n else: \n return (numbers[length//2 - 1] + numbers[length//2]) / 2", "title": "" }, { "docid": "61b5012cf6f31d2a52985d49b5ed6da0", "score": "0.64396936", "text": "def _median(data):\n data = sorted(data)\n length = len(data)\n if length == 0:\n raise ValueError('No median for empty data.')\n elif length % 2 == 1:\n return data[length // 2]\n else:\n i = length // 2\n return (data[i - 1] + data[i]) / 2", "title": "" }, { "docid": "68ffa6d29f35e8376ee1b98f8a70909a", "score": "0.643604", "text": "def median(some_items):\n some_items.sort()\n n = len(some_items)\n\n if n % 2 == 1:\n return some_items[n // 2]\n else:\n first_middle = some_items[n//2 - 1]\n second_middle = some_items[n // 2]\n median = (first_middle + second_middle) / 2\n return median", "title": "" }, { "docid": "4717a72dfc243cedd4ae7c3a1dbb776e", "score": "0.6400522", "text": "def get_median(numbers: list) -> int:\n numbers.sort()\n return numbers[len(numbers) // 2]", "title": "" }, { "docid": "0f492103cbad78b15e0524adf0e0a6f6", "score": "0.6398276", "text": "def median(data_massive):\n\n _data_check(data_massive)\n\n n = len(data_massive)\n sorted_data = sorted(data_massive)\n mid = n // 2\n if n % 2 == 1:\n return sorted_data[mid]\n else:\n return (sorted_data[mid - 1] + sorted_data[mid]) / 2", "title": "" }, { "docid": "2efdb8501eaafe7177cf39508813b6a3", "score": "0.6332389", "text": "def obtainMedian():\n if n_numbers % 2 == 0:\n #It is even, so it will be obtained the mean of the two numbers in the middle\n middle_index = n_numbers / 2\n median = (numbers[middle_index - 1] + numbers[middle_index]) / 2.0\n else:\n middle_index = n_numbers // 2 #floor calculation\n median = numbers[middle_index]\n\n return {\"median\" : median}", "title": "" }, { "docid": "f85beca2802f4fa24b7a1119693efa26", "score": "0.6329643", "text": "def mymad(data, median=None, sigma=False):\n if median is None: median=np.median(data)\n\n mad=np.median (abs(data-median))\n\n if sigma: mad=mad/0.6745\n\n return mad", "title": "" }, { "docid": "bf2558710e1a1f8f92a291ee043472e8", "score": "0.6300817", "text": "def median(items):\n items = sorted(items)\n if len(items) < 1:\n return None\n if len(items) %2 == 1:\n return items[((len(items)+1)/2)-1]\n if len(items) %2 == 0:\n return float(sum(items[(len(items)/2)-1:(len(items)/2)+1]))/2.0", "title": "" }, { "docid": "62d5af7eb48bf107e85a0768cda8f351", "score": "0.6292693", "text": "def get_median(self):\n wait_times = 
[]\n if len(self._history) == 0:\n return 0\n else:\n for h in self._history:\n wait_times.extend(h.get_waited_time())\n median = statistics.median_low(wait_times)\n return median", "title": "" }, { "docid": "91b11d586ce9146e9f1737fbc1383640", "score": "0.6282233", "text": "def median(x):\n n = len(x)\n sorted_x = sorted(x)\n mid = n // 2\n if n % 2 == 1:\n return sorted_x[mid]\n else:\n return (sorted_x[mid - 1] + sorted_x[mid]) / 2", "title": "" }, { "docid": "fca9cf1b90330b2c9fee14214430f438", "score": "0.6238525", "text": "def findMedian(self):\n return (self.h[self.i][0] * self.i - self.h[-1][0]) / 2.0", "title": "" }, { "docid": "6135e11ea8b68869230b2b24f2a24619", "score": "0.62376726", "text": "def find_median(self, values):\n values.sort()\n if len(values) % 2 == 0:\n index1 = len(values) // 2\n index2 = index1 - 1\n return (values[index1] + values[index2]) / 2\n else:\n return values[len(values) // 2]", "title": "" }, { "docid": "81a1c87b4b6f1c33eb5d46a2b2b709b4", "score": "0.6212687", "text": "def median(numbers):\r\n numbers.sort()\r\n #The sort method sorts a list directly, rather than returning a new sorted list\r\n if (len(numbers) % 2 == 0):\r\n middle_index = int(len(numbers)/2) - 1\r\n next_middle_index = middle_index + 1\r\n return float(float((numbers[middle_index]) + float(numbers[next_middle_index]))/2)\r\n middle_index = int(len(numbers)/2)\r\n return numbers[middle_index]", "title": "" }, { "docid": "49e5c156c98e633410a786cbf9f06442", "score": "0.617391", "text": "def mad(data, axis=None):\n return median(absolute(data - median(data, axis)), axis)", "title": "" }, { "docid": "1b43f449aaf894ac54a1b6116a242331", "score": "0.61446786", "text": "def median(data: list):\n try:\n n = count(data)\n if n % 2 == 0:\n return (data[n // 2] + data[n // 2 - 1]) / 2\n else:\n return data[n // 2]\n except TypeError:\n raise NotANumber", "title": "" }, { "docid": "25f6fa0c28966d7c1a680eef824e91ae", "score": "0.6133176", "text": "def median(X):\n X.sort()\n n = len(X)\n mid = (n - 1) / 2.0\n if n == 0:\n return None\n elif (n - 1) % 2 != 0:\n try:\n m = (X[int(mid)] + X[int(mid + 1)]) / 2.0\n except IndexError:\n print n\n m = None\n except TypeError:\n print mid, X[int(mid)], X[int(mid + 1)], len(X)\n else:\n m = float(X[int(mid)])\n return m", "title": "" }, { "docid": "96da40584b9b98314c3653131d11d62d", "score": "0.61058336", "text": "def median(values):\n values.sort()\n n = len(values)\n if n % 2 == 0:\n med = values[(n//2)-1] + values[n//2]\n med /= 2\n else:\n med = values[ceil(n/2)-1]\n return med", "title": "" }, { "docid": "a9610958718de87b3f95547375a9c8d4", "score": "0.6081682", "text": "def median(data):\n\n sorted_data = sorted(data)\n num_elements = len(sorted_data)\n if num_elements == 0:\n raise ValueError\n if num_elements % 2 == 1:\n return sorted_data[num_elements // 2]\n else:\n return ((sorted_data[num_elements // 2 - 1] + sorted_data[\n num_elements // 2]) / 2)", "title": "" }, { "docid": "f601e6f15c904fea76c932780b9273da", "score": "0.6076493", "text": "def median(values):\n n = len(values)\n values.sort()\n\n if n % 2 == 0:\n median1 = values[n//2]\n median2 = values[n//2 - 1]\n median = (median1 + median2) / 2.0\n else:\n median = values[n//2]\n print(\"Median: {:.4f}\".format(median))", "title": "" }, { "docid": "30685a7523b2146cd3628b9fb7697d06", "score": "0.60691375", "text": "def mean_median(self):\n return pm.mean_median(self)", "title": "" }, { "docid": "66339696e2a425a1dbaaafa6b44f86ab", "score": "0.6059866", "text": "def median(values):\n values = 
sorted(values)\n length = len(values)\n mid = length // 2\n if (length % 2):\n return values[mid]\n else:\n return (values[mid - 1] + values[mid]) / 2", "title": "" }, { "docid": "02a8e7396b914df25c965021acea3a31", "score": "0.60551715", "text": "def median(self) -> float | None:\n return self._s.median()", "title": "" }, { "docid": "d2a8da79001ba930859ed9ce128f134c", "score": "0.604869", "text": "def median(self):\n return self._median", "title": "" }, { "docid": "dedead981aa9d61d728bbc2104d05037", "score": "0.60437196", "text": "def median(self):\n return np.nanmedian([hdu.data.astype(float) for hdu in self if hdu.data is not None])", "title": "" }, { "docid": "d7351184704734860aa8f110c392433a", "score": "0.6039393", "text": "def histogram_median(hist: drgn.Object, offset: int = 0) -> int:\n canonical_type = sdb.type_canonicalize(hist.type_)\n assert canonical_type.kind == drgn.TypeKind.ARRAY\n assert sdb.type_canonicalize(\n canonical_type.type).kind == drgn.TypeKind.INT\n\n total_space = 0\n for (bucket, value) in enumerate(hist):\n total_space += int(value) << (bucket + offset)\n\n if total_space == 0:\n return 0\n\n space_left, median = total_space / 2, 0\n for (bucket, value) in enumerate(hist):\n space_in_bucket = int(value) << (bucket + offset)\n if space_left <= space_in_bucket:\n median = 1 << (bucket + offset - 1)\n #\n # Size of segments may vary within one bucket thus we\n # attempt to approximate the median by looking at the\n # number of segments in the bucket and assuming that\n # they are evenly distributed along the bucket's range.\n #\n bucket_fill = space_left / space_in_bucket\n median += round(median * bucket_fill)\n break\n space_left -= space_in_bucket\n return median", "title": "" }, { "docid": "01816261f556328674cf8342867f4e0c", "score": "0.6038527", "text": "def mad(data, sigma=True):\n med = np.median(data)\n mad = np.median(np.abs(data - med))\n if sigma==False:\n return mad\n else:\n return mad*1.4826", "title": "" }, { "docid": "71711c6432d85bd0ac7483d25539eacc", "score": "0.6032952", "text": "def median(self):\n return self._e.median()", "title": "" }, { "docid": "8df64c814e3b0dbeb5133e710e42e35c", "score": "0.60223514", "text": "def find_median(arr):\n if not arr:\n return None\n if len(arr) == 1:\n return arr[0]\n if len(arr) == 2:\n return (arr[0] + arr[1]) / 2\n\n # expected location of medians\n m_low = int((len(arr) - 1) / 2)\n m_hig = int(len(arr) / 2)\n\n return sum(find_vals_between_locations(arr, m_low, m_hig)) / 2", "title": "" }, { "docid": "0529fa97f6ac2052842e121788712539", "score": "0.6016261", "text": "def _median_even(xs: List[float]) -> float:\n i = len(xs) // 2\n sorted_xs = sorted(xs)\n return (sorted_xs[i - 1] + sorted_xs[i]) / 2", "title": "" }, { "docid": "a9391e324ac3c9c0c9e6ac691abb5be4", "score": "0.6014593", "text": "def findMedian(self):\r\n small, large = self.heaps\r\n if len(large) > len(small):\r\n return float(large[0])\r\n return (large[0] - small[0]) / 2.0", "title": "" }, { "docid": "a3625728ccf306d1b3e585046666c758", "score": "0.59985566", "text": "def median(self, column_name):\n if column_name in self.int_columns:\n all_values = self.datatype_convertor(column_name) \n sorted_values = sorted(all_values)\n center = len(sorted_values) / 2\n if len(sorted_values) % 2 == 0:\n return sum(sorted_values[center - 1:center + 1]) / 2.0\n else:\n return sorted_values[center]\n else:\n raise Exception(\"no median for date time columns\")", "title": "" }, { "docid": "1643a54d1780a4f8e571f0d2d9b3b826", "score": "0.5996842", "text": 
"def mean_median(election_results):\n first_party = election_results.election.parties[0]\n data = election_results.percents(first_party)\n\n return numpy.median(data) - numpy.mean(data)", "title": "" }, { "docid": "fe53a4bf288be51160183d6a86524edf", "score": "0.59962285", "text": "def _median_even(xs: List[float]) -> float:\n sorted_xs = sorted(xs)\n hi_midpoint = len(xs) // 2\n\n return (sorted_xs[hi_midpoint - 1] + sorted_xs[hi_midpoint]) / 2", "title": "" }, { "docid": "8473c0ac4d0b89fd21bf255fa0cb402b", "score": "0.59907144", "text": "def medians(file):\n fin = open(file,\"r\")\n reader = csv.reader(fin)\n age, fnlwgt, edun, capg, capl, hpw = ([] for i in range(6))\n total = 0\n for row in reader:\n total+=1\n if(total==1):\n continue\n l = [x.lstrip().rstrip() for x in row]\n # print(\"l= \",l)\n age.append(int(l[0]));\n fnlwgt.append(int(l[2]));\n edun.append(int(l[4]));\n capg.append(int(l[10]));\n capl.append(int(l[11]));\n hpw.append(int(l[12]));\n fin.close()\n return(statistics.median(age),statistics.median(fnlwgt),statistics.median(edun),statistics.median(capg),statistics.median(capl),statistics.median(hpw))", "title": "" }, { "docid": "bd055c0f428e330ce40ab754dc4b2fb1", "score": "0.5988545", "text": "def median(im, win_size=5):\n\treturn ndimage.median_filter(im, win_size)", "title": "" }, { "docid": "51b8258fb453beed0fcd847a9286bb21", "score": "0.59786206", "text": "def get_win_median(self):\r\n return np.median(self.dequa)", "title": "" }, { "docid": "a10a41e4805df008e1544949ece9b36e", "score": "0.59754664", "text": "def median(v):\n n = len(v)\n sorted_v = sorted(v)\n midpoint = n // 2\n \n if n % 2 == 1:\n # if odd, return the middle value\n return sorted_v[midpoint]\n else:\n # if even, return the average of the middle values\n lo = midpoint - 1\n hi = midpoint\n return (sorted_v[lo] + sorted_v[hi]) / 2", "title": "" }, { "docid": "89620a01a03bd06f8f078ae7878ef0a3", "score": "0.5959738", "text": "def get_median(size, numbers):\n\n middle = size/2\n\n if (middle).is_integer():\n median = get_mean(2,[numbers[int(middle-1)],numbers[int(middle)]])\n else:\n median = numbers[int(middle)] \n \n return median", "title": "" }, { "docid": "ec6f7ef31d0278a4ba75b179bb1c15e3", "score": "0.59597325", "text": "def mad(R):\n return np.median(np.abs(R-np.median(R)))/0.67449", "title": "" }, { "docid": "a5c1e34efe05f5a161b1977ff652fdba", "score": "0.5951972", "text": "def median(v):\n n = len(v)\n sorted_v = sorted(v)\n midpoint = n // 2\n if n % 2 == 1:\n # if odd, return the middle value\n return sorted_v[midpoint]\n else:\n # if even, return the average of the middle values\n lo = midpoint - 1\n hi = midpoint\n return (sorted_v[lo] + sorted_v[hi]) / 2", "title": "" }, { "docid": "c54321643bfdbaac08ab619cdcffe695", "score": "0.59359956", "text": "def CalcExceed3TimesMedian(Qvalues):\n\n # Remove NA\n Qvalues = Qvalues.dropna()\n \n # Numb of discharges greater than 3 times the annual median\n median3x = (Qvalues>3*Qvalues.median()).sum()\n \n return (median3x)", "title": "" }, { "docid": "cbbf6fc80a8b0b688ffd9f583bfc0665", "score": "0.5934932", "text": "def median_stellar_metallicity(zone, mdf_key):\n\ts = 0\n\tfor i in range(len(zone.mdf[\"bin_edge_left\"])):\n\t\ts += zone.mdf[mdf_key][i] * (zone.mdf[\"bin_edge_right\"][i] -\n\t\t\tzone.mdf[\"bin_edge_left\"][i])\n\t\tif s >= 0.5: return (zone.mdf[\"bin_edge_left\"][i] +\n\t\t\tzone.mdf[\"bin_edge_right\"][i]) / 2.\n\traise ArithmeticError(\"Median not found.\")", "title": "" }, { "docid": "a4c5a1e61b94aa58668307d0d5fc2881", "score": 
"0.5931967", "text": "def median_func(data_list):\n length = len_func(data_list)\n data_list.sort()\n if length % 2 == 1:\n return data_list[length // 2]\n else:\n return sum_func(data_list[length // 2 -1:length // 2 + 1]) / 2.0", "title": "" }, { "docid": "3602e8b4d7e427701db9f636a198f572", "score": "0.5922357", "text": "def median(data):\n\n sdata = sorted(data)\n n = len(sdata)\n if n < 1:\n raise ValueError\n return (sdata[n // 2] if n % 2 == 1\n else 0.5 * (sdata[n // 2 - 1] + sdata[n // 2]))", "title": "" }, { "docid": "3ad762825d40f0c17899b947c34e2354", "score": "0.5915113", "text": "def median(arr):\n from math import floor, ceil\n sarr = sorted(arr)\n\n if len(arr)%2 == 0:\n #print(sarr, sarr[int(len(arr)/2-1)], sarr[int(len(arr)/2)])\n return (sarr[int(len(arr)/2-1)]+sarr[int(len(arr)/2)])/2.\n else:\n return sarr[ceil(len(arr)/2)]", "title": "" }, { "docid": "a71aa68d74c6e483cf6fdec361979708", "score": "0.5915032", "text": "def test_median(self):\n assert_that(select(1, 3, 1).median(), equal_to(2.0))\n assert_that(select(3, 0, -1).median(), equal_to(1.5))", "title": "" }, { "docid": "d60799a15694a6cf4732c1db9d0c4b35", "score": "0.5913954", "text": "def _median(self, array_img):\r\n return np.median(array_img, self.axis)", "title": "" }, { "docid": "59e2752530a41296ef494603e2140622", "score": "0.5911842", "text": "def findMedian(self):\r\n if len(self.small) == len(self.large):\r\n return (self.large[0] - self.small[0]) / 2.0\r\n return -float(self.small[0]) if len(self.small) > len(self.large) else float(self.large[0])", "title": "" }, { "docid": "c6c6c7d8174f2d33f6f42de604bf74ce", "score": "0.59090257", "text": "def medianFrequencyOfIntent(vis, intent='OBSERVE_TARGET#ON_SOURCE', \n verbose=False, ignoreChanAvgSpws=True, mymsmd=None):\n if (not os.path.exists(vis)):\n print(\"Could not find measurement set.\")\n return\n if (casaVersion >= casaVersionWithMSMD):\n needToClose = False\n if mymsmd is None:\n mymsmd = createCasaTool(msmdtool)\n mymsmd.open(vis)\n needToClose = True\n try:\n if intent == '':\n spws = getNonWvrSpws(mymsmd)\n else:\n if len(mymsmd.intents()) == 0:\n print(\"There are no intents in this dataset.\")\n return None\n spws = mymsmd.spwsforintent(intent)\n wvrspws = mymsmd.wvrspws()\n if len(wvrspws) > 0:\n if mymsmd.namesforspws(wvrspws[0])[0].find('VLA') >= 0:\n wvrspws = []\n spws = list(set(spws).difference(set(wvrspws)))\n myspws = spws[:]\n if (ignoreChanAvgSpws):\n spws = list(set(spws).difference(set(mymsmd.almaspws(chavg=True))))\n if (len(spws) == 0):\n spws = list(set(myspws).difference(set(mymsmd.wvrspws())))\n except:\n spws = []\n if (len(spws) < 1):\n print(\"medianFrequencyOfIntent(): No spws with intent = '%s'\" % (intent))\n print(\"intents = \", mymsmd.intents())\n if needToClose: mymsmd.close()\n return None\n if verbose: print(\"spws = \", spws)\n freq = []\n for spw in spws:\n freq.append(mymsmd.meanfreq(spw))\n if verbose: print(\"spws %d = %f GHz\" % (spw,freq[-1]*1e-9))\n if needToClose: mymsmd.close()\n else:\n vm = ValueMapping(vis)\n spws = vm.getSpwsForIntent(intent)\n if (len(spws) < 1):\n print(\"medianFrequencyOfIntent(): No spws with intent = '%s'\" % (intent))\n return None\n if verbose: print(\"spws = \", spws)\n freq = []\n for spw in spws:\n if (spw in list(vm.spwInfo.keys())):\n if verbose: print(\"spw = %d, \" % (spw), vm.spwInfo[spw])\n freq.append(vm.spwInfo[spw]['meanFreq'])\n return(np.median(freq))", "title": "" }, { "docid": "ceb9335533b0a1c8de8c96bf4724624a", "score": "0.5897382", "text": "def median(datalist: 
list) -> float:\n # using integer division here gives the index of the midpoint, due to zero-based indexing.\n midpoint = len(datalist)//2\n\n # Once we've found the midpoint, we calculate the median, which is just the middle value if there are an\n # odd number of values, or the average of the two middle values if there are an even number\n if len(datalist) % 2 == 0:\n median = (datalist[midpoint] + datalist[midpoint-1])/2\n else:\n median = datalist[midpoint]\n return median", "title": "" }, { "docid": "405feb0d9720b22a025799d851b8d83a", "score": "0.58918506", "text": "def medianFilter(self, img):\n# img_median = mh.median_filter(img,Bc=np.ones((3,3)))\n img_median = sk.filters.median(img,mask=np.ones((3,3)))\n return img_median", "title": "" }, { "docid": "7f50e7bf8209859762d2fa659ee97cd6", "score": "0.58729017", "text": "def median_filter(image:\"napari.types.ImageData\", radius_x: int = 1, radius_y: int = 1, radius_z: int = 0) -> \"napari.types.ImageData\":\n import SimpleITK as sitk\n return sitk.Median(image, [radius_x, radius_y, radius_z])", "title": "" }, { "docid": "563ba0e6347b6ffdc784df34fd0bd8f2", "score": "0.58728236", "text": "def base_median(arry):\n result = np.median(arry)\n return result", "title": "" }, { "docid": "e8c170a82de3d60c744c1372a98e7fa7", "score": "0.58537954", "text": "def median_absolute_deviation(x):\n return np.median(np.abs(x - np.median(x)))", "title": "" }, { "docid": "2b6b34d853f8f061d17a99b50d560442", "score": "0.58534276", "text": "def get_median(self):\n return np.median(self._entry_scores)", "title": "" }, { "docid": "ef960b00a57955145c39830c827d4bbf", "score": "0.5843005", "text": "def median_absolute_deviation(x):\n median_absolute_deviation = np.nanmedian(np.abs(x - np.nanmedian(x)))\n return median_absolute_deviation / 0.6745", "title": "" }, { "docid": "ee1febe454dddab29c6bbc0d58d5d5ea", "score": "0.58387524", "text": "def data_median(my_list):\r\n copy = sorted(my_list)\r\n if len(copy)%2 == 0:\r\n rndx = len(copy) // 2\r\n lndx = rndx - 1\r\n median = (copy[rndx] + copy[lndx]) / 2.0\r\n else:\r\n median = copy[len(copy)//2]\r\n return median", "title": "" }, { "docid": "33a5e4c4e955ab19b1322fcf3178c1fe", "score": "0.58255845", "text": "def median(data):\n\n sdata = sorted(data)\n n = len(sdata)\n\n if n == 0:\n raise ValueError(\"Median of empty list/tuple is not possible\")\n\n return (sdata[n//2] if n % 2 == 1\n else 0.5 * (sdata[n//2 - 1] + sdata[n//2]))", "title": "" }, { "docid": "96e0893e2a3cc7f82e271742534071a5", "score": "0.58232737", "text": "def _median_odd(xs: List[float]) -> float:\n return sorted(xs)[len(xs) // 2]", "title": "" }, { "docid": "b441262b0bb6ca5ed1ceccd00ed54894", "score": "0.58222616", "text": "def median(data):\n sorted_data = sorted(data)\n num_elements = len(sorted_data)\n if num_elements % 2 == 1:\n return sorted_data[num_elements // 2]\n elif not sorted_data:\n raise ValueError('Please insert a list that is not empty')\n else:\n return (\n sorted_data[num_elements // 2 - 1] + sorted_data[num_elements // 2]\n ) / 2", "title": "" }, { "docid": "9c281288e05c9ac862aec6a747a623ba", "score": "0.5820214", "text": "def return_median(df,variable): \n\n temp_df = df[df[variable].notnull()]\n temp_df = temp_df[[variable, 'Outcome']].groupby(['Outcome'])[[variable]].median().reset_index()\n value_for_0 = temp_df.iloc[0][variable]\n value_for_1 = temp_df.iloc[1][variable]\n return value_for_0,value_for_1", "title": "" }, { "docid": "2a3d0f7bdb414b6add4b583c251617a5", "score": "0.58178264", "text": "def median(data):\n # 
Get count/length of dataset\n n = len(data)\n\n # Order the dataset\n s = sorted(data)\n\n # Find middle point\n return (sum(s[n // 2 - 1:n // 2 + 1]) / 2.0, s[n // 2])[n % 2] if n else None", "title": "" }, { "docid": "b0296f9c0cce051133c92eb1199778b6", "score": "0.581046", "text": "def calculate_median(num1: float, num2: float, num3: float) -> float:\n unsorted = [num1, num2, num3]\n largest = max(unsorted)\n smallest = min(unsorted)\n unsorted.remove(largest)\n unsorted.remove(smallest)\n return unsorted[0]", "title": "" }, { "docid": "85d17fe3a26dfa08c7be47ce67e1b767", "score": "0.58054304", "text": "def find_median(quantiles: np.ndarray) -> float:\n num_quantiles = len(quantiles)\n # We assume that we have at least one quantile boundary.\n assert num_quantiles > 0\n\n median_index = int(num_quantiles / 2)\n if num_quantiles % 2 == 0:\n # If we have an even number of quantile boundaries, take the mean of the\n # middle boundaries to be the median.\n return (quantiles[median_index - 1] + quantiles[median_index])/2.0\n else:\n # If we have an odd number of quantile boundaries, the middle boundary is\n # the median.\n return quantiles[median_index]", "title": "" }, { "docid": "add7b2012fa42963391ab175bb4b54a3", "score": "0.57946306", "text": "def getMedianBaselinePerAntenna(asdm, ignoreAntennas=[], percentile=0, sort=False):\n if (not os.path.exists(asdm+'/Antenna.xml')):\n print(\"Could not find Antenna.xml.\")\n return\n return(getExtremeBaselinePerAntenna(asdm, 'median', ignoreAntennas, percentile, sort))", "title": "" }, { "docid": "14a56990e35af081f1869e814adf9af0", "score": "0.5793799", "text": "def _median_odd(xs: List[float]) -> float:\n return sorted(xs)[len(xs) // 2]", "title": "" }, { "docid": "c207ed877d6cee8e648bc18689146314", "score": "0.5788054", "text": "def median_aggregator(time_span, points):\n if points:\n return statistics.median(points)\n else:\n return None", "title": "" }, { "docid": "fca517a92e61d0b7ef310c27d898456c", "score": "0.5782652", "text": "def median(v: List[float]) -> float:\n return _median_odd(v) if len(v) % 2 else _median_even(v)", "title": "" }, { "docid": "a4a39af846dffd5be2da4d6a44d8193d", "score": "0.57756966", "text": "def mad(samples):\n med = median(samples)\n res = map(abs, residuals(samples, med))\n return median(res)", "title": "" }, { "docid": "6f57933e1cc091c55375be968f2ea276", "score": "0.57680106", "text": "def testMedianOfThree(self):\n self.assertEqual(5.9, median([3.1, 7.6, 5.9]))", "title": "" }, { "docid": "f99ff2482dd10b50e1e814c4429a5715", "score": "0.5767194", "text": "def median_mode_change_in_shares(delta_filepath_list):\r\n # Indicator: sometimes more than half the stocks in a fund will be sold off by the same %, ie -1.58% shares sold\r\n # This information may not be critical, but it may paint a unique picture of the ARK fund\r\n mode_median_message_list = []\r\n for fund in delta_filepath_list:\r\n # create a data frame for the today file and the yesterday file\r\n delta_df = pd.read_csv(fund)\r\n\r\n # in the change % of shares of each company in each fund, find the mode, and how many times the mode appears\r\n mode_count=0\r\n mode = delta_df['d_shares_pct'].mode()[0]\r\n for line in delta_df['d_shares_pct']:\r\n if line == mode:\r\n mode_count += 1\r\n mode_msg = 'MODE: ' + str(mode_count) + 'oo' + str(len(delta_df['d_shares_pct'])) + '(' + \\\r\n str(mode) + '% shares). 
'\r\n\r\n quartile_1_count=0\r\n quartile_1 = delta_df['d_shares_pct'].quantile(q=0.25, interpolation='linear')\r\n for line in delta_df['d_shares_pct']:\r\n if line == quartile_1:\r\n quartile_1_count += 1\r\n quartile_1_msg = 'Q1: ' + str(quartile_1_count) + 'oo' + str(len(delta_df['d_shares_pct'])) + \\\r\n '(' + str(quartile_1) + '% shares). '\r\n\r\n # Count the median repetition\r\n quartile_2_count = 0\r\n quartile_2 = delta_df['d_shares_pct'].quantile(q=0.5, interpolation='linear')\r\n for line in delta_df['d_shares_pct']:\r\n if line == quartile_2:\r\n quartile_2_count += 1\r\n quartile_2_msg = 'Q2: ' + str(quartile_2_count) + 'oo' + str(len(delta_df['d_shares_pct'])) + \\\r\n '(' + str(quartile_2) + '% shares). '\r\n\r\n quartile_3_count=0\r\n quartile_3 = delta_df['d_shares_pct'].quantile(q=0.75, interpolation='linear')\r\n for line in delta_df['d_shares_pct']:\r\n if line == quartile_3:\r\n quartile_3_count += 1\r\n quartile_3_msg = 'Q3: ' + str(quartile_3_count) + 'oo' + str(len(delta_df['d_shares_pct'])) + \\\r\n '(' + str(quartile_3) + '% shares). '\r\n\r\n # Output message for the median and mode trend\r\n mode_median_output = '\\n\\t' + mode_msg + quartile_1_msg + quartile_2_msg + quartile_3_msg\r\n\r\n mode_median_message_list.append(mode_median_output)\r\n\r\n return mode_median_message_list", "title": "" }, { "docid": "1b00d3c93760cf08e9e23b8821ae15e5", "score": "0.576642", "text": "def median(nums):\n \n while len(nums) > 2:\n nums.remove(min(nums))\n nums.remove(max(nums))\n \n if len(nums) == 2:\n return (nums[0] + nums[1])/2\n elif len(nums) == 1:\n return nums[0]", "title": "" }, { "docid": "59e662f1b047515ae93d4dbb1424c839", "score": "0.5764636", "text": "def testMedianOfOne(self):\n self.assertEqual(3, median([3]))", "title": "" }, { "docid": "66ecba3e04e3de5041433eaafc743684", "score": "0.5761386", "text": "def median(self, values):\r\n import math\r\n current = self.value_check(values)\r\n current.sort()\r\n middle = int(math.floor(len(current) / 2))\r\n if len(current) % 2 == 0:\r\n result = (current[middle - 1] + current[middle]) / 2\r\n else:\r\n result = current[middle]\r\n print('The median of ' + str(current) + ' is ' + str(result))\r\n return result", "title": "" }, { "docid": "d5631b016f83b1d09b79c09403452d94", "score": "0.57479143", "text": "def median(lst):\n sorted_lst = sorted(lst)\n list_len = len(lst)\n index = (list_len - 1) // 2\n\n if list_len % 2:\n return sorted_lst[index]\n else:\n return (sorted_lst[index] + sorted_lst[index + 1])/2.0", "title": "" }, { "docid": "07ae2975c0512a06d8bc56f5e8ad2ddd", "score": "0.5744121", "text": "def median(v: List[float]) -> float:\n return _median_even(v) if len(v) % 2 == 0 else _median_odd(v)", "title": "" }, { "docid": "9bee4d9c93d255614e5d455684437613", "score": "0.57425183", "text": "def getMedianWeatherFromAIVForASDM(asdm, quantity='PRESSURE',station='MeteoTB2',\n overwrite=False, verbose=False, apex=False,\n timestamp='', perscan=True):\n if (not os.path.exists(asdm)):\n print(\"Could not find ASDM.\")\n return\n if (type(asdm) == str):\n asdms = asdm.split(',')\n else: # it is already a list\n asdms = asdm\n for asdm in asdms:\n mydict = readscans(asdm)[0]\n startdate = getObservationStartDateFromASDM(asdm)[0].split()[0]\n enddate = getObservationEndDateFromASDM(asdm)[0].split()[0]\n stations = [station]\n results = getWeatherFromAIV(startdate, stations, overwrite=overwrite,\n verbose=verbose, apex=apex)\n data = results\n scans = list(mydict.keys())\n filename = asdm+'.%s.%s.statistics.txt' % 
(station,quantity)\n f = open(filename, 'w')\n f.write('# Scan #pts Median MAD mean stddev\\n')\n data = {}\n weatherMjdsec = np.array(results[station]['mjdsec'])\n weatherQuantity = np.array(results[station][quantity])\n if (quantity == 'PRESSURE'):\n weatherQuantity *= 0.01\n if (quantity == 'HUMIDITY'):\n weatherQuantity *= 100\n if (quantity == 'WINDDIRECTION'):\n weatherQuantity = np.degrees(weatherQuantity)\n if (timestamp != ''):\n mjdsec = dateStringToMJDSec(timestamp)\n nearestValue = weatherQuantity[np.argmin(np.abs(mjdsec-weatherMjdsec))]\n allValues = []\n for scan in scans:\n scanStartMjdsec = mydict[scan]['startmjd'] * 86400\n scanEndMjdsec = mydict[scan]['endmjd'] * 86400\n idx1 = np.where(weatherMjdsec >= scanStartMjdsec)[0]\n idx2 = np.where(weatherMjdsec < scanEndMjdsec)[0]\n idx = np.intersect1d(idx1,idx2)\n if (len(idx) > 0):\n allValues += list(weatherQuantity[idx])\n data[scan] = {'median': np.median(weatherQuantity[idx]),\n 'MAD': MAD(weatherQuantity[idx]),\n 'mean': np.median(weatherQuantity[idx]),\n 'stddev': np.std(weatherQuantity[idx]),\n 'min': np.min(weatherQuantity[idx]),\n 'max': np.max(weatherQuantity[idx]),\n 'npts': len(idx)}\n f.write('%3d %d %f %f %f %f %f %f\\n' % (scan, len(idx), data[scan]['median'],\n data[scan]['MAD'], data[scan]['mean'],\n data[scan]['stddev'],data[scan]['min'],\n data[scan]['max']))\n f.close()\n print(\"Wrote \", filename)\n if (not perscan):\n data = {'median': np.median(allValues),\n 'MAD': MAD(allValues),\n 'mean': np.median(allValues),\n 'stddev': np.std(allValues),\n 'min': np.min(allValues),\n 'max': np.max(allValues),\n 'npts': len(idx)}\n if (timestamp != ''):\n return(data,nearestValue)\n else:\n return(data)", "title": "" }, { "docid": "739333619f867b3024459ee3b30e6774", "score": "0.5713353", "text": "def getMedian(self, alist):\n if alist == []:\n return []\n #ๅฏนๅˆ—่กจๆŽ’ๅบ\n blist = sorted(alist)\n length = len(alist)\n #ๅˆคๆ–ญๅฅ‡ๅถ\n if length % 2 == 1:\n #ๅฆ‚ๆžœๆ˜ฏๅฅ‡ๆ•ฐ,ๅˆ™่ฟ”ๅ›žไธญ้—ดๅ€ผ\n # length of list is odd so return middle element\n return blist[int(((length + 1) / 2) - 1)]\n else:\n #ๅฆ‚ๆžœๆ˜ฏๅถๆ•ฐ,่ฟ”ๅ›žไธญ้—ดไธคไธชๅ€ผๅพ—ๅนณๅ‡ๅ€ผ\n # length of list is even so compute midpoint\n v1 = blist[int(length / 2)]\n v2 =blist[(int(length / 2) - 1)]\n return (v1 + v2) / 2.0", "title": "" }, { "docid": "b03ec5287eb7268c7ac8925358af1693", "score": "0.5709814", "text": "def test_filter_single_median():\n assert_equal(Filter(window_size=3, method='median')(X_odd), np.array([\n [0.31954031, 0.50636297],\n [0.07103606, 0.83261985],\n [0.07103606, 0.83261985],\n [0.77815675, 0.83261985],\n [0.77815675, 0.79915856],\n [0.46147936, 0.78052918],\n [0.28987689, 0.7102251 ]\n ]))", "title": "" }, { "docid": "d04e01149057aad20ff46fa0e739cb74", "score": "0.5708436", "text": "def test_median_of_singleton():\n assert median([4]) == 4", "title": "" }, { "docid": "55797d14f68a3a91daf54b08cb5ca1be", "score": "0.57066905", "text": "def median(values=None):\n\n sorts = sorted(values)\n length = len(sorts)\n\n if not values:\n result = 0\n # raise ValueError, \"I can't find the median of an empty list.\"\n elif not length % 2:\n result = (sorts[(length / 2)] + sorts[(length / 2) - 1]) / 2.0\n else:\n result = sorts[length / 2]\n\n return result", "title": "" }, { "docid": "504aa3edec467d9dc7c626f66ca1f651", "score": "0.56988335", "text": "def testMedianOfTwo(self):\n self.assertEqual(4.5, median([3.1, 5.9]))", "title": "" }, { "docid": "5b2e60cb5470e13793a4369fb83ffa94", "score": "0.5690954", "text": "def median(first, mid, 
last):\n median = last\n if (first[0] - mid[0]) * (last[0] - first[0]) >= 0:\n median = first\n elif (mid[0] - first[0]) * (last[0] - mid[0]) >= 0:\n median = mid\n return median", "title": "" }, { "docid": "9e640feab17c7dc99c431e309bdfd949", "score": "0.5683401", "text": "def testMedianOfFive(self):\n self.assertEqual(5.9, median([3.1, 1.3, 7.6, 9.9, 5.9]))", "title": "" }, { "docid": "e8b407066a7a37f5b70e05a58fa39ae7", "score": "0.56656086", "text": "def find_median(filename):\n\trefresh_data_directory()\n\tnumber_of_elements = get_number_of_elements(filename)\n\tmax_capacity = number_of_elements / SCALED_DOWN_FACTOR\n\n\t# if there are less then 100 elements then find median directly\n\t# as current algorithm does not support manipulating 10 input files\n\t# with max capacity less then 10 i.e len(elements) < 100\n\tif number_of_elements < 100:\n\t\treturn find_median_for_small_list(filename)\n\n\tinput_filenames = split_input_data(filename, max_capacity)\n\tinput_buffer_size = max_capacity / len(input_filenames)\n\n\t# create a list of file objects for the input data files\n\tinput_files = [open(filename) for filename in input_filenames]\n\n\t# Fill up the input buffers initially with at MAX input_buffer_size\n\t# values from each of the input files.\n\tinput_buffers = []\n\tfor file in input_files:\n\t\tnumbers = read_numbers_from_file(file, input_buffer_size)\n\t\tinput_buffers.append(numbers)\n\n\toutput_files = merge_numbers(input_buffers, max_capacity, input_buffer_size, input_files)\n\n\t# remove 1 as list are indexed from 0\n\tmedian_element_index = (number_of_elements -1) / 2\n\n\tfile_number = median_element_index / max_capacity\n\tindex_number = median_element_index % max_capacity\n\treturn fetch_value(output_files[file_number], index_number)", "title": "" }, { "docid": "57d20d00dd6c3bfc862af42d6e382ad3", "score": "0.56621724", "text": "def median(lst):\n lst = sorted(lst)\n if len(lst) < 1:\n return None\n# if len(lst) %2 == 1:\n else:\n return lst[((len(lst)+1)/2)-1]\n # else:\n # return float(sum(lst[(len(lst)/2)-1:(len(lst)/2)+1]))/2.0", "title": "" }, { "docid": "80af148e19442dcf9559361923ba4386", "score": "0.56419605", "text": "def medianWavelengthOfIntent(vis, intent='OBSERVE_TARGET#ON_SOURCE', \n verbose=False, ignoreChanAvgSpws=True):\n frequency = medianFrequencyOfIntent(vis, intent, verbose, \n ignoreChanAvgSpws)\n return c_mks/frequency", "title": "" }, { "docid": "4011be2a1dd277f37f2d363b5ee81a1e", "score": "0.5636545", "text": "def getMedianTsysForChannel(asdm, channel, spw):\n mydict = getTsysFromSysCal(asdm,median=True,channel=channel)\n if mydict is None:\n return\n tsys = []\n for antenna in list(mydict.keys()):\n if (antenna != 'median'): \n tsys.append(mydict[antenna][spw]['median'])\n return(np.median(tsys))", "title": "" }, { "docid": "6707aae8dba2023141189b673b289b17", "score": "0.5626508", "text": "def temp_median_filter(scans, d):\n\tresult = []\n\tfor current in range(len(scans)):\n\t\tmedians = []\n\t\ttry:\n\t\t\tfor previousIndex in range(len(scans[current])):\n\t\t\t\tmedians.append(round(np.median([scan[previousIndex]\n\t\t\t\t\tfor scan in scans[max(-1, current - d) + 1: current + 1]]),1))\n\t\texcept IndexError as err:\n\t\t\tprint(err)\n\t\t\texit() # comment this line if u still want the values with varied scans\n\t\tresult.append(medians)\n\treturn result", "title": "" }, { "docid": "f0c36a1f3195c4ee8fb291b2166aa406", "score": "0.5616247", "text": "def median_absolute_deviation(datalist: list) -> float:\n my_median = median(datalist)\n 
deviations = []\n for item in datalist:\n # We take the absolute difference between the value and the median\n X_value = abs(item - my_median)\n # This creates a dataset that is the absolute deviations about the median\n deviations.append(X_value)\n # The median of the absolute deviations is the median absolute deviation\n return median(sorted(deviations))", "title": "" }, { "docid": "3a5092146d791b5049fa1f7c05414006", "score": "0.56139874", "text": "def median(self, data, kernel_size, plot=False):\r\n if(kernel_size%2==0):\r\n print(colored(\"Median filter kernel size must be odd!\\n\", 'red'))\r\n return\r\n\r\n filtered_data = signal.medfilt(data, kernel_size=kernel_size)\r\n\r\n if(plot):\r\n plt.figure(\"Median filter\")\r\n plt.grid()\r\n\r\n ax = plt.subplot(2, 1, 1)\r\n plt.plot(data, label=\"Before filter\", color='r')\r\n plt.ylabel(\"Amplitude\")\r\n plt.legend(loc='upper right')\r\n plt.title(\"Effect of median filter on the signal\")\r\n\r\n ax2 = plt.subplot(2, 1, 2, sharex=ax, sharey=ax)\r\n plt.plot(filtered_data, label=\"After filter\", color='g')\r\n plt.xlabel(\"Sample\")\r\n plt.ylabel(\"Amplitude\")\r\n plt.legend(loc=\"upper right\")\r\n plt.text(70000, -0.7, \"kernel size = %s\"%(kernel_size))\r\n self.signalInterface.fft_plot(filtered_data)\r\n\r\n \r\n result = AttrDict(data=filtered_data, name=\"Median Filter\", kernel_size=kernel_size)\r\n return result", "title": "" }, { "docid": "a8fd03eb41bd2fb9aa4a5f068ad929e9", "score": "0.56088006", "text": "def AchagrauMediana(self):\r\n return numpy.median(self.grau)", "title": "" }, { "docid": "a8fd03eb41bd2fb9aa4a5f068ad929e9", "score": "0.56088006", "text": "def AchagrauMediana(self):\r\n return numpy.median(self.grau)", "title": "" }, { "docid": "f7d1f0e3f75b63c162acd315b0a21409", "score": "0.56012625", "text": "def nanmedian(a, axis=0):\n if list(map(int, np.__version__.split('.')[:3])) < [1, 8, 1]:\n return(scipy.stats.nanmedian(a,axis)) \n else:\n return(np.nanmedian(a,axis))", "title": "" }, { "docid": "9b9f86625467bf6f5e06f00c04d851d9", "score": "0.5597867", "text": "def nmad(data: np.ndarray, nfact: float = 1.4826) -> float:\n if isinstance(data, np.ma.masked_array):\n data_arr = get_array_and_mask(data, check_shape=False)[0]\n else:\n data_arr = np.asarray(data)\n return nfact * np.nanmedian(np.abs(data_arr - np.nanmedian(data_arr)))", "title": "" }, { "docid": "5c738157b82864c28cf258753021a522", "score": "0.5592984", "text": "def _median(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return np.median(X, axis=0), np.var(X, axis=0)", "title": "" }, { "docid": "ef102823ef6a02910ab9bcd7e97611c6", "score": "0.5592349", "text": "def median_win(window):\n return np.median(window, axis=0)", "title": "" } ]
235106b93a4829d54c95d3f1a8f502a1
Singular value decomposition for m x n matrix and m >= n.
[ { "docid": "ca5da7e6a7c9a56b02bd75cdb26e1f9c", "score": "0.55227935", "text": "def _svd(a: jnp.ndarray,\n is_hermitian: bool,\n max_iterations: int) -> Sequence[jnp.ndarray]:\n\n u, h, _, _ = lax.linalg.qdwh(a, is_hermitian, max_iterations)\n\n v, s = lax.linalg.eigh(h)\n\n # Flips the singular values in descending order.\n s_out = jnp.flip(s)\n\n # Reorders eigenvectors.\n v_out = jnp.fliplr(v)\n\n u_out = u @ v_out\n\n # Makes correction if computed `u` from qdwh is not unitary.\n # Section 5.5 of Nakatsukasa, Yuji, and Nicholas J. Higham. \"Stable and\n # efficient spectral divide and conquer algorithms for the symmetric\n # eigenvalue decomposition and the SVD.\" SIAM Journal on Scientific Computing\n # 35, no. 3 (2013): A1325-A1349.\n def correct_rank_deficiency(u_out):\n u_out, r = lax.linalg.qr(u_out, full_matrices=False)\n u_out = u_out @ jnp.diag(lax.sign(jnp.diag(r)))\n return u_out\n\n eps = jnp.finfo(a.dtype).eps\n u_out = lax.cond(s[0] < a.shape[1] * eps * s_out[0],\n correct_rank_deficiency,\n lambda u_out: u_out,\n operand=(u_out))\n\n return (u_out, s_out, v_out)", "title": "" } ]
[ { "docid": "db982ca250fc556cbd63ac23f542deb7", "score": "0.7124864", "text": "def singular_values(A):\n return jnp.linalg.svd(A, full_matrices=False, compute_uv=False)", "title": "" }, { "docid": "556edafe03784eb6ce50bb664b575aa8", "score": "0.6591163", "text": "def do_svd(self, k) :\n #self.svd = decomposition.TruncatedSVD(n_components=100, n_iterations=5)\n #self.svd_output = self.svd.fit_transform(self.comat)\n\n # check for sparseness\n self.sparsify()\n self.n_components = k\n self.u,self.s,v = sparse.linalg.svds(self.comat, k=self.n_components)\n\n #print self.u.shape, self.s.shape\n self.singular_values = self.s\n self.s_untruncated = np.diag(self.s)\n self.s = np.diag(self.s)\n '''self.s_untruncated = np.diag(self.s)\n self.u = self.u[:, 0:k]\n self.s = self.s_untruncated[0:k, 0:k] '''", "title": "" }, { "docid": "2de64220193cd98642a6581cdee6c410", "score": "0.65394306", "text": "def get_singular_values(M, k):\n singular_values = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return singular_values", "title": "" }, { "docid": "08dd4a9d7f3b0f89d7322dfae50faafe", "score": "0.6416927", "text": "def truncated_svd(matrix, n_eigenvecs=None, **kwargs):\n dim_1, dim_2 = matrix.shape\n if dim_1 <= dim_2:\n min_dim = dim_1\n else:\n min_dim = dim_2\n\n if n_eigenvecs is None or n_eigenvecs > min_dim:\n full_matrices = True\n else:\n full_matrices = False\n\n U, S, V = cp.linalg.svd(matrix, full_matrices=full_matrices)\n U, S, V = U[:, :n_eigenvecs], S[:n_eigenvecs], V[:n_eigenvecs, :]\n return U, S, V", "title": "" }, { "docid": "01cb343421e6db37d0d1482ed1d3e462", "score": "0.638594", "text": "def is_singular(M):\n return matrix_rank(M) != M.shape[0]", "title": "" }, { "docid": "dadad1087dcedb83b581a3262cfac6ca", "score": "0.61410356", "text": "def svd_dense(matrix, dimension):\n U, s, Vh = linalg.svd(matrix, full_matrices=False, \n check_finite=False, \n overwrite_a=True)\n U = np.array(U)\n U = U[:, :dimension]\n s = s[:dimension]\n s = np.sqrt(s)\n U = U * s\n U = preprocessing.normalize(U, \"l2\")\n return U", "title": "" }, { "docid": "792d7840a58823014e893d3429c33338", "score": "0.610218", "text": "def pinv_diagsvd(s, m, L):\n t = s.dtype.char\n cond = {0: feps * 1e3, 1: eps * 1e6}[_array_precision[t]]\n cutoff = s[0] * cond\n Si = np.zeros((m, L), t)\n for i in range(len(s)):\n if s[i] > cutoff:\n Si[i, i] = 1.0 / np.conj(s[i])\n return Si", "title": "" }, { "docid": "9b349516efbdcffd627ec7af903614cd", "score": "0.59327865", "text": "def fastsvd(M):\n \n h, w = M.shape\n \n # -- thin matrix\n if h >= w:\n # subspace of M'M\n U, S, V = N.linalg.svd(N.dot(M.T, M))\n U = N.dot(M, V.T)\n # normalize\n for i in xrange(w):\n S[i] = fastnorm(U[:,i])\n U[:,i] = U[:,i] / S[i]\n \n # -- fat matrix\n else:\n # subspace of MM'\n U, S, V = N.linalg.svd(N.dot(M, M.T))\n V = N.dot(U.T, M)\n # normalize\n for i in xrange(h):\n S[i] = fastnorm(V[i])\n V[i,:] = V[i] / S[i]\n return U, S, V", "title": "" }, { "docid": "6ab4fdb6549010693ccc41a7bf8f2df5", "score": "0.58641666", "text": "def svd(mat: T.FloatTensor) -> T.Tuple[T.FloatTensor]:\n return torch.svd(mat)", "title": "" }, { "docid": "979e8609d1c9b9d04149104c63b68c73", "score": "0.586203", "text": "def nullspace_basis(A):\n V = np.linalg.svd(A)[2].T\n rank = np.linalg.matrix_rank(A)\n Z = V[:,rank:]\n return clean_matrix(Z)", "title": "" }, { "docid": "6de71f370330507cd53115a3c53bcee3", "score": "0.5851521", "text": "def x(S):\n\n v = scipy.linalg.eigh(S, eigvals = (0, 0), check_finite = False)[1]\n return np.matrix(v)\n #w, v = 
scipy.linalg.eigh(S, check_finite = False)\n #return np.matrix(v[:, 0:1])", "title": "" }, { "docid": "29c65207002994fd4cf06445ce3ed51e", "score": "0.58392364", "text": "def is_singular(m):\n for i in range(len(m)):\n if not m[i][i]:\n return True\n return False", "title": "" }, { "docid": "0e338d82f854d75b1551abcf2e48ff45", "score": "0.5817087", "text": "def inverse_matrix(self) -> \"Matrix\":\n if self.determinant() != 0:\n return self.adjugate_matrix().transpose().scalar_product(1/self.determinant())\n raise ValueError(\"Don't exists inverted matrix for singular matrix\")", "title": "" }, { "docid": "d7f5079bb735b716224f2d25930a7080", "score": "0.5767522", "text": "def s(self):\n if self.__s is None:\n self.__set_svd()\n return self.__s", "title": "" }, { "docid": "4ed82a1d5080bbc11cb15b3c9039c359", "score": "0.57219553", "text": "def _comp_svd(self): \n # Return if SVD is already computed\n if self.svd_computed:\n return\n \n # Compute SVD. Note that linalg.svd returns V, not its\n # conjugate transpose as is usual.\n U,s,V = np.linalg.svd(self.A, full_matrices=False)\n self.U = U\n self.s = s\n self.V = V.conj().T\n self.svd_computed = True\n \n # Compute the shape of the transformed space\n self.sshape = np.array(self.shape0)\n self.sshape[self.aaxis] = len(s)\n self.sshape = tuple(self.sshape)\n \n # Compute the axes on which the diagonal multiplication\n # is to be repeated. This is all but axis 0\n ndim = len(self.sshape)\n self.srep_axes = tuple(range(1,ndim))", "title": "" }, { "docid": "390178d65e9e350d77468307dd52eae1", "score": "0.5713367", "text": "def fit_singular_value(self):\n if len(self.eigenvectorData) == 1:\n return self.tensorData[0] / self.eigenvectorData[0]\n else:\n c = np.cov(self.eigenvectorData, self.tensorData)\n return c[0, 1] / c[0, 0]", "title": "" }, { "docid": "d8486521280233dbf2a71cf796f01e36", "score": "0.56981206", "text": "def s_eig(matrix):\n if _is_symmetric(matrix):\n return _symmetric_eig(matrix)\n\n raise ValueError(\"The input matrix should be symmetric.\")", "title": "" }, { "docid": "322ad855ab7a24ceabd8184cb6ae8e8f", "score": "0.5696758", "text": "def __set_svd(self):\n if self.isdiagonal:\n x = np.diag(self.x.flatten())\n else:\n # just a pointer to x\n x = self.x\n try:\n\n u, s, v = np.linalg.svd(x, full_matrices=True)\n v = v.transpose()\n except Exception as e:\n print(\"standard SVD failed: {0}\".format(str(e)))\n try:\n v, s, u = np.linalg.svd(x.transpose(), full_matrices=True)\n u = u.transpose()\n except Exception as e:\n np.savetxt(\"failed_svd.dat\", x, fmt=\"%15.6E\")\n raise Exception(\n \"Matrix.__set_svd(): \"\n + \"unable to compute SVD of self.x, \"\n + \"saved matrix to 'failed_svd.dat' -- {0}\".format(str(e))\n )\n\n col_names = [\"left_sing_vec_\" + str(i + 1) for i in range(u.shape[1])]\n self.__u = Matrix(\n x=u, row_names=self.row_names, col_names=col_names, autoalign=False\n )\n\n sing_names = [\"sing_val_\" + str(i + 1) for i in range(s.shape[0])]\n self.__s = Matrix(\n x=np.atleast_2d(s).transpose(),\n row_names=sing_names,\n col_names=sing_names,\n isdiagonal=True,\n autoalign=False,\n )\n\n col_names = [\"right_sing_vec_\" + str(i + 1) for i in range(v.shape[0])]\n self.__v = Matrix(\n v, row_names=self.col_names, col_names=col_names, autoalign=False\n )", "title": "" }, { "docid": "c77226dc86bc97be9d49511e68d92c48", "score": "0.567817", "text": "def compact_svd(A, tol=1e-6):\n eval, evec = la.eig(A.conj().T@A) #Calculate the eigenvalues and eigenvectors of AH A\n sval = np.sqrt(eval) #Calculate the singular values 
of A.\n index = np.argsort(sval)[::-1] #Sort the singular values from greatest to least.\n\n sval = np.array([sval[i] for i in index])\n evec = np.array([evec[i] for i in index]) #Sort the eigenvectors the same way as in the previous step.\n\n #Count the number of nonzero singular values (the rank of A).\n rank = 0\n for j in sval:\n if j > tol:\n rank += 1\n #Keep only the positive singular values.\n sval1 = sval[:rank]\n #Keep only the corresponding eigenvectors.\n evec1 = evec[:,:rank]\n #Construct U with array broadcasting.\n U1 = A@evec1/sval1\n\n return U1, sval1, evec1.conj().T", "title": "" }, { "docid": "6e5f2b4a2f7a138d171aeb4dea580bf0", "score": "0.5663796", "text": "def getSmithNormalform(self):\n \n R = self.basedomain\n m = self.rows\n n = self.columns\n if not R.isEuclideanDomain():\n raise NotImplementedError()\n \n d = R.euclidFunction\n \n H = self.copy()\n \"\"\" H|En\n H0 = ----\n Em|0\n \"\"\"\n delta = lambda x,y: 1 if x==y else 0\n H0 = Matrix(n+m,n+m,self.basedomain)\n for i in range(m):\n for j in range(n):\n H0[i,j] = H[i,j]\n H0[i,j+n] = delta(i,j)\n H0[i+m,i] = delta(i,j)\n l = 0\n s = []\n #(1)\n while l<min(m,n):# and !H0.getSubMatrix(l+1,l+1,m-l-1,n-l-1).isZero():\n minIndex = None\n minVal = -1\n for i in range(m):\n for j in range(n):\n if minVal==-1 or d(H0[i,j])<minVal:\n minIndex = (i,j)\n minVal = d(H0[i,j])\n \n \n \n \n \n \n \n S = H0.getSubMatrix(0, 0, m, n)\n Q = H0.getSubMatrix(0, n, m, m)\n P = H0.getSubMatrix(m, 0, n, n)\n \n return (S,Q,P,s,l)", "title": "" }, { "docid": "02c7808c351864c440a32988f0057f8a", "score": "0.5636609", "text": "def null_space(A):\n u, s, vh = np.linalg.svd(A, full_matrices=True)\n tol = max(A.shape) * np.spacing(np.max(s))\n dim = np.sum(s > tol, dtype=int)\n Q = vh[dim:, :].T.conj()\n return Q", "title": "" }, { "docid": "a81277a58c2d4414ceb6941ea56a6fe5", "score": "0.56348985", "text": "def to_nonsingular(C, eps=1e-6):\n try:\n if C.shape[0]!=C.shape[1]:\n raise ValueError\n except ValueError:\n print \"Matrix must be square\" \n \n if np.linalg.cond(C) < 1/sys.float_info.epsilon:\n C = C + np.eye(C.shape[0])*eps\n #if np.linalg.matrix_rank(C) != C.shape[0]:\n # C = C + np.eye(C.shape[0])*ep\n return C", "title": "" }, { "docid": "02df50823c61f5d6b36bcdbdd709557f", "score": "0.5609644", "text": "def svd(matrix, rank=None):\n if matrix.ndim != 2:\n raise ValueError('Input should be a two-dimensional array. 
matrix.ndim is {} != 2'.format(matrix.ndim))\n dim_1, dim_2 = matrix.shape\n if dim_1 <= dim_2:\n min_dim = dim_1\n else:\n min_dim = dim_2\n\n if rank is None or rank >= min_dim:\n # Default on standard SVD\n U, S, V = scipy.linalg.svd(matrix)\n U, S, V = U[:, :rank], S[:rank], V[:rank, :]\n return U, S, V\n\n else:\n # We can perform a partial SVD\n # First choose whether to use X * X.T or X.T *X\n if dim_1 < dim_2:\n S, U = scipy.sparse.linalg.eigsh(np.dot(matrix, matrix.T), k=rank, which='LM')\n S = np.sqrt(S)\n V = np.dot(matrix.T, U * 1 / S[None, :])\n else:\n S, V = scipy.sparse.linalg.eigsh(np.dot(matrix.T, matrix), k=rank, which='LM')\n S = np.sqrt(S)\n U = np.dot(matrix, V) * 1 / S[None, :]\n\n # WARNING: here, V is still the transpose of what it should be\n U, S, V = U[:, ::-1], S[::-1], V[:, ::-1]\n return U, S, V.T", "title": "" }, { "docid": "f3cd6e79091a38da74637bd1a7bf5730", "score": "0.5568697", "text": "def svd(self):\n try:\n kernel = np.dot(self.k.k_interp_cross,\n inv(self.k.k_pot +\n self.k.lambd *\n np.identity(self.k.k_pot.shape[0])))\n u_svd, sigma, v_svd = np.linalg.svd(kernel, full_matrices=False)\n except LinAlgError:\n raise LinAlgError('Encoutered Singular Matrix Error:'\n 'try changing ele_pos slightly')\n self.plot_svd_sigma(sigma)\n# self.plot_svd_u(u_svd)\n# self.plot_svd_v(v_svd)\n# self.plot_svd_sigma_lambd(sigma)\n return u_svd, sigma, v_svd", "title": "" }, { "docid": "9e11524047bfcb3f0581e64bfc4fd7b3", "score": "0.5564417", "text": "def compress(k, psi, eps=0.00001):\n n=psi.size\n mat = psi.reshape(1<<k, n//(1<<k))\n U, s, Vd = np.linalg.svd(mat, full_matrices=False)\n# print(sum(s**2))\n# print(s**2)\n n_t = truncate(s, eps)\n# print(n_t)\n out = U[:,:n_t] * s[:n_t]\n \n return out.reshape(out.size, 1)", "title": "" }, { "docid": "acee717f7d7c9fb9cf1cef513af87dc8", "score": "0.55550987", "text": "def incre_svd():\n\n\tc = yield\n\ts = np.array([npl.norm(c.astype(float))])\n\t# s = npl.norm(c.astype(float), axis=1)\n\tU0 = c / s\n\tUp = 1.0\n\tV0 = 1.0\n\tVp = 1.0\n\tVpi = 1.0\n\n\twhile True:\n\t\tr = len(s)\n\t\tU = np.dot(U0, Up)\n\t\tV = np.dot(V0, Vp)\n\t\tc = yield U, s, V\n\t\tif c is None:\n\t\t\tcontinue\n\n\t\tI = np.identity(r)\n\t\tO = np.zeros(r)\n\n\t\tl = np.dot(U.T, c)\n\t\tj = c - np.dot(U, l)\n\t\tk = npl.norm(j)\n\t\tj /= k\n\t\t\n\t\tprint(k)\n\t\tif k < trunc:\n\t\t\tk = 0\n\t\t\n\t\tQ = block_diag(np.diag(s), k)\n\t\tQ[:r, -1:] = l\n\t\tA, s, B = npl.svd(Q, full_matrices=False)\n\t\tB = B.T\n\n\t\tif k < trunc:\n\t\t\ts = s[:-1]\n\t\t\tUp = np.dot(Up, A[:-1, :-1])\n\n\t\t\tW, w = np.vsplit(B[:, :-1], [r])\n\t\t\tWi = (I + np.dot(w.T, w) / (1 - np.dot(w, w.T))).dot(W.T)\n\n\t\t\tVp = np.dot(Vp, W)\n\t\t\tVpi = np.dot(Wi, Vpi)\n\t\t\tV0 = np.vstack((V0, np.dot(w, Vpi)))\n\n\t\telse:\n\t\t\tUp = block_diag(Up, 1).dot(A)\n\t\t\tU0 = np.hstack((U0, j))\n\t\t\tV0 = block_diag(V0, 1)\n\t\t\tVp = np.dot(block_diag(Vp, 1), B)\n\t\t\tVpi = np.dot(B.T, block_diag(Vpi, 1))", "title": "" }, { "docid": "1a83e46cb0fd6c511e65f854a1e84156", "score": "0.5549587", "text": "def svdvals(x):\n return np.linalg.svd(x, full_matrices=False, compute_uv=False)", "title": "" }, { "docid": "aff210285af20db89bbb7195baf53269", "score": "0.5545672", "text": "def svd(M):\n\n Ulist = get_list(np.dot(M, M.T))\n Vlist = get_list(np.dot(M.T, M))\n U = np.zeros((len(Ulist[0][1]), len(Ulist)))\n V = np.zeros((len(Vlist[0][1]), len(Vlist)))\n i = None\n\n for i in range(len(Ulist)):\n for j in range(len(Ulist[i][1])):\n U[j][i] = Ulist[i][1][j]\n\n for i in 
range(len(Vlist)):\n for j in range(len(Vlist[i][1])):\n V[j][i] = Vlist[i][1][j]\n\n V = V.T\n\n sigma = np.zeros((len(Ulist), len(Vlist)))\n\n for i in range(len(Ulist)):\n sigma[i][i] = Ulist[i][0]**0.5\n\n for i in range(len(sigma)):\n temp = np.dot(M,np.matrix(V[i]).T)\n temp_U = np.matrix(U[:,i]).T\n flag = False\n for j in range(len(temp)):\n if temp_U[j] !=0.0:\n if temp[j]/temp_U[j] <0.0 :\n flag=True\n break \n if flag :\n for k in range(len(U[:,i])):\n U[k][i]=-1*U[k][i]\n\n return U, sigma, V", "title": "" }, { "docid": "e16e911d452cf4285738bd43fcc493e7", "score": "0.5522022", "text": "def SVD_get_info(matrix, s_index):\n U, S, V = np.linalg.svd(matrix)\n rows = matrix.shape[0]\n cols = matrix.shape[1]\n s_matrix = np.zeros((rows, cols))\n\n if rows <= cols:\n value = rows\n else:\n value = cols\n for index in range(0, value):\n s_matrix[index, index] = S[index]\n\n A_s = np.zeros((rows, cols))\n for index in range(0, s_index):\n A_s = A_s + compute_matrix(U[:, index], V[:, index], S[index])\n\n non_zeros_s = []\n for s in S:\n if abs(s) > 10 ** (-10):\n non_zeros_s.append(s)\n return U, non_zeros_s, V, len(non_zeros_s), max(non_zeros_s) / min(non_zeros_s), np.max(\n np.sum(matrix - U * s_matrix * V, axis=1)), A_s, np.max(np.sum(matrix - A_s, axis=1))", "title": "" }, { "docid": "2dea07dd5715b13879a12da24b302f22", "score": "0.54840875", "text": "def singular_value_decompose(trajectory):\n # intrinsic dimensionality of the trajectory space\n d = np.linalg.matrix_rank(trajectory)\n # SVD calculation to NumPy\n U, Sigma, V = np.linalg.svd(trajectory)\n V = V.T\n\n # Calculate the elementary matrices of X, storing them in a multidimensional NumPy array.\n # This requires calculating sigma_i * U_i * (V_i)^T for each i, or sigma_i * outer_product(U_i, V_i).\n # Note that Sigma is a 1D array of singular values, instead of the full M x K diagonal matrix.\n traj_elem = np.array([Sigma[i] * np.outer(U[:, i], V[:, i]) for i in range(0, d)])\n\n # Quick sanity check: the sum of all elementary matrices in X_elm should be equal to X, to within a\n # *very small* tolerance:\n if not np.allclose(trajectory, traj_elem.sum(axis=0), atol=1e-10):\n print(\"WARNING: The sum of X's elementary matrices is not equal to X!\")\n\n return traj_elem", "title": "" }, { "docid": "a1c5e4efb22b360c3826532f34729209", "score": "0.5470796", "text": "def get_principal_components(m):\n return numpy.linalg.svd(m.astype(float), full_matrices=False)[0].astype(m.dtype)", "title": "" }, { "docid": "24d8c058d68e0a701a75b50a84c90217", "score": "0.54695576", "text": "def svd(A, full_matrices=True):\n A = np.asarray(A)\n m, n = A.shape\n if m < n:\n U, s, VT = svd(A.T, full_matrices)\n return VT.T, s, U.T\n\n Q, B, RT = bidiag(A)\n for _ in range(20 * n):\n if svd_bidiag_step(Q, B, RT):\n break\n else:\n warn(\"Did not converge\")\n\n U, s, VH = svd_normalize(Q, B.diagonal(), RT)\n if not full_matrices:\n U = U[:,:n]\n return U, s, VH", "title": "" }, { "docid": "90f91bab67ca6ebf6cc97ccde2af1592", "score": "0.54500717", "text": "def _renorm_singular_vals(s, n_chi, renorm_power):\n s_tot_keep = 0.0\n s_tot_lose = 0.0\n for i in range(s.size):\n s2 = s[i]**renorm_power\n if not np.isnan(s2):\n if i < n_chi:\n s_tot_keep += s2\n else:\n s_tot_lose += s2\n return ((s_tot_keep + s_tot_lose) / s_tot_keep)**(1 / renorm_power)", "title": "" }, { "docid": "4d12993d60a724ac3ab7ec7be68af3df", "score": "0.54296756", "text": "def nullspace(matrix, atol=1e-13, rtol=0.0): # noqa: D402\n matrix = np.atleast_2d(matrix)\n _, sigma, vh = 
svd(matrix)\n tol = max(atol, rtol * sigma[0])\n num_nonzero = (sigma >= tol).sum()\n return vh[num_nonzero:].conj().T", "title": "" }, { "docid": "38b6394dbce15142993411671b9ff0c0", "score": "0.5415292", "text": "def _generate_random_stochastic_matrix(self, n):\n matrix = np.random.rand(n,n)\n self.matrix = matrix / matrix.sum(axis=1)[:,None]\n # Set diagonal to zeroes and add the diagonal num to rest of columns in row to keep stochastic\n for i in range(len(self.matrix)):\n divided_diagonal = self.matrix[i][i] / (len(self.matrix) - 1)\n for j in range(len(self.matrix)):\n if i == j:\n self.matrix[i][j] = 0\n else:\n self.matrix[i][j] += divided_diagonal", "title": "" }, { "docid": "71bc20e0a30b958a1681ce31fd33d2d7", "score": "0.54063797", "text": "def Svd(a, uv, full):\n u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=uv)\n return s, u, v", "title": "" }, { "docid": "60b322f0bdb3ec8d32c857d249b93a73", "score": "0.53811485", "text": "def cadzow_single(x, M, K, min_var=False):\n # variable names based upon Chen et al, JMR 1994 109A 46\n # form the Hankel data matrix X\n N = len(x)\n L = N - M + 1\n X = scipy.linalg.hankel(x[:L], x[L - 1:])\n\n # Compute the SVD of X\n U, s, Vh = scipy.linalg.svd(X)\n\n # correct the singular values and truncate the rank K\n Ul = np.mat(U[:, :K])\n Vlh = np.mat(Vh[:K, :]) # first K columns of V are first K rows of Vh\n sl = s[:K]\n\n if min_var: # adjust singular values using minimum variance method\n # estimate variance of noise singular values\n s2 = (1. / (M - K)) * np.power(s[K:], 2).sum()\n sl = np.array([l - s2 / l for l in sl])\n\n Sl = np.mat(np.diag(sl))\n\n # compute enhanced data vector for rank-reduced data matrix\n Xp = Ul * Sl * Vlh\n\n xp = np.empty_like(x)\n for i, v in enumerate(range(M - 1, -L, -1)):\n # the anti-diagonal is the diagonal with rows reversed\n xp[i] = np.diag(Xp[:, ::-1], v).mean()\n return xp", "title": "" }, { "docid": "2e5d724be36371f63f2267068a4c6642", "score": "0.53770065", "text": "def scaling_matrix(s0, s1, s2):\n m = identity()\n matrix_elem(m, 0, 0, s0)\n matrix_elem(m, 1, 1, s1)\n matrix_elem(m, 2, 2, s2)\n return m", "title": "" }, { "docid": "892d047c3f8bf0c94b9ba11d084330f7", "score": "0.5371953", "text": "def InverseMatrix(matrix):\n vector=[0]*len(matrix)\n # Unveri reversible matrix\n if Determinant(matrix, 1) == 0:\n print(\"Error,Singular Matrix\\n\")\n return\n # result matrix initialized as singularity matrix\n result = MakeIMatrix(len(matrix), len(matrix))\n # loop for each row\n for i in range(len(matrix[0])):\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\n # pivoting process\n matrix, vector = RowXchange(matrix, vector)\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\n elementary[i][i] = 1 / matrix[i][i]\n result = MultiplyMatrix(elementary, result)\n matrix = MultiplyMatrix(elementary, matrix)\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\n # elementary matrix and multiply with the result matrix )\n for j in range(i + 1, len(matrix)):\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\n elementary[j][i] = -(matrix[j][i])\n matrix = MultiplyMatrix(elementary, matrix)\n result = MultiplyMatrix(elementary, result)\n\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\n # (make elementary matrix and multiply with the result matrix )\n for i in range(len(matrix[0]) - 1, 0, -1):\n for j in range(i - 1, -1, -1):\n 
elementary = MakeIMatrix(len(matrix[0]), len(matrix))\n elementary[j][i] = -(matrix[j][i])\n matrix = MultiplyMatrix(elementary, matrix)\n result = MultiplyMatrix(elementary, result)\n\n return matrix", "title": "" }, { "docid": "e8a60561c719398a20bf35d3810404c0", "score": "0.5363729", "text": "def matInv(M):\n m2 = [row[:]+[int(i==j) for j in range(len(M) )] for i,row in enumerate(M) ]\n return [row[len(M[0]):] for row in m2] if gauss_jordan(m2) else None", "title": "" }, { "docid": "23959fe4d5f33ecc525d704b091f4e7d", "score": "0.53496", "text": "def null(A: npy.ndarray, eps: float = 1e-15):\n u, s, vh = npy.linalg.svd(A)\n null_space = npy.compress(s <= eps, vh, axis=0)\n return null_space.T", "title": "" }, { "docid": "70bc54f6d3f90235f82138e986da9dfe", "score": "0.53319997", "text": "def an_glcm_svd(in_glcm, no_of_val2return=15):\n pro_out_svd = []\n for i in range(0, in_glcm.shape[3]):\n u, s, v = np.linalg.svd(in_glcm[:, :, :, i])\n s_sorted = -np.sort(-s, axis=0)\n pro_out_svd.append(np.transpose(s_sorted[0:no_of_val2return]))\n # pyplot.plot(s_sorted[0:no_of_val2return])\n\n out_svd = np.ravel(pro_out_svd)\n\n return out_svd", "title": "" }, { "docid": "126049def76e4464b0d903a2b7d09959", "score": "0.5324177", "text": "def hosvd(X):\n # left singular vectors for each matricization\n U = [scipy.linalg.svd(matricize(X,k), full_matrices=False, check_finite=False)[0]\n for k in range(X.ndim)]\n C = apply_tprod(tuple(Uk.T for Uk in U), X) # core tensor (same size as X)\n return TuckerTensor(U, C)", "title": "" }, { "docid": "a525ab27dd6b11e633c1bf57e9c97f2e", "score": "0.53219146", "text": "def SVD_pseudo_inverse(matrix, compute_full_matrix = False, debug=False):\n n, p = matrix.shape\n U, sigma, Vt = LA.svd(matrix, full_matrices=compute_full_matrix)\n Sigma = np.zeros((min(n, p), min(n, p)))\n Sigma[:min(n, p), :min(n, p)] = np.diag(1 / sigma)\n\n system.log(\"u:{}\".format(U.shape),debug=debug)\n system.log(\"sigma:{}\".format(sigma.shape),debug=debug)\n system.log(\"Vt:{}\".format(Vt.shape,debug=debug))\n # Pen-roose psedoinverse\n pseudo_inverse = (Vt.T.dot(Sigma).dot(U.T))\n return pseudo_inverse", "title": "" }, { "docid": "19851e1f3daeeb4b2a6d105e1591465e", "score": "0.53104556", "text": "def svd(a, full_matrices=True, compute_uv=True, hermitian=False):\n import numpy as _nx\n a, wrap = _makearray(a)\n\n if hermitian:\n # note: lapack svd returns eigenvalues with s ** 2 sorted descending,\n # but eig returns s sorted ascending, so we re-order the eigenvalues\n # and related arrays to have the correct order\n if compute_uv:\n s, u = eigh(a)\n sgn = sign(s)\n s = abs(s)\n sidx = argsort(s)[..., ::-1]\n sgn = _nx.take_along_axis(sgn, sidx, axis=-1)\n s = _nx.take_along_axis(s, sidx, axis=-1)\n u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)\n # singular values are unsigned, move the sign into v\n vt = transpose(u * sgn[..., None, :]).conjugate()\n return SVDResult(wrap(u), s, wrap(vt))\n else:\n s = eigvalsh(a)\n s = abs(s)\n return sort(s)[..., ::-1]\n\n _assert_stacked_2d(a)\n t, result_t = _commonType(a)\n\n m, n = a.shape[-2:]\n if compute_uv:\n if full_matrices:\n if m < n:\n gufunc = _umath_linalg.svd_m_f\n else:\n gufunc = _umath_linalg.svd_n_f\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m_s\n else:\n gufunc = _umath_linalg.svd_n_s\n\n signature = 'D->DdD' if isComplexType(t) else 'd->ddd'\n with errstate(call=_raise_linalgerror_svd_nonconvergence,\n invalid='call', over='ignore', divide='ignore',\n under='ignore'):\n u, s, vh = gufunc(a, signature=signature)\n u 
= u.astype(result_t, copy=False)\n s = s.astype(_realType(result_t), copy=False)\n vh = vh.astype(result_t, copy=False)\n return SVDResult(wrap(u), s, wrap(vh))\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m\n else:\n gufunc = _umath_linalg.svd_n\n\n signature = 'D->d' if isComplexType(t) else 'd->d'\n with errstate(call=_raise_linalgerror_svd_nonconvergence,\n invalid='call', over='ignore', divide='ignore',\n under='ignore'):\n s = gufunc(a, signature=signature)\n s = s.astype(_realType(result_t), copy=False)\n return s", "title": "" }, { "docid": "bbd5e937acb4bb648399428742325d0f", "score": "0.53097486", "text": "def identity(n):\n return Matrix.diagonal_single_value(1, n)", "title": "" }, { "docid": "a6f9da700b9b119e39ef26de3d08c812", "score": "0.5305327", "text": "def svd(data):\n #center the data\n mean = np.mean(data, axis=0)\n data -= mean\n \n P, D, Q = np.linalg.svd(data, full_matrices=False)\n \n return P, D, Q", "title": "" }, { "docid": "a139707c7a7909a3a35a458eef68b066", "score": "0.5304829", "text": "def projectSemidef(mat):\n eigenvalues, evectors = LA.eig(mat)\n\n numNegativeEigenvalues = 0\n \n #print 'before projection ' + str(mat) + '\\r\\n'\n Dplus = []\n #holds positive eigenvalues\n \n \n for value in eigenvalues:\n if value < 0:\n numNegativeEigenvalues+=1\n #print \"Negative eigenvalue\",value.real\n Dplus.append(max(value, 0))\n \n Dcapped = []\n for num in Dplus:\n Dcapped.append(min(num, 1000))\n D = sp.diag((Dcapped))\n\n newMat = evectors.dot(D).dot(evectors.transpose())\n \n return newMat,numNegativeEigenvalues", "title": "" }, { "docid": "3ab1d572c27e07b6c4ee1dc8d306ef88", "score": "0.53002006", "text": "def draw_svd(X):\n Xp = X - X.mean(axis=0)\n U, S, Vt = np.linalg.svd(Xp)\n Xpp = X.dot(Vt.T)\n Zp = draw_uniform(Xpp)\n Z = Zp.dot(Vt)\n return Z", "title": "" }, { "docid": "b36058a23a2c0cbd58e54d79f9d72de6", "score": "0.5293665", "text": "def assertSU2Matrix(self, M: qtypes.GenericMatrix) -> None:\n self.assertUnitaryMatrix(M)\n self.assertAlmostEqual(numpy.linalg.det(M), 1.0)", "title": "" }, { "docid": "a6b1014bb4a79f74846ae776bc293fcf", "score": "0.5286726", "text": "def get_initial_S(num_nodes):\n S = np.zeros((num_nodes, num_nodes))\n np.fill_diagonal(S, 1)\n return S", "title": "" }, { "docid": "fb66801e78e66a9d1472eabc2b787e98", "score": "0.5284813", "text": "def isSingular(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "ad5db4737b7c11c44a11c58b09919702", "score": "0.5272904", "text": "def random_nonsingular_matrix(ring, k, n):\n while True:\n G = random_matrix(ring, k, n)\n if G.rank() == min(k,n):\n return G", "title": "" }, { "docid": "826095d89049d00f1bece7823ac8c9b5", "score": "0.5271228", "text": "def get_svd_diag(self):\n raise NotImplementedError()", "title": "" }, { "docid": "88465423c309c326b7e0979e7f0bc37a", "score": "0.5269971", "text": "def performSVD(pixSeriesNorm):\n # don't include the single pixel time series which are all zero\n idx = np.any(pixSeriesNorm, axis=1)\n pixSeriesNormNonzero = pixSeriesNorm[idx, :]\n\n evalues, prinComps = np.linalg.svd(pixSeriesNormNonzero, full_matrices=0,compute_uv = 1)[1:]\n return evalues, prinComps", "title": "" }, { "docid": "39e114d9b78c5f05cbef21f3ab0b8abc", "score": "0.5268732", "text": "def GetSparseForceMatrix(MassReciprocal,SpringConstant,SpringIndices):", "title": "" }, { "docid": "c613060510c2a62aae14c02160188a5b", "score": "0.5266448", "text": "def Gram_Schmidt(self):\r\n m,n=self.m,self.n\r\n basis=Matrix(n,m)\r\n MT=self.Transpose()\r\n first=Vector(MT.rows[0])\r\n 
basis.rows[0]=list(first*(1/first.norm()))\r\n for i in range(1,n):\r\n v=Vector(MT.rows[i])\r\n v_orthogonal=copy.deepcopy(v)\r\n for j in range(i):\r\n u=Vector(basis.rows[j])\r\n v_orthogonal-=u*(u.dot(v)/u.norm())\r\n v_normalized=v_orthogonal/v_orthogonal.norm()\r\n basis.rows[i]=list(v_normalized)\r\n return basis.Transpose()", "title": "" }, { "docid": "9d26eab8010537882e03f534477db38c", "score": "0.52595514", "text": "def get_svd_diag(self):\n self._comp_svd()\n return self.s, self.sshape, self.srep_axes", "title": "" }, { "docid": "fc14f3bb1f08a6574425da258326fde5", "score": "0.525689", "text": "def normalize(matrix):\n \n return matrix / sum1(matrix)", "title": "" }, { "docid": "d1eb439e830f861e0e1dc8dae0b3c598", "score": "0.5252939", "text": "def densify(M, value=0):\n if isinstance(M, sp.csr_matrix):\n M = M.todense()\n if value:\n M[M == 0] = value\n else:\n M = M.to_dense()\n return M", "title": "" }, { "docid": "c5ccd16ff28143079831b2178b699d6e", "score": "0.52431375", "text": "def full_s(self):\n x = np.zeros((self.shape), dtype=np.float32)\n\n x[: self.s.shape[0], : self.s.shape[0]] = self.s.as_2d\n s = Matrix(\n x=x,\n row_names=self.row_names,\n col_names=self.col_names,\n isdiagonal=False,\n autoalign=False,\n )\n return s", "title": "" }, { "docid": "a7752674641c1c9443d061d05ca53c14", "score": "0.5242861", "text": "def tsvd_rand(matrix, n_components):\n l = matrix.shape[0]\n # Is this csc conversion necessary?\n smat = sparse.csc_matrix(matrix)\n U, Sigma, VT = randomized_svd(smat, \n n_components=n_components, \n n_iter=5, random_state=None)\n U = U * np.sqrt(Sigma)\n U = preprocessing.normalize(U, \"l2\")\n return U", "title": "" }, { "docid": "bd8383063d26993a9248fba4ce3ed481", "score": "0.5239239", "text": "def svd_for_cur(M,e):\n U, sigma, V = svd(M)\n \n if (e<1):\n \n diag = np.sum(np.diagonal(sigma)**2)\n energy = e*(diag)\n singulars = np.diagonal(sigma)\n i = None\n for i in range(len(singulars)-1, 0, -1):\n if(diag - singulars[i]**2 < energy):\n break\n else:\n diag = diag - singulars[i]**2\n \n \n sigma = sigma[0:i+1, 0:i+1]\n U = U[::, 0:i+1]\n V = V[0:i+1, ::]\n\n return U, sigma, V", "title": "" }, { "docid": "db970f8d14154c55b67fa167894cf441", "score": "0.5236138", "text": "def svd_compressed(a, k, n_power_iter=0, seed=None, name=None):\n comp = compression_matrix(a, k, n_power_iter=n_power_iter, seed=seed)\n a_compressed = comp.dot(a)\n v, s, u = tsqr(a_compressed.T, name, compute_svd=True)\n u = comp.T.dot(u)\n v = v.T\n u = u[:, :k]\n s = s[:k]\n v = v[:k, :]\n return u, s, v", "title": "" }, { "docid": "66dc71db2fa46cc73e8197321f328e01", "score": "0.5216866", "text": "def calcMassMatrix(self):\n\n if self.S is None:\n \"\"\"Need to calculate it\"\"\"\n \n msq_21 = self.msq_21\n msq_31 = self.msq_31\n \n U = self.calcMixingMatrix()\n\n S_11 = msq_21*U[0][1]*U[0][1].conjugate() + msq_31*U[0][2]*U[0][2].conjugate()\n S_21 = msq_21*U[1][1]*U[0][1].conjugate() + msq_31*U[1][2]*U[0][2].conjugate()\n S_31 = msq_21*U[2][1]*U[0][1].conjugate() + msq_31*U[2][2]*U[0][2].conjugate()\n S_12 = msq_21*U[0][1]*U[1][1].conjugate() + msq_31*U[0][2]*U[1][2].conjugate()\n S_22 = msq_21*U[1][1]*U[1][1].conjugate() + msq_31*U[1][2]*U[1][2].conjugate()\n S_32 = msq_21*U[2][1]*U[1][1].conjugate() + msq_31*U[2][2]*U[1][2].conjugate()\n S_13 = msq_21*U[0][1]*U[2][1].conjugate() + msq_31*U[0][2]*U[2][2].conjugate()\n S_23 = msq_21*U[1][1]*U[2][1].conjugate() + msq_31*U[1][2]*U[2][2].conjugate()\n S_33 = msq_21*U[2][1]*U[2][1].conjugate() + 
msq_31*U[2][2]*U[2][2].conjugate()\n\n self.S = np.array([[S_11, S_12, S_13],[S_21, S_22, S_23], [S_31, S_32, S_33]])\n\n return self.S", "title": "" }, { "docid": "1d12dc3e39ba2cfcdde868d782ca47eb", "score": "0.52164906", "text": "def spdiaginv(A):\n #check input\n if not issparse(A):\n raise TypeError('Input must be a sparse matrix')\n\n n = A.shape[0] #finding the dimension\n \n Ainv = csr_matrix((n,n))\n \n for i in range(0,n):\n Ainv[i,i] = 1.0/A[i,i]\n \n return Ainv", "title": "" }, { "docid": "e641a0103ee051cd2dc4289fa3d6dcab", "score": "0.52039665", "text": "def plot_singular_values(self, data, colums_to_drop=None):\n # first copy the frame and drop the date\n raw_data = data.copy()\n\n if colums_to_drop is not None:\n raw_data = raw_data.drop(columns=colums_to_drop, axis=1)\n\n # calculate svd\n _, sing, _ = np.linalg.svd(raw_data, full_matrices=True)\n\n # var_explained = np.round(sing**2/np.sum(sing**2), decimals=3)\n var_explained = sing**2/np.sum(sing**2)\n sns.barplot(x=list(range(1, len(var_explained)+1)),\n y=var_explained, color=\"limegreen\")\n plt.xlabel('Singular values', fontsize=16)\n plt.ylabel('Singular values of Corona dataset (normalized)', fontsize=10)\n path = os.path.join(self.path_to_processed, 'singular_val.png')\n plt.savefig(path, dpi=400)", "title": "" }, { "docid": "800cdb8267d74ad3adde87e636107b22", "score": "0.51883125", "text": "def simplex(c, m, b):\n\n m = Matrix(m)\n\n if len(c) != m.cols or len(b) != m.rows:\n raise ValueError(\"The dimensions doesn't match\")\n\n # build full tableau\n tableau = zeros(m.rows + 1, m.cols + m.rows + 2)\n tableau[-1, :-1] = Matrix([[1] + [-_ for _ in c] + [0]*m.rows])\n tableau[:-1, 1:m.cols + 1] = m\n tableau[:-1, m.cols + 1:-1] = eye(m.rows)\n tableau[:, -1] = Matrix(b + [0])\n\n if any(_.is_negative for _ in tableau[:-1, -1]):\n raise NotImplementedError(\"Phase I for simplex isn't implemented.\")\n\n # Pivoting strategy use Bland's rule\n\n def pivot_col(obj):\n low, idx = 0, 0\n for i in range(1, len(obj) - 1):\n if obj[i] < low:\n low, idx = obj[i], i\n return -1 if idx == 0 else idx\n\n def pivot_row(lhs, rhs):\n ratio, idx = oo, 0\n for i in range(len(rhs)):\n if lhs[i] > 0:\n r = rhs[i]/lhs[i]\n if r < ratio:\n ratio, idx = r, i\n return idx\n\n # Now solve\n\n while min(tableau[-1, 1:-1]) < 0:\n col = pivot_col(tableau[-1, :])\n row = pivot_row(tableau[0:-1, col], tableau[0:-1, -1])\n\n tableau[row, :] /= tableau[row, col]\n for r in range(tableau.rows - 1):\n if r == row:\n continue\n tableau[r, :] -= tableau[r, col]*tableau[row, :]\n tableau[-1, :] -= tableau[-1, col]*tableau[row, :]\n\n ans = [S.Zero]*m.cols\n for i in range(1, m.cols + 1):\n if tableau[-1, i] == 0:\n for j in range(tableau.rows - 1):\n if tableau[j, i] == 1:\n ans[i - 1] = tableau[j, -1]\n break\n\n return tableau[-1, -1], tuple(ans)", "title": "" }, { "docid": "986f01384237f5cbe3461c23b5d2f3ef", "score": "0.5186611", "text": "def svd_lowrank(A: Tensor, q: Optional[int] = 6, niter: Optional[int] = 2,\n M: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]:\n if not torch.jit.is_scripting():\n tensor_ops = (A, M)\n if (not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops)):\n return handle_torch_function(svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M)\n return _svd_lowrank(A, q=q, niter=niter, M=M)", "title": "" }, { "docid": "418c577af8e44b793f6e1a56d46d1eeb", "score": "0.5182167", "text": "def sparsity(S):\n assert len(S.shape) == 2\n (p,p) = S.shape\n off_nnz = 
np.count_nonzero(S) - p\n s = off_nnz/(p**2-p)\n return s", "title": "" }, { "docid": "6c167cda0f23e9cd1cfd1e90ef640c96", "score": "0.5179208", "text": "def singular_spectral(array, x=130, groups =12):\n from pyts.decomposition import SingularSpectrumAnalysis\n array = np.array([array], dtype=\"float64\")\n ssa = SingularSpectrumAnalysis(window_size=x, groups = groups)\n X_ssa = ssa.fit_transform(array)\n\n plt.figure()\n index = 0\n for n in X_ssa:\n plt.subplot(groups, 1, index+1)\n plt.plot(np.transpose(n))\n index= index+1\n\n from sklearn.decomposition import FastICA\n transformer = FastICA(n_components=groups)\n X_transformed = transformer.fit_transform(np.transpose(X_ssa))\n plt.figure()\n for n in range(X_transformed.shape[1]):\n plt.subplot(X_transformed.shape[1], 1, n+1)\n plt.plot(X_transformed[:, n])\n\n return X_transformed", "title": "" }, { "docid": "b9bd7ba0fb8a93e93219526fc84804a5", "score": "0.51737374", "text": "def compact_svd(A, tol=1e-6):\n L,V=la.eig(A.conj().T@A)\n V=V.T\n s=np.sqrt(L)\n v_sort=np.argsort(-s)\n s=-np.sort(-s)\n v_copy=np.copy(V)\n for i in range(len(v_sort)):\n V[i]=v_copy[v_sort[i]]\n r=len(s)\n for i in range(len(s)-1,-1,-1):\n if s[i]>tol:\n break\n r-=1\n s=s[:r]\n V=V[:r]\n U=(A@V[0])/s[0]\n for i in range(1,r):\n U=np.column_stack((U,(A@V[i])/s[i]))\n return U,s,V.conj()", "title": "" }, { "docid": "504c59e1a6fca4572835dcc61edb082b", "score": "0.5163528", "text": "def plot_singular_spectra():\n\n jacobians = load_eigendistortions('dog')\n\n colors = cm.viridis(np.linspace(.1, 1, 20))\n n_boot = 20\n fig, ax = plt.subplots(1, 1)\n\n for j in tqdm(range(2, len(jacobians))):\n s_boot = torch.zeros(n_boot, 10)\n selector = torch.randint(0, len(jacobians), [n_boot, j])\n\n for i, boot in enumerate(tqdm(selector)):\n tmp = jacobians[boot].mean(0)\n _, S, _ = torch.svd(tmp)\n s_boot[i] = S\n\n ax.errorbar(range(10), s_boot.mean(0), s_boot.std(0), linewidth=3, color=colors[j])\n ax.set(xlabel='index', ylabel='singular value')", "title": "" }, { "docid": "36a6eb04c9b8bca56ea2025649fc4d6c", "score": "0.5143709", "text": "def resolves_matrix(self, M, f):\n sparse_matrix = sp.sparse.csc_matrix(M)\n P = sp.sparse.linalg.spsolve(sparse_matrix, f)\n P.shape = (P.size, 1)\n return P", "title": "" }, { "docid": "da7826fa4f1648fc3ef1efbabf08c1de", "score": "0.51397425", "text": "def test_sparse_svd():\n mask = np.random.randint(0, 2, size=(5, 5 * 4))\n sparse_matrix = np.random.randint(1, 10, size=(5, 3, 5 * 4))\n sparse_matrix = sparse_matrix * mask[:, None, :]\n topk = 2\n\n # raw method\n results = []\n for i in range(sparse_matrix.shape[0]):\n dense_matrix = sparse_matrix[i].T[np.where(mask[i] == 1)].T\n results.append(np.linalg.svd(dense_matrix)[-1][:, :topk])\n y = results\n\n # masked & padded method\n x = vec_sparse_svd(sparse_matrix, mask, topk=topk)\n\n # assert almost equal (ignore extremely small numerical errors)\n for i in range(sparse_matrix.shape[0]):\n np.testing.assert_almost_equal(np.dot(sparse_matrix[i], x[i]),\n np.dot(sparse_matrix[i].T[np.where(mask[i] == 1)].T, y[i]))", "title": "" }, { "docid": "e1cbdee324cff4018f169552a08ed878", "score": "0.5139542", "text": "def identity_matrix(m):\r\n t = tuple([1])\r\n for _ in range(m):\r\n t = t + tuple(0 for _ in range(m)) + tuple([1])\r\n return matrix(m, m, *t)", "title": "" }, { "docid": "fbbb6f2aeecfe9e0ad26a3a6392a8ef9", "score": "0.5138513", "text": "def homogeneous_Ax(A):\n # decomposition Matrix A\n P = np.empty((3, 4))\n u, s, v = np.linalg.svd(A)\n P = v[11, :].reshape((3, 4))\n\n 
return P", "title": "" }, { "docid": "bf157f29e76986e638eb858cf45d4645", "score": "0.5129821", "text": "def matrix_sqrt(mat: T.FloatTensor) -> T.FloatTensor:\n u, s, v = mat.svd()\n return (u*s.sqrt()).mm(v.t())", "title": "" }, { "docid": "405890627068f74c1cffbc0245faa863", "score": "0.51260555", "text": "def sensing_matrix(n, x, norm_noise=0.0):\n p = len(x)\n x_mat = np.random.normal(0.0, 1.0, size=(n * p)) / np.sqrt(n)\n x_mat = x_mat.reshape((n, p))\n y_tr = np.dot(x_mat, x)\n noise_e = np.random.normal(0.0, 1.0, len(y_tr))\n y_e = y_tr + (norm_noise / np.linalg.norm(noise_e)) * noise_e\n return x_mat, y_tr, y_e", "title": "" }, { "docid": "79b464cb22ab4e5657a065326c341d92", "score": "0.511891", "text": "def null(S, max_error=default_max_error * 1e-3, rank_cutoff=default_rank_eps):\n assert isinstance(S, matrix)\n m, n = S.shape # m is number of metabolites, n is number of reactions\n [u, sigma, v] = svd(S)\n null_mask = ones((n,))\n rank = sum(sigma > rank_cutoff) # use this instead of matrix_rank\n null_mask[:rank] = 0\n \n N = compress(null_mask, v, axis=0).T\n #assert rank < n\n if rank >= n:\n warn(\"rank %d >= %d\" % (rank, n))\n from IPython import embed; embed()\n assert abs(S * N).max() < max_error # make sure it is a null space\n assert type(N) is matrix\n return N", "title": "" }, { "docid": "a81e019cca123f69fd83d58b566f23dd", "score": "0.51175356", "text": "def S_matrix(fs):\n num_bfs = len(fs)\n S = np.zeros((num_bfs, num_bfs))\n\n for i in range(num_bfs):\n for j in range(num_bfs):\n S[i, j] = S_int(fs[i], fs[j])\n\n return S", "title": "" }, { "docid": "ac02f11b44d95dc12d1ef823670baaf0", "score": "0.5103461", "text": "def svd(self, **args):\n U = type(self)(dtype=self.dtype, ctype=self.ctype)\n s = type(self)(dtype=np.float64, ctype=np.array)\n Vh = type(self)(dtype=self.dtype, ctype=self.ctype)\n for key in self._data.keys():\n Ul, sl, Vhl = np.linalg.svd(self[key], **args)\n U[key] = Ul\n s[key] = sl\n Vh[key] = Vhl\n return U, s, Vh", "title": "" }, { "docid": "b89d5929a60fb9f244b41a9b57e93359", "score": "0.5097969", "text": "def basis(u, s, p = 0.5):\n u_rank = np.empty((u.shape[0], u.shape[1], u.shape[1]))\n s_rank = np.empty((u.shape[0], u.shape[1]))\n u_low = []\n\n cumulative_s = np.empty((u.shape[0], u.shape[1]))\n # the sequence of eigenvalues after np.linalg.eig() is chaotic\n # sorting the eigenvalues and corresponding vectors\n for i in range(len(s)):\n idx = np.argsort(s[i])[::-1]\n u_rank[i, :, :] = u[i, :, idx] # eigenvectors: according to the descending order\n s_rank[i, :] = s[i, idx] # eigenvalues: according to the descending order\n cumulative_s[i, :] = np.cumsum(s_rank[i]) / np.sum(s_rank[i]) # the cumulative value of eigenvalues\n k = 0\n # only keep the first \"p\" percentage eigenvalues\n # and corresponding vectors\n for percent in cumulative_s[i]:\n k += 1\n if percent >= p:\n break\n u_low.append(u_rank[i, :, :k]) # in lower-dimension\n\n u = u_low\n \n return u", "title": "" }, { "docid": "438d4b45d5915b93062925ee09278334", "score": "0.50974005", "text": "def makeXmatrix(self, s):\n NT = len(s)\n gs = self.prescale * s\n\n NN = len(self.NL)\n X = np.zeros((NT, NN), dtype='float32')\n\n for n in range(NN):\n if self.NLw != []:\n X[:,n] = X[:,n] + intNL.piece_proc_stim(gs, self.NLx[n], [self.NLx[n]-self.NLw[n], self.NLx[n]+self.NLw[n]])\n else:\n if n==0:\n X[:,n]=X[:,n]+intNL.piece_proc_stim(gs, self.NLx[n], self.NLx[n+1])\n elif n==NN-1:\n X[:,n]=X[:,n]+intNL.piece_proc_stim(gs, self.NLx[n], self.NLx[n-1])\n else:\n 
X[:,n]=X[:,n]+intNL.piece_proc_stim(gs, self.NLx[n], [self.NLx[n-1],self.NLx[n+1]])\n\n k = self.NL\n\n return X, k", "title": "" }, { "docid": "f36adba9277ae257f27f330f374a6eee", "score": "0.5093576", "text": "def scale(s):\n\t\t\treturn Matrix(numpy.array([[s,0,0],[0,s,0], [0,0,1]], numpy.Float))", "title": "" }, { "docid": "546ce57c18353ae0d5ec227766dd691b", "score": "0.50931567", "text": "def normalized(matrix, eps=0.):\n return matrix / (torch.sqrt(torch.sum(matrix ** 2, dim=1, keepdim=True)) + eps)", "title": "" }, { "docid": "0c2aaf258ab5c4b57a47d84169158c05", "score": "0.5088609", "text": "def singular(F,X,Y,L,pi,version):\n S = []\n if pi == []: I = 1\n else: I = 2\n\n for (tau,l,r) in singular_term(F,X,Y,L,I,version):\n pi1 = pi + [tau]\n F1 = _new_polynomial(F,X,Y,tau,l)\n L1 = F1.domain\n if r == 1: S.append((pi1,F1))\n else: S.extend(singular(F1,X,Y,L1,pi1,version))\n\n return S", "title": "" }, { "docid": "3bb11e93ae6188b77a10c5f67b65157c", "score": "0.5082795", "text": "def centering_matrix(n):\n return np.eye(n) - 1.0 / n", "title": "" }, { "docid": "5680bd40032a90b1ff888882bd9ea5af", "score": "0.50826734", "text": "def convert_from_milnor_matrix(n, basis, p=2, generic='auto'):\n mat = convert_to_milnor_matrix(n,basis,p,generic)\n if mat.nrows() != 0:\n return convert_to_milnor_matrix(n,basis,p,generic).inverse()\n else:\n return mat", "title": "" }, { "docid": "798d5eb14bf6fb4e65d6ceae01d53152", "score": "0.5082102", "text": "def InverseMatrix(matrix, vector):\n # Unveri reversible matrix\n if Determinant(matrix, 1) == 0:\n print(\"Error,Singular Matrix\\n\")\n return\n # result matrix initialized as singularity matrix\n result = MakeIMatrix(len(matrix), len(matrix))\n # loop for each row\n for i in range(len(matrix[0])):\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\n # pivoting process\n matrix, vector = RowXchange(matrix, vector)\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\n elementary[i][i] = 1 / matrix[i][i]\n result = MultiplyMatrix(elementary, result)\n matrix = MultiplyMatrix(elementary, matrix)\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\n # elementary matrix and multiply with the result matrix )\n for j in range(i + 1, len(matrix)):\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\n elementary[j][i] = -(matrix[j][i])\n matrix = MultiplyMatrix(elementary, matrix)\n result = MultiplyMatrix(elementary, result)\n\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\n # (make elementary matrix and multiply with the result matrix )\n for i in range(len(matrix[0]) - 1, 0, -1):\n for j in range(i - 1, -1, -1):\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\n elementary[j][i] = -(matrix[j][i])\n matrix = MultiplyMatrix(elementary, matrix)\n result = MultiplyMatrix(elementary, result)\n\n return result", "title": "" }, { "docid": "9763ef0d3d9453eeffe65493094339af", "score": "0.50798", "text": "def stochastic_matrix(graph, size):\n\n prob_tab = np.zeros((size, size))\n for i in range(0, size):\n N_elem = sum_element(graph[i], size)\n prob_tab[i] = divide_by_N(graph[i], size, N_elem)\n return np.transpose(prob_tab) # we transpose it to compute the RIGHT eigenvector of our matrix", "title": "" }, { "docid": "c739bd03552807d51d8022e33c078b4c", "score": "0.5079015", "text": "def diagonal_single_value(value, n):\n return Matrix([[value if row == col else 0 for col in range(n)] for 
row in range(n)])", "title": "" }, { "docid": "f346b0140c3835135c1ae52778e61653", "score": "0.5065364", "text": "def kb_spmat_nufft(\n image: Tensor,\n scaling_coef: Tensor,\n im_size: Tensor,\n grid_size: Tensor,\n interp_mats: Tuple[Tensor, Tensor],\n norm: Optional[str] = None,\n) -> Tensor:\n is_complex = True\n if not image.is_complex():\n if not image.shape[-1] == 2:\n raise ValueError(\"For real inputs, last dimension must be size 2.\")\n\n is_complex = False\n image = torch.view_as_complex(image)\n\n data = kb_spmat_interp(\n image=fft_and_scale(\n image=image,\n scaling_coef=scaling_coef,\n im_size=im_size,\n grid_size=grid_size,\n norm=norm,\n ),\n interp_mats=interp_mats,\n )\n\n if is_complex is False:\n data = torch.view_as_real(data)\n\n return data", "title": "" }, { "docid": "e2a491bbf67b4a16bb3b8da5e468a39f", "score": "0.50548697", "text": "def __invert_mass_matrix(self,u):\n\n me = fenics_mesh(self.V)\n\n A = 1.0*self.M\n b = fenics_mesh(u)\n\n df.solve(A,me.values.vector(),b.values.vector())\n\n return me", "title": "" }, { "docid": "a35cc6c1e7136acb46054b2259797cd7", "score": "0.5047252", "text": "def nystrom_kernel_svd(X, kernel_f, q):\n\n m, d = X.shape\n\n # Assemble kernel function evaluator.\n K = kernel_f(X, X).cpu().data.numpy()\n W = K / m\n #print (\"W\", W)\n #print (\"K\", K)\n #print (\"Q\", q)\n #print (\"m\", m)\n w, V = sp.linalg.eigh(W, eigvals=(m - q, m - 1))\n U1r, s = V[:, ::-1], w[::-1][:q]\n NU = floatX(U1r[:, :q] / np.sqrt(m))\n\n return s, NU", "title": "" }, { "docid": "b4e97be065d5e2db77eae136e781cb91", "score": "0.5045904", "text": "def enforce_rank2(F):\n assert F.shape == (3, 3)\n\n u, s, v = np.linalg.svd(F)\n d = np.array([[s[0], 0, 0], [0, s[1], 0], [0, 0, 0]]) # s[2] = 0\n F_final = u.dot(d).dot(v) # matrix v is already transposed\n\n assert F_final.shape == (3, 3)\n return F_final", "title": "" }, { "docid": "e5d697f274c0925589cc3b4e4a977a24", "score": "0.5039262", "text": "def single_experiment(n, m, s, use_naive=False):\n D = random_dict(m, n)\n x = get_sparse_x(n, s)\n y = np.dot(D, x)\n if use_naive:\n x_hat = omp_naive(D, y)\n else:\n x_hat = orthogonal_mp(D, y, s)\n x_hat.resize(n, 1)\n error = norm(x - x_hat)\n return error", "title": "" }, { "docid": "5c1df1c8d5d5ffa40ed67476cffdcf3b", "score": "0.5033444", "text": "def tensor_svd(tensor, row_labels, svd_label=\"svd_\",\n absorb_singular_values=None):\n\n t = tensor.copy()\n\n # Move labels in row_labels to the beginning of list, and reshape data\n # accordingly\n total_input_dimension = 1\n for i, label in enumerate(row_labels):\n t.move_index(label, i)\n total_input_dimension *= t.data.shape[i]\n\n column_labels = [x for x in t.labels if x not in row_labels]\n\n old_shape = t.data.shape\n total_output_dimension = int(np.product(t.data.shape) / total_input_dimension)\n data_matrix = np.reshape(t.data, (total_input_dimension,\n total_output_dimension))\n\n try:\n u, s, v = np.linalg.svd(data_matrix, full_matrices=False)\n except (np.linalg.LinAlgError, ValueError):\n # Try with different lapack driver\n warnings.warn(('numpy.linalg.svd failed, trying scipy.linalg.svd with' +\n ' lapack_driver=\"gesvd\"'))\n try:\n u, s, v = sp.linalg.svd(data_matrix, full_matrices=False,\n lapack_driver='gesvd')\n except ValueError:\n # Check for inf's and nan's:\n print(\"tensor_svd failed. Matrix contains inf's: \"\n + str(np.isinf(data_matrix).any())\n + \". 
Matrix contains nan's: \"\n + str(np.isnan(data_matrix).any()))\n raise # re-raise the exception\n\n # New shape original index labels as well as svd index\n U_shape = list(old_shape[0:len(row_labels)])\n U_shape.append(u.shape[1])\n U = Tensor(data=np.reshape(u, U_shape), labels=row_labels + [svd_label + \"in\"])\n V_shape = list(old_shape)[len(row_labels):]\n V_shape.insert(0, v.shape[0])\n V = Tensor(data=np.reshape(v, V_shape),\n labels=[svd_label + \"out\"] + column_labels)\n\n S = Tensor(data=np.diag(s), labels=[svd_label + \"out\", svd_label + \"in\"])\n\n # Absorb singular values S into either V or U\n # or take the square root of S and absorb into both\n if absorb_singular_values == \"left\":\n U_new = contract(U, S, [\"svd_in\"], [\"svd_out\"])\n V_new = V\n return U_new, V_new\n elif absorb_singular_values == \"right\":\n V_new = contract(S, V, [\"svd_in\"], [\"svd_out\"])\n U_new = U\n return U_new, V_new\n elif absorb_singular_values == \"both\":\n sqrtS = S.copy()\n sqrtS.data = np.sqrt(sqrtS.data)\n U_new = contract(U, sqrtS, [\"svd_in\"], [\"svd_out\"])\n V_new = contract(sqrtS, V, [\"svd_in\"], [\"svd_out\"])\n return U_new, V_new\n else:\n return U, S, V", "title": "" }, { "docid": "19dd975fa6b5d7d234165581fb1cd00c", "score": "0.50306094", "text": "def symnormalise(M):\n\n d = np.array(M.sum(1))\n\n dhi = np.power(d, -1 / 2).flatten()\n dhi[np.isinf(dhi)] = 0.\n DHI = sp.diags(dhi) # D half inverse i.e. D^{-1/2}\n\n return (DHI.dot(M)).dot(DHI)", "title": "" }, { "docid": "ae5a204c5ef68ac820830fa6d8575435", "score": "0.5027315", "text": "def singularity(data, func):\n return func(data)", "title": "" } ]
dd67518d84447aad41eace3b8f0145f1
Suppose we use a few model instances with the same id, and we must be able to update them independently of updates to the root model. For this we should use get_model_copy_by_id() rather than get_model_by_id().
[ { "docid": "7db3c694e8d3a4fcddd0927265e53bcf", "score": "0.69966036", "text": "def get_model_copy_by_id(cls, id):\n model = cls.get_model_by_id(id)\n return model.copy() if model else None", "title": "" } ]
[ { "docid": "6042db19415df1c676419f2b89732154", "score": "0.6511655", "text": "def _get_shallow_copy_model(model: nn.Module):\n old_to_new = dict()\n for name, module in _get_dfs_module_list(model):\n new_module = copy(module)\n new_module._modules = OrderedDict()\n for subname, submodule in module._modules.items():\n if submodule is None:\n continue\n setattr(new_module, subname, old_to_new[submodule])\n old_to_new[module] = new_module\n return old_to_new[model]", "title": "" }, { "docid": "c6aefe6d78c0c427112083e3cc8e7165", "score": "0.6133033", "text": "def update_model():\n pass", "title": "" }, { "docid": "911e670b93264caee39194a5fe1536b2", "score": "0.612019", "text": "def update_model(self):\n pass", "title": "" }, { "docid": "1dd5f97eb38f8da1b550f0fcc48531ac", "score": "0.6074638", "text": "def save_model(self, request, obj, form, change):\n try:\n old_obj = obj._meta.model.objects.get(id=obj.id)\n self.old_instance = old_obj.__dict__\n except Exception as e:\n pass\n obj.save()", "title": "" }, { "docid": "17f4c5585fcd762fc42b39d09fa1c1e0", "score": "0.59444875", "text": "def duplicate_model(self):\n # create duplicate model and update weights\n dupe_mod = clone_model(self.model)\n dupe_mod.set_weights(self.model.get_weights())\n return dupe_mod", "title": "" }, { "docid": "6b888108c4a1bb635921142ebd27bd5c", "score": "0.5922577", "text": "def update(self, model):\n pass", "title": "" }, { "docid": "1cd1821353a1f52db804f4617e3cbdf8", "score": "0.5869077", "text": "def update_or_instantiate(self, model: Type[DiffSyncModel], ids: Dict, attrs: Dict) -> Tuple[DiffSyncModel, bool]:\n created = False\n try:\n obj = self.get(model, ids)\n except ObjectNotFound:\n obj = model(**ids, **attrs)\n # Add the object to diffsync adapter\n self.add(obj)\n created = True\n\n # Update existing obj with attrs\n for attr, value in attrs.items():\n if getattr(obj, attr) != value:\n setattr(obj, attr, value)\n\n return obj, created", "title": "" }, { "docid": "8c959d00a41871afa80c0184e2e8c8e1", "score": "0.58674705", "text": "def refetch_model_instance(instance):\n return instance.__class__.objects.get(pk=instance.pk)", "title": "" }, { "docid": "61c7faac2c9b30e815bcd8458e6185d0", "score": "0.5835415", "text": "def test_model_id(model_id):\n model = get_model(model_id)\n if model:\n real_model_id = get_model_id(model)\n assert real_model_id == model_id", "title": "" }, { "docid": "1316c5a1e9a5240cdc3d34d8782a164b", "score": "0.5766455", "text": "def CopyAnsaModel(model):", "title": "" }, { "docid": "c6ccce0246846e0df81556ade02c2e99", "score": "0.57596415", "text": "def __updateModels(self):\n self.__updatellModel() # an exemple\n self.__updatersModel() # an exemple\n pass", "title": "" }, { "docid": "81312f4b0dd6c92cd77b4250205f4c17", "score": "0.5735076", "text": "def test_ids(self):\n obj_bm_1 = BaseModel()\n obj_bm_2 = BaseModel()\n\n self.assertEqual(type(obj_bm_1), type(obj_bm_2))\n self.assertNotEqual(obj_bm_1.id, obj_bm_2.id)\n\n id_2 = obj_bm_2.id\n\n obj_bm_2.id = '1234'\n\n self.assertEqual(obj_bm_2.id, '1234')", "title": "" }, { "docid": "d470dcad2fdec02859d3dd5109faa6e2", "score": "0.569646", "text": "def copy_model(model):\n if isinstance(model, dict):\n newmodel = {}\n for key, value in model.items():\n if isinstance(value, dict) or isinstance(value, list):\n newmodel[key] = copy_model(value)\n else:\n newmodel[key] = value\n return newmodel\n elif isinstance(model, list):\n newlist = []\n for value in model:\n if isinstance(value, bpy.types.Object):\n newlist.append(value)\n elif 
isinstance(value, dict) or isinstance(value, list):\n newlist.append(copy_model(value))\n else:\n newlist.append(value)\n return newlist\n raise TypeError(\n \"Deep copy failed. Unsuspected element in the dictionary: {}\".format(type(model))\n )", "title": "" }, { "docid": "0cc64869cf9961d7d54a490c738641b5", "score": "0.56640774", "text": "def updateModel(name,model):\n myGlobals.actorState.models[name] = TestHelper.Model(name,model)", "title": "" }, { "docid": "413066e8e1bfec8b1ccce31d52b9fc4a", "score": "0.5599275", "text": "def copy(self):\n\n model = Model(*[chain.copy() for chain in self.chains()])\n atoms = [a for a in self._atoms if a._chain is None]\n atom_copies = [atom.copy() for atom in atoms]\n model._atoms.update(atom_copies)\n model._id, model._name = self._id, self._name\n return model", "title": "" }, { "docid": "985a3ac5d62de552df0cc81d8570c1ac", "score": "0.5593484", "text": "def test_Model_copy(gmodel):\n msg = 'Model.copy does not work. Make a new Model'\n with pytest.raises(NotImplementedError, match=msg):\n gmodel.copy()", "title": "" }, { "docid": "8406d5a5e8a2246c324a6fe046e0ef1d", "score": "0.5560467", "text": "def copy_model_over(from_model, to_model, tau=0.95):\n for to_model, from_model in zip(to_model.parameters(), from_model.parameters()):\n to_model.data.copy_(from_model.data.clone() * (1 - tau) + to_model.data.clone() * tau)", "title": "" }, { "docid": "c201f05ef0e4d6803ce369cc32c10a8a", "score": "0.5550932", "text": "def hard_update(self, local_model, target_model):\n for target_param, param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(param.data)", "title": "" }, { "docid": "c201f05ef0e4d6803ce369cc32c10a8a", "score": "0.5550932", "text": "def hard_update(self, local_model, target_model):\n for target_param, param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(param.data)", "title": "" }, { "docid": "fae848cd040a07b435e55fa238eb9082", "score": "0.55507284", "text": "def create_by_shallow_copy(request, item, exclude=[], options=[]):\n\n # imported_entity = ImportedEntity(\n # source_type=type(item).__name__.lower(),\n # source_id=item.id,\n # )\n # imported_entity.save()\n\n old_id = item.id\n item_type = ContentType.objects.get(app_label='main', model=type(item).__name__.lower())\n\n item.pk = None # NB: Setting the pk to None and saving triggers the copy\n item.id = None\n item.save() # Now this is the new object\n\n item.owner = request.user.person\n\n try:\n item.is_standard = False\n except Exception as e:\n pass\n\n # Making sure we pass back the reference to the old object too ...\n old_item = item_type.get_object_for_this_type(id=old_id)\n item.imported_from = old_item\n item.save()\n\n return old_item, item", "title": "" }, { "docid": "21f583e7823d2bd423199fa7131ea399", "score": "0.55250126", "text": "def update_object_as_child(parent_model_class):", "title": "" }, { "docid": "135de91a68edaebb605e5b85fe3db530", "score": "0.5500453", "text": "def test_save_again(self):\n model = SingletonModel()\n model.save()", "title": "" }, { "docid": "b7411c32ad8460b2c3fd8ab9f14d9cf0", "score": "0.54875785", "text": "def _instance(self, model):\n return self._load(model, {})", "title": "" }, { "docid": "bf6647cc77b63d205c798283c98ea03c", "score": "0.5453198", "text": "def copy_entity(id_id):\n\n if id_id not in entity_dict:\n return id_id\n\n if id_id in id_id_map:\n return id_id_map[id_id]\n\n entity = entity_dict[id_id]\n\n parent_cid, parent_oid = id_id_map[\n 
(entity.parent_client_id, entity.parent_object_id)]\n\n self_cid, self_oid = copy_entity(\n (entity.self_client_id, entity.self_object_id))\n\n link_id_id = (entity.link_client_id, entity.link_object_id)\n link_cid, link_oid = id_id_map.get(link_id_id, link_id_id)\n\n # Copying entity with its publishing info.\n\n entity_copy = Entity(\n client_id = client.id,\n parent_client_id = parent_cid,\n parent_object_id = parent_oid,\n self_client_id = self_cid,\n self_object_id = self_oid,\n field_client_id = entity.field_client_id,\n field_object_id = entity.field_object_id,\n link_client_id = link_cid,\n link_object_id = link_oid,\n locale_id = entity.locale_id,\n content = entity.content,\n additional_metadata = copy.deepcopy(entity.additional_metadata))\n\n entity_copy.publishingentity.published = entity.publishingentity.published\n entity_copy.publishingentity.accepted = entity.publishingentity.accepted\n\n # DBSession.add(entity_copy)\n CACHE.set(objects = [entity_copy, ], DBSession=DBSession)\n DBSession.flush()\n\n id_id_copy = (entity_copy.client_id, entity_copy.object_id)\n id_id_map[id_id] = id_id_copy\n\n return id_id_copy", "title": "" }, { "docid": "887b034ec550f0992d3a7c086c6310c0", "score": "0.5438378", "text": "def __init__(self, model_id):\n pass", "title": "" }, { "docid": "5084fa8a8c38e11513812466f352877a", "score": "0.5426645", "text": "def save(self, *args, **kwargs):\n self.__class__.objects.exclude(id=self.id).delete()\n super(SingletonModel, self).save(*args, **kwargs)", "title": "" }, { "docid": "8300389a815a8d298d87b6d75b4deca7", "score": "0.5423991", "text": "def update_target(self):\n self.target_model = self.online_model.clone(CloneMethod.clone)", "title": "" }, { "docid": "346a50a7e57c8cf5599d75499dce442a", "score": "0.5397406", "text": "def update_model(arg):\n print(\"um:\")\n return", "title": "" }, { "docid": "2e54f3ea681f4089dbc7a64ca5677182", "score": "0.53831136", "text": "def get_or_instantiate(\n self, model: Type[DiffSyncModel], ids: Dict, attrs: Dict = None\n ) -> Tuple[DiffSyncModel, bool]:\n created = False\n try:\n obj = self.get(model, ids)\n except ObjectNotFound:\n if not attrs:\n attrs = {}\n obj = model(**ids, **attrs)\n # Add the object to diffsync adapter\n self.add(obj)\n created = True\n\n return obj, created", "title": "" }, { "docid": "cfd0eb30ab888266e968e659cef333f5", "score": "0.53403556", "text": "def apply_model(fit_problem, saved_model, instrument, data_id):\n data_path = \"%s/%s\" % (instrument.lower().strip(), data_id.lower().strip())\n if fit_problem is not None:\n data_path = fit_problem.reflectivity_model.data_path\n\n # Make a copy of the ReflectivityModel object\n ref_model = saved_model.fit_problem.reflectivity_model\n ref_model.pk = None\n ref_model.data_path = data_path\n ref_model.save()\n\n if fit_problem is not None:\n old_model = fit_problem.reflectivity_model\n fit_problem.reflectivity_model = ref_model\n fit_problem.remote_job = None\n fit_problem.save()\n old_model.delete()\n else:\n fit_problem = FitProblem(user=saved_model.user, reflectivity_model=ref_model)\n fit_problem.save()\n\n # Copy over the layers\n fit_problem.layers.clear()\n for layer in saved_model.fit_problem.layers.all().order_by('layer_number'):\n layer.id = None\n layer.pk = None\n layer.save()\n fit_problem.layers.add(layer)\n\n fit_problem.save()\n return fit_problem", "title": "" }, { "docid": "2e4e3374f73f51b81ca1aeaaec9ba959", "score": "0.53316605", "text": "def model(self, model):\n global _modelInstances\n if not model in 
_modelInstances['core']:\n _instantiateCoreModel(model)\n\n return _modelInstances['core'][model]", "title": "" }, { "docid": "5f90f0ceb160a91b0f626f5292adf382", "score": "0.5329745", "text": "def _copy(cls, obj, update_fields=None, exclude_fields=None):\n if update_fields is None:\n update_fields = {}\n if exclude_fields is None:\n exclude_fields = []\n # NOTE: _meta API updated in Django 1.8, will need to re-implement\n default_exclude = ['id', 'ir_cache']\n autofields = [f.name for f in obj._meta.fields if isinstance(f, models.AutoField)]\n exclude = list(set(exclude_fields + autofields + default_exclude)) # eliminate duplicates\n\n # local fields + many-to-many fields = all fields - related m2m fields\n local_fields = [f.name for f in obj._meta.local_fields]\n m2m_local_fields = [f.name for f in obj._meta.many_to_many]\n m2m_all_fields = list(set(obj._meta.get_all_field_names()) - set(local_fields)) # includes related_names\n\n # Remove excluded fields from fields that are copied\n local_kwargs = { k:getattr(obj,k) for k in local_fields if k not in exclude }\n # By default, skip copying of related relations\n m2m_kwargs = { k:getattr(obj,k) for k in m2m_local_fields if k not in exclude }\n\n # Separate update_fields into local & m2m\n local_update = { k:v for (k,v) in update_fields.iteritems() if k in local_fields }\n # Allow related relations to be copied if in update_fields\n m2m_update = { k:v for (k,v) in update_fields.iteritems() if k in m2m_all_fields }\n \n local_kwargs.update(local_update)\n m2m_kwargs.update(m2m_update)\n\n new_obj = cls(**local_kwargs)\n # raise Exception(\"{}, class: {}\".format(new_obj, type(new_obj)))\n\n with transaction.atomic():\n with delay_tile_serialization():\n new_obj.get_pk()\n\n for (k,v) in m2m_kwargs.iteritems():\n if isinstance(v,list) or isinstance(v, models.query.QuerySet):\n setattr(new_obj, k, v)\n elif callable(getattr(v, 'all', None)):\n # assume this is a RelatedManager, can't check directly b/c generated at runtime\n setattr(new_obj, k, v.all())\n else:\n raise TypeError(\"Value '{}' can't be assigned to \\\n ManyToManyField '{}'\".format(v, k))\n\n new_obj.save() # run full_clean to validate\n return new_obj", "title": "" }, { "docid": "8dd9dd725a3d7d5cc01f286809ade448", "score": "0.5313831", "text": "def update_model(self):\n for layer in self.model:\n layer.update()", "title": "" }, { "docid": "c14ca003ec8eae6162b6ab42644d2701", "score": "0.53120327", "text": "def copy(self):\n c = self.__class__(self.model_id, update_on_init=False)\n attr = self.attributes()\n c._set_attr_from_dict(attr)\n return c", "title": "" }, { "docid": "f564a94fc38c86d75fb15ea4aabd0de9", "score": "0.530263", "text": "def _reset_model(self):\n self.model._get_model()", "title": "" }, { "docid": "49dc8557a127ba9224bbf02624bec051", "score": "0.52834177", "text": "def doIndividualModels(self):\n raise RuntimeError, \"Not implemented in ModelBuilder\"", "title": "" }, { "docid": "b6e31b48aefd7ce36288581f0730d935", "score": "0.52815425", "text": "def deep_copy(request, from_item, to_item, exclude=[], options=[]):\n\n # Get a list of all the relational fields in the model\n # fk_fields = [x.name for x in from_item._meta.get_fields() if type(x) == ForeignKey]\n # fk_fields.remove('owner')\n # fk_fields.remove('imported_from')\n # for f in fk_fields:\n # # Duplicate foreign key item\n # related_object = getattr(from_item, f)\n # new_related_object, related_object = create_by_shallow_copy(request, related_object)\n # deep_copy(request, related_object, 
new_related_object)\n # setattr(to_item, f, new_related_object)\n\n m2o_fields = [x.name for x in from_item._meta.get_fields()\n if type(x) == ManyToOneRel\n and x.name not in exclude]\n\n for f in m2o_fields:\n for related_object in getattr(from_item, f).all():\n # Create new related object\n related_object, new_related_object = create_by_shallow_copy(request, related_object, exclude, options)\n deep_copy(request, related_object, new_related_object, exclude, options)\n getattr(to_item, f).add(new_related_object)\n\n m2mr_fields = [(x.name, x.field.name) for x in from_item._meta.get_fields()\n if type(x) == ManyToManyRel\n and x.name not in exclude]\n\n for f, r in m2mr_fields:\n for related_object in getattr(from_item, f).all():\n # Create new related object\n related_object, new_related_object = create_by_shallow_copy(request, related_object, exclude, options)\n deep_copy(request, related_object, new_related_object, exclude, options)\n getattr(to_item, f).add(new_related_object)\n\n # # Upstream manytomany fields still copied as links - have to link to new items\n # m2mf_fields = [x.name for x in from_item._meta.get_fields() if type(x) == ManyToManyField]\n # for f in m2mf_fields:\n # for related_object in getattr(from_item, f).all():\n # getattr(to_item, f).add(related_object)\n\n to_item.depends_on = None\n to_item.imported_from = from_item.imported_from\n to_item.owner = request.user.person\n to_item.save()\n\n return from_item, to_item", "title": "" }, { "docid": "f939fe1e4f85a10d2f700c35e0b37a8d", "score": "0.52639073", "text": "def save_object_with_update(self, obj):\n\n w = None\n if type(obj) != ModelPy2SQL:\n new_id = self.save_object(obj)\n w = ModelPy2SQL(obj, new_id)\n else:\n tbl_nm = Py2SQL.__get_object_table_name(obj.obj)\n q = \"SELECT * FROM {} WHERE {}={}\" \\\n .format(tbl_nm, PY2SQL_COLUMN_ID_NAME, obj.get_id())\n self.cursor.execute(q)\n rows = self.cursor.fetchall()\n if len(rows) == 0:\n mes = \"No \" + str(obj.obj.__class__.__name__) + \" instance objects in \" + tbl_nm + \" with id: \" + str(\n obj.get_id())\n raise Exception(mes)\n\n self.__redefine_id_function(obj.get_id())\n self.__redefine_pyid_col_name()\n self.save_object(obj.obj)\n self.__reset_pyid_col_name()\n self.__reset_id_function()\n w = obj\n\n return w", "title": "" }, { "docid": "8f2e2e0d8e758a1e834443fd8bc80e71", "score": "0.52531564", "text": "def _copyModel(self, obj):\n super()._copyModel(obj)\n self.constructed = True", "title": "" }, { "docid": "e6c30d53af505b9e7de3b9e86ecb7da9", "score": "0.52290964", "text": "def update_model(self, budget=0):\n raise NotImplementedError", "title": "" }, { "docid": "ec33572b62035808f33674ddf57e344a", "score": "0.5199888", "text": "def copy_global_model(self, node_list):\n for node in node_list:\n node.model = self.global_model.copy()", "title": "" }, { "docid": "290da501f00da3a567201fa986e69e73", "score": "0.51970464", "text": "def _patch_mjlib_accessors(self, model, data):", "title": "" }, { "docid": "7e33be5f2ab539b03101c7611ac02505", "score": "0.51960015", "text": "def apply_modifications(model):\n model_path = os.path.join(\n tempfile.gettempdir(),\n next(tempfile._get_candidate_names()) + '.h5'\n )\n try:\n model.save(model_path)\n return load_model(model_path)\n finally:\n os.remove(model_path)", "title": "" }, { "docid": "ad15d1225ab65661dead8cabac41ec9e", "score": "0.51906836", "text": "def copy_all(model, from_alias, to_alias):\n print(f\"Querying all {model} from {from_alias}\")\n with context_managers.switch_db(model, from_alias) as model:\n all 
= model.objects.all()\n print(f\"Saving all {model} into {to_alias}\")\n for x in tqdm(all, total=all.count(), desc=\"Copy\", unit=\"docs\"):\n with context_managers.switch_db(model, to_alias) as model:\n # MongoEngine tracks changes, we need to force the save\n # or we'll get nothing.\n x.save(force_insert=True)", "title": "" }, { "docid": "5198d3f4af14f89d2e2f9f0bebe9d998", "score": "0.51862997", "text": "def CopyPartsToAnsaModel(parts, model):", "title": "" }, { "docid": "b272869e54b84d2e1c525c68e3dfd420", "score": "0.51851946", "text": "def test_clones_complex_types(self):\n rel = self.relation.where(id=re.compile('foo'))\n rel.clone()", "title": "" }, { "docid": "762534ff91fea87fb527a451e673c904", "score": "0.5176326", "text": "def update_from_form(self, model_instance, form):\n saved_id = None\n if '_id' in model_instance:\n saved_id = model_instance['_id']\n form.populate_obj(model_instance)\n if saved_id:\n model_instance['_id'] = saved_id\n return model_instance", "title": "" }, { "docid": "35468e7fb541affa34bda2af10121f4e", "score": "0.5175879", "text": "def test_save(self):\n model = SingletonModel()\n model.save()", "title": "" }, { "docid": "8deac5edbd5b2adee2de1ab289e60c20", "score": "0.5166439", "text": "def _update_model(self, X_all, Y_all):\n Y_all = Y_all.flatten()\n\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.train(X_all, Y_all)", "title": "" }, { "docid": "63c21bd5394826577b6a4707aefb828b", "score": "0.5163478", "text": "def model():\n return _model", "title": "" }, { "docid": "3a914ca0e7d4bd7768f55d552110cda2", "score": "0.5161448", "text": "def get_model_update(self):\n return NotImplementedError", "title": "" }, { "docid": "fd0bd2b6a00086499b74b073d63cc6ed", "score": "0.5159826", "text": "def __new(self, sid, model):\n if model == 'account.account':\n if sid in self.account_processed:\n return self.account_processed[sid]\n if model in self.newtab and sid in self.newtab[model]:\n return self.newtab[model][sid]\n self.cr_sqlite.execute(\"select new_id from table_old_id where old_id = %s and objet = '%s' and base = '%s'\" % (\n sid, model, self.connectionsource.dbname))\n res = None\n try:\n res = self.cr_sqlite.fetchone()\n if res:\n res = res[0]\n except sqlite3.DataError:\n pass\n return res", "title": "" }, { "docid": "b1b633a761f2c276e63d7bd161233e55", "score": "0.5131045", "text": "def test_model_property_set(self):\n m = self.TestModel(id=1)\n m.id = 2\n self.assertEqual(m.id, 2)", "title": "" }, { "docid": "dd160b3bbfac0295de82e5c6767ac122", "score": "0.5126958", "text": "def crud_update(self, request, id=None):\n data, is_single = self._deserialize_create_or_replace_request()\n if is_single:\n try:\n instance = self._meta.simplified._meta.model.objects.get(pk=id)\n except self._meta.simplified._meta.model.DoesNotExist, e:\n return ForbiddenSerializableResult(e)\n return self._create_or_replace_single(data, instance)\n else:\n return self._create_or_replace_many(data, update=True)", "title": "" }, { "docid": "518606b03530416b32cd88db3813c739", "score": "0.5125479", "text": "def test_model_add_person_with_plusequals(empty_model: Model):\n bob = Person(name=\"Bob\")\n empty_model += bob\n assert bob in empty_model.people\n assert bob.id != \"\"", "title": "" }, { "docid": "be44b46ce70012144ee032e8f295ef13", "score": "0.5124328", "text": "def update_instance(self, base_id):\n data = self.get_json(self.object_url % base_id)\n cleaned_data = self.clean_data(data)\n return self.save_instance(cleaned_data)", "title": "" }, { 
"docid": "fd2de419ad14de8020af45bd1acdca96", "score": "0.51045597", "text": "def _update_model(self):\r\n\r\n ### --- input that goes into the model (is unziped in case there are categorical variables)\r\n X_inmodel = self.space.unzip_inputs(self.X)\r\n Y_inmodel = list(self.Y)\r\n \r\n self.model.updateModel(X_inmodel, Y_inmodel)", "title": "" }, { "docid": "0391f001a69db30c87c11527c72fd8a7", "score": "0.50973177", "text": "def __init__(self,id=-1):\n Model.__init__(self)\n self.clear()", "title": "" }, { "docid": "04afa8e68226f20ba67a64e190170712", "score": "0.5097214", "text": "def create_and_get(model, **kw):\n o = create(model, **kw)\n return model.objects.get(pk=o.pk)", "title": "" }, { "docid": "1badbefa520be900f82df92050339d27", "score": "0.50867975", "text": "def test_serialize_dupe_model():\n course = factories.CourseFactory.create()\n serialized_data = serializers.LearningResourceSerializer(\n instance=course.learning_resource\n ).data\n serialized_data.pop(\"id\")\n\n dupe_course_serializer = serializers.LearningResourceSerializer(\n data=serialized_data\n )\n assert not dupe_course_serializer.is_valid()\n\n serialized_data[\"readable_id\"] = \"new-unique-id\"\n non_dupe_course_serializer = serializers.LearningResourceSerializer(\n data=serialized_data\n )\n assert non_dupe_course_serializer.is_valid(raise_exception=True)", "title": "" }, { "docid": "f382d672b3d133905016645eb3da60b9", "score": "0.5082832", "text": "def test_concurrent_updates(self):\n instance = TestCounterModel.create()\n new1 = TestCounterModel.get(partition=instance.partition)\n new2 = TestCounterModel.get(partition=instance.partition)\n\n new1.counter += 5\n new1.save()\n new2.counter += 5\n new2.save()\n\n actual = TestCounterModel.get(partition=instance.partition)\n assert actual.counter == 10", "title": "" }, { "docid": "0330cd1559bd21d5597fe8a023327c94", "score": "0.5082308", "text": "def test_model_get_relationship_by_id(empty_model: Model):\n sys1 = empty_model.add_software_system(name=\"sys1\")\n sys2 = empty_model.add_software_system(name=\"sys2\")\n relationship = empty_model.add_relationship(source=sys1, destination=sys2, id=\"r1\")\n assert empty_model.get_relationship(\"r1\") is relationship", "title": "" }, { "docid": "a5fe0a0c670f98c4580cd08f59ef0662", "score": "0.50822556", "text": "def test_model_cannot_add_relationship_with_same_id_as_existing(empty_model: Model):\n sys1 = empty_model.add_software_system(name=\"sys1\")\n sys2 = empty_model.add_software_system(name=\"sys2\")\n empty_model.add_relationship(source=sys1, destination=sys2, id=\"r1\")\n with pytest.raises(\n ValueError, match=\"Relationship.* has the same ID as Relationship.*\"\n ):\n empty_model.add_relationship(source=sys1, destination=sys2, id=\"r1\")", "title": "" }, { "docid": "ad54e9471bba69437a96b5127d6b6ce1", "score": "0.50775003", "text": "def cross_reference(self, model):\n self.make_current()", "title": "" }, { "docid": "34b8e3524bea1d388b4049a7ca06131d", "score": "0.5075643", "text": "def with_input_model(self, model_id):\n return self._with_input(model_id, self.project.project_key, \"model\")", "title": "" }, { "docid": "34b8e3524bea1d388b4049a7ca06131d", "score": "0.5075643", "text": "def with_input_model(self, model_id):\n return self._with_input(model_id, self.project.project_key, \"model\")", "title": "" }, { "docid": "2d32471f0f677f17c428f95d4649e75f", "score": "0.5067017", "text": "def update_model(self, **kwargs):\n return self.client.execute(\"product/update_model\", \"POST\", kwargs)", "title": "" }, { 
"docid": "2cef36f19b1014afef784126d2edf469", "score": "0.506119", "text": "def CopyEntitiesToAnsaModel(model, entities, copy_container_contents):", "title": "" }, { "docid": "a4d55bdef158b6a470d515b6cafa87d9", "score": "0.5060616", "text": "def apply_to(self, model: nn.Module) -> None:\n with torch.no_grad():\n for name, val in self._get_model_state_iterator(model):\n assert (\n name in self.state\n ), f\"Name {name} not exist, available names are {self.state.keys()}\"\n val.copy_(self.state[name])", "title": "" }, { "docid": "d3b380c852f0a43a4f1b191396ef72bd", "score": "0.50604355", "text": "def load_model_by_id(model_id):\n with open(f'{MODEL_REGISTRY_DIR}/{model_id}.p', 'rb') as f:\n model = pickle.load(f)\n return model", "title": "" }, { "docid": "98d2e975dcc78c9fc9ecfa5e92171763", "score": "0.5060348", "text": "def copy_state(model):\n copy_dict = OrderedDict()\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n copy_dict[k] = v.cpu() if v.is_cuda else v.clone()\n\n return copy_dict", "title": "" }, { "docid": "8f20fd31cdd8cc4fb3cb1af75d5584e6", "score": "0.5051386", "text": "def update(self, model: Ingestion) -> Ingestion:\n raise NotImplementedError(\"Cannot update an Ingestion.\")", "title": "" }, { "docid": "ab0f58aae9890b9a26e77d14cb5b3f61", "score": "0.5028936", "text": "def reset_model(self, model):\n m_id = model.model_id if model.model_id is not None else model.name\n m_id += '_tmp'\n if m_id in self.loaded_models:\n delete_gazebo_models([m_id])\n rospy.sleep(0.5)\n self.load_models([model])", "title": "" }, { "docid": "590ba311084405d7a404a3bfabdca496", "score": "0.5018563", "text": "def test_update_models(self):\n target_one = next(self.target_generator)\n target_two = next(self.target_generator)\n target_three = next(self.target_generator)\n\n new_location = self.location_service.create_model(city='barCity', state='barState', country='barCountry', latitude=3, longitude=3)\n self.service.update_models(filter_args={'ip': [target_one.ip,\n target_two.ip,\n target_three.ip]},\n update_args={'location': new_location})\n\n self.assertEqual('FooCity', target_one.location.city)\n self.assertEqual('FooCity', target_two.location.city)\n self.assertEqual('FooCity', target_three.location.city)\n\n target_one = self.service.get_model(ip=target_one.ip)\n target_two = self.service.get_model(ip=target_two.ip)\n target_three = self.service.get_model(ip=target_three.ip)\n\n self.assertEqual(new_location, target_one.location)\n self.assertEqual(new_location, target_two.location)\n self.assertEqual(new_location, target_three.location)\n\n new_location_two = self.location_service.create_model(city='foofoo', state='barbar', country='foobarfoobar', latitude=4, longitude=4)\n self.service.update_models(case_sensitive=False,\n filter_args={'ip': [target_two.ip,\n target_three.ip],\n 'location': new_location},\n update_args={'location': new_location_two})\n\n target_one = self.service.get_model(ip=target_one.ip)\n target_two = self.service.get_model(ip=target_two.ip)\n target_three = self.service.get_model(ip=target_three.ip)\n\n self.assertEqual(new_location, target_one.location)\n self.assertEqual(new_location_two, target_two.location)\n self.assertEqual(new_location_two, target_three.location)\n\n self.assertRaises(AttributeError, lambda: self.service.update_models(filter_args={'ip': target_three.ip},\n update_args={'foo': 'bar'}))\n self.assertRaises(AttributeError, lambda: self.service.update_models(filter_args={'foo': 'bar'},\n update_args={}))", "title": "" }, { "docid": 
"51a3275faa3efadda918a97564f46107", "score": "0.50176454", "text": "def hotswap_product_models(self):", "title": "" }, { "docid": "c3059aa63eb1197c138a9e9653bb636c", "score": "0.50169086", "text": "def patch_model(model: nn.Module,\n cfg: mmengine.Config,\n backend: str = Backend.DEFAULT.value,\n ir: IR = IR.DEFAULT,\n recursive: bool = True,\n **kwargs) -> nn.Module:\n return MODULE_REWRITER.patch_model(model, cfg, backend, ir, recursive,\n **kwargs)", "title": "" }, { "docid": "fdc737c7369edbb6635fe05b4b5b2bd5", "score": "0.5006109", "text": "def get_pk(self, *args, **kwargs):\n try:\n super(BaseModel, self).save(*args, **kwargs)\n except serializers.SerializerError:\n # ignore errors on serialization of incomplete model\n pass", "title": "" }, { "docid": "ac516915dae975abeb855a7a1c5c1825", "score": "0.50047016", "text": "def _update(self):\n\n obj = self.__class__.get(**dict([(field, self.__getattribute__(field))\n for field in self.unique_together]))\n for attribute in self.__class__.attributes:\n if hasattr(obj, attribute):\n self.__setattr__(attribute, obj.__getattribute__(attribute))\n self.full = True", "title": "" }, { "docid": "62ae526b6c003367910597d23f7e7524", "score": "0.4995073", "text": "def test_update2(self):\n cli = self.create()\n captureOutput = io.StringIO()\n sys.stdout = captureOutput\n cli.onecmd(\"update BaseModel\")\n sys.stdout = sys.__stdout__\n string = captureOutput.getvalue()\n self.assertEqual(string[:-1], \"** instance id missing **\")\n sys.stdout.flush()", "title": "" }, { "docid": "cd1fb66f098e7883d82718c3e41c9636", "score": "0.49878076", "text": "def initialize_put(self, item_id):\r\n self.model = self.get_model(item_id)", "title": "" }, { "docid": "d8d1590963cab8d199b5adb88aa8cd20", "score": "0.49859834", "text": "def _save_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "0914d51c3481c27ce01141f726150990", "score": "0.4983335", "text": "def SetCurrentAnsaModel(model):", "title": "" }, { "docid": "056bf79e58d46c6a92fb79443ce4d852", "score": "0.49781185", "text": "def manage_clone(self, ob, id, REQUEST=None):\n if not ob.cb_isCopyable():\n raise CopyError, eNotSupported % escape(ob.getId())\n try:\n self._checkId(id)\n except:\n raise CopyError, MessageDialog(\n title='Invalid Id',\n message=sys.exc_info()[1],\n action ='manage_main')\n\n self._verifyObjectPaste(ob)\n\n try:\n ob._notifyOfCopyTo(self, op=0)\n except ConflictError:\n raise\n except:\n raise CopyError, MessageDialog(\n title=\"Clone Error\",\n message=sys.exc_info()[1],\n action='manage_main')\n\n orig_ob = ob\n ob = ob._getCopy(self)\n ob._setId(id)\n notify(ObjectCopiedEvent(ob, orig_ob))\n\n self._setObject(id, ob)\n ob = self._getOb(id)\n\n ob._postCopy(self, op=0)\n\n compatibilityCall('manage_afterClone', ob, ob)\n\n notify(ObjectClonedEvent(ob))\n\n return ob", "title": "" }, { "docid": "8658c8283a3974327caf9f8902c3209c", "score": "0.4977528", "text": "def get_object(self, id=None):\n # try:\n # obj = UpdateModel.objects.get(id=id)\n # except UpdateModel.DoesNotExist:\n # obj = None\n\n if id is None:\n return None\n\n qs= self.get_queryset().filter(id=id)\n if qs.count() == 1:\n return qs.first()\n return None", "title": "" }, { "docid": "4f0d3a8f9716cd3df6b515263cbdad20", "score": "0.49757797", "text": "def _model(self):\n pass", "title": "" }, { "docid": "b53057915f74a76f20f0960320190395", "score": "0.4970088", "text": "def testSetDuplicatedBuildModel(self):\n manifest_branch = self.GetRandomString()\n build_id = self.GetRandomString()\n build_target = 
self.GetRandomString()\n build_type = self.GetRandomString()\n artifact_type = self.GetRandomString()\n\n builds = model.BuildModel.query().fetch()\n self.assertEqual(len(builds), 0)\n container = (\n build_info.BUILD_INFO_RESOURCE.combined_message_class(\n manifest_branch=manifest_branch,\n build_id=build_id,\n build_target=build_target,\n build_type=build_type,\n artifact_type=artifact_type,\n ))\n api = build_info.BuildInfoApi()\n response = api.set(container)\n\n self.assertEqual(response.return_code, model.ReturnCodeMessage.SUCCESS)\n builds = model.BuildModel.query().fetch()\n self.assertEqual(len(builds), 1)\n\n container = (\n build_info.BUILD_INFO_RESOURCE.combined_message_class(\n manifest_branch=manifest_branch,\n build_id=build_id,\n build_target=build_target,\n build_type=build_type,\n artifact_type=artifact_type,\n ))\n api = build_info.BuildInfoApi()\n response = api.set(container)\n self.assertEqual(response.return_code, model.ReturnCodeMessage.SUCCESS)\n builds = model.BuildModel.query().fetch()\n self.assertEqual(len(builds), 1)", "title": "" }, { "docid": "c0f9c66fecbdd1f71e530baf624122e8", "score": "0.49610165", "text": "def main():\n # work dir\n new_model_dir = \"models.kipoi_compatible\"\n os.system(\"mkdir -p {}\".format(new_model_dir))\n \n # models\n orig_model_table = \"models_orig.tsv\"\n models = pd.read_csv(orig_model_table, sep=\"\\t\")\n\n # go through each model to update\n for orig_model_idx in range(models.shape[0]):\n model_info = models.iloc[orig_model_idx]\n \n # copy over data\n model_data_name = \"{}/{}.model.data-00000-of-00001\".format(\n new_model_dir, model_info[\"names\"])\n os.system(\"cp {} {}\".format(model_info[\"args_data_url\"], model_data_name))\n models.loc[orig_model_idx, \"args_data_url\"] = model_data_name\n \n # copy over index\n model_index_name = \"{}/{}.model.index\".format(\n new_model_dir, model_info[\"names\"])\n os.system(\"cp {} {}\".format(model_info[\"args_index_url\"], model_index_name))\n models.loc[orig_model_idx, \"args_index_url\"] = model_index_name\n \n # adjust meta and get new md5 sum\n model_meta_name = \"{}/{}.model.meta\".format(\n new_model_dir, model_info[\"names\"])\n adjust_meta_file(model_info[\"args_meta_url\"], model_meta_name)\n models.loc[orig_model_idx, \"args_meta_url\"] = model_meta_name\n md5sum_val = get_md5sum(model_meta_name)\n models.loc[orig_model_idx, \"args_meta_md5\"] = md5sum_val\n\n # save out to models.tsv\n models.to_csv(\n \"models.tsv\", sep=\"\\t\", index=False, header=True)\n\n return", "title": "" }, { "docid": "e149c521fbb6c8fc4ee1f14048c719fd", "score": "0.49562418", "text": "def test_patch_device_datasource_instance_by_id(self):\n pass", "title": "" }, { "docid": "4a5c2c8887490c2d898b5b1028fcdc5d", "score": "0.49530682", "text": "def update_model(self):\n self.request = self.model.get_request(self.events)\n self.model.user_request(self.request)", "title": "" }, { "docid": "aa74edb3e39c7d2fdc095a14032ecf08", "score": "0.4951605", "text": "def test_model_set_data(self):\n m = self.TestModel(id=1)\n data = {'id': 5}\n m.set_data(data=data)\n self.assertEqual(m.id, 5)", "title": "" }, { "docid": "1afb0566060f59b5be46dfc6310d495c", "score": "0.49404955", "text": "def test_update_model(self):\n target_one = next(self.target_generator)\n target_two = next(self.target_generator)\n target_three = next(self.target_generator)\n\n new_location = self.location_service.create_model(city='FOO', state='BAR', country='FOOBAR', latitude=1, longitude=1)\n new_location_two = 
self.location_service.create_model(city='BAR', state='FOO', country='BARFOO', latitude=2, longitude=2)\n\n self.service.update_model(filter_args={'ip': target_one.ip},\n update_args={'location': new_location})\n self.service.update_model(case_sensitive=False,\n filter_args={'ip': target_two.ip.upper(),\n 'location': [target_one.location, target_two.location]},\n update_args={'location': new_location_two})\n\n self.assertEqual('FooCity', target_one.location.city)\n self.assertEqual(45, target_two.location.latitude)\n self.assertEqual('FooCity', target_two.location.city)\n\n target_one = self.service.get_model(ip=target_one.ip)\n target_two = self.service.get_model(ip=target_two.ip)\n\n self.assertEqual(new_location.city, target_one.location.city)\n self.assertEqual(2, target_two.location.latitude)\n self.assertEqual(new_location_two.city, target_two.location.city)\n\n self.assertRaises(AttributeError, lambda: self.service.update_model(filter_args={'ip': [target_one.ip,\n target_three.ip]},\n update_args={}))\n self.assertRaises(AttributeError, lambda: self.service.update_model(filter_args={'ip': target_three.ip},\n update_args={'foo': 'bar'}))\n self.assertRaises(AttributeError, lambda: self.service.update_model(filter_args={},\n update_args={}))", "title": "" }, { "docid": "cbdf4764fdf978fb8380b270b3ca34c2", "score": "0.4937378", "text": "def apply_modifications(model, custom_objects=None):\n # The strategy is to save the modified model and load it back. This is done because setting the activation\n # in a Keras layer doesnt actually change the graph. We have to iterate the entire graph and change the\n # layer inbound and outbound nodes with modified tensors. This is doubly complicated in Keras 2.x since\n # multiple inbound and outbound nodes are allowed with the Graph API.\n model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')\n try:\n model.save(model_path)\n return load_model(model_path, custom_objects=custom_objects)\n finally:\n os.remove(model_path)", "title": "" }, { "docid": "c78d803518225c1ed59c18c5e7413793", "score": "0.49364877", "text": "def reset_model(self):\r\n name = \"?\"\r\n description = \"\"\r\n if self.has_model:\r\n name = self.model.name\r\n description = self.model.description\r\n self.model = self.__instantiate_model_by_id(name, description)", "title": "" }, { "docid": "0e95a91096219421828ca0ccdb7cde4e", "score": "0.49351624", "text": "def test_clone_nested_sklearn():\n from sklearn.ensemble import GradientBoostingRegressor\n\n from sktime.forecasting.compose import make_reduction\n\n sklearn_model = GradientBoostingRegressor(random_state=5, learning_rate=0.02)\n original_model = make_reduction(sklearn_model)\n copy_model = original_model.clone()\n copy_model.set_params(estimator__random_state=42, estimator__learning_rate=0.01)\n\n # failure condition, see issue #4704: the setting of the copy also sets the orig\n assert original_model.get_params()[\"estimator__random_state\"] == 5", "title": "" }, { "docid": "4a56fdc98af6963dd5b1089e3315bf86", "score": "0.49278396", "text": "def test_model_cannot_add_relationship_with_same_id_as_element(empty_model: Model):\n sys1 = empty_model.add_software_system(name=\"sys1\")\n sys2 = empty_model.add_software_system(name=\"sys2\")\n with pytest.raises(\n ValueError, match=\"Relationship.* has the same ID as SoftwareSystem.*\"\n ):\n empty_model.add_relationship(source=sys1, destination=sys2, id=sys1.id)", "title": "" }, { "docid": "055fce32509ec44e31692d5934aa72aa", "score": 
"0.49250305", "text": "def update_model(\n model_artifact,\n parameters: dict = None,\n metrics: dict = None,\n extra_data: dict = None,\n inputs: List[Feature] = None,\n outputs: List[Feature] = None,\n feature_vector: str = None,\n feature_weights: list = None,\n key_prefix: str = \"\",\n labels: dict = None,\n write_spec_copy=True,\n store_object: bool = True,\n):\n\n if hasattr(model_artifact, \"artifact_url\"):\n model_artifact = model_artifact.artifact_url\n\n if isinstance(model_artifact, ModelArtifact):\n model_spec = model_artifact\n elif is_store_uri(model_artifact):\n model_spec, _ = store_manager.get_store_artifact(model_artifact)\n else:\n raise ValueError(\"model path must be a model store object/URL/DataItem\")\n\n if not model_spec or model_spec.kind != \"model\":\n raise ValueError(f\"store artifact ({model_artifact}) is not model kind\")\n\n if parameters:\n for key, val in parameters.items():\n model_spec.parameters[key] = val\n if metrics:\n for key, val in metrics.items():\n model_spec.metrics[key_prefix + key] = val\n if labels:\n for key, val in labels.items():\n model_spec.labels[key] = val\n if inputs:\n model_spec.inputs = inputs\n if outputs:\n model_spec.outputs = outputs\n if feature_weights:\n model_spec.feature_weights = feature_weights\n if feature_vector:\n model_spec.feature_vector = feature_vector\n\n if extra_data:\n for key, item in extra_data.items():\n if hasattr(item, \"target_path\"):\n extra_data[key] = item.target_path\n\n upload_extra_data(model_spec, extra_data, prefix=key_prefix, update_spec=True)\n\n if write_spec_copy:\n spec_path = path.join(model_spec.target_path, model_spec_filename)\n\n # the model spec yaml should not include the tag, as the same model can be used with different tags,\n # and the tag is not part of the model spec but the metadata of the model artifact\n model_spec_yaml = _remove_tag_from_spec_yaml(model_spec)\n store_manager.object(url=spec_path).put(model_spec_yaml)\n\n model_spec.db_key = model_spec.db_key or model_spec.key\n if store_object:\n mlrun.get_run_db().store_artifact(\n model_spec.db_key,\n model_spec.to_dict(),\n model_spec.tree,\n iter=model_spec.iter,\n project=model_spec.project,\n )\n return model_spec", "title": "" }, { "docid": "a22f21895ff49c63c142547f4112caa8", "score": "0.49242228", "text": "def _refresh_object(obj_type, obj_id):\n # type: (db.Model) -> db.Model\n return obj_type.query.get(obj_id)", "title": "" }, { "docid": "60b3e5c36bda568345c5416765b4c6f8", "score": "0.4923886", "text": "def save(self):\n model = self.__class__\n _id = self._id\n\n if not _id: # if insert\n model.runtime.set_set([], self.data)\n _id = Query.insert(model.runtime)\n if _id:\n self.data[model.primarykey.name] = _id # set primarykey value\n self._cache = self.data.copy() # sync cache after save\n return _id\n else: # update\n # only update changed data\n dct = dict(set(self.data.items()) - set(self._cache.items()))\n\n if not dct:\n return 1 # data not change\n re = model.at(_id).update(**dct)\n if re:\n self._cache = self.data.copy() # sync cache after save\n return re # success update\n return 0", "title": "" }, { "docid": "f5ae1cb3a58cf1f66b92879dde920b6d", "score": "0.49233195", "text": "def save(self, *args, **kwargs):\n if not self.id:\n super().save(*args, **kwargs)\n self.update()\n super().save(args, kwargs)", "title": "" }, { "docid": "368772923b7a1cd21bae552112681ffe", "score": "0.49186698", "text": "def modify_model(roi,which,model,free=None,keep_old_flux=True):\n\n manager,index = roi.mapper(which)\n 
source = roi.get_source(which)\n\n if model is not None:\n if not isinstance(model,Model):\n raise Exception(\"model must be of type Models.Model\")\n\n if keep_old_flux: \n emin,emax=roi.bin_edges[[0,-1]]\n model.set_flux(roi.get_model(which).i_flux(emin=emin,emax=emax),emin=emin,emax=emax)\n\n manager.models[index]=model\n\n if hasattr(manager,'bgmodels') and hasattr(manager.bgmodels[index],'smodel'):\n manager.bgmodels[index].smodel=model\n\n if hasattr(source,'model'): source.model=model\n if hasattr(source,'smodel'): source.smodel=model\n\n if free is not None: \n model=roi.get_model(which)\n\n if isinstance(free,bool):\n free=np.asarray([free]*len(model.get_all_parameters()))\n\n assert(len(free)==len(model.get_all_parameters()))\n for i in xrange(len(free)):\n model.freeze(i,freeze=not free[i])\n\n roi.__update_state__()", "title": "" } ]
83a57e4c1e60f8cf3070ed419312c611
Get the initial arguments JSON.
[ { "docid": "2a6c308824d75aae2ffd0e84373d535d", "score": "0.86039567", "text": "def get_initial_arguments_json(self):\n # Define this in subclasses\n raise NotImplementedError", "title": "" } ]
[ { "docid": "0aa6ceae496229d0bfae3964aeeb7ca2", "score": "0.8692744", "text": "def get_initial_arguments_json(self):\n return self.get_object().arguments", "title": "" }, { "docid": "098d839e7def9d209ce24da0c6fb7d74", "score": "0.7873527", "text": "def get_initial_arguments_json(self):\n tasktype = self.get_object()\n\n return {\n **{key: \"value\" for key in tasktype.required_arguments},\n **tasktype.required_arguments_default_values,\n }", "title": "" }, { "docid": "6e4348c51d24165e5399fedd6c18cb81", "score": "0.71119666", "text": "def args():\n return {}", "title": "" }, { "docid": "9cdd829ff720094f1a8b3e43f4ea511c", "score": "0.709088", "text": "def get_args() -> str:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-j\", \"--json\", type=str, default=\"\", help=\"start GUI with provided tiling\"\n )\n return parser.parse_args().json", "title": "" }, { "docid": "c141b60405240fbfdb99b9d98648e636", "score": "0.7089883", "text": "def args(self):\n if not self.args_raw:\n return []\n\n return json.loads(self.args_raw)", "title": "" }, { "docid": "16c89dcb7ede33898745528ff9055dbf", "score": "0.69552094", "text": "def getInitialArguments(self):\n return ()", "title": "" }, { "docid": "38f646fd06c9e5c0cf3fd09dfcc8f6ee", "score": "0.68463033", "text": "def get_arguments() -> Dict[str, Any]:\n\tconfig = {}\n\targs, unknowns = parser.parse_known_args()\n\t# preprocess the JSON files.\n\t# TODO Expand the url access to the other JSON file arguments ?\n\tif args.config is not None:\n\t\tif not json_stream_to_structure('--config', args.config, config):\n\t\t\texit(1)\n\n\tif args.creds is not None:\n\t\tif not json_stream_to_structure('--creds', args.creds, config):\n\t\t\texit(1)\n\n\t# load the parameters. first the known, then the unknowns\n\targs = cleanup_empty_args(args)\n\tconfig.update(args)\n\tconfig.update(parse_unspecified_argument_list(unknowns))\n\t# amend the parameters (check internal consistency)\n\t# Installation can't be silent if config is not passed\n\tif args.get('config') is None:\n\t\tconfig[\"silent\"] = False\n\telse:\n\t\tconfig[\"silent\"] = args.get('silent')\n\n\t# avoiding a compatibility issue\n\tif 'dry-run' in config:\n\t\tdel config['dry-run']\n\n\treturn config", "title": "" }, { "docid": "a0c2802314b53a3248d3df668bf81645", "score": "0.67208606", "text": "def init_args(self):\n return self._init_args", "title": "" }, { "docid": "3c8cc2cf8719462a16ae5d9e7e4acfbd", "score": "0.669432", "text": "def initial_parameters(self, **kwargs):\n return dict()", "title": "" }, { "docid": "ee5d048ecf6dd9e86a9506a299800713", "score": "0.64429605", "text": "def args(self) -> Mapping[str, str]:\n return pulumi.get(self, \"args\")", "title": "" }, { "docid": "ae970bef0cdc78349e2402aa10ab2227", "score": "0.6409676", "text": "def _get_default_ag_args(cls) -> dict:\n return {}", "title": "" }, { "docid": "734556aba45cf4f5bebed33ab1a37959", "score": "0.6389047", "text": "def deserialize_init_args(cls, args: Dict[str, Any]) -> Dict[str, Any]:\n return extract_init_args(args=args, class_=cls)", "title": "" }, { "docid": "782d46f04452b4141f413ccba8ece845", "score": "0.6387698", "text": "def get_arguments():\n logger.info(\"Getting the arguments\")\n\n with open(\"~/.config/steam_credentials.json\", \"r\") as credential_file:\n credentials = json.load(credential_file)\n\n args = {\n \"app_id\": \"271590\",\n \"install_location\": \"/mnt/storage/steam/SteamLibrary/\",\n \"platform\": \"windows\",\n \"username\": credentials[\"username\"],\n \"password\": 
credentials[\"password\"]\n \n }\n\n logger.info(\"logger arguments got\")\n logger.debug(pprint.pformat(args))\n return args", "title": "" }, { "docid": "55f12ec49f0e51e80ac7fe99560dd82d", "score": "0.6352962", "text": "def args(self):\n args = self.inputs_optional\n args.update(self.inputs_required)\n return dict(args)", "title": "" }, { "docid": "7b364235e073a2f1bb66a8adae3b6a8f", "score": "0.6325199", "text": "def args(self):\n params = self.data.items()\n return dict((k, v) for k, v in params if k not in self.PARAMS)", "title": "" }, { "docid": "94290d2b2fc991c94350c0b72823d9fd", "score": "0.6309865", "text": "def kwargs(self):\r\n if not self.serialized_kwargs:\r\n return {}\r\n return json.loads(self.serialized_kwargs)", "title": "" }, { "docid": "cfe8bc5a2bc42b0f06ae8c12f8fd6674", "score": "0.6306999", "text": "def kwargs(self):\n if not self.kwargs_raw:\n return {}\n\n return json.loads(self.kwargs_raw)", "title": "" }, { "docid": "09d959559c2de97546c5aa884b252928", "score": "0.62643117", "text": "def args(self, args):\n self.args_raw = json.dumps(args)", "title": "" }, { "docid": "706dbdcb5dfdbdc19d82b41e3f57765d", "score": "0.62280256", "text": "def get_arguments(self):\n\t\treturn self.args", "title": "" }, { "docid": "32d4b121d2941146067e36f313581e62", "score": "0.62216276", "text": "def get_arguments(self):\n return self.arguments", "title": "" }, { "docid": "8fabace3dcfcd8d39d011e21d918811b", "score": "0.6217465", "text": "def args(self):\n return self[\"args\"]", "title": "" }, { "docid": "95f557078b51ff26a9c1cba6f20644b6", "score": "0.62023", "text": "def get_cli_arguments(self):\n pass", "title": "" }, { "docid": "be66598618df588b34e2079c146756bf", "score": "0.61077", "text": "def _get_init_args(self):\n\n args = {}\n for rop in self.ro_properties:\n if rop in self.properties:\n args[rop] = self.properties[rop]\n return args", "title": "" }, { "docid": "aeb15532c76a205898d7e169372a0f27", "score": "0.61047333", "text": "def get_args_config() -> Dict:\n parser = create_arg_parser()\n return vars(parser.parse_args())", "title": "" }, { "docid": "75c6284db905d4f6e3104c9434945e66", "score": "0.6086651", "text": "def get_arguments(self):\n ApiCli.get_arguments(self)\n\n if self.args.tenant_id is not None:\n self._tenant_id = self.args.tenant_id\n\n if self.args.fingerprint_fields is not None:\n self._fingerprint_fields = self.args.fingerprint_fields\n\n if self.args.title is not None:\n self._title = self.args.title\n\n if self.args.source is not None:\n self._source = self.args.source\n\n if self.args.severity is not None:\n self._severity = self.args.severity\n\n if self.args.message is not None:\n self._message = self.args.message\n\n event = {}\n\n if self._title is not None:\n event['title'] = self._title\n\n if self._severity is not None:\n event['severity'] = self._severity\n\n if self._message is not None:\n event['message'] = self._message\n\n if self._source is not None:\n if 'source' not in event:\n event['source'] = {}\n if len(self._source) >= 1:\n event['source']['ref'] = self._source[0]\n if len(self._source) >= 2:\n event['source']['type'] = self._source[1]\n\n self._process_properties(self.args.properties)\n if self._properties is not None:\n event['properties'] = self._properties\n\n if self._fingerprint_fields is not None:\n event['fingerprintFields'] = self._fingerprint_fields\n\n self.data = json.dumps(event, sort_keys=True)\n self.headers = {'Content-Type': 'application/json'}", "title": "" }, { "docid": "b4b69f590048071ff85e5bf4a0c90686", "score": 
"0.6070603", "text": "def arguments(self):\n return []", "title": "" }, { "docid": "df5136ba77b9dfce5a551ab213ac126a", "score": "0.60497284", "text": "def _initializer_args(self):", "title": "" }, { "docid": "7062b834991594ae6510e79f358a8522", "score": "0.604459", "text": "def get_arguments(self):\n return self.arguments", "title": "" }, { "docid": "079d9d4891fa3e08ba3c4cbcee2be6af", "score": "0.6044228", "text": "def localGetInitParams(self):\n return {}", "title": "" }, { "docid": "e864efe69ea1656ab37fa93425a617b6", "score": "0.6042186", "text": "def serialize_args(self):\n\n return (self.name,), {'required': True}", "title": "" }, { "docid": "413460b20db25796baa2035d8ab4a1bb", "score": "0.602425", "text": "def args(self):\n return self._parsed_args", "title": "" }, { "docid": "c96f0524dd6f5d8526b1f3a149af62fd", "score": "0.600535", "text": "def getArguments(self):\n ApiCli.getArguments(self)\n\n # Get the host group name\n if self.args.hostGroupName is not None:\n self.hostGroupName = self.args.hostGroupName\n\n # Get the list of sources separated by commas\n if self.args.sources is not None:\n self.sources = self.args.sources\n\n payload = {\"name\": self.hostGroupName, \"hostnames\": []}\n if self.sources is not None:\n source_list = str.split(self.sources, ',')\n for s in source_list:\n payload['hostnames'].append(s)\n self.data = json.dumps(payload, sort_keys=True)\n self.headers = {'Content-Type': 'application/json', \"Accept\": \"application/json\"}", "title": "" }, { "docid": "7331260d5f62d4248c67ce7a4c056fe8", "score": "0.5976057", "text": "def get_params():\n try:\n logger.info('get_params')\n parser = ArgumentParser(description='ejecucion manual')\n parser.add_argument('json_test', type=str, metavar='<json_test>', help='')\n parser.add_argument('--debug', action=\"count\", default=0, help='')\n args = parser.parse_args()\n return (\n json.loads(args.json_test), None, args.debug\n )\n except Exception as exc:\n exc_output = sys.exc_info()\n logger.error('{},{}'.format(str(exc),str(exc_output[2].tb_lineno)))\n return {\n 'status' : False\n , 'resultado': str(exc)\n }", "title": "" }, { "docid": "b238fe877cc688d7995f1b7efe57e20c", "score": "0.59645027", "text": "def arguments(self) -> Optional[str]:\n return pulumi.get(self, \"arguments\")", "title": "" }, { "docid": "5971db716428d640165fbcb14f134b62", "score": "0.59543496", "text": "def get_docker_argument_dict(self):\n environment_dict = self.extract_environment()\n volume_dict = self.extract_volumes()\n volume_dict.update(self.extract_required_commands())\n return {'environment':environment_dict, 'volumes':volume_dict}", "title": "" }, { "docid": "72558ff6350443ac31f62b52936baa9a", "score": "0.59486043", "text": "def get_args(self):\n args = dict()\n args[\"num_parallel_workers\"] = self.num_parallel_workers\n return args", "title": "" }, { "docid": "9a9a179af054e25a73fafb84182f5e02", "score": "0.5923588", "text": "def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "title": "" }, { "docid": "fe65f90d8ddb3de940cea4ef092b3994", "score": "0.5922049", "text": "def get_args():\n\n # More data validation would be a good idea here\n # Strip anything other than characters listed\n signer_email = pattern.sub(\"\", request.form.get(\"signer_email\"))\n signer_name = pattern.sub(\"\", request.form.get(\"signer_name\"))\n cc_email = pattern.sub(\"\", request.form.get(\"cc_email\"))\n cc_name = pattern.sub(\"\", request.form.get(\"cc_name\"))\n envelope_args = {\n \"signer_email\": signer_email,\n 
\"signer_name\": signer_name,\n \"cc_email\": cc_email,\n \"cc_name\": cc_name,\n \"status\": \"sent\",\n }\n args = {\n \"account_id\": session[\"ds_account_id\"],\n \"base_path\": session[\"ds_base_path\"],\n \"access_token\": session[\"ds_access_token\"],\n \"envelope_args\": envelope_args\n }\n return args", "title": "" }, { "docid": "0fa9e5755aa071939f7e74cd93313836", "score": "0.58999556", "text": "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-n\",\n \"--name\",\n action=\"append\",\n dest=\"names\",\n default=[],\n help=(\"Extension Attribute to create or update\"),\n )\n parser.add_argument(\n \"--replace\",\n help=\"overwrite an existing Extension Attribute\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--script\", default=\"\", help=\"Full path to the template script to upload\",\n )\n parser.add_argument(\n \"--url\", default=\"\", help=\"the Jamf Pro Server URL\",\n )\n parser.add_argument(\n \"--user\",\n default=\"\",\n help=\"a user with the rights to create and update an extension attribute\",\n )\n parser.add_argument(\n \"--password\", default=\"\", help=\"password of the user\",\n )\n parser.add_argument(\n \"--prefs\",\n default=\"\",\n help=(\n \"full path to an AutoPkg prefs file containing \"\n \"JSS URL, API_USERNAME and API_PASSWORD, \"\n \"for example an AutoPkg preferences file which has been configured \"\n \"for use with JSSImporter (~/Library/Preferences/com.github.autopkg.plist) \"\n \"or a separate plist anywhere (e.g. ~/.com.company.jcds_upload.plist)\"\n ),\n )\n parser.add_argument(\n \"-k\",\n \"--key\",\n action=\"append\",\n dest=\"variables\",\n default=[],\n metavar=\"KEY=VALUE\",\n help=(\"Provide key/value pairs for script value substitution. \"),\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"print verbose output headers\",\n )\n args = parser.parse_args()\n\n # Add variables from commandline. These might override those from\n # environment variables and recipe_list\n cli_custom_keys = {}\n for arg in args.variables:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n print(f\"Invalid variable [key=value]: {arg}\")\n cli_custom_keys[key] = value\n\n return args, cli_custom_keys", "title": "" }, { "docid": "0fa9e5755aa071939f7e74cd93313836", "score": "0.58999556", "text": "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-n\",\n \"--name\",\n action=\"append\",\n dest=\"names\",\n default=[],\n help=(\"Extension Attribute to create or update\"),\n )\n parser.add_argument(\n \"--replace\",\n help=\"overwrite an existing Extension Attribute\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--script\", default=\"\", help=\"Full path to the template script to upload\",\n )\n parser.add_argument(\n \"--url\", default=\"\", help=\"the Jamf Pro Server URL\",\n )\n parser.add_argument(\n \"--user\",\n default=\"\",\n help=\"a user with the rights to create and update an extension attribute\",\n )\n parser.add_argument(\n \"--password\", default=\"\", help=\"password of the user\",\n )\n parser.add_argument(\n \"--prefs\",\n default=\"\",\n help=(\n \"full path to an AutoPkg prefs file containing \"\n \"JSS URL, API_USERNAME and API_PASSWORD, \"\n \"for example an AutoPkg preferences file which has been configured \"\n \"for use with JSSImporter (~/Library/Preferences/com.github.autopkg.plist) \"\n \"or a separate plist anywhere (e.g. 
~/.com.company.jcds_upload.plist)\"\n ),\n )\n parser.add_argument(\n \"-k\",\n \"--key\",\n action=\"append\",\n dest=\"variables\",\n default=[],\n metavar=\"KEY=VALUE\",\n help=(\"Provide key/value pairs for script value substitution. \"),\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"print verbose output headers\",\n )\n args = parser.parse_args()\n\n # Add variables from commandline. These might override those from\n # environment variables and recipe_list\n cli_custom_keys = {}\n for arg in args.variables:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n print(f\"Invalid variable [key=value]: {arg}\")\n cli_custom_keys[key] = value\n\n return args, cli_custom_keys", "title": "" }, { "docid": "1280beb161340645d41280b6462eb761", "score": "0.5897348", "text": "def arguments(self) -> Optional[Sequence['outputs.ArgumentResponse']]:\n return pulumi.get(self, \"arguments\")", "title": "" }, { "docid": "95e577175b040524cabc6458533d6466", "score": "0.5894388", "text": "def kwargs(self):\n return self.raw.get('k') or {}", "title": "" }, { "docid": "22de0f378c5790c75abd9343fb59b4de", "score": "0.58837", "text": "def getArguments(self, raw = False):\n if raw:\n return self.__arguments\n\n arguments = {}\n for name, value in self.__arguments.iteritems():\n if isinstance(value, (list, dict)):\n continue\n arguments[name] = value\n\n return arguments", "title": "" }, { "docid": "5473ff485073c8f51eaf3bf5b6a8e4a9", "score": "0.58655787", "text": "def generateargs(self):\n pass", "title": "" }, { "docid": "7f7d5a339372a9ceebd32b4aeeac2099", "score": "0.5854206", "text": "def get_args(self):\n return self.args", "title": "" }, { "docid": "7f7d5a339372a9ceebd32b4aeeac2099", "score": "0.5854206", "text": "def get_args(self):\n return self.args", "title": "" }, { "docid": "b88a4feab969df9afbd300ca4a8d2e83", "score": "0.58499515", "text": "def get_args_config() -> Dict:\n parser = create_arg_parser()\n config = vars(parser.parse_args())\n return config", "title": "" }, { "docid": "e8355411aed12f9677b20d7f0348c534", "score": "0.58480287", "text": "def get_params():\n\n temp = sys.argv[1:]\n cmd = temp[0] if '--' in temp[0] else '--run'\n args = temp[1:] if '--' in temp[0] else temp\n return {\n 'cmd': cmd,\n 'args': args,\n }", "title": "" }, { "docid": "638a8758127a7784246ee2613abed54f", "score": "0.5842995", "text": "def getJSONObj(**kwargs):\n return json.dumps(kwargs)", "title": "" }, { "docid": "05ca2fa4b1ebb500170feef6a1612eeb", "score": "0.5827282", "text": "def extract_arguments(self):\n pass", "title": "" }, { "docid": "c3e7842c9f54ff359d6d1e7d78a81c49", "score": "0.5824833", "text": "def api_read_options():\n return request.args.to_dict()", "title": "" }, { "docid": "5b6b128b3f7cadc0efa12c01e489a3e3", "score": "0.5822039", "text": "def test_args_bundle():\n\n with api.Bundle(TEST_CONTEXT) as b:\n b.add_params(serialized_json_args)\n b.name = 'output'\n\n b = api.get(TEST_CONTEXT, 'output')\n\n assert(b.params == serialized_json_args)", "title": "" }, { "docid": "3ffd63294fb8fdb35970f940c8b44def", "score": "0.58203965", "text": "def get_run_arguments(self, parsed_cli_dict):\n pass", "title": "" }, { "docid": "db1cfdf18e3f27259ed154c168c4b4ec", "score": "0.58155864", "text": "def _getArgs(self):\n return [\n self._maps_,\n {'encoding': self._charset_, 'path_encoding': self._fcharset_}\n ]", "title": "" }, { "docid": "8464dc7d6398b606fe1fb32993dbdcc8", "score": "0.58014965", "text": "def get_params(self, *args, **kwargs):\n return {}", 
"title": "" }, { "docid": "a6bf96098858bd9ec552f35106d252e0", "score": "0.57864213", "text": "def parse_arguments():\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-y', '--year', help = 'year')\n\tparser.add_argument('-m', '--month', help = 'month')\n\targs = parser.parse_args()\n\tdict_args = vars(args)\n\treturn dict_args", "title": "" }, { "docid": "33f6a77aad3cdcc51829a0388c060bce", "score": "0.5783465", "text": "def get_args(discussion: dict) -> dict:\n path = f'{domain}/arguments?discussionId={discussion[\"id\"]}'\n return requests.get(path).json()", "title": "" }, { "docid": "d0e36b2f7352210bae1dbf3fc5b01bab", "score": "0.57758504", "text": "def get_frontend_args(self) -> Dict[str, Any]:\n\n with open(self.opts['onboarding_data'], \"r\", encoding=\"utf-8-sig\") as f:\n onboarding_data = json.loads(f.read())\n\n with open(self.opts['annotation_buckets'], \"r\", encoding=\"utf-8-sig\") as f:\n annotation_buckets = json.loads(f.read())\n\n return {\n \"task_description\": self.opts['task_description'],\n \"task_title\": self.opts['task_title'],\n \"onboarding_data\": onboarding_data,\n \"annotation_buckets\": annotation_buckets,\n \"annotate_last_utterance_only\": self.opts['annotate_last_utterance_only'],\n \"ask_reason\": self.opts['ask_reason'],\n \"frame_height\": '100%',\n \"num_subtasks\": self.opts[\"subtasks_per_unit\"],\n \"block_mobile\": True,\n }", "title": "" }, { "docid": "2a90bedba8754bf0a4c749364e6b1cd2", "score": "0.5772908", "text": "def read_args() -> Dict[str, str]:\n if len(sys.argv) != 4:\n print(\"Usage: python -m projects.pj01.weather [FILE] [COLUMN] [OPERATION]\")\n exit()\n return {\n \"FILE\": sys.argv[1],\n \"COLUMN\": sys.argv[2],\n \"OPERATION\": sys.argv[3]\n }", "title": "" }, { "docid": "f5c990b8eb25b7eb0e1f577eb1897340", "score": "0.5765304", "text": "def parse_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--out-json-fname', action='store', default='project_info_new.json', help='xxx')\n\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "3324c1c704c1f108c8fd2b493c5b907f", "score": "0.5757195", "text": "def getArgs(self):\n agent_type = Agent(_ZERG) # specify agent race\n bot_type = Bot(_TERRAN, 1) # specify bot race and difficulty\n player_types = [agent_type, bot_type]\n\n agent_interface = sc2_env.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=32,\n action_space=\"FEATURES\", # actions.ActionSpace.FEATURES,\n use_feature_units=True)\n\n env_args = dict(\n map_name=\"Simple64\",\n step_mul=8,\n game_steps_per_episode=0,\n score_index=-1,\n disable_fog=True,\n agent_interface_format=agent_interface,\n players=player_types)\n\n # add visualization if running only one game environment\n if self.n_envs == 1:\n env_args['visualize'] = True\n\n return env_args", "title": "" }, { "docid": "b24e2287442d6b2a73f00b1dc05589f9", "score": "0.57415056", "text": "def minimal_params(self):\n return MultiDict(verb=self.verb)", "title": "" }, { "docid": "ea3b8bfb7b7baf245710f5d51d33cb9b", "score": "0.571111", "text": "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "title": "" }, { "docid": "d499f89242df1cb5589e6261c46af35d", "score": "0.57069594", "text": "def getArgs(store):", "title": "" }, { "docid": "bba38635144b4c289e8564d15f0ec2b8", "score": "0.5705944", "text": "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--resize', dest='resize', 
action='store_true',\n help='Will resize images locally first. Not needed, but'\n ' will reduce network traffic.')\n parser.add_argument('--input_dir', default='',\n help='A directory containing .jpg or .jpeg files to serialize into a '\n 'request json')\n parser.add_argument('--input_image', default='',\n help='A directory containing .jpg or .jpeg files to serialize into a '\n 'request json')\n\n args = parser.parse_args()\n\n return args", "title": "" }, { "docid": "228e890ebef965fcad2ed6cc745d0eab", "score": "0.5702716", "text": "def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:\n return {}", "title": "" }, { "docid": "9d824116ba735cf109741cf23e298354", "score": "0.569832", "text": "def getTestArguments():\n arguments = {\n \"AcquisitionEra\": \"WMAgentCommissioning10\",\n \"Requestor\": \"[email protected]\",\n \"InputDataset\": \"/MinimumBias/Commissioning10-v4/RAW\",\n \"CMSSWVersion\": \"CMSSW_3_9_7\",\n \"ScramArch\": \"slc5_ia32_gcc434\",\n \"ProcessingVersion\": \"2\",\n \"SkimInput\": \"output\",\n \"GlobalTag\": \"GR10_P_v4::All\",\n\n \"CouchURL\": os.environ.get(\"COUCHURL\", None),\n \"CouchDBName\": \"scf_wmagent_configcache\",\n # or alternatively CouchURL part can be replaced by ConfigCacheUrl,\n # then ConfigCacheUrl + CouchDBName + ConfigCacheID\n \"ConfigCacheUrl\": None,\n\n \"ProcScenario\": \"cosmics\",\n \"DashboardHost\": \"127.0.0.1\",\n \"DashboardPort\": 8884,\n \"TimePerEvent\" : 1,\n \"Memory\" : 1,\n \"SizePerEvent\" : 1,\n }\n\n return arguments", "title": "" }, { "docid": "072515c132f1369fcb794ec7c4e4baac", "score": "0.5697437", "text": "def GetBackendStartupArgs(self):\n raise NotImplementedError()", "title": "" }, { "docid": "eae989f5fa2be25f1d597e5fe6e7861e", "score": "0.56864893", "text": "def args(self) -> str:\n return self._args", "title": "" }, { "docid": "98f28ba78e0440cb53ab54cef95e6cf7", "score": "0.5672628", "text": "def assemble_args(self):\n raise NotImplementedError", "title": "" }, { "docid": "932f51e3a2e7f9d4d79b2947b1ed1bf8", "score": "0.56685966", "text": "def get_args():\n return {\n \"account_id\": session.get(\"ds_account_id\"), # Represents your {ACCOUNT_ID}\n \"access_token\": session.get(\"ds_access_token\"), # Represents your {ACCESS_TOKEN}\n \"clickwrap_id\": session.get(\"clickwrap_id\"),\n \"clickwrap_name\": session.get(\"clickwrap_name\"),\n }", "title": "" }, { "docid": "78ef6e0adecc16f0ea9c9663fb7feed9", "score": "0.5666633", "text": "def params():\n return {}", "title": "" }, { "docid": "a78fc9f8b55a8174c53527316ebce7c5", "score": "0.5666374", "text": "def command_args(self):\n return self.options.appArgs", "title": "" }, { "docid": "f521e39203beb8ef4d794d9c1e8cf80d", "score": "0.56545717", "text": "def format_all_args_json(func_name=None, named=None, nameless=None):\n\n def _is_serializable(v):\n try:\n json.dumps(v)\n return True\n except TypeError:\n return False\n args_filtered = filter(lambda(k, v): isinstance(k, str) and _is_serializable(v), named)\n arg_dict = dict(args_filtered)\n return 'Arguments to {func_name}: {dump}'.format(func_name=func_name, dump=json.dumps(arg_dict))", "title": "" }, { "docid": "b08342912b501c0b228d4d9008cf7ed7", "score": "0.5648711", "text": "def deserialize_args(args, kwargs):\n\n return args, kwargs", "title": "" }, { "docid": "57a4975bddf653ddd0d119ebde106864", "score": "0.56476486", "text": "def args(self):\n return self._impl.args", "title": "" }, { "docid": "0c8b19d82f42772a312bcf6ea77edf17", "score": "0.5639451", "text": "def _getArgs():\n\n parser = 
argparse.ArgumentParser(\n description=\"This will bump the version numbers for NS Apollos files.\")\n parser.add_argument(\"-v\", \"--version\", help=\"Version number to update to\")\n parser.add_argument(\"--ota\", action=\"store_true\", help=\"This will publish the bundle to Expo\")\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "4162428e5c17b4d0245f389efea7ee6b", "score": "0.56340736", "text": "def get_args(self):\n return self._args", "title": "" }, { "docid": "c0c403a1ceaaee33cf2884a9827bcfd3", "score": "0.5629083", "text": "def args(self) -> Namespace:\n return self._args", "title": "" }, { "docid": "11ccef6ae687483c12f18114e53bdbf1", "score": "0.56232595", "text": "def get_json(**kwargs):\n return json.dumps(kwargs)", "title": "" }, { "docid": "1fe0cea85ba572407461e3c7fbf98049", "score": "0.56170344", "text": "def getCloneArgs(self):\n\n values = {\n \"key\": self.subnode_key.makeClone(),\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "title": "" }, { "docid": "ba3bb016142ea9ae7ead241b06792ad2", "score": "0.5606854", "text": "def kwargs(self) -> tp.Dict[str, tp.Any]:\n return {}", "title": "" }, { "docid": "ad1f6576aef47ac5cfbbdd4b9df33c3a", "score": "0.56058806", "text": "def extract_arguments(args, prefix=DATA_PREFIX):\n data = {}\n for key, value in iteritems(args.__dict__):\n if key.startswith(prefix) and value is not None:\n parts = key[len(prefix):].split('__')\n # Think of `d` as a pointer into the resulting nested dictionary.\n # The `for` loop iterates over all parts of the key except the last\n # to find the proper dict into which the value should be inserted.\n # If the subdicts do not exist, they are created.\n d = data\n for p in parts[:-1]:\n assert p not in d or isinstance(d[p], dict)\n d = d.setdefault(p, {})\n # At this point `d` points to the correct dict and value can be\n # inserted.\n d[parts[-1]] = value if value != '' else None\n return data", "title": "" }, { "docid": "446ace13faaa0670c3ab287c0bb4a22c", "score": "0.55949044", "text": "def __json__():", "title": "" }, { "docid": "ed056e83852d9e81601f643fe0f44e7e", "score": "0.55946916", "text": "def arguments(self):\n arguments = [\n \"-G{}\".format(self.generator),\n ] + self.definitions + [\n self.source\n ]\n return arguments", "title": "" }, { "docid": "686828c515903b8b852140666bd94fb6", "score": "0.55855685", "text": "def _get_default_args():\n yesterday = datetime.datetime.combine(\n datetime.datetime.today() - datetime.timedelta(days=1),\n datetime.datetime.min.time())\n\n default_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': yesterday,\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': datetime.timedelta(minutes=5),\n 'schedule_interval': None\n }\n\n return default_args", "title": "" }, { "docid": "505421116ab52001bac42563446c0113", "score": "0.55821586", "text": "def _args_to_dict() -> Mapping[str, str]:\n arguments = {}\n for argument in sys.argv[1:]:\n if '=' in argument:\n separated = argument.find('=')\n key, value = argument[:separated], argument[separated + 1:]\n arguments[key] = value\n return arguments", "title": "" }, { "docid": "a2afb3a6bab6d7641ce4d0107172e77e", "score": "0.5581905", "text": "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process data')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='ouput JSON file', 
required=True)\n parser.add_argument('-d', '--debug', help='Debugging level', required=False, default=0)\n logging.info(\"End to parsing arguments from command line\")\n return parser.parse_args()", "title": "" }, { "docid": "3779183c448afc5b9b2bca67d863ca5d", "score": "0.55787635", "text": "def __json_encode__(self) -> Dict[str, Any]:\n return {\n \"cls\": type(self),\n \"args\": tuple(getattr(self, \"__init_args__\", OrderedDict()).values()),\n \"kwargs\": dict(getattr(self, \"__init_kwargs__\", OrderedDict())),\n }", "title": "" }, { "docid": "382788cada031f24037e19616a660577", "score": "0.5578662", "text": "def args(self):\n return self._args", "title": "" }, { "docid": "55ffa69a9ba7b50174c8592299fa77c8", "score": "0.55770165", "text": "def get_parms():\n if not ARG.LIBRARY:\n get_library()\n if not ARG.NEURONBRIDGE:\n if ARG.SOURCE == 'file':\n ARG.NEURONBRIDGE = NB.get_neuronbridge_version_from_file()\n else:\n ARG.NEURONBRIDGE = NB.get_neuronbridge_version(DBM.neuronMetadata, ARG.LIBRARY)\n if ARG.NEURONBRIDGE:\n ARG.NEURONBRIDGE = \"v\" + ARG.NEURONBRIDGE\n if not ARG.NEURONBRIDGE:\n terminate_program(\"No NeuronBridge version selected\")\n if not ARG.JSON and ARG.SOURCE == 'file':\n print(\"Select a JSON file:\")\n json_base = CLOAD.json_dir + f\"/{ARG.NEURONBRIDGE}\"\n jsonlist = list(map(lambda jfile: jfile.split('/')[-1],\n glob.glob(json_base + \"/*.json\")))\n jsonlist.sort()\n terminal_menu = TerminalMenu(jsonlist)\n chosen = terminal_menu.show()\n if chosen is None:\n terminate_program(\"No JSON file selected\")\n ARG.JSON = '/'.join([json_base, jsonlist[chosen]])", "title": "" }, { "docid": "864532ce4a894f53455b6bbb9fe971f9", "score": "0.55680084", "text": "def parseargs():\n parser = argparse.ArgumentParser(\n description=\"Generate a Linux device driver from JSON\")\n parser.add_argument('-c', '--config',\n help=\"Config file containing autogen configuration in JSON format\")\n parser.add_argument('-w', '--working-dir', default=\".\",\n help=\"Working directory to generate the device driver in\")\n args = parser.parse_args()\n return (args.config, args.working_dir )", "title": "" }, { "docid": "3a03bbcdaf69fc41e5d76e07ed559cf7", "score": "0.5560919", "text": "def parse_args():\n\n arg_index = 1\n arg_dict = {}\n while arg_index < len(sys.argv) - 1:\n arg_dict[sys.argv[arg_index][2:]] = sys.argv[arg_index + 1]\n arg_index += 2\n\n return arg_dict", "title": "" }, { "docid": "0d0b3520f7d2de4d65c097ea71524bd0", "score": "0.55597323", "text": "def ToRequestArgs(self):", "title": "" }, { "docid": "6b8c67b7e32841909245d1aa444672aa", "score": "0.5554402", "text": "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "title": "" }, { "docid": "0def2479817a8da1f65b6c543bf649c2", "score": "0.5553215", "text": "def getTestArguments():\n\n arguments = {\n \"StdJobSplitAlgo\": \"ParentlessMergeBySize\",\n \"StdJobSplitArgs\": {\"files_per_job\": 1},\n \"UnmergedLFNBase\": \"/store/temp/WMAgent/unmerged\",\n \"MergedLFNBase\": \"/store/results\",\n \"MinMergeSize\": 1*1024*1024*1024,\n \"MaxMergeSize\": 3*1024*1024*1024,\n \"MaxMergeEvents\": 100000,\n \"DataTier\": 'USER',\n \"Scenario\": \"\",\n \"AcquisitionEra\": \"Whatever\",\n \"Requestor\": \"[email protected]\",\n \"InputDataset\": \"/MinimumBias/Run2010A-Dec22ReReco_v1/USER\",\n \"CMSSWVersion\": \"CMSSW_3_X_Y\",\n \"ScramArch\": \"slc5_ia32_gcc434\",\n \"ProcessingVersion\": \"1\",\n # 
These may not be needed\n \"GlobalTag\": \"GR10_P_v4::All\",\n \"CouchURL\": os.environ.get(\"COUCHURL\", None),\n \"ConfigCacheUrl\": None,\n \"DashboardHost\": \"127.0.0.1\",\n \"DashboardPort\": 8884,\n }\n\n return arguments", "title": "" }, { "docid": "62896d7aeeb3d4cb34528ee0334146b3", "score": "0.5551297", "text": "def get_args():\n args = { }\n for item in argv:\n ind = item.find('=')\n if ind > 0:\n args[item[:ind]] = eval(item[ind + 1:])\n return args", "title": "" }, { "docid": "21580ba1b5c19e8bd51118380c4f80aa", "score": "0.5551255", "text": "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n \"default\": self.subnode_default.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "title": "" }, { "docid": "13d1bb5476e516da5d6eb3df5aaa832a", "score": "0.55460906", "text": "def get_params(self, *argv, **kwargs) -> dict:\n return self._params.copy()", "title": "" }, { "docid": "724fe209175b50c8e26a6cccd6893c73", "score": "0.55406255", "text": "def get_preprocessing_args():\r\n\r\n parser = argparse.ArgumentParser(description='PyTorch BERT Model')\r\n parser = add_preprocessing_args(parser)\r\n\r\n args = parser.parse_args()\r\n\r\n return args", "title": "" } ]
101c8997fedd063b35ebd1bab2cdf156
Warp frames to 84x84 as done in the Nature paper and later work.
[ { "docid": "55e1dc24ca078905558c0fcc429341b7", "score": "0.0", "text": "def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n\n if env.unwrapped.spec.id == 'MontezumaRevengeNoFrameskip-v4':\n self.ego_game = MontezumaEgoFrame()\n elif env.unwrapped.spec.id == 'GravitarNoFrameskip-v4':\n self.ego_game = GravitarEgoFrame()\n elif env.unwrapped.spec.id == 'PitfallNoFrameskip-v4':\n self.ego_game = PitfallEgoFrame()\n else:\n raise Exception(\"Ego motion not supported for env: {env}\")\n\n # https://github.com/openai/gym/blob/master/gym/spaces/dict.py\n self.observation_space = spaces.Dict({'normal': spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1),\n dtype=np.uint8),\n 'ego': spaces.Box(low=0, high=255,\n shape=(self.ego_game.ego_h,\n self.ego_game.ego_w,\n 1),\n dtype=np.uint8)})", "title": "" } ]
[ { "docid": "441f2546b813a8c8f51f599898f61cba", "score": "0.5635383", "text": "def liftFrame(self, frame):\n\t\tframe.lift()", "title": "" }, { "docid": "81435cd2d848904bced17e4bc3157f38", "score": "0.55682594", "text": "def adjust_images(self, now=0):\n if self.direction != self.old_direction:\n self.walkframes = self.walkframe_dict[self.direction]\n self.old_direction = self.direction\n self.redraw = True\n self.make_image(now)", "title": "" }, { "docid": "52dfde1d68c715d45c05f2ac9a305aea", "score": "0.54805905", "text": "def deform_frame(frame, num_pts=5, std_displacement=20, mean_displacement=0):\n\n # get the displacement values; x, y\n def_grid = np.random.randn(num_pts, num_pts, 2) * std_displacement + mean_displacement\n\n # get the coordinates of the frame\n x, y = np.meshgrid(np.arange(0, frame.shape[1]), np.arange(0, frame.shape[0]))\n\n # get the locations of the displacement values\n locations_x = np.linspace(0, frame.shape[1], num_pts)\n locations_y = np.linspace(0, frame.shape[0], num_pts)\n warped_x, warped_y = np.meshgrid(locations_x, locations_y)\n orig_x = warped_x + def_grid[:, :, 1]\n orig_y = warped_y + def_grid[:, :, 0]\n\n # Interpolate the warp coordinates on the image grid\n grid_z = griddata(np.concatenate((np.expand_dims(orig_x.flatten(), axis=1),\n np.expand_dims(orig_y.flatten(), axis=1)), axis=1),\n np.concatenate((np.expand_dims(warped_x.flatten(), axis=1),\n np.expand_dims(warped_y.flatten(), axis=1)), axis=1),\n (x, y),\n method='cubic')\n # separate back into x and y\n map_x = np.append([], [ar[:, 1] for ar in grid_z]).reshape(x.shape)\n map_y = np.append([], [ar[:, 0] for ar in grid_z]).reshape(x.shape)\n map_x_32 = map_x.astype('float32')\n map_y_32 = map_y.astype('float32')\n\n # map the image to the new coordinates\n deformed_frame = cv2.remap(frame, map_y_32, map_x_32, cv2.INTER_CUBIC)\n\n return deformed_frame", "title": "" }, { "docid": "ccb487d4b6cd8bab74f0d1df924559bc", "score": "0.54737234", "text": "def raster(self, file, dst): # <------------------------------------------ Use rio python bindings here, the compression isnt' working\n self.sp.call([\"rio\", \"warp\", file, dst,\n \"--like\", self.template,\n \"--co\", \"compress=lzw\", # <----------------------------- Might not be working?\n \"--co\", \"blockysize=128\",\n \"--co\", \"blockxsize=128\",\n \"--co\", \"tiled=yes\"])", "title": "" }, { "docid": "d66bb0ca489e26da890e68c671f015d1", "score": "0.54567075", "text": "def _align_window(self, georange):\n midlon, midlat = self.set_target_area_midpoint(georange)\n deltalon = georange[3] - georange[2]\n deltalat = georange[1] - georange[0]\n georange = (midlat - deltalat / 2,\n midlat + deltalat / 2,\n midlon - deltalon / 2,\n midlon + deltalon / 2)\n if self.use_mercator:\n IMAGE_LON_RANGE_LIMIT = IMAGE_LON_RANGE_MERC_LIMIT\n self.merc_proj = Proj(proj='merc', ellps='WGS84')\n coord_tuple = self.merc_proj(georange[2:], georange[:2])\n georange = coord_tuple[1] + coord_tuple[0]\n imaspect = (georange[3] - georange[2]) / (georange[1] - georange[0])\n if imaspect > self.figaspect:\n # Image is wider than canvas, pad upper and lower edges\n lon1 = georange[2]\n lon2 = georange[3]\n if lon2 - lon1 > IMAGE_LON_RANGE_LIMIT:\n lonmid = (lon1 + lon2) / 2\n lon1 = lonmid - IMAGE_LON_RANGE_LIMIT / 2\n lon2 = lonmid + IMAGE_LON_RANGE_LIMIT / 2\n lmid = (georange[0] + georange[1]) / 2\n ldelta = (lon2 - lon1) / self.figaspect\n lat1 = lmid - ldelta / 2\n lat2 = lmid + ldelta / 2\n else:\n # Image is taller than canvas, pad left and right edges\n 
IMAGE_LAT_RANGE_LIMIT = IMAGE_LON_RANGE_LIMIT / self.figaspect\n lat1 = georange[0]\n lat2 = georange[1]\n if lat2 - lat1 > IMAGE_LAT_RANGE_LIMIT:\n latmid = (lat1 + lat2) / 2\n lat1 = latmid - IMAGE_LAT_RANGE_LIMIT / 2\n lat2 = latmid + IMAGE_LAT_RANGE_LIMIT / 2\n lmid = (georange[2] + georange[3]) / 2\n ldelta = (lat2 - lat1) * self.figaspect\n lon1 = lmid - ldelta / 2\n lon2 = lmid + ldelta / 2\n return lat1, lat2, lon1, lon2", "title": "" }, { "docid": "3f6b0017e5a60bc6d878901843121130", "score": "0.5417628", "text": "def warp_image(self, inputs, outputs):\n for s in self.depth_scales:\n for frame_id in self.frame_ids[:1]:\n # image warping\n img_ref = inputs[(\"color\", frame_id, 0)]\n outputs[('warp_img', 1, frame_id, s)] = nnFunc.grid_sample(\n img_ref, outputs[('reproj_xy', 1, frame_id, s)], \n mode='bilinear', padding_mode='border')\n \n return outputs", "title": "" }, { "docid": "c01ca7a23f78d7bb14b75ce61030d7fb", "score": "0.53766626", "text": "def _extract_warped_image_landmarks(self, item):\n logging.debug('.. warp the registered image and get landmarks')\n path_dir = self._get_path_reg_dir(item)\n path_im_ref, path_im_move, _, path_lnds_move = self._get_paths(item, prefer_pproc=False)\n path_log = os.path.join(path_dir, self.NAME_LOG_REGISTRATION)\n\n # warp moving landmarks to reference frame\n path_img_warp = os.path.join(path_dir, os.path.basename(path_im_move))\n dict_params = {\n 'exec_Fiji': self.params['exec_Fiji'],\n 'path_bsh': self.PATH_SCRIPT_WARP_LANDMARKS,\n 'source': path_im_move,\n 'target': path_im_ref,\n 'output': path_dir,\n 'transf-inv': os.path.join(path_dir, self.NAME_TRANSF_INVERSE),\n 'transf-dir': os.path.join(path_dir, self.NAME_TRANSF_DIRECT),\n 'warp': path_img_warp,\n }\n # export source points to TXT\n pts_source = load_landmarks(path_lnds_move)\n save_landmarks(os.path.join(path_dir, self.NAME_LANDMARKS), pts_source)\n # execute transformation\n exec_commands(self.COMMAND_WARP_LANDMARKS % dict_params, path_logger=path_log, timeout=self.EXECUTE_TIMEOUT)\n # load warped landmarks from TXT\n path_lnds_warp = os.path.join(path_dir, self.NAME_LANDMARKS_WARPED)\n if os.path.isfile(path_lnds_warp):\n points_warp = load_landmarks(path_lnds_warp)\n path_lnds_warp = os.path.join(path_dir, os.path.basename(path_lnds_move))\n save_landmarks(path_lnds_warp, points_warp)\n else:\n path_lnds_warp = None\n # return results\n return {\n self.COL_IMAGE_MOVE_WARP: path_img_warp,\n self.COL_POINTS_MOVE_WARP: path_lnds_warp,\n }", "title": "" }, { "docid": "64265309e64b8705597c7f1543cfeb4f", "score": "0.5342173", "text": "def pad_scannet(frame):\n\n w,h = frame['image'].size\n if w==1296 and h==968:\n frame['image'] = ImageOps.expand(frame['image'], border=(0,2))\n frame['intrinsics'][1, 2] += 2\n if 'instance' in frame and frame['instance'] is not None:\n frame['instance'] = ImageOps.expand(frame['instance'], border=(0,2))\n return frame", "title": "" }, { "docid": "79bdb17411408800bee0289ba789f5aa", "score": "0.53028655", "text": "def apply_wrappers(env):\n env = SkipFrame(env, skip=4)\n env = GrayScaleObservation(env, keep_dim=False)\n env = ResizeObservation(env, shape=84)\n env = TransformObservation(env, f=lambda x: x / 255.)\n env = FrameStack(env, num_stack=4)\n return env", "title": "" }, { "docid": "9a7b792cc9936b4a0577210ada2f7cd6", "score": "0.52731484", "text": "def shift_images(prj, sx, sy):\r\n\r\n from skimage import transform as tf\r\n from skimage.feature import register_translation\r\n\r\n # Needs scaling for skimage float operations.\r\n prj, scl 
= scale(prj)\r\n\r\n # For each projection\r\n for m in range(prj.shape[0]):\r\n tform = tf.SimilarityTransform(translation=(sy[m], sx[m]))\r\n prj[m] = tf.warp(prj[m], tform, order=5)\r\n\r\n # Re-normalize data\r\n prj *= scl\r\n\r\n return prj", "title": "" }, { "docid": "458be3793f0ef3eae3b912896554aa62", "score": "0.5236977", "text": "def grid_view(self):\n n_row = np.ceil(np.sqrt(len(self.image_info))).astype(int)\n n_row = max(1, n_row)\n scene_size, _ = self.images_bounds()\n for image_info, pos in zip(self.image_info.values(), itertools.product(range(n_row), repeat=2)):\n translate_2d = np.multiply(scene_size[-2:], pos)\n for layer in image_info.layers:\n self._shift_layer(layer, translate_2d)\n\n if image_info.mask is not None:\n self._shift_layer(image_info.mask, translate_2d)\n\n if image_info.segmentation is not None:\n self._shift_layer(image_info.segmentation, translate_2d)\n self.viewer.reset_view()", "title": "" }, { "docid": "840741698f7322dc031da82acb7fedfa", "score": "0.5231403", "text": "def warp_image(tcks, image):\n spine_tck, width_tck = tcks\n\n #730 was determined for the number of image samples to take perpendicular to the spine\n #from average length of a worm (as determined from previous tests)\n warped = resample.warp_image_to_standard_width(image, spine_tck, width_tck, width_tck, int(spine_tck[0][-1] // 5))\n #warped = resample.sample_image_along_spline(image, spine_tck, 730)\n mask = resample.make_mask_for_sampled_spline(warped.shape[0], warped.shape[1], width_tck)\n #warped = colorize.scale(warped).astype('uint8')\n #warped[~mask] = 255\n\n return (warped, mask)", "title": "" }, { "docid": "b8e9b7a5fe7f9ce6e32b07aeab0b448c", "score": "0.52108294", "text": "def setup_frames(self):\n self.frames.append(self.get_image(0, 0, 16, 16))", "title": "" }, { "docid": "ce4762b35ccbcd42ab3e439a65c36114", "score": "0.5193187", "text": "def fast_warp(img, tf, output_shape=(53,53), mode='reflect'):\r\n m = tf._matrix\r\n img_wf = np.empty((output_shape[0], output_shape[1], 3), dtype='float32')\r\n for k in xrange(3):\r\n img_wf[..., k] = skimage.transform._warps_cy._warp_fast(img[..., k], m, output_shape=output_shape, mode=mode)\r\n return img_wf", "title": "" }, { "docid": "903a6b8293e1dc32e7d1a34e37759151", "score": "0.5189731", "text": "def alignAndStack(p):\n p.addReferenceCatalog()\n p.determineAstrometricSolution()\n p.adjustWCSToReference()\n p.resampleToCommonFrame()\n p.scaleCountsToReference()\n p.stackFrames()\n p.storeProcessedScience(suffix=\"_image\")\n return", "title": "" }, { "docid": "878f6b612ec4192ea6adab98e58d6d22", "score": "0.5185659", "text": "def windmill(self):\r\n sp = self.spacing\r\n\r\n tw = self.tile_width\r\n tl = self.tile_length\r\n s_tw = (tw - sp) / 2\r\n s_tl = (tl - sp) / 2\r\n\r\n cur_y = 0\r\n while cur_y < self.length:\r\n cur_x = 0\r\n\r\n while cur_x < self.width:\r\n self.add_plane(cur_x, cur_y, tw, s_tl) # bottom\r\n self.add_plane(cur_x + tw + sp, cur_y, s_tw, tl) # right\r\n self.add_plane(cur_x + s_tw + sp, cur_y + tl + sp, tw, s_tl) # top\r\n self.add_plane(cur_x, cur_y + s_tl + sp, s_tw, tl) # left\r\n self.add_plane(cur_x + s_tw + sp, cur_y + s_tl + sp, s_tw, s_tl) # center\r\n\r\n cur_x += tw + s_tw + (2*sp)\r\n cur_y += tl + s_tl + (2*sp)", "title": "" }, { "docid": "6fb6ad078d6eed6cc4b81daf9603c771", "score": "0.5173672", "text": "def warp(filenames, output):\n from osgeo import gdal\n log.debug(\"Running 'warp' method.\")\n\n # The merge function returns a single array and the affine transform info\n gdal.Warp(output, 
filenames, format=\"GTiff\",\n options=[\"COMPRESS=LZW\", \"TILED=YES\"])\n\n return output", "title": "" }, { "docid": "062c0cbb3c136fd91bf6c2fcc74b1cbb", "score": "0.51641756", "text": "def adjust_coordinates(self, ref, nsigma=1.0, inplace=False):\n\n out = self if inplace else self.copy()\n\n # Determine the pixel offset of features in the current\n # image relative to features in the reference image.\n dy, dx = out.estimate_coordinate_offset(ref, nsigma)\n\n # Offset the WCS of the current image by the pixel shift found above.\n out.wcs.set_crpix1(out.wcs.get_crpix1() + dx)\n out.wcs.set_crpix2(out.wcs.get_crpix2() + dy)\n\n # Calculate the resulting shift in pixel coordinates, for display\n # to the user.\n units = u.arcsec if self.wcs.unit is u.deg else self.wcs.unit\n offset = np.array([-dy, -dx]) * self.wcs.get_axis_increments(units)\n self._logger.info(\"Shifted the coordinates by dy=%.3g dx=%.3g %s\" %\n (offset[0], offset[1], units))\n return out", "title": "" }, { "docid": "8077ca0fd7a5452c7413a82a0ed32fe2", "score": "0.5147017", "text": "def plan(self, width, offset=0, gap=1):\n image_output = Image(self.image_input, self.verbose)\n image_output.data *= 0\n coordinates_input = self.image_input.getNonZeroCoordinates()\n\n # for all points with non-zeros neighbors, force the neighbors to 0\n for coord in coordinates_input:\n image_output.data[:,:,coord.z-width:coord.z+width] = offset + gap * coord.value\n\n return image_output", "title": "" }, { "docid": "6cb80ede30a6a6acdb6144077e9bd06c", "score": "0.5138111", "text": "def animate(self, trajectory):\n frames = [self.new_image(fill=0)] * self.anchorCount\n # minimum_x = min(t[0] for t in trajectory)\n # if minimum_x < 0:\n # self.x_offset = -minimum_x\n # else:\n # self.x_offset = 0\n # print \"X offset set to %s\" % self.x_offset\n\n for i, (x, y) in enumerate(map(lambda x: self.transform(*x, rounded=True), trajectory)):\n frame = self.new_image(fill=1)\n if self.n_trace > 0:\n for n in reversed(range(1, self.n_trace + 1)):\n if i - n - 1 < 0:\n continue\n x_from, y_from = self.transform(*trajectory[i - n - 1])\n x_to, y_to = self.transform(*trajectory[i - n])\n\n xdiff = abs(x_to - x_from)\n if xdiff == 0:\n if self.ignoreY:\n down = True\n if y_from > y_to:\n y_from, y_to = y_to, y_from\n down = False\n for n_line, y_ in enumerate(range(round(y_from, down),\n round(y_to, down) + 1)):\n p_y = y_\n p_x = x_to\n frame[p_x - self.trace_width:p_x + self.trace_width,\n p_y - self.trace_width:p_y + self.trace_width] = (\n n * 1. / self.n_trace)\n continue\n else:\n delta = (y_to - y_from) / xdiff\n down = True\n if x_from > x_to:\n x_from, x_to = x_to, x_from\n down = False\n for n_line, x_ in enumerate(range(round(x_from, down), round(x_to, down) + 1)):\n p_x = x_\n p_y = min(self.y_max - 1, round(y_from + delta * n_line))\n frame[p_x - self.trace_width:p_x + self.trace_width,\n p_y - self.trace_width:p_y + self.trace_width] = (n * 1. 
/ self.n_trace)\n frame[x - self.hand_width:x + self.hand_width,\n y - self.hand_height:y + self.hand_height] = 0\n frames.append(frame)\n return frames", "title": "" }, { "docid": "37a55d0ad62cb2c8b7fc39f15b644215", "score": "0.51147467", "text": "def image_shift():\n #shift the flat corrected files( they ll be use when combining 3 exposures)\n #shift_files(cr = 'no')\n flat_frames, bias_frames, arc_frames, arc_targets, object_files, targets = files()\n target_names = f7(targets)\n\n #shift the cr cleaned images (to be used for 1 and 2 exposures)\n shift_files()\n\n #check if the shifted images exist\n #for filename in glob.glob(os.path.join(data_path, \"s_*.fits\")):\n # if os.path.exists(filename):\n # os.remove(filename)\n # print 'removing'\n\n iraf.images(_doprint=0)\n iraf.imgeom(_doprint=0)\n iraf.imshift.setParam('input', '@'+os.path.join(data_path, 'input_shift.csv'))\n iraf.imshift.setParam('output', '@'+os.path.join(data_path_reduced, 'output_shift.csv'))\n iraf.imshift.setParam('shifts_file', os.path.join(data_path,'shift.csv') )\n\n iraf.imshift()\n\n return None", "title": "" }, { "docid": "91acae483e0a1bb967d3c827108b0356", "score": "0.5112516", "text": "def adjust_frames(self, now):\n if self.action_state != \"dead\":\n animation_dict = self.all_animations[bool(self.hit_state)]\n animation = animation_dict[self.action_state][self.direction]\n else:\n animation = self.death_anim\n self.redraw = True\n if self.direction_stack or self.hit_state or self.redraw:\n self.image = animation.get_next_frame(now)\n self.redraw = False", "title": "" }, { "docid": "93536009eb4a074dab10aa4e40211788", "score": "0.51049244", "text": "def _warp(self, y):\n d = self.gp.warping_function.d\n mpsi = self.gp.warping_function.psi\n\n z = d * y\n for i in range(len(mpsi)):\n a, b, c = mpsi[i]\n z += a * self.tanh(b * (y + c))\n return z", "title": "" }, { "docid": "8e219dbc497820399686ad12ab79f31f", "score": "0.5081783", "text": "def move_to_workorigin1(self):\n self._send_cmd1(\"G0 X0 Y300 Z0\\r\")", "title": "" }, { "docid": "d1cefb73e8821ae16fb83f0a2c515e7c", "score": "0.5063477", "text": "def MakeMapsUnpolarized(frame):\n if frame.type != core.G3FrameType.Map or \"Wpol\" not in frame:\n return\n\n wgt = frame[\"Wpol\"].TT\n del frame[\"Wpol\"]\n del frame[\"Q\"]\n del frame[\"U\"]\n\n wgt_out = maps.G3SkyMapWeights(frame[\"T\"], polarized=False)\n wgt_out.TT = wgt\n\n frame[\"Wunpol\"] = wgt_out\n\n return frame", "title": "" }, { "docid": "bd165f613c7476898be17b9274ce7a27", "score": "0.5010908", "text": "def frame_to_beam(self, x, y, z, r):\n if self.sample.origin.get() == \"edge\":\n return self.current_frame.frame_to_beam(x, y, z, r)\n elif self.sample.origin.get() == \"center\":\n x0 = self.current_frame.width/2.0\n y0 = self.current_frame.height/2.0\n return self.current_frame.frame_to_beam(x + x0, y + y0, z, r)", "title": "" }, { "docid": "41638dcc3ca5886a2711d57f137cc223", "score": "0.5003515", "text": "def update_shape(self):\n self.map_obj.move_absolute(self.pose2D.pose)", "title": "" }, { "docid": "9c2f350a242b415861ab3fd9e534503d", "score": "0.49990892", "text": "def rectify(self, frames):\r\n k1 = self.dist_coefs['left'][0][0]\r\n k2 = self.dist_coefs['left'][0][1]\r\n p1 = self.dist_coefs['left'][0][2]\r\n p2 = self.dist_coefs['left'][0][3]\r\n k3 = self.dist_coefs['left'][0][4]\r\n \r\n origin_shape = frames[0].shape\r\n new_frames = []\r\n \r\n # undistort image\r\n for i, side in enumerate((\"left\", \"right\")):\r\n new_frames.append(cv2.remap(frames[i],\r\n 
self.undistortion_map[side],\r\n self.rectification_map[side],\r\n cv2.INTER_NEAREST))\r\n \r\n # get distortion depth\r\n position = []\r\n for tmp_point in frames[2]:\r\n tmp = np.linalg.solve(self.lidar_rot,(tmp_point.reshape(3,1) - self.lidar_trans))\r\n tmp = tmp.reshape(3,)\r\n tmp[0] = tmp[0] / tmp[2]\r\n tmp[1] = tmp[1] / tmp[2]\r\n \r\n x = tmp[0]\r\n y = tmp[1]\r\n r = (x**2+y**2)**0.5\r\n x = x*(1+k1*r**2+k2*r**4)+p2*(r**2+2*x**2) + 2*p1*x*y\r\n y = y*(1+k1*r**2+k2*r**4)+p1*(r**2+2*y**2) + 2*p2*x*y\r\n \r\n x = x * self.cam_mats['left'][0,0] + self.cam_mats['left'][0,2]\r\n y = y * self.cam_mats['left'][1,1] + self.cam_mats['left'][1,2]\r\n position.append((x,y))\r\n \r\n position = np.array(position)\r\n depth = np.zeros(origin_shape[:2])\r\n \r\n for i,point in enumerate(position):\r\n x = int(round(point[0]))\r\n y = int(round(point[1] ))\r\n if y < origin_shape[0] and y>0 and x<origin_shape[1] and x>0 and frames[2][i][0] > 0:\r\n depth[y,x] = frames[2][i][0] \r\n \r\n # undistort depth\r\n depth = cv2.remap(depth,\r\n self.undistortion_map['left'],\r\n self.rectification_map['left'],\r\n cv2.INTER_NEAREST)\r\n new_frames.append(depth)\r\n \r\n # get disparity(undistort) from depth \r\n disparity = np.ones(depth.shape) * -1\r\n \r\n # our baseline : 57cm, width : 935 pixel\r\n disparity = np.where(depth>0,(789.68925*self.baseline)/depth,-1)\r\n \r\n for i in range(disparity.shape[0]):\r\n for j in range(disparity.shape[1]):\r\n if disparity[i,j]>0:\r\n disparity[i,j] = -1 if j-disparity[i,j]<=0 else disparity[i,j]\r\n new_frames.append(disparity)\r\n return new_frames", "title": "" }, { "docid": "b0bbf0ae8a4abe347b3c1642f7488813", "score": "0.4967618", "text": "def add_frame_shift(self, handle, basis):\n angle, axis = angle_and_axis(basis)\n\n if angle == 0:\n axis = matrix.col((0, 0, 1))\n\n if basis.include_translation:\n translation = basis.translation\n else:\n translation = matrix.col((0, 0, 0))\n\n axis = IMGCIF_TO_MCSTAS * axis\n translation = IMGCIF_TO_MCSTAS * translation\n\n self.create_vector(\n handle,\n os.path.basename(basis.name),\n angle,\n depends_on=basis.depends_on,\n equipment=\"detector\",\n equipment_component=basis.equipment_component,\n transformation_type=\"rotation\",\n units=\"deg\",\n vector=axis,\n offset=translation,\n offset_units=\"mm\",\n )", "title": "" }, { "docid": "ad418349d2c5b2fd8858657645048e38", "score": "0.49637097", "text": "def getWarp(img, Homograph):\n\n out_size = (1200,800)\n\n warpedImg = cv2.warpPerspective(img, Homograph, out_size)\n\n warpedImg = np.uint8(warpedImg)\n\n return warpedImg", "title": "" }, { "docid": "6ea44cf96c8eed80e656e2ddc5d51a63", "score": "0.49542364", "text": "def preprocess(self):\n\n x = self.image_size[0]\n y = self.image_size[1]\n offset = self.offset\n\n # Corner points in processed image, where frame will be transformed to\n pts = np.float32([[offset, offset], [offset + x, offset],\n [offset, offset + y], [offset + x, offset + y]])\n\n # creates an perspective transform and applies it to the images,\n # in order to obtain a \"top-down\" view of the images\n M = cv2.getPerspectiveTransform(self.cornerPoints, pts)\n\n self.frames_processed = []\n for img in self.frames:\n self.frames_processed.append(cv2.warpPerspective(\n img, M, (y + 2 * offset, x + 2 * offset)))\n\n # Extract specified color plane if image is not grayscale\n if len(np.shape(self.frames_processed)) >= 4:\n for i in range(len(self.frames_processed)):\n self.frames_processed[i] = cv2.split(\n self.frames_processed[i])[self.color]\n\n 
# Apply a min. and max. threshold\n for i in range(len(self.frames_processed)):\n retval, self.frames_processed[i] = cv2.threshold(\n self.frames_processed[i], self.min_threshold, 255, cv2.THRESH_TOZERO)\n retval, self.frames_processed[i] = cv2.threshold(\n self.frames_processed[i], self.max_threshold, 255, cv2.THRESH_TOZERO_INV)\n\n # Apply a circular mask to cut off parts not on the foil\n mask = np.zeros(shape=self.frames_processed[0].shape, dtype=\"uint8\")\n center = (np.asarray(\n np.shape(self.frames_processed[0])) / 2).astype(int)\n circ = cv2.circle(img=mask, center=(center[0], center[\n 1]), radius=self.radius, color=[255, 255, 255], thickness=-1)\n\n for i in range(len(self.frames_processed)):\n self.frames_processed[i] = cv2.bitwise_and(\n src1=self.frames_processed[i], src2=circ)\n\n # convert from row-major to column-major ordering\n for i in range(len(self.frames_processed)):\n # self.frames_processed[i] = self.frames_processed[i].T\n self.frames_processed[i] = np.flip(self.frames_processed[i], 0)", "title": "" }, { "docid": "8d7c3b4d74baf2c6cf2c0f46bc34e6ee", "score": "0.49517193", "text": "def projectionW(self):\n\n NW=self.NW\n NLmax=self.NLmax\n wB=self.wB\n zFX,zFG=auxF.gaussianInt([-1,1],30)\n cScale=1.0\n\n scaleDerv=np.zeros((NW,NW,NW,NW))\n for i in range(NW):\n for j in range(NW):\n scaleTemp1=np.zeros((NW,NW))\n scaleTemp2=np.zeros((NW,NW))\n for k in range(1,NW):\n intG=(zFX**2)*auxF.freqExpansion(zFX,2*k)-zFX*auxF.freqExpansion(zFX,2*k-1)\n scaleTemp1[k,j]=2*k*np.sum(zFG*intG*auxF.freqExpansion(zFX,2*i))\n\n intG=(zFX**2)*auxF.freqExpansion(zFX,2*k)-zFX*auxF.freqExpansion(zFX,2*k-1)\n scaleTemp2[i,k]=2*k*np.sum(zFG*intG*auxF.freqExpansion(zFX,2*j))\n scaleDerv[:,:,i,j]=-(scaleTemp1+scaleTemp2)\n self.scaleDerv=scaleDerv\n\n wTransPHtoPP=np.zeros((NLmax,NW,NW,len(wB),NW,NW))\n wTransPHEtoPP=np.zeros((NLmax,NW,NW,len(wB),NW,NW))\n wTransPPtoPH=np.zeros((NLmax,NW,NW,len(wB),NW,NW))\n wTransPHEtoPH=np.zeros((NLmax,NW,NW,len(wB),NW,NW))\n wTransPPtoPHE=np.zeros((NLmax,NW,NW,len(wB),NW,NW))\n wTransPHtoPHE=np.zeros((NLmax,NW,NW,len(wB),NW,NW))\n\n wFX=auxF.backMap(zFX,cScale)\n wP1=np.tile(wFX[:,np.newaxis,np.newaxis],(1,len(wFX),len(wB)))\n wP2=np.tile(wFX[np.newaxis,:,np.newaxis],(len(zFX),1,len(wB)))\n wBE=np.tile(wB[np.newaxis,np.newaxis,:],(len(zFX),len(zFX),1))\n\n wFXE=np.tile(wFX[:,np.newaxis],(1,len(wB)))\n wBEe=np.tile(wB[np.newaxis,:],(len(zFX),1))\n\n zP1=np.tile(zFX[:,np.newaxis,np.newaxis],(1,len(zFX),len(wB)))\n zP2=np.tile(zFX[np.newaxis,:,np.newaxis],(len(zFX),1,len(wB)))\n zPE=np.tile(zFG[:,np.newaxis,np.newaxis],(1,len(zFX),len(wB)))\n zPE=zPE*np.tile(zFG[np.newaxis,:,np.newaxis],(len(zFX),1,len(wB)))\n\n lTempXtoY1=np.zeros((len(zFX),len(zFX),len(wB),NLmax))\n lTempXtoY2=np.zeros((len(zFX),len(zFX),len(wB),NLmax))\n\n for i in range(NLmax):\n zT=auxF.forMap((wP1+wP2),cScale)\n lTempXtoY1[...,i]=zPE*auxF.freqExpansion(zT,2*i)\n\n zT=auxF.forMap((wP2-wP1),cScale)\n lTempXtoY2[...,i]=zPE*auxF.freqExpansion(zT,2*i)\n\n lTempP1=np.zeros((len(zFX),len(zFX),len(wB),NW))\n lTempP2=np.zeros((len(zFX),len(zFX),len(wB),NW))\n lTempP3=np.zeros((len(zFX),len(zFX),len(wB),NW))\n lTempP4=np.zeros((len(zFX),len(zFX),len(wB),NW))\n\n lTempW1=np.zeros((len(zFX),len(zFX),len(wB),NW))\n lTempWn=np.zeros((len(zFX),len(wB),NW))\n\n for i in range(NW):\n zT=auxF.forMap(0.5*(wBE-(wP2-wP1)),cScale)\n lTempP1[...,i]=auxF.freqExpansion(zT,2*i)\n\n zT=auxF.forMap(0.5*(wBE+(wP2-wP1)),cScale)\n lTempP2[...,i]=auxF.freqExpansion(zT,2*i)\n\n 
zT=auxF.forMap(0.5*(wBE-(wP1+wP2)),cScale)\n lTempP3[...,i]=auxF.freqExpansion(zT,2*i)\n\n zT=auxF.forMap(0.5*(wBE+(wP1+wP2)),cScale)\n lTempP4[...,i]=auxF.freqExpansion(zT,2*i)\n\n lTempW1[...,i]=auxF.freqExpansion(zP1,2*i)\n lTempWn[...,i]=auxF.freqExpansion(auxF.forMap(wFXE,cScale),2*i)\n \n wTemp1=np.zeros((len(zFX),NW,NW,len(wB),NW))\n wTemp2=np.zeros((len(zFX),NW,NW,len(wB),NW))\n for i in range(NLmax):\n for j in range(NW):\n for k in range(NW):\n for l in range(NW):\n wTemp1[:,j,k,:,l]=np.sum(lTempXtoY1[...,i]*lTempP1[...,j]*lTempP2[...,k]*lTempW1[...,l],axis=0)\n wTemp2[:,j,k,:,l]=np.sum(lTempXtoY2[...,i]*lTempP3[...,j]*lTempP4[...,k]*lTempW1[...,l],axis=0)\n \n for j in range(NW):\n intG=lTempWn[...,j]\n intG=np.tile(intG[:,np.newaxis,np.newaxis,:,np.newaxis],(1,NW,NW,1,NW))\n wTransPHtoPP[i,:,:,:,:,j]=np.sum(intG*wTemp1,axis=0)\n wTransPHEtoPP[i,:,:,:,:,j]=np.sum(intG*wTemp2,axis=0)\n wTransPPtoPH=wTransPHtoPP[:]\n for j in range(NW):\n wTransPHEtoPH[:,j,:,:,:,:]=wTransPHEtoPP[:,j,:,:,:,:]\n wTransPPtoPHE[:,j,:,:,:,:]=wTransPHtoPP[:,j,:,:,:,:]\n wTransPHtoPHE[:,j,:,:,:,:]=wTransPHEtoPP[:,j,:,:,:,:]\n\n self.wTransPHtoPP=wTransPHtoPP\n self.wTransPHEtoPP=wTransPHEtoPP\n self.wTransPPtoPH=wTransPPtoPH\n self.wTransPHEtoPH=wTransPHEtoPH\n self.wTransPPtoPHE=wTransPPtoPHE\n self.wTransPHtoPHE=wTransPHtoPHE", "title": "" }, { "docid": "3248b419e6d8d540d2fe80fe779b3801", "score": "0.49496794", "text": "def image_warp(img, reversed=False, box=False):\n # Define four source coordinates (calibration box)\n src = np.array([[689,450],\n [1038,675], \n [280,675],\n [594,450]], dtype=np.float32)\n \n # Four desired or warped (dst - destination) points\n offset = 280 \n dst = np.float32([[1279-offset, 0],\n [1279-offset, 719],\n [offset, 719],\n [offset, 0]])\n \n if reversed == False:\n # Compute the perspective transform, M\n M = cv2.getPerspectiveTransform(src, dst) \n warped_img = cv2.warpPerspective(img, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)\n if box == True:\n cv2.polylines(img, np.int32([src]), True, (255,0,255),thickness=2)\n cv2.polylines(warped_img, np.int32([dst]), True, (255,0,255),thickness=4)\n # Returns warped image - uses linear interpolation\n return warped_img\n \n elif reversed == True:\n # Compute the inverse to unwarp the image by swapping the input parameters\n Minv = cv2.getPerspectiveTransform(dst, src)\n # Returns unwarped image - uses linear interpolation\n unwarped_img = cv2.warpPerspective(img, Minv, img.shape[1::-1], flags=cv2.INTER_LINEAR)\n return unwarped_img, Minv\n \n return None", "title": "" }, { "docid": "a32c9c6fbbcac70233bb5d32906a34fc", "score": "0.494236", "text": "def warp_xyz_PANDAS(xyz, inprj, outprj):\n\n\tinprj=pyproj.Proj(inprj)\n\toutprj=pyproj.Proj(outprj) \n\txyz['x_warp'], xyz['y_warp']=pyproj.transform(inprj, outprj, xyz['x'].values, xyz['y'].values)\n\n\treturn xyz", "title": "" }, { "docid": "22709964b848803afdfd18e34788f2bc", "score": "0.49386546", "text": "def forward(self):\n self.flow_fields, _ = self.netFlow(self.from_img, self.from_kpt, self.to_kpt)\n _, _, H, W = self.flow_fields[-1].size()\n from_img = F.interpolate(self.from_img, (H,W))\n self.fake_B = self.extractor(from_img, self.flow_fields[-1])\n _, _, H, W = self.to_img.size()\n self.fake_B = F.interpolate(self.fake_B, (H,W))", "title": "" }, { "docid": "410a18f048251f84896848e080a3a096", "score": "0.49291277", "text": "def trapzWarp(pic,cx,cy,ismask=False):\r\n Y,X = pic.shape[:2]\r\n src = np.array([[0,0],[X,0],[X,Y],[0,Y]])\r\n dst = 
np.array([[cx*X,cy*Y],[(1-cx)*X,cy*Y],[X,Y],[0,Y]])\r\n tform = tf.ProjectiveTransform()\r\n tform.estimate(src,dst)\r\n im = tf.warp(pic, tform.inverse, output_shape=(Y,X))\r\n return im if ismask else (im*255).astype('uint8')", "title": "" }, { "docid": "2ee5d0f1d5902a3a511d9f9ad95436e9", "score": "0.49290416", "text": "def anonymise_images(frames, nose_points, right_ear_points, left_ear_points):\n\n padx = 100\n pady = 80\n nose_x = 0\n nose_y = 0\n for idx, path in enumerate(frames):\n # print(len(frames))\n # print(frames)\n\n frame = Image.open(path)\n if nose_points[idx][0] != 0 and nose_points[idx][1] != 0:\n nose_x = nose_points[idx][0]\n nose_y = nose_points[idx][1]\n # print(nose_x, nose_y)\n\n elif left_ear_points[idx][0] != 0 and left_ear_points[idx][1] != 0:\n nose_x = left_ear_points[idx][0]\n nose_y = left_ear_points[idx][1]\n elif right_ear_points[idx][0] != 0 and right_ear_points[idx][1] != 0:\n nose_x = right_ear_points[idx][0]\n nose_y = right_ear_points[idx][1]\n else:\n # No point available - check previous point\n try:\n if nose_points[idx - 1][0] != 0 and nose_points[idx - 1][1] != 0:\n nose_x = nose_points[idx - 1][0]\n nose_y = nose_points[idx - 1][1]\n # print(nose_x, nose_y)\n\n elif left_ear_points[idx - 1][0] != 0 and left_ear_points[idx - 1][1] != 0:\n nose_x = left_ear_points[idx - 1][0]\n nose_y = left_ear_points[idx][1]\n elif right_ear_points[idx - 1][0] != 0 and right_ear_points[idx - 1][1] != 0:\n nose_x = right_ear_points[idx - 1][0]\n nose_y = right_ear_points[idx - 1][1]\n except IndexError:\n nose_x = 0\n nose_y = 0\n\n point1 = nose_x - padx\n point2 = nose_y - pady\n if point1 < 0:\n point1 = 0\n if point2 < 0:\n point2 = 0\n\n nose = (int(point1), int(point2), int(nose_x + padx), int(nose_y + pady))\n cropped_frame = frame.crop(nose)\n blurred_frame = cropped_frame.filter(ImageFilter.GaussianBlur(radius=20))\n # print(nose)\n # sys.exit()\n frame.paste(blurred_frame, nose)\n\n outpath = \"{}\\\\{}.png\".format(\"blurred_images\", idx + 1)\n print(outpath)\n print(idx, \"idx\")\n print(\"path\", path)\n frame.save(outpath)\n if idx == 350:\n print(frames)\n print(len(frames))\n\n input_files = []\n print(\"Finished FOR loop\")\n for filename in glob.glob(\"{}\\\\*.png\".format(\"blurred_images\")):\n input_files.append(filename)\n # print(input_files)\n # Stupid python input_files.sort(key=lambda x: int(float(os.path.basename(x).split('.')[0][1:])))\n print(\"Start sorting\")\n input_files.sort(key=lambda f: int(re.sub('\\D', '', f)))\n print(\"Finished blurring\")\n return input_files", "title": "" }, { "docid": "3a8c0c56d12f0b3d7885e23a034b311b", "score": "0.49254176", "text": "def perspective_pipeline():\n files_to_transform = glob(\"output_images\\\\lines_*.jpg\")\n for file in files_to_transform:\n warped, unwarped, _, _ = \\\n perspective_transform(file)\n # warped = warped[2*warped.shape[0]//3:warped.shape[0],100:warped.shape[0]-100,:]\n # warped = cv2.resize(warped, (0, 0), fx=0.5, fy=0.5)\n cv2.imwrite('output_images\\\\warped_'+file.split('\\\\')[-1], warped)\n cv2.imwrite('output_images\\\\unwarped_'+file.split('\\\\')[-1], unwarped)\n return", "title": "" }, { "docid": "3a8c0c56d12f0b3d7885e23a034b311b", "score": "0.49254176", "text": "def perspective_pipeline():\n files_to_transform = glob(\"output_images\\\\lines_*.jpg\")\n for file in files_to_transform:\n warped, unwarped, _, _ = \\\n perspective_transform(file)\n # warped = warped[2*warped.shape[0]//3:warped.shape[0],100:warped.shape[0]-100,:]\n # warped = cv2.resize(warped, (0, 
0), fx=0.5, fy=0.5)\n cv2.imwrite('output_images\\\\warped_'+file.split('\\\\')[-1], warped)\n cv2.imwrite('output_images\\\\unwarped_'+file.split('\\\\')[-1], unwarped)\n return", "title": "" }, { "docid": "4ab9f0fe2671fda54f449161be19afa5", "score": "0.48990732", "text": "def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):\r\n output = F.interpolate(\r\n framewise_output.unsqueeze(1),\r\n size=(frames_num, framewise_output.size(2)),\r\n align_corners=True,\r\n mode=\"bilinear\").squeeze(1)\r\n\r\n return output", "title": "" }, { "docid": "0f746c13a4f44b35fa82954e80dcac25", "score": "0.48944145", "text": "def allPix2world(self):\n allpts = []\n \n current_depthPic = self.depthPic.copy()\n current_depthMsg = self.depthMsg\n self.cur_PicID = self.PicID\n \n # binarize the current depth picture (in order to select the contours of the cracks\n # on the picture)\n bin_current_depthPic = cv2.threshold(current_depthPic,\n 1e-5, 255,\n cv2.THRESH_BINARY)[1]\n \n tst = np.uint8( bin_current_depthPic.copy() )\n \n try:\n contours, hierarchy = cv2.findContours(tst, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n except:\n contours, hierarchy = cv2.findContours(tst, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1:]\n\n for i in range (len(contours)):\n # for each contour corresponding to a crack, return the barycenter of this crack, and express \n # it into the cave frame\n cnt = contours[i]\n M = cv2.moments(cnt)\n cx = int(M['m10']/(M['m00']+1*10**-5))\n cy = int(M['m01']/(M['m00']+1*10**-5))\n depth = current_depthPic[cy,cx]\n bary_fis = self.fromCamRef2caveRef(self.fromPix2camRef( (cx,cy,depth) ), current_depthMsg )\n if(bary_fis.point.z > 0.15): \n # odd condition, added to remove problems related to frame changes \n # (indicates points related to cracks at the robot position)\n \n allpts.append(bary_fis)\n return allpts", "title": "" }, { "docid": "f6199ec111f2fdc0dcbdda2c83121f0a", "score": "0.48817325", "text": "def cropToWorm(self):\n # measure bounding box\n self.boundingBox = cv2.boundingRect(self.wormContour) # x,y,w,h\n\n # crop frame\n self.bwWormImage = self.bwFrame[\n self.boundingBox[1]:self.boundingBox[1]+self.boundingBox[3],\n self.boundingBox[0]:self.boundingBox[0]+self.boundingBox[2]]\n self.grayWormImage = self.grayFrame[\n self.boundingBox[1]:self.boundingBox[1]+self.boundingBox[3],\n self.boundingBox[0]:self.boundingBox[0]+self.boundingBox[2]]", "title": "" }, { "docid": "3f7ae8acef7ee13f08a112b456ae21f6", "score": "0.4873585", "text": "def warpToOrigin(poly):\n x0, x1, y0, y1 = poly.boundingBox()\n poly.shift(-x0, -y0)", "title": "" }, { "docid": "bf1d33ef0d9fb98f6e063d5ff7b22bc1", "score": "0.48595637", "text": "def pan(self, rel):\n \n plog = logging.getLogger('fgt.pan').debug\n\n xpad = self.psz.x*self._overdraw//3\n ypad = self.psz.y*self._overdraw//3\n x, y = self.render_origin.x, self.render_origin.y\n\n plog(\"pan mvp={} rel={}\".format(self.map_viewport, rel))\n vpx = self.map_viewport.x - rel.x # viewport shift\n vpy = self.map_viewport.y + rel.y # in opengl window CS\n \n if vpx > 2 * xpad:\n delta = (vpx - 2 * xpad) // self.psz.x + 1 # overshoot in tiles\n plog(\"vpx>2xpad {}>{} delta={}\".format(vpx, 2*xpad, delta))\n x += delta # move origin thus many tiles\n vpx -= delta * self.psz.x # compensate\n elif vpx < xpad:\n delta = (xpad - vpx) // self.psz.x + 1\n plog(\"vpx<xpad {}<{} delta={}\".format(vpx, xpad, delta))\n x -= delta\n vpx += delta * self.psz.x\n\n # here the delta sign is inverted when moving render origin\n # because df grid 
coordinate system's y axis direction is \n # opposite to GL window one.\n if vpy > 2 * ypad:\n delta = (vpy - 2 * ypad) // self.psz.y + 1\n plog(\"vpy>2ypad {}>{} delta={}\".format(vpy, 2*ypad, delta))\n y -= delta\n vpy -= delta * self.psz.y\n elif vpy < ypad:\n delta = (ypad - vpy) // self.psz.y + 1\n plog(\"vpy<ypad {}<{} delta={}\".format(vpy, ypad, delta))\n y += delta\n vpy += delta * self.psz.y\n \n self.map_viewport = self.map_viewport._replace(x=vpx, y=vpy)\n self.render_origin = self.render_origin._replace(x=x, y=y)\n plog(\"pan result mvp={} ro={}\".format(self.map_viewport, self.render_origin))", "title": "" }, { "docid": "2edf6a90f30b8fe2decf8d8bb128761c", "score": "0.48549756", "text": "def format_frames(reader, writer, new_dim=(304, 304)):\n for frame in reader:\n frame = square_crop(frame)\n frame = cv2.resize(frame, dsize=new_dim, interpolation=cv2.INTER_CUBIC)\n writer.append_data(frame)\n return", "title": "" }, { "docid": "29cd16ad632548dde392ae1b98a6539e", "score": "0.48546916", "text": "def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):\n m = tf.params # tf._matrix is\n return skimage.transform._warps_cy._warp_fast(img, m, output_shape=output_shape, mode=mode, order=order)", "title": "" }, { "docid": "fd3353ecc193b75ddecb9a1250ddce35", "score": "0.485427", "text": "def _add_frame(self, frame):\n self.full_state[:, :, 3, :, : :] = self.full_state[:, :, 2, :, : :]\n self.full_state[:, :, 2, :, : :] = self.full_state[:, :, 1, :, : :]\n self.full_state[:, :, 1, :, : :] = self.full_state[:, :, 0, :, : :]\n self.full_state[:, :, 0, :, : :] = frame", "title": "" }, { "docid": "9f93b94213d79d0b51a34137751355c0", "score": "0.4853093", "text": "def warp(self, image):\n return image.warp_to_mask(self.template.mask, self.transform,\n warp_landmarks=False)", "title": "" }, { "docid": "155f05291f960f960bb374784047534f", "score": "0.48498905", "text": "def move_forward_world_scene(self):\n if self._stt == 20:\n self._world[:, :self._show_width + self._width * 3] = self._world[:, self._width:]\n self._world[:, self._show_width + self._width * 3:] = self.give_patch()\n self._stt = 0\n\n self._stt += 2\n return self._world[:, self._stt:self._show_width + self._stt]", "title": "" }, { "docid": "fdfc088028555592fe1fc11540fd41cc", "score": "0.484502", "text": "def _move(self, x, y, new_x, new_y):\n self[new_x][new_y] = self[x][y]\n self[x][y] = [0] * NUM_ENCODERS\n self[x][y][TIME_IDX] = self[new_x][new_y][TIME_IDX] # set time back to empty tile", "title": "" }, { "docid": "c8479c0c4423800278d219136d9f9fdf", "score": "0.48339567", "text": "def __transform_image(self, bin_thresholded_img,undist_img):\n bin_warped,color_warped = self.ti.warp_image(bin_thresholded_img,undist_img)\n return bin_warped,color_warped", "title": "" }, { "docid": "caf6d92968b8100f8dcabc8201371f23", "score": "0.4825236", "text": "def shift_frames(self, srcDataset, trgDataset, params):\n # TODO update docstrings\n \n useCaseToApply = params.cfgDict['useCaseToApply']\n roicolMod = params.cfgDict['roicolMod']\n p2c = params.cfgDict['p2c']\n voxShift = self.voxShift\n \n if p2c:\n print('\\n\\n', '-'*120)\n print(' Running of shift_frames():')\n print('-'*120, '\\n')\n \n #print(f'dir(self):\\n{dir(self)}\\n')\n \n # Have the source pixel arrays been resampled or not?\n #if hasattr(self, 'resPixarrByRoi'):\n if useCaseToApply in ['1', '2a', '2b']:\n #slcNum = srcDataset.slcNum # initial value\n if roicolMod == 'RTSTRUCT':\n #_2sIndsBy_ = list(srcDataset.c2sIndsByRoi)\n _2sIndsBy_ = 
list(srcDataset.f2sIndsByRoi) # 27/09/21\n pixarrBy_ = list(srcDataset.pixarrByRoi)\n else:\n _2sIndsBy_ = list(srcDataset.f2sIndsBySeg)\n pixarrBy_ = list(srcDataset.pixarrBySeg)\n \n #elif useCaseToApply in ['3a', '3b', '4a', '4b']: # 26/09/21\n else:\n # Use the resampled source slice number for srcSlcNum, and the\n # resampled pixel arrays for srcPixarrByRoi:\n #srcSlcNum = self.resSlcNum # does this still exist? (06/09/21)\n #srcPixarrByRoi = self.resPixarrByRoi # does this still exist? (06/09/21)\n #slcNum = self.slcNum # (06/09/21)\n if roicolMod == 'RTSTRUCT':\n #_2sIndsBy_ = list(self.c2sIndsByRoi) # (15/09/21)\n _2sIndsBy_ = list(self.f2sIndsByRoi) # 27/09/21\n pixarrBy_ = list(self.pixarrByRoi) # (15/09/21)\n else:\n _2sIndsBy_ = list(self.f2sIndsBySeg) # (06/09/21)\n pixarrBy_ = list(self.pixarrBySeg) # (06/09/21)\n \n if p2c:\n print(f' _2sIndsBy_ prior to shifting = {_2sIndsBy_}')\n print(f' voxShift that will be applied = {voxShift}')\n \n shifted_2SindsBy_ = [] # initial value\n shiftedPixarrBy_ = [] # initial value\n \n # Loop through each pixel array:\n #for s in range(len(srcPixarrByRoi)):\n for s in range(len(pixarrBy_)):\n # Proceed only if there is at least one frame in this pixel array:\n #if srcPixarrByRoi[s].shape[0]:\n if pixarrBy_[s].shape[0]:\n # Replace pixarrBy_[s] with the result of shifting the \n # in-plane elements and add the voxel shift along z to:\n \"\"\"\n shiftedPixarrBySeg.append(\n shift_frame(\n frame=srcPixarrByRoi[s], voxShift=voxShift\n )\n )\n \"\"\"\n shiftedPixarrBy_.append(\n shift_frame(\n frame=pixarrBy_[s], voxShift=voxShift\n )\n )\n \n #F = len(replacedF2SindsByRoi[s])\n F = len(_2sIndsBy_[s])\n \n # Shift the contour-/frame-to-slice indices by voxShift[2] to \n # account for the z-shift:\n \"\"\"\n shiftedF2SindsBySeg.append(\n [replacedF2SindsByRoi[s][i] + voxShift[2] for i in range(F)]\n #[f2sIndsBySeg[s][i] + voxShift[2] for i in range(F)]\n )\n \"\"\"\n shifted_2SindsBy_.append(\n [_2sIndsBy_[s][i] + voxShift[2] for i in range(F)]\n )\n \n if p2c:\n unique = get_unique_items(shiftedPixarrBy_[s])\n \n print(f' There are {len(unique)} unique items in',\n f'shiftedPixarrBy_[{s}] after shifting the frame')\n \n if p2c:\n print(' shifted_2SindsBy_ after shifting =',\n f'{shifted_2SindsBy_}')\n print('end of shift_frames\\n')\n print('-'*120)\n \n \"\"\"\n self.shiftedPixarrBySeg = shiftedPixarrBySeg\n self.shiftedF2SindsBySeg = shiftedF2SindsBySeg\n #self.pixarrBySeg = shiftedPixarrBySeg\n #self.f2sIndsBySeg = shiftedF2SindsBySeg\n \"\"\"\n if roicolMod == 'RTSTRUCT':\n #self.c2sIndsByRoi = shifted_2SindsBy_\n self.f2sIndsByRoi = shifted_2SindsBy_ # 27/09/21\n self.pixarrByRoi = shiftedPixarrBy_\n else:\n self.f2sIndsBySeg = shifted_2SindsBy_\n self.pixarrBySeg = shiftedPixarrBy_", "title": "" }, { "docid": "101b830bc76daf4de9386395de0325d6", "score": "0.48246372", "text": "def forward(self, x):\n x = f.interpolate(x, size=(int(self.out_frames / 4), 14, 14), mode='trilinear')\n x = self.conv3d_1x(x)\n x = self.conv3d_1y(x)\n\n x = f.interpolate(x, size=(int(self.out_frames / 2), 28, 28), mode='trilinear')\n x = self.conv3d_1a(x)\n x = self.conv3d_1b(x)\n\n x = f.interpolate(x, size=(self.out_frames, 56, 56), mode='trilinear')\n x = self.conv3d_2a(x)\n x = self.conv3d_2b(x)\n\n x = f.interpolate(x, size=(self.out_frames, 112, 112), mode='trilinear')\n x = self.conv3d_3a(x)\n x = self.conv3d_3b(x)\n\n return x", "title": "" }, { "docid": "131297fd4988dea0d0bf4aec28461f31", "score": "0.48235196", "text": "def run(self):\n rate = 
rospy.Rate(self._rate)\n t0 = rospy.Time.now().to_sec()\n while not rospy.is_shutdown():\n # get time ...\n t1 = rospy.Time.now().to_sec()\n dt = t1-t0\n t0 = t1\n\n # update map ...\n pts = []\n try:\n pos, _ = self._tf.lookupTransform(self._map_frame, self._drone_frame, rospy.Time(0))\n #print pos\n pts.append(pos[:2])\n except tf.Exception as e:\n rospy.loginfo_throttle(1.0, \"TF Lookup Failed: {}\".format(e))\n pts = self.m2px(pts, center=True)\n self._ex.update(pts, dt)\n if self._viz:\n cv2.imshow('viz', self._ex._map)\n cv2.waitKey(10)\n rate.sleep()", "title": "" }, { "docid": "c734a47665244e566e708fe6deeb23da", "score": "0.48208952", "text": "def project(self):\n self._image.clear()\n # self.cw.clear(self._image._orig, self._image._data, self._image.width, self._image.height)\n\n pr = lambda box: box.project(self._image)\n for box in self._boxes:\n pr(box)", "title": "" }, { "docid": "790c7873d52fd078e2b9074b6e1442ff", "score": "0.48191157", "text": "def warp_image(image, homography):\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)\n h, w, z = image.shape\n # calcul des offset\n p = np.array([[0, w, w, 0], [0, 0, h, h], [1, 1, 1, 1]])\n p_prime = np.dot(homography, p)\n\n yrow = p_prime[1] / p_prime[2]\n xrow = p_prime[0] / p_prime[2]\n ymin , xmin, ymax, xmax = min(yrow), min(xrow), max(yrow), max(xrow)\n\n # prise en compte des offset\n new_mat = np.array([[1, 0, -1 * xmin], [0, 1, -1 * ymin], [0, 0, 1]])\n homography = np.dot(new_mat, homography)\n\n # height and width of new image frame\n height = int(round(ymax - ymin))\n width = int(round(xmax - xmin))\n size = (width, height)\n # Do the warp\n warped = cv2.warpPerspective(src=image, M=homography, dsize=size)\n\n return warped, (int(xmin), int(ymin))", "title": "" }, { "docid": "9a0c2f53ea75b51139b2ecaef381503d", "score": "0.48174608", "text": "def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):\n output = F.interpolate(\n framewise_output.unsqueeze(1),\n size=(frames_num, framewise_output.size(2)),\n align_corners=True,\n mode=\"bilinear\").squeeze(1)\n\n return output", "title": "" }, { "docid": "9d84a90610a6989178a94a861acb12d3", "score": "0.48166803", "text": "def scale_side_windup(self):\n self.drive.move(-0.2, 0)", "title": "" }, { "docid": "77d415e61d23fc70c66d36c18ee85026", "score": "0.4812333", "text": "def stichImages(img1, img2, Homo_12):\r\n img_h1, img_w1, img_d1 = img1.shape\r\n\r\n img_h2, img_w2, img_d2 = img2.shape\r\n\r\n corners = np.array([[0,img_w2,0,img_w2],[0,0,img_h1,img_h1],[1,1,1,1]])\r\n\r\n warpedCorners = np.matmul(Homo_12, corners)\r\n\r\n warpedCorners = warpedCorners/warpedCorners[2,:]\r\n\r\n warp_corner = np.ceil(warpedCorners)\r\n\r\n cols = img2.shape[1]\r\n\r\n minrow = min(1,min(warp_corner[1,:]))\r\n\r\n maxcol = max(cols,max(warp_corner[0,:]))\r\n\r\n mincol = min(1,min(warp_corner[0,:]))\r\n\r\n W_out = 2000\r\n\r\n height = 2000\r\n\r\n out_size = (W_out, height)\r\n\r\n s = W_out / (maxcol-mincol)\r\n\r\n scaleM = np.array([[s,0,0],[0,s,0],[0,0,1]])\r\n\r\n transM = np.array([[1,0,0],[0,1,-minrow],[0,0,1]])\r\n\r\n M = np.matmul(scaleM,transM)\r\n\r\n\r\n img2Warped = cv2.warpPerspective(img2, np.matmul(M,Homo_12), out_size)\r\n\r\n img1Warped = cv2.warpPerspective(img1, np.matmul(scaleM,transM), out_size)\r\n\r\n\r\n\r\n mask1 = distance_transform_edt(img1Warped)\r\n\r\n mask2 = distance_transform_edt(img2Warped)\r\n\r\n\r\n\r\n result1 = np.multiply(img1Warped,mask1)\r\n\r\n result2 = np.multiply(img2Warped,mask2)\r\n\r\n\r\n\r\n pano_im = 
np.divide(np.add(result1, result2), np.add(mask1, mask2))\r\n\r\n pano_im = np.nan_to_num(pano_im)\r\n\r\n pano_im = np.uint8(pano_im)\r\n\r\n return pano_im", "title": "" }, { "docid": "3aaf600a205ae5c8bc5c8248065dcc77", "score": "0.4807913", "text": "def incrementFrame(self):\n currentFrame = self.camera.get(cv2.CAP_PROP_POS_FRAMES)\n self.setFrame(currentFrame + 1.8)", "title": "" }, { "docid": "cfc509a35f49e3272f5716992f91cbd3", "score": "0.48033518", "text": "def create_base_map(map_width=10000, dry_run=False):\n\n print(f\"Creating base map for {G.LOCALE}\")\n # Create an area a bit bigger than the data\n lat_max, lat_min = G.REGION['LAT_MAX'], G.REGION['LAT_MIN']\n lon_max, lon_min = G.REGION['LON_MAX'], G.REGION['LON_MIN'] \n\n # Setup gdalwarp args\n # Define the extent of the map in lon/lat\n te_arg = f\"-te {lon_min:.7f} {lat_min:.7f} {lon_max:.7f} {lat_max:.7f}\"\n t_srs_arg = f\"-t_srs '{G.PROJ4}'\"\n\n zoom = \"-oo ZOOM_LEVEL=16\"\n zoom = \"\"\n command = \"gdalwarp\"\n command += \" \" + zoom\n command += \" \" + t_srs_arg\n command += \" \" + te_arg\n command += \" \" + f\"-te_srs EPSG:4326 -ts {map_width} 0 -r bilinear\"\n command += \" \" + \"-of vrt\"\n command += f\" {G.MBTILES_PATH} /tmp/chart.vrt\"\n run_system_command(\"rm /tmp/chart.vrt\", dry_run=dry_run)\n run_system_command(command, dry_run=dry_run)\n\n # command = \"gdal_translate -co compress=LZW seattle.vrt seattle.tif\"\n command = \"gdal_translate\"\n command += \" -co COMPRESS=JPEG -co TILED=YES\"\n command += f\" /tmp/chart.vrt {G.BASE_MAP_PATH}\"\n run_system_command(f\"rm {G.BASE_MAP_PATH}\", dry_run=dry_run)\n run_system_command(command, dry_run=dry_run)\n\n command = \"gdaladdo --config COMPRESS_OVERVIEW JPEG --config INTERLEAVE_OVERVIEW PIXEL\"\n command += \" -r average\"\n command += f\" {G.BASE_MAP_PATH}\"\n command += \" 2 4\"\n run_system_command(command, dry_run=dry_run)", "title": "" }, { "docid": "d6934cedad0b4b5b155d1b2256464c27", "score": "0.48029196", "text": "def fixed_pixel_window(survey_filename):\n far_left = max(ra)\n far_right = min(ra)\n top = min(dec)\n bottom = max(dec)\n #expand window\n far_left += 0.5*u.arcminute\n far_right -= 0.5*u.arcminute\n top -= 0.5*u.arcminute\n bottom += 0.5*u.arcminute\n far_left,bottom = get_location(far_left,bottom,survey_filename)\n far_right,top = get_location(far_right,top,survey_filename)\n x = np.arange(far_left, far_right)\n y = np.arange(top,bottom)\n X,Y = np.meshgrid(x,y)\n return X,Y", "title": "" }, { "docid": "3444b1a63fc538931f75460a45861bef", "score": "0.47911578", "text": "def warp(self, input: BMImage|BMImageArray, matrix) -> BMImage|BMImageArray:\n pass", "title": "" }, { "docid": "d4b2d7bf0c62cb6ce0a1e11251e4c650", "score": "0.47910342", "text": "def transform_warp_impl(src: torch.Tensor, dst_pix_trans_src_pix: torch.Tensor,\n dsize_src: Tuple[int, int], dsize_dst: Tuple[int, int],\n grid_mode: str, padding_mode: str) -> torch.Tensor:\n dst_norm_trans_src_norm: torch.Tensor = src_norm_to_dst_norm(\n dst_pix_trans_src_pix, dsize_src, dsize_dst)\n\n src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)\n return homography_warp(src, src_norm_trans_dst_norm, dsize_dst, grid_mode, padding_mode)", "title": "" }, { "docid": "2d1d41e735a933755e662a65de5bd07c", "score": "0.4789899", "text": "def unwarpImage(img, nx, ny, cam, dst, debug = False):\n\n ## 1. UNDISTORT \n undist = cv2.undistort(img, cam.cameraMatrix, cam.distCoeffs, None, cam.cameraMatrix)\n \n ## 2. 
Convert to Gray Scale\n imgGray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)\n\n ## 3. Find corners\n ret, corners = cv2.findChessboardCorners(imgGray, (nx, ny), cv2.CALIB_CB_ADAPTIVE_THRESH)\n if ret:\n\n cornerCount = corners.shape[0]\n if (cornerCount % nx) == 0:\n ## 4a. if corners found Draw corners \n cv2.drawChessboardCorners(undist, (nx, ny), corners, ret) \n \n ## 4b. define 4 source points \n src = np.float32([corners[0,0], corners[nx-1,0], corners[nx*(ny-1),0], corners[nx*ny-1,0]])\n\n ## 4c. define 4 destination points\n ## passed as parameters \n\n ## 4d. get M tranformation matrix \n M = cv2.getPerspectiveTransform(src, dst)\n\n if debug: \n print(' img shape: ',undist.shape, ' Corners detected: ', corners.shape[0], \n ' M:', M.shape)\n print( f' src point (corner {0:2d}, 0): {src[0]} tranformed to dst point: {dst[0]}')\n print( f' src point (corner {nx-1:2d}, 0): {src[1]} tranformed to dst point: {dst[1]}')\n print( f' src point (corner {nx*(ny-1):2d}, 0): {src[2]} tranformed to dst point: {dst[2]}')\n print( f' src point (corner {nx*ny-1:2d}, 0): {src[3]} tranformed to dst point: {dst[3]}')\n else:\n print(' Not all corners have been detected!! nx:', nx, ' ny: ', ny, ' corners detected: ', cornerCount)\n M = np.eye(3)\n else:\n M = np.eye(3)\n \n ## 4e. warp image to top-down view\n warped = cv2.warpPerspective(undist, M, undist.shape[1::-1], flags=cv2.INTER_LINEAR)\n return undist, warped, M", "title": "" }, { "docid": "3e153107f9ac3e3cd5cb17073a5f27a2", "score": "0.4787746", "text": "def part_5a():\n\n #I1(x0) = I0(x0 - tu0)\n I_0 = cv2.imread('input_images/TestSeq/Shift0.png', 0) / 1.\n I_1 = cv2.imread('input_images/TestSeq/ShiftR10.png', 0) / 1.\n\n\n k_size = 15 # TODO: Select a kernel size\n k_type = \"uniform\" # TODO: Select a kernel type\n sigma = 0 # TODO: Select a sigma value if you are using a gaussian kernel\n interpolation = cv2.INTER_CUBIC # You may try different values\n border_mode = cv2.BORDER_REFLECT101 # You may try different values\n\n im_array = []\n t_values = np.arange(0, 1.2, .2)\n\n U, V = ps4.optic_flow_lk(I_0, I_1, k_size=k_size, k_type=k_type, sigma=sigma)\n\n for val in t_values:\n print val\n scaled_U = U*val\n scaled_V = V*val\n warped = ps4.warp(I_0, U=-scaled_U, V=-scaled_V, interpolation=interpolation, border_mode=border_mode)\n cv2.imwrite(str(val)+'.png', warped)\n im_array.append(warped)\n\n r1 = np.concatenate((im_array[0], im_array[1], im_array[2]), axis=1)\n r2 = np.concatenate((im_array[3], im_array[4], im_array[5]), axis=1)\n\n complete = np.concatenate((r1, r2), axis=0)\n\n cv2.imwrite('output/ps4-5-1-a-1.png', complete.astype(np.int16))\n print 'FINISHED PART_5A'", "title": "" }, { "docid": "be86f3fa6d46d5e3004d46e5423ee9cb", "score": "0.47833246", "text": "def stitch_four(size, x, z, out_path, in_path):\n nw_path = in_path + '/%i,%i.png' % (x, z)\n sw_path = in_path + '/%i,%i.png' % (x, z+1)\n ne_path = in_path + '/%i,%i.png' % (x+1, z)\n se_path = in_path + '/%i,%i.png' % (x+1, z+1)\n\n out = Image.new('RGBA', (2*size, 2*size))\n\n if os.path.isfile(nw_path):\n out.paste(im=Image.open(nw_path), box=(0, 0))\n if os.path.isfile(sw_path):\n out.paste(im=Image.open(sw_path), box=(0, size))\n if os.path.isfile(ne_path):\n out.paste(im=Image.open(ne_path), box=(size, 0))\n if os.path.isfile(se_path):\n out.paste(im=Image.open(se_path), box=(size, size))\n\n out.thumbnail((256, 256))#, Image.NEAREST)\n out.save(out_path, 'PNG')", "title": "" }, { "docid": "459caf7420221068005e6e842812972f", "score": "0.4764356", "text": "def 
beam_to_frame(self, x, y, z, r):\n if self.sample.origin.get() == \"edge\":\n return self.current_frame.beam_to_frame(x, y, z, r)\n elif self.sample.origin.get() == \"center\":\n _x, _y, z, r = self.current_frame.beam_to_frame(x, y, z, r)\n x = _x - self.current_frame.width/2.0\n y = _y - self.current_frame.height/2.0\n return x, y, z, r", "title": "" }, { "docid": "23273f76fc3e30e7c86e9d7105a24dad", "score": "0.4759625", "text": "def cutout_30mas_v1(h_seg, v_drz):\n hdr1 = pyfits.getheader(h_seg)\n hdr2 = pyfits.getheader(v_drz)\n nx1 = hdr1['naxis1']\n ny1 = hdr1['naxis2']\n # Now calculate the corners of the cutout in the 30mas frame; 1=60mas frame,\n # 2 = 30mas frame\n wcs1 = pywcs.WCS(hdr1)\n wcs2 = pywcs.WCS(hdr2)\n sky00 = wcs1.wcs_pix2sky([[1, 1]], 1)\n corner00 = np.floor(wcs2.wcs_sky2pix(sky00, 1)).astype('int')[0]\n sky11 = wcs1.wcs_pix2sky([[nx1, ny1]], 1)\n corner11 = np.ceil(wcs2.wcs_sky2pix(sky11, 1)).astype('int')[0]\n xlo, ylo = corner00\n xhi, yhi = corner11\n print \"xlo, xhi, ylo, yhi\", xlo, xhi, ylo, yhi\n output = os.path.splitext(v_drz)[0] + '_center.fits'\n v_drz_array = pyfits.getdata(v_drz)\n v_drz_hdr = pyfits.getheader(v_drz)\n v_drz_hdr['crpix1'] = v_drz_hdr['crpix1'] - xlo\n v_drz_hdr['crpix2'] = v_drz_hdr['crpix2'] - ylo\n v_drz_array_new = v_drz_array[ylo:yhi+1, xlo:xhi+1]\n pyfits.append(output, v_drz_array_new, v_drz_hdr)\n # iraf.imcopy(\"%s[%d:%d,%d:%d]\" % (v_drz, xlo, xhi, ylo, yhi), output)", "title": "" }, { "docid": "0cc609715c99ffe530ccbfb60b016a63", "score": "0.4749802", "text": "def event_m50_36_x0(z162=_, z163=0, z164=_, z146=_):\r\n \"\"\"State 0,1: Poly drama warp\"\"\"\r\n PlayCutsceneAndWarpToMap(z162, z163, z164, z146, 0)\r\n assert CutsceneWarpEnded() != 0\r\n \"\"\"State 2: End state\"\"\"\r\n return 0", "title": "" }, { "docid": "eec539598b3ebcedc7aaac27eb537ffa", "score": "0.47493", "text": "def adjust_sprite_positions(self):\r\n self.adjust_juwico_position()", "title": "" }, { "docid": "ce4e3869521d985b70e4c41f685feac8", "score": "0.47344378", "text": "def fix_shifts(self):\n for i in range(self.size):\n self.samples[i].fix_shifts(self.cycleMeans)", "title": "" }, { "docid": "72357799b641767221768b5a05cf812e", "score": "0.47308883", "text": "def wavelet_transform(im_cont, im_dir):\n\n # 4.1 Convert to 8-bit\n print(\"4.1 converting greyscale image to 8-bit\")\n im_cont_8 = im_cont.astype(np.uint8)\n\n\n # 4.2 define images to be generated\n print(\"4.2 defining images to be generated\")\n titles = ['Approximation', ' Horizontal detail', 'Vertical detail', 'Diagonal detail']\n cA, cD = pywt.dwt([1, 2, 3, 4], 'db1')\n\n\n # 4.3 get coefficient matrices\n print(\"4.3 getting coefficient matrices\")\n LL, (LH, HL, HH) = pywt.dwt2(im_cont_8, 'haar')\n\n\n # 4.4 generate figure for coefficient matrices\n fig = plt.figure(figsize=(12, 3))\n for i, a in enumerate([LL, LH, HL, HH]):\n ax = fig.add_subplot(1, 4, i + 1)\n ax.imshow(a, interpolation=\"nearest\", cmap=plt.cm.gray)\n ax.set_title(titles[i], fontsize=10)\n ax.set_xticks([])\n ax.set_yticks([])\n\n fig.tight_layout()\n plt.savefig(im_dir + \"4. 
Wavelet Transform.png\")\n plt.close()\n\n return LL, (LH, HL, HH)", "title": "" }, { "docid": "6c35dfffd3a1f20bf6f951ceadefd9fd", "score": "0.4727692", "text": "def warp(self, x, flo):\n\n B, C, H, W = x.shape\n # mesh grid\n xx_pd = fluid.layers.range(0, W, 1, 'float32')\n xx_pd = fluid.layers.reshape(xx_pd, shape=[1, -1])\n xx_pd = fluid.layers.expand(x=xx_pd, expand_times=[H, 1])\n xx_pd = fluid.layers.reshape(xx_pd, shape=[1, 1, H, W])\n xx_pd = fluid.layers.expand(x=xx_pd, expand_times=[B, 1, 1, 1])\n\n yy_pd = fluid.layers.range(0, H, 1, 'float32')\n yy_pd = fluid.layers.reshape(yy_pd, shape=[-1, 1])\n yy_pd = fluid.layers.expand(x=yy_pd, expand_times=[1, W])\n yy_pd = fluid.layers.reshape(x=yy_pd, shape=[1, 1, H, W])\n yy_pd = fluid.layers.expand(x=yy_pd, expand_times=[B, 1, 1, 1])\n grid_pd = fluid.layers.concat(input=[xx_pd, yy_pd], axis=1)\n flo_pd = flo\n vgrid_pd = fluid.layers.elementwise_add(grid_pd, flo_pd)\n vgrid_pd_0 = 2.0 * fluid.layers.slice(vgrid_pd, axes=[1], starts=[0], ends=[1]) / max(W - 1, 1) - 1.0\n vgrid_pd_1 = 2.0 * fluid.layers.slice(vgrid_pd, axes=[1], starts=[1], ends=[2]) / max(H - 1, 1) - 1.0\n vgrid_pd = fluid.layers.concat(input=[vgrid_pd_0, vgrid_pd_1], axis=1)\n vgrid_pd = fluid.layers.transpose(vgrid_pd, [0, 2, 3, 1])\n output = fluid.layers.grid_sampler(name='grid_sample', x=x, grid=vgrid_pd)\n\n mask = fluid.layers.zeros_like(x)\n mask = mask + 1.0\n mask = fluid.layers.grid_sampler(name='grid_sample', x=mask, grid=vgrid_pd)\n mask_temp1 = fluid.layers.cast(mask < 0.9990, 'float32')\n mask = mask * (1 - mask_temp1)\n mask = fluid.layers.cast(mask > 0, 'float32')\n outwarp = fluid.layers.elementwise_mul(output, mask)\n\n return outwarp", "title": "" }, { "docid": "71ceec62064c17841a7b181724735f2a", "score": "0.47227576", "text": "def _distribute_frames(self) -> None:\n i = 0\n while self._process_imgs:\n ret, val = self._cam_reader.get_next_new_frame()\n if ret:\n (frame, timestamp, num_writes) = val\n self._hand_out_frame(frame, timestamp, i, num_writes)\n i = self._increment_counter(i)\n else:\n tsleep(.001)", "title": "" }, { "docid": "7ce7e9a1d510603734648c1fa8475196", "score": "0.471986", "text": "def compute_warp_crossings(self):\n raise NotImplementedError", "title": "" }, { "docid": "56c95c837071468f350d240d8a79ac0f", "score": "0.47139332", "text": "def warp(image, U, V):\n\n # TODO: Your code here\n warped = np.zeros(image.shape)\n\n warped[0] = image[0]\n for y in range(0, image.shape[0]):\n for x in range(0, image.shape[1]):\n try:\n warped[y][x] = image[y + V[y, x]][x + U[y, x]]\n except:\n do = 1\n # print \"whoops\"\n return warped", "title": "" }, { "docid": "3fe7693eed94f6f6af2f2f13244c4417", "score": "0.4713564", "text": "def splat_forward(forward, frame0, frame1, splatty, \n row, col, t=0.5):\n h = frame0.shape[0]\n w = frame0.shape[1]\n motion = forward[row][col]\n # Scale to cartesian space\n ux = motion[0]/w\n uy = -motion[1]/h\n (x, y) = image_to_cartesian(frame0, col, row)\n # xp and yp are the coordinates in the interpolated image\n xp = x + t*ux\n yp = y + t*uy\n splats = splat(splatty, xp, yp)\n for s in splats:\n if check_indices(splatty, s[0], s[1]):\n old = splatty[s[1]][s[0]]\n if np.isnan(old[0]) or np.isnan(old[1]):\n splatty[s[1]][s[0]] = motion\n else:\n old_ptc = follow_intensity(frame0, frame1,\n old, s[1], s[0], t)\n new_ptc = follow_intensity(frame0, frame1,\n motion, s[1], s[0], t)\n if (new_ptc < old_ptc):\n splatty[s[1]][s[0]] = motion", "title": "" }, { "docid": "db4859a9895f26aac0e75427e2a46da1", "score": 
"0.47110748", "text": "def _WarpRaster(self, dst, dst_ndv, src, src_ndv, resampling):\n vrt_savedir = os.path.join(self.workdir, \"Intermediate VRTs\")\n if not os.path.exists(vrt_savedir):\n os.makedirs(vrt_savedir)\n\n src_fname = os.path.basename(src.GetDescription())\n vrt_fname = os.path.splitext(src_fname)[0] + \"_WARPED_\" + resampling + \".vrt\"\n savepath = os.path.join(vrt_savedir, vrt_fname)\n\n resampling_methods = {\"average\": 5, \"cubspline\": 3, \"nearest\": 0}\n\n warp_options = gdal.WarpOptions(\n format=\"VRT\",\n outputBounds=self.BBox[\"coords\"],\n outputBoundsSRS=self.BBox[\"SRS\"],\n srcSRS=osr.SpatialReference(wkt=src.GetProjection()),\n dstSRS=osr.SpatialReference(wkt=dst.GetProjection()),\n xRes=dst.GetGeoTransform()[1],\n yRes=abs(dst.GetGeoTransform()[5]),\n srcNodata=src_ndv,\n dstNodata=dst_ndv,\n resampleAlg=resampling_methods[resampling],\n )\n return gdal.Warp(savepath, src, options=warp_options)", "title": "" }, { "docid": "77de8efee2b98170fbe38fbe17d26392", "score": "0.47045562", "text": "def _rii_dra(self):\n # create new cairo context to draw to pixmap image buffer\n context = cairo.Context(self._surface)\n \n # set transform from heks reference frame to x reference frame\n context.transform(self._ffraann_eewt)\n \n # create kr_sr with new context and pass it to handle draw method\n # to allow subclass to fill draw buffer\n kr_sr = Kr_sr(context)\n self.hand_l_dra(kr_sr)\n \n # copy buffer to screen \n self._x_con.core.CopyArea(\n self._pixmap,\n self._window,\n self._xgc,\n 0, 0, 0, 0, \n self._sish[1], \n self._sish[0])\n\n # flush requests to x server\n self._x_con.flush()", "title": "" }, { "docid": "f6dad2b4a3f1deab691933a6a4d88702", "score": "0.46964577", "text": "def draw_main_stickers(self, frame):\r\n for x,y in self.stickers:\r\n cv2.rectangle(frame, (x,y), (x+30, y+5), (255,255,255), 1)", "title": "" }, { "docid": "2f285fe1d41b74b6f412d9214cd3cfbe", "score": "0.46852785", "text": "def imaging(input_model, reference_files):\n detector = cf.Frame2D(name='detector', axes_order=(0, 1), unit=(u.pix, u.pix))\n focal = cf.Frame2D(name='focal', axes_order=(0, 1), unit=(u.arcmin, u.arcmin))\n sky = cf.CelestialFrame(reference_frame=coord.ICRS())\n distortion = imaging_distortion(input_model, reference_files)\n fitswcs_transform = pointing.create_fitswcs_transform(input_model)\n pipeline = [(detector, distortion),\n (focal, None)]\n #(sky, None)]\n return pipeline", "title": "" }, { "docid": "2f285fe1d41b74b6f412d9214cd3cfbe", "score": "0.46852785", "text": "def imaging(input_model, reference_files):\n detector = cf.Frame2D(name='detector', axes_order=(0, 1), unit=(u.pix, u.pix))\n focal = cf.Frame2D(name='focal', axes_order=(0, 1), unit=(u.arcmin, u.arcmin))\n sky = cf.CelestialFrame(reference_frame=coord.ICRS())\n distortion = imaging_distortion(input_model, reference_files)\n fitswcs_transform = pointing.create_fitswcs_transform(input_model)\n pipeline = [(detector, distortion),\n (focal, None)]\n #(sky, None)]\n return pipeline", "title": "" }, { "docid": "8715840518d1fa150cb29813340dc5b7", "score": "0.4683782", "text": "def resize(self):\n self.positiveSamples = scale_images(self.positiveSamples, 24, 24)\n self.negativeSamples = scale_images(self.negativeSamples, 24, 24)", "title": "" }, { "docid": "1084f0e92db427fa5965f6b8639fbc0e", "score": "0.4683137", "text": "def constrain_roi(self, frame):\n h, w = frame.shape\n frame = frame[np.ceil(h * 0.4):, np.ceil(w * 0.45):np.ceil(w * 0.92)]\n return frame", "title": "" }, { "docid": 
"9d995ad321a5a63b3d0b1f4b43597a40", "score": "0.4680839", "text": "def forward(self, x, im_sizes, image_offset,\n gt_boxes=None, gt_classes=None, gt_rels=None, *arg):\n\n raise NotImplementedError('forward')", "title": "" }, { "docid": "3b96b322d954909722957e221719660d", "score": "0.46782768", "text": "def _make_forward_stage(self, define):\n size=1\n if 'FORWARD_SIZE' in define:\n size= define['FORWARD_SIZE']\n\n root = NodePath(\"forwardRoot\")\n tex = Texture()\n tex.set_wrap_u(Texture.WM_clamp)\n tex.set_wrap_v(Texture.WM_clamp)\n aux_tex = Texture()\n aux_tex.set_wrap_u(Texture.WM_clamp)\n aux_tex.set_wrap_v(Texture.WM_clamp)\n buff_size_x = int(base.win.get_x_size()*size)\n buff_size_y = int(base.win.get_y_size()*size)\n\n\n winprops = WindowProperties()\n winprops.set_size(buff_size_x, buff_size_y)\n props = FrameBufferProperties()\n props.set_rgb_color(True)\n props.set_rgba_bits(8, 8, 8, 8)\n props.set_srgb_color(True)\n if 'FORWARD_AUX' in define:\n props.set_aux_rgba(1)\n props.set_depth_bits(0)\n buff = base.graphicsEngine.make_output(\n base.pipe, 'forward_stage', 2,\n props, winprops,\n GraphicsPipe.BF_resizeable,\n base.win.get_gsg(), base.win)\n buff.add_render_texture(tex=tex, mode=GraphicsOutput.RTMBindOrCopy, bitplane=GraphicsOutput.RTPColor)\n if 'FORWARD_AUX' in define:\n buff.add_render_texture(tex=aux_tex,mode=GraphicsOutput.RTMBindOrCopy, bitplane=GraphicsOutput.RTPAuxRgba0)\n buff.set_clear_active(GraphicsOutput.RTPAuxRgba0, True)\n buff.set_clear_color((0, 0, 0, 0))\n cam = base.make_camera(win=buff)\n cam.reparent_to(root)\n lens = base.cam.node().get_lens()\n cam.node().set_lens(lens)\n mask = BitMask32.bit(self.modelMask)\n mask.set_bit(self.lightMask)\n cam.node().set_camera_mask(mask)\n return root, tex, cam, buff, aux_tex", "title": "" }, { "docid": "b5d2097cc1a0b3ad189ad31e271ca298", "score": "0.46767053", "text": "def _coadd_frames(self):\n imagePathList = []\n weightPathList = []\n for frame in self._current_offset_paths:\n imagePathList.append(self._current_offset_paths[frame])\n weightPathList.append(self.weight_paths[frame])\n configs = dict(self._swarp_configs)\n configs.update({'RESAMPLE': 'N', 'SUBTRACT_BACK': 'N'})\n swarp = Swarp(imagePathList, self.stack_name,\n weightPaths=weightPathList,\n configs=configs, workDir=self.workdir)\n swarp.run()\n coaddPath, coaddWeightPath = swarp.mosaic_paths()\n\n coaddHeader = astropy.io.fits.getheader(coaddPath, 0)\n coaddFrame = ResampledWCS(coaddHeader)\n\n return coaddPath, coaddWeightPath, coaddFrame", "title": "" }, { "docid": "79249a7f79373fde224cfcb1118903a1", "score": "0.46666753", "text": "def move_overlay(cx, cy, display_w, display_h):\n factor = display_h / 480.0\n x = int((display_w - 640.0 * factor) / 2.0 + cx * factor - 32.0 / 2.0 * factor)\n y = int(cy * factor - 32.0 / 2.0 * factor)\n overlay.window = (x, y, int(32 * factor), int(32 * factor))", "title": "" }, { "docid": "5bd4a4e0eca54f2bee8eee3f75923439", "score": "0.46641573", "text": "def move_forward(self):\n if not self.alive:\n return\n x = self.position.x + self.SHIFT[self.orientation][0]\n y = self.position.y + self.SHIFT[self.orientation][1]\n self.position = Point(x,y)", "title": "" }, { "docid": "9d69ed15867e74589c634fccfe79aeb3", "score": "0.46624434", "text": "def shift_spectrum(self):\n self.fit_window_ref = self.fit_window - self.shift # Shifting the fitting window indices for the ref spectrum", "title": "" }, { "docid": "58e6e01650c253d168f3ac0c16ad3dd9", "score": "0.46518144", "text": "def backward_warp(images, optical_flows, 
bilinear_sample=True):\n if mod is not None:\n return mod.backward_warp(images, optical_flows)\n else:\n with tf.name_scope('warp'):\n return spatial_transformer_network(images, optical_flows, True, bilinear_sample=bilinear_sample)", "title": "" }, { "docid": "db3d3020ae55f6e0b31a525632273705", "score": "0.46497932", "text": "def movecp():\n obj = rs.ObjectsByType (8|16, select=0, state=0)\n if obj:\n surfguid = obj[0]\n surfobj = _objref(surfguid)\n #basenurb = surfobj.Surface().ToNurbsSurface()\n #Generate initial surface files\n #surfaceAnal(surfguid, outfname=\"init\", _LPP= 36000 )\n # Modify the surface\n xt = 1000\n yt = 800\n zt = 100\n lstTup = [(8,i,(xt,yt,zt)) for i in range(2,3)]\n modsur = modSurface(surfobj, lstTup =lstTup)\n if modsur:\n guidMod = modsur.Id\n #surfaceAnal(guidMod, outfname=\"mod\"+str(n*100),_LPP= 36000)\n \n #stationoffset(guidMod,_fore= 35956, _end=50, _zmin=-20, _zmax=5000,fname=\"off_xpan_mod\")\n rs.DeleteObject(guidMod)\n #rs.ObjectColor( guidMod, (0,255,0))", "title": "" }, { "docid": "82a23e5910df1ce372da59b4e3fe38c3", "score": "0.4648296", "text": "def make_warper(alignment, downsample_factor, grid_spacing, output_shape):\n global WARPER\n ze, ye, xe = [int(np.ceil(_ / grid_spacing)) * grid_spacing\n for _ in output_shape]\n xa, ya, za = [np.arange(0, _, grid_spacing)\n for _ in (xe, ye, ze)]\n src = [(x / downsample_factor,\n y / downsample_factor,\n z / downsample_factor)\n for x, y, z in alignment[\"moving\"]]\n dest = alignment[\"reference\"]\n warper = Warper(src, dest)\n approximator = warper.approximate(za, ya, xa)\n WARPER = approximator", "title": "" }, { "docid": "26bd8b58ce1dbd57fcf5511186191520", "score": "0.46435228", "text": "def processFrame(self, frame, show_debug_info=False):\n max_x = np.max(self.__template_coords[:,0])\n min_x = np.min(self.__template_coords[:,0])\n max_y = np.max(self.__template_coords[:,1])\n min_y = np.min(self.__template_coords[:,1])\n template_width = max_x - min_x + 1\n template_height = max_y - min_y + 1\n\n # Make the pyramid of images (multiresolution processing)\n pyramid = []\n pyramid.append(frame)\n\n width = round(frame.shape[1]/2.0)\n height = round(frame.shape[0]/2.0)\n dst = frame\n for i in range(self.__pyramid_levels):\n dst = cv2.pyrDown(dst, (width, height))\n pyramid.append(dst)\n width = round(width/2.0)\n height = round(height/2.0)\n\n if show_debug_info:\n cv2.imshow('PyrDown', dst)\n cv2.waitKey()\n\n if (width < template_width) or (height < template_height):\n break\n\n scale_factor = pow(2.0, len(pyramid)-1)\n motion_params = self.__motion_model.scaleParams(self.__params, 1.0/scale_factor)\n\n if self.__optimizer.show_iter:\n print \"============= Tracker starts processing frame\\n\"\n\n for i in range(len(pyramid)-1, 0, -1):\n if self.__optimizer.show_iter:\n print \"\\n==Iterations for \", pyramid[i].shape[0], \"x\", pyramid[i].shape[1], \" pixels\\n\"\n\n motion_params = self.__optimizer.solve(pyramid[i], motion_params)\n\n if i > 0:\n motion_params = self.__motion_model.scaleParams(motion_params, 2.0)\n\n if self.__optimizer.show_iter:\n print \"============= Tracker ENDS processing frame\\n\"\n\n self.__params = np.copy(motion_params)\n\n return", "title": "" }, { "docid": "80da46f6864e710cfb1acb16d5a3326a", "score": "0.4638612", "text": "def prestack(self):\n\n if not self.is_ready_for_prestack:\n raise(Exception(\"CCPimage not ready for prestack\"))\n\n xs_latitudes = np.asarray(\n np.linspace(self.xs_lat1, self.xs_lat2, self.nx))\n xs_longitudes = np.asarray(\n 
np.linspace(self.xs_lon1, self.xs_lon2, self.nx))\n\n xs_amps_ps = np.zeros((self.nz, self.nx, self.n_traces))\n xs_amps_pps = np.zeros((self.nz, self.nx, self.n_traces))\n xs_amps_pss = np.zeros((self.nz, self.nx, self.n_traces))\n\n for iz in _progressbar(range(self.nz), '', 25):\n\n for i_coor in range(self.n_traces):\n\n lat_tr = self.lat_depth[iz, i_coor]\n lon_tr = self.lon_depth[iz, i_coor]\n distance_tests = np.empty(self.nx)\n\n for i_xs in range(self.nx):\n lat_xs = xs_latitudes[i_xs]\n lon_xs = xs_longitudes[i_xs]\n distance_tests[i_xs] = haversine(\n lat_xs, lon_xs, lat_tr, lon_tr)\n\n minimum_distance = np.amin(distance_tests)\n ix = np.where(distance_tests ==\n np.amin(distance_tests))[0][0]\n\n nonzero_count = np.count_nonzero(\n xs_amps_ps[iz, ix, :])\n new_amp_ps = self.amp_ps_depth[iz, i_coor]\n if xs_amps_ps[iz, ix, 0] == 0.:\n xs_amps_ps[iz, ix, 0] = new_amp_ps\n else:\n xs_amps_ps[iz, ix, nonzero_count] = new_amp_ps\n\n nonzero_count = np.count_nonzero(\n xs_amps_pps[iz, ix, :])\n new_amp_pps = self.amp_pps_depth[iz, i_coor]\n if xs_amps_pps[iz, ix, 0] == 0.:\n xs_amps_pps[iz, ix, 0] = new_amp_pps\n else:\n xs_amps_pps[iz, ix, nonzero_count] = new_amp_pps\n\n nonzero_count = np.count_nonzero(\n xs_amps_pss[iz, ix, :])\n new_amp_pss = self.amp_pss_depth[iz, i_coor]\n if xs_amps_pss[iz, ix, 0] == 0.:\n xs_amps_pss[iz, ix, 0] = new_amp_pss\n else:\n xs_amps_pss[iz, ix, nonzero_count] = new_amp_pss\n\n self.xs_amps_ps = xs_amps_ps\n self.xs_amps_pps = xs_amps_pps\n self.xs_amps_pss = xs_amps_pss\n self.is_ready_for_ccp = True\n self.is_ready_for_gccp = True\n\n del self.amp_ps_depth\n del self.amp_pps_depth\n del self.amp_pss_depth\n del self.lon_depth\n del self.lat_depth", "title": "" }, { "docid": "5f4d2fb0720e6e9e3eb943206574b418", "score": "0.46381888", "text": "def d_broadcastPositionNow(self):\n self.d_clearSmoothing()\n self.d_broadcastPosHpr()", "title": "" }, { "docid": "1c8b81776cd55c9309e13140575bcc85", "score": "0.46322897", "text": "def plate_frames_from_camera_frames(plate_frame_filename_dict, video2frame_dict, saveDir):\n \n print(\"Saving timelapse frames (96-well)...\")\n for (frame_idx, rig_video_set) in tqdm(plate_frame_filename_dict.items()):\n \n file_dict = get_rig_video_set(rig_video_set[0]) # gives channels as well \n assert sorted(file_dict.values()) == sorted([Path(i) for i in rig_video_set])\n \n # define multi-panel figure\n columns = 3\n rows = 2\n h_in = 4\n x_off_abs = (3600-3036) / 3036 * h_in\n x = columns * h_in + x_off_abs\n y = rows * h_in\n fig, axs = plt.subplots(rows,columns,figsize=[x,y])\n \n x_offset = x_off_abs / x # for bottom left image\n width = (1-x_offset) / columns # for all but top left image\n width_tl = width + x_offset # for top left image\n height = 1/rows # for all images\n \n plt.ioff()\n for channel, rawvideopath in file_dict.items():\n \n _loc, rotate = CH2PLATE_dict[channel]\n _ri, _ci = _loc\n \n # create bbox for image layout in figure\n if (_ri == 0) and (_ci == 0):\n # first image (with well names), bbox slightly shifted\n bbox = [0, height, width_tl, height]\n else:\n # other images\n bbox = [x_offset + width * _ci, height * (rows - (_ri + 1)), width, height] \n \n # get location of subplot for camera\n ax = axs[_loc] \n \n # read average frame for rawvideopath\n av_frame_path = video2frame_dict[str(rawvideopath)]\n img = cv2.imread(av_frame_path)\n \n # rotate image 180ยฐ to align camera FOV if necessary\n if rotate:\n img = np.rot90(img, 2) \n \n # plot image without axes/labels\n ax.imshow(img)\n 
ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n \n # set image position in figure\n ax.set_position(bbox)\n \n if saveDir:\n saveName = rawvideopath.parent.stem + '.png'\n savePath = Path(saveDir) / \"plate_frame_timelapse\" / saveName\n savePath.parent.mkdir(exist_ok=True)\n if not savePath.exists():\n fig.savefig(savePath,\n bbox_inches='tight',\n dpi=300,\n pad_inches=0,\n transparent=True)\n # close figure \n plt.close(fig)", "title": "" }, { "docid": "8f7ba28bc0dce47f8905e20063807b66", "score": "0.46311837", "text": "def forward(self, cols):\n new_col = self.cursor['col'] + cols[0]\n if new_col > self.bounds['col']:\n offset = self.bounds['col'] - self.cursor['col']\n self.cursor['col'] = copy.deepcopy(self.bounds['col'])\n else:\n offset = cols[0]\n self.cursor['col'] = new_col\n command = self.image_writer.forward([offset])\n self.logger.warn(command)\n return command", "title": "" }, { "docid": "487511de1474b3c4973c5635f7823c8c", "score": "0.46303654", "text": "def anonymise_coronal_images(frames, nose_points, right_ear_points, left_ear_points):\n\n padx = 60\n pady = 60\n nose_x = 0\n nose_y = 0\n for idx, path in enumerate(frames):\n frame = Image.open(path)\n if nose_points[idx][0] != 0 and nose_points[idx][1] != 0:\n nose_x = nose_points[idx][0]\n nose_y = nose_points[idx][1]\n # print(nose_x, nose_y)\n\n elif left_ear_points[idx][0] != 0 and left_ear_points[idx][1] != 0:\n nose_x = left_ear_points[idx][0]\n nose_y = left_ear_points[idx][1]\n elif right_ear_points[idx][0] != 0 and right_ear_points[idx][1] != 0:\n nose_x = right_ear_points[idx][0]\n nose_y = right_ear_points[idx][1]\n else:\n # No point available - check previous point\n try:\n if nose_points[idx - 1][0] != 0 and nose_points[idx - 1][1] != 0:\n nose_x = nose_points[idx - 1][0]\n nose_y = nose_points[idx - 1][1]\n # print(nose_x, nose_y)\n\n elif left_ear_points[idx - 1][0] != 0 and left_ear_points[idx - 1][1] != 0:\n nose_x = left_ear_points[idx - 1][0]\n nose_y = left_ear_points[idx][1]\n elif right_ear_points[idx - 1][0] != 0 and right_ear_points[idx - 1][1] != 0:\n nose_x = right_ear_points[idx - 1][0]\n nose_y = right_ear_points[idx - 1][1]\n except IndexError:\n nose_x = 0\n nose_y = 0\n\n point1 = nose_x - padx\n point2 = nose_y - pady\n if point1 < 0:\n point1 = 0\n if point2 < 0:\n point2 = 0\n\n nose = (int(point1), int(point2), int(nose_x + padx), int(nose_y + pady))\n cropped_frame = frame.crop(nose)\n blurred_frame = cropped_frame.filter(ImageFilter.GaussianBlur(radius=20))\n # print(nose)\n # sys.exit()\n frame.paste(blurred_frame, nose)\n\n outpath = \"{}\\\\{}.png\".format(\"blurred_coronal_images\", idx + 1)\n print(outpath)\n frame.save(outpath)\n input_files = []\n for filename in glob.glob(\"{}\\\\*.png\".format(\"blurred_coronal_images\")):\n input_files.append(filename)\n # print(input_files)\n # Stupid python input_files.sort(key=lambda x: int(float(os.path.basename(x).split('.')[0][1:])))\n input_files.sort(key=lambda f: int(re.sub('\\D', '', f)))\n return input_files", "title": "" }, { "docid": "226f427f00b471e63afe5223d5426ab6", "score": "0.4629292", "text": "def tick(self):\n\tview = smp.GetActiveView()\n\n\t# lidar orientation and position\n\tR_l = self.orientations[self.i]\n\tT_l = self.pts[self.i, :]\n\n\t# move camera\n\tfor c in self.cameras:\n\t\tif c.timestep_inside_range(self.i):\n\t\t\tprint c.type\n\t\t\tview.CameraPosition = c.interpolate_position(self.i, R_l, T_l, np.asarray(list(view.CameraPosition)))\n\t\t\tview.CameraFocalPoint = 
c.interpolate_focal_point(self.i, R_l, T_l)\n\t\t\tview.CameraViewUp = c.interpolate_up_vector(self.i, R_l)\n\t\t\tbreak\n\n\t# move 3d model (vtk Transform rotation are angle axis in degrees)\n\tif self.model is not None:\n\t\tself.model.Transform.Translate = self.pts[self.i, :3]\n\t\to = R_l.as_rotvec()\n\t\tangle_rad = np.linalg.norm(o)\n\t\tangle_deg = np.rad2deg(angle_rad)\n\t\tself.model.Transform.Rotate = o * angle_deg / angle_rad\n\n\tsmp.Render()\n\n\t# save frame\n\tif len(frames_output_dir) > 0:\n\t\timageName = os.path.join(frames_output_dir, \"image_%04d.png\" % (self.image_index))\n\t\tsmp.WriteImage(imageName)\n\n\tself.image_index += 1\n\tself.i += 1", "title": "" } ]
a3d3c12ec19b6263be1c8b39b2a8c1fc
Return number of variable figure elements, assume zero.
[ { "docid": "ec10f9e111a0f970fd9481390af80121", "score": "0.63331455", "text": "def count_variable_connections(self):\r\n self.browser.implicitly_wait(1)\r\n try:\r\n figs = self.browser.find_elements_by_class_name('variable-connection')\r\n count = len(figs)\r\n finally:\r\n self.browser.implicitly_wait(TMO)\r\n return count", "title": "" } ]
[ { "docid": "a376965fb7e385d6100eeb757767e5fd", "score": "0.7942516", "text": "def count_variable_figures(self):\r\n self.browser.implicitly_wait(1)\r\n try:\r\n figs = self.browser.find_elements_by_class_name('variable-figure')\r\n count = len(figs)\r\n finally:\r\n self.browser.implicitly_wait(TMO)\r\n return count", "title": "" }, { "docid": "2bda28bc04c7bb82a2efc86859836516", "score": "0.67973095", "text": "def getNumDims(self):\n dataDict = self.__dict__\n result = len(self.numPoints)\n return result", "title": "" }, { "docid": "f8cded979354588d5a9859ebd4e3e3c8", "score": "0.66951495", "text": "def NumElements(self) -> int:", "title": "" }, { "docid": "f9dbc4cd4c8dc14932b3ca0d01d57284", "score": "0.66749644", "text": "def numeroElements(self):\n count=0\n for c in self._components:\n count+=1\n return count", "title": "" }, { "docid": "fef6aae0b546bfeafaad7b7026d4b8c1", "score": "0.66631395", "text": "def n_subplots(ax_im):\n return len(ax_im.get_figure().get_axes())", "title": "" }, { "docid": "c6f258fc4a4c223ee2a42cc44932be32", "score": "0.66007113", "text": "def n_elements(self):\n return self.pixels.size", "title": "" }, { "docid": "1ae686b9ac9667950e6320041f806cd5", "score": "0.65995926", "text": "def num_elements(self):\n return self.num_elem", "title": "" }, { "docid": "ec4fb80cb720a2af9506003782050714", "score": "0.65229803", "text": "def get_number_of_elements(self):\n return len(self.elements)", "title": "" }, { "docid": "02668c95e71be32a10a9ca3942883098", "score": "0.64943594", "text": "def get_number_of_3d_elements(self):\n number_of_3d_elements = 0\n for element in self.elements.values():\n if element_dictionary_inverse[\n (element.elem_type, \"abaqus\")] in elements_3d:\n number_of_3d_elements += 1\n return number_of_3d_elements", "title": "" }, { "docid": "4fc177c03d4039eee3d26c038f4e3194", "score": "0.64099026", "text": "def number_of_elements(self):\n return self.network.number_of_nodes()", "title": "" }, { "docid": "7a11cdd7482bcfa78c106a6af4a77000", "score": "0.6366252", "text": "def nvariables(self):\n return len(self.variables())", "title": "" }, { "docid": "25ca572121dea12d6049f0ca3994ce7b", "score": "0.6354053", "text": "def n_objects(self):\n\n return getattr(self, self.parameters[0] ).shape[1]", "title": "" }, { "docid": "fb394176e7b50a8761e08b5446aa3ad3", "score": "0.63430464", "text": "def size(self) -> int:\n return len(self.xys[0])", "title": "" }, { "docid": "31614fe7070a5c915434783742a19dc6", "score": "0.6331607", "text": "def countWellSample (self):\n return len(self._listChildren())", "title": "" }, { "docid": "0e587b02e0e6429fa14d859c6458ff5f", "score": "0.6329149", "text": "def dim(self):\n\n return len(self.items)", "title": "" }, { "docid": "ae45e1f107042caadee0c6e6eb21dd3a", "score": "0.6316936", "text": "def get_element_count(self):\n\t\treturn self.element_count", "title": "" }, { "docid": "b9815b7e13fc34d9f3f027fcc577fa08", "score": "0.6311129", "text": "def nb_elements(self):\n __LOG__.debug('** Resource::nb_elements ({0})'.format({'name':self.name}))\n return 1", "title": "" }, { "docid": "bf5fc59cab1e632fe1179e346e2da45f", "score": "0.63075477", "text": "def num_elements(x):\n return tf.TensorShape(x).num_elements()", "title": "" }, { "docid": "06da125c6891a9e6211a06cefed299c2", "score": "0.62947434", "text": "def size(self) -> int:\n return np.sum([control.size for control in self.controls])", "title": "" }, { "docid": "959cae706490f9c0034b3de42215f1bf", "score": "0.6294111", "text": "def size(self):\n count = 0\n for i in self.lyst:\n if 
i is not None:\n count += 1\n return count", "title": "" }, { "docid": "387560474614578f78e9e80199d10d31", "score": "0.62841386", "text": "def size(self):\n return len(self.x)", "title": "" }, { "docid": "412d4f3ab7f1b9ceffde2e3c3d8dd87b", "score": "0.62840784", "text": "def getNumPlots(self):\n return len(self.plots)", "title": "" }, { "docid": "981cfc29d24aecdf5c5e1c2cf3c6db79", "score": "0.6273411", "text": "def component_count(self):\r\n return self._evaluator.kernel_dimension", "title": "" }, { "docid": "c8045183b283a12ce242e193ef094cbb", "score": "0.62638843", "text": "def exo_dim(self):\n return 0", "title": "" }, { "docid": "aac4f00faad90962299baf73e5f24557", "score": "0.6262257", "text": "def num_variables(self) -> int:\n return self.data.num_variables()", "title": "" }, { "docid": "8b74c2e7323f721b9062c28d54da1d8f", "score": "0.6251873", "text": "def symcount(self):\n return len(self.axes)", "title": "" }, { "docid": "d5eddf04acda1138d354bbc07a67ba54", "score": "0.6249127", "text": "def _get_num_objects(self):\n return len(self.current_image)", "title": "" }, { "docid": "14d6c825a16dfb80ba4273812f8213e0", "score": "0.6239202", "text": "def numOfElements(self):\n return self.__elements", "title": "" }, { "docid": "7e94f367e68ef2c45e5e67965643ea45", "score": "0.6236973", "text": "def __len__(self):\n total = 1\n for n in self.shape: # Number of patches generated in each dimension\n total *= n\n return total", "title": "" }, { "docid": "1a50a2168b329a79fff0479d1df35e2b", "score": "0.61958", "text": "def dimension(self):\n return len(self.basis())", "title": "" }, { "docid": "53a696c8d67513b12aacf14106ecf912", "score": "0.6192462", "text": "def n_true_elements(self):\n return self.n_true_pixels() * self.n_channels", "title": "" }, { "docid": "b718a6c764ac6ac25fb2b2cac88522a3", "score": "0.6174429", "text": "def get_amount_of_particles(self):\n return len(self.particles)", "title": "" }, { "docid": "0bc5185bb3dc34635f40011a59bb018d", "score": "0.61732376", "text": "def dim(self) -> int:\n pass", "title": "" }, { "docid": "81530722d346bd386c31bbec79cda4b6", "score": "0.6173171", "text": "def __size(self,x):\n\t\treturn x.N if x is not None else 0", "title": "" }, { "docid": "c197f8d41d5892d23acdb59f823fce21", "score": "0.6170255", "text": "def _getNumComponents(self):\n return len(self.components)", "title": "" }, { "docid": "a4d67289854800f04830616ee4a7e026", "score": "0.6148701", "text": "def number_of_variables(self):\n return self._num_of_var", "title": "" }, { "docid": "a4d67289854800f04830616ee4a7e026", "score": "0.6148701", "text": "def number_of_variables(self):\n return self._num_of_var", "title": "" }, { "docid": "6787aa9649e8ec35b4f95eda3b7e957d", "score": "0.61468595", "text": "def nb_elements(self):\n __LOG__.debug('** Task::nb_elements ({0})'.format({'name':self.name}))\n return 1", "title": "" }, { "docid": "81fb9ff19c7939974e1aa06d53beeeb3", "score": "0.61468065", "text": "def get_number_of_trainable_elements(network: types.Network) -> types.Float:\n num_elements_list = []\n for var in network.trainable_variables:\n num_elements = var.get_shape().num_elements()\n if num_elements is None:\n raise ValueError(\n f'Variable:{var} is expected to have a known shape, but found '\n 'otherwise.')\n num_elements_list.append(num_elements)\n return sum(num_elements_list)", "title": "" }, { "docid": "0f876cd8386a536d5c65229555dd4034", "score": "0.6139817", "text": "def n_measures(self):\n return len(self.measures)", "title": "" }, { "docid": "7f994fc3ee9e7989ff52e1b35385994f", 
"score": "0.6137833", "text": "def _num_of_figures(self, mark):\n table = self._game_instance._table\n num_of_figures = 0\n\n for i in table:\n if i == mark:\n num_of_figures += 1\n\n return num_of_figures", "title": "" }, { "docid": "0f3dd716e4f588f9b0aabe006ee6c764", "score": "0.6135164", "text": "def n_dims(self):", "title": "" }, { "docid": "7fdf3ae01c8cde6d5636cc2468152329", "score": "0.612341", "text": "def _number_of_nodes(self):\n if self._nodes is None:\n return 0\n return len(self._nodes)", "title": "" }, { "docid": "24ba0eca4c2be90be3992e11db06cd4d", "score": "0.61187536", "text": "def __len__(self):\n shape_elms = list(self._iter_member_elms())\n return len(shape_elms)", "title": "" }, { "docid": "e9d44f905098cef305b7af3b3a335e05", "score": "0.61112225", "text": "def num_markers(self):", "title": "" }, { "docid": "9613adc79b5e3ec26e9858dd5aee4ab6", "score": "0.6109844", "text": "def number_of_items(self):", "title": "" }, { "docid": "0f345f1295ad50bfc21811d69dea3f0d", "score": "0.60968417", "text": "def get_count(self) -> int:\n return len(self._bars)", "title": "" }, { "docid": "15b74f1dbe27f086572f1da28492aba7", "score": "0.60950637", "text": "def example_count(a):\n try:\n return jnp.shape(a)[0]\n except IndexError:\n return 1", "title": "" }, { "docid": "1fd6c6ff6bf54536b618d0457b98e0b5", "score": "0.6091652", "text": "def NumControlPointsInV(self) -> int:", "title": "" }, { "docid": "72dde40f2864417b75928ea6e50aa7d6", "score": "0.6091303", "text": "def no_of_volumes(self) -> int:\n return len(self.volumes)", "title": "" }, { "docid": "892fd62b615571af19053ea135f03301", "score": "0.60909534", "text": "def ndim(x):\n return x.ndim", "title": "" }, { "docid": "bf2f70d8b24690d00069fc633630e79c", "score": "0.60660636", "text": "def get_count(self):\n return len(self.__elements)", "title": "" }, { "docid": "7875102ac8135e19886bc4e5c6e62523", "score": "0.6065927", "text": "def dim(self):\n return self.__n", "title": "" }, { "docid": "ef91adbae8d8cdffd4a3c7389ac96e38", "score": "0.6051413", "text": "def _get_num_plates(self):\n return len(self.dataset)", "title": "" }, { "docid": "f3ab9551a7013b0de36ca2230357ea34", "score": "0.6051023", "text": "def get_number_of_features(self):", "title": "" }, { "docid": "5627d11b32ddd16e4738646a4bdd34db", "score": "0.604985", "text": "def dim(self):\n val = self.sample(np.random).value\n if isinstance(val, (list, np.ndarray)):\n return len(val)\n return 1", "title": "" }, { "docid": "972946250c4beea819e398da603ea894", "score": "0.6045562", "text": "def get_number_of_dimensions( q ):\n return len(q.shape) - 1", "title": "" }, { "docid": "80a7c33edd6f496287d61b55c02021c8", "score": "0.60428125", "text": "def size(self):\n return self.points.shape[0]", "title": "" }, { "docid": "81a8600d319f8e5e51215f68260ebb77", "score": "0.6042228", "text": "def _num_elements(losses):\n with K.name_scope('num_elements') as scope:\n return K.cast(K.size(losses, name=scope), losses.dtype)", "title": "" }, { "docid": "3d1928f8467f97580b1c2d584078f887", "score": "0.6039669", "text": "def indivs_numof_get(self):\n return len(self._map_indivs)", "title": "" }, { "docid": "076312f1d69fde9b129c864f95825090", "score": "0.60381424", "text": "def __len__(self) -> int:\n\n return (\n self._reflectances.shape[0]\n if self._reflectances is not None\n else 0\n )", "title": "" }, { "docid": "6fc227a2978ea187644c5ca36c3f1537", "score": "0.60357964", "text": "def num_features(self):\n\n graph0 = self.get(0)\n print(\"num_features, data.x.shape\")\n pprint(graph0.x.shape)\n return 
graph0.x.shape[1]\n #return 4", "title": "" }, { "docid": "1ed9b21a668c64a8032943ebe14b169d", "score": "0.60332114", "text": "def size():", "title": "" }, { "docid": "a1534d34e0b2ea241a5169a8cba746d8", "score": "0.6032182", "text": "def transientsCount(self):\n r = 0\n while (r < self.rowDim and sum(self[r]) != 0):\n r += 1\n return r", "title": "" }, { "docid": "48ecea4e4a9eacc895098f97ec95462c", "score": "0.60286605", "text": "def count(self):\n return np.sum(self.grid != 0)", "title": "" }, { "docid": "cf72fc54dd6238685ddae51dc3f87c6d", "score": "0.6028529", "text": "def ndata(self):\n return len(self.flat)", "title": "" }, { "docid": "37e03c30b5b5046d040f597d3a888d79", "score": "0.60270494", "text": "def get_number_of_2d_elements(self):\n number_of_2d_elements = 0\n for element in self.elements.values():\n if element_dictionary_inverse[\n (element.elem_type, \"abaqus\")] in elements_2d:\n number_of_2d_elements += 1\n return number_of_2d_elements", "title": "" }, { "docid": "20bca89bab838ae3d61c0287b6b30b52", "score": "0.6025065", "text": "def Dimension(self) -> int:", "title": "" }, { "docid": "e3f13e5463e17ee7aed1791e0ab60982", "score": "0.601973", "text": "def num_inner_dimensions(self):\n return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])", "title": "" }, { "docid": "8e48edaae7ada3bed57ed99203a218c6", "score": "0.60190284", "text": "def size(self):\n return len(self.points)", "title": "" }, { "docid": "42a3cef7ce18cf875c29a19648e33cdc", "score": "0.60040647", "text": "def nbr_dims() -> int:\n pass", "title": "" }, { "docid": "42a3cef7ce18cf875c29a19648e33cdc", "score": "0.60040647", "text": "def nbr_dims() -> int:\n pass", "title": "" }, { "docid": "0696cd568d0b3823989170bb044102d0", "score": "0.60037565", "text": "def size(self, plot_type):\n count = 0\n for plot in self._plots:\n if plot['type'] == plot_type:\n count += 1\n return count", "title": "" }, { "docid": "176d883dea3743d01b45aef886f175bb", "score": "0.5986763", "text": "def ncols(self):\r\n return sum([len(a.values) for a in self.values_axes])", "title": "" }, { "docid": "03200948c461c6192cab3d607cbf270e", "score": "0.59843975", "text": "def getNumObjects(self):\n return None", "title": "" }, { "docid": "70260fd97e940cb4e62d3dfb53592ebb", "score": "0.59803736", "text": "def num_nodes_generated(self):\n return len(self.frontier) + len(self.explored)", "title": "" }, { "docid": "9ecdbb5426f6a214e3a499c7939efe80", "score": "0.597389", "text": "def dim(self):\n return self._n_dim", "title": "" }, { "docid": "f8dfdcb2860f3baf77cfec9fafc6acc7", "score": "0.5973632", "text": "def GetComponentCount(self):\n return len(self.visible)", "title": "" }, { "docid": "c085545df277dfd92d0201dc276cfe0d", "score": "0.59693205", "text": "def NumControlPoints(self) -> int:", "title": "" }, { "docid": "7ff9153d2a228fcf25fd242b23efd67b", "score": "0.5968504", "text": "def nbr_dims() -> int:\n return 5", "title": "" }, { "docid": "5cf59769a6930d151d01a983a498987c", "score": "0.5967604", "text": "def nb_nodes(self):\n T = self.times[1] - self.times[0]\n card_W = self.get_card_W()\n nb_nodes = card_W / T\n return nb_nodes", "title": "" }, { "docid": "3539b3b3bdcc9a676afe680c68cacca7", "score": "0.59662354", "text": "def num_nodes(self):\n return self.node_emb.size(0)", "title": "" }, { "docid": "03c5bd45b4b06f5ef32d97a4f1a7e022", "score": "0.59543794", "text": "def calculate_number_observation(self, one_dimensional_array):\n \n number_observation = 0\n \n try:\n \n number_observation = one_dimensional_array.size \n \n except 
Exception:\n \n self.print_exception_message()\n \n return number_observation", "title": "" }, { "docid": "96fadcae718818f36f6956abff47aaff", "score": "0.5950197", "text": "def get_number_of_nodes(self):\n return len(self.nodes)", "title": "" }, { "docid": "e512fa362084a8169088e2b96d0aef30", "score": "0.5946819", "text": "def nb_elements(self):\n __LOG__.debug('** GroupOfResources::nb_elements ({0})'.format({'name':self.name}))\n return len(self.resources)", "title": "" }, { "docid": "d7904967059fcfcc39f240c937706bca", "score": "0.5943794", "text": "def get_points_no(self)->int:\n return len(self._points)", "title": "" }, { "docid": "37f6be9c80e8fe2363d6a3baaebf8f7e", "score": "0.59409314", "text": "def __len__(self):\n if self.patch_size is not None:\n return len(self.indices) * self.nb_patches_by_img\n return len(self.indices)", "title": "" }, { "docid": "749b2fd563173f29a5960354044acc2a", "score": "0.5936649", "text": "def number_of_nodes(G):\n return G.number_of_nodes()", "title": "" }, { "docid": "7838d6a3a164a9d78438f48748aa631a", "score": "0.59359455", "text": "def numBlobs(self):\n return self.__x.shape[0]", "title": "" }, { "docid": "577e2164a62790ca3abf39c876d26897", "score": "0.59357166", "text": "def sv_count(self) -> int:\n return self._df_svpos.shape[0]", "title": "" }, { "docid": "85c34d2ef44cf921b00af160d0a76cf9", "score": "0.59325266", "text": "def dim(self):\n\t\treturn self._point.dim", "title": "" }, { "docid": "aa57c495a88bfbe1a89fb9d52978d2e0", "score": "0.5925587", "text": "def __len__(self):\r\n return np.size(self._values)", "title": "" }, { "docid": "f5b7fd5c53ff8e55033eb7f5730a9078", "score": "0.59199166", "text": "def get_n_boxes(self):\n self.n_boxes = 0\n for shape in self.feature_shapes:\n self.n_boxes += np.prod(shape) // self.n_anchors\n return self.n_boxes", "title": "" }, { "docid": "0e1d44f46f6f4b04d23a4078dd95f436", "score": "0.59196305", "text": "def size(data):\n return data[0].get_value(borrow=True).shape[0]", "title": "" }, { "docid": "9cd1df339740f4fb4043640e049d656c", "score": "0.59191597", "text": "def n_scales(self):\n return len(self.scales)", "title": "" }, { "docid": "d720d352f431df4dd75b96816abb3d41", "score": "0.5912972", "text": "def __len__(self):\r\n return len(self._info_axis)", "title": "" }, { "docid": "aa0ffcf8cfe8b0b1639675e68f69bb5b", "score": "0.5912313", "text": "def dim(self) -> int:\n return [p.size(0) for p in self.model.parameters()][-1]", "title": "" }, { "docid": "40f6bf57a1aa77378b3ad1ae0840031f", "score": "0.5911294", "text": "def _get_num_images(self):\n return len(self.current_plate)", "title": "" }, { "docid": "7aefe7d5cbfbd5c9b2b7c8a085b0c6d0", "score": "0.5910732", "text": "def num_points(self) -> int:\r\n return len(self.points)", "title": "" }, { "docid": "564dc2ba3286d9913c5c52cc96084780", "score": "0.5906855", "text": "def size(self) -> int:\n return len(self.g)", "title": "" } ]
5518a8e037777ad1a20b958e7a9b5d51
Generate the different list items by selecting a random element from each list. Needs a list of invoice names as an input.
[ { "docid": "6a4ad741be49c6bd7279709f7a92b888", "score": "0.6615224", "text": "def generate_list_items(listItemList):\n listItems = []\n numberOfListItems = random.randint(1,10)\n currencies = [\"\",\" ยฃ\", \" $\", \" CHF\"] # \" โ‚ฌ\",\n totalSyn = [\"\", \"Gesamt \", \"Gesamt: \", \"Total \", \"Total: \", \"Summe \", \"Summe: \", \"Endbertrag \", \"Endbetrag: \"]\n curr = random.choice(currencies)\n invoiceTotal = 0\n for x in range(0, numberOfListItems):\n itemName = random.choice(listItemList)\n itemQuantity = random.randint(1, 999)\n itemAmount = round(random.uniform(1.00, 999.00), 2)\n invoiceTotal += itemAmount\n if curr != \" $\":\n itemAmount = str(itemAmount).replace(\".\", \",\")\n item = {\n \"ItemName\": itemName,\n \"ItemQuantity\": itemQuantity,\n \"ItemAmount\": str(itemAmount) + curr,\n }\n listItems.append(item)\n invoiceTotal = str(\"{:.2f}\".format(invoiceTotal))\n if curr != \" $\":\n invoiceTotal = invoiceTotal.replace(\".\", \",\")\n formatted_Total = random.choice(totalSyn) + invoiceTotal + curr\n return listItems, formatted_Total", "title": "" } ]
[ { "docid": "2dd8a6155aa29361b6440537abf3c93f", "score": "0.5974432", "text": "def sample_generator(iterator, items_wanted=1):\n selected_items = [None] * items_wanted\n for item_index, item in enumerate(iterator):\n for selected_item_index in range(items_wanted):\n if not random.randint(0, item_index):\n selected_items[selected_item_index] = item\n return selected_items", "title": "" }, { "docid": "84756a05b50779e06fb99ff754cc004b", "score": "0.5920718", "text": "def test_select_trial_multiple_items(self):\n self.mock.random.return_value = 0.5\n trial_1 = data_types.Trial(probability=0.5)\n trial_2 = data_types.Trial(probability=0.3)\n self.assertEqual(trials.select_trial([trial_1, trial_2]), trial_2)", "title": "" }, { "docid": "80ad5f3383c5ada212acd82821d84b47", "score": "0.5891485", "text": "def generate_products():\n products = []\n c = list(itertools.product(ADJECTIVES, NOUNS))\n print(len(c))\n for i in range(0, 25):\n name = convertTuple(c[i])\n price = randint(1, 100)\n weight = randint(0, 40)\n flammability = round(uniform(0, 1), 2)\n prod = Product(name, price, weight, flammability)\n products.append(prod)\n\n return products", "title": "" }, { "docid": "3894f0b4148ab235cd334ba0836a1877", "score": "0.5888438", "text": "def selector(self, input_list, check_list, return_list):\n output = None\n #print(check_list)\n for item in input_list:\n #print(item)\n if (item in check_list):\n #print(\"if statement reached\")\n output = random.choice(return_list)\n break\n #print(output)\n return output", "title": "" }, { "docid": "f83bb98f55bf5c7556ea3eaeb92495f4", "score": "0.58695716", "text": "def my_gen(self, list_n):\n while True:\n location = np.random.randint(len(list_n[0]))\n data = list_n[0][location]\n yield data", "title": "" }, { "docid": "544b920bf3eb72ca845b68d5f2de2352", "score": "0.5826953", "text": "def random_pivot(lst):\n return choice(lst)", "title": "" }, { "docid": "dd3cbad1786083e82a5acc0a94a1f19e", "score": "0.58078486", "text": "def randomize():\n random.shuffle(chores)\n random.shuffle(names)", "title": "" }, { "docid": "628f658827f97f14a6feb289a44f54f7", "score": "0.57796395", "text": "def generate_invoice_number(invoiceDate, invoiceName):\n NumberSynonyms = [\"Rechnung\", \"Rechnungsnummer\", \"RechnungsNr.\", \"Rechnung Nr.\", \"Nummer\", \"Nr.\", \"Rg.-Nr.\", \"Vorgangsnummer\"]\n ColonOptions = [\" \", \": \"]\n StartOptions = [\"\", \"RE-\"]\n NameOptions = [\"\", invoiceName[0:random.randint(2, 5)].upper()+\"-\"]\n DateOptions = [\"\", invoiceDate.strftime(\"%Y-\"), invoiceDate.strftime(\"%d-%m-%Y-\"), invoiceDate.strftime(\"%d/%m/%Y-\")]\n invoiceNR = str(random.randrange(1, 99999)).zfill(5)\n # Concatenate the lists\n invoiceNumber = random.choice(NumberSynonyms) + random.choice(ColonOptions) + random.choice(StartOptions) + random.choice(NameOptions)\n invoiceNumber = invoiceNumber + random.choice(DateOptions) + invoiceNR\n return invoiceNumber", "title": "" }, { "docid": "9fbe36278027be57a174abe7861e6507", "score": "0.57774365", "text": "def assignRandomNames(name_list=[], fname=\"names-default\"):\n if len(name_list)==0:\n name_list = _EXAMPLE\n all_users = name_list\n\n random.shuffle(all_users)\n generateConfig(users2Dict(all_users), fname, 'names')", "title": "" }, { "docid": "1db88f4ebe8cab366510bd9e357699bb", "score": "0.57762593", "text": "def randomize(values):\n def picker():\n return random.choice(values)\n return picker", "title": "" }, { "docid": "9c8da8aab409123348152f84a84c7778", "score": "0.5757608", "text": "def random_choose(lst):\n \n 
set_lst = []\n \n for element in lst:\n for idx in range(1, len(element)):\n set_lst.append((element[0], element[idx]))\n\n return random.choice(set_lst)", "title": "" }, { "docid": "d23c8e6ee89e340fcee225db03801dde", "score": "0.5743515", "text": "def f1(lst,M):\n if M > len(lst) or M < 0:\n raise ValueError(\"M must be from 0 to len(lst) inclusive.\")\n\n for i in range(M):\n selection = randint(i,len(lst)-1)\n lst[i], lst[selection] = lst[selection], lst[i]\n \n return lst[:M]", "title": "" }, { "docid": "9ab9c2806b5a923f6646f1191c6655bb", "score": "0.57408863", "text": "def random_phrase(str_list):\n # type: List[str] -> str\n return random.choice(str_list)", "title": "" }, { "docid": "d765ad6d5166f278808617ef0809ff32", "score": "0.5702175", "text": "def put_items(self):\n free_paths = self.paths.difference(self.start)\n free_paths = free_paths.difference(self.goal)\n items_object = set(random.sample(free_paths, 3))\n for item in items_object:\n self.items.add(item)\n print(item)", "title": "" }, { "docid": "439aa85a562c4c4484b1ae267f5f39f0", "score": "0.56922925", "text": "def test_legal_names(self):\n report = inventory_report()\n product_names = report.combinednamelist\n adj_list = ADJECTIVES()\n noun_list = NOUNS()\n random_prod_selection = random.choice(product_names)\n self.assertIN(adj_list, random_prod_selection)\n self.assertIN(noun_list, random_prod_selection)\n self.assertIN(\" \", random_prod_selection)", "title": "" }, { "docid": "325a5aaacadb654586d889b072d482db", "score": "0.56900746", "text": "def make_text(chains):\n random_output =[]\n\n \n\n\n #choose random tuple, pick random word from key value\n for key in chains:\n pull_from_list = choice(chains[key])\n pull_from_tuple = choice(list(key))\n if chains[key] == []:\n break\n else:\n random_output.append(pull_from_tuple)\n random_output.append(pull_from_list)\n \n \n # picking= random_output.choice(chains[key])\n # random_output.append(picking)\n\n \n print(random_output) \n \n return ' '.join(random_output)", "title": "" }, { "docid": "e6e012ea5462255b2735666f8d01bddb", "score": "0.5678214", "text": "def generate_List(random_seed, size1, size2):\n seed(random_seed)\n new_list1 = []\n new_list2 = []\n for i in range(size1):\n new_list1.append(choice(ascii_uppercase))\n for j in range(size2):\n new_list2.append(choice(ascii_uppercase))\n return new_list1, new_list2", "title": "" }, { "docid": "96ed2f7fddab36abfd3178f219664dbe", "score": "0.5664313", "text": "def getRandom(self):\n return random.choice(self.list)", "title": "" }, { "docid": "7503fef6b9e5f566f636b7c253c89554", "score": "0.56270266", "text": "def generateNames(personel_type: chr, number: int):\n\n if personel_type == 'e':\n # generate employees\n first_names = file2list(\n 'first_names_f_e.txt') + file2list('first_names_m_e.txt')\n elif personel_type == 'c':\n # generate customers\n first_names = file2list(\n 'first_names_f_c.txt') + file2list('first_names_m_c.txt')\n else:\n return list()\n\n last_names = file2list('last_names.txt')\n\n if _GEN_VERBOSE:\n print('first names:', first_names)\n print('last names:', last_names)\n\n names = [random.choice(first_names) + ' ' +\n random.choice(last_names) for _ in range(number)]\n\n if _GEN_VERBOSE:\n print(names)\n\n return names", "title": "" }, { "docid": "b4ced1175f1c36e2193f6a38cea7f18d", "score": "0.55965894", "text": "def run_list_randomizer(number,prov_list,providers):\r\n bhaurgentlist = []\r\n num_assigned = prov_lst_dict(prov_list) #this will help to keep track of how many times people have been 
assigned for equitability check later\r\n day = 1\r\n count = 1\r\n rev_len = int(len(providers)*(.5)+1) #reverse length, the amount of days before a provider can be selected again. Based on 1/2 the number of providers, 6 providers = 3 days\r\n rand = 0\r\n run_number = number\r\n brk = False\r\n while count < run_number:\r\n exp_list = list(providers.keys())\r\n random.shuffle(exp_list)\r\n while exp_list != []:\r\n name_choice = exp_list.pop(random.randint(0,len(exp_list)-1))\r\n if check_day(name_choice,day,providers):\r\n if check_list(name_choice, count, rev_len, prov_list, bhaurgentlist):\r\n count +=1\r\n bhaurgentlist.append(name_choice)\r\n num_assigned[name_choice] += 1\r\n day +=1\r\n if day > 5:\r\n day = 1\r\n break\r\n if exp_list == []:\r\n #only run if options have been run out\r\n #returns a list with None and a dictionary with bad1 and bad2 as keys and 1 and 5 as values\r\n #this will break the loop and trigger the randomizer to run again from scratch\r\n return [None, {'bad':1,\"bad2\":5}]\r\n return [bhaurgentlist, num_assigned]", "title": "" }, { "docid": "74300097c362f6f6aa24ad378ff7fac3", "score": "0.55811286", "text": "def pick_random():\n return random.choice(ALL)", "title": "" }, { "docid": "d448289ca866b89ae78492df2ca54b46", "score": "0.5554932", "text": "def random_choice(a: List):\n i = np.random.randint(0, len(a))\n return a.copy()[i]", "title": "" }, { "docid": "ee56314ea0df39038a62d57c3f818dee", "score": "0.54937834", "text": "def choose_one(_lst):\n return _lst[int(random(len(_lst)))]", "title": "" }, { "docid": "bd11f0dcce9cf85123b66f9b6ea67188", "score": "0.5489053", "text": "def _choose_clients(selection_criteria, client_list):\n return client_list", "title": "" }, { "docid": "0442931e30e797074a0eb9bedfdcad6d", "score": "0.54881185", "text": "def generate_products(number_of_products=30):\n\n products = {} # empty list\n\n for i in range(number_of_products):\n # list of adjective and noun\n adj_list = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n noun_list = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n adj = random.choice(adj_list)\n noun = random.choice(noun_list)\n\n # set defaults\n name = '{} {}'.format(adj, noun)\n price = random.randint(5, 100)\n weight = random.randint(5, 100)\n flammability = random.uniform(0.0, 2.5)\n\n products[i] = product(name,price,weight,flammability)\n return products", "title": "" }, { "docid": "5beb4f394c197aafb7bd84011d1f91a1", "score": "0.5485925", "text": "def sample(lista, n):\n # select n indexes, randomly\n first_sample_indexes = random.sample(range(len(lista)), n)\n sample1 = []\n sample2 = []\n for i in range(len(lista)):\n if i in first_sample_indexes:\n sample1.append(lista[i])\n else:\n sample2.append(lista[i])\n return (sample1, sample2)", "title": "" }, { "docid": "580f81283bb39c74eff71b530af7f457", "score": "0.5469653", "text": "def random(listParams):\n if len(listParams) == 1 :\n return rd.randrange(listParams[0])\n else:\n return rd.randint(listParams[0], listParams[1])", "title": "" }, { "docid": "3989a8070d8b9ab22eb37870a47cf6a0", "score": "0.546107", "text": "def generate_products(self, num_product=30):\n self.products = [' '.join(sample(ADJECTIVES, 1) + sample(NOUNS, 1))\n for _ in range(num_product)]\n self.products_set = set(self.products)\n self.products = len(self.products_set)\n\n print(\"Unique product names:\", self.products)\n\n return", "title": "" }, { "docid": "9555a90e6d2204c1478bdbebe3e5fa91", "score": "0.545312", "text": "def generate_products(n=30):\n adjectives 
= ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\n\n return [Product(name=random.choice(adjectives) + ' ' +\n random.choice(nouns),\n price=random.randint(5, 100),\n weight=random.randint(5, 100),\n flammability=random.uniform(0.0, 2.5)) for _ in range(n)]", "title": "" }, { "docid": "1482ced46c7266072196073d2fa46a33", "score": "0.5434989", "text": "def buildTermsToProc(self, num):\r\n self.termsToProc = [x for x in random.sample(self.db.keys(), num)]", "title": "" }, { "docid": "757e1d33ed8e5fd4f7834306bfbab1f0", "score": "0.5412064", "text": "def random_generate_buslist(bus_sum, station_sum):\n buslist = []\n for bus_id in range(1, bus_sum + 1):\n pass\n return buslist", "title": "" }, { "docid": "5f10b555d8073f6e22673039a560537f", "score": "0.5402142", "text": "def GenerateRandomInventory(bikes, minimum = 0, maximum = 10):\n \n inventory = dict([bike, randint(minimum, maximum)] for bike in bikes)\n \n return inventory", "title": "" }, { "docid": "249775d36df4df56706890847114507a", "score": "0.53964615", "text": "def random_selection(loot, weight_so_far=0):\n items = []\n assignment_loot = list(loot)\n while True:\n random_item = random.randint(0, len(assignment_loot)-1)\n weight_to_add = assignment_loot[random_item].weight\n if weight_so_far + weight_to_add <= 5000:\n weight_so_far += weight_to_add\n items.append(assignment_loot[random_item])\n del assignment_loot[random_item]\n if not assignment_loot:\n break\n return items", "title": "" }, { "docid": "33b1345448e3af79a758df96f760ba99", "score": "0.53924894", "text": "def choose_random_elements(_list, num_elements=5000):\n indices = random.sample(range(len(_list)), k=num_elements)\n choices = []\n for index in sorted(indices, reverse=True):\n choices.append(_list.pop(index))\n return _list, choices", "title": "" }, { "docid": "2cca4e1279bb3554731bb7fddf26f220", "score": "0.5388824", "text": "def element_per_list(self, lista):\n temporal_list = []\n for position in range(len(lista[0])):\n rnd = random.randint(0, (self.family_number - 1))\n temporal_list.append(lista[rnd][position])\n\n return temporal_list", "title": "" }, { "docid": "a5a98dda98217968c89d9edfc5e7ea84", "score": "0.53843015", "text": "def choice_stdlib(nb_choosen:int, it:iter, it_size=None, random=RANDOMIZER):\n return random.sample(it, nb_choosen)", "title": "" }, { "docid": "8f937312f1cd503765f6b022fec8ee8d", "score": "0.5382082", "text": "def start_generate_data(self, number_of_items=None):\n sleep_time = 1. 
/ 30\n if number_of_items is None:\n while True:\n time.sleep(sleep_time)\n yield [str(randint(1, 100)) for j in range(10)]\n else:\n i = 0\n while i < number_of_items:\n time.sleep(sleep_time)\n i += 1\n yield [str(randint(1, 100)) for j in range(10)]", "title": "" }, { "docid": "22a14712cd982c12fcbc6c1625b59ba2", "score": "0.5380084", "text": "def generate_forfait(student_set: set) -> InvoiceItem:\n unique_student = len(list(set(student_set)))\n forfait = InvoiceItem(**{\"type\": \"Forfait inter-sessions\", \"unit_price\": 30})\n forfait.price = unique_student * forfait.unit_price\n forfait.count = unique_student\n return forfait", "title": "" }, { "docid": "43108c6fa3f4e1848f8cbbcb5c09f0eb", "score": "0.5378384", "text": "def test_generate_names():\n Michael_Jordan = Donor(\"Michael Jordan\", 598.71)\n Kevin_Durrant = Donor(\"Kevin Durrant\", 953.48)\n Carmelo_Anthony = Donor(\"Carmelo Anthony\", 10.07)\n dc1 = DonorCollection(Michael_Jordan, Kevin_Durrant)\n full_name_list = [\"Michael Jordan\", \"Kevin Durrant\"]\n names1 = ('\\n').join(full_name_list)\n\n full_name_list2 = [\"Michael Jordan\", \"Kevin Durrant\", \"Carmelo Anthony\"]\n names2 = ('\\n').join(full_name_list2)\n assert names1 == dc1.generate_list_of_names()\n\n dc1.add_donor(Carmelo_Anthony)\n assert names2 == dc1.generate_list_of_names()", "title": "" }, { "docid": "8206011791c326b7b25282ea3f4cf2ed", "score": "0.53764474", "text": "def _sample_cubes_random(self, cube_list):\n LOG.debug(\"_sample_cubes_random\")\n random_sample_count = self.input_data.get_value(InputType.RANDOM_SAMPLING_COUNT)\n\n random_ids = self._get_random_ids(cube_list[0], random_sample_count)\n constraint = iris.Constraint(sample=random_ids)\n selected_cubes = iris.cube.CubeList()\n\n for cube in cube_list:\n selected_cubes.append(cube.extract(constraint))\n return selected_cubes", "title": "" }, { "docid": "48cdee2b70c0c9f1bf685e6a3e91d42d", "score": "0.5369085", "text": "def generate_specific_item(self, general_item_type):\n return self.items[general_item_type].random()", "title": "" }, { "docid": "da7ce42dd8961b45fa6fb3008c770c07", "score": "0.5361228", "text": "def random_list()->list:\n return [i * random.randint(0,100) for i in range(100*100)]", "title": "" }, { "docid": "aff13599148ca5cfc7d1d0778e1dac8a", "score": "0.53591144", "text": "def pick6():\n ticket = []\n for num in range(6):\n ticket.append(random.randint(0, 99))\n return ticket", "title": "" }, { "docid": "74d553a06a97d972dca009dc69a788d1", "score": "0.5358901", "text": "def pick(stuff):\n total = 0\n for i in stuff:\n total += i[1]\n r = random()*total\n for i in stuff:\n if r < i[1]:\n return (i[0], int((random()**2*(i[2][1]-i[2][0]+1)) + i[2][0]))\n break\n else:\n r -= i[1]", "title": "" }, { "docid": "90f231f55b5f8cc32b10cead7abe7025", "score": "0.5356403", "text": "def generate_individual(self):\n for i in range(self.size()):\n gene = int(round(random.random()))\n self.genes[i] = gene", "title": "" }, { "docid": "050e3a8aed27cd1f1d00058148da07ce", "score": "0.5352428", "text": "def random_selection(population):\n return random.choice(population)", "title": "" }, { "docid": "050272e1d84db5ea2c642b2a9418f764", "score": "0.53359026", "text": "def generarlistarandom():\r\n listarandom = []\r\n cant = random.randint(10,99)\r\n for i in range(cant):\r\n listarandom.append(random.randint(1000,9999))\r\n return listarandom", "title": "" }, { "docid": "9b645f9ded8ca0aceef6efb3d41f7c1f", "score": "0.5332811", "text": "def paulis_gen_func(nr_seqs, cycles):\n def gen_random(cycles):\n 
s_gates = [\"X90 \", \"Y90 \", \"Z45 \"]\n lis = []\n for length in cycles:\n i = 0\n gates = []\n gates.append(s_gates[1] + \"qb_1\")\n sim_str = ' ' if 'Z' in s_gates[1][0:3] else 's '\n gates.append(s_gates[1][0:3] + sim_str + \"qb_2\")\n gates.append(s_gates[2] + \"qb_1\")\n sim_str = ' ' if 'Z' in s_gates[2][0:3] else 's '\n gates.append(s_gates[2][0:3] + sim_str + \"qb_2\")\n gates.append(\"CZ \" + \"qb_1 qb_2\")\n if length > 0:\n while i < (length - 1):\n last_1_gate1 = gates[-3][0:4]\n\n choice1 = []\n for gate in s_gates:\n choice1.append(gate)\n choice1.remove(last_1_gate1)\n gate1 = random.choice(choice1)\n gates.append(gate1 + 'qb_1')\n\n last_1_gate2 = gates[-3][0:3] + ' '\n choice2 = []\n for gate in s_gates:\n choice2.append(gate)\n choice2.remove(last_1_gate2)\n gate2 = random.choice(choice2)\n sim_str = ' ' if 'Z' in gate2[:3] else 's '\n gates.append(gate2[:3] + sim_str + 'qb_2')\n gates.append(\"CZ \" + 'qb_1 qb_2')\n i += 1\n lis.append(gates)\n return lis\n return [gen_random(cycles) for _ in range(nr_seqs)]", "title": "" }, { "docid": "1212166345da1e1580dd281613047361", "score": "0.5325203", "text": "def test_select_trial_single_item(self):\n self.mock.random.return_value = 0.0\n trial = data_types.Trial(probability=0.5)\n self.assertEqual(trials.select_trial([trial]), trial)", "title": "" }, { "docid": "069ee590fc4410a20f40318aad709735", "score": "0.53246355", "text": "def sample(iterable, n):\n reservoir = []\n for t, item in enumerate(iterable):\n if t < n:\n reservoir.append(item)\n else:\n m = randint(0,t)\n if m < n:\n reservoir[m] = item\n return reservoir", "title": "" }, { "docid": "a1879c28afd05994a616a4af264fb909", "score": "0.53220254", "text": "def getRandom(self):\n return self.lst[random.randint(0, len(self.lst)-1)]", "title": "" }, { "docid": "fefb07a25baa8eb39fb6c2408da2a64f", "score": "0.532178", "text": "def population(count):\n return [ individual() for x in range(count) ]", "title": "" }, { "docid": "7f437bcba37afb4c2afbf34bee633c1f", "score": "0.5307889", "text": "def initial_clients(self):\r\n listOfNames= ['Adam Paul', 'Aldea Alex', 'Anghel Victor', 'Bocioanca Alex', 'Birza Ana', 'Cornea Mihai', 'Suciu Cezar', 'Fleseriu Madalina',\r\n 'Achimet Clarissa', 'Fetean Flavius', 'Idu Sergiu', 'Avramita Beniamin', 'Razvan Muntean', 'Sabin Encea', 'Roxana Cindrea',\r\n 'Alex Damian', 'Veres Konrad', 'Teodora Surdu', 'Albu Andreea', 'Serafin Diana', 'Paraschiv Noela', 'Surdu Andreea']\r\n\r\n clientsCount = 0\r\n while clientsCount < 10:\r\n clientID = random.randint(1, 9999)\r\n clientID = str(clientID)\r\n clientName = random.choice(listOfNames)\r\n try:\r\n clientToAdd = Client(clientID, clientName)\r\n self.add(clientToAdd)\r\n clientsCount += 1\r\n except:\r\n pass", "title": "" }, { "docid": "f1f7de2fc6996cea847c9c409c1efa17", "score": "0.5306838", "text": "def generate_individual():\n genes = []\n for i in range(_n_cities):\n genes.append(i + 1)\n random.shuffle(genes)\n return genes", "title": "" }, { "docid": "3d37ede23639416b21feefbe6a179b09", "score": "0.5302517", "text": "def random_description():\n description = list(REC.RENTAL_ITEM.values())\n return random.choice(description)", "title": "" }, { "docid": "37f719d159b2ffaddd18de9a5378e8d4", "score": "0.52969825", "text": "def getRandomTargetFromTargetList(target_list):\n\n index = random.randrange(0,len(target_list))\n string = target_list[index]\n del target_list[index]\n return string", "title": "" }, { "docid": "e04f23428729d397a2f4cbe9e6644e38", "score": "0.5294186", "text": "def 
getRandom(self) -> int:\n \n return random.choice(self.list )", "title": "" }, { "docid": "962022e4b62723f22a86662f47edc260", "score": "0.5293681", "text": "def selecta(buys, receives, n):\r\n \r\n def rand(buys, receives, n):\r\n \"\"\"\r\n randomly selects two people\r\n \"\"\"\r\n b = randint(0, n - 1)\r\n r = randint(0, n - 1)\r\n return buys[b], receives[r]\r\n \r\n b, r = rand(buys, receives, n)\r\n while b == r:\r\n b, r = rand(buys, receives, n)\r\n return b, r", "title": "" }, { "docid": "c83ab100fe94802489ea7fe678d61ad3", "score": "0.52887905", "text": "def generateStart(_count):\n\tpop = []\n\tfor i in range(_count):\n\t\ttmp = \"\"\n\t\tfor i in range(random.randint(1,41)):\n\t\t\ttmp += random.choice(letters)\n\t\tpop.append(tmp)\n\treturn pop", "title": "" }, { "docid": "ae7caa84c77bce1b7b109e7d7dfcca7c", "score": "0.5285601", "text": "def shuffel(self):\n \n self.complete =False\n while not self.complete:\n self.random_list_item = 0\n random.shuffle(self.element)\n for self.item in self.door:\n self.door[self.item]=self.element[self.random_list_item]\n self.random_list_item = self.random_list_item +1\n #print(random_list_item)\n #if random_list_item == 2:\n self.complete =True", "title": "" }, { "docid": "904e94c7b215a526234537a62a05a2bb", "score": "0.5280725", "text": "def randwords_multiple(wordlist,wpm,effwpm,filename,call,wordspace):\n os.system('cls')\n print(\"\"\"\\nYou selected a file generation from randomized\nwords up to a certain range (words may be repeated)\"\"\")\n rnge=int(input(\"\\nEnter the max range (max 5000):\"))\n amount=int(input(\"\\nHow many words do you want in the file? \"))\n words=wordlist[:rnge-1]\n output=[]\n for i in range(0,amount-1,1):\n output.append(random.choice(words))\n\n print(\"\\n..List generated\")\n writefile(output,wpm,effwpm,filename,call,wordspace)\n input(\"\\nPress ENTER to continue..\")", "title": "" }, { "docid": "98d12de603003dcdd007fd4ef539ee55", "score": "0.52791184", "text": "def pick(self):\n return self[random.choice(list(self.names()))]", "title": "" }, { "docid": "9db3ad9af79a9d4b6aeb700bff083fae", "score": "0.52768177", "text": "def _random_item(self, general_item_type):\n # A scroll will have a random spell inscribed on it.\n if general_item_type == M_Item.SCROLL:\n return self._scroll()\n\n item_name_template = M_ITEM_TEMPLATE_TABLE.random()\n\n item_info = []\n wizard_name = None\n\n # Get a random entry for each category in the item name template.\n for table in item_name_template.fields:\n # Wizard names are split into prefixes and suffixes across two\n # tables, so when the prefix table comes up, we'll generate\n # the whole name, then skip the suffix when it comes up next (since\n # we don't want to accidentally generate two wizard names).\n if self.is_wizard_name(table, M_ItemName):\n if wizard_name is None:\n wizard_name = self.generate_wizard_name(M_ItemName)\n item_info.append(wizard_name)\n\n elif table == M_ItemName.ITEM:\n item = self.generate_specific_item(general_item_type)\n item_info.append(item)\n\n else:\n feature = self.tables[table].random()\n item_info.append(feature)\n\n name_string = item_name_template.string\n\n # # We have to unpack the item info list for this string formatting to work.\n item_name = name_string.format(*item_info)\n return item_name", "title": "" }, { "docid": "0ed7cde1e90ac58ef8c1d65ecf43c079", "score": "0.52749825", "text": "async def choose(self, ctx, *args):\n\t\tchoices = ' '.join(args)\n\t\tmyList = choices.split(\";\")\n\t\tmsg = random.choice(myList)\n\t\tthonk = 
\" <:thonking:455992031752355870> \"\n\t\tawait ctx.send(thonk + msg)", "title": "" }, { "docid": "e088d5b8bd2c8f586a1c633a99609464", "score": "0.5272468", "text": "def get_random_items(self, N, replace = True, excluding = None):\n if self.num_items == 0:\n raise ValueError(\"Graph has no items\")\n return np.random.choice([i for i in self.get_items() if i != excluding], N, replace)", "title": "" }, { "docid": "9bac0d71172ede2bb31e7a5013edc3fe", "score": "0.5271484", "text": "def uniform_random_choice(choices):\n random_index = np.random.randint(len(choices))\n return choices[random_index]", "title": "" }, { "docid": "c26deca3c7b079e6c1a7f28a23baa530", "score": "0.52706194", "text": "def make_ingredients():\n local_ingredients = [\n {\n \"name\": \"pizza dough extra\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 2.5,\n \"minimum_limit\": 100,\n },\n {\n \"name\": \"pizza dough std\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 2.1,\n \"minimum_limit\": 200,\n },\n {\n \"name\": \"extra-virgin olive oil\",\n \"id_unit\": 'L',\n \"value_unit\": 1,\n \"unit_price\": 2.5,\n \"minimum_limit\": 50,\n },\n {\n \"name\": \"Frankโ€™s hot sauce\",\n \"id_unit\": 'Ml',\n \"value_unit\": 200,\n \"unit_price\": 2.4,\n \"minimum_limit\": 2000,\n },\n {\n \"name\": \"bulk butter\",\n \"id_unit\": 'Kg',\n \"value_unit\": 2,\n \"unit_price\": 4.4,\n \"minimum_limit\": 20,\n },\n {\n \"name\": \"chicken legs\",\n \"id_unit\": '.',\n \"value_unit\": 100,\n \"unit_price\": 45.6,\n \"minimum_limit\": 10,\n },\n {\n \"name\": \"whole-milk plain yogurt\",\n \"id_unit\": 'G',\n \"value_unit\": 1,\n \"unit_price\": 5.0,\n \"minimum_limit\": 2,\n },\n {\n \"name\": \"lemon juice\",\n \"id_unit\": 'L',\n \"value_unit\": 1,\n \"unit_price\": 3.2,\n \"minimum_limit\": 2,\n },\n {\n \"name\": \"garlic powder\",\n \"id_unit\": 'G',\n \"value_unit\": 1000,\n \"unit_price\": 5.2,\n \"minimum_limit\": 1000,\n },\n {\n \"name\": \"mild blue cheese\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 6.2,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"mozzarella cheese\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 5.2,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"parmesan cheese\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 4,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"gruyere cheese\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 4.4,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"White cheese\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 4.4,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"tomato sauce\",\n \"id_unit\": 'L',\n \"value_unit\": 1,\n \"unit_price\": 3.3,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"rotten tomato sauce\",\n \"id_unit\": 'L',\n \"value_unit\": 1,\n \"unit_price\": 2.1,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"salt\",\n \"id_unit\": 'G',\n \"value_unit\": 1000,\n \"unit_price\": 2,\n \"minimum_limit\": 2000,\n },\n {\n \"name\": \"pepper\",\n \"id_unit\": 'G',\n \"value_unit\": 1000,\n \"unit_price\": 3,\n \"minimum_limit\": 2000,\n },\n {\n \"name\": \"salmon slices\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 5,\n \"minimum_limit\": 3,\n },\n {\n \"name\": \"fresh egg\",\n \"id_unit\": '.',\n \"value_unit\": 1,\n \"unit_price\": 0.5,\n \"minimum_limit\": 40,\n },\n {\n \"name\": \"mushrooms\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 2.5,\n \"minimum_limit\": 4,\n },\n {\n \"name\": \"ham\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n 
\"unit_price\": 3.4,\n \"minimum_limit\": 5,\n },\n {\n \"name\": \"prosciutto sliced\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 4.1,\n \"minimum_limit\": 2,\n },\n {\n \"name\": \"Pepperoni\",\n \"id_unit\": 'Kg',\n \"value_unit\": 1,\n \"unit_price\": 3.1,\n \"minimum_limit\": 4,\n },\n ]\n return local_ingredients", "title": "" }, { "docid": "c96939123c771d001f730b60ca04ccd9", "score": "0.5270418", "text": "def generateItems(self, c):\n self.itemValues = np.random.normal(50, 15, size = c)\n self.itemCounts = [0] * c", "title": "" }, { "docid": "940c7a341ac5d47ea85764031584c279", "score": "0.52671874", "text": "def buildList(listSize):\n randomList = random.sample(xrange(1, listSize+100), listSize)\n return randomList", "title": "" }, { "docid": "4f509afeb2320503b26881a52fa03e0a", "score": "0.52663606", "text": "def DiscreteMCOperator(valuelist):\n assert valuelist != [], \"'valuelist' must not be empty!\"\n yield random.choice(valuelist)", "title": "" }, { "docid": "a24cbaffbbacabb36aae5100bb9a8696", "score": "0.5262703", "text": "def __call__(self, key, values):\n for item in random.sample(list(values), SAMPLE_SIZE):\n yield (item,)", "title": "" }, { "docid": "6443530c511874bdc7dc1a335501c488", "score": "0.52616894", "text": "def random_code(lst):\n # Creates a random number from 0 until te length of te list\n code_choice = lst[random.randint(0, (len(lst)-1))]\n return code_choice", "title": "" }, { "docid": "bcd9203bcc10a3c55a17d094ef3f4fec", "score": "0.5260873", "text": "def randoms(self, qty):\n count = self.aggregate(count=Count('id'))['count']\n rand_ids = sample(xrange(1, count), qty)\n return self.filter(id__in=rand_ids)", "title": "" }, { "docid": "3573a01d9e87ffc240d9666702406a6c", "score": "0.5257253", "text": "def randomSelection(team_dict):", "title": "" }, { "docid": "b297d96c2dbadd8ba560af0b0a2edd9b", "score": "0.52546716", "text": "def generateColorstring():\n string = []\n for i in range(1, 5):\n string.append(random.choice(kleurenList))\n return string", "title": "" }, { "docid": "93a38eb787b2043a6e38d8f33f5f3a7c", "score": "0.5247118", "text": "def return_random(self): \n \n j = random.randint(0,len(self.list_IDs)-1)\n \n X,S,mel,y = self.__data_generation([self.list_IDs[j]])\n\n return X,S,mel,y", "title": "" }, { "docid": "2c9f9eb9d96b42021cc294ebd675bf7c", "score": "0.52359647", "text": "def generate_items(self) -> None:\r\n pass", "title": "" }, { "docid": "a0024f6dc0bef4c8a01be6e789b0982c", "score": "0.52323323", "text": "def test_random_selection():\r\n results = []\r\n random_selection = yaml_file[\"random_selection_shelves\"]\r\n shelf1 = random_selection['lower']\r\n shelf2 = random_selection['upper']\r\n #loop the reaction 30 times, it is likely both possible selections will\r\n #occur if the selection is random\r\n for y in range(30):\r\n lab = Laboratory(shelf1, shelf2)\r\n results_current = lab.run_full_experiment(reactions= False)\r\n results.append(results_current)\r\n result_four = yaml_file[\"results_4\"]\r\n result_five = yaml_file[\"results_5\"]\r\n assert(result_four in results and result_five in results)", "title": "" }, { "docid": "09cdcffc3c67bdb266d3b7c643f4a5af", "score": "0.5226376", "text": "def getLoot(self):\n\n listeObjet = self.eventJson[\"loot\"]\n\n inv = Inventaire(0,0,4)\n\n if len(listeObjet) > 0:\n for i in range(2):\n objet = random.choice(listeObjet)\n if objet == \"Patate\":\n inv.ajouter(Objet.objets[objet], random.randint(1,7))\n else:\n inv.ajouter(Objet.objets[objet])\n return inv", "title": "" }, { 
"docid": "2675d750aba2fd81e4467f57a0cb8010", "score": "0.5208492", "text": "async def choose(self, ctx, *, choices : str):\r\n try:\r\n possible = choices.split(\";\")\r\n if len(possible) < 2: raise Exception()\r\n final_msg = await ctx.reply(embed=self.bot.util.embed(author={'name':\"{}'s choice\".format(ctx.author.display_name), 'icon_url':ctx.author.avatar_url}, description=random.choice(possible), color=self.color))\r\n except:\r\n final_msg = await ctx.reply(embed=self.bot.util.embed(title=\"Give me a list of something to choose from, separated by `;`\", color=self.color))\r\n await self.bot.util.clean(ctx, final_msg, 45)", "title": "" }, { "docid": "7079dd6345c7f4d1231696e5ae85c628", "score": "0.5205025", "text": "def generate_in_random_order(point_list):\n point_list = copy.copy(point_list)\n random.shuffle(point_list)\n for candidate in point_list:\n yield candidate", "title": "" }, { "docid": "22aa7dd18bba1f595d16d8e27642a7df", "score": "0.5202232", "text": "def getRandom(self):\n # pick a random number from the list\n return choice(self.nums)", "title": "" }, { "docid": "dcea50bd397c894c8dc4a3685ecdbe13", "score": "0.5195701", "text": "def _choice_rec(nb_choosen:int, it:iter, it_size:int, random:callable):\n if nb_choosen == 0:\n return []\n try:\n elem = next(it)\n it_size -= 1\n except StopIteration:\n return []\n likelihood = (nb_choosen / (1+it_size))\n if random() <= likelihood:\n return [elem] + _choice_rec(nb_choosen - 1, it, it_size, random)\n else: # don't take the elem\n return _choice_rec(nb_choosen, it, it_size, random)", "title": "" }, { "docid": "258bf8fcc1bb5861beff8e25df5a9079", "score": "0.51934403", "text": "def generate_fruit(screen):\n\n n = random.randint(1,3)\n if n == 1:\n return Apple(screen)\n elif n == 2:\n return Banana(screen)\n else:\n return Strawberry(screen)", "title": "" }, { "docid": "7dcef38696af77d7f700d58a94ac3362", "score": "0.5187952", "text": "def get_randomSample(conn, sample):\n sql = \"\"\" SELECT id FROM ideas_ \"\"\"\n mycursor = conn.cursor()\n mycursor.execute(sql)\n ids = [item[0] for item in mycursor.fetchall()]\n sample_list = tuple(random.sample(ids, sample))\n sql_2 = (\"\"\"SELECT ideas FROM ideas_ WHERE id IN {}\"\"\".format(sample_list))\n mycursor.execute(sql_2)\n results = mycursor.fetchall()\n data_list = list(chain.from_iterable(results))\n return data_list", "title": "" }, { "docid": "7daf3aa117dd11e15d61ce31fb8eb35e", "score": "0.51739115", "text": "def generator(random: random.Random, args: Dict) -> List:\r\n\r\n return [random.uniform(0.0, 1.0) for _ in range(cc.N_EVOLVABLE_GENES)]", "title": "" }, { "docid": "9bd598a0c18dc37de9d24bcc2e90c1ea", "score": "0.5173508", "text": "def mock_productos():\n for i in range(1, 1000):\n Producto('Producto ' + str(i), 'Descripcion ' + str(i), i).insert()", "title": "" }, { "docid": "f5b69ea3ecbc478ee3c780ce12964f0a", "score": "0.51713943", "text": "def sample(l, n):\n if type(l) is dict:\n l = set(l)\n if n == 0:\n return []\n if len(l) > n:\n return random.sample(l, n)\n return list(l)", "title": "" }, { "docid": "2963cf3495caf1dd970a06a339681cf4", "score": "0.51621", "text": "def iter_name_lists(min_len=3, max_len=10):\n names_gen = faker.Faker()\n for length in range(min_len, max_len):\n yield {names_gen.first_name() for _ in range(length)}", "title": "" }, { "docid": "437ae0043f69af8788cad2ccb1121143", "score": "0.5160986", "text": "def randomStringGenerator(l):\n from random import choice\n chars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n 
randomString = \"\"\n for i in range(l):\n randomString += choice(chars)\n return randomString", "title": "" }, { "docid": "ba51953bbaa6c7ec4ee9a55ade328d07", "score": "0.51586825", "text": "async def rip(self, ctx: Context, *, names: comma_separated_values):\n\n for name in names:\n if name != 'EVERYONE':\n name = name.lower()\n if name in ME_ALIASES:\n name = ctx.author.name.lower()\n out = await self.db.find_one(\n {'names': {'$in': [name]}},\n {'rips': ''})\n if out:\n try:\n await ctx.send(random.choice(out['rips']))\n except IndexError:\n return", "title": "" }, { "docid": "ce93612d46679a409be0cd6cf13d11de", "score": "0.5153481", "text": "def buildTermsToProc(self, num):\r\n self.termsToProc = []\r\n keys = self.db.keys()\r\n for x in range(num):\r\n self.termsToProc.append(random.choice(keys))", "title": "" }, { "docid": "da5ff8d89fd768ee26edc0cf3fd07215", "score": "0.5152104", "text": "def random_movies():\n movies = list(Movie.objects.all())\n return sample(movies, 3)", "title": "" }, { "docid": "91489a4d0529edb7aaa61ecf14cfb918", "score": "0.5147231", "text": "def get_initiative():\n return [random.randint(1, 20), random.randint(1, 20)]", "title": "" }, { "docid": "999023a1ddd53e3f618dffc7dfb58a2f", "score": "0.51452154", "text": "def get_data(self):\n if random.randint(0,8) == 1:\n return random.choice(['Sexy','Beautiful','Handsome','Gorgeous'])\n else:\n return self.name", "title": "" }, { "docid": "375cbbd1fff613d5845d05bfe63043e4", "score": "0.5145059", "text": "def rnd_indexes(ln, nind):\n import random\n rindxs = random.sample(range(0,ln),nind)\n\n return rindxs", "title": "" }, { "docid": "a1513041a7ba1511b30fbf30438e8c49", "score": "0.5144503", "text": "def _selection(self, ranked, elites):\n mating_pool = []\n for i in range(elites):\n mating_pool.append(ranked[i])\n \n remaining = random.sample(ranked[elites:], len(ranked)-elites)\n mating_pool = mating_pool+remaining\n return mating_pool", "title": "" }, { "docid": "9be4a20d721aa9c2e8e1c47bd5044ed1", "score": "0.51441395", "text": "def get_item(data_list, removal_list):\n item = data_list[random.randint(0, len(data_list))]\n removal_list.append(item)\n data_list.remove(item)\n return item", "title": "" }, { "docid": "dd3092013f0241ba2b5b7df3eabb8461", "score": "0.5143673", "text": "def generateText(d, N):\n pass\n pw = random.choice(d['$'])\n print(pw)\n for i in range(0, N): \n nw = random.choice(d[pw])\n print(nw)\n pw = nw\n if nw[-1] in '.!?':\n pw = '$'\n #first choose words that follow $ radomly\n #second word will be randomly chosen among the list of words that followc firsr word\n #if seees a ?!. it should choose a random words from amon those that follow $", "title": "" }, { "docid": "628d351ad8d99d6b81fbfa0379d0fa51", "score": "0.5143418", "text": "def choose_word(word_list):\n return random.choice(word_list)", "title": "" } ]
4fa87d05fb8569c1d1b575e0e1c84c93
run the given function while collecting arguments to a target function
[ { "docid": "bfc485186e6c37622ae9b1a850793e88", "score": "0.0", "text": "def get_contexts_via_monitor(driver, caller_va, decoder_fva: int, index: viv_utils.InstructionFunctionIndex):\n try:\n caller_fva = index[caller_va]\n except KeyError:\n logger.trace(\" unknown function\")\n return []\n\n logger.trace(\"emulating: %s, watching %s\" % (hex(caller_fva), hex(decoder_fva)))\n monitor = CallMonitor(caller_va)\n with installed_monitor(driver, monitor):\n with floss.api_hooks.defaultHooks(driver):\n try:\n driver.run(caller_fva)\n except Exception as e:\n logger.debug(\"error during emulation of function: %s\", str(e))\n contexts = monitor.get_contexts()\n\n logger.trace(\" results:\")\n for _ in contexts:\n logger.trace(\" <context>\")\n\n return contexts", "title": "" } ]
[ { "docid": "f6aaecb9bcb2c146ba087c9f92a0222e", "score": "0.7327121", "text": "def run(self):\n if self.function:\n self.function(*self.args)", "title": "" }, { "docid": "0f44c5f93b96523d3fd03ac59bdde2cc", "score": "0.7171412", "text": "def run(self):\n self.fn(*self.args, **self.kwargs)", "title": "" }, { "docid": "c5b34c96b6268f8a342d8aa3f097510c", "score": "0.7152547", "text": "def run(self):\n result = self.fn(*self.args, **self.kwargs)", "title": "" }, { "docid": "9691b8ffb7c5594ef68fafd1e101fb19", "score": "0.6811954", "text": "def run(args, func=len):\n kwargs = {}\n\n # get arg set\n for key, val in vars(args).items():\n if not callable(val):\n kwargs[key] = val\n\n # run and print result\n sys.stdout.write(str(func(**kwargs)))\n sys.stdout.flush()\n return", "title": "" }, { "docid": "f0e7cc744b692fc1727c50253b512213", "score": "0.6425038", "text": "def execute(self, func, **kwargs):", "title": "" }, { "docid": "6d73105c7b5aa8ba15231f37c688a694", "score": "0.64166284", "text": "def apply(self, func):\n return func(*self.args, **self.kwargs)", "title": "" }, { "docid": "27ef8186a2a1ce53f3b09de897ffd54e", "score": "0.63753736", "text": "def process_arguments(self, node, state, call_source):\n return [\n self.gen_grfn(arg, state, call_source)\n for arg in node.args\n ]", "title": "" }, { "docid": "ddf773de0e6fb026b5133d658aa8044a", "score": "0.63665676", "text": "def main():\n known_args, unknown_args = parse_arguments().parse_known_args()\n known_args.func(known_args=known_args)", "title": "" }, { "docid": "42f49819b1fd16303f3a17b81be1603b", "score": "0.627256", "text": "def apply_function(fn, *args):\n return fn(*args)", "title": "" }, { "docid": "2bcdf737b320fadfdeeaeecc1e1a0ac3", "score": "0.62473434", "text": "def __call__(self, *args):\n assert len(args) == 2 + self.expectedIDs\n id1 = ID_ANY\n id2 = ID_ANY\n target = args[0]\n if self.expectedIDs == 0:\n func = args[1]\n elif self.expectedIDs == 1:\n id1 = args[1]\n func = args[2]\n elif self.expectedIDs == 2:\n id1 = args[1]\n id2 = args[2]\n func = args[3]\n else:\n raise ValueError(\"Unexpected number of IDs\")\n \n self.Bind(target, id1, id2, func)", "title": "" }, { "docid": "e016da7a5f27c8497d5b5530ad78710d", "score": "0.6231041", "text": "def map(self, func, args_list):\r\n for args in args_list:\r\n self.add_task(func, args)", "title": "" }, { "docid": "5f4b313cc38d8e827276a469546d5843", "score": "0.62268883", "text": "def apply(self, func):\r\n return func(**self.kwargs)", "title": "" }, { "docid": "4784edd943c734d10e14b8ab0be196a5", "score": "0.61788833", "text": "def map_custom (func, *args):\n result = []\n if not len(args) == 1:\n for i in range(len(args)):\n result.append(func(args[i]))\n else:\n for arg in args:\n for i in range(len(arg)):\n# print(arg[i])\n result.append(func(arg[i]))\n return result", "title": "" }, { "docid": "bab4e44d404a79d36623d1dce2de8fef", "score": "0.61541927", "text": "def _Processor(function):\n function('foo')", "title": "" }, { "docid": "2a8edcd29ee6c2851e5b8b3fd520a7e3", "score": "0.61330926", "text": "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "title": "" }, { "docid": "2a8edcd29ee6c2851e5b8b3fd520a7e3", "score": "0.61330926", "text": "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "title": "" }, { "docid": "2a8edcd29ee6c2851e5b8b3fd520a7e3", "score": "0.61330926", "text": "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "title": "" }, { "docid": 
"e9009aa4304ce62dd945f1f94c33bf29", "score": "0.6129845", "text": "def __call__(self, func, *args, **kwargs):\n return self.Run(func, *args, **kwargs)", "title": "" }, { "docid": "4e085db2c8b5e2b376ec7d56cd35ae1e", "score": "0.6096024", "text": "def run(self, fn, args=(), kwargs=None, options=None):\n _validate_run_function(fn)\n return super().run(fn, args, kwargs, options)", "title": "" }, { "docid": "1240a605fda13fdd7f345e2fd346d71f", "score": "0.6065847", "text": "def run(self):\n return self._target(*self._args, **self._kwargs)", "title": "" }, { "docid": "49eaf539d4ddeddf43cf083a44ef2a77", "score": "0.60335827", "text": "def wetRun(self, func, *args, **kwargs):\n return func(*args, **kwargs)", "title": "" }, { "docid": "5a1d636ccdad7325aa31ff0ffa208dd4", "score": "0.60332155", "text": "def _wrapped_f(argstup):\n # if has_mpi:\n # print(\"Rank %s/%s, node: %s\" %(comm.rank, comm.size, MPI.Get_processor_name()))\n func = argstup[0]\n args = argstup[1]\n kwargs = argstup[2]\n return func(*args, **kwargs)", "title": "" }, { "docid": "2df75998f2cb64afb28b283b302e0294", "score": "0.6017655", "text": "def map(self, func, args_list):\n for args in args_list:\n self.add_job(func, args)", "title": "" }, { "docid": "ebbcde6f244121e37c6dfb919d091009", "score": "0.59994066", "text": "def run(self, *args, **kwargs):\r\n return self.f(*args, **kwargs)", "title": "" }, { "docid": "dfacc4d9aec135ecd3909f683b7d3bd3", "score": "0.59984225", "text": "def call(fn, *arg):\n return fn(*arg)", "title": "" }, { "docid": "b95896447034ef29e4edf771a4b51fa2", "score": "0.5996346", "text": "def _call_function(self, f, args: tuple, kwargs: dict):\n return f(*args, **kwargs)", "title": "" }, { "docid": "73178121cf611e2cf1d06898bbb94c09", "score": "0.5952403", "text": "def main():\n function = args.function\n if len(sys.argv) >= 4:\n argument = args.argument\n else:\n argument = \"None\"\n assert function in ['benchmark', 'indicator'], \\\n 'Function is not one of benchmark or indicator : ' + function\n process(function, argument)", "title": "" }, { "docid": "78c381a42c4dd203402042bd5ae5f51e", "score": "0.59185743", "text": "def processRun( self, run, *args, **kwds ):\n pass", "title": "" }, { "docid": "78c381a42c4dd203402042bd5ae5f51e", "score": "0.59185743", "text": "def processRun( self, run, *args, **kwds ):\n pass", "title": "" }, { "docid": "9f3853e8eec4d2fa22ff982a71a9914d", "score": "0.5913255", "text": "def apply(self, _input, profile_run=False, **kw): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "e4922253e8e45ee9d2e6580763c14308", "score": "0.5882692", "text": "def run_cflow_on_function(source_files_list, function_name, args_list=None):\n\n if args_list is None:\n args_list = ['--main', function_name]\n else:\n args_list = ['--main', function_name] + args_list\n\n return run_cflow(source_files_list, args_list)", "title": "" }, { "docid": "6084fe3c8aaa32ae2e38143065723c42", "score": "0.5867746", "text": "def input_fn(*args):\n return args", "title": "" }, { "docid": "03ee785c0b639f523f8ad3cecb7e51b8", "score": "0.58369756", "text": "def run_step(self, perturbations, function_values,\n current_input, current_value):\n raise NotImplementedError(\"Abstract method\")", "title": "" }, { "docid": "c61ea844b02d11bbb6e23364faa06bec", "score": "0.5831604", "text": "def apply_(func_, a):\n for x in a:\n func_(x)", "title": "" }, { "docid": "d57b3098314150418498238b9819716b", "score": "0.58183765", "text": "def as_args(function):\n return lambda x: function(*x)", "title": "" }, { 
"docid": "9f540be5149fa834a35d022879ecf538", "score": "0.5812871", "text": "def act_process(act_fun, next_id):\n return lambda pdef, d: dirac_of((apply_from_data(act_fun, d), get_process(pdef, next_id)))", "title": "" }, { "docid": "8b41530b9af9ee80fcb5a6f49c59ff31", "score": "0.58033895", "text": "def callFunction(self):\r\n self.callback(*self.args)#Star unpacks\r", "title": "" }, { "docid": "d2515c997bc5b92782ff500b280bb16a", "score": "0.5800658", "text": "def agent_run(*args):\r\n Trainer(*args)()", "title": "" }, { "docid": "dc1b126f5372c1c15189bdf3304c82b7", "score": "0.5793429", "text": "def _exec(self):\n try:\n self._result = self._f(*self._args, **self._kwargs)\n finally:\n self._complete.set()", "title": "" }, { "docid": "56c7cdc73009d7eb1bf6366a296e9d34", "score": "0.5778688", "text": "def __call__(self, x, *newargs, **newkwargs):\n # loop over input arguments (non-keyword)\n for n in range(len(newargs)):\n arg = self.input_arguments[n]\n name, funcno = self.argument_map[arg]\n self.arguments[funcno][name] = newargs[n]\n # Loop over keyword input arguments\n for arg in newkwargs.keys():\n name, funcno = self.argument_map[arg]\n self.arguments[funcno][name] = newkwargs[arg]\n # Apply limits to arguments\n # Any argument values outside the limits will be reverted to defaults\n self.applylimits()\n # Call each function using the stored argument values\n # Sum all the profile functions together\n return np.sum([fn(x, **ags) for fn, ags in zip(self.functions, self.arguments)], axis=0)", "title": "" }, { "docid": "424ca484752f6fcfdb68782320b68df5", "score": "0.57687956", "text": "def test_feed_args():\n def feed_args_target(a: int, b: str) -> str:\n \"\"\"The target function of `feed_args`.\"\"\"\n return \"{0}-{1}\".format(a, b)\n\n options = {\"a\": 1, \"b\": \"example\", \"c\": 2}\n kwargs = feed_args(feed_args_target, options)\n assert feed_args_target(**kwargs) == \"{0}-{1}\".format(options[\"a\"], options[\"b\"])", "title": "" }, { "docid": "952f0232ce1fc71e09bb26468ab13833", "score": "0.5767042", "text": "def run(func, env):\n lower_costful(func)", "title": "" }, { "docid": "493fcdafe530e0be24d16fd6a4647319", "score": "0.5762383", "text": "def run(*tup, **key_val):", "title": "" }, { "docid": "a272059c734992bc69bdc777aebaab33", "score": "0.5759676", "text": "def my_fn(*args):\n return sum(args)", "title": "" }, { "docid": "a272059c734992bc69bdc777aebaab33", "score": "0.5759676", "text": "def my_fn(*args):\n return sum(args)", "title": "" }, { "docid": "0533586e1af6d3dffba5a613402c933e", "score": "0.5755177", "text": "def map(f, ma, *args):", "title": "" }, { "docid": "56e6d94daab81063524850c541004fb3", "score": "0.5753439", "text": "def runner(*args):\n def call(obj, attr):\n \"\"\"Function for calling objects\n\n :param obj: object to call\n :param attr: name of attribute\n :return: None\n \"\"\"\n attr_in_query = attr in args or not args\n if isinstance(obj, FunctionType) and attr_in_query:\n try:\n obj()\n except TypeError as error:\n print(\n '!!!! {} from {} call '\n 'error: {}'.format(obj.__name__, obj.__module__, error)\n )\n except Exception:\n print(\n '!!!! 
{} from {} call '\n 'error'.format(obj.__name__, obj.__module__)\n )\n\n all_attrs = globals()\n\n list_of_modules = [\n obj for attr, obj in all_attrs.items()\n if type(obj).__name__ == 'module'\n ]\n\n for module in list_of_modules:\n for attr in module.__dir__():\n obj = getattr(module, attr)\n call(obj, attr)\n\n for attr, obj in all_attrs.items():\n if attr != 'runner':\n call(obj, attr)", "title": "" }, { "docid": "5b3de3b4b5dbc63c0acd2e32cc196d3c", "score": "0.57347655", "text": "def apply(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "cdd9860f50713fa30b135f4c0ceca911", "score": "0.57336426", "text": "def _call(self, fn, kwargs):\n step_name = _nameof(fn, str(self.steps.index(fn)))\n with tracer(step_name, step=fn, **kwargs) as tr:\n try:\n if not isinstance(fn, FunctionType):\n fn = fn.__call__\n valid_args, rest = partition_keywords(fn, kwargs)\n results = fn(**valid_args)\n if not isinstance(results, dict) and len(valid_args) == 1:\n (field_name,) = valid_args.keys()\n results = {field_name: results}\n kwargs = {**kwargs, **results}\n tr.set_results(**kwargs)\n except Exception as err:\n tracer().emit_error(error=err)\n raise\n return kwargs", "title": "" }, { "docid": "b022e4b5248659f2cf33cc3d01b7a381", "score": "0.57335556", "text": "def run(self, args=None):\n parsed_args, extra_args = self.parser.parse_known_args(args)\n return parsed_args.func(parsed_args, extra_args=extra_args)", "title": "" }, { "docid": "90dcb909b2679cf5a21582fa79c98394", "score": "0.573309", "text": "def run_pass(self, process_op, **kwargs):", "title": "" }, { "docid": "7711e31fa68e1815c38e8a0f10d42d01", "score": "0.57139397", "text": "def call(fn, arg):\n return fn(arg)", "title": "" }, { "docid": "c41ad6316f05e66e26e0c817206c7578", "score": "0.57109517", "text": "def run(self, *args, **kwargs):", "title": "" }, { "docid": "8df484bfa29f6d3b4bbfe1dad593f09a", "score": "0.5701777", "text": "async def execute(self, fn, *args):\n infs = [self.get_inferrer_for(poss)\n for poss in await fn.get()]\n argrefs = [VirtualReference(a) for a in args]\n return await execute_inferrers(self, infs, None, argrefs)", "title": "" }, { "docid": "7f0071c1922ffd374f4d00918a24f59d", "score": "0.56956017", "text": "def exec_range_func(start, end):\n local_micro_offset = self.micro_offset + 1\n\n def exec_func(*inputs):\n if len(inputs) == 1:\n inputs = inputs[0]\n for idx, layer in enumerate(self.forward_funcs[start:end]):\n self.curr_layer = idx + self._local_start\n if self.seed_layers:\n new_seed = self.base_seed * local_micro_offset + self.curr_layer\n if self.seed_fn:\n self.seed_fn(new_seed)\n else:\n ds_utils.set_random_seed(new_seed)\n inputs = layer(inputs)\n return inputs\n return exec_func", "title": "" }, { "docid": "4883d414cba78d3c1c439243d4eac0ca", "score": "0.5692745", "text": "def etl():\n args = parse_args()\n args.func(args)\n log('Done')", "title": "" }, { "docid": "39897729f7d049f96b3d8e31a71923fa", "score": "0.5684544", "text": "def Invoke(self, targets, arguments):\n raise NotImplementedError('Must be overridden.')", "title": "" }, { "docid": "241fa1cc97087be54499a7c780546716", "score": "0.5675571", "text": "def call_func(args):\n args['func_ref'](args['host'])", "title": "" }, { "docid": "59852a9509de4d623d6faf9aa3891125", "score": "0.5673957", "text": "def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):\n self.run_functional_metric_arguments_test(\n preds=preds,\n target=target,\n metric_functional=retrieval_fall_out,\n 
message=message,\n exception_type=ValueError,\n kwargs_update=metric_args,\n )", "title": "" }, { "docid": "6f74d33c2e5abd6284f1dbcbc5e1beb0", "score": "0.5656909", "text": "def apply_to(x, f):\n return f(x)", "title": "" }, { "docid": "d6c8b6fbb7c41babb4b7d55f480b4a6c", "score": "0.56498975", "text": "def apply(self, args, env):\n # BEGIN PROBLEM 2\n \"*** YOUR CODE HERE ***\"\n args_list = []\n try:\n if args is nil:\n return self.fn()\n elif args.second is nil:\n if self.use_env:\n return self.fn(args.first, env)\n return self.fn(args.first)\n elif args.second.second is nil:\n if self.use_env:\n return self.fn(args.first, args.second.first, env)\n return self.fn(args.first, args.second.first)\n else:\n while args is not nil:\n if isinstance(args.first, Pair):\n args_list.extend(scheme_eval(args.first, env))\n else:\n args_list.append(args.first)\n args = args.second\n return self.fn(*args_list)\n except:\n raise SchemeError(\"Cannot call {0} as it's not a procedure\".format(args))\n\n # END PROBLEM 2", "title": "" }, { "docid": "3bd53a6c3697c48b29685d652ff46bde", "score": "0.5642604", "text": "def args_unpack(func):\n def wrapper(self, quest, current):\n return func(self, **quest.args)\n return wrapper", "title": "" }, { "docid": "1b49406e369e66e31cbe38e173ab4521", "score": "0.5636255", "text": "def __call__(self, *args, **kwargs):\n def x(func):\n self.regster(func, args)\n return func\n return x", "title": "" }, { "docid": "72da7c5a232c6bccfe20ed1a29a1d24a", "score": "0.56357664", "text": "def simulate(assignment, user, location, func_name, args):\n print 'calling %s(%s, %s, %s, *%s)' % (\n func_name, repr(assignment), repr(user), repr(location), repr(args))", "title": "" }, { "docid": "711d155e6b5dfc64ee5b9a56a89dcba5", "score": "0.56275517", "text": "def process_function(self):\n\t\tpass", "title": "" }, { "docid": "4da5501f9e8d11d994cb2b90b8ab0721", "score": "0.56247807", "text": "def do_args(self, args):\n\t return do_args(args)", "title": "" }, { "docid": "800975508ca138248e910f73591b533e", "score": "0.5614281", "text": "def expand_args(func):\n def wrapped(args):\n return func(*args)\n return wrapped", "title": "" }, { "docid": "3e1951037af3e993a4c0146f74da2615", "score": "0.56136745", "text": "def run(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "ea0675d8be5443a38bbe5b89712ab0c5", "score": "0.5613615", "text": "def run_my_funcs(x, y):\n print(x, y)\n my_squares(x)\n my_join(x, y)\n return 0", "title": "" }, { "docid": "8a765c11a3be68b623b4c4928f416d08", "score": "0.561273", "text": "def processRun( self, run, *args, **kwds ):\n pass", "title": "" }, { "docid": "9fecec9dd0aca184d005d7e94723ae1b", "score": "0.56079394", "text": "def _postprocess_arguments(self, args, **kwargs):\n for p in self.output:\n p._arg_apply(args[p.name], kwargs.get(p.name))", "title": "" }, { "docid": "9c717d12c0f792c61a3e46a52c299d00", "score": "0.5604267", "text": "def invoke(objects, name, *args, **kwargs):\n ...", "title": "" }, { "docid": "302d84946a063fd383fb21f86afff135", "score": "0.5598869", "text": "def run(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:\n return runner.invoke(labels, [*args])", "title": "" }, { "docid": "44f023c4c8c6176c6ca57b1b4281e1a3", "score": "0.5593249", "text": "def run_algo(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "b1cf26ac147202e7a24dce4d9ee5b848", "score": "0.5589001", "text": "def parse_functions(l_arguments):\n logging.debug('executing parse_functions function')\n logging.debug('arguments %s', 
l_arguments)\n\n functions = []\n tasks = []\n\n # an implicit execution\n logging.debug('checking first argument')\n zero = l_arguments[0].split(envs.common.split_function)[0]\n if zero not in envs.common.functions.keys():\n logging.warning('can not find function, executing built-in run')\n l_arguments.insert(0, 'run')\n logging.debug('new arguments %s', l_arguments)\n\n # step 1: split all to lists of functions with args\n logging.debug('spliting all arguments to lists of functions with args')\n for f in l_arguments:\n if f in envs.common.functions.keys():\n functions.append([f])\n elif envs.common.split_function in f:\n function, args = f.split(envs.common.split_function)\n if function in envs.common.functions.keys():\n functions.append([function])\n functions[-1].extend(args.split(envs.common.split_args))\n else:\n functions[-1].append(f)\n else:\n # NOTE: functions[-1].extend(args.split(envs.common.split_args))\n # don't uses in this case\n functions[-1].append(f)\n logging.debug('functions %s', functions)\n\n # step 2: parse args and kwargs for each function\n logging.debug('parsing args and kwargs for each function')\n for f in functions:\n fnct = f[0]\n args = f[1:]\n kwargs = {}\n for a in args[::-1]:\n e = a.find('=')\n if e != (-1 or 0) and a[e-1] not in envs.common.arithmetic_symbols:\n k, v = a.split('=')\n k = k.strip()\n v = v.strip()\n kwargs[k] = v\n args.pop()\n else:\n break\n tasks.append((fnct, args, kwargs))\n logging.debug('tasks %s', tasks)\n\n return tasks", "title": "" }, { "docid": "9aab8f53a755629f2b999515ee5533e0", "score": "0.5587532", "text": "def my_fn(x, y, *args):\n return x + y + sum(args)", "title": "" }, { "docid": "ef40eb9f72f6235c9e3975d1640f10c0", "score": "0.557973", "text": "def pass_func():\n pass", "title": "" }, { "docid": "5cccc9e3364cce109bd6e3c2f2d8f4e3", "score": "0.5572115", "text": "def fx(self, func, *args, **kwargs):\r\n \r\n return func(self, *args, **kwargs)", "title": "" }, { "docid": "638429560187d164891edabe46eeaee7", "score": "0.55712926", "text": "def make_run_mock(self, *_args, **_kwargs):\n # pylint: disable=unused-argument\n args.append(_args)\n kwargs.append(_kwargs)", "title": "" }, { "docid": "e8aef49affbe36a82b0fbe2bc1cb33e5", "score": "0.5570207", "text": "def _shared_functions(self, *args):\n start = args[0][0]\n repeat = True\n output = []\n idx = 0\n for i in args[0]:\n if i == start:\n if repeat is False:\n idx += 1\n repeat = True\n else:\n pass\n else:\n repeat = False\n output.append(self.f[self._names[idx]](i, *args[1:]))\n return output", "title": "" }, { "docid": "5ffe03798a405ba1a7cf7f826a49fe16", "score": "0.5559582", "text": "def Apply(*features, **kwargs) -> flow.Result: # pylint: disable=invalid-name", "title": "" }, { "docid": "32ab898ad713395b8804216817dddd12", "score": "0.55509406", "text": "def _run_kwargs(self):", "title": "" }, { "docid": "5bda9a50622be0cffa0bdc8af0e5e5ea", "score": "0.55345637", "text": "def _accumulator_check_args(self, variable=None, context=None, params=None, target_set=None):\n\n # PARAMS ------------------------------------------------------------\n\n # # MODIFIED 11/27/16 OLD:\n # # If parameter_validation is set, the function was called with params,\n # # and they have changed, then validate requested values and assign to target_set\n # if self.prefs.paramValidationPref and params and not params is None and not params is target_set:\n # # self._validate_params(params, target_set, context=FUNCTION_CHECK_ARGS)\n # self._validate_params(request_set=params, target_set=target_set, 
context=context)\n\n # If params have been passed, treat as runtime params\n # (relabel params as runtime_params for clarity)\n if context.execution_id in self._runtime_params_reset:\n for key in self._runtime_params_reset[context.execution_id]:\n self._set_parameter_value(key, self._runtime_params_reset[context.execution_id][key], context)\n self._runtime_params_reset[context.execution_id] = {}\n\n runtime_params = params\n if runtime_params:\n for param_name in runtime_params:\n if param_name in self.parameters:\n if param_name in {FUNCTION, INPUT_PORTS, OUTPUT_PORTS}:\n continue\n if context.execution_id not in self._runtime_params_reset:\n self._runtime_params_reset[context.execution_id] = {}\n self._runtime_params_reset[context.execution_id][param_name] = getattr(self.parameters, param_name)._get(context)\n self._set_parameter_value(param_name, runtime_params[param_name], context)", "title": "" }, { "docid": "46a677adcd512aae5c684f9c5b06de9a", "score": "0.55339324", "text": "def multiargs(function):\n def wrapper(*args, **kwargs):\n if len(args) == 0:\n return function()\n arg = args[0]\n args = args[1:]\n if type(arg) in (tuple, list, set):\n return map(lambda _: function(_, *args, **kwargs), arg)\n else:\n return function(arg, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "81e23f8a45ccc8997d05f4047bed63ea", "score": "0.55155945", "text": "def execute():\n # 1. Receive inputs as you want\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-x\", type=float, required=True)\n inputs = parser.parse_args()\n\n # 2. Perform computations\n y, dy = function(inputs.x)\n\n # 3. Gather and report results\n results = list()\n results.append(dict(name=\"example_objective\", type=\"objective\", value=y))\n results.append(dict(name=\"example_gradient\", type=\"gradient\", value=[dy]))\n\n report_results(results)", "title": "" }, { "docid": "dcf519a0e55b5346fe8a5aa323c2b7c0", "score": "0.54970175", "text": "def bind(ma, f, *args, **kwargs):", "title": "" }, { "docid": "f1ad3301c6d58e4b8a3b308d202e33ff", "score": "0.54826754", "text": "def expandCall(kargs):\n func=kargs['func']\n del kargs['func']\n out=func(**kargs)\n return out", "title": "" }, { "docid": "da16c2466e58c99f71f0d627c2515c82", "score": "0.5479298", "text": "def custom_map(function, *collections): \n for list_of_args in zip(*collections):\n function(*list_of_args)", "title": "" }, { "docid": "052362977b5a02e80618d3839701ee8e", "score": "0.54741836", "text": "def _perform_func_intent(eff):\n assert type(eff.intent) is Func\n return eff.intent.func()", "title": "" }, { "docid": "b6ee4aaad81de742ae11038f7efbdd01", "score": "0.5473314", "text": "def _apply_func(self, *args, out_dict: dict = None):\n if out_dict is None:\n out_dict = deepcopy(self.vfuncs)\n\n apply_func_cached = self._memory.cache(_apply_func_cached)\n out_dict = apply_func_cached(out_dict, self._async, self._lazy, *args)\n\n prev = tuple()\n for arg in args:\n if PREV_KEY in arg:\n prev += (arg[PREV_KEY],)\n out_dict[PREV_KEY] = (self,) + prev\n\n if self._mlflow is not None:\n run_dict = {}\n # log subkeys as params and value as metric\n for k, v in out_dict.items():\n if k == PREV_KEY:\n continue\n origins = np.array([subk.origin for subk in k])\n # ignore init origins and the last origin (this Vset)\n param_idx = [\n i for i in range(len(k[:-1])) if origins[i] != 'init'\n ]\n # get or create mlflow run\n run_dict_key = tuple(subk.value for subk in k[:-1])\n if run_dict_key in run_dict:\n run_id = run_dict[run_dict_key]\n else:\n run = 
self._mlflow.create_run(self._exp_id)\n run_id = run.info.run_id\n run_dict[run_dict_key] = run_id\n # log params\n for idx in param_idx:\n subkey = k[idx]\n param_name = subkey.origin\n # check if the origin occurs multiple times\n if np.sum(origins == param_name) > 1:\n occurence = np.sum(origins[:idx] == param_name)\n param_name = param_name + str(occurence)\n self._mlflow.log_param(\n run_id, param_name, subkey.value\n )\n self._mlflow.log_metric(run_id, k[-1].value, v)\n return out_dict", "title": "" }, { "docid": "9d84852a6f1b1af1c17fc285dd353176", "score": "0.547247", "text": "def main(**kwargs):\n ...", "title": "" }, { "docid": "c64505a07f958af68d538989f6265248", "score": "0.54644835", "text": "def linvoke(objects, name, *args, **kwargs):\n ...", "title": "" }, { "docid": "6e59f1ed961b076d511b86298e7471f3", "score": "0.5458061", "text": "def step_func(step, foo=None, bar=None):\n pass", "title": "" }, { "docid": "21651893072e126268995dfb35713553", "score": "0.5456965", "text": "def real(self):\n return self.fn(*self.args, **self.kwargs)", "title": "" }, { "docid": "dc3662654b45d2dc125f5d4451b69e8e", "score": "0.54557604", "text": "def fire(self, earg=None):\n\n for func in self._getfunctionlist():\n func(self.obj, earg)", "title": "" }, { "docid": "f73abc751fc281ecf7def3514ffeeb1f", "score": "0.5448461", "text": "def _run(self, x, y):\n raise NotImplementedError", "title": "" }, { "docid": "d0da708585545d2fec705742320551c9", "score": "0.54451257", "text": "def execute(self, parsed_arguments):", "title": "" }, { "docid": "4275349e29229210c2bf08457e8add35", "score": "0.5443935", "text": "def showargs(function):\n\n def inner(*args, **kwargs):\n return function((args, kwargs), *args, **kwargs)\n\n return inner", "title": "" }, { "docid": "b32c15bbb8027af3c35ea908e5d0f0e7", "score": "0.544249", "text": "def wrapfunc(self, *args, **kwargs):\n datasets = self.filter_datasets()\n logger.info('Running {0} with args={1} and kwargs={2} for ids={3}'.format(\n func.__name__, args, kwargs, [x['id'] for x in datasets]))\n return [func(x['id'], *args, **kwargs) for x in datasets]", "title": "" }, { "docid": "de007b2068edd77f0d946560c0312edd", "score": "0.54385746", "text": "def callfunc(func, args: tuple) -> Any:\n return func(args[0], args[1], args[2])", "title": "" }, { "docid": "c6217a2c120a94c738ef133779dbd8e3", "score": "0.5432682", "text": "def fetch_invoke(f, state):\n\n #f_ = f() # get rid of partial when state is known\n f_ = f\n return f_(**ExploreParams.get_relevant_params(f_, state))", "title": "" } ]
aad0522ace98fd658bbd18bf9978bf1b
Set the callback to call when data is received from the Arduino
[ { "docid": "11166a6a9720889551bf439677821d1c", "score": "0.7634608", "text": "def set_data_received_callback(self, callback):\n self._data_received_callback = callback", "title": "" } ]
[ { "docid": "89ce51f1ed4e906a68428dcb0030d809", "score": "0.690485", "text": "def _listener_callback(self, data):\n self._dynamixel_current_state = data", "title": "" }, { "docid": "434a456c2ff3757c8479112bdc016566", "score": "0.67482555", "text": "def callback():", "title": "" }, { "docid": "434a456c2ff3757c8479112bdc016566", "score": "0.67482555", "text": "def callback():", "title": "" }, { "docid": "434a456c2ff3757c8479112bdc016566", "score": "0.67482555", "text": "def callback():", "title": "" }, { "docid": "929c305a26548da75084362182e463db", "score": "0.66768205", "text": "def set_data_callback(self, callback_function):\n\n self._data_callback = callback_function", "title": "" }, { "docid": "2b4e0be925e7fb1963e89f2f242ea69d", "score": "0.66419166", "text": "def set_mavlink_callback(self, callback):\n self.mavrx_callback = callback", "title": "" }, { "docid": "09291458afd63d63b17ae4422b709e90", "score": "0.6624433", "text": "def __init__(self, serial, callback):\n self._update_data = callback\n super().__init__(serial, callback=self._process_value)", "title": "" }, { "docid": "34e1b5b335ac3eb32e1f1914f1a3af38", "score": "0.6592995", "text": "def laser_callback(self, data):\n self.scan_data = data\n self.recorder.save_scan(data)", "title": "" }, { "docid": "d08c2a67f8388b47e831e3e59f4c5715", "score": "0.6559456", "text": "def callback() -> None:", "title": "" }, { "docid": "3159889aef1463ddb792034ae978c18a", "score": "0.65070605", "text": "def on_msg(self, callback):\n self._msg_callback = callback", "title": "" }, { "docid": "aa5d664b5933ef7208fd4ba295a4040a", "score": "0.6485662", "text": "def on_read(self, data):\n pass", "title": "" }, { "docid": "f7fc075ebc277784ae8f0f2fa8c78d68", "score": "0.6463448", "text": "def dataReceived(self, data):", "title": "" }, { "docid": "fbcbd39dc7c1a6784fd966c63f028332", "score": "0.64287937", "text": "def subscribe(self, callback):\n self._data_changed.connect(callback)", "title": "" }, { "docid": "d60234d6f97be2c4a0630a561a5c0ae2", "score": "0.64154196", "text": "def data_received(self, data):", "title": "" }, { "docid": "c83e1bc6612336592ad4eb3d72a16888", "score": "0.63558763", "text": "def _rx(self,data):\n\t\tif (data and self._rx_callback):\n\t\t\tnum = int.from_bytes(data, byteorder=BYTE_ORDER)\n\t\t\tself._rx_callback(num)", "title": "" }, { "docid": "4a74b1fb427804a5fffd688472513713", "score": "0.63441956", "text": "def set_callback(self, callback=None):\r\n self.callback = callback", "title": "" }, { "docid": "fb2058155d43289032a616d0a6cabe73", "score": "0.62692523", "text": "def read_callback():\n plugin.read_callback()", "title": "" }, { "docid": "fb2058155d43289032a616d0a6cabe73", "score": "0.62692523", "text": "def read_callback():\n plugin.read_callback()", "title": "" }, { "docid": "ab68bfc5d91f47d780e4e6e9fb9112e8", "score": "0.6231066", "text": "def callback(self, callback, *args, **kwds):\n ...", "title": "" }, { "docid": "0e181d11c5b10ec0c90a60bcf0a51d04", "score": "0.61959004", "text": "def callback(self, data: str, buffer: str, args: str) -> int:\n raise NotImplementedError(\"callback method not implemented\")", "title": "" }, { "docid": "03490cd7d7c4128daf4da568f6d10c08", "score": "0.6171783", "text": "def metronomeCB(self, msg): \n if len(msg) == 3:\n metronome = msg[2]\n LiveUtils.getSong().metronome = metronome", "title": "" }, { "docid": "90b88ca7b592152e510d79c3cff68af5", "score": "0.61332464", "text": "def accel_callback(self,data):\n self.duty_cycle = data.data", "title": "" }, { "docid": 
"2c8c2b970d908de5e79ee1f6f62c2a54", "score": "0.6118887", "text": "def subscribe(self, callback):\n pass", "title": "" }, { "docid": "33465b4576296f6a141796b0efa52218", "score": "0.6118655", "text": "def set_callback(self, f):\n self.callback = f", "title": "" }, { "docid": "000f8a7b2a7ae4b2695f040d037d9e39", "score": "0.61034596", "text": "def callback(data):\n global status\n status = data.data", "title": "" }, { "docid": "70dd825b71420e6eb73a597a2135853f", "score": "0.60980815", "text": "def set_callback(self, callback: Callback) -> None:\n self.callback = callback", "title": "" }, { "docid": "abe2dff9f84ee16207bcbf9d190b15be", "score": "0.6090795", "text": "def rx_handler (self, callback):\n # Not sure if this is a good idea, but it might be...\n if self.rx_handler is not None or callback is not None:\n log.debug(\"Resetting rx_handler on %s?\", self)\n if callback is None: callback = _dummy_handler\n self._custom_rx_handler = callback", "title": "" }, { "docid": "030c84c9e9888ae56d56900723ead7ee", "score": "0.60892534", "text": "def callback(self):\n\t\tpass", "title": "" }, { "docid": "3498309078a7fb71172af8f3788797cf", "score": "0.60682017", "text": "def data_received(self, data):\n print(\"data is \", data)\n pass", "title": "" }, { "docid": "a71bdaf577e55b3da80a58f3eaa6fd09", "score": "0.60605234", "text": "def callback(name, *args):\n\n ...", "title": "" }, { "docid": "c69a63827cd2577c180e8948be793cbf", "score": "0.60309947", "text": "def _callback_sensor_data(self, carla_sensor_data):\n if not rospy.is_shutdown():\n if self.synchronous_mode:\n if self.sensor_tick_time:\n self.next_data_expected_time = carla_sensor_data.timestamp + \\\n float(self.sensor_tick_time)\n self.queue.put(carla_sensor_data)\n else:\n self.publish_transform(self.get_ros_transform(\n trans.carla_transform_to_ros_transform(carla_sensor_data.transform)))\n self.sensor_data_updated(carla_sensor_data)", "title": "" }, { "docid": "ab7c11dbbfdc1d33179599febf5ce497", "score": "0.6029671", "text": "def data_received(self, data):\n self.protocol.data_received(data)", "title": "" }, { "docid": "ce0395eefbf7545054a1b67f25bb80da", "score": "0.6003549", "text": "def set_callback(self, func):\n self.callback = func", "title": "" }, { "docid": "5c35280f306055e6674a2e29b3f3905c", "score": "0.5990975", "text": "def scan_callback(scan_msg):\n\n # Save a global reference to the most recent sensor state so that\n # it can be accessed in the main control loop.\n # (The global keyword prevents the creation of a local variable here.)\n global SCAN\n SCAN = scan_msg", "title": "" }, { "docid": "f2e1e348588379677d01159b507ec8e5", "score": "0.5988066", "text": "def _call_callback(self, callback):\r\n callback()", "title": "" }, { "docid": "6df2bb2fd65b211090e525a5238630ae", "score": "0.5955982", "text": "def mp_callback(self, event):\n value = event.events[0].value.decode('utf-8')\n self.mp_data = json.loads(value)\n self.new_data = True", "title": "" }, { "docid": "039efe410d221fa8a5dcf45f060bb559", "score": "0.5918004", "text": "def _callback(self, data=None):\n if data is None:\n data = self\n for callback in self._update_callbacks:\n sig = signature(callback)\n if len(sig.parameters) == 0:\n callback()\n if len(sig.parameters) == 1:\n callback(data)\n if len(sig.parameters) == 2:\n callback(self, data)\n # If there were no callbacks to be made, make a call upwards\n # We may have new devices that need to be handled\n if len(self._update_callbacks) == 0 and self._pyelk is not None:\n #_LOGGER.debug('_callback - promoting 
callback')\n self._pyelk.promoted_callback(self, data)", "title": "" }, { "docid": "100ba9680b651ee46989379e8054f3e1", "score": "0.59166634", "text": "def on_receive(self, func):\n self._on_receive.append(self.callback_handeler(func))\n return func", "title": "" }, { "docid": "10906d4e7b7c5d75a118cabeb4321898", "score": "0.5908059", "text": "def callback_weather(data):\n global weather_status\n weather_status = data", "title": "" }, { "docid": "d7031b9bf68ef1a8f027d08f3e894bea", "score": "0.59000367", "text": "def _callbackfun(self, gpio, level, tick):\n if self._edge == 1:\n self._tick = tick\n elif self._edge == 2:\n diff = pigpio.tickDiff(self._tick, tick)\n self._micros = diff\n self._reading = True\n self._edge += 1", "title": "" }, { "docid": "f1cc2416c1bdb4bbd2457d9d91aa841a", "score": "0.58971685", "text": "def __init__(self, callback):\n self.__callback = callback", "title": "" }, { "docid": "a2c5cdd4c4e5973e6ef0e207f55762de", "score": "0.58827394", "text": "def initDataReceived(self, data):\n #print \">>:\", data\n dcall_discard(self, 'init_dcall')\n\n if data[0] == '$':\n from dtella.client.dc import AbortConnection\n AbortConnection(self, data, \"ADC\", \"adc://localhost:%s\" %\n self.main.state.clientport)\n else:\n # Passed all tests, let it through\n self.dataReceived = self._dataReceived\n self.dataReceived(data)", "title": "" }, { "docid": "a5109b9b7505445ff744da2343ffb3f7", "score": "0.5881564", "text": "def set_callback(self, cb: Callable):\n self.cb = cb", "title": "" }, { "docid": "134cf558584d5de2b9966fb7b6b46653", "score": "0.5862347", "text": "def request_callback(self, callback):\n self._callback = self.wrap_callback(callback)", "title": "" }, { "docid": "9a9d611b51e27f08fed85948916890d0", "score": "0.58617336", "text": "def OnData(self, data, bytes):\n self.packet = LenkoPacket(254)\n\n # Gather the packet! token, command, size, payload, chksum\n# print 'received data:', data\n# self.data = self.unpack(data, bytes)\n self.data = data\n# print 'received data 2nd:', self.data\n self.checksum = 0\n\n for self.d in self.data:\n\n if (self.state == self.STARTTOKEN):\n if (self.d == ord('#')):\n\n self.state = self.COMMAND\n else:\n print \"no start token yet...\"\n\n elif (self.state == self.COMMAND):\n # Fake here, turn Request to a Response by an increment... 
compensate above ..\n self.d+= 1\n self.packet.setCommand(self.d)\n self.state = self.LENGTH\n\n elif (self.state == self.LENGTH):\n self.m_length = self.d\n self.state = self.PAYLOAD\n\n elif (self.state == self.PAYLOAD):\n if self.m_length:\n self.packet.push_back(self.d)\n self.m_length = self.m_length -1\n\n if not self.m_length:\n self.state = self.CHKSUM\n\n elif (self.state == self.CHKSUM):\n # For now, compensate for incr from REQ to RESP, see above ...\n if (self.checksum & 255) == (self.d + 1):\n self.callback(self.packet)\n else:\n print 'wrong checksum, expected:', (self.checksum & 255), 'got:', self.d\n else:\n print 'wrong state:', self.state\n self.state = self.STARTTOKEN\n\n self.checksum+= self.d", "title": "" }, { "docid": "4eeb8b755aae311a422eb97518663ec4", "score": "0.5857131", "text": "def on_connected(self, writeSerialPort, readSerialPort):\n self.writeSerialPort = writeSerialPort\n self.readSerialPort = readSerialPort\n self.disableAutogain()\n self.enableTimestamps()\n self.readoutRegisters()", "title": "" }, { "docid": "f15194e209dbdab104845f36ca76161b", "score": "0.5855954", "text": "def _call_callback(self, callback):\r\n if self.io_loop._running:\r\n self.io_loop.add_callback(callback)\r\n else:\r\n callback()", "title": "" }, { "docid": "909b1a6f702d24d655dbca6d4552ebef", "score": "0.58501494", "text": "def subscribe(self, callback):\n raise NotImplementedError", "title": "" }, { "docid": "dfcff2306676a1a3804df42462ecdb4a", "score": "0.5830854", "text": "def on_load(self, callback):\n self._callback = callback", "title": "" }, { "docid": "3715a6c51927ce9b628f23d98f7c5b83", "score": "0.58208495", "text": "def set_message_callback(self, callback):\n self.__cb_message = callback", "title": "" }, { "docid": "81f44b767c86dec9343db309b90e34bd", "score": "0.5815049", "text": "def _callback(self, chunk):\n command, data = chunk.split(' ', 1)\n\n if command == 'set_status':\n self.set_status(data)\n\n elif command == 'set_header':\n args = loads(args)\n self.set_header(args['name'], args['value'])\n\n elif command == 'write':\n self.write(data)", "title": "" }, { "docid": "5cfa5656a4fdf917e1f5e0d11f5f7c4c", "score": "0.58137745", "text": "def UpCallback(self, callback): \n self.__upCallback = callback", "title": "" }, { "docid": "10d312b3e6080d8a0e9a9548f59cd62b", "score": "0.58070624", "text": "def _cont_read_data(self, callback, bufname, verbose=False):\n regname = '%s_addr' % bufname\n chanreg = '%s_chan' % bufname\n a = self.r.read_uint(regname) & 0x1000\n addr = self.r.read_uint(regname)\n b = addr & 0x1000\n while a == b:\n addr = self.r.read_uint(regname)\n b = addr & 0x1000\n tic = time.time()\n idle = 0\n n = 0\n try:\n while True:\n try:\n a = b\n if a:\n bram = '%s_a' % bufname\n else:\n bram = '%s_b' % bufname\n data = self.r.read(bram, 4 * 2 ** 12)\n addrs = addr\n chans = self.r.read_int(chanreg)\n res = callback(data, addrs, chans)\n except Exception, e:\n logger.error(\"read only partway because of error:\", exc_info=True)\n res = False\n n += 1\n if res:\n break\n addr = self.r.read_uint(regname)\n b = addr & 0x1000\n while a == b:\n addr = self.r.read_uint(regname)\n b = addr & 0x1000\n idle += 1\n if verbose:\n print (\"\\r got %d\" % n),\n sys.stdout.flush()\n except KeyboardInterrupt:\n pass\n tot = time.time() - tic\n logger.debug(\"read %d in %.1f seconds, %.2f samples per second, idle %.2f per read\" % (\n n, tot, (n * 2 ** 12 / tot), idle / (n * 1.0)))", "title": "" }, { "docid": "670c84534946405a7482b14f216bec3c", "score": "0.579221", "text": 
"def _on_temperature(self, data):\n val = numpy.int16(numpy.uint16(int.from_bytes(data[0:2], byteorder='little')))\n\n if self.debug:\n print(\"Temperature: {}\".format(val))\n\n self.on_temperature(val)\n for callback in self._temperature_callbacks.values():\n callback(val)", "title": "" }, { "docid": "fc9bda526c651eaa797cb3c3e5d1c14f", "score": "0.5781895", "text": "def callback_wiredbot_teleop(msg):\n global ch3_data\n ch3_data = msg.data[2]", "title": "" }, { "docid": "658c6a33e1012cb4ae85936d552c2cf6", "score": "0.5769296", "text": "def datachange_notification(self, node, val, data):\n if not val:\n return # the \"mic_active\" is False -> do nothing\n print(\"The mic is active!\")\n # self.client.connect() # <- wtf point\n self.shouldProcessInput = True", "title": "" }, { "docid": "2c50e8fef7e831168ad0ba88bbbe149a", "score": "0.57658964", "text": "def listener():\n rospy.init_node(\"houseapi\", anonymous=True, disable_signals=True)\n rospy.Subscriber(\"weather\", WeatherdataArray, callback_weather)", "title": "" }, { "docid": "7ae7d6d4461d068fa1b73e026d3f5330", "score": "0.5759869", "text": "def on_update(self, callback: Callable[[NetworkDevice], None]):\n self.__on_update.append(callback)", "title": "" }, { "docid": "53278f00585947c9a33bf468fd30136b", "score": "0.57441807", "text": "def callback_message(self, message):\n pass", "title": "" }, { "docid": "b1abcf3c07165e0386c517fdbf17918c", "score": "0.5741307", "text": "def callback(self, msg_id, msg):\n pass", "title": "" }, { "docid": "8fa285a440c700570e694e96e4107d1b", "score": "0.57224554", "text": "def callback(self, callback):\n if callback is not None and len(callback) > 128:\n raise ValueError(\"Invalid value for `callback`, length must be less than or equal to `128`\") # noqa: E501\n\n self._callback = callback", "title": "" }, { "docid": "8ad6a98fc57778f408433bf2b22224bc", "score": "0.57205904", "text": "def recognising_callback(self, msg):\n self.recognising = msg.data", "title": "" }, { "docid": "1a1e8cf97ee80244914d11132e6aafa3", "score": "0.57087976", "text": "def datagramReceived(self, data):\n try:\n obj = json.loads(data)\n except ValueError, e:\n log.err(e, 'Invalid JSON in stream: %r' % data)\n return\n\n if u'text' in obj:\n obj = platform.Status.fromDict(obj)\n else:\n log.msg('Unsupported object %r' % obj)\n return\n\n self.callback(obj)", "title": "" }, { "docid": "1acee95713bd0c018450d65039d81383", "score": "0.5698619", "text": "def callback(gpio, level, tick):\n # access out of scope variables\n nonlocal bit, hH, hL, tH, tL, CS, high_tick, tov\n nonlocal no_response\n # Measure tick diff to get edge\n diff = pigpio.tickDiff(high_tick, tick)\n val = 0\n\n # Falling edge, high to low\n if level == 0:\n # Edge length determines if bit is 1 or 0.\n # 50 <= diff < 200, 1 bit\n # diff < 50, 0 bit\n # diff >= 200, bad bit\n if diff >= 50:\n val = 1\n if diff >= 200: # Bad bit?\n CS = 256 # Force bad checksum.\n else:\n val = 0\n\n # Format bits into 5 bytes corresponding to \n # hH : humidity high byte\n # hL : humidity low byte\n # tH : temp high byte\n # tL : temp low byte\n # CS : checksum byte\n\n if bit >= 40: # Message complete.\n bit = 40\n elif bit >= 32: # In checksum byte.\n CS = (CS<<1) + val\n\n if bit == 39:\n # 40th bit received.\n self.pi.set_watchdog(gpio, 0)\n no_response = 0\n total = hH + hL + tH + tL\n\n # Value of CS will be affected by a bad bit\n # CS = 256\n if (total & 255) == CS: # Is checksum ok?\n self._rh = ((hH<<8) + hL) * 0.1\n if tH & 128: # Negative temperature.\n mult = -0.1\n tH = 
tH & 127\n else:\n mult = 0.1\n self._temp = ((tH<<8) + tL) * mult\n tov = time.time()\n else:\n # BAD CHECKSUM\n print('ERROR: Bad checksum')\n\n elif bit >=24: # in temp low byte\n # left shift by one and add bit\n # for example\n # a = 2 # 0010\n # bit = 1\n # a = (a<<1) + bit # 0010 -> 0100 then add 1 -> 0101 = 5\n tL = (tL<<1) + val \n elif bit >=16: # in temp high byte\n tH = (tH<<1) + val\n elif bit >= 8: # in humidity low byte\n hL = (hL<<1) + val\n elif bit >= 0: # in humidity high byte\n hH = (hH<<1) + val\n else: # header bits\n pass\n # increment bit by 1\n bit += 1\n # Rising edge, low to high\n elif level == 1:\n high_tick = tick\n # If 0.25 s, then??\n if diff > 250000:\n bit = -2\n hH = 0\n hL = 0\n tH = 0\n tL = 0\n CS = 0\n # No change, watchdog timeout\n else:\n self.pi.set_watchdog(gpio, 0)\n # Too few data bits received.\n if bit < 8:\n no_response += 1\n print('ERROR: insufficient bits received ({} bits)'.format(bit))\n if no_response > MAX_NO_RESPONSE:\n no_response = 0\n # Cycle power if sensor is powered by GPIO pin\n if self.power_gpio is not None:\n print('Cycling power at GPIO {}...'.format(self.power_gpio))\n self.online = False\n self.pi.write(self.power_gpio, 0)\n time.sleep(2)\n self.pi.write(self.power_gpio, 1)\n time.sleep(2)\n self.online = True\n print('Done.')\n # Short message receieved.\n elif bit < 39:\n no_response = 0\n print('ERROR: message received is shorter than expected ({} bits)'.format(bit))\n # Full message received.\n else: \n no_response = 0", "title": "" }, { "docid": "525c93d8cdc6d4b7019d670446ccf15e", "score": "0.5698311", "text": "def laser_callback(self, msg):\n \n self.laser_ranges = msg.ranges\n self.laser_angle_increment = msg.angle_increment", "title": "" }, { "docid": "b5dcb06bc46abc03a2c261b87f5a98fb", "score": "0.56933475", "text": "def dataReceived(self, data):\n self.resetTimeout()\n LengthDelimitedStream.dataReceived(self, data)", "title": "" }, { "docid": "5b937ad6b10e3bb8c7e91c5e7cab8172", "score": "0.5688993", "text": "def subscription_callback(self, message):\n data = json.loads(message.data)\n\n self.LE_project.setText(message.attributes['projectId'])\n self.LE_registry.setText(message.attributes['deviceRegistryId'])\n self.LE_region.setText(message.attributes['deviceRegistryLocation'])\n\n sample_values = [message.attributes['deviceId']] + \\\n ['{}: {}'.format(k, v) for k, v in data.items() if k != 'timestamp']\n sample_time = datetime.datetime.fromtimestamp(data['timestamp'])\n serialno, led_status = sample_values\n\n self.add_data(sample_time.strftime(\"%H:%M:%S\"), serialno, led_status)\n\n message.ack()", "title": "" }, { "docid": "4fdcc0c3a573b6967d0b30b0d1065b0a", "score": "0.5671575", "text": "def receive(self, data):\n pass", "title": "" }, { "docid": "4fdcc0c3a573b6967d0b30b0d1065b0a", "score": "0.5671575", "text": "def receive(self, data):\n pass", "title": "" }, { "docid": "4fdcc0c3a573b6967d0b30b0d1065b0a", "score": "0.5671575", "text": "def receive(self, data):\n pass", "title": "" }, { "docid": "862e2f6d5ae151899ae7937ab08b552c", "score": "0.5660634", "text": "def register_callback(self, callback):\n self._callback = callback", "title": "" }, { "docid": "9663522268c547d2573e0a5fc078f4e6", "score": "0.56496", "text": "def data_received(self, chunk):", "title": "" }, { "docid": "a4042a8aefdf254940abf779e9cbb3fb", "score": "0.5642518", "text": "def listener(self, name, m):\n self._rcin.time_boot_ms = m.time_boot_ms\n self._rcin.chancount = m.chancount\n self._rcin.chan_raw = {\n 1: m.chan1_raw,\n 2: 
m.chan2_raw,\n 3: m.chan3_raw,\n 4: m.chan4_raw,\n 5: m.chan5_raw,\n 6: m.chan6_raw,\n 7: m.chan7_raw,\n 8: m.chan8_raw,\n 9: m.chan9_raw,\n 10: m.chan10_raw,\n 11: m.chan11_raw,\n 12: m.chan12_raw,\n 13: m.chan13_raw,\n 14: m.chan14_raw,\n 15: m.chan15_raw,\n 16: m.chan16_raw,\n 17: m.chan17_raw,\n 18: m.chan18_raw\n }\n # Notify all observers of new message (with new value)\n self.notify_attribute_listeners('rcin', self._rcin)", "title": "" }, { "docid": "f6de35458794acace963e65ad1e91fad", "score": "0.5633531", "text": "def accel_callback(self,data):\n self.curr_time_dc = rospy.get_rostime().to_sec() - self.t0\n self.duty_cycle = data.data", "title": "" }, { "docid": "f6de35458794acace963e65ad1e91fad", "score": "0.5633531", "text": "def accel_callback(self,data):\n self.curr_time_dc = rospy.get_rostime().to_sec() - self.t0\n self.duty_cycle = data.data", "title": "" }, { "docid": "2f2589f356db1d0cb886ef84538e8359", "score": "0.56243503", "text": "def dataReceived(self, data):\n assert False # Client should rx the data", "title": "" }, { "docid": "6648476c977371239c78daebbc89846c", "score": "0.5614759", "text": "def event_in_cb(self, msg):\n self.event = msg.data", "title": "" }, { "docid": "6648476c977371239c78daebbc89846c", "score": "0.5614759", "text": "def event_in_cb(self, msg):\n self.event = msg.data", "title": "" }, { "docid": "dc23efad3c6b8a6becac17a3f1314bd1", "score": "0.5614219", "text": "def didReceiveData(self, connection, data):\n self.log.append(('didReceiveData', connection, data))", "title": "" }, { "docid": "0b0672bf8f3d939df718361008ebb803", "score": "0.56133187", "text": "def register_callback(self, deviceID, sensorNames):\n lines = []\n # \"convert\" deviceID to digi XBeeDevice object\n xbee = self.devices[deviceID].xbee\n # remove previously set callbacks\n xbee.set_dio_change_detection(None)\n # create a list of IO lines to monitor for value changes\n for name in sensorNames:\n lines.append(self.devices[deviceID].sensors[name].pinLine)\n # xbee-python library function to register change detection sampling :D\n xbee.set_dio_change_detection(lines)", "title": "" }, { "docid": "ea411a6656af061ba4fb5b52395e0b0d", "score": "0.561215", "text": "def callbackmethod9(self, addr, state, value):\n _LOGGER.debug('Called method 9 callback')\n self.callbackvalue9 = value", "title": "" }, { "docid": "a0fbf21b1c7f942f7176a55b3e2517c6", "score": "0.56100327", "text": "def set_callback_for_switch(callback):\n if callback is not None:\n s1.irq(trigger=Pin.IRQ_FALLING, priority=1, handler=callback)\n else:\n s1.irq(trigger=0)", "title": "" }, { "docid": "c2f369128bd5165b244a6f441d7ffa47", "score": "0.56051844", "text": "def on_message(self, event):\n handle_result = self.callback(event.message, self.data)\n self.on_message_handled(event, handle_result)", "title": "" }, { "docid": "595348d4a682c33d6eaf3ea2df164491", "score": "0.56045866", "text": "def set_callback(self, callback):\n self.spa = MethodType(callback, self)\n self.mpa = MethodType(callback, self)\n self.msa = MethodType(callback, self)\n self.mf = MethodType(callback, self)", "title": "" }, { "docid": "595348d4a682c33d6eaf3ea2df164491", "score": "0.56045866", "text": "def set_callback(self, callback):\n self.spa = MethodType(callback, self)\n self.mpa = MethodType(callback, self)\n self.msa = MethodType(callback, self)\n self.mf = MethodType(callback, self)", "title": "" }, { "docid": "8a55f4b11b8f927546aa6f76877c30db", "score": "0.5596248", "text": "def set_callback(self, callback: StateChangedProtocol) -> None:\n 
self.state_changed_callback = callback", "title": "" }, { "docid": "ea032a0388f11fa19e7f75e7675ed3a1", "score": "0.5591106", "text": "def receive_data(self, data):\n face = self.get_face()\n face.receive_data(data)", "title": "" }, { "docid": "5f533b3f51300a21e82affb14b804165", "score": "0.5589051", "text": "def _fire_cb(self, msg_name, data):\n event = self.callback_lookup.get(msg_name)\n # Ignore unknown callbacks\n if event is not None:\n for callback in self.registered_callbacks[event]:\n callback(data)", "title": "" }, { "docid": "b5084721165bfde5d7fed66ea2f78c51", "score": "0.5581485", "text": "def on_data(self, text: str, options: int):\n pass", "title": "" }, { "docid": "18ab120c6f3a18f8404620d2528f4a7c", "score": "0.557483", "text": "def __call__(self):\n (device, uart) = self.connect_to_device()\n\n try:\n while True:\n self.receive_data(uart);\n finally:\n device.disconnect()", "title": "" }, { "docid": "7130f61ee070d8c366d498d98f286b25", "score": "0.55690813", "text": "async def on_change(self, data):", "title": "" }, { "docid": "705921cdf7d87f301b7fd7670563c4b9", "score": "0.55666286", "text": "def on_ready(self):\n self.prepare_for_departure()\n self.callbacks.motor_direction = self.trend", "title": "" }, { "docid": "88ed3ab66f69d7c3530a815c72f2eb28", "score": "0.5564924", "text": "def rx_callback(self, gpio):\n timestamp = int(time.perf_counter() * 1000000)\n duration = timestamp - self._rx_last_timestamp\n\n if duration > 5000:\n if abs(duration - self._rx_timings[0]) < 200:\n self._rx_repeat_count += 1\n self._rx_change_count -= 1\n if self._rx_repeat_count == 2:\n for pnum in range(1, len(PROTOCOLS)):\n if self._rx_waveform(pnum, self._rx_change_count, timestamp):\n _LOGGER.debug(\"RX code \" + str(self.rx_code))\n break\n self._rx_repeat_count = 0\n self._rx_change_count = 0\n\n if self._rx_change_count >= MAX_CHANGES:\n self._rx_change_count = 0\n self._rx_repeat_count = 0\n self._rx_timings[self._rx_change_count] = duration\n self._rx_change_count += 1\n self._rx_last_timestamp = timestamp", "title": "" }, { "docid": "d6959af343653babcb655c5849d9a3ba", "score": "0.5557763", "text": "def _communicate_callback(self, output):\n self._output = output\n self._event.Signal()", "title": "" }, { "docid": "40bf801c8811d861c4cf66a1d66a0b93", "score": "0.5556591", "text": "def call(self, callback, **params):\n callback(params)", "title": "" }, { "docid": "7df7bb1c537691db5f23f467fb6072da", "score": "0.5541715", "text": "def on_connect():", "title": "" }, { "docid": "7df7bb1c537691db5f23f467fb6072da", "score": "0.5541715", "text": "def on_connect():", "title": "" }, { "docid": "7df7bb1c537691db5f23f467fb6072da", "score": "0.5541715", "text": "def on_connect():", "title": "" }, { "docid": "079a50b86bcfe2b6a11f57ffbde210e7", "score": "0.55376935", "text": "def data_callback(self, data_object, sender=None):\n if self.event_log_filter(data_object):\n msg = \"Received EVR: {}\".format(data_object.get_str(verbose=True))\n self.__log(msg, TestLogger.BLUE, sender=\"GDS\")\n if (\n self.last_evr is not None\n and data_object.get_time() < self.last_evr.get_time()\n ):\n msg = \"API detected out of order evrs!\"\n msg = msg + \"\\nReceived First:{}\".format(\n self.last_evr.get_str(verbose=True)\n )\n msg = msg + \"\\nReceived Second:{}\".format(data_object.get_str(verbose=True))\n self.__log(msg, TestLogger.ORANGE)\n self.last_evr = data_object", "title": "" }, { "docid": "7980231fe47b977adfdb128269f7cd69", "score": "0.5537608", "text": "def callbackA(self, gpio):\n\n # read INTF 
register\n\n if self.bank == '16bit':\n reg = 0x0E\n else: # self.bank = '8bit'\n reg = 0x07\n\n regValue = self.single_access_read(reg)\n\n pin = -1\n\n for i in range(0,8):\n if regValue == (1<<i):\n pin = i\n break\n\n value = self.input_at_interrupt(pin) \n\n if self.callBackFuncts[pin][value] != 'empty':\n self.callBackFuncts[pin][value](pin)\n\n return", "title": "" } ]
ccdd6186cc6ccd4013894a7d4f98def8
find the module(s) given the name(s)
[ { "docid": "7d55dcfc22921a14d2acde0e99e59f72", "score": "0.6622528", "text": "def find_modules(self, requested_names):\n found_modules = set()\n for requested_name in requested_names:\n is_instance = ' ' in requested_name\n\n for module_name, module in self.py3_wrapper.output_modules.items():\n if module['type'] == 'py3status':\n name = module['module'].module_nice_name\n else:\n name = module['module'].module_name\n if is_instance:\n if requested_name == name:\n found_modules.add(module_name)\n else:\n if requested_name == name.split(' ')[0]:\n found_modules.add(module_name)\n\n if self.debug:\n self.py3_wrapper.log('found %s' % found_modules)\n return found_modules", "title": "" } ]
[ { "docid": "25fe60cd21b7004b8076805f2bb02ae5", "score": "0.7484802", "text": "def _find_package_in_modules(name: str) -> Optional[str]:\n package_specs = importlib.util.find_spec(name)\n\n try:\n importlib.util.module_from_spec(package_specs) # type: ignore\n except AttributeError:\n return None\n\n return f\"import {name}\"", "title": "" }, { "docid": "d12166ebaef78d4925f18a063df714d5", "score": "0.6785047", "text": "def get_module_info(name):\r\n for available_module in AVAILABLE_MODULES:\r\n if name == available_module['name']:\r\n return available_module\r\n\r\n return None", "title": "" }, { "docid": "891cf788c34756f768f70af83fa02da9", "score": "0.67754394", "text": "def manual_find(self, name, path):\r\n finder = None\r\n for entry in path:\r\n try:\r\n finder = sys.path_importer_cache[entry]\r\n except KeyError:\r\n finder = self._path_hooks(entry)\r\n if finder:\r\n loader = finder.find_module(name)\r\n if loader:\r\n return loader, entry\r\n return None, None # nothing found!\r", "title": "" }, { "docid": "fe4261cda59c8f48cbe7d760e8fa670b", "score": "0.6726379", "text": "def find_class_by_name(name, modules):\n modules = [getattr(module, name, None) for module in modules]\n return next(a for a in modules if a)", "title": "" }, { "docid": "46a875e80a4abb96d435f34b7c38cf08", "score": "0.66844374", "text": "def find_modules(self, requested_names):\n found_modules = set()\n for requested_name in requested_names:\n is_instance = \" \" in requested_name\n\n for module_name, module in self.py3_wrapper.output_modules.items():\n if module[\"type\"] == \"py3status\":\n name = module[\"module\"].module_nice_name\n else:\n name = module[\"module\"].module_name\n if is_instance:\n if requested_name == name:\n found_modules.add(module_name)\n else:\n if requested_name == name.split(\" \")[0]:\n found_modules.add(module_name)\n\n if self.debug:\n self.py3_wrapper.log(f\"found {found_modules}\")\n return found_modules", "title": "" }, { "docid": "542740b3aa451d1ab757a9cb02aa64f4", "score": "0.6624253", "text": "def class_by_name(name):\n for module in _modules:\n if module.__name__ == name:\n return module", "title": "" }, { "docid": "e07ed59547c7f23b2f612c2106444965", "score": "0.65439564", "text": "def find_module(self, fullname, path=None):\n for so in _SEARCH_ORDER:\n try:\n path = self.get_filename(fullname, so)\n self.request = self.session.get(path)\n self.request.raise_for_status()\n return self\n except requests.exceptions.RequestException as e:\n self.log.debug(\"Unable to import %s: [%s]\", path, e)", "title": "" }, { "docid": "e60ba3ace90e02e85ada0696a9319e69", "score": "0.6497988", "text": "def find_module(cls, *args, **kwargs): # real signature unknown\r\n pass", "title": "" }, { "docid": "345ba9a132514c6dff95d834b711d10f", "score": "0.643123", "text": "def getAllModules():\n packageObject= inspect.getmodule( ffl.grabbers )\n packageDir= os.path.dirname( packageObject.__file__ )\n listings= os.listdir( packageDir )\n print(\"retrieving all names which end in Grabber.py ...\")\n full_files= [ os.path.join( packageDir, aListing ) for aListing in listings \n if os.path.isfile( os.path.join( packageDir, aListing ) ) and \n os.path.join( packageDir, aListing ).endswith(\".py\") and\n re.search( \".*Grabber\\.py$\" , aListing ) ]\n \n importModuleNames= [ \"ffl.grabbers.\" + os.path.split( aName )[-1].split( \".py\" )[0] for aName in full_files ]\n return importModuleNames", "title": "" }, { "docid": "4cd660055d640907a0673b742715ec5d", "score": "0.6424584", "text": "def 
find_module(module_name):\n\n if os.path.exists(module_name):\n module_name = os.path.normpath(module_name.rstrip(\".py\")).replace(os.path.sep, '.')\n\n module = None\n def import_error(msg):\n if msg[-1] != \"\\n\":\n msg += \"\\n\"\n if (module is not None) and hasattr(module, '__file__'):\n msg += module.__name__ + \" is in \" + module.__file__ + \"\\n\"\n msg += \"sys.path is \" + str(sys.path)\n raise ImportError(msg)\n\n # Import the whole module chain\n try:\n module = __import__(module_name)\n except ImportError as e:\n import_error(\"Cannot import '{0}' : {1}\".format(module_name, e))\n\n # Traverse the namespaces down to the module\n # e.g. If module_name is \"eHive.examples.LongMult.AddTogether\", module represents \"LongMult\" at this stage\n for submodule in module_name.split('.')[1:]:\n if hasattr(module, submodule):\n module = getattr(module, submodule)\n else:\n import_error(\"Cannot find '{0}' in '{1}'\".format(submodule, module))\n # e.g. module now represents \"AddTogether\"\n\n if not hasattr(module, '__file__'):\n import_error('\"{0}\" is a namespace, not a module'.format(module.__name__))\n\n # NB: We assume that the runnable has the same name as the file itself\n class_name = module_name.split('.')[-1]\n\n # get the class in the module\n if not hasattr(module, class_name):\n # it could be a typo ... Let's print the available modules by decreasing distance to the required name\n possible_modules = [_ for _ in dir(module) if isinstance(getattr(module, _), type) and issubclass(getattr(module, _), BaseRunnable)]\n if possible_modules:\n import difflib\n possible_modules.sort(key=lambda s: difflib.SequenceMatcher(a=class_name, b=s, autojunk=False).ratio(), reverse=True)\n s = \"No class named '{0}' in the module '{1}'.\\n\"\n s += \"Warning: {1} contains {2} Runnable classes ({3}). 
Should one of them be renamed ?\"\n import_error(s.format(class_name, module_name, len(possible_modules), ', '.join('\"%s\"' % _ for _ in possible_modules)))\n else:\n import_error(\"Warning: {} doesn't contain any Runnable classes\".format(module_name))\n\n # Check that the class is a runnable\n c = getattr(module, class_name)\n if not isinstance(c, type):\n import_error(\"{0} (found in {1}) is not a class but a {2}\".format(class_name, module.__file__, type(c)))\n if not issubclass(c, BaseRunnable):\n import_error(\"{0} (found in {1}) is not a sub-class of eHive.BaseRunnable\".format(class_name, module.__file__))\n\n return c", "title": "" }, { "docid": "ab233bbaa71434bcd94c29e625e42f01", "score": "0.6414958", "text": "def get_module(name):\r\n try:\r\n return Module.query.filter(Module.name == name).first()\r\n\r\n except:\r\n return None", "title": "" }, { "docid": "c95a10952293728e42303572db449126", "score": "0.6406842", "text": "def search(q):\n\n\tresult = []\n\n\tfor m in ModuleSIRegister.loadedmodules:\n\t\tfor u in m.units:\n\t\t\tif q in u.symbol or q in u.name:\n\t\t\t\tresult.append(u)\n\n\tif not result: raise LookupError(\"No matching unit.\")\n\tassert len(result)==1, \"Multiple units match that name.\" # should not occur with shipped modules\n\treturn result[0]", "title": "" }, { "docid": "b79fd330f93cd50859740e6e0663ab5a", "score": "0.6368079", "text": "def model_find_module_name(model, module_to_find):\n for name, m in model.named_modules():\n if m == module_to_find:\n return name\n return None", "title": "" }, { "docid": "f90cb0bbe62884c25cfd32c02f484335", "score": "0.6363757", "text": "def find(parent_package, predicate): \n for system in filter(predicate, resource_listdir(parent_package, '')):\n (module_name, ext) = path.splitext(system)\n if ext == \".py\":\n retval = _load_module(parent_package, module_name)\n if retval is not None:\n yield (module_name, retval)", "title": "" }, { "docid": "c1cc9c799a421546e60ed17b14e61520", "score": "0.63297075", "text": "def _find_package(self, name: str) -> Optional[str]:\n for check in [\n \"_find_package_in_modules\",\n \"_find_package_in_typing\",\n \"_find_package_in_common_statements\",\n \"_find_package_in_our_project\",\n ]:\n package = getattr(self, check)(name)\n if package is not None:\n return package\n return None", "title": "" }, { "docid": "b894be382fe4b8a9e0a01c7e144bd0e7", "score": "0.6310267", "text": "def SearchModuleName(self, nameModule):\n\n result = self.session.query(Modules).filter(Modules.module_name == nameModule).first()\n return result", "title": "" }, { "docid": "2c9a3cdb84618e58eb37670258021816", "score": "0.6291205", "text": "def whatis(name):\n module = pymod.modulepath.get(name)\n if module is None:\n raise ModuleNotFoundError(name, mp=pymod.modulepath)\n pymod.mc.load_partial(module, mode=pymod.modes.whatis)\n return module.format_whatis()", "title": "" }, { "docid": "d86fefb5d42081c04e013f8a40d58a29", "score": "0.6261796", "text": "def find_plugins():\n return {\n name: import_module(name)\n for _finder, name, _ispkg in pkgutil.iter_modules()\n if name.startswith(\"boardfarm_\")\n }", "title": "" }, { "docid": "2e761c414b176938f482ab5b8e1bfce0", "score": "0.624652", "text": "def _resolve(name):\n name = name.split('.')\n used = name.pop(0)\n found = __import__(used)\n for n in name:\n used = used + '.' 
+ n\n try:\n found = getattr(found, n)\n except AttributeError:\n __import__(used)\n found = getattr(found, n)\n return found", "title": "" }, { "docid": "21b247a44bcfafee255016c539506f26", "score": "0.61411244", "text": "def resolve(name):\r\n name = string.split(name, '.')\r\n used = name.pop(0)\r\n found = __import__(used)\r\n for n in name:\r\n used = used + '.' + n\r\n try:\r\n found = getattr(found, n)\r\n except AttributeError:\r\n __import__(used)\r\n found = getattr(found, n)\r\n return found", "title": "" }, { "docid": "6e794cf5af1e9e43783e1e9fd9f1a238", "score": "0.6140008", "text": "def findModules(self):\n for modulepath in findModules(self._root_path):\n module = Module(modulepath)\n self.addModule(module)", "title": "" }, { "docid": "a25eb612bc0bf733882c892f62c1cbbc", "score": "0.61257344", "text": "def find_module(self, fullname, path=None):\n raise NotImplementedError(\"Implement in subclass\")", "title": "" }, { "docid": "339ed847e0f3efbf64ec61fd6f369bc8", "score": "0.6097629", "text": "def testModuleFinding():\r\n # type () -> ()\r\n # should fail if the module name contains a '_'\r\n try:\r\n m = Module.fromName('_')\r\n Test.fail(\"ModuleException not thrown: '_' in name\")\r\n except ModuleException:\r\n pass\r\n except Exception as e:\r\n # TODO: it catches e as the type ModuleException, yet comes here???\r\n # TODO: type(e) != Exception, but prints to the same thing\r\n pass\r\n\r\n # MMBN6F contains a module called 'Battle' and a module called 'BattleMenu'...\r\n # the module 'Battle' should NOT contain files from the 'BattleMenu' module!\r\n # all files in fact must contain \"Battle_\" at their start!\r\n battle = Module.fromName(\"Battle\")\r\n for file in battle.files:\r\n for ea, name in file.getNames():\r\n if '_' not in name:\r\n Test.fail(\"name must contain '<modulename>_'\")\r\n modulename = name[0:name.index('_')]\r\n Test.assertEquals(modulename, \"Battle\", \"Not all items are part of the module\")", "title": "" }, { "docid": "91e67f56ef199b87090fd1e4efd4a426", "score": "0.60870284", "text": "def get_packages_by_name(self, name):\n\n packages = []\n\n for package in self.packages:\n if package.package == name:\n packages.append(package)\n\n return packages", "title": "" }, { "docid": "1a43c1ea8c2c2f21534cde69bf6ab1f6", "score": "0.60816014", "text": "def _split_by_module_and_remainder(name):\n module_path = name.split('.')\n member_path = []\n last_exception = None\n while True:\n try:\n module = importlib.import_module('.'.join(module_path))\n break\n except ImportError as error:\n member_path = [module_path.pop()] + member_path\n last_exception = error\n if not module_path:\n raise ModulesLoaderException(name, last_exception)\n return module, member_path, last_exception", "title": "" }, { "docid": "86177b30574bf592cd2d7e566a06a7a9", "score": "0.60536706", "text": "def getModulesImportingNameFrom(self, name, module):\n result = []\n for path in self._names.get((module.getPath(), name), {}).keys():\n result.append(self._modules[path])\n return result", "title": "" }, { "docid": "ad6d2ac281d185230bec124bc17303e6", "score": "0.6049731", "text": "def load_module(name):\n mod = __import__(name)\n for sub in name.split('.')[1:]:\n mod = getattr(mod, sub)\n return mod", "title": "" }, { "docid": "d98a45e8fc729d69a673a3be9a8783ec", "score": "0.59862524", "text": "def module(name):\n if inspect.ismodule(name):\n return name\n else:\n return import_module(name)", "title": "" }, { "docid": "3d5c734d16fe0254d03165dac5ea1b9c", "score": "0.59850764", "text": "def 
index(tagname):\n mods = glob.glob1(__path__[0], '*.py')\n keys = []\n usedin = {}\n\n for m in mods:\n if m[:2] == '__':\n continue\n modname = __name__ + '.' + m[:-3]\n path = modname.split('.')\n module = __import__(modname)\n\n # find the deepest submodule\n for modname in path[1:]:\n #try:\n module = getattr(module, modname)\n #except Exception as e:\n # print('Could not import module at path, %s, mod %s, name %s' % (path, module, modname))\n # traceback.format_exc()\n # return\n\n if hasattr(module, 'content'):\n c = module.content\n for k in c.keys():\n if k == tagname:\n keys.append(m)\n\n for item in c[k]:\n if str.find(item, tagname) != -1:\n usedin[(m, k)] = 1\n return keys, usedin.keys()", "title": "" }, { "docid": "35fed343c1935319b47533af4195888e", "score": "0.5983773", "text": "def find_module(self,fullname,path=None):\n if path is None:\n mi = self._get_module_type(fullname)\n if mi is not None:\n return self\n else:\n if isinstance(path,basestring):\n return None\n for p in path:\n if p == self.archive:\n return self.find_module(fullname)\n if p.startswith(self.archive + SEP):\n return self.__class__(p).find_module(fullname)\n return None", "title": "" }, { "docid": "3fc989d21d8fe7d43e08db18cb7dc407", "score": "0.5976669", "text": "def find_modules(directory):\n modules=[]\n files = glob.glob(\"{}/*.py\".format(directory))\n for file in files:\n if file.endswith(\"__init__.py\"):\n continue\n else:\n name, ext = splitext(basename(file))\n modules.append(name)\n return modules", "title": "" }, { "docid": "7b9ab0859cce9c5aa0657cfb6eab9a38", "score": "0.5967699", "text": "def discover_modules():\n lopath = pathlib.Path(__file__).resolve().parent\n def filetest(pathobj):\n \"\"\" Tests if a Path Object is a valid submodule \"\"\"\n _module = importlib.import_module(\"aldb2.WebModules.\"+pathobj.stem)\n for required_attr in [\"SITENAME\",]:\n if not getattr(_module,required_attr,None):\n del _module\n return False\n for required_method in [\"match_url\",\"parse_siteid\",]:\n if not callable(getattr(_module,required_method,None)):\n del _module\n return False\n return True\n\n for pyfile in filemodules.iterdir_re(lopath,\".*\",test = filetest):\n mod = importlib.import_module(\"aldb2.WebModules.\"+pyfile.stem)\n MODULES[mod.SITENAME] = mod", "title": "" }, { "docid": "ec81f8036fe78251ee68bf04fbce13e4", "score": "0.59648955", "text": "def importModule(name):\n\n\tmod = __import__(name)\n\tcomponents = name.split('.')\n\tfor comp in components[1:]:\n\t\tmod = getattr(mod, comp)\n\treturn mod", "title": "" }, { "docid": "99417df844b335cd69231ee2c623588f", "score": "0.5964421", "text": "def get_module(name):\n if name.startswith('/') or WINDOWS_PATH.match(name):\n basename, modulename = module_name_for_filename(name)\n path = [basename]\n else:\n modulename = name\n path = None\n if modulename in sys.modules:\n return sys.modules[modulename]\n return load_qualified_module(modulename, path)", "title": "" }, { "docid": "a645d85400941be8de66a4fa8e37163c", "score": "0.5964066", "text": "def _find_module(module):\n parts = module.split(\".\")\n module_path = parts[0:-1]\n package = \".\".join(module_path)\n module_name_part = parts[-1]\n # See function docstring for the rationale of this algorithm.\n for sys_path_item in sys.path:\n cur_path = os.path.join(sys_path_item, *module_path)\n try:\n f, path, _desc = imp.find_module(module_name_part, [cur_path])\n except ImportError:\n pass\n else:\n if f:\n f.close()\n else:\n path = _find_package_main(path)\n if path is None:\n raise 
ImportError(\n f\"No module named {module}.__main__ ('{module}' is \"\n \"a package and cannot be directly executed)\"\n )\n return ModuleInfo(path, package)\n raise ImportError(f\"No module named {module}\")", "title": "" }, { "docid": "9f965053ed1af3a98e81b6a2fadd9f8d", "score": "0.5951107", "text": "def path_to_module(name):\n\tmodule = [ ]\n\twhile name:\n\t\tif name == mypath:\n\t\t\tmodule.append(mypkg)\n\t\t\tbreak\n\t\t(name,base) = os.path.split(name)\n\t\tmodule.append(base)\n\tmodule.reverse()\n\treturn '.'.join(module)", "title": "" }, { "docid": "2caa31128507b48e21e7a450351a9492", "score": "0.5950764", "text": "def find_related_imports(self, fullname):\n related = self._related_cache.get(fullname)\n if related is not None:\n return related\n\n modpath, src, _ = self.get_module_source(fullname)\n if src is None:\n return []\n\n maybe_names = list(self.generate_parent_names(fullname))\n\n co = compile(src, modpath, 'exec')\n for level, modname, namelist in scan_code_imports(co):\n if level == -1:\n modnames = [modname, '%s.%s' % (fullname, modname)]\n else:\n modnames = [\n '%s%s' % (self.resolve_relpath(fullname, level), modname)\n ]\n\n maybe_names.extend(modnames)\n maybe_names.extend(\n '%s.%s' % (mname, name)\n for mname in modnames\n for name in namelist\n )\n\n return self._related_cache.setdefault(fullname, sorted(\n set(\n name\n for name in maybe_names\n if sys.modules.get(name) is not None\n and not is_stdlib_name(name)\n and u'six.moves' not in name # TODO: crap\n )\n ))", "title": "" }, { "docid": "291e498955f78aa8cacfa863e53d8014", "score": "0.5934568", "text": "def find_dotted(names, parentdir=None):\n filename = None\n for name in names:\n mod = ImpImporter(parentdir).find_module(name)\n if not mod:\n break\n filename = mod.get_filename()\n if not filename:\n break\n parentdir = dirname(filename)\n else:\n return filename", "title": "" }, { "docid": "1e8bf628e6a05f57e83b6cb1a3eff1ae", "score": "0.59185535", "text": "def _findModules(root, extensions, skip, parent=\"\"):\n for fileName in os.listdir(root):\n if fileName in skip:\n continue\n path = os.path.join(root, fileName)\n if os.path.isdir(path):\n if _isIdentifier(fileName) and os.path.exists(os.path.join(path, \"__init__.py\")):\n if parent:\n packageName = parent + \".\" + fileName\n else:\n packageName = fileName\n for module in _findModules(path, extensions, skip, packageName):\n yield module\n elif not parent:\n for module in _findModules(path, extensions, skip, \"\"):\n yield module\n elif \".\" in fileName:\n moduleName, ext = fileName.split(\".\", 1)\n if ext in extensions and _isIdentifier(moduleName):\n if parent and moduleName == \"__init__\":\n yield parent\n elif parent:\n yield parent + \".\" + moduleName\n else:\n yield moduleName", "title": "" }, { "docid": "588044ecb2200d845f5071c962d05287", "score": "0.5882386", "text": "def moduleByName(self, module_name, station=None):\n s = None\n if station:\n s = self.stationByName(station)\n if not s:\n return None\n if s[\"module_name\"] == module_name:\n return s\n else:\n for s in self.stations.values():\n if \"module_name\" in s:\n if s[\"module_name\"] == module_name:\n return s\n break\n for m in self.modules:\n module = self.modules[m]\n if module[\"module_name\"] == module_name:\n if not s or module[\"main_device\"] == s[\"_id\"]:\n return module\n return None", "title": "" }, { "docid": "293a301ba3aab98b841939070eebb0d5", "score": "0.585641", "text": "def _find_package_in_common_statements(name: str) -> Optional[str]:\n if name in 
common_statements.keys():\n return common_statements[name]\n\n return None", "title": "" }, { "docid": "ffca6e98308d0b7be014b838063bbb9d", "score": "0.5851527", "text": "def _find_package_in_our_project(name: str) -> Optional[str]:\n # Find the package name\n project_package = os.path.basename(here()).replace(\"-\", \"_\")\n package_objects = extract_package_objects(project_package)\n\n # nocover: as the tests are run inside the autoimport virtualenv, it will\n # always find the objects on that package\n if package_objects is None: # pragma: nocover\n return None\n try:\n return package_objects[name]\n except KeyError:\n return None", "title": "" }, { "docid": "87d87edf64ad880f07009d14679fa946", "score": "0.58485496", "text": "def list_package_modules(package_name):\n directory = str(package_name)\n if not (os.path.isdir(directory) and os.path.exists(directory)):\n print(f\"Invalid package {package_name}.\")\n return None\n\n search_path = [directory]\n all_modules = [x[1] for x in pkgutil.iter_modules(path=search_path)]\n print(all_modules)\n\n return all_modules", "title": "" }, { "docid": "806ab6615be041cd27864906aaa8b340", "score": "0.58266675", "text": "def get_module(mod_name):\n if mod_name is '':\n raise ImportError, \"Unable to import empty module\"\n\n mod = __import__(mod_name)\n components = mod_name.split('.')\n\n for comp in components[1:]:\n mod = getattr(mod, comp)\n\n return mod", "title": "" }, { "docid": "7f60a23fd8dbd5abdce3a0307b6d9c93", "score": "0.58206403", "text": "def find_submodules(modulename):\n # First, figure out where this module exists in the file system.\n module = safe_load_module(modulename)\n if not module: # This is not a valid python module or package.\n return []\n filename = module.__file__\n\n # If this is an actual module (rather than a package), then\n # there's no more submodules and we're done.\n if not filename.endswith(\"__init__.py\") and \\\n not filename.endswith(\"__init__.pyc\") and \\\n not filename.endswith(\"__init__.pyo\"):\n return [modulename]\n\n # Since it's a package, get a list of all the modules inside it\n # and recurse on those.\n dirname = os.path.dirname(filename)\n submodulenames = {} # prevent duplicates\n for filename in os.listdir(dirname):\n filename = os.path.splitext(filename)[0]\n if filename == '__init__':\n continue\n elif not filename:\n continue\n name = \"%s.%s\" % (modulename, filename)\n submodulenames[name] = 1\n submodulenames = submodulenames.keys()\n submodulenames.sort()\n\n submodules = []\n for name in submodulenames:\n try:\n x = find_submodules(name)\n except ImportError, x:\n raise\n pass # ignore things that aren't valid modules (e.g. 
CVS)\n else:\n submodules.extend(x)\n\n return submodules", "title": "" }, { "docid": "7ef3d07dbf705917fd9ae6f6153ba588", "score": "0.5820015", "text": "def scan_modules():\n modules = {}\n\n def callback(path, modname, desc, modules=modules):\n if modname and modname[-9:] == \".__init__\":\n modname = modname[:-9] + \" (package)\"\n if modname.find(\".\") < 0:\n modules[modname] = 1\n\n def onerror(modname):\n callback(None, modname, None)\n\n with warnings.catch_warnings():\n # ignore warnings from importing deprecated modules\n warnings.simplefilter(\"ignore\")\n ModuleScanner().run(callback, onerror=onerror)\n return list(modules.keys())", "title": "" }, { "docid": "64827fad8a499beafee2783219c521ed", "score": "0.5782156", "text": "def _importAndReturnModule(name):\n module = __import__(name)\n parts = name.split('.')\n for part in parts[1:]:\n module = getattr(module, part)\n return module", "title": "" }, { "docid": "c2f643d3c91a56669b468ecbc5b0c1d4", "score": "0.57818115", "text": "def findImports(mod):\n visitor = ImportFinder()\n compiler.walk(mod, visitor)\n return visitor.getMap()", "title": "" }, { "docid": "cb63d5b12cbb750ba8a159200fa22d7e", "score": "0.57603955", "text": "def find_tool_by_name(self, name):\n return None\n # return self.tools.find_one({'name':name})", "title": "" }, { "docid": "c1f92f9665e58d44bed70505b205c5dd", "score": "0.57516575", "text": "def findDeps( sModule, oFileOut ):\n\n\toFileOut.write( getRequire( sModule ) + \"\\n\" )\n\n\t# Only traverse rda modules\n\tif sModule[ 0:3 ] != \"rda\": return\n\n\trgsModule = []\n\toFile = open( getFilename( sModule ), \"r\" )\n\tfor sLine in oFile.readlines():\n\t\tif sLine[ 0:12 ] != \"dojo.require\": continue\n\t\trgsModule.append( getModule( sLine ) )\n\toFile.close()\n\n\tfor sModule in rgsModule:\n\t\tfindDeps( sModule, oFileOut )", "title": "" }, { "docid": "3905b61e26804c8e9fa82bbdb9072ad4", "score": "0.57461786", "text": "def _get_f90_modules(source):\n if not f90_ext_match(source):\n return []\n modules = []\n with open(source) as f:\n for line in f:\n m = f90_module_name_match(line)\n if m:\n name = m.group('name')\n modules.append(name)\n # break # XXX can we assume that there is one module per file?\n return modules", "title": "" }, { "docid": "52e254b5e037ee2855477751d6a78e2d", "score": "0.57460684", "text": "def whichmodule(obj, name):\n module_name = getattr(obj, '__module__', None)\n if module_name is not None:\n return module_name\n for module_name, module in list(sys.modules.items()):\n if module_name == '__main__' or module is None:\n continue\n try:\n if _getattribute(module, name)[0] is obj:\n return module_name\n except AttributeError:\n pass\n return '__main__'", "title": "" }, { "docid": "d50fd719b747826901dd88903c2ceae3", "score": "0.5735261", "text": "def find_functions(func_name):\n func_name = f\"define_{func_name}\"\n this_module = sys.modules[__name__]\n for name, funcs in this_module.__dict__.items():\n if name.lower() == func_name.lower():\n return funcs\n raise ValueError(f\"no function name {func_name}\")\n # return getattr(this_module, func_name)", "title": "" }, { "docid": "d61274b90a7a609d3564b38d7990e580", "score": "0.5733286", "text": "def get_module_list(path):\n\treturn [(module.find_module(name).load_module(name)) for module, name, is_pkg in pkgutil.walk_packages([path])]", "title": "" }, { "docid": "4f2d7810ffd26d8b65a7b434d4e4b902", "score": "0.57293826", "text": "def find_py(self, path):\n b = []\n names1 = '.py'\n names2 = '.hex'\n for root, dirs, files in 
os.walk(path):\n for name in files:\n if names1 in name or names2 in name:\n b.append(name)\n return b", "title": "" }, { "docid": "09fa60431d042947ce57c361519d32d6", "score": "0.5712117", "text": "def find_module(self, fullname, path=None):\n warnings.warn(\"importlib.abc.Finder along with its find_module() \"\n \"method are deprecated and \"\n \"slated for removal in Python 3.12; use \"\n \"MetaPathFinder.find_spec() or \"\n \"PathEntryFinder.find_spec() instead\",\n DeprecationWarning)", "title": "" }, { "docid": "1ab12c1b8bcb3a71eae93625df6efb62", "score": "0.5710915", "text": "def get_module(self, name):\n module = getattr(self, name, None)\n if module is not None:\n assert isinstance(module, Module)\n return module", "title": "" }, { "docid": "3259132b098b813cfad540991575ae0b", "score": "0.5708175", "text": "def find_bcm(self, path):\n b = []\n names = '.bcm'\n for root, dirs, files in os.walk(path):\n for name in files:\n if names in name:\n b.append(name)\n return b", "title": "" }, { "docid": "c25289357659d6d208bb2f1e1fe95505", "score": "0.56900436", "text": "def import_module(name):\n __import__(name)\n return sys.modules[name]", "title": "" }, { "docid": "98f194eafa07949c66762ea1529222e1", "score": "0.5684248", "text": "def importname(modulename, name):\n logger = logging.getLogger()\n if not modulename:\n m = name\n else:\n m = '%s.%s' % (modulename, name)\n\n try:\n module = __import__(m)\n for c in m.split('.')[1:]:\n module = getattr(module, c)\n return module\n\n except ImportError as ie:\n logger.error('Module %s in package %s won\\'t import: %s' %\n (repr(modulename), repr(name), ie))\n\n except StandardError as e:\n logger.error('Module %s not in in package %s: %s' %\n (repr(modulename), repr(name), e))\n\n return None", "title": "" }, { "docid": "3e2b1f71188c4467c82f8ad808c10f8c", "score": "0.56818616", "text": "def searchFunctionByName(self, name, heuristic = 90, module = None, version = None):\r\n #the name is case insensitive\r\n name = name.lower()\r\n \r\n #Heuristic search\r\n poss_return = []\r\n for data in self.dictionaries:\r\n if name == data[0].lower():\r\n #support version matching\r\n if version and version.lower() != data[6].lower():\r\n continue\r\n \r\n #self.imm.Log(\"trying with: %s, version: %s\" % ( data[0], data[5]))\r\n if len(data) < 9: data[7] = \"\" #support for older entries\r\n poss_return += self._searchFunctionByHeuristic(data[1], data[2], data[3], data[4], heuristic, module, string.split(data[7],\"|\"))\r\n return poss_return", "title": "" }, { "docid": "e2d4d54815af95ae22ab6e71b0c476d5", "score": "0.5673747", "text": "def _get_module_and_name(self, parameter_name):\n module, name = parameter_name.split(\".\", 1)\n if module in self._modules:\n return self.__getattr__(module), name\n else:\n raise AttributeError(\n \"Invalid parameter name {}. 
{} has no module {}\".format(parameter_name, type(self).__name__, module)\n )", "title": "" }, { "docid": "6fe09348b06348609e09acc112020e16", "score": "0.5671707", "text": "def lookup_module(id):\n return _module_registry[id]", "title": "" }, { "docid": "0de0d7129dedbba1523025c4fbaa8c21", "score": "0.5669394", "text": "def lookup(name, namespace):\n dots = name.count('.')\n if dots > 0:\n moduleName, objName = '.'.join(\n name.split('.')[:-1]), name.split('.')[-1]\n module = __import__(moduleName)\n return getattr(module, objName)\n else:\n modules = [obj for obj in list(namespace.values()) if str(\n type(obj)) == \"<type 'module'>\"]\n options = [getattr(module, name)\n for module in modules if name in dir(module)]\n options += [obj[1]\n for obj in list(namespace.items()) if obj[0] == name]\n if len(options) == 1:\n return options[0]\n if len(options) > 1:\n raise Exception('Name conflict for %s')\n raise Exception('%s not found as a method or class' % name)", "title": "" }, { "docid": "fcc635707ea659e56273a9636882c546", "score": "0.56620634", "text": "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "title": "" }, { "docid": "63c3e0b6c23aacf79c7cb8c6ae611d03", "score": "0.5661314", "text": "def find_header_module(header_name: str) -> Any:\n name_token = HeaderProcessor.name_token(header_name)\n if name_token[0] == \"_\": # these are special\n return None\n if name_token in HeaderProcessor.header_aliases:\n name_token = HeaderProcessor.header_aliases[name_token]\n try:\n module_name = f\"redbot.message.headers.{name_token}\"\n __import__(module_name)\n return sys.modules[module_name]\n except (ImportError, KeyError, TypeError):\n return None", "title": "" }, { "docid": "2ee415665a09c0b9f9b3aa3630c122aa", "score": "0.56590873", "text": "def find_modules(root_path, prefix=[], pattern='^test_.*\\\\.py$'):\n if is_string(prefix):\n prefix = [prefix]\n elif not isinstance(prefix, list):\n raise TypeError(\"Invalid prefix type '%s'\" % type(prefix).__name__)\n\n modules = []\n for path, dirs, files in os.walk(root_path):\n base = prefix + os.path.relpath(path, root_path).split(os.sep)\n for filename in files:\n if re.search(pattern, filename):\n name, ext = os.path.splitext(filename)\n modules.append(base + [name])\n\n return modules", "title": "" }, { "docid": "9cf3a48da395f2edb9b5707b8ab15033", "score": "0.56538457", "text": "def test_load_modules():\n utils.load_pysam_modules()\n\n # try a few modules to make sure modules were loaded properly\n assert utils.filename_to_module(\" Test_pvsamv1.json \").__name__ == \"PySAM.Pvsamv1\"\n assert utils.filename_to_module(\" Test_cashloan.json \").__name__ == \"PySAM.Cashloan\"\n assert utils.filename_to_module(\" Test_BeLpE.json \").__name__ == \"PySAM.Belpe\"", "title": "" }, { "docid": "b9091c3b27cd3de50dc8580ff6f0dd7d", "score": "0.56399506", "text": "def find_module(fullname, path=None):\n if not fullname.startswith(__name__ + '.'):\n # Not a quilt submodule.\n return None\n\n submodule = fullname[len(__name__) + 1:]\n parts = submodule.split('.')\n\n if len(parts) == 1:\n for package_dir in PackageStore.find_package_dirs():\n file_path = os.path.join(package_dir, parts[0])\n if os.path.isdir(file_path):\n return FakeLoader(file_path)\n elif len(parts) == 2:\n user, package = parts\n store = get_store(user, package)\n if store:\n file_path = store.get_path()\n return PackageLoader(file_path, store)\n\n return None", "title": "" }, { "docid": "8797b1af0bcef1fae7a996f2b4f39488", "score": "0.5632986", "text": "def 
is_module(name):\n for ext in CODE_FILES:\n if name.endswith(ext):\n return name[:-len(ext)]", "title": "" }, { "docid": "9381fec2589c920a5f3819f73d103348", "score": "0.5626422", "text": "def _get_module_type(self,fullname):\n pathhead = self.prefix + fullname.rsplit(\".\",1)[-1] \n for suffix,ispkg,iscode in self._zip_searchorder:\n path = pathhead + suffix\n if path in self._files:\n if ispkg:\n return self.MI_PACKAGE\n else:\n return self.MI_MODULE\n return None", "title": "" }, { "docid": "0ba8d068c49bd839aa75bb7926c71c79", "score": "0.56181574", "text": "def get_modules(self):\n if \"module\" in self.df_columns():\n return [\n self.get_name(mod_idx, \"module\") for mod_idx in self.df_unique(\"module\")\n ]", "title": "" }, { "docid": "09cbe100ba886b6e759a7b752a62e4a4", "score": "0.5614674", "text": "def get_modules(self, path):\n\t\tmodules = []\n\t\tfor finder, name, ispkg in pkgutil.walk_packages([path]):\n\t\t\ttry:\n\t\t\t\tloader = finder.find_module(name)\n\t\t\t\tmod = loader.load_module(name)\n\t\t\texcept:\n\t\t\t\tprint \"Skipped module '%s' due to an error.\" %(name)\n\t\t\telse:\n\t\t\t\tif hasattr(mod, 'WORDS'):\n\t\t\t\t\tmodules.append(mod)\n\t\t\t\telse:\n\t\t\t\t\tprint \"Skipped module %s because it misses the \\\n\t\t\t\t\t\tWORDS constant.\" % (name)\n\t\tmodules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY')\n\t\t\t\t\t else 0, reverse=True)\n\t\treturn modules", "title": "" }, { "docid": "7c5da32347cc4f8e4fbafeb6641c015b", "score": "0.56054705", "text": "def importName(modulename, name):\n try:\n module = __import__(modulename, globals(), locals(), [name])\n except ImportError:\n return None\n\n #it is necessary to check for KeyError exception in case the module exists but doesn't contain specified object \n try:\n return vars(module)[name]\n except KeyError:\n return None", "title": "" }, { "docid": "960aba2a81a6b8ddabd9b9013c3255d3", "score": "0.56031483", "text": "def get_object(name: str) -> Any:\n names = name.split(\".\")\n for k in range(len(names), 0, -1):\n module_name = \".\".join(names[:k])\n try:\n obj = importlib.import_module(module_name)\n except ModuleNotFoundError:\n continue\n for attr in names[k:]:\n try:\n obj = getattr(obj, attr)\n except AttributeError:\n ...\n return obj\n raise ValueError(f\"Could not find object: {name}\")", "title": "" }, { "docid": "746cfa8b62e0a4d23699b2ee1d594a98", "score": "0.55864877", "text": "def get_metadata_module(self, name, block_hash=None):\n self.init_runtime(block_hash=block_hash)\n\n for module in self.metadata_decoder.metadata.modules:\n if module.name == name:\n return module", "title": "" }, { "docid": "c20920a61dac93fcc1648ca30c049d0c", "score": "0.55857664", "text": "def find_related_module(app, related_name):\n\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n return\n\n try:\n imp.find_module(related_name, app_path)\n except ImportError:\n return\n\n return importlib.import_module(\"%s.%s\" % (app, related_name))", "title": "" }, { "docid": "99c9d8a8a3edde607c2b64e62312dfec", "score": "0.55790675", "text": "def lookup(name, namespace):\r\n\tdots = name.count('.')\r\n\tif dots > 0:\r\n\t\tmoduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]\r\n\t\tmodule = __import__(moduleName)\r\n\t\treturn getattr(module, objName)\r\n\telse:\r\n\t\tmodules = [obj for obj in namespace.values() if str(type(obj)) == \"<type 'module'>\"]\r\n\t\toptions = [getattr(module, name) for module in modules if name in dir(module)]\r\n\t\toptions += [obj[1] 
for obj in namespace.items() if obj[0] == name ]\r\n\t\tif len(options) == 1: return options[0]\r\n\t\tif len(options) > 1: raise Exception, 'Name conflict for %s'\r\n\t\traise Exception, '%s not found as a method or class' % name", "title": "" }, { "docid": "d21d56aed25786b524a3dcefa577af3e", "score": "0.5577841", "text": "def find_packages():\n return ['restfulclient']", "title": "" }, { "docid": "868c0f5d37d18b3c6e5b17a579407827", "score": "0.5573851", "text": "def _find_refs(tree, module, name, sphinx_version):\n _log.debug(f\"looking for module {module}\")\n # There are 3 types of refs we are looking for\n # 1. manually added with index directive\n xpath_expr = '//li[contains(.,\"{m}\")]/ul/li/a'.format(m=module)\n _log.debug(f\"manually indexed: xpath expr={xpath_expr}\")\n elements = tree.xpath(xpath_expr)\n hrefs = [e.get(\"href\") for e in elements if e.text.strip() == name]\n if hrefs:\n _log.debug(f\"found manually indexed docs at: {hrefs[0]}\")\n return hrefs # found something; stop\n if name:\n # 2a. embedded in the page\n target = \"{}.{}\".format(module, name)\n subtype = \"embedded\" # for logging\n else:\n # 2b. generated by autodoc in the apidoc folder\n target = \"module-{}\".format(module) # how Sphinx names these\n subtype = \"autodoc\" # for logging\n xpath_expr = '//li/a[contains(@href,\"#{}\")]/@href'.format(target)\n _log.debug(f\"{subtype}: xpath expr={xpath_expr}\")\n hrefs = [p for p in tree.xpath(xpath_expr) if p.endswith(target)]\n if hrefs:\n _log.debug(f\"found {subtype} docs at: {hrefs[0]}\")\n return hrefs", "title": "" }, { "docid": "222ec4b8b0dc7ae66db958a786ef556d", "score": "0.5568299", "text": "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "title": "" }, { "docid": "8cb06d192b499b011f809e1103578acb", "score": "0.55671024", "text": "def _get_or_define_module(full_name, modules):\n module = modules.get(full_name)\n if not module:\n module = types.ModuleType(full_name)\n modules[full_name] = module\n\n split_name = full_name.rsplit('.', 1)\n if len(split_name) > 1:\n parent_module_name, sub_module_name = split_name\n parent_module = _get_or_define_module(parent_module_name, modules)\n setattr(parent_module, sub_module_name, module)\n\n return module", "title": "" }, { "docid": "fb7f8bdf9bb250333003fc8668347f4e", "score": "0.556447", "text": "def find_module(target, startsearch=MODENA_WORKING_DIR):\n\n pth = os.path.abspath(startsearch)\n while target not in os.listdir(pth):\n pth = os.path.abspath(os.path.join(pth,'..')) # step back a directory\n if os.path.ismount(pth): # break if we hit \"root\"\n pth = None\n break\n\n if pth is not None:\n sys.path.insert(0, os.path.join(pth,target))\n else:\n print \"Could not find directory: %s\" %(target)", "title": "" }, { "docid": "3551c480a3e3396ff901d9c8f9c796df", "score": "0.55609095", "text": "def find_dotted_module(modname, rname, parentdir, level):\n # Check for builtins.\n if modname in builtin_module_names:\n return join(libpath, modname), None\n\n errors = []\n names = modname.split('.')\n for i in range(level - 1):\n parentdir = dirname(parentdir)\n # Try relative import, then global imports.\n fn = find_dotted(names, parentdir)\n if not fn:\n try:\n fn = module_cache[modname]\n except KeyError:\n fn = find_dotted(names)\n module_cache[modname] = fn\n\n if not fn:\n errors.append((ERROR_IMPORT, modname))\n return None, errors\n\n # If this is a from-form, try the target symbol as a module.\n if rname:\n fn2 = find_dotted([rname], dirname(fn))\n if fn2:\n fn = fn2\n else:\n 
errors.append((ERROR_SYMBOL, '.'.join((modname, rname))))\n # Pass-thru and return the filename of the parent, which was found.\n\n return fn, errors", "title": "" }, { "docid": "897d16cb912c5c196e670125f13a4fd4", "score": "0.55417156", "text": "def find_mod_objs(modname, onlylocals=False):\n\n __import__(modname)\n mod = sys.modules[modname]\n\n if hasattr(mod, '__all__'):\n pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]\n else:\n pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']\n\n # filter out modules and pull the names and objs out\n ismodule = inspect.ismodule\n localnames = [k for k, v in pkgitems if not ismodule(v)]\n objs = [v for k, v in pkgitems if not ismodule(v)]\n\n # fully qualified names can be determined from the object's module\n fqnames = []\n for obj, lnm in zip(objs, localnames):\n if hasattr(obj, '__module__') and hasattr(obj, '__name__'):\n fqnames.append(obj.__module__ + '.' + obj.__name__)\n else:\n fqnames.append(modname + '.' + lnm)\n\n if onlylocals:\n valids = [fqn.startswith(modname) for fqn in fqnames]\n localnames = [e for i, e in enumerate(localnames) if valids[i]]\n fqnames = [e for i, e in enumerate(fqnames) if valids[i]]\n objs = [e for i, e in enumerate(objs) if valids[i]]\n\n return localnames, fqnames, objs", "title": "" }, { "docid": "b797764a8ca92bc3215f4f2f680e5b1b", "score": "0.5539677", "text": "def _get_mod_dlls(self, mod: Union[str, mobase.IModInterface]) -> Sequence[str]:\n if isinstance(mod, str):\n mod = self.organizer.modList().getMod(mod)\n plugins = mod.fileTree().find(\"BepInEx/plugins/\", mobase.IFileTree.DIRECTORY)\n if isinstance(plugins, mobase.IFileTree):\n return [name for p in plugins if (name := p.name()).endswith(\".dll\")]\n else:\n return []", "title": "" }, { "docid": "54368ae4d8ffa0a9bed066b68e1d64a9", "score": "0.55392605", "text": "def resolve_extensions(bot: commands.Bot, name: str) -> list:\n\n exts = []\n for ext in braceexpand(name):\n if ext.endswith('.*'):\n module_parts = ext[:-2].split('.')\n path = pathlib.Path(*module_parts)\n exts.extend(find_extensions_in(path))\n elif ext == '~':\n exts.extend(bot.extensions)\n else:\n exts.append(ext)\n\n return exts", "title": "" }, { "docid": "df98c869587cdc618812f4d0c1eae155", "score": "0.5534736", "text": "def get_match_module(self):\n code = self.get_match_code()\n if code is not None:\n mod = types.ModuleType()\n code = __builtins__.compile(code, '__match__', 'exec')\n exec(code, mod.__dict__)\n return mod\n else:\n return None", "title": "" }, { "docid": "021970f7e7a8ad07227e3af356138fa0", "score": "0.5529943", "text": "def get_module(self, address):\n mods = [name for (name, (start, end)) in self.module_regions.items() \\\n if start <= address <= end]\n\n if len(mods) == 1:\n name = mods[0]\n return self.loaded_modules[name]\n elif len(mods) > 1:\n logging.error(\"DLLs are overlapping. Something is terribly wrong\")\n\n return None", "title": "" }, { "docid": "30a62cf9fcc86e29955cdc81c24b05bf", "score": "0.55230033", "text": "def find_management_module(app_name):\n parts = app_name.split('.')\n parts.append('management')\n parts.reverse()\n part = parts.pop()\n path = None\n\n # When using manage.py, the project module is added to the path,\n # loaded, then removed from the path. This means that\n # testproject.testapp.models can be loaded in future, even if\n # testproject isn't in the path. 
When looking for the management\n # module, we need look for the case where the project name is part\n # of the app_name but the project directory itself isn't on the path.\n try:\n f, path, descr = imp.find_module(part,path)\n except ImportError as e:\n if os.path.basename(os.getcwd()) != part:\n raise e\n\n while parts:\n part = parts.pop()\n f, path, descr = imp.find_module(part, path and [path] or None)\n return path", "title": "" }, { "docid": "8a9ed11ea9c477b5888bb55c10aab9c9", "score": "0.5518648", "text": "def find(glob):\n return load(pathtools.glob_files(\".\", glob))", "title": "" }, { "docid": "4b4f14bf3836e7b3b589268a90dfe332", "score": "0.5504623", "text": "def _get_module_members(self, module_loader, name):\n search_path = module_loader.path\n if search_path.endswith('/'):\n raise InvalidAutoloadPath(\n 'Autoload path cannot have a trailing slash')\n\n return importlib.import_module(\n module_loader.path.replace('/', '.') + '.' + name)", "title": "" }, { "docid": "e91dd23b8057524e9efaa766fa217224", "score": "0.5494041", "text": "def list_modules(*args):\n modules = set()\n if not args:\n for func in __salt__:\n modules.add(func.split(\".\")[0])\n return sorted(modules)\n\n for module in args:\n if \"*\" in module:\n for func in fnmatch.filter(__salt__, module):\n modules.add(func.split(\".\")[0])\n else:\n for func in __salt__:\n mod_test = func.split(\".\")[0]\n if mod_test == module:\n modules.add(mod_test)\n return sorted(modules)", "title": "" }, { "docid": "49e9ec22a3be100726e7a068727fccd3", "score": "0.5493763", "text": "def _find_package_in_typing(name: str) -> Optional[str]:\n typing_objects = extract_package_objects(\"typing\")\n\n try:\n return typing_objects[name]\n except KeyError:\n return None", "title": "" }, { "docid": "7784c1861bd263d61b7d4471d5c3c377", "score": "0.54914194", "text": "def search_modules():\n search_term = request.args.get(\"q\", \"\").strip()\n if search_term:\n per_page = current_app.config[\"WHEELODEX_SEARCH_RESULTS_PER_PAGE\"]\n ### TODO: Limit to the latest data-having version of each project?\n q = (\n db.session.query(Project, Wheel, Module)\n .join(Version, Project.versions)\n .join(Wheel, Version.wheels)\n .join(WheelData, Wheel.data)\n .join(Module, WheelData.modules)\n )\n if \"*\" in search_term or \"?\" in search_term:\n q = q.filter(Module.name.ilike(glob2like(search_term), escape=\"\\\\\"))\n else:\n q = q.filter(db.func.lower(Module.name) == db.func.lower(search_term))\n ### TODO: Order results by something?\n results = q.paginate(per_page=per_page)\n else:\n results = None\n return render_template(\n \"search_modules.html\",\n search_term=search_term,\n results=results,\n )", "title": "" }, { "docid": "fd5e2e22c710422735bc8a6a448a90f6", "score": "0.5486612", "text": "def discover_packages(base):\n\n mod = importlib.import_module(base)\n mod_fname = mod.__file__\n mod_dirname = os.path.normpath(os.path.dirname(mod_fname))\n\n for root, _dirnames, filenames in os.walk(mod_dirname):\n for _ in fnmatch.filter(filenames, '__init__.py'):\n yield '.'.join(os.path.relpath(root).split(os.sep))", "title": "" }, { "docid": "44124367b8f22ab09862047032e88ace", "score": "0.54827505", "text": "def find_modules(script,\n add_script = False, include_stdlib = False, use_source = False,\n path = None, debug = 0, excludes = [], replace_paths = []):\n\n def normabs(p):\n return os.path.normcase(os.path.normpath(os.path.abspath(p)))\n\n prefix = normabs(sys.prefix)\n\n modules = []\n ma = modules.append\n finder = modulefinder.ModuleFinder(path = 
path, debug = debug,\n excludes = excludes, replace_paths = replace_paths)\n finder.run_script(script)\n\n names = finder.modules.keys()\n if not add_script: names.remove('__main__')\n names.sort()\n for name in names:\n mod = finder.modules[name]\n fn = mod.__file__\n if not fn: continue\n if not include_stdlib and normabs(fn).startswith(prefix): continue\n\n if not use_source and '.py' == os.path.splitext(fn)[1]: fn += 'c'\n\n ans = name.split('.')\n if mod.__path__:\n ans.append(os.path.basename(fn))\n else:\n ans[-1] = os.path.basename(fn)\n\n ma((fn, '/'.join(ans)))\n\n return modules", "title": "" }, { "docid": "5fa17bfae2bf47dbd39a47eca2876fb9", "score": "0.5480114", "text": "def my_import(name): \r\n mod = __import__(name)\r\n components = name.split('.')\r\n for comp in components[1:]:\r\n mod = getattr(mod, comp)\r\n return mod", "title": "" }, { "docid": "e5fffb29c0b2998f4cf0245153a4bc0d", "score": "0.54800373", "text": "def get_module_list():\n\n import ceasiompy.__init__\n\n ignore_submods = [\n '__init__.py',\n '__version__.py',\n '__pycache__',\n ]\n\n # Path for main CEASIOMpy library\n lib_dir = os.path.dirname(ceasiompy.__init__.__file__)\n\n dirnames = glob(os.path.join(lib_dir, '*'))\n module_list = []\n for dirname in dirnames:\n submod_name = os.path.basename(dirname)\n if submod_name in ignore_submods:\n continue\n module_name = 'ceasiompy.' + submod_name\n module_list.append(module_name)\n\n return module_list", "title": "" }, { "docid": "4401d7a85c67c9fee1f2861641a77125", "score": "0.54778796", "text": "def find_module(self, fullname, path):\n warnings.warn(\"MetaPathFinder.find_module() is deprecated since Python \"\n \"3.4 in favor of MetaPathFinder.find_spec() and is \"\n \"slated for removal in Python 3.12\",\n DeprecationWarning,\n stacklevel=2)\n if not hasattr(self, 'find_spec'):\n return None\n found = self.find_spec(fullname, path)\n return found.loader if found is not None else None", "title": "" } ]
1822aa152e4ae6101470c797e06ea372
Save model pipeline (tfidf + model), target names mapping and config.
[ { "docid": "5812b96de2030d1febf685852c3f1f32", "score": "0.7611598", "text": "def save_model(\n pipe: Pipeline,\n target_names_mapping: Dict[int, str],\n config: Dict[str, Any],\n) -> None:\n\n # save pipe\n joblib.dump(pipe, config[\"path_to_save_model\"])\n\n # save target names mapping\n with open(config[\"path_to_save_target_names_mapping\"], mode=\"w\") as fp:\n json.dump(target_names_mapping, fp)\n\n # save config\n shutil.copy2(config[\"path_to_config\"], config[\"path_to_save_folder\"])", "title": "" } ]
[ { "docid": "e40de5c6aa241e21541b11fbaa5a2203", "score": "0.72795296", "text": "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "title": "" }, { "docid": "f44c48b51254da0f5311f1e7d429aa89", "score": "0.71536785", "text": "def save_model(self):\n joblib.dump(self.pipeline, 'model_le.joblib')\n print(colored(\"model.joblib saved locally\", \"green\"))", "title": "" }, { "docid": "af8295504981fe243996f74c472e8fcc", "score": "0.68219584", "text": "def _save_model(self):\n\t\tself.model.save(self.model_output_path)\n\n\t\tmodel_params = {\n\t\t\t\"max_len_chars\": self.sObj.max_len_chars,\n\t\t\t\"n_words\": self.sObj.n_words,\n\t\t\t\"max_seq_len\": self.sObj.max_seq_len,\n\t\t\t\"n_tags\": self.sObj.n_tags,\n\t\t\t\"n_chars\": self.sObj.n_chars\n\t\t} \n\t\tsave_model_params(NER_MAPPING_PATH, model_params)", "title": "" }, { "docid": "284d3e4691289fe7f39ab56cb5c659f3", "score": "0.6778391", "text": "def save(self, model_path: str):\n try:\n with open(f'{model_path}.metadata.pkl', 'wb') as f:\n pickle.dump(self.documents, f)\n pickle.dump(self.word2tokens, f)\n\n if self.tfidf_context._initialized:\n with open(f'{model_path}.tfidf.pkl', 'wb') as f:\n self.tfidf_context.export_model(f)\n if self.word2vec_context._initialized:\n with open(f'{model_path}.word2vec.pkl', 'wb') as f:\n self.word2vec_context.export_model(f)\n if self.ner_context._initialized:\n with open(f'{model_path}.ner.pkl', 'wb') as f:\n self.ner_context.export_model(f)\n\n except:\n raise ValueError('error occured when saving model. check your model path.')", "title": "" }, { "docid": "5440b7f62d13e7dbb26b1343dc7c45c4", "score": "0.6765858", "text": "def save_model():\r\n X, Y, max_len, total_words = dataset_preparation(data)\r\n model = create_model(X, Y, max_len, total_words)\r\n model.save(os.path.join(os.getcwd(),\"seq2seq_model.h5\"))", "title": "" }, { "docid": "de9c7e4bf0718193e4edd072dfc37e3b", "score": "0.6737168", "text": "def save_model_locally(self):\n\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored(\"model.joblib saved locally\", \"green\"))", "title": "" }, { "docid": "8c3c6245287e58f3bc97b340563bf685", "score": "0.6728479", "text": "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n return self", "title": "" }, { "docid": "fb542257d5c521aaaae01d05b55de6e8", "score": "0.66354614", "text": "def save_model(pipeline, model_filepath):\n pickle.dump(model.best_estimator_, open(model_filepath, 'wb'))", "title": "" }, { "docid": "2208eed223d148a1b5dfbdd52a40a72f", "score": "0.66045207", "text": "def save_model(self):\n\n if not self.fitted:\n print('LDA model is not fitted')\n return\n\n with open(self.filename, 'wb') as file:\n dump({'lda': self.lda, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "title": "" }, { "docid": "d3798cd4c96af95abc4489c956abfaa7", "score": "0.6478598", "text": "def save_model(\n name: str,\n pipeline: ext.TransformersPipeline,\n task_name: str | None = None,\n task_definition: dict[str, t.Any] | None = None,\n *,\n signatures: ModelSignaturesType | None = None,\n labels: dict[str, str] | None = None,\n custom_objects: dict[str, t.Any] | None = None,\n external_modules: t.List[ModuleType] | None = None,\n metadata: dict[str, t.Any] | None = None,\n) -> bentoml.Model: # noqa\n _check_flax_supported()\n if not isinstance(\n pipeline,\n LazyType[\"ext.TransformersPipeline\"](\"transformers.pipelines.base.Pipeline\"), # type: ignore\n ):\n raise BentoMLException(\n \"'pipeline' must be an 
instance of 'transformers.pipelines.base.Pipeline'. \"\n \"To save other Transformers types like models, tokenizers, configs, feature \"\n \"extractors, construct a pipeline with the model, tokenizer, config, or feature \"\n \"extractor specified as arguments, then call save_model with the pipeline. \"\n \"Refer to https://huggingface.co/docs/transformers/main_classes/pipelines \"\n \"for more information on pipelines. If transformers doesn't provide a task you \"\n \"need, refer to the custom pipeline section to create your own pipelines.\"\n \"\"\"\n ```python\n import bentoml\n from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer.from_pretrained(\"distilgpt2\")\n model = AutoModelForCausalLM.from_pretrained(\"distilgpt2\")\n generator = pipeline(task=\"text-generation\", model=model, tokenizer=tokenizer)\n\n bentoml.transformers.save_model(\"text-generation-pipeline\", generator)\n ```\n \"\"\"\n )\n\n context = ModelContext(\n framework_name=\"transformers\",\n framework_versions={\"transformers\": get_pkg_version(\"transformers\")},\n )\n\n if signatures is None:\n signatures = {\n \"__call__\": {\"batchable\": False},\n }\n logger.info(\n 'Using the default model signature for Transformers (%s) for model \"%s\".',\n signatures,\n name,\n )\n\n if task_name is not None and task_definition is not None:\n from transformers.pipelines import TASK_ALIASES\n from transformers.pipelines import SUPPORTED_TASKS\n\n try:\n import cloudpickle # type: ignore\n except ImportError: # pragma: no cover\n raise MissingDependencyException(\n \"Module `cloudpickle` is required in order to use to save custom pipelines.\"\n )\n\n logger.info(\n \"Arguments 'task_name' and 'task_definition' are provided. Saving model with pipeline task name '%s' and task definition '%s'.\",\n task_name,\n task_definition,\n )\n\n if pipeline.task is None or pipeline.task != task_name:\n raise BentoMLException(\n f\"Argument 'task_name' '{task_name}' does not match pipeline task name '{pipeline.task}'.\"\n )\n\n impl: type = task_definition[\"impl\"]\n if type(pipeline) != impl:\n raise BentoMLException(\n f\"Argument 'pipeline' is not an instance of {impl}. 
It is an instance of {type(pipeline)}.\"\n )\n\n if task_name in TASK_ALIASES:\n task_name = TASK_ALIASES[task_name]\n\n if task_name in SUPPORTED_TASKS:\n if SUPPORTED_TASKS[task_name] != task_definition:\n raise BentoMLException(\n f\"Argument `task_definition` '{task_definition}' does not match pipeline task \"\n \"definition '{SUPPORTED_TASKS[task_name]}'.\"\n )\n else:\n SUPPORTED_TASKS[task_name] = task_definition\n\n options = TransformersOptions(\n task=task_name,\n pt=tuple(\n auto_class.__qualname__ for auto_class in task_definition.get(\"pt\", ())\n ),\n tf=tuple(\n auto_class.__qualname__ for auto_class in task_definition.get(\"tf\", ())\n ),\n default=task_definition.get(\"default\", {}),\n type=task_definition.get(\"type\", \"text\"),\n )\n\n with bentoml.models.create(\n name,\n module=MODULE_NAME,\n api_version=API_VERSION,\n labels=labels,\n context=context,\n options=options,\n signatures=signatures,\n custom_objects=custom_objects,\n external_modules=external_modules,\n metadata=metadata,\n ) as bento_model:\n pipeline.save_pretrained(bento_model.path)\n\n with open(bento_model.path_of(PIPELINE_PICKLE_NAME), \"wb\") as f:\n cloudpickle.dump(pipeline, f)\n\n return bento_model\n\n else:\n options = TransformersOptions(task=pipeline.task)\n\n with bentoml.models.create(\n name,\n module=MODULE_NAME,\n api_version=API_VERSION,\n labels=labels,\n context=context,\n options=options,\n signatures=signatures,\n custom_objects=custom_objects,\n external_modules=external_modules,\n metadata=metadata,\n ) as bento_model:\n pipeline.save_pretrained(bento_model.path)\n\n return bento_model", "title": "" }, { "docid": "f18c91baddb551f40023a9f0dbfbca25", "score": "0.6430201", "text": "def save(self):\n logging.vlog(1, \"PPO epoch [% 6d]: saving model.\", self._epoch)\n old_model_files = gfile.glob(\n os.path.join(self._output_dir, \"model-??????.pkl\"))\n params_file = os.path.join(self._output_dir, \"model-%06d.pkl\" % self._epoch)\n with gfile.GFile(params_file, \"wb\") as f:\n pickle.dump(\n (self._policy_and_value_opt_state, self._model_state,\n self._total_opt_step), f)\n # Remove the old model files.\n for path in old_model_files:\n gfile.remove(path)\n # Reset this number.\n self._n_trajectories_done = 0\n self._last_saved_at = self._epoch", "title": "" }, { "docid": "8d802692a7e59720aa67cca4a30637c2", "score": "0.6426835", "text": "def save(self):\n self.model.save(self.modelOutputFile)", "title": "" }, { "docid": "6da1dcf6141f6d609167ee5533f82235", "score": "0.63932204", "text": "def _save_model(self):\n\t\tf = open(self.filters_file, 'w')\n\t\tnumpy.savez(f, lasagne.layers.get_all_param_values(self.network['prob']))\n\t\tf.close()", "title": "" }, { "docid": "a2123f70ebc282ab611278be28d9a3ae", "score": "0.6367365", "text": "def save_model(self, output_model: ModelEntity):\n logger.info(\"called save_model\")\n buffer = io.BytesIO()\n hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True))\n labels = {label.name: label.color.rgb_tuple for label in self._labels}\n model_ckpt = torch.load(self._model_ckpt)\n modelinfo = {\n \"model\": model_ckpt,\n \"config\": hyperparams_str,\n \"labels\": labels,\n \"confidence_threshold\": self.confidence_threshold,\n \"VERSION\": 1,\n }\n if self._recipe_cfg is not None and should_cluster_anchors(self._recipe_cfg):\n modelinfo[\"anchors\"] = {}\n self._update_anchors(modelinfo[\"anchors\"], self._recipe_cfg.model.bbox_head.anchor_generator)\n\n torch.save(modelinfo, buffer)\n 
output_model.set_data(\"weights.pth\", buffer.getvalue())\n output_model.set_data(\n \"label_schema.json\",\n label_schema_to_bytes(self._task_environment.label_schema),\n )\n output_model.precision = self._precision", "title": "" }, { "docid": "f7c5334c18eba647827f9ff97dca14cb", "score": "0.62957025", "text": "def save_trained_model(self):\n save_keras_sequential(self.model, self.RELATIVE_DATA_DIRECTORY, self.get_name())\n logger.info(f\"DQL Trader: Saved trained model\")", "title": "" }, { "docid": "f7c5334c18eba647827f9ff97dca14cb", "score": "0.62957025", "text": "def save_trained_model(self):\n save_keras_sequential(self.model, self.RELATIVE_DATA_DIRECTORY, self.get_name())\n logger.info(f\"DQL Trader: Saved trained model\")", "title": "" }, { "docid": "fb03826e2153a1a9b3dcf4fb8a1895b2", "score": "0.62825096", "text": "def _save_model(model: Pipeline, model_path: PosixPath):\n path = model_path.joinpath(f'model_{date.today().isoformat()}.joblib')\n dump(model, path)", "title": "" }, { "docid": "368a267f8a6cbf1767de291d7592597d", "score": "0.6278829", "text": "def save_model(model, filepath):\r\n save_vars(model.state_dict(), filepath)", "title": "" }, { "docid": "d20e336b45d25154a44a922b401daa49", "score": "0.62665826", "text": "def _save_model(self, steps=0):\n for brain_name in self.trainers.keys():\n self.trainers[brain_name].save_model()\n self.logger.info('Saved Model')", "title": "" }, { "docid": "4e4b117fb3cd266482935a52957443c4", "score": "0.6259899", "text": "def save_multi_model(self, model, tag, model_id):\n model_fname = self.log_dir / f\"m{model_id}_{tag}_model.pt\"\n state_dict = {key.replace(f'm{model_id}_', ''): model.state_dict()[key] for key in model.state_dict().keys() if f'm{model_id}_' in key}\n torch.save(state_dict, model_fname)\n self.print_log(f\"m{model_id}: {tag} model is saved.\")\n return", "title": "" }, { "docid": "69b505ed6e5baf8bc148e87e09e57654", "score": "0.62493855", "text": "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators=100)))\n ])\n return pipeline", "title": "" }, { "docid": "865f7725bad4401835f728c1beea541f", "score": "0.62491375", "text": "def save_models(self, fname):\n torch.save(self.target_actor.state_dict(), str(fname) + '_actor.pt')\n torch.save(self.target_critic.state_dict(), str(fname) + '_critic.pt')\n print('Models saved successfully')", "title": "" }, { "docid": "a4a394ea25da7520cdc6da06668b293e", "score": "0.62430763", "text": "def save_model(self, model, tag):\n model_fname = self.log_dir / f\"{tag}_model.pt\"\n torch.save(model.state_dict(), model_fname)\n # self.print_log(f\"{tag} model is saved.\")\n return", "title": "" }, { "docid": "112e12ecd53b44985080c5f94199b514", "score": "0.62419987", "text": "def save_model(model,filename):\n joblib.dump(model,filename)", "title": "" }, { "docid": "dc406a714499fff6ec2a36143d670bb2", "score": "0.6241963", "text": "def save_model_and_weights(self):", "title": "" }, { "docid": "ae7ef5b8438d19291822ae7073e9b1c7", "score": "0.6230667", "text": "def save_model(self):\n from faiss_utils import save_model\n save_model(\n k=self.k,\n k_large=self.k_large,\n n_clusters=self.n_clusters,\n metric=self.metric,\n score_threshold=self.score_threshold\n )\n self.next(self.end)", "title": "" }, { "docid": "6bd64d6666f94a01e7b5793071fdf747", "score": "0.6229459", "text": "def save_model(self):\n saved_path = 
Path(self.config.model_configuration[\"model_save_path\"]).resolve()\n os.makedirs(os.path.dirname(saved_path), exist_ok=True)\n torch.save(self.model.state_dict(), str(saved_path))\n with open(os.path.dirname(saved_path) + \"/model_parameters.txt\", \"w+\") as f:\n f.write(str(self.config))\n f.write('\\n')\n f.write(str(' '.join(sys.argv)))", "title": "" }, { "docid": "7700d4ed5584ea67e170b8bd9c9df083", "score": "0.62287927", "text": "def save_model(model: Pipeline, model_filepath: str):\n with open(model_filepath, 'wb') as file:\n pickle.dump(model, file, protocol=pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "613b727a16883ff43dc1efb1a68dc722", "score": "0.62147063", "text": "def save_model_parameters(self):\r\n path = os.path.join(self.out_dir, \"model.pth\")\r\n torch.save(self.model.state_dict(), path)", "title": "" }, { "docid": "73c4c12d9388e24a0f75f14bb5f506e9", "score": "0.61907476", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier())),\n ])\n\n return pipeline", "title": "" }, { "docid": "0bd2b45b5d84cd94f90394a1dc3b1988", "score": "0.61833173", "text": "def save_model(self):\r\n d = self.words\r\n f = open(self.name + '_words', 'w')\r\n f.write(str(d))\r\n f.close()\r\n \r\n g = self.word_lengths\r\n h = open(self.name + '_word_lengths', 'w')\r\n h.write(str(g))\r\n h.close()\r\n\r\n i = self.stems\r\n j = open(self.name + '_stems', 'w')\r\n j.write(str(i))\r\n j.close()\r\n\r\n k = self.sentence_lengths\r\n m = open(self.name + '_sentence_lengths', 'w')\r\n m.write(str(k))\r\n m.close()\r\n\r\n o = self.common\r\n p = open(self.name + '_common', 'w')\r\n p.write(str(o))\r\n p.close()", "title": "" }, { "docid": "e68f9bc237c3fe77549dc33441fd892f", "score": "0.61526054", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ]\n )\n\n return pipeline", "title": "" }, { "docid": "5dc7549050148e57115fc93e7c78d8f1", "score": "0.6151695", "text": "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n to_save['use_stereo'] = self.opt.use_stereo\n torch.save(to_save, save_path)\n\n # save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n # torch.save(self.model_optimizer.state_dict(), save_path)", "title": "" }, { "docid": "4523b67b592c2905c83a8f0c2c263d27", "score": "0.6146057", "text": "def save_model(self, path='../models/svm/svm.pkl'):\n joblib.dump(self.model, path)", "title": "" }, { "docid": "8acfc78f7f0d868faa46f63fae3bae93", "score": "0.6114543", "text": "def save_model(model):\n torch.save(model.state_dict(),\n os.path.join(args.model_dir, f'model={args.model_type}_hs={args.hidden_size}_bs={args.batch_size}'\n f'_epochs={args.epochs}_clip={args.grad_clipping}.pt'))", "title": "" }, { "docid": "ba5bc246f8377bd7233014c9939ab852", "score": "0.61121464", "text": "def save_model(self, path):\n \n print(\"Saving model to\", path)\n torch.save({\n 
\"trunk_state_dict\": self.trunk.state_dict(),\n \"embedder_state_dict\": self.embedder.state_dict(),\n \"classifier_state_dict\": self.classifier.state_dict(),\n \"trunk_optimizer_state_dict\": self.trunk_optimizer.state_dict(),\n \"embedder_optimizer_state_dict\": self.embedder_optimizer.state_dict(),\n \"classifier_optimizer_state_dict\": self.classifier_optimizer.state_dict(),\n }, path + \"/models.h5\")", "title": "" }, { "docid": "5dc954aa2788c4c0e0dea9726a7f0209", "score": "0.6102323", "text": "def save(self):\n \n Guesser.save(self)\n\n torch.save(self.dan_model, \"%s.torch.pkl\" % self.model_filename)\n self.train_data.save(self.model_filename)", "title": "" }, { "docid": "edad6c03ee7373d00bdbdad9c6601ee5", "score": "0.609199", "text": "def save_final_model(model, path: str):\n\n _, multi_gpu = check_gpu_status()\n\n # Basic details\n checkpoint = {\n 'class_to_idx': model.class_to_idx,\n 'idx_to_class': model.idx_to_class,\n 'epochs': model.epochs,\n }\n\n # Extract the the state dictionary\n if multi_gpu:\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['state_dict'] = model.state_dict()\n\n # Add the optimizer\n checkpoint['optimizer'] = model.optimizer\n checkpoint['optimizer_state_dict'] = model.optimizer.state_dict()\n\n # Save the data to the path\n torch.save(checkpoint, path)", "title": "" }, { "docid": "dc129b75603d043537eee2f1d074b008", "score": "0.6088584", "text": "def save(self):\n\n torch.save(self.model.state_dict(), self.modelFile)\n\n print('Model saved at ', self.modelFile)", "title": "" }, { "docid": "b8427538027b7742639d45e5528a6258", "score": "0.60865074", "text": "def save_models_pkl(self, problem_name='', path=''):\n\n today = date.today().strftime(\"%d%m%Y\")\n for i, clf in enumerate(self.classifiers[1:]):\n model = self.grid[i+1]\n idx = model.best_index_\n mean = model.cv_results_['mean_test_score'][idx]\n std = model.cv_results_['std_test_score'][idx]\n with open(path + \"{}_{}_{}_{}_{}_{}.pkl\".format(today, problem_name,\n self.n_objects,\n clf.upper(), int(mean*100),\n int(std*100)), \"wb\") as file:\n pickle.dump(model, file)", "title": "" }, { "docid": "0441b27c47a4e908cb8f1e63cc1c6767", "score": "0.60806066", "text": "def _save_model():\n \n logger.info(\"Saving the model into model dir\")\n \n model_dir = os.environ['SM_MODEL_DIR']\n output_dir = os.environ['SM_OUTPUT_DATA_DIR']\n \n # copy model_final.pth to model dir\n model_path = os.path.join(output_dir, \"model_final.pth\")\n new_model_path = os.path.join(model_dir, 'model.pth')\n shutil.copyfile(model_path, new_model_path)\n \n \n shutil.copytree('/opt/ml/code/', os.path.join(model_dir, 'code'))\n\n # copy config.yaml to model dir\n config_path = os.path.join(output_dir, \"config.yaml\")\n new_config_path = os.path.join(model_dir, \"config.yaml\")\n shutil.copyfile(config_path, new_config_path)\n\n try:\n # copy checkpoint file to model dir\n checkpoint_path = os.path.join(output_dir, \"last_checkpoint\")\n new_checkpoint_path = os.path.join(model_dir, \"last_checkpoint\")\n shutil.copyfile(checkpoint_path, new_checkpoint_path)\n except Exception:\n logger.debug(\"D2 checkpoint file is not available.\")", "title": "" }, { "docid": "5141d202edb56b70410b67e07dd31b3d", "score": "0.6070674", "text": "def save_model(\n model,\n model_name: str,\n prep_pipe_=None,\n verbose: bool = True,\n use_case: Optional[MLUsecase] = None,\n **kwargs,\n):\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n 
logger.info(\"Initializing save_model()\")\n logger.info(f\"save_model({function_params_str})\")\n\n from copy import deepcopy\n\n logger.info(\"Adding model into prep_pipe\")\n\n if use_case == MLUsecase.TIME_SERIES:\n from pycaret.utils.time_series.forecasting.pipeline import (\n _add_model_to_pipeline,\n )\n\n if prep_pipe_:\n pipeline = deepcopy(prep_pipe_)\n model = _add_model_to_pipeline(pipeline=pipeline, model=model)\n else:\n logger.warning(\n \"Only Model saved. Transformations in prep_pipe are ignored.\"\n )\n else:\n if isinstance(model, Pipeline):\n logger.warning(\"Only Model saved as it was a pipeline.\")\n elif not prep_pipe_:\n logger.warning(\n \"Only Model saved. Transformations in prep_pipe are ignored.\"\n )\n else:\n model_ = deepcopy(prep_pipe_)\n model_.steps.append((\"trained_model\", model))\n model = model_\n\n model_name = f\"{model_name}.pkl\"\n joblib.dump(model, model_name, **kwargs)\n if verbose:\n if prep_pipe_:\n pipe_msg = \"Transformation Pipeline and \"\n else:\n pipe_msg = \"\"\n print(f\"{pipe_msg}Model Successfully Saved\")\n\n logger.info(f\"{model_name} saved in current working directory\")\n logger.info(str(model))\n logger.info(\n \"save_model() successfully completed......................................\"\n )\n gc.collect()\n return model, model_name", "title": "" }, { "docid": "ff7468aed1bf800643e98c89e8313149", "score": "0.6069747", "text": "def saveModelAndVector(self):\n \n pickle.dump(self.vector, open('vectorizer.sav', 'wb'))\n pickle.dump(self.svm, open('classifier.sav', 'wb'))", "title": "" }, { "docid": "e3b09d8cad6cd733387c7d88c5c28438", "score": "0.60678285", "text": "def save_model(self):\n save_dir = os.path.join(self.save_path, \"model\")\n os.makedirs(save_dir, exist_ok=True)\n global_step = self.sess.run(tf.train.get_global_step())\n self.saver.save(\n self.sess,\n os.path.join(save_dir, \"model\"),\n global_step,\n write_meta_graph=True\n )", "title": "" }, { "docid": "51b343c1fd02f0c7bcadc83aba49d0a1", "score": "0.6061582", "text": "def build_model():\n\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n #('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(MultinomialNB()))\n ])\n\n parameters = {\n 'vect__ngram_range': ((1, 1), (1, 2)),\n 'vect__max_df': (0.5, 0.75, 1.0),\n 'clf__estimator__alpha': [0.2, 0.6, 1]\n }\n\n\n return pipeline", "title": "" }, { "docid": "11cbb8c3c029f5c8017b5767e8e11c8f", "score": "0.6058362", "text": "def save(self):\n with open(f\"{self.output_folder}/trained_model-{self.lang}.pickle\", \"wb\") as file:\n pickle.dump(self.model, file)\n return", "title": "" }, { "docid": "0c58dd87e55f08fc3b0f92f4e5e328b7", "score": "0.6058161", "text": "def save_model(\n self,\n save_name: str,\n ):\n save_id = self.db.create_model_record(save_name=save_name, model=self)\n self.save_token_maps(save_id=save_id)\n self.save_vocab(save_id=save_id)\n self.save_alpha(save_id=save_id)\n self.save_eta(save_id=save_id)\n self.save_topic_names(save_id=save_id)\n\n corpus_list = [list(doc) for doc in self.corpus]\n with open(self.SAVES_DIR + f'/{save_id}_corpus.json', 'w') as f:\n json.dump(obj=corpus_list, fp=f)\n\n corpus_raw_list = [list(doc) for doc in self.corpus_raw]\n with open(self.SAVES_DIR + f'/{save_id}_corpus_raw.json', 'w') as f:\n json.dump(obj=corpus_raw_list, fp=f)", "title": "" }, { "docid": "2240147bc1730c795581ff72e255b7af", "score": "0.6052336", "text": "def save_model(model, model_path='model.pt') -> None:\n torch.save(model.state_dict(), model_path)", "title": "" 
}, { "docid": "6a684436e033c102d076c6cfa2c3b397", "score": "0.6050939", "text": "def save(self, model_path: str = DEFAULT_MODEL_PATH, **kwargs):\n self.check_if_model_is_trained()\n self.create_model_path(model_path)\n model_file_name = os.path.join(model_path, self.MODEL_FILENAME)\n joblib.dump(self.model, model_file_name)\n self.model_byte_size = os.path.getsize(model_file_name)\n joblib.dump(self.classes2string, os.path.join(model_path, self.CLASSES_FILENAME))", "title": "" }, { "docid": "3b2a04deb02822a4df7e1786738cfd66", "score": "0.60461587", "text": "def save_model(self, model):\n tf.saved_model.save(model, self.saved_model_path)", "title": "" }, { "docid": "318a7968c06068a03c8e572d36429bd0", "score": "0.60446143", "text": "def save_model(trained_model):\n trained_model.to('cpu')\n\n trained_model.class_to_idx = image_datasets['train'].class_to_idx\n\n checkpoints = {'arch': arch,\n 'classifier': trained_model.classifier,\n 'state_dict': trained_model.state_dict(),\n 'mapping': trained_model.class_to_idx}\n\n torch.save(checkpoints, save_dir)", "title": "" }, { "docid": "c436c46631c1b1b222dc34362587ca59", "score": "0.60426354", "text": "def save(self):\n print \"<< Saving >>\"\n joblib.dump(self.longitude_regression_model, self.longitude_regression_model_save_path)\n joblib.dump(self.latitude_regression_model, self.latitude_regression_model_save_path)\n joblib.dump(self.floor_classifier, self.floor_classifier_save_path)\n joblib.dump(self.building_classifier, self.building_classifier_save_path)", "title": "" }, { "docid": "038d5d88e1edab9714db942f03a2d26d", "score": "0.60419387", "text": "def save_model(self, path=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'binaries', 'sequential_sensory_data_model.bin')):\n\n pc.dump((self.best_performing_model, self.models, self.standard_scaler, self.sequentializer), open(path, 'wb'))", "title": "" }, { "docid": "568858de48f0643a844768b2ddb09805", "score": "0.603787", "text": "def save(self, filename):\r\n self.svm.saveModel(filename)", "title": "" }, { "docid": "64d5ab4718bf3b6e23cb380ea57f9e73", "score": "0.6034688", "text": "def save():\n save_filename = model_name + '.pt'\n torch.save(model, save_filename)\n print('Saved as %s' % save_filename)", "title": "" }, { "docid": "05062bde6716645a56ac33e3988da300", "score": "0.6034482", "text": "def save_model(self):\n model_saved_path = os.path.join(self.params.dump_path, \"best_model.pth\")\n if self.use_label_encoder:\n torch.save({\n \"coarse_tagger\": self.coarse_tagger.state_dict(),\n \"fine_tagger\": self.fine_tagger.state_dict(),\n \"sent_repre_generator\": self.sent_repre_generator.state_dict()\n }, model_saved_path)\n else:\n torch.save({\n \"coarse_tagger\": self.coarse_tagger.state_dict(),\n \"fine_tagger\": self.fine_tagger.state_dict(),\n }, model_saved_path)\n logger.info(\"Best model has been saved to %s\" % model_saved_path)\n\n opti_saved_path = os.path.join(self.params.dump_path, \"opti.pth\")\n torch.save(self.optimizer.state_dict(), opti_saved_path)\n logger.info(\"Best model opti has been saved to %s\" % opti_saved_path)\n\n self.model_saved_path = model_saved_path\n self.opti_saved_path = opti_saved_path", "title": "" }, { "docid": "91eac461ec32f697288933ebf160a54a", "score": "0.60169923", "text": "def save(self):\n if self._model is not None:\n joblib.dump(self._model, self._model_path)\n else:\n raise TypeError(\"The model is not trained yet, use .train() before saving\")", "title": "" }, { "docid": "c1616b4807a732a487ac79a4d4391ef5", "score": "0.60143775", 
"text": "def save(self):\n joblib.dump(self.transformer, open(self.trans_path, 'wb'))\n joblib.dump(self.tree, open(self.tree_path, 'wb'))", "title": "" }, { "docid": "9866af7ea790d412471df53bb19dcbb7", "score": "0.60127956", "text": "def save_model(model, output):\n\n tf.saved_model.save(model, os.path.join(output, \"1\"))\n\n print(\"Model successfully saved at: {}\".format(output))", "title": "" }, { "docid": "0ac28f879296ef7e087a41b04dd09411", "score": "0.60114706", "text": "def save_model(self, export_model):\n\n net_dict = self.net.state_dict()\n torch.save({'net_dict': net_dict}, export_model)", "title": "" }, { "docid": "b6bfa7134b3e5912bdf21c3fa1d90324", "score": "0.59933233", "text": "def save_model(self):\n assert self._model is not None, 'Model not yet built or loaded'\n self._model.save(self._save_prefix + '.hdf5')", "title": "" }, { "docid": "fe6128a64eb48f43ddd30e44ea71b06b", "score": "0.5987131", "text": "def pre_save(self):\n with codecs.open(self.output_file+'.txt', 'w', encoding='utf-8') as output:\n output.write('Model Name: %s\\n' % self.model_name);\n output.write('Training Files: %s\\n' % self.train_files);\n output.write('Validation Files: %s\\n' % self.valid_files);\n output.write('Test Files: %s\\n' % self.test_files);\n output.write('Output Path: %s\\n' % self.output_path)\n output.write('File Type: %s\\n' % ('Text' if self.file_type == 'T' else 'Binary'))\n output.write('Vocabulary Size: %d\\n' % self.vocab_size)\n output.write('NUmber of Word Class: %d\\n' % self.class_size)\n output.write('Size of Feature Vectors: %d\\n' % self.vector_dim)\n output.write('Grammar Order: %d\\n' % self.gram_order)\n output.write('Initial Learning Rate: %.2f\\n' % self.alpha)\n output.write('Regularization Parameter: %.9f\\n' % self.beta)\n output.write('Minimum Improvement rate: %.5f\\n' % self.min_improve)\n output.write('Maximun Interation: %d\\n' % self.iter_num)\n output.write('Input Level: %s\\n' % ('Word' if self.input_unit == 'W' else 'Character'))\n output.write('Mark for Start of Sentence: %s\\n' % self.sentence_start)\n output.write('Mark for End of Sentence: %s\\n' % self.sentence_end)\n output.write('Mark for Words Out of Vocabulary: %s\\n' % self.unknown_word)\n output.write('Enable Direct Connections: %r\\n' % self.en_direct)\n output.write('Enable Bias Terms: %r\\n' % self.en_bias)\n output.write('Rate for Cutoff Learning Rate: %d\\n' % self.alpha_cut)\n output.write('Random Seed: %d\\n' % self.random_seed)", "title": "" }, { "docid": "7c3d963ea0e49d0f659b68ec9e351571", "score": "0.59834707", "text": "def _save_model(config, model):\n from openem_train.util.model_utils import keras_to_tensorflow\n if config.classify_do_validation():\n best = glob.glob(os.path.join(config.checkpoints_dir('classify'), '*best*'))\n else:\n best = glob.glob(os.path.join(config.checkpoints_dir('classify'), '*periodic*'))\n latest = max(best, key=os.path.getctime)\n model.load_weights(latest)\n os.makedirs(config.classify_model_dir(), exist_ok=True)\n keras_to_tensorflow(model, ['cat_species_1', 'cat_cover_1'], config.classify_model_path())", "title": "" }, { "docid": "c68924a51579b06e2a29bf619a07d653", "score": "0.59820837", "text": "def save_model(self, save_format=None):\n\n # saving the trained model to disk is mandatory to then beeing able to upload it to storage\n # Implement here\n # file = self.model_output_name\n # joblib.dump(self.model, file)\n # print(f\"saved {file} locally\")\n\n # self.upload_model_to_gcp(file)\n # print(f\"uploaded {file} to gcp cloud storage under \\n 
=> {MODEL_STORAGE_LOCATION+file}\")\n \n MODEL_SAVE = 'gs://' + BUCKET + '/' + MODEL_STORAGE_LOCATION + self.model_output_name\n self.model.save(MODEL_SAVE, save_format=save_format)\n return None", "title": "" }, { "docid": "8a81e9f85cd726f46f6dd9132a5fee1e", "score": "0.5979264", "text": "def save_model(self, filename):\n torch.save(self.model.state_dict(), filename)", "title": "" }, { "docid": "8a81e9f85cd726f46f6dd9132a5fee1e", "score": "0.5979264", "text": "def save_model(self, filename):\n torch.save(self.model.state_dict(), filename)", "title": "" }, { "docid": "8abb012e43672788da7fa17093116f81", "score": "0.5974229", "text": "def save_model(workdir, identifier, model):\n modelfile = join(workdir, \"results\", identifier, \"model\", identifier+\".gensim\")\n model.save(modelfile)", "title": "" }, { "docid": "fcd2b5f0bb2b6f16b8274ffc744ba37f", "score": "0.59720826", "text": "def save_model(reg):\n\n # saving the trained model to disk is mandatory to then beeing able to upload it to storage\n # Implement here\n joblib.dump(reg, 'model.joblib')\n print(\"saved model.joblib locally\")\n\n # Implement here\n upload_model_to_gcp()\n print(f\"uploaded model.joblib to gcp cloud storage under \\n => {STORAGE_LOCATION}\")", "title": "" }, { "docid": "51d8c7b4c30481f8a22f0a9d441b1ae8", "score": "0.5964786", "text": "def _save_model(self, model):\n self._interface.save_model(model,\n self._workspace.output_model_filepath)", "title": "" }, { "docid": "8d3994e2ab1a57283208987bca93007d", "score": "0.5958277", "text": "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)", "title": "" }, { "docid": "749ef6af9bb4ba656ca68ed7de4a2cf1", "score": "0.59471244", "text": "def save_models(self, step):\n model = {'DNN': self.DNN.state_dict(),\n 'dnn_optimizer': self.dnn_optimizer.state_dict(),\n 'step': step}\n torch.save(model, os.path.join(self.trial_directory, f'model_{step}.pth'))", "title": "" }, { "docid": "458b46656d73daf66750bc016a126695", "score": "0.59405506", "text": "def save_model(model, model_path):\n model.wv.save_word2vec_format(model_path + '/vectors.txt', binary=False)\n model.save(model_path + '/model.bin')", "title": "" }, { "docid": "afa2e60078c1f58fa816f328e5cffbe2", "score": "0.59299266", "text": "def save_model(self):\n self.model.save('model_save.h5')\n del model", "title": "" }, { "docid": "d8d8e7bd31f246a1d7b413f190b7488a", "score": "0.5929061", "text": "def save(self):\n # Save model checkpoint\n checkpoint_path = self.checkpoint_path\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n\n logging.info(\"Saving model checkpoint to %s\", checkpoint_path)\n\n train_state: Dict[str, Any] = {\"step\": self.step}\n if self.use_fp16:\n # Need to save the automatic mixed precision state_dict\n # See https://github.com/NVIDIA/apex#checkpointing\n\n # But first ensure cuda memory is relatively contiguous because the\n # call to `amp.state_dict()` seems to allocate cuda memory, which\n # can fail if cuda memory is fragmented.\n refresh_cuda_memory()\n train_state[\"amp\"] = amp.state_dict()\n\n for name, module in self.modules.items():\n 
if name == \"model\":\n module.save_pretrained(checkpoint_path)\n else:\n train_state[name] = module.state_dict()\n\n with open(\n os.path.join(checkpoint_path, \"train_state.pt\"), \"wb\"\n ) as train_state_file:\n torch.save(train_state, train_state_file)\n\n with open(\n os.path.join(checkpoint_path, \"train_config.json\"), \"wt\"\n ) as train_config_file:\n json.dump(\n self.args,\n train_config_file,\n indent=2,\n default=lambda obj: getattr(obj, \"__dict__\", {}),\n )\n\n self.save_metrics()", "title": "" }, { "docid": "71aa02544ea9f8d433824389dd5ed792", "score": "0.5924007", "text": "def save_model(self):\n if not os.path.exists(self.model_out_dir):\n os.makedirs(self.model_out_dir)\n out_dir = os.path.join(self.model_out_dir, self.net_type)\n if not os.path.exists(out_dir):\n os.makedirs(os.path.join(out_dir))\n if not os.path.exists(os.path.join(out_dir, \"model_number.txt\")):\n model_number = np.array([0])\n else:\n model_number = np.fromfile(os.path.join(out_dir, \"model_number.txt\"),\n dtype=int)\n model_file_name = self.net_type + \"-\" + str(model_number[0])\n self.model.save_weights(os.path.join(out_dir, model_file_name + \".h5\"))\n model_number[0] += 1\n model_number.tofile(os.path.join(out_dir, \"model_number.txt\"))", "title": "" }, { "docid": "881c424da5cd9714ffe0596e483e387e", "score": "0.59137124", "text": "def save_models(self, step):\n model = {'DNN': self.DNN.state_dict(),\n 'dnn_optimizer': self.dnn_optimizer.state_dict(),\n 'D': self.D.state_dict(),\n 'd_optimizer': self.d_optimizer.state_dict(),\n 'G': self.G.state_dict(),\n 'g_optimizer': self.g_optimizer.state_dict(),\n 'step': step}\n torch.save(model, os.path.join(self.trial_directory, f'model_{step}.pth'))", "title": "" }, { "docid": "9cd025728391763de86a9fccf32f93a9", "score": "0.59087694", "text": "def save_model(self, model):\n\n self.model_best.fit(self.X, self.y)\n model_name = type(self.classifier).__name__\n save_model_filepath = os.path.join(MODELS_DIRECTORY, model_name)\n\n with open(f\"{save_model_filepath}.pkl\",\"wb\") as f:\n pickle.dump(self.model_best, f)\n print(f\"Saving best {type(self.classifier).__name__} model to \"\n f\"{save_model_filepath}.\")", "title": "" }, { "docid": "97f61d03aff0228518bd70319daaab87", "score": "0.59011585", "text": "def save_model(self):\n save_model(outcode_regex=self.outcode_regex, boilerplate_text=self.boilerplate_text)\n self.next(self.end)", "title": "" }, { "docid": "14a0612d20c68ef1399968f0d02f1507", "score": "0.58933395", "text": "def save_model_metadata(metadata,filename):\n filename = filename + ' metadata'\n joblib.dump(metadata,filename)", "title": "" }, { "docid": "01e36b2f33bd12f047614b0b9897c10c", "score": "0.58928216", "text": "def save_model(model, modelfile: str):\r\n torch.save(model, modelfile)", "title": "" }, { "docid": "dfddc362f2f9ac0226585cd1df232f80", "score": "0.58899873", "text": "def save_model(self, config):\n filepath = config['genpath']\n if(os.path.exists(filepath)):\n print('Replace existing model')\n os.remove(filepath) #delete old model to replace it with the new one\n else:\n print(\"File do not exist\")\n path_head, path_tail = os.path.split(filepath)\n if(not os.path.exists(path_head)):\n print('Path do not exist, create Path: ' + path_head)\n os.makedirs(path_head)\n self.training_iterations += config['train_iterations']\n torch.save({'state_dict': self.state_dict(), \n 'iterations': self.training_iterations}, filepath)", "title": "" }, { "docid": "9e01d02bb217d8e4194f3e5f116acf91", "score": "0.58729506", "text": "def 
persist(self, model_dir):\n # type: (Text) -> Dict[Text, Any]\n\n featurizer_file = os.path.join(model_dir, self.name + \".pkl\")\n utils.pycloud_pickle(featurizer_file, self)\n return {\"featurizer_file\": self.name + \".pkl\"}", "title": "" }, { "docid": "90ca17ecd6c0503cb6cf647c12ec83ce", "score": "0.5868528", "text": "def save_model(model, save_path):\n torch.save(model.state_dict(), save_path)", "title": "" }, { "docid": "99059452687e63803fb616f0a7a747c2", "score": "0.58670187", "text": "def save_models(self, episode_count):\n\t\ttorch.save(self.target_actor.state_dict(), './Models/' + str(episode_count) + '_actor.pt')\n\t\ttorch.save(self.target_critic.state_dict(), './Models/' + str(episode_count) + '_critic.pt')\n\t\tprint('Models saved successfully')", "title": "" }, { "docid": "f99879904fd1fcc0b440231ccf9e3f81", "score": "0.5862115", "text": "def save(self, filename):\n if self.verbose:\n print 'Saving classifier in \"%s\"...' % filename\n\n svm_save_model(filename, self.model)", "title": "" }, { "docid": "77b186b38511a379b4176b626c5ce468", "score": "0.58597314", "text": "def save_model(self, path):\n torch.save(self.model.state_dict(), path)", "title": "" }, { "docid": "01b7a80a34afc30fe95c443b12037322", "score": "0.5859497", "text": "def SaveModel(self,clf,name):\n \n file = open(self.params['model_path'] + name + \".pkl\", 'wb')\n pickle.dump(clf,file)\n file.close()\n \n print(name + \" has been saved.\")", "title": "" }, { "docid": "576b8617b820620dea831a00ba9c6492", "score": "0.58589184", "text": "def persist(self, model_dir):\n\n if self.ent_tagger:\n model_file_name = os.path.join(model_dir, LSTM_MODEL_FILE_NAME)\n preprocessing_file_name = os.path.join(model_dir, LSTM_PREPROCESS_FILE_NAME)\n param_file_name = os.path.join(model_dir, LSTM_PARAM_FILE_NAME)\n\n self.ent_tagger.save(model_file_name, param_file_name, preprocessing_file_name)\n\n return {\"classifier_file\": LSTM_MODEL_FILE_NAME, \"preprocessing_file\": LSTM_PREPROCESS_FILE_NAME}", "title": "" }, { "docid": "6e2b1687d7b56abf837988588a67ba4b", "score": "0.5856753", "text": "def save(self):\n if self.model is None:\n raise Exception(\"[Exception] You have to build the model first.\")\n\n print(\"[INFO] Saving model...\")\n json_string = self.model.to_json()\n open(self.config.hdf5_path + self.config.exp_name + '_architecture.json', 'w').write(json_string)\n print(\"[INFO] Model saved\")", "title": "" }, { "docid": "44a2820153099540e44890c2fcba5d89", "score": "0.5856742", "text": "def saveModelDataToFile(self):\n pass", "title": "" }, { "docid": "ea3ac82382ee4f46cd3f8f5f4aacf3f0", "score": "0.5855541", "text": "def save_model(self, filepath: str):\n # logger.info('Store the model to path: \"%s\"', filepath)\n self.tokenizer.save_pretrained(filepath)\n self.model.save_pretrained(filepath)", "title": "" }, { "docid": "998cfdb8d2acd74a313a4cacab40b3fe", "score": "0.5854257", "text": "def save_model(self):\n with open(os.path.join(self.model_dir, 'weights.pkl'), 'wb') as file:\n pickle.dump(self.w, file)\n with open(os.path.join(self.model_dir, 'features_id_dict.pkl'), 'wb') as file:\n pickle.dump(self.features_id_dict, file)\n with open(os.path.join(self.model_dir, 'bars.pkl'), 'wb') as file:\n pickle.dump(self.bars_dict, file)\n\n with open(os.path.join(self.model_dir, 'mini.txt'), 'w') as file:\n file.write(str(self.likl_func) + '\\n' + str(self.likl_grad))", "title": "" }, { "docid": "a6b5aa36f45545b9025c3caa9702ce53", "score": "0.5846831", "text": "def save_model(model):\n with 
NamedTemporaryFile(suffix='_model.h5') as model_file:\n model.save(model_file.name)\n ex.add_artifact(model_file.name)", "title": "" }, { "docid": "a12ae18a7a3bfd4031b283a03b7ab5e9", "score": "0.5846307", "text": "def run_save_model_outputs():\n from Spatiotemporal_VAE.analysis_scripts.thesis_save_model_outputs import OutputSavers\n # Input dataframe\n df_path = \"/mnt/data/full_feas_tasks_phenos_nanMasks_idpatient_leg.pickle\"\n\n # Output dataframes. One for the general results. One for the PhenoNet identificaiton\n df_save_path = \"/mnt/thesis_results/data/model_outputs_full_final.pickle\"\n df_pheno_save_path = \"/mnt/thesis_results/data/model_phenos_outputs_full_final.pickle\"\n\n identifier_set = [\"Thesis_B\", \"Thesis_B+C\", \"Thesis_B+C+T\", \"Thesis_B+C+T+P\"]\n model_classess = [BaseContainer, ConditionalContainer, ConditionalContainer, PhenoCondContainer]\n model_container_set = []\n data_gen = GaitGeneratorFromDFforTemporalVAE(df_path, m=512, n=128,\n train_portion=0.8,\n gait_print=False, seed=0)\n\n\n for model_identifier, model_class in zip(identifier_set, model_classess):\n model_container_kwargs = {\n \"model_class\": model_class,\n \"model_identifier\": model_identifier,\n \"df_path\": None,\n \"datagen_batch_size\": 512,\n \"gaitprint_completion\": False,\n \"train_portion\": 0.80,\n \"seed\": 0\n }\n model_container_set.append(model_container_kwargs)\n\n saver = OutputSavers(data_gen=data_gen,\n model_container_set=model_container_set,\n identifier_set=identifier_set,\n save_df_path=df_save_path,\n save_pheno_df_path=df_pheno_save_path)\n\n # Run forward inference here\n saver.forward_batch()", "title": "" }, { "docid": "71fcee5196e181a2e1a4ddd0b268d417", "score": "0.5841598", "text": "def save(self, model_path):\n assert False, 'Not support training now'", "title": "" }, { "docid": "7a37afcedd530eb6e4d02452d2766662", "score": "0.58385485", "text": "def save(self, path='{}/model.pt'.format(PROJECT_PATH)):\n torch.save({\n 'collector': self.collector.state_dict(),\n 'enemy': self.collector.state_dict(),\n 'optim': self.optimizer.state_dict(),\n }, path)", "title": "" }, { "docid": "fb1533b76b75afa6f4a45b644f4a6692", "score": "0.58385265", "text": "def save_model(self, path):\n torch.save(self.model, path)", "title": "" }, { "docid": "14a22ed8905b755f121efe775eb07f25", "score": "0.5836641", "text": "def save(self):\n with open('pickles/model.p', 'wb') as handle:\n cPickle.dump(self._model, handle)", "title": "" }, { "docid": "7700f56fc0d57562735d764eb6c1fbff", "score": "0.58274704", "text": "def save_model(self):", "title": "" }, { "docid": "7700f56fc0d57562735d764eb6c1fbff", "score": "0.58274704", "text": "def save_model(self):", "title": "" } ]
d392101f4958355ebaa3029234bddc9f
Searches JIRA for issues using a JQL query.
[ { "docid": "6249cf1b0a95f8479077c980266af686", "score": "0.6391476", "text": "def complete_search(\r\n self, jql, validate_query=None, fields=None, expand=None,\r\n properties=None, fields_by_keys=None,\r\n batch_size=DEFAULT_BATCH_SIZE):\r\n query = {'jql': jql}\r\n if validate_query is not None:\r\n query['validateQuery'] = validate_query\r\n\r\n if fields is not None:\r\n query['fields'] = fields\r\n\r\n if expand is not None:\r\n query['expand'] = expand\r\n\r\n if properties is not None:\r\n query['properties'] = properties\r\n\r\n if fields_by_keys is not None:\r\n query['fieldsByKeys'] = fields_by_keys\r\n\r\n return self.get_all(\r\n 'issues',\r\n 'search',\r\n query=query,\r\n batch_size=batch_size)", "title": "" } ]
[ { "docid": "79d565c73706b0e9138be3112ed4be00", "score": "0.7664945", "text": "def issues_search(self, jql='order by key', start_at=0, max_results=1000, validate_query=True, fields='id'):\n\n loop_count = max_results // BATCH_SIZE_ISSUES + 1\n issues = list()\n last_loop_remainder = max_results % BATCH_SIZE_ISSUES\n\n while loop_count > 0:\n api_url = f'{self.host}/rest/api/2/search?jql={jql}&startAt={start_at}&maxResults={max_results}' \\\n f'&validateQuery={validate_query}&fields={fields}'\n response = self.get(api_url, \"Could not retrieve issues\")\n\n current_issues = response.json()['issues']\n issues.extend(current_issues)\n start_at += len(current_issues)\n loop_count -= 1\n if loop_count == 1:\n max_results = last_loop_remainder\n\n return issues", "title": "" }, { "docid": "dfff293075a1f93cd479bf44ea7180f7", "score": "0.7151396", "text": "def execute_jql(\n self, jql: str, start: int = 0, step_size: int = 50\n ) -> dict:\n conn = self.__get_connection()\n return conn.search_issues(\n jql,\n startAt=start,\n maxResults=step_size,\n json_result=True,\n validate_query=False,\n expand=\"names,changelog\",\n fields=\"*all\",\n )", "title": "" }, { "docid": "7969059086129ea747bd16ada9f05097", "score": "0.6858446", "text": "def get_issues(self, jql_query, limit=10000):\n limit_per_api_request = min(100, limit)\n current = 0\n total = 1\n issues = []\n while current < min(total, limit):\n response = self.jira_client.jql(\n jql_query, limit=limit_per_api_request, start=current\n )\n total = response[\"total\"]\n issues = issues + response[\"issues\"]\n current = len(issues)\n logging.info(f'\"{jql_query}\" returned {len(issues)} issues')\n return issues[: min(limit, len(issues))]", "title": "" }, { "docid": "8f015f038d85a3befcfaf8683eeb9cf9", "score": "0.68327713", "text": "def search(self, project, term, state=\"open\"):\r\n return self.get_values(\"search\", project, state, quote_plus(term),\r\n filter=\"issues\", datatype=Issue)", "title": "" }, { "docid": "f488da5d795b8c164178d0aed1ddfabc", "score": "0.68244636", "text": "def search(api_url, session, jql):\n search_url = \"{}/search\".format(api_url)\n count = 0\n\n while True:\n payload = {'jql': jql, 'startAt': count}\n resp = session.get(search_url, params=payload)\n resp.raise_for_status()\n data = resp.json()\n tmp_issues = data[\"issues\"]\n retrieved = len(tmp_issues)\n if retrieved <= 0:\n break\n\n count += retrieved\n\n # yield issues one by one\n for issue in tmp_issues:\n yield issue", "title": "" }, { "docid": "d2fc8eb6aec4a1b283f9374b08daf6fc", "score": "0.65700984", "text": "def _get_all_issues(self, jql):\n block_size = 1000\n block_num = 0\n total_num_issues = 0\n all_issues = []\n while True:\n start_idx = block_num * block_size\n\n issues = self.jira.search_issues(jql,\n start_idx,\n block_size)\n num_issues = len(issues)\n total_num_issues += num_issues\n self.class_logger.debug(\"Block %s, %s issues\" % (block_num, num_issues))\n block_num += 1\n\n if num_issues == 0:\n self.class_logger.debug(\"Finished retrieving information from %s issues\" % total_num_issues)\n break\n\n all_issues.extend(issues)\n\n return all_issues", "title": "" }, { "docid": "28aff06cc435180c146a1a9be8c913df", "score": "0.6477296", "text": "def get_issues(self, query_string, max_results=10000):\n issues = self.client.search_issues(query_string, maxResults=max_results)\n return issues", "title": "" }, { "docid": "da7874ce831d577781b04971cf0d13a5", "score": "0.64641774", "text": "def search_for_jira_issues(self, report_id):\n return 
self.jira_client.search_issues('''project = %s AND summary ~ \"%s\"''' %\n (self.jira_project, report_id),\n maxResults=1)", "title": "" }, { "docid": "d49d685991cd36b3597b53755f16f36e", "score": "0.6357285", "text": "def jira_issues(server_url, project_name):\n jira = JIRA({'server': server_url})\n issues = jira.search_issues(f'project = {project_name}', maxResults=None)\n return [extract_fields(iss) for iss in issues]", "title": "" }, { "docid": "5364529cb0dbbcab7fa372e18adb7768", "score": "0.63278675", "text": "def search(self, issue_id=None):\n try:\n results, _ = self._get_results(self._query)\n except Exception as e:\n raise WBCApiError('Error while searching: {} {}'.format(e.__class__, str(e)))\n\n return results", "title": "" }, { "docid": "a29d680bcc85879609bd7461918f5e71", "score": "0.6242677", "text": "def search_projects(self) -> None:\n tinput = get_input('Search [o]pen issues only, [c]losed, or [a]ll?')\n substring = get_input('Search for what substring?')\n matches = []\n jira_projects = self.get_all_cached_jira_projects()\n\n # cache list of seen columns for the query\n columns = {}\n for project in list(jira_projects.values()):\n results = project.get_matching_issues(substring, tinput)\n for r in results:\n matches.append(r)\n for k, v in r.items():\n # This is going to blast out our ability to filter on reviewer or reviewer 2. For now.\n if 'custom' not in k:\n columns[k] = True\n original_list = JiraUtils.sort_custom_jiraissues_by_key(matches)\n display_list = original_list\n\n df = DisplayFilter.default()\n while True:\n df.display_and_return_sorted_issues(self, display_list)\n print_separator(30)\n cinput = get_input(\n '[#] to open an issue in browser, [c] to clear column filters, [f] to specify a specific field to match on, [q] to return to menu:')\n if str.lower(cinput) == 'f':\n col_name = pick_value('Filter on which column?', list(columns.keys()), False)\n newlist = []\n for ji in display_list:\n if col_name in ji:\n if substring in ji[col_name]:\n newlist.append(ji)\n display_list = newlist\n elif str.lower(cinput) == 'c':\n display_list = original_list\n elif not str.lower(cinput) == 'q':\n try:\n jira_issue = display_list[int(cinput) - 1]\n JiraUtils.open_issue_in_browser(\n self._jira_connections[jira_issue.jira_connection_name].url, jira_issue.issue_key)\n except ValueError:\n print('Bad input. 
Try again.')\n elif str.lower(cinput) == 'q':\n break", "title": "" }, { "docid": "2ba41ee87edf8c26c801e4b3f7500871", "score": "0.6139314", "text": "def query_jira_server(endpoint_Url, username, password, jql_query):\r\n # here we are going to use basic authentication to authorize \r\n # with the Jira Server REST API\r\n auth = f\"{username}:{password}\"\r\n \r\n # the combined authorization header text must be converted to Base64 \r\n # before we can use it in the authorization header\r\n b64auth = to_base_64_string(auth)\r\n\r\n # set the http authorization header to the Base64 value we generated above\r\n # assemble the Jira authorization header, which is a combination of the\r\n username and password\r\n hdr = {'Authorization':f'Basic {b64auth}',\r\n # use this to 'spoof' JIRA servers which only allows requests\r\n # from common browsers\r\n # urllib's default user-agent is 'Python-urllib/3.8' (on Python 3.8)\r\n 'User-Agent':\r\n 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'}\r\n\r\n # here we define the fields that the query should return to us\r\n url = (f\"{endpoint_Url}/rest/api/2/search?\"\r\n f\"jql={urllib.parse.quote_plus(jql_query)}\"\r\n \"&fields=issuetype,project,summary,assignee,reporter,status,\"\r\n \"created,resolutiondate\"\r\n \"&maxResults=1000\")\r\n # make the GET query to the Jira Server search REST endpoint\r\n r = requests.get(url,headers=hdr)\r\n\r\n # convert response text into a Python Object by using object key and value\r\n return json.loads(r.text, object_hook=lambda d: SimpleNamespace(**d))", "title": "" }, { "docid": "322ae1b8730a7022c63eea1b0e210319", "score": "0.6070221", "text": "def get_issues(jira, project, startdate='', enddate=''):\n block_size = 1000\n block_num = 0\n all_issues = []\n\n jql = 'project={0}'.format(project)\n if startdate and enddate:\n jql = 'project={0} and created >= {1} and created <= {2}'.format(project, startdate, enddate)\n\n while True:\n start_idx = block_num * block_size\n logger.info('Getting issues from %d to %d ...' % (start_idx + 1, start_idx + block_size))\n\n issues = jira.search_issues(jql, start_idx, block_size)\n if len(issues) == 0:\n # Retrieve issues until there are no more to come\n break\n block_num += 1\n for issue in issues:\n # logger.info('%s: %s' % (issue.key, issue.fields.summary))\n all_issues.append(issue)\n logger.info('%d issues retrieved.' % len(all_issues))\n return all_issues", "title": "" }, { "docid": "9e4ba561cbd0acd5ebd68bffe1d69735", "score": "0.60214704", "text": "def query_jira(baseuri, query, username, password, fields=None, options=None):\n cookie = jira_login_cookie(baseuri, username, password)\n\n params = [('jql', query), ('tempMax', 1000)]\n for field in fields:\n params.append(('field', field))\n query_uri = urljoin(baseuri,\n 'sr/jira.issueviews:searchrequest-xml/temp/SearchRequest.xml?' 
+ urlencode(params))\n if (options.show_uri):\n print(query_uri)\n sys.exit(0)\n req = Request(query_uri, headers={'Cookie': cookie})\n with urlopen(req) as fh:\n xml = fh.read().decode(\"utf-8\")\n if (options.show_xml):\n print(xml)\n sys.exit(0)\n # return parsed etree root element\n return ElementTree.fromstring(xml) # FIXME - would be better to parse fh but need to decode", "title": "" }, { "docid": "fc46d4b0effbf2c4199f7aeb56de2f0e", "score": "0.5991224", "text": "def getListIssues(self, filter):\n listissues = self.jira.search_issues(filter,maxResults=100)#maxResults=800)\n return listissues", "title": "" }, { "docid": "0b30a5cb3a9dcad03a141a893fcc9f7e", "score": "0.58900803", "text": "def RunIssueQuery(\n self, cnxn, left_joins, where, order_by, shard_id=None, limit=None):\n return [], False", "title": "" }, { "docid": "0b30a5cb3a9dcad03a141a893fcc9f7e", "score": "0.58900803", "text": "def RunIssueQuery(\n self, cnxn, left_joins, where, order_by, shard_id=None, limit=None):\n return [], False", "title": "" }, { "docid": "ee736b6f2d15b0fd2ba950c397ef69ef", "score": "0.58780134", "text": "def get_issues(query_func=run_query, issue_filter=\"states: OPEN, first:1\"):\n # Query to get issues, comments, and reactions to comments; the `last:1` is currently hard coded to only impact a single issue. Remove to process all.\n query = f\"\"\"\n query {{\n repository(owner:\"{config['owner']}\", name:\"{config['repository']}\") {{\n openIssues: issues({issue_filter}) {{\n edges {{\n node {{\n id\n title\n updatedAt\n createdAt\n url\n labels(first:100) {{\n edges {{\n node {{\n id\n name\n }}\n }}\n }}\n comments(last:100) {{\n edges {{\n node {{\n author {{login}}\n updatedAt\n createdAt\n reactions(last:1) {{\n edges {{\n node {{\n createdAt\n }}\n }}\n }}\n }}\n }}\n }}\n }} cursor\n }}\n pageInfo {{\n startCursor\n endCursor\n }}\n }}\n }}\n }}\n \"\"\"\n response = query_func(query) # Execute the query\n if 'data' not in response.keys():\n print(response)\n elif 'errors' in response.keys():\n print(response['errors'])\n result = response['data']['repository']['openIssues']['edges']\n return [issue['node'] for issue in result], result[-1]['cursor']", "title": "" }, { "docid": "20abf9fab42018b2a7d636c60c49bc43", "score": "0.5860811", "text": "def main():\n args = parse_args()\n\n jira_session = requests.Session()\n jira_session.auth = (args.user, args.password)\n jira_session.verify = args.verify_ssl\n\n print(\"searching for issues\")\n issues = search(args.url, jira_session, args.jql)\n\n dump(jira_session, issues, args.dst)", "title": "" }, { "docid": "5a4565083743ddfe30452b5698e2d78c", "score": "0.58594745", "text": "def _issues_from_jira(self, filter=None):\n\n batch_size = 100\n work_items = []\n\n for category in self.categories:\n\n n = 0\n while 1:\n\n jql = self.categories[category]\n if filter is not None:\n jql = jql + filter\n\n issue_batch = self.jira.search_issues(jql,\n startAt=n,\n maxResults=batch_size,\n expand='changelog')\n\n if issue_batch is None:\n #TODO: Fix mocking so we can get rid of this.\n # 'expand' seems to have some magic meaning in Mockito...\n issue_batch = self.jira.search_issues(jql,\n startAt=n,\n maxResults=batch_size)\n\n for issue in issue_batch:\n\n issue.category = category\n issue_history = None\n cycles = {}\n\n date_created = datetime.strptime(issue.fields.created[:10], '%Y-%m-%d')\n\n if issue.changelog is not None:\n issue_history = history_from_jira_changelog(issue.changelog, date_created, self.until_date)\n\n try:\n\n for cycle in 
self.cycles:\n reopened_state = None\n after_state = None\n start_state = None\n exit_state = None\n end_state = None\n include_states = None\n exclude_states = None\n\n if 'ignore' in self.cycles[cycle]:\n reopened_state = self.cycles[cycle]['ignore']\n\n if 'after' in self.cycles[cycle]:\n after_state = self.cycles[cycle]['after']\n\n if 'start' in self.cycles[cycle]:\n start_state = self.cycles[cycle]['start']\n\n if 'exit' in self.cycles[cycle]:\n exit_state = self.cycles[cycle]['exit']\n\n if 'include' in self.cycles[cycle]:\n include_states = self.cycles[cycle]['include']\n\n if 'exclude' in self.cycles[cycle]:\n exclude_states = self.cycles[cycle]['exclude']\n\n if 'end' in self.cycles[cycle]:\n end_state = self.cycles[cycle]['end']\n\n cycles[cycle] = cycle_time(issue_history,\n start_state=start_state,\n after_state=after_state,\n include_states=include_states,\n exclude_states=exclude_states,\n end_state=end_state,\n reopened_state=reopened_state)\n\n else:\n\n cycles[cycle] = cycle_time(issue_history,\n start_state=start_state,\n after_state=after_state,\n include_states=include_states,\n exclude_states=exclude_states,\n exit_state=exit_state,\n reopened_state=reopened_state)\n\n except AttributeError:\n\n pass\n\n state_transitions = []\n if issue.changelog is not None:\n for change in issue.changelog.histories:\n st = self.state_transition(change)\n state_transitions.append(st)\n\n work_items.append(WorkItem(id=issue.key,\n title=issue.fields.summary,\n state=issue.fields.status.name,\n type=issue.fields.issuetype.name,\n history=issue_history,\n state_transitions=state_transitions,\n date_created=date_created,\n cycles=cycles,\n category=category))\n\n if len(issue_batch) < batch_size:\n break\n n += batch_size\n sys.stdout.write('.')\n sys.stdout.flush()\n\n return work_items", "title": "" }, { "docid": "ef7de3e18fd8d751d582ce1628c276cd", "score": "0.58533776", "text": "def test_search_issues(self):\n cassette_name = self.cassette_name('search_issues')\n with self.recorder.use_cassette(cassette_name):\n issues = self.gh.search_issues('github3 labels:bugs')\n assert isinstance(next(issues), github3.search.IssueSearchResult)\n\n assert isinstance(issues, github3.structs.SearchIterator)", "title": "" }, { "docid": "24f959727b1bb97bbad2c127014ab3ed", "score": "0.57934344", "text": "def __get_issues_amount(self, jql: str) -> int:\n conn = self.__get_connection()\n\n return conn.search_issues(jql, maxResults=0, json_result=True)[\"total\"]", "title": "" }, { "docid": "0512fac0544a3c708e614a8a219cfd25", "score": "0.57879496", "text": "def search_gitlab_repo(wf, query):\n projects = wf.cached_data('gitlab_projects', max_age=0)\n\n # update gitlab api data\n if not wf.cached_data_fresh('gitlab_projects', max_age=3600) and not is_running('gitlab_update'):\n cmd = ['/usr/bin/python', wf.workflowfile('gitlab.py')]\n run_in_background('gitlab_update', cmd)\n wf.rerun = 0.\n\n if query and projects:\n projects = wf.filter(query, projects, key=search_for_project, min_score=20)\n\n if not projects:\n wf.add_item('No projects found', icon=ICON_WARNING)\n wf.send_feedback()\n return 0\n\n for proj in projects:\n if proj['issues_enabled']:\n subtitle = \"issue:{0: <3} star:{1: <3} fork:{2: <3}\".format(proj['open_issues_count'],\n proj['star_count'],\n proj['forks_count'])\n else:\n subtitle = \"\"\n wf.add_item(title=proj['name_with_namespace'],\n subtitle=subtitle,\n arg=proj['web_url'],\n valid=True,\n icon=None,\n uid=proj['id'])\n wf.send_feedback()", "title": "" }, { "docid": 
"54c732708c89f867d2996f90d46b29d2", "score": "0.57834965", "text": "def get_source_issues(self):\n self.source = self.authenticate(\n self.credentials['from']['username'],\n self.credentials['from']['password'],\n self.credentials['from']['server']\n )\n if self.source:\n for frm, to in self.projects.items():\n print('Getting worklogs for %s' % (frm,))\n\n jql = self.createFromJQL(frm)\n\n print(jql)\n\n return frm, to, self.source.search_issues(jql)\n else:\n print('Unable to connect to %s.' % (self.credentials['from']['server'],))\n\n return None", "title": "" }, { "docid": "b42100e0c644b606c5313cc5361f2be7", "score": "0.5782892", "text": "def get_issue_list_raw(self, query_parameters):\n\n return self._request(\n '/issues',\n method='GET',\n query_parameters=query_parameters)", "title": "" }, { "docid": "f92f3890af32eb122d40a29c205e4223", "score": "0.57565665", "text": "def _get_issues(self, user, owner, repo, title):\n url = \"%s/repos/%s/%s/issues\" % (self._api_url, owner, repo)\n params = {\"state\": \"open\", \"creator\": user}\n data = self.get_all_pages(url, params=params)\n matched_issues = []\n if not self._bad_response and data:\n for i in data:\n if i[\"title\"] == title:\n matched_issues.append(i)\n return matched_issues", "title": "" }, { "docid": "89e7d8992d5bd0dc06620bbcc0218b2f", "score": "0.56684595", "text": "def bugs_caught(client):\n jql = f\"{standard_jql} AND status changed from Testing to 'In Progress'\"\n return len(client.search_issues(jql))", "title": "" }, { "docid": "45e9d89fcd17035212db7b4a29d969b0", "score": "0.56381136", "text": "def filter_issue_results(data):\n query = Q()\n final_query = []\n filter_queryset = FilterIssueQueryset(data)\n\n # Based on the parameters from the form, find the appropriate filter\n # method and call it against the current queryset.\n for key in data.iterkeys():\n handler = getattr(filter_queryset, 'filter_%s' % key)\n if handler:\n query = handler(query)\n\n if query and len(query):\n final_query = models.Issue.objects.filter(query).select_related()\n return final_query", "title": "" }, { "docid": "547bb479fb8d736c65c8613ee9476ebe", "score": "0.56244355", "text": "def get_issues(self):\n \n # Get prd new feature link ids, then we'll get those issues\n# jql = '((\"FL Project\" = \"G.1.0\") and ((project = \"Titan\") or (project = \"Griffin\")) and (issuetype = \"PRD\"))'\n jql = '((\"FL Project\" = \\\"%s\\\") and (project = \\\"%s\\\") and (issuetype = \"PRD\"))' % (self.options.fl_project, self.options.project)\n self.jql = jql\n print \"Getting PRD issues\"\n print \"jql: %s\" % jql\n self.prd_issue_list = list(jira_utils.get_issues(jql)) \n \n # Get prd new feature link ids, then we'll get those issues\n nf_link_ids = []\n\n for item in self.prd_issue_list:\n for issue_link in item['fields']['issuelinks']:\n try:\n if issue_link['inwardIssue']['fields']['issuetype']['name'] == \"New Feature\":\n nf_link_ids.append(issue_link['inwardIssue']['key']) # Add New feature id to list\n except KeyError: \n continue # Some issue links don't have the field \"inwardIssue\", skip those and continue\n \n nf_link_ids_string = \",\".join(sorted(nf_link_ids))\n jql = 'key in (' + nf_link_ids_string + ')' # Like: key in (\"Titan-1234\", \"Titan-5678\")\n \n print \"Getting PRDs New Feature link issues\"\n print \"jql: %s\" % jql\n self.new_feature_issue_list = list(jira_utils.get_issues(jql))", "title": "" }, { "docid": "556f03da4cd4fc5b08c95b6b43282760", "score": "0.56068", "text": "def getJiraTickets():\r\n subtaskid = 
createSubTask(\"pull service desk incidents from JIRA API\", getMaintaskId())\r\n try:\r\n MyJira = Jira(**jira_api)\r\n fromDate = getLastsuccessfulExtarctionDate()\r\n\r\n # Condition JIRA API JQL to fetch the required issues\r\n Condition_string = f\"project='\" + jira_project + \"' \"\r\n\r\n if fromDate is not None:\r\n Condition_string = Condition_string + \"AND (created >'\" + fromDate.strftime(\r\n \"%Y-%m-%d\") + \"' OR Updated > '\" + fromDate.strftime(\"%Y-%m-%d\") + \"')\"\r\n\r\n JSON_List = MyJira.getIssues(condition=Condition_string)\r\n Json_DataFrame = MyJira.create_df(JSON_List)\r\n # Returning the pandas dataframe with JIRA issues\r\n updateSubTask(subtaskid, \"SUCCESS\")\r\n return Json_DataFrame\r\n except (Exception, psycopg2.Error) as error:\r\n insertErrorLog(subtaskid, error)", "title": "" }, { "docid": "5a6498e13e131ceadc42e8d924cc4c98", "score": "0.5577767", "text": "def get_all_issues() -> Iterator[jira.resources.Issue]:\n # there seems to be a bug in the jira library where it only grabs the first 50 results (even if\n # maxResults evaluates to False, as instructed to do by the docs). we'll handle the pagination\n # ourselves.\n start_at = 0\n BATCH_SIZE = 50\n while True:\n new_results = JiraClient().search_issues(\n 'project=%s' % JIRA_PROJECT_KEY,\n startAt=start_at,\n maxResults=BATCH_SIZE,\n fields='key'\n )\n if new_results:\n logger.info('got jira issues %s - %s', start_at, start_at + BATCH_SIZE)\n for r in new_results:\n yield r\n start_at += BATCH_SIZE\n else:\n break", "title": "" }, { "docid": "49c94654f1253bdb4f47951adf999384", "score": "0.5551", "text": "def passing_qe(client):\n jql = f\"{standard_jql} AND type in (Bug, Story)\"\n issues = client.search_issues(jql, expand='changelog')\n if len(issues) < 1:\n log.warning(f'No issues could be found for JQL: {jql}')\n return\n start_time = None\n end_time = None\n differences = []\n for issue in issues:\n changelog = issue.changelog\n for history in changelog.histories:\n for item in history.items:\n if item.field == 'status':\n if issue.fields.project.key not in ['FACTORY', 'BST', 'COMPOSE', 'NOS']:\n # If this is QE\n if item.toString == 'Merged' and not end_time:\n start_time = datetime.strptime(history.created[:-5], '%Y-%m-%dT%H:%M:%S.%f')\n elif (item.toString == 'Verified' or item.toString == 'In Progress') and start_time:\n end_time = datetime.strptime(history.created[:-5], '%Y-%m-%dT%H:%M:%S.%f')\n else:\n # If this is not QE\n if item.toString == 'Testing' and not end_time:\n start_time = datetime.strptime(history.created[:-5], '%Y-%m-%dT%H:%M:%S.%f')\n elif (item.toString == 'In Progress' or item.toString == 'Release Pending') and start_time:\n end_time = datetime.strptime(history.created[:-5], '%Y-%m-%dT%H:%M:%S.%f')\n if start_time and end_time:\n # Always check this as this can happen multiple times in an issue\n if start_time < end_time and end_time > start_time:\n differences.append(end_time - start_time)\n start_time = None\n end_time = None\n\n if len(differences) < 1:\n log.warning(f'No issues could be found for function passing_qe')\n return -1\n else:\n return get_average(differences)", "title": "" }, { "docid": "8ad8565d4f32a4bab234402a85f05682", "score": "0.55164003", "text": "def fetch_issues(repo_owner, repo_name):\n\n issue_list_query = f\"\"\"query {{\n repository(owner: \"{repo_owner}\", name: \"{repo_name}\") {{\n issues(last: 10) {{\n edges {{\n node {{\n body \n title\n state\n }}\n }}\n }}\n }}\n }}\"\"\"\n\n issue_list_query_by_label = f\"\"\"{{\n repository(owner: 
\"{repo_owner}\", name: {repo_name}\") {{\n issues(last: 10, filterBy: {{labels: \"SplunkUFVersion\"}}) {{\n edges {{\n node {{\n title\n }}\n }}\n }}\n }}\n }}\"\"\"\n\n url = 'https://api.github.com/graphql'\n response = requests.post(url, json={'query' : issue_list_query}, headers={\"Authorization\": \"Bearer {}\".format(github_key)})\n response_dict = json.loads(response.text)\n return response_dict['data']['repository']['issues']['edges']", "title": "" }, { "docid": "0415622c42f6b7280ed4f65af92b550f", "score": "0.5486984", "text": "def _indexIssue(issue, updates):\n fields = [search.TextField(name=\"summary\", value=issue.summary),\n search.TextField(name=\"description\", value=issue.description),\n search.AtomField(name=\"id\", value=str(issue.key().id_or_name())),\n search.AtomField(name=\"type\", value=issue.type),\n search.NumberField(name=\"priority\", value=issue.priority),\n search.AtomField(name=\"state\", value=issue.state),\n search.AtomField(name=\"resolution\", value=issue.resolution)]\n\n if not updates:\n issue, updates = getIssue(issue.key().id())\n\n comments = \"\"\n for update in updates:\n if update.comment:\n if comments:\n comments += \"\\r\\n<hr />\\r\\n\"\n comments += update.comment\n fields.append(search.HtmlField(name=\"comments\", value=comments))\n\n doc = search.Document(\n doc_id = str(issue.key()),\n fields = fields)\n\n index = search.Index(name=\"issue\")\n index.put(doc)", "title": "" }, { "docid": "8c2f4b8de1ab2c12f123896cf73fa648", "score": "0.5485311", "text": "def search(\r\n self, jql, validate_query=None, fields=None, expand=None,\r\n properties=None, fields_by_keys=None, start_at=None,\r\n max_results=None):\r\n query = {'jql': jql}\r\n if validate_query is not None:\r\n query['validateQuery'] = validate_query\r\n\r\n if fields is not None:\r\n query['fields'] = fields\r\n\r\n if expand is not None:\r\n query['expand'] = expand\r\n\r\n if properties is not None:\r\n query['properties'] = properties\r\n\r\n if fields_by_keys is not None:\r\n query['fieldsByKeys'] = fields_by_keys\r\n\r\n if start_at is not None:\r\n query['startAt'] = start_at\r\n\r\n if max_results is not None:\r\n query['maxResults'] = max_results\r\n\r\n return self.get('search', query=query).json()", "title": "" }, { "docid": "a4577ff927f21d2b039917b0c819e5c6", "score": "0.54259205", "text": "def search_issue_by_title(self, title, org, repo):\n query = \"{} in:Title repo:{}/{}\".format(title, org, repo)\n issues = self.gh.search_issues(query)\n\n for i in issues:\n if i.title == title:\n return i\n return None", "title": "" }, { "docid": "5193c15cc5f45ff7e4a078dee440f8f6", "score": "0.5420996", "text": "def list(self, project, state=\"open\"):\r\n return self.get_values(\"list\", project, state, filter=\"issues\",\r\n datatype=Issue)", "title": "" }, { "docid": "27a8b07ae69eecc7c3bb421615b41048", "score": "0.54194176", "text": "def list_issues(message, *groups):\n param = groups[0]\n expanded = groups[1]\n params = {}\n if param:\n params['state'] = param\n resp = Issue().list(params=params)\n if len(resp) == 0:\n message.reply('0 issues found.')\n return\n if expanded:\n for i in resp:\n message.reply(f\"Issue: {i['html_url']}\\nTitle: {i['title']}\\nDescription: {i['body']}\")\n else:\n _ = [message.reply(i['html_url']) for i in resp]\n #print(f\"Got a message {message.body} with groups '{groups}'\")", "title": "" }, { "docid": "5e3ded80f618d1c23e1a3bc799d23f0a", "score": "0.53975636", "text": "def search(self):\n params = {}\n params = dict(params.items() + 
self._time_frame.items())\n\n if self._includefields:\n params['include_fields'] = list(self._includefields)\n if self._bug_numbers:\n bugs = []\n for bug in self._bug_numbers:\n result = self._bugsy.request('bug/%s' % bug,\n params=params)\n bugs.append(Bug(self._bugsy, **result['bugs'][0]))\n\n return bugs\n else:\n if self._keywords:\n params['keywords'] = list(self._keywords)\n if self._assigned:\n params['assigned_to'] = list(self._assigned)\n if self._summaries:\n params['short_desc_type'] = 'allwordssubstr'\n params['short_desc'] = list(self._summaries)\n if self._whiteboard:\n params['short_desc_type'] = 'allwordssubstr'\n params['whiteboard'] = list(self._whiteboard)\n if self._change_history['fields']:\n params['chfield'] = self._change_history['fields']\n if self._change_history.get('value', None):\n params['chfieldvalue'] = self._change_history['value']\n\n results = self._bugsy.request('bug', params=params)\n error = results.get(\"error\", None)\n if error:\n raise SearchException(results['message'])\n return [Bug(self._bugsy, **bug) for bug in results['bugs']]", "title": "" }, { "docid": "b6404660ce35e67e590c5e53eef2be5a", "score": "0.53950787", "text": "def get_all_issues_for_version(auth_jira, release_version):\n\n jql_query = 'project={} and fixVersion={} ORDER BY issueKey ASC'\\\n .format(str(CXX_PROJ_ID), release_version)\n return auth_jira.search_issues(jql_query, maxResults=0)", "title": "" }, { "docid": "b22bbd25587a7b1a8838b965cf29b3d1", "score": "0.5372189", "text": "def get_project_issues(self, componentKeys, branch, selects=None, **kwargs):\n if selects:\n if isinstance(selects, str):\n selects = selects.split(',')\n\n params = {\n 'additionalFields': '_all',\n 'componentKeys': componentKeys,\n 'branch': branch\n }\n if kwargs:\n self.sonarqube.copy_dict(params, kwargs)\n\n page_num = 1\n page_size = 1\n total = 2\n\n while page_num * page_size < total:\n resp = self.sonarqube._make_call('get', API_ISSUES_SEARCH, **params)\n response = resp.json()\n\n page_num = response['paging']['pageIndex']\n page_size = response['paging']['pageSize']\n total = response['paging']['total']\n\n params['p'] = page_num + 1\n\n for issue in response['issues']:\n # ้€‰ๆ‹ฉ้œ€่ฆๆ•ฐๆฎ\n for key in list(issue.keys()):\n if selects and key not in selects:\n del issue[key]\n yield issue\n\n if page_num >= 100:\n break", "title": "" }, { "docid": "531c789a98f255f667f245bc69c71317", "score": "0.5364529", "text": "def request_issues_by_sprint(cfg):\n issues = []\n\n sprint_issues_url = cjm.request.make_cj_agile_url(\n cfg, \"sprint/{0:d}/issue\".format(cfg[\"sprint\"][\"id\"]))\n\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_request(\n cfg, sprint_issues_url,\n {\"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for issue in response_json[\"issues\"]:\n issues.append(cjm.issue.extract_issue_data(cfg, issue))\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return issues", "title": "" }, { "docid": "13eb4d2a940b2bb1a2d899d52fc6d413", "score": "0.53419036", "text": "def get_issues(project=\"ipython/ipython/\", state=\"open\"):\n f = urlopen(\"http://github.com/api/v2/json/issues/list/%s%s\" % (project,\n state))\n return json.load(f)['issues']", "title": "" }, { "docid": "1e977430fcffa998317d59d212c709bc", "score": "0.5335836", "text": "def search_matching_jira_tickets(ES, search_phrase:str) -> Iterator[dict]:\n for issue in jira_issue_db.search_jira_issues(ES, search_phrase, 
max_count=30):\n yield {\n \"text\": \"%s: %s\" % (issue.key, issue.summary),\n \"value\": issue.key,\n }", "title": "" }, { "docid": "26b6e50a549cc84abe2c5efa364206f4", "score": "0.5326994", "text": "def test_get_issue_by_id(self):\n i = self.build_issue_object()\n issue_list = i.list_all()\n assert issue_list is not None\n\n target_id = issue_list[0][\"id\"]\n issues = i.list_all(id=target_id)\n assert issues is not None\n\n for issue in issues:\n assert issue[\"id\"] == target_id", "title": "" }, { "docid": "620e7a7a64e85a39600953ffd87c50f3", "score": "0.53086376", "text": "def get_issue_count(self, query_string):\n issues = self.client.search_issues(query_string)\n return len(issues)", "title": "" }, { "docid": "e4ce4ccc37ef478dabb35b8fd17ab39a", "score": "0.52998155", "text": "def request_issues_by_comment(cfg, comment):\n issues = []\n\n sprint_issues_url = cjm.request.make_cj_url(cfg, \"search\")\n\n jql = 'project = \"{0:s}\" AND comment ~ \"{1:s}\"'.format(cfg[\"project\"][\"key\"], comment)\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_post_request(\n cfg, sprint_issues_url,\n json={\"jql\": jql, \"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for issue in response_json[\"issues\"]:\n issues.append(cjm.issue.extract_issue_data(cfg, issue))\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return issues", "title": "" }, { "docid": "449fe2f6e83ee588c2c3f2dda7ee4a44", "score": "0.52984947", "text": "def getIssues(jira, board_id, sprint_id):\n r_json = jira._get_json('board/%s/sprint/%s/issue?maxResults=1000' % (board_id, sprint_id),\n base=jira.AGILE_BASE_URL)\n issues = [Issue(jira._options, jira._session, raw_issues_json) for raw_issues_json in\n r_json['issues']]\n return issues", "title": "" }, { "docid": "5a3e4cfe4a1cc042857ef6752d229506", "score": "0.5288987", "text": "def ScienceDirectSearchV2(self, date, issue):\n\n try:\n results = []\n\n request = Request('search', date=date, issue=issue, start_idx=0)\n\n data = self.APIRequest(request)\n totalResults = data['resultsFound']\n\n if totalResults > 0:\n for pagenum in range(math.ceil(totalResults / 100)):\n prefix = 'Progress [Year:{}, Issue:{}]:'.format(date, issue)\n progress.printProgressBar(pagenum + 1, math.ceil(totalResults / 100),\n prefix=prefix, suffix='Complete', length=30)\n start_idx = pagenum * 100\n\n request = Request('search', date=date, issue=issue, start_idx=start_idx)\n results.append(self.APIRequest(request))\n\n return results\n else:\n return 0\n except Exception as error:\n print(colored(error, 'red'))\n raise Exception(error)", "title": "" }, { "docid": "59e3a8e991625a9ff4a378a49bdccc90", "score": "0.5287545", "text": "def update_index_issues(self, gh_token, config):\n # Get the set of indexed ids:\n # ------\n indexed_issues = set()\n p = QueryParser(\"kind\", schema=self.ix.schema)\n q = p.parse(\"issue\")\n with self.ix.searcher() as s:\n results = s.search(q,limit=None)\n for result in results:\n indexed_issues.add(result['id'])\n\n\n # Get the set of remote ids:\n # ------\n # Start with api object\n g = Github(gh_token)\n\n # Now index all issue threads in the user-specified repos\n\n # Start by collecting all the things\n remote_issues = set()\n full_items = {}\n\n # Iterate over each repo \n list_of_repos = config['REPOSITORIES']\n for k, r in enumerate(list_of_repos):\n\n if '/' not in r:\n err = \"Error: specify org/reponame or user/reponame in list of repos\"\n 
logging.error(err)\n raise Exception(err)\n\n this_org, this_repo = re.split('/',r)\n try:\n org = g.get_organization(this_org)\n repo = org.get_repo(this_repo)\n except:\n err = \"Error: could not gain access to repository %s\"%(r)\n logging.error(err)\n continue\n\n # Iterate over each issue thread\n open_issues = repo.get_issues(state='open')\n closed_issues = repo.get_issues(state='closed')\n\n for j, issue in enumerate(open_issues):\n # For each issue/comment URL,\n # grab the key and store the \n # corresponding issue object\n key = issue.html_url\n value = issue\n remote_issues.add(key)\n full_items[key] = value\n\n for j, issue in enumerate(closed_issues):\n key = issue.html_url\n value = issue\n remote_issues.add(key)\n full_items[key] = value\n\n # Stop early if testing\n if config['TESTING'] is True and k>=1:\n break\n\n\n writer = self.ix.writer()\n count = 0\n\n # Drop issues in indexed_issues\n for drop_issue in indexed_issues:\n writer.delete_by_term('id',drop_issue)\n\n\n # Add any issue in remote_issues\n for add_issue in remote_issues:\n item = full_items[add_issue]\n self.add_issue(writer, item, gh_token, config, update=False)\n count += 1\n\n\n writer.commit()\n\n msg = \"Done, updated %d Github issues in the index\" % count\n logging.info(msg)", "title": "" }, { "docid": "3ca6740cf27ff63ae8e19fb286def7c4", "score": "0.52828497", "text": "def indexIssue(issue, updates = None):\n deferred.defer(_indexIssue, issue, updates, _queue=\"issuesync\")", "title": "" }, { "docid": "46d52fe3b648c6f12df4aa51e0a8155a", "score": "0.527279", "text": "def fetch_issues(self, org, repo, output):\n client = graphql.GraphQLClient()\n\n num_issues_per_page = 100\n query_template = \"\"\"{{\nrepository(owner: \"{org}\", name: \"{repo}\") {{\n issues(first:{num_issues_per_page} {issues_cursor}) {{\n totalCount\n pageInfo {{\n endCursor\n hasNextPage\n }}\n edges{{\n node {{\n author {{\n __typename\n ... on User {{\n login\n }}\n\n ... on Bot{{\n login\n }}\n }}\n title\n body\n comments(first:20, ){{\n totalCount\n edges {{\n node {{\n author {{\n __typename\n ... on User {{\n login\n }}\n\n ... 
on Bot{{\n login\n }}\n \t\t\t}}\n body\n createdAt\n }}\n }}\n }}\n }}\n }}\n }}\n}}\n}}\n\"\"\"\n\n\n shard = 0\n num_pages = None\n if not os.path.exists(output):\n os.makedirs(output)\n\n total_issues = None\n has_next_issues_page = True\n # TODO(jlewi): We should persist the cursors to disk so we can resume\n # after errors\n issues_cursor = None\n while has_next_issues_page:\n issues_cursor_text = \"\"\n if issues_cursor:\n issues_cursor_text = \"after:\\\"{0}\\\"\".format(issues_cursor)\n query = query_template.format(org=org, repo=repo,\n num_issues_per_page=num_issues_per_page,\n issues_cursor=issues_cursor_text)\n results = client.run_query(query)\n\n if results.get(\"errors\"):\n logging.error(\"There was a problem issuing the query; errors:\\n%s\",\n \"\\n\".join(results.get(\"errors\")))\n return\n\n if not total_issues:\n total_issues = results[\"data\"][\"repository\"][\"issues\"][\"totalCount\"]\n num_pages = int(np.ceil(total_issues/float(num_issues_per_page)))\n logging.info(\"%s/%s has a total of %s issues\", org, repo, total_issues)\n\n shard_file = os.path.join(\n output, \"issues-{0}-{1}-{2:03d}-of-{3:03d}.json\".format(org, repo, shard,\n num_pages))\n\n issues = process_issue_results(results)\n with open(shard_file, \"w\") as hf:\n for i in issues:\n json.dump(i, hf)\n hf.write(\"\\n\")\n logging.info(\"Wrote shard %s to %s\", shard, shard_file)\n shard += 1\n\n page_info = results[\"data\"][\"repository\"][\"issues\"][\"pageInfo\"]\n issues_cursor = page_info[\"endCursor\"]\n has_next_issues_page = page_info[\"hasNextPage\"]", "title": "" }, { "docid": "833ec764ad8d832514c23442fca75b90", "score": "0.5271666", "text": "def do_list(client, args):\n\trepository = get_repository_name(args.repository)\n\tstatus = args.status or 'open'\n\tissues = client.issues.list(repository, state=status)\n\tif not issues:\n\t\tprint '%s has no %s issues' % (repository, status)\n\telse:\n\t\tprint '%s has the following %s issues' % (repository, status)\n\t\tprint 'Issue# - Title'\n\tfor issue in issues:\n\t\tprint '%s - %s' % (issue.number, issue.title)", "title": "" }, { "docid": "1c6212b29cc69b49106a24ee9aca3c1b", "score": "0.5266384", "text": "def query_issue(self, issue_id):\n issue = self._jira.issue(issue_id)\n return issue, self._comment_and_attachment_process(issue=issue)", "title": "" }, { "docid": "ab9788ba3d182046290a7e313b69e116", "score": "0.5262339", "text": "def search_issues_and_features(request):\n\n # Create an instance of the search query \n q = request.GET['q']\n\n # Query the database - filter issues and features based on what's searched on the site \n search_issue = Item.objects.filter(Issue__icontains=request.GET['q']).values('Issue', 'done')\n search_feature = Feature.objects.filter(name__icontains=request.GET['q']).values('name', 'done')\n\n return render(request, 'search_results.html', {\n \"search_issue\": search_issue, \n \"search_feature\": search_feature, \n \"q\": q})", "title": "" }, { "docid": "17d45a12e89bd1250abcc921e6ea5efc", "score": "0.5250662", "text": "def search(query, username, startTime=None, endTime=None, channel=None):", "title": "" }, { "docid": "1d2cb9e6b244c74766e62ca39e2e8014", "score": "0.5249345", "text": "def test_get_issue_list(self):\n i = self.build_issue_object()\n issue_list = i.list_all()\n assert issue_list is not None", "title": "" }, { "docid": "8d830a643d7724b32a20cb0088644873", "score": "0.52437", "text": "def get_issues(repos_name):\n r = requests.get(f'{source_url}{repos_name}/issues?state=all')\n data = r.json()\n 
return jsonify(form_response(data, get_issues.__name__))", "title": "" }, { "docid": "bf4fbe54894c13c557d552a910c19524", "score": "0.52188545", "text": "def mantis_get_all_issues_command(client, args):\n if args is not None:\n params = args\n resp = client.get_issues(params=params).get('issues')\n issues = [create_output_result(issue) for issue in resp]\n readable_output = tableToMarkdown(\"Mantis Issue Details\", issues, headers=TABLE_HEADERS)\n results = CommandResults(\n readable_output=readable_output,\n outputs_prefix=\"Mantis.issue\",\n outputs_key_field=TABLE_HEADERS,\n outputs=issues\n )\n return results", "title": "" }, { "docid": "e29d1416babbaa489295d8cf13078fc8", "score": "0.52005184", "text": "def workaround_exact_search_match(self, jql, client, data):\r\n jql = jql + ' AND status not in (Obsolete, Closed)'\r\n\r\n exist_list = self.searchIssues(jql, client, field=\"key,summary\")\r\n new_list = []\r\n for issue_key in exist_list:\r\n exist_summary = issue_key.fields.summary\r\n if data[\"summary\"] == exist_summary:\r\n new_list.append(issue_key)\r\n return new_list", "title": "" }, { "docid": "c713503f4a91e9663810084580a0b0ef", "score": "0.5198353", "text": "def search_tickets(query):\n\n ids = []\n\n results = api_request('GET', host + \"tickets?query=\" + quote(query))\n if results.get('message', None):\n return []\n done = False\n while not done:\n for item in results['items']:\n ids.append(int(item['id']))\n if 'next_page' in results:\n results = api_request('GET', results['next_page'])\n else:\n done = True\n return sorted(ids)", "title": "" }, { "docid": "5371a06735d6a1a8522f5dcf157cc68f", "score": "0.5195031", "text": "def api_search(\n query: str, max_num_results: int, session=scoped_session(sessionmaker(bind=engine))\n):\n result = search(query, int(max_num_results))\n result_problems = (\n session.query(Problem).filter(Problem.problem_id.in_(tuple(result))).all()\n )\n result_problems = list(map(object_as_dict, result_problems))\n if google.authorized:\n resp = google.get(\"/oauth2/v1/userinfo\")\n assert resp.ok, resp.text\n return render_template(\"default_logged_in.html\", email=resp.json()[\"email\"], problems=result_problems)\n else:\n return render_template(\"default.html\", problems=result_problems)", "title": "" }, { "docid": "c067a21571d94c4afaba179a5e620e8d", "score": "0.5194793", "text": "def list_all_issues():\n conditions = {}\n result = query_issues_filter_by(**conditions).all()\n return jsonify([create_issue_read_dict(issue, last_read) for (issue,last_read) in result])", "title": "" }, { "docid": "6ebd656f49be963e15011a64fb3ffc53", "score": "0.51888585", "text": "def test_issue_list_resolved(self):\n status = \"resolved\"\n i = self.build_issue_object()\n result = i.list_all(status=status)\n assert result is not None\n for issue in result:\n assert issue[\"status\"] == \"resolved\"", "title": "" }, { "docid": "630babd3a4086470ce8d174cfb98d611", "score": "0.518504", "text": "def search(self, problem):\n abstract", "title": "" }, { "docid": "67315b440f7c05a5ceca19348b33d47b", "score": "0.5172828", "text": "def test_issues_list(self):\n resp = self.client.get('/api/issues/', HTTP_AUTHORIZATION=f'Bearer {self.token}')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "38af04f021849e2982e3857b7e496619", "score": "0.51677966", "text": "def get_all_issues(self):\n\n issues = self.getprojectissues(self.p_id)\n if issues:\n return issues\n else:\n raise TypeError(\"No Project ID found!\")", "title": "" }, { "docid": 
"e6d98549d6ab28f0a0aaa1364af0f32a", "score": "0.5157461", "text": "def search(self, start=(date.today() - timedelta(weeks=1)), end=date.today(),\n types=ALL, clients=ALL, tasks=ALL, categories=ALL,\n limit=1000, date_type='created', description=None, jira_id=None, user='apiuser', dry=False):\n\n data = {\n 'api_key': self.qik_api_key,\n 'date_range_from': parse_date(start).strftime('%Y-%m-%d'),\n 'date_range_to': parse_date(end).strftime('%Y-%m-%d'),\n 'rate': validate_field_collection(self._session, Type, types),\n 'client': validate_field_collection(self._session, Client, clients),\n 'task': validate_field_collection(self._session, Task, tasks),\n 'categories': validate_field_collection(self._session, Category, categories),\n 'users': user,\n 'limit': validate_limit(limit),\n 'date_type': validate_date_type(date_type),\n }\n\n if dry:\n return data\n response = requests.get(urljoin(self.qik_api_url, 'entries', 'search.json'), params=data)\n return response.content", "title": "" }, { "docid": "d15e7f48308a5a50a9e49688737c8430", "score": "0.51475686", "text": "def bing_query(query, issues = None, top=6):\n\n key = \"nvlGQnxPcpay6X4rWtEfvJJrX5aQXQL+/+p/7jZjgs0=\"\n result = {}\n for issue in issues:\n query_formatted = \"%27\" + query.replace(\" \",\"%27\")\n issue_formatted = \"%27\" + issue.replace(\" \",\"%27\")\n query_formatted += issue_formatted + \"%27\"\n\n url = 'https://api.datamarket.azure.com/Data.ashx/Bing/Search/Web?Query='\n url += query_formatted + \"&$top=\" + str(top) + '&$format=json'\n\n response = requests.get(url, auth=(key,key))\n response_json = response.json()\n\n result[issue] = response_json.get('d', 'No response').get('results', 'No response')\n\n return result", "title": "" }, { "docid": "b37e3ac5fbe8e05d94017ce4f92a32fa", "score": "0.51475024", "text": "def get(self, owner, repo, milestone, **kwargs):\n url = '%s/%s/%s/issues' % (self.base_url, owner, repo)\n kwargs['milestone'] = milestone.number\n query_string = urllib.urlencode(kwargs)\n url = url + '?' 
+ query_string\n result = []\n while url:\n response = urllib2.urlopen(url)\n url = self.get_next_link(response)\n raw = response.read()\n issues = json.loads(raw)\n result.extend([Issue(owner, repo, json_data)\n for json_data in issues])\n return result", "title": "" }, { "docid": "8e09ba3abd42b44565582d6d6888c5de", "score": "0.5146769", "text": "def get_issue(issue_key):\n\n jira = JIRA(server='http://uptomuch.atlassian.net',\n basic_auth=(user.USERNAME, user.PASSWORD))\n issue = jira.issue(issue_key)\n return issue", "title": "" }, { "docid": "e944725ddfe4eaf18dcab1ba3533e838", "score": "0.51458645", "text": "def update_all_jira_issues():\n logger.info(\"updating all jira issues\")\n count = 0\n for issue in jira_issue_aservice.get_all_issues():\n count += 1\n update_jira_issue.delay(issue.key, do_invalidate_cache=False)\n logger.info(\"queued %s jira issues\", count)", "title": "" }, { "docid": "2ce36650783508de5131ca45f57f401c", "score": "0.5140978", "text": "def search():\n query = request.form.get(\"query\")\n employees = list(mongo.db.employees.find({\"$text\": {\"$search\": query}}))\n return render_template(\"employees.html\", employees=employees)", "title": "" }, { "docid": "506423b93ac63df5b02eda042b37154a", "score": "0.5136829", "text": "def process_issue_results(data):\n edges = data.get(\"data\").get(\"repository\").get(\"issues\").get(\"edges\")\n\n issues = []\n for e in edges:\n issues.append(e[\"node\"])\n\n return issues", "title": "" }, { "docid": "30054e552556363c5ab718c341e20216", "score": "0.5135901", "text": "def search(self, query):", "title": "" }, { "docid": "ace76f8a00b97fcea9b959bdd76b3464", "score": "0.5112975", "text": "def get_issue_by_ref(self, pid, issue_ref):\n self.autologin()\n r = super().get(self.api + '/issues/by_ref?ref=%d\\&project=%d' %\n (issue_ref, pid))\n return TaigaIssue(data=r.json())", "title": "" }, { "docid": "313ea063f92af22e3300342c2f817bef", "score": "0.5109189", "text": "def query(self, nrql, params={}, max_retries=MAX_RETRIES):\n parsed_nrql = self.__parse_nrql(nrql, params)\n count_retries = 0\n status_code = 0\n while True:\n try:\n count_retries += 1\n response = requests.get(\n self.__url, headers=self.__headers, params={'nrql': parsed_nrql})\n status_code = response.status_code\n if status_code == 200:\n results = response.json()\n break\n else:\n self.__logger(\n 'warning: got a {} response fetching {} ({}/{})',\n status_code, self.__url, count_retries, max_retries, stop=False)\n except requests.RequestException:\n if count_retries >= max_retries:\n break\n finally:\n if count_retries >= max_retries and status_code != 200:\n self.__logger(\n 'warning: gave up fetching {} after {} attempts',\n self.__url, max_retries, stop=False)\n results = []\n break\n\n return results", "title": "" }, { "docid": "b07bfcac8cfe9a4dbe39dcf3e13446a6", "score": "0.5096993", "text": "def search(self, query):\n raise NotImplementedError()", "title": "" }, { "docid": "489160641112530d91437c4284176ba8", "score": "0.5082217", "text": "def get_sonar_jira_issues():\n issue_matrix = pd.DataFrame([], columns=[\"Sonarqube_id\", \"Jira_id\", \"comments\"])\n\n try:\n sonar_issues = []\n projects_list = sonar_projects.split(\",\")\n issue_type_list = sonar_issue_type.split(\",\")\n for sonar_project in projects_list:\n for _issue_type in issue_type_list:\n response = requests.get(\n sonar_auth_url\n + \"/api/issues/search?additionalFields=comments&types=\"\n + sonar_issue_type\n + \"&projects=\"\n + sonar_project\n + \"&branch=\"\n + sonar_branch\n + 
\"&statuses=OPEN,REOPENED,CONFIRMED\"\n )\n data_json = response.json()\n sonar_issues = sonar_issues + data_json[\"issues\"]\n\n except Exception as e:\n print(e)\n\n for issue in sonar_issues:\n existing_jira_id = get_jira_id_in_comments(issue[\"comments\"])\n if existing_jira_id:\n issue_matrix.loc[-1] = [issue[\"key\"], existing_jira_id, \"\"]\n else:\n Jira_id = create_jira_issue(issue)\n comment = \"\"\n if Jira_id:\n response = requests.post(\n sonar_auth_url\n + \"/api/issues/add_comment?issue=\"\n + issue[\"key\"]\n + \"&text=\"\n + jira_base_url\n + \"/browse/\"\n + Jira_id\n )\n if not response.ok:\n comment = \"Unable to add Jira URL to issue in Sonarqube\"\n issue_matrix.loc[-1] = [issue[\"key\"], Jira_id, comment]\n else:\n issue_matrix.loc[-1] = [\n issue[\"key\"],\n \"Jira id was not created due to earlier exception\",\n comment,\n ]\n issue_matrix.index = issue_matrix.index + 1\n issue_matrix = issue_matrix.sort_index()\n\n return issue_matrix", "title": "" }, { "docid": "c1442c810a8efb1e97cc995e8d374035", "score": "0.50741833", "text": "def execute_query(cls, query):\n print(query)\n connection = cls.get_connection()\n response = []\n try:\n results = connection.search(query, **{\n 'fl': fl,\n 'fq': fq,\n 'rows': rows\n })\n print('try in solr request')\n except:\n print('Exception in solr request')\n else:\n docs = pd.DataFrame(results.docs)\n print(\"Number of hits: {0}\".format(len(results)))\n for i in results:\n pprint.pprint(i)\n # response.add(i)\n return results", "title": "" }, { "docid": "52f578092f2bff1e0fd844e7a78e9139", "score": "0.5066396", "text": "def get_epics(url=None, headers=None, project_id=None):\r\n data = {\"jql\": \"project = {} AND type = Epic\".format(project_id),\r\n \"fields\": [\"key\", \"customfield_10004\"]}\r\n list_rep = []\r\n response = JiraIssue.search_issues(url=url, headers=headers, search_request=data)\r\n issues = response.json()[\"issues\"]\r\n for item in issues:\r\n list_rep.append((item[\"key\"], item[\"fields\"][\"customfield_10004\"]))\r\n return list_rep", "title": "" }, { "docid": "5042843dc073b3b79b61083eb4cfd649", "score": "0.506209", "text": "def aqlSearch(self, aql_query):\n \n self.logger.info('%s.aqlSearch starts', __name__)\n try:\n response = self.arielClient.create_search(aql_query)\n response_json = json.loads(response.read().decode('utf-8'))\n self.logger.info(response_json)\n search_id = response_json['search_id']\n response = self.arielClient.get_search(search_id)\n\n error = False\n while (response_json['status'] != 'COMPLETED') and not error:\n if (response_json['status'] == 'EXECUTE') | \\\n (response_json['status'] == 'SORTING') | \\\n (response_json['status'] == 'WAIT'):\n response = self.arielClient.get_search(search_id)\n response_json = json.loads(response.read().decode('utf-8'))\n else:\n error = True\n\n response = self.arielClient.get_search_results(\n search_id, 'application/json')\n \n body = response.read().decode('utf-8')\n body_json = json.loads(body)\n\n return body_json\n #looks like:\n #{'events': [{'field1': 'field1 value',\n # 'field2': 'field2 value'},\n # {'field1': 'fied1 value',\n # 'field2': 'field2 value'}\n # ]}\n except Exception as e:\n self.logger.error('%s.aqlSearch failed', __name__, exc_info=True)\n raise", "title": "" }, { "docid": "688653649142bca682db42ff5592ecdb", "score": "0.50615454", "text": "def search(query=None, **kwargs):\n\n repos = get_repos(org=kwargs[\"org\"], query=query)\n if query is not None:\n click.secho(\"Displaying {} Results For {}\".format(len(repos), 
query))\n else:\n click.secho(\"Displaying {} Results\".format(len(repos)))\n try:\n pl = ViewerPlugin(viewer=\"search\")\n pl.visualize(repos=repos)\n except NoPluginError:\n repo_table(repos)", "title": "" }, { "docid": "6d1c6f1f5460417e6618f061857d1b4a", "score": "0.5053347", "text": "def _find_issue(self, message, issue_id):\n try:\n issue = self.client.get(issue_id).json()[\"fields\"]\n except KeyError:\n return\n\n assignee = issue.get(\"assignee\")\n if assignee:\n assignee = assignee.get(\"displayName\")\n\n msg = \"%s: %s [Status: %s, Priority: %s, Assignee: %s] %s\"\n message.dispatch(msg % (\n issue_id,\n issue[\"summary\"],\n issue[\"status\"][\"name\"],\n issue[\"priority\"][\"name\"],\n assignee,\n \"%s/jira/browse/%s\" % (self.client.domain, issue_id)\n ))", "title": "" }, { "docid": "e849ef03014ad96c7e3f4e5c74a90ca6", "score": "0.5051967", "text": "def list_by_label(self, project, label):\r\n return self.get_values(\"list\", project, \"label\", label,\r\n filter=\"issues\", datatype=Issue)", "title": "" }, { "docid": "8f9f05316abfb368a97c0e1e01bd48ea", "score": "0.5046602", "text": "def count_issue_raw(self, query_parameters):\n\n return self._request(\n '/issues/count',\n method='GET',\n query_parameters=query_parameters)", "title": "" }, { "docid": "7750afc5bf7f82b0e30c3811f7d6dcff", "score": "0.50406605", "text": "def open_issues(self):\n url = (\n SEARCH_END_POINT\n + \"issues?q=state%3Aopen+repo:\"\n + self.repo_name\n + \"+type%3Aissues\"\n )\n r = requests.get(url, headers=self.headers)\n if r.ok:\n return r.json()[\"total_count\"]\n else:\n return None", "title": "" }, { "docid": "be42d3745ceb99c64e0a3fd7d2bd78fb", "score": "0.5039097", "text": "def get_jira_issue(jira_issue_id):\n return ErrataConnector()._get(f\"/jira_issues/{jira_issue_id}.json\")", "title": "" }, { "docid": "38b961af0bebe75997f17b8462c149cd", "score": "0.5037171", "text": "def search(q: str = Query(..., description=q_descr), # noqa: D103\n keyed: Optional[bool] = Query(False, description=keyed_descr),\n incl: Optional[str] = Query(None, description=incl_descr),\n excl: Optional[str] = Query(None, description=excl_descr)):\n try:\n resp = query_handler.search(html.unescape(q), keyed=keyed,\n incl=incl, excl=excl)\n except InvalidParameterException as e:\n raise HTTPException(status_code=422, detail=str(e))\n\n return resp", "title": "" }, { "docid": "079c4e9281087723a52ff41115e3d813", "score": "0.50361526", "text": "def get_issues(self, page=1):\n \n issues = Issue.objects.filter(open=True).order_by('pk')\n return self.get_paginated(issues, page, 30)", "title": "" }, { "docid": "48839b51e7831bfe2652e54ae52679ac", "score": "0.5031393", "text": "def get_defects_list(self, tc_key):\n try:\n issue_links = []\n bug_jql_str = \"project='%s' AND issue in linkedIssues('%s') AND type=bug AND status!=Closed\" % (self.project, tc_key, )\n # get statuses\n statuses = [x['name'] for x in self._get_all_statuses()]\n if 'Verified' in statuses:\n bug_jql_str = \"project='%s' AND issue in linkedIssues('%s') AND type=bug AND status!=Closed AND status!=Verified\" % (self.project, tc_key, )\n bugs = self.jira.search_issues(bug_jql_str)\n issue_links += [self.get_issue_key(bug) for bug in bugs if self.get_issue_key(bug) not in issue_links]\n self.class_logger.debug(\"Get list of issues with JQL: '%s'\" % bug_jql_str)\n return list(set(issue_links))\n except JIRAError as err:\n self.class_logger.warning(\"JIRAError: %s\" % (err, ))\n except Exception as err:\n self.class_logger.warning(\"Can\\'t get list of issues 
for. Error: %s\" % (err, ))\n return None", "title": "" }, { "docid": "82700b789fe0a6c1f8b11d0a63d1ff80", "score": "0.50255024", "text": "def deferred_or_declined(client):\n jql = f\"{standard_jql} AND resolution = Deferred\"\n deferred_issues = len(client.search_issues(jql))\n if deferred_issues == 0:\n log.warning(f'No deferred issues could be found for JQL: {jql}')\n jql = f\"{standard_jql} AND resolution = \\\"Won't Fix\\\"\"\n declined_issues = len(client.search_issues(jql))\n if declined_issues == 0:\n log.warning(f'No declined issues could be found for JQL: {jql}')\n return deferred_issues, declined_issues", "title": "" }, { "docid": "8144aec59cdb10c4783b966c5841473e", "score": "0.502453", "text": "def do_query(issues, config_file=None, logger=None, context=None):\n with open(config_file, \"r\") as f:\n config = yaml.load(f)\n if \"os_type\" in config:\n if config[\"os_type\"] == \"linux\":\n os_type = \"sys-i386-64\"\n else:\n os_type= \"sys-i386-snow-leopard\"\n else:\n os_type = \"sys-i386-64\"\n if \"defoe_path\" in config :\n defoe_path= config[\"defoe_path\"]\n else:\n defoe_path = \"./\"\n\n preprocess_type = query_utils.extract_preprocess_word_type(config)\n if \"data\" in config:\n data_file = query_utils.extract_data_file(config,\n os.path.dirname(config_file))\n else:\n data_file = None\n\n if \"start_year\" in config:\n start_year = int(config[\"start_year\"])\n else:\n start_year = None\n\n if \"start_year\" in config:\n end_year = int(config[\"end_year\"])\n else:\n end_year = None\n \n if \"num_target\" in config:\n num_target=config[\"num_target\"]\n else:\n num_target= None\n\n if \"target_filter\" in config:\n target_filter=config[\"target_filter\"]\n else:\n target_filter = \"or\"\n\n keysentences = []\n with open(data_file, 'r') as f:\n for keysentence in list(f):\n k_split = keysentence.split()\n sentence_word = [query_utils.preprocess_word(\n word, preprocess_type) for word in k_split]\n sentence_norm = ''\n for word in sentence_word:\n if sentence_norm == '':\n sentence_norm = word\n else:\n sentence_norm += \" \" + word\n keysentences.append(sentence_norm)\n # [(year, article_string), ...]\n \n if num_target: \n target_sentences = keysentences[0:num_target]\n else\n target_sentences= None\n\n keysentences = keysentences[lexicon_start:]\n if start_year and end_year:\n clean_articles = issues.flatMap(\n lambda issue: [(issue.date.year, issue, article, clean_article_as_string(\n article, defoe_path, os_type), issue.filename, article.quality) for article in issue.articles if int(issue.date.year)>= start_year and int(issue.date.year)<= end_year])\n\n elif start_year:\n clean_articles = issues.flatMap(\n lambda issue: [(issue.date.year, clean_article_as_string(\n article, defoe_path, os_type), issue.filename, article.quality) for article in issue.articles if int(issue.date.year)>= start_year])\n elif end_year:\n clean_articles = issues.flatMap(\n lambda issue: [(issue.date.year, clean_article_as_string(\n article, defoe_path, os_type), issue.filename, article.quality) for article in issue.articles if int(issue.date.year)<= end_year])\n else:\n clean_articles = issues.flatMap(\n lambda issue: [(issue.date.year, clean_article_as_string(\n article, defoe_path, os_type), issue.filename, article.quality) for article in issue.articles])\n\n \n # [(year, preprocess_article_string), ...]\n t_articles = clean_articles.flatMap(\n lambda cl_article: [(cl_article[0], \n preprocess_clean_article(cl_article[1], preprocess_type), cl_article[2], cl_article[3])]) \n if 
target_sentences:\n if target_filter == \"or\":\n target_articles = t_articles.filter(\n lambda year_article: any(\n target_s in year_article[1] for target_s in target_sentences))\n\n else:\n target_articles = t_articles\n target_articles = reduce(lambda r, target_s: r.filter(lambda year_page: target_s in year_article[1]), target_sentences, target_articles)\n else:\n target_articles = t_articles\n\n\n # [(year, clean_article_string, issue.filename, article.quality)\n filter_articles = target_articles.filter(\n lambda year_article: any(\n keysentence in year_article[1] for keysentence in keysentences))\n\n matching_idx = filter_articles.map(\n lambda year_article_file_ocr: (\n (year_article_file_ocr[0],\n year_article_file_ocr[1],\n year_article_file_ocr[2],\n get_text_keysentence_idx(year_article_file_orc[1],\n keysentences))\n year_article_file_ocr[3])\n )\n )\n\n # [(year, [(filename, word, [concordance, ...], ocr), ...])]\n concordance_words = matching_idx.flatMap(\n lambda year_article_file_matches_ocr: [\n (year_article_file_matches_ocr[0],\n (year_article_file_matches_ocr[2],\n word_idx[0],\n get_concordance_string(year_article_file_matches_ocr[1], \n word_idx[0], \n word_idx[1], \n window)\n year_article_file_matches_ocr[4]))\n for word_idx in year_article_file_matches_ocr[3]])\n\n # [(year, [(filename, word, corcondance, ocr),\n # (filename, word, concordance, ocr), ...]), ...]\n\n\n result = concordance_words.groupByKey() \\\n .map(lambda year_match:\n (year_match[0], list(year_match[1]))) \\\n .collect()\n return result", "title": "" }, { "docid": "aae2b2c883111579be061d548fc72a23", "score": "0.5013358", "text": "def get_carddetails(jira, db, issues):\n logger.info(' Number of issues found [' + str(issues.__len__()) + ']')\n for issue in issues:\n logger.debug(issue.key + ' [' + issue.fields.summary + ']')\n\n team = \"\"\n\tif issue.fields.labels.__len__() > 0:\n logger.debug(', '.join(issue.fields.labels))\n for t in issue.fields.labels:\n m = teampattern.match(t)\n if m:\n team = t[5:]\n \n #iterate through each issue and add it to the database\n db.append({'key': issue.key,\n 'assignee': issue.fields.assignee.name if issue.fields.assignee is not None else \"Unassigned\",\n 'summary': issue.fields.summary,\n 'fixversion': issue.fields.fixVersions[0].name if issue.fields.fixVersions.__len__() > 0 else \"\" ,\n 'labels': ', '.join(issue.fields.labels) if issue.fields.labels.__len__() > 0 else \"\" ,\n 'confidence': issue.fields.customfield_11200,\n 'status': issue.fields.status.name,\n 'rank': issue.fields.customfield_10900,\n 'engineeringprogress': issue.renderedFields.customfield_10204,\n 'team' : team})", "title": "" }, { "docid": "64ada01271db8320e427563d074bcb33", "score": "0.5002547", "text": "def query_issue_ids_with_index(self, project_key, start_idx, block_size):\n issues = self._jira.search_issues('project=' + project_key, start_idx, block_size)\n return [issue.key for issue in issues]", "title": "" }, { "docid": "77d0ab57d119b7237355833b5f93aa90", "score": "0.49973476", "text": "def get_issue(self, project, issue_iid):\n url = self.url + '/api/v4/projects/' + urllib.parse.quote(project, safe='') + '/issues/' + str(issue_iid)\n headers = { 'PRIVATE-TOKEN': self.token }\n response = requests.get(url, headers=headers)\n \n if response.status_code == 404:\n return None\n elif response.status_code > 299:\n raise Exception('Unhandled http response code')\n \n return response.json()", "title": "" }, { "docid": "0b32de560f61ae5ad93659ad275ad242", "score": "0.49886084", 
"text": "def do_query(issues, config_file=None, logger=None, context=None):\n threshold = 1\n if config_file is not None and\\\n os.path.exists(config_file) and\\\n os.path.isfile(config_file):\n with open(config_file, \"r\") as f:\n config = yaml.load(f)\n value = config[\"threshold\"]\n threshold = max(threshold, value)\n\n # [article, article, ...]\n articles = issues.flatMap(lambda issue:\n [article for article in issue.articles])\n\n # [(word, 1), (word, 1), ...]\n words = articles.flatMap(lambda article:\n [(query_utils.normalize(word), 1) for word in article.words])\n\n # [(word, 1), (word, 1), ...]\n # =>\n # [(word, count), (word, count), ...]\n word_counts = words. \\\n reduceByKey(add). \\\n filter(lambda word_year: word_year[1] > threshold). \\\n collect()\n return word_counts", "title": "" }, { "docid": "d07f00e1fb609519843ff15543a2072d", "score": "0.49829307", "text": "def get_issue_by_name(self, name, issue_type=\"Test Case\"):\n self._set_default_custom_fields()\n jql_str = \"project='%s' AND issuetype='%s' AND summary ~ '%s'\" % (self.project, issue_type, name, )\n try:\n issues = self._get_all_issues(jql_str)\n for issue in issues:\n if self.get_summary(issue) == name:\n self.class_logger.debug(\"Found %s: '%s'\" % (issue_type, name))\n return issue\n return None\n except IndexError:\n return None", "title": "" }, { "docid": "87dc903c0d05146fd4b581c636357048", "score": "0.49798182", "text": "def search_query():\n response = requests.request(\"GET\", BASE_URL, params=query)\n return response", "title": "" }, { "docid": "9ebd928d8c80bc8c1ed04814d50cc20c", "score": "0.4971561", "text": "def search(self, query):\r\n return self.get_values(\"search\", query, filter=\"repositories\",\r\n datatype=Repository)", "title": "" } ]
513423d0730ce67c2ce4b29c54cea9b8
Name of the torrent
[ { "docid": "f502bdaaa7e80f70f74bffc52d4af1b2", "score": "0.6839437", "text": "def name(self):\n if 'name' not in self.metainfo['info'] and self.path is not None:\n self.metainfo['info']['name'] = os.path.basename(self.path)\n return self.metainfo['info'].get('name', None)", "title": "" } ]
[ { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "edc7919d4d56da18ec23e5bdee1b835a", "score": "0.7028946", "text": "def getName(self) -> unicode:\n ...", "title": "" }, { "docid": "0c5df51fa3c75c12c909d399efd382d6", "score": "0.69144017", "text": "def name(self) -> str:\n return self._tp.name", "title": "" }, { "docid": "29ac708ecfdd2dc50f68feceabbb36f8", "score": "0.68522793", "text": "def name():\n\n return", "title": "" }, { "docid": "978620e4fcd9ea0c6956671de262d9d6", "score": "0.67488927", "text": "def name(self):\n return self._info['name']", "title": "" }, { "docid": "978620e4fcd9ea0c6956671de262d9d6", "score": "0.67488927", "text": "def name(self):\n return self._info['name']", "title": "" }, { "docid": "7600a06c839675b23a2f04715d706591", "score": "0.6742731", "text": "def name(self) -> str:\n return self.raw[\"name\"]", "title": "" }, { "docid": "2a0ff9505423d71afa08cf7c0807897f", "score": "0.6709706", "text": "def Name(self) -> str:", "title": "" }, { "docid": "2a0ff9505423d71afa08cf7c0807897f", "score": "0.6709706", "text": "def Name(self) -> str:", "title": "" }, { "docid": "a6248fd392a289f96c067568ff7e2312", "score": "0.6694416", "text": "def name(self):\r\n return self.data.title", "title": "" }, { "docid": "59aec70e7c7f96d14927455626d481db", "score": "0.6693344", "text": "def name(self):\n return '๋„ค์ด๋ฒ„ ๋‚ ์”จ'", "title": "" }, { "docid": "b9ccfc564a5db8d7cd3cab98607dc05b", "score": "0.66866124", "text": "def name(self) -> str:", "title": "" }, { "docid": "b9ccfc564a5db8d7cd3cab98607dc05b", "score": "0.66866124", "text": "def name(self) -> str:", "title": "" }, { "docid": "b9ccfc564a5db8d7cd3cab98607dc05b", "score": "0.66866124", "text": "def name(self) -> str:", "title": "" }, { "docid": "b9ccfc564a5db8d7cd3cab98607dc05b", "score": "0.66866124", "text": "def name(self) -> str:", "title": "" }, { "docid": "b9ccfc564a5db8d7cd3cab98607dc05b", "score": "0.66866124", "text": "def name(self) -> str:", "title": "" }, { "docid": "b9ccfc564a5db8d7cd3cab98607dc05b", "score": "0.66866124", "text": "def name(self) -> str:", "title": "" }, { "docid": "3930676cf030287e76e9c40051c26835", "score": "0.66841495", "text": "def get_name(self):\n res = cmd_get_name(self.hid)\n if res:\n self.name = bytes(res[1:25]).decode(\"utf-8\")", "title": "" }, { "docid": "cfa81d19b1d9dacd411f856ae23fb0f5", "score": "0.66560733", "text": "def name(self) -> str:\n ...", "title": "" }, { "docid": "73e1c49b4d984b1930970a7e7f5a9ae6", "score": "0.66543585", "text": "def name(self):\n return self.name_str(self, use_revision=False)", "title": "" }, { "docid": "f8aea4acde9a91a1470a7c91114a5e57", "score": "0.66538143", "text": "def name(self):\n return self._network.tags.get('Name', self._network.name)", "title": "" }, { "docid": "2d5b5fa3e5c2f0f0ca31a92a5f4653e1", "score": 
"0.66456604", "text": "def name(self):\n return self.raw.get(\"name\")", "title": "" }, { "docid": "29b23b2cc5fcab17cb54150a684a6b6c", "score": "0.66424924", "text": "def get_name(self):\r\n\t\tpass", "title": "" }, { "docid": "5bf0ac2a04245d4947bb0a6fc864f907", "score": "0.66347075", "text": "def get_name():\n pass", "title": "" }, { "docid": "7ea433312562d9529d838ab68315606e", "score": "0.6633245", "text": "def name(self):\n return self._stream.name", "title": "" }, { "docid": "8483b21f4d91e3b3370421a1ebfc7ffc", "score": "0.66207296", "text": "def name(self):\r\n return self.data.name.strip()", "title": "" }, { "docid": "62b41f47b947c129b227f90021b3abd5", "score": "0.65891224", "text": "def name(self) -> str:\n pass", "title": "" }, { "docid": "62b41f47b947c129b227f90021b3abd5", "score": "0.65891224", "text": "def name(self) -> str:\n pass", "title": "" }, { "docid": "62b41f47b947c129b227f90021b3abd5", "score": "0.65891224", "text": "def name(self) -> str:\n pass", "title": "" }, { "docid": "62b41f47b947c129b227f90021b3abd5", "score": "0.65891224", "text": "def name(self) -> str:\n pass", "title": "" }, { "docid": "0de1248f6ac2d14a823a07da9cf5c13b", "score": "0.6582181", "text": "def name(self):\r\n\t\treturn self._name", "title": "" }, { "docid": "32f4ecf3c424c1f305e7ab1b158fd9de", "score": "0.657422", "text": "def name(self) -> str:\n return self.data['name']", "title": "" }, { "docid": "65d2d961b971c468e6d2c8ff56685467", "score": "0.6572875", "text": "def name(self):\n\n return self.get_nuke_node_name()", "title": "" }, { "docid": "5b34c084b9d60d1798f5fc4e445d95e8", "score": "0.6548928", "text": "def get_name(self):\n pass", "title": "" }, { "docid": "5b34c084b9d60d1798f5fc4e445d95e8", "score": "0.6548928", "text": "def get_name(self):\n pass", "title": "" }, { "docid": "bf6f7b7142df5297dd0a4dc3e9aaabb8", "score": "0.6544273", "text": "def name(self):\n\t\treturn self._name", "title": "" }, { "docid": "bf6f7b7142df5297dd0a4dc3e9aaabb8", "score": "0.6544273", "text": "def name(self):\n\t\treturn self._name", "title": "" }, { "docid": "bf6f7b7142df5297dd0a4dc3e9aaabb8", "score": "0.6544273", "text": "def name(self):\n\t\treturn self._name", "title": "" }, { "docid": "bf6f7b7142df5297dd0a4dc3e9aaabb8", "score": "0.6544273", "text": "def name(self):\n\t\treturn self._name", "title": "" }, { "docid": "bf6f7b7142df5297dd0a4dc3e9aaabb8", "score": "0.6544273", "text": "def name(self):\n\t\treturn self._name", "title": "" }, { "docid": "68fef606de84023f7d1a421df5c926e0", "score": "0.65420246", "text": "def name(self):\n return self._getdata(\"name\")", "title": "" }, { "docid": "68fef606de84023f7d1a421df5c926e0", "score": "0.65420246", "text": "def name(self):\n return self._getdata(\"name\")", "title": "" }, { "docid": "68fef606de84023f7d1a421df5c926e0", "score": "0.65420246", "text": "def name(self):\n return self._getdata(\"name\")", "title": "" }, { "docid": "68fef606de84023f7d1a421df5c926e0", "score": "0.65420246", "text": "def name(self):\n return self._getdata(\"name\")", "title": "" }, { "docid": "5149552f133bc47b1a546e4d93e9ca5f", "score": "0.6540506", "text": "def name(self) -> str:\n return self._data.name", "title": "" }, { "docid": "04a8a5077ba98023a90f5369378e11fc", "score": "0.6511574", "text": "def get_name(self):\n\t\treturn self.name", "title": "" }, { "docid": "04a8a5077ba98023a90f5369378e11fc", "score": "0.6511574", "text": "def get_name(self):\n\t\treturn self.name", "title": "" }, { "docid": "8c5ebdbd6f99517ab6c3708e995a47cb", "score": "0.65113235", "text": "def name( 
self ):\n\t\treturn self._name", "title": "" }, { "docid": "8a16deb979ff841f0df1137b2465a9bb", "score": "0.6507158", "text": "def name(self):\n return '{}'.format(self._name)", "title": "" }, { "docid": "98d61f305effd0b5789a6b9b5e846a0d", "score": "0.649966", "text": "def name(self):\n return self._file.name", "title": "" }, { "docid": "98d61f305effd0b5789a6b9b5e846a0d", "score": "0.649966", "text": "def name(self):\n return self._file.name", "title": "" }, { "docid": "356b938c4117b74a03493cb75db7e0cb", "score": "0.6496704", "text": "def get_name(self):\n\t\t\n\t\treturn self.name", "title": "" }, { "docid": "4ce5cf7480b0046d6b22ddbb9cce2f24", "score": "0.6492549", "text": "def name(self) :\n\t\ttry :\n\t\t\treturn self._name\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "4ce5cf7480b0046d6b22ddbb9cce2f24", "score": "0.6492549", "text": "def name(self) :\n\t\ttry :\n\t\t\treturn self._name\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "4ce5cf7480b0046d6b22ddbb9cce2f24", "score": "0.6492549", "text": "def name(self) :\n\t\ttry :\n\t\t\treturn self._name\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "4ce5cf7480b0046d6b22ddbb9cce2f24", "score": "0.6492549", "text": "def name(self) :\n\t\ttry :\n\t\t\treturn self._name\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "4ce5cf7480b0046d6b22ddbb9cce2f24", "score": "0.6492549", "text": "def name(self) :\n\t\ttry :\n\t\t\treturn self._name\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "5c4a891cf9d6efca3fd9e3efc80a61de", "score": "0.6490195", "text": "def name() -> str:\n pass", "title": "" }, { "docid": "973cd13a8b7967e00ddded933f5fe19c", "score": "0.6485841", "text": "def name(self):\r\n\t\treturn os.path.basename(self.location())", "title": "" }, { "docid": "ac0ca0fe06b300713995391dbf4a528b", "score": "0.6484197", "text": "def name(self):\n return '4 - Transmission finale'", "title": "" }, { "docid": "c9e85727de9f008a90ab463e625523e2", "score": "0.64841485", "text": "def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self.__light.nick_name", "title": "" }, { "docid": "9653f6a7d0da56e01acd8aee100a87e2", "score": "0.64805835", "text": "def name(self)-> str:", "title": "" }, { "docid": "897ed57751b0b9f27326d01d90d73573", "score": "0.64780414", "text": "def name(self) -> str:\r\n return self._name", "title": "" }, { "docid": "387b9ffa2b8ad9d171e353fc775a9d7b", "score": "0.64765286", "text": "def name(self):\n\n\t\treturn self._name", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": "e6b98fb018b60f0fb5381b1c2a0f0f33", "score": "0.6476422", "text": "def name(self):\n pass", "title": "" }, { "docid": 
"4a59eea2d6a577fd24bfebdb0214c365", "score": "0.64644146", "text": "def name(self):\n return self._fileobj.name", "title": "" }, { "docid": "d0ffe0ebde3d56ebae9d16c2846b5d36", "score": "0.64571875", "text": "def name(self) -> str:\n return self._get(\"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" }, { "docid": "dab9232825e5690c39a1715767d2679e", "score": "0.64564896", "text": "def name(self) -> str:\n return pulumi.get(self, \"name\")", "title": "" } ]
8e5f61833b87cea1b17fb804514a3fbc
Returns a list of possible board layouts, given the player whose turn it is, and a list of valid moves.
[ { "docid": "3a957289a63ff511b3ccdcd8b6daffa7", "score": "0.7096633", "text": "def get_possible_boards(board, moves, player):\n boards = []\n for move in moves:\n new_board = board[:]\n new_board[int(move)-1] = player\n boards.append(new_board)\n return boards", "title": "" } ]
[ { "docid": "0e50adc01cbb6cb681b3a100d0724f31", "score": "0.65457636", "text": "def available_moves(self, player):\n pieces = 0\n if player == Player.white:\n pieces = self._white_pieces\n elif player == Player.black:\n pieces = self._black_pieces\n else:\n raise ValueError(\"Only white and black players can move\")\n\n max_width, max_height = self.WIDTH - 1, self.HEIGHT - 1\n for x in range(self.WIDTH):\n for y in range(self.HEIGHT):\n index = x + y * self.WIDTH\n if (pieces >> index) & 1:\n if x != 0 and self.get(x - 1, y) == Player.none:\n yield Move(x, y, Direction.west)\n if x != max_width and self.get(x + 1, y) == Player.none:\n yield Move(x, y, Direction.east)\n if y != 0 and self.get(x, y - 1) == Player.none:\n yield Move(x, y, Direction.north)\n if y != max_height and self.get(x, y + 1) == Player.none:\n yield Move(x, y, Direction.south)", "title": "" }, { "docid": "fd2650407725dbb4d4d1f16f25b20f5f", "score": "0.6523557", "text": "def possible_moves_board(self, turn = True):\n possible_moves = np.where(self.board==0)[0]\n posibilities = np.zeros((len(possible_moves), 9))\n for i, move_ind in enumerate(possible_moves):\n new_board = np.copy(self.board)\n new_board[move_ind] = turn\n posibilities[i] = new_board\n\n return posibilities", "title": "" }, { "docid": "23cb05d920787816c26f1a63fbfa5de0", "score": "0.64932907", "text": "def get_moves_by_combination(self, player):\n moves = []\n for combo in self.winning_combos:\n move = []\n for cell in combo:\n b, r, c = self.find(self.board, cell)\n if self.board[b][r][c] == player:\n move += [cell]\n moves += [move]\n return moves", "title": "" }, { "docid": "791326d74328771015b1f180bb7a0da7", "score": "0.64919853", "text": "def legal_moves(self):\n moves = []\n if self.winner() is None: # comparisons with None are weird\n for row in range(len(self.board)):\n for col in range(len(self.board)):\n if self.board[row][col] == \" \":\n moves.append([row, col])\n return moves", "title": "" }, { "docid": "5c567b9074306056ecc34c421c0b220a", "score": "0.6445187", "text": "def legal_moves(self):\n player = self.player\n game_set = self.game_set\n moves = []\n\n # If the player is white, we want to loop over all white pieces,\n # otherwise we want to loop over all black pieces\n if player == 1:\n pieces = game_set.white_pieces\n else:\n pieces = game_set.black_pieces\n\n # For each piece we loop through all the squares between the piece and\n # the edge of the board in each of the four directions (neighbours)\n for piece in pieces:\n for neighbour in game_set.neighbours:\n for i in range(1,6):\n new_sq = add(piece, neighbour, const=i)\n \n # If the current square is off the board, contains\n # another piece, or is a special square while the\n # current piece is not a king, move onto looping over\n # the next direction\n if not game_set.is_on_board(new_sq):\n break\n if game_set.board[new_sq] != 0:\n break\n if new_sq in game_set.special_squares and \\\n game_set.board[piece] != 2:\n break\n moves.append(piece + new_sq)\n\n return moves", "title": "" }, { "docid": "67663e010dbb435ab297e6f449ce35e6", "score": "0.64038146", "text": "def get_available_moves(self, player):\n available_moves = []\n player_sign = config().get('board_signs')[player]\n\n player_positions = self.get_positions(player)\n if len(player_positions) == 3:\n available_positions = self.get_positions('available')\n for position in player_positions:\n available_move = {\n 'from': position,\n 'to': available_positions\n }\n available_moves.append(available_move)\n return 
available_moves\n\n for y_index, y in enumerate(self._game_board):\n for x_index, x in enumerate(y):\n if x == player_sign:\n available_neighbours = self.get_available_neighbours(\n y_index, x_index)\n if len(available_neighbours) == 0:\n continue\n\n available_move = {\n 'from': [y_index, x_index],\n 'to': available_neighbours\n }\n available_moves.append(available_move)\n\n return available_moves", "title": "" }, { "docid": "1dc864973da32fa7a91c22a687c8f537", "score": "0.6401441", "text": "def possible_moves(self):\n moves = [(row, col) for row in range(self.size)\n for col in range(self.size)\n if self.board[row][col] is None]\n return moves", "title": "" }, { "docid": "c2b1c3ac30ad5770d97bb929f61eb463", "score": "0.6275409", "text": "def get_available_moves(self, board):\n\n i, j = self.index\n available_moves = []\n\n # Vertical \n explore_i = i\n while explore_i >= 0 and j >= 0 and explore_i < 8 and j < 8:\n explore_i += 1\n if explore_i >= 0 and explore_i < 8:\n if board[explore_i][j] == 0:\n available_moves.append((explore_i, j))\n continue\n if board[explore_i][j] != 0 and board[explore_i][j].color != self.color:\n available_moves.append((explore_i, j))\n break\n else:\n break\n\n explore_i = i\n while explore_i >= 0 and j >= 0 and explore_i < 8 and j < 8:\n explore_i -= 1\n if explore_i >= 0 and explore_i < 8:\n if board[explore_i][j] == 0:\n available_moves.append((explore_i, j))\n continue\n if board[explore_i][j] != 0 and board[explore_i][j].color != self.color:\n available_moves.append((explore_i, j))\n break\n else:\n break\n\n # Horizontal\n explore_j = j\n while i >= 0 and explore_j >= 0 and i < 8 and explore_j < 8:\n explore_j += 1\n if explore_j >= 0 and explore_j < 8:\n if board[i][explore_j] == 0:\n available_moves.append((i, explore_j))\n continue\n if board[i][explore_j] != 0 and board[i][explore_j].color != self.color:\n available_moves.append((i, explore_j))\n break\n else:\n break\n \n explore_j = j\n while i >= 0 and explore_j >= 0 and i < 8 and explore_j < 8:\n explore_j -= 1\n if explore_j >= 0 and explore_j < 8:\n if board[i][explore_j] == 0:\n available_moves.append((i, explore_j))\n continue\n if board[i][explore_j] != 0 and board[i][explore_j].color != self.color:\n available_moves.append((i, explore_j))\n break\n else:\n break\n\n return available_moves", "title": "" }, { "docid": "4144bf7eb7f7ecc8d3d7bf2ce5067358", "score": "0.62628543", "text": "def get_goal_layout(self, goal_moves):\n game = Game(self.sourcefile)\n for choice in tuple(goal_moves):\n game.move(choice)\n game.board.create_layout()\n\n return game.board.layout", "title": "" }, { "docid": "ac0493edbc2af100cbfebccf820c565f", "score": "0.6253777", "text": "def get_possible_moves(self) -> list:\n one = 0\n two = 0\n winning_num = math.ceil(((self.board_size + 1) * 3) / 2)\n for row in self.ley_lines:\n one += row.count(\"1\")\n two += row.count(\"2\")\n if one >= winning_num or two >= winning_num:\n return []\n\n letters = []\n for row in self.horizontal_rows:\n for letter in row:\n if letter.isalpha():\n letters.append(letter)\n return letters", "title": "" }, { "docid": "ac59e9c0daf9865556907d77a1e3264f", "score": "0.6245685", "text": "def possible_moves(self, board, player_id):\n moves = []\n for c in range(self.columns()):\n move = self.simulate_move(column=c, input_board=board, player_id=player_id)\n if move is not None:\n moves.append((move, c))\n return moves", "title": "" }, { "docid": "929b48dfa827d93a6c5d5ee37e7da21e", "score": "0.6229917", "text": "def get_legal_moves(self, 
board):", "title": "" }, { "docid": "dea81b31db016a9ea2d6789b7ee52197", "score": "0.62144685", "text": "def get_possible_moves(self) -> List[str]:\n total_ley_line = len(self.lls[0]) * 3\n p1_count = sum([\n int(item == \"1\")\n for sublist in self.lls\n for item in sublist\n ])\n p2_count = sum([\n int(item == \"2\")\n for sublist in self.lls\n for item in sublist\n ])\n if p1_count >= total_ley_line / 2 or p2_count >= total_ley_line / 2:\n # If the game is currently over, return empty possible move list.\n return []\n return [\n node\n for row in self.graph\n for node in row\n if (node not in [\"1\", \"2\"])\n ]", "title": "" }, { "docid": "eef04fb26bc540c680db18f21a3047b1", "score": "0.6185031", "text": "def get_possible_moves(self):\n\n return [(row, col) for row, col in\n [(i // self.cols, i % self.cols) for i in range(self.rows*self.cols)]\n if self.is_wall_spot((row, col)) and self.is_empty((row, col))]", "title": "" }, { "docid": "5b09dbc2ed236cf438cf44bb8977a6b5", "score": "0.6169334", "text": "def get_possible_moves(self):\n if self.current_player == \"p1\":\n if self.player1l != 0 and self.player2l != 0 and self.player2r != 0\\\n and self.player1r != 0:\n self.possible_moves = [\"ll\", \"lr\", \"rl\", \"rr\"]\n elif self.player1r == 0 and self.player1l != 0 and \\\n self.player2l != 0 and self.player2r != 0:\n self.possible_moves = [\"ll\", \"lr\"]\n elif self.player1r != 0 and self.player1l == 0 and \\\n self.player2l != 0 and self.player2r != 0:\n self.possible_moves = [\"rl\", \"rr\"]\n elif self.player1r != 0 and self.player1l != 0 and \\\n self.player2l != 0 and self.player2r == 0:\n self.possible_moves = [\"rl\", \"ll\"]\n elif self.player1r != 0 and self.player1l != 0 and \\\n self.player2l == 0 and self.player2r != 0:\n self.possible_moves = [\"lr\", \"rr\"]\n elif self.player1r == 0 and self.player1l != 0 and \\\n self.player2l != 0 and self.player2r != 0:\n self.possible_moves = [\"lr\"]\n elif self.player1r != 0 and self.player1l == 0 and \\\n self.player2l == 0 and self.player2r != 0:\n self.possible_moves = [\"rr\"]\n elif self.player1r == 0 and self.player1l != 0 and \\\n self.player2l != 0 and self.player2r == 0:\n self.possible_moves = [\"ll\"]\n elif self.player1r != 0 and self.player1l == 0 and \\\n self.player2l != 0 and self.player2r == 0:\n self.possible_moves = [\"lr\"]\n elif self.player1r == 0 and self.player1l == 0:\n self.possible_moves = []\n else:\n if self.player1l != 0 and self.player2l != 0 and self.player2r != 0\\\n and self.player1r != 0:\n self.possible_moves = [\"ll\", \"lr\", \"rl\", \"rr\"]\n elif self.player1l != 0 and self.player2l != 0 and \\\n self.player2r == 0 and self.player1r != 0:\n self.possible_moves = [\"ll\", \"lr\"]\n elif self.player1l != 0 and self.player2l == 0 and \\\n self.player2r != 0 and self.player1r != 0:\n self.possible_moves = [\"rl\", \"rr\"]\n elif self.player1l == 0 and self.player2l != 0 and \\\n self.player2r == 0 and self.player1r != 0:\n self.possible_moves = [\"rl\", \"ll\"]\n elif self.player1l != 0 and self.player2l != 0 and \\\n self.player2r == 0 and self.player1r == 0:\n self.possible_moves = [\"lr\", \"rr\"]\n elif self.player1l == 0 and self.player2l != 0 and \\\n self.player2r == 0 and self.player1r != 0:\n self.possible_moves = [\"lr\"]\n elif self.player1l == 0 and self.player2l == 0 and \\\n self.player2r != 0 and self.player1r != 0:\n self.possible_moves = [\"rr\"]\n elif self.player1l != 0 and self.player2l != 0 and \\\n self.player2r == 0 and self.player1r == 0:\n self.possible_moves = 
[\"ll\"]\n elif self.player1l != 0 and self.player2l == 0 and \\\n self.player2r != 0 and self.player1r == 0:\n self.possible_moves = [\"rl\"]\n elif self.player2r == 0 and self.player2l == 0:\n self.possible_moves = []\n return self.possible_moves", "title": "" }, { "docid": "04ca7743c75fdf57f44bf8f8277753dc", "score": "0.60924506", "text": "def get_possible_move(self):\n moves = set()\n for i, r in enumerate(self._board):\n for j, c in enumerate(r):\n if c is None:\n moves.add((i, j))\n return moves", "title": "" }, { "docid": "1c683546618bd37ba7931045e74ce6ae", "score": "0.60839665", "text": "def solver(board: List=None, length: int=4, validset: str=''):\n exclude = set()\n if board is None:\n # guess 1st solution\n return choices(validset, k=length)\n elif len(board) == 1:\n # guess 2nd attempt\n for attempt, black, white in board:\n good = (black + white)\n if good == length:\n return choices(attempt, k=length)\n elif good > 0:\n newvals = (choices(attempt, k=good) + \n choices([x for x in validset if x not in attempt], \n k=(length - good)))\n return choices(newvals, k=length)\n else:\n # everything to be excluded\n exclude.update(attempt)\n return choices([x for x in validset if x not in exclude], \n k=length)\n else:\n s_board = sorted(board, key=itemgetter(1, 2), reverse=True)\n print(s_board)\n return tuple(\"quit\")", "title": "" }, { "docid": "077ab77b03b610b8a889aef3a50a9140", "score": "0.6060835", "text": "def get_valid_moves(self, board, current_move=True):\n valid_moves = []\n options = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\n for op in options:\n for i in range(1, 9):\n row = self.row + op[0] * i\n col = self.col + op[1] * i\n if row < 0 or col < 0:\n break\n if board.is_space_empty(row, col) is not None:\n if board.is_space_empty(row, col):\n valid_moves.append((row, col))\n else:\n if board.team_on(row, col) != self.team:\n valid_moves.append((row, col))\n break\n else:\n break\n\n if current_move:\n valid_moves = self.king_in_harms_way(board, valid_moves)\n\n return valid_moves", "title": "" }, { "docid": "75bedf89e9fdb3c58db54b6f3e34a15c", "score": "0.6054074", "text": "def get_possible_moves(self,board):\n # determine possible move directions from the color of this checker\n # explore_direction: a list of tuples of coordinates of possible move\n # - current position.\n # e.g. now we have a checker piece at 1,1 black. It can move to 2,0\n # and 2,2. 
explore_direction will be [(1,-1), (1, 1)]\n if self.color == '.':\n return []\n directions = {\"W\":[(-1,-1),(-1,1)],\"B\":[(1,-1),(1,1)]}\n result = []\n multiple_jump = []\n board = copy(board)\n is_capture = False\n explore_direction = directions[self.color]\n if self.is_king:\n explore_direction.extend(directions[board.opponent[self.color]])\n # a king can go all directions\n # but do we allow fly king?\n for i in explore_direction:\n pos_x,pos_y = self.row+i[0],self.col+i[1]\n if board.is_in_board(pos_x,pos_y):\n if board.board[pos_x][pos_y].color == '.':\n result.append(Move([(self.row,self.col),(pos_x,pos_y)]))\n # save_color = board.board[self.row][self.col].color\n save_color = board.board[self.row][self.col].color\n board.board[self.row][self.col].color = \".\"\n self.binary_tree_traversal(self.row,self.col,multiple_jump, board, explore_direction, [],save_color)\n # filter out those at margins\n if multiple_jump != []:\n is_capture = True\n result = []\n for jump in multiple_jump:\n jump.insert(0,(self.row,self.col))\n result.append(Move(jump))\n board.board[self.row][self.col].color = save_color\n return result, is_capture", "title": "" }, { "docid": "b3b28b93dfd9cf304ec80e49b87c3129", "score": "0.6045116", "text": "def get_possible_moves(self) -> list:\n s = []\n p1 = 0\n p2 = 0\n for leyline in self.leyline:\n if leyline == '1':\n p1 += 1\n if leyline == '2':\n p2 += 1\n if p1 >= len(self.leyline) / 2 or p2 >= len(self.leyline) / 2:\n return s\n for rows in self.cell:\n for cells in rows:\n if cells != '1' and cells != '2':\n s.append(cells)\n s = set(s)\n s = list(s)\n s.sort()\n return s", "title": "" }, { "docid": "88c02f6a53663126a4317aa634b2d042", "score": "0.6030034", "text": "def get_valid_moves(self, board, player):\n moves = []\n for index in range(100):\n if board[index]==EMPTY:\n moves_list = self.is_move_valid(board, player, index)\n if len(moves_list)!=0:\n moves.append(index)\n return moves", "title": "" }, { "docid": "4063a2e2c9ca5844e82f6cd6fddfd69c", "score": "0.6021742", "text": "def available_moves(self, player):\n if player not in self.next_players():\n raise ValueError('Not turn of player %s' % player)\n\n return filter(lambda pos: pos not in self._board, Board.ALL_POSITIONS)", "title": "" }, { "docid": "86fa87756bb85df1f7a5e24bb56bcbe1", "score": "0.6021095", "text": "def get_possible_moves(self) -> list:\n lst = []\n total = len(self.left_diagonal +\n self.right_diagonal + self.horizontal)\n p1l = self.left_diagonal.count(1)\n p2l = self.left_diagonal.count(2)\n p1r = self.right_diagonal.count(1)\n p2r = self.right_diagonal.count(2)\n p1h = self.horizontal.count(1)\n p2h = self.horizontal.count(2)\n if sum([p1l, p1r, p1h]) >= int(ceil(total / 2)) or sum(\n [p2l, p2r, p2h]) >= int(ceil(total)):\n return []\n\n for x in sum(self.grid, []):\n if x in self.constant_letters:\n lst.append(x)\n return lst", "title": "" }, { "docid": "1a36a3a594e546b7bf585a9a433e3823", "score": "0.5998229", "text": "def board7():\n return [\n ['X', 'X', 'O', 'X'],\n ['X', 'O', 'X', 'O'],\n ['X', 'O', 'O', 'O'],\n ['O', 'O', 'O', 'O'],\n ]", "title": "" }, { "docid": "423d1bf21b32a1b6756fa5c68793af6c", "score": "0.5979864", "text": "def get_moves(self, player):\n moves = []\n cnt = 0\n for i in range(3):\n for x in range(3):\n for y in range(3):\n if self.board[i][x][y] == player:\n moves += [cnt]\n cnt += 1\n return moves", "title": "" }, { "docid": "b1487e34f3b36e0e56338f57c355ba82", "score": "0.5976825", "text": "def board9():\n return [\n ['X', 'X', 'O', 'X', 'O'],\n ['X', 
'O', 'X', 'O', 'O'],\n ['X', 'O', 'X', 'O', 'X'],\n ['O', 'X', 'O', 'X', 'O'],\n ['O', 'O', 'X', 'X', 'O'],\n ]", "title": "" }, { "docid": "6fcb8c86c6a864f48b8d1e6a0ce9bbbb", "score": "0.59546113", "text": "def getPossibleMoves(self, board):\n possible = []\n for row in range(self.size):\n for col in range(self.size):\n if board[row][col] == -1:\n possible.append((row, col))\n return possible", "title": "" }, { "docid": "defbf261e69c157490888a41afba264f", "score": "0.5923475", "text": "def board4():\n return [\n ['X', 'O', 'X'],\n ['X', 'X', 'O'],\n ['O', '*', 'O'],\n ]", "title": "" }, { "docid": "cfe3f83b24779e3925f64962411ceeb3", "score": "0.59094137", "text": "def get_valid_moves(self):\n valid_moves = []\n for row in range(0, len(self.board_state)):\n for column in range(0, len(self.board_state[row])):\n if not self.board_state[row][column]:\n valid_moves.append((row, column))\n return valid_moves", "title": "" }, { "docid": "51a8318215eff5e3dd486e5995255145", "score": "0.5895124", "text": "def getAllPossibleMoves(self):\n possibleMoves = []\n pieceList = self.getPlayerPieceList()\n\n for i in range(len(pieceList)):\n # eval function takes the input string as executable code and execute them\n movelist = eval('self.gamerule.possibleMoveFor{}(pieceList[i],self.pieceList)'.\n format(re.findall('(?:Black|Red)(.*)', pieceList[i]['Name'])[0]))\n\n for item in movelist:\n possibleMoves.append((pieceList[i], item))\n\n return possibleMoves", "title": "" }, { "docid": "58e3415732705dbeda83c46f1be8f96b", "score": "0.58554685", "text": "def available_combos(self, player):\n return list(self.allowed_moves) + self.get_moves(player)", "title": "" }, { "docid": "be3ca738e15d6d0d86a97af2fc08bac8", "score": "0.5834392", "text": "def get_legal_moves(self, board):\n\t\tposition = self.position\n\t\tcolor = self.color\n\t\tpossible_moves = [(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)]\n\n\t\tfor x, y in possible_moves:\n\t\t\tdestination = (position[0] + x, position[1] + y)\n\t\t\tif destination not in board.occupied(color):\n\t\t\t\tlegal_moves.append(destination)\n\n\t\treturn legal_moves", "title": "" }, { "docid": "b4baac736ca5737372b0aa2904e7782d", "score": "0.58265924", "text": "def generateBoardState(self):\n\n grid = [[None for i in range(8)] for i in range(8)]\n\n for white_piece, black_piece in itertools.zip_longest(self.white_pieces, self.black_pieces):\n if white_piece is not None:\n grid[white_piece.x][white_piece.y] = white_piece\n\n if black_piece is not None:\n grid[black_piece.x][black_piece.y] = black_piece\n\n return grid", "title": "" }, { "docid": "0e598b742a16846ab4dd0756e4e81e1b", "score": "0.5820343", "text": "def possibilities(board):\n x_coord, y_coord = np.where(board == 0)\n return [(x_coord[i], y_coord[i]) for i in range(len(x_coord))]", "title": "" }, { "docid": "47f26659c89a71258151df675abe9b15", "score": "0.5801241", "text": "def L_shaped_moves(game, player):\n\n loc = game.get_player_location(player)\n\n directions = [(-2, 1), (-2, -1), (-1, -2), (-1, -2), (1, 2), (1, 2), (2, 1), (2, -1)]\n\n row, column = loc\n valid_moves = [(row + direction_row, column + direction_column) for direction_row, direction_column in directions\n if game.move_is_legal((row + direction_row, column + direction_column))]\n return list(set(valid_moves))", "title": "" }, { "docid": "619efc5947a08173d29efdea457fdde0", "score": "0.57873785", "text": "def getMoves(self, player):\n moves = []\n for i in range(0, len(self.board)):\n if self.board[i] == player:\n 
moves.append(i)\n return moves", "title": "" }, { "docid": "998049e2293b46a3e0402bdac8ea9f54", "score": "0.5776741", "text": "def legal_moves(board):\n moves = []\n for square in range(NUM_SQUARES):\n if board[square] == EMPTY:\n moves.append(square)\n return moves", "title": "" }, { "docid": "998049e2293b46a3e0402bdac8ea9f54", "score": "0.5776741", "text": "def legal_moves(board):\n moves = []\n for square in range(NUM_SQUARES):\n if board[square] == EMPTY:\n moves.append(square)\n return moves", "title": "" }, { "docid": "f41b95e1fe15789de3a34029a304de87", "score": "0.5772661", "text": "def assert_moves(\n self, layout: typing.Dict[typing.Tuple[int, int], str],\n moves: typing.Tuple[typing.Tuple[int, int]],\n host_turn: bool = True):\n if not host_turn:\n self.game.current_turn = enums.Side.AWAY\n self.make_layout(layout)\n actual_moves = self.game.game_mode.possible_moves(\n self.game.current_turn\n )\n expected_moves = []\n for move in moves:\n if len(move) == 4:\n move = (*move, None) # Add promotion.\n expected_moves.append(move)\n self.assertEqual(set(actual_moves), set(expected_moves))", "title": "" }, { "docid": "0e75f95ed52c78368665f9955f8bcbb2", "score": "0.5769852", "text": "def legal_moves(self):\n moves = [(x, y) for x in range(self.n) for y in range(self.n) if self.check_legal(x, y)]\n return moves", "title": "" }, { "docid": "cdd8cfa64928c31a83fbd36db64e53e9", "score": "0.5760343", "text": "def test_build_move_map():\n board = Board()\n\n # Player 0\n moves = board.get_legal_moves(player=0)\n assert len(board.move_map_player_0) == len(moves)\n assert all([type(v) == int for v in board.move_map_player_0.values()])\n assert all([type(k) == tuple for k in board.move_map_player_0.keys()])\n\n # Player 1\n moves = board.get_legal_moves(player=1)\n assert len(board.move_map_player_1) == len(moves)\n assert all([type(v) == int for v in board.move_map_player_1.values()])\n assert all([type(k) == tuple for k in board.move_map_player_1.keys()])", "title": "" }, { "docid": "66da702d48c13ca40976b7aee57d4229", "score": "0.57529116", "text": "def get_formed_rows_for_player(self, player):\n player_sign = config().get('board_signs')[player]\n connector_signs = config().get('board_signs')['connectors']\n\n # traverse horizontally\n positions_forming_rows = []\n\n for y_index, y in enumerate(self._game_board):\n player_pieces = []\n for x_index, x in enumerate(y):\n if x == player_sign:\n player_pieces.append([y_index, x_index])\n elif x not in connector_signs:\n player_pieces = []\n\n if len(player_pieces) == 3:\n positions_forming_rows.extend(player_pieces)\n\n # traverse vertically\n for x_index in range(len(self._game_board[0])):\n player_pieces = []\n for y_index in range(len(self._game_board)):\n curr_sign = self._game_board[y_index][x_index]\n if curr_sign == player_sign:\n player_pieces.append([y_index, x_index])\n elif curr_sign not in connector_signs:\n player_pieces = []\n\n if len(player_pieces) == 3:\n positions_forming_rows.extend(player_pieces)\n\n # traverse diagonally from top left to bot right\n player_pieces = []\n for x_index in range(len(self._game_board[0])):\n curr_sign = self._game_board[x_index][x_index]\n if curr_sign == player_sign:\n player_pieces.append([x_index, x_index])\n elif curr_sign not in connector_signs:\n player_pieces = []\n\n if len(player_pieces) == 3:\n positions_forming_rows.extend(player_pieces)\n\n # traverse diagonally from bot left to top right\n player_pieces = []\n for y_index in range(len(self._game_board[0])):\n curr_sign = 
self._game_board[12 - y_index][y_index]\n if curr_sign == player_sign:\n player_pieces.append([12 - y_index, y_index])\n elif curr_sign not in connector_signs:\n player_pieces = []\n\n if len(player_pieces) == 3:\n positions_forming_rows.extend(player_pieces)\n\n return positions_forming_rows", "title": "" }, { "docid": "7a4b7aafa7f1568fa9ae46a5806e8d31", "score": "0.5742393", "text": "def get_available_moves(cls, board):\n for i, row in enumerate(board):\n for j, cell in enumerate(row):\n if cell == cls.cell_empty_value:\n yield i, j", "title": "" }, { "docid": "24ed071de6a19373d6ec5757d479a1bf", "score": "0.57347447", "text": "def test_get_legal_moves():\n board = Board()\n\n empty_spaces = board.get_empty_spaces()\n legal_moves = board.get_legal_moves(player=0)\n\n for space in empty_spaces:\n q, r = space\n assert ((q, r, 1), ()) in legal_moves\n assert ((q, r, 2), ()) in legal_moves\n\n for (space1, space2) in permutations(empty_spaces, r=2):\n assert ((space1[0], space1[1], 1), (space2[0], space2[1], 2)) in legal_moves", "title": "" }, { "docid": "566c41a9aa4408508d49f06c9e3709d8", "score": "0.5728834", "text": "def get_valid_moves(self, row_count, col_count, squares, src_square):\n valid_moves = []\n\n src_row = src_square.get_row()\n src_col = src_square.get_col()\n\n # Move horizontally right\n for dx in range(1, col_count-src_col):\n dest_square = squares[src_row][src_col+dx]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move horizontally left\n for dx in range(1, src_col+1):\n dest_square = squares[src_row][src_col-dx]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move vertically down\n for dy in range(1, row_count-src_row):\n dest_square = squares[src_row+dy][src_col]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move vertically up\n for dy in range(1, src_row+1):\n dest_square = squares[src_row-dy][src_col]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n return valid_moves", "title": "" }, { "docid": "1ff69c9c2a795db01f26d0b465a6befe", "score": "0.5728664", "text": "def board8():\n return [\n ['X', 'X', 'O', 'X'],\n ['X', 'O', 'X', 'O'],\n ['X', 'O', 'O', 'O'],\n ['O', 'O', 'O', 'X'],\n ]", "title": "" }, { "docid": "d2da34ee52914e13f16d7281ff10c580", "score": "0.5723913", "text": "def possible_moves(self):\r\n res = []\r\n hand = self.get_player().get_hand()\r\n if len(hand) == 0:\r\n print(\"Player's hand is empty\")\r\n raise ValueError()\r\n hand = Game.combinations(hand, 0)", "title": "" }, { "docid": "aae2c22ea737f34e284621eb2f98dd4e", "score": "0.5708172", "text": "def probable_position(board):\n probable_list = []\n for (pos_x, pos_y) in itertools.product(range(pp.width), range(pp.height)):\n if not isFree(pos_x, pos_y):\n continue\n for (i,j) in itertools.product(range(5), range(5)):\n x, y = pos_x - i, pos_y - j\n if x < 0 or x >= pp.width or y < 0 or y >= pp.height: # out of the board\n continue\n if not isFree(x, y): # a chess is in the region\n probable_list.append((pos_x, pos_y))\n break\n return probable_list # 
prob_list may be empty", "title": "" }, { "docid": "39e0827e485e916609e0bc1372d2399b", "score": "0.5707209", "text": "def get_winning_moves(self, board, my_position) -> Generator:\n for m in self.get_possible_moves(board, my_position):\n if m.is_winning_move and not any(m.traversed_pieces):\n yield m", "title": "" }, { "docid": "60da9f78ffa872dd5e600a749a289883", "score": "0.57042414", "text": "def get_available_moves(self, board):\n pass", "title": "" }, { "docid": "60da9f78ffa872dd5e600a749a289883", "score": "0.57042414", "text": "def get_available_moves(self, board):\n pass", "title": "" }, { "docid": "0bc12bb62b854e11a31eb0e05f58984a", "score": "0.5702951", "text": "def legal_moves(board):\n moves = []\n for i in range(NUM_SQUARES):\n if board[i] == EMPTY:\n moves.append(i)\n return moves", "title": "" }, { "docid": "ed1569c6ecfff7d7e19a737180172af4", "score": "0.569267", "text": "def available_moves(current_pos, all_squares):\n return [(current_pos[0] + move[0], current_pos[1] + move[1]) for move in move_types if (current_pos[0] + move[0], current_pos[1] + move[1]) in all_squares]", "title": "" }, { "docid": "52106ec0b79fd098171f9f42f0740619", "score": "0.5686171", "text": "def board2():\n return [\n ['*', '*', '*'],\n ['*', '*', '*'],\n ['X', 'X', 'X'],\n ]", "title": "" }, { "docid": "eb1387ba9f2c3148268f2360bd5ed0b8", "score": "0.5679085", "text": "def makeboard():\n for x in xrange(5):\n for y in xrange(4):\n\t # omit bottom right and left squares on board\n\t if (x == 0 and y == 3) or (x == 4 and y == 3):\n\t pass\n\t else:\n\t yield x,y", "title": "" }, { "docid": "ee13e73fd7e41ff9ab86596700a8536d", "score": "0.5674903", "text": "def get_two_adjacent_pieces_for_player(self, player):\n player_sign = config().get('board_signs')[player]\n available_sign = config().get('board_signs')['available']\n connector_signs = config().get('board_signs')['connectors']\n # traverse horizontally\n adjacent_rows = 0\n\n # get horizontal adjacent pieces\n for y_index, y in enumerate(self._game_board):\n player_pieces = 0\n available_position = 0\n for x_index, x in enumerate(y):\n if x == player_sign:\n player_pieces += 1\n elif x == available_sign:\n available_position += 1\n elif x not in connector_signs:\n player_pieces = 0\n\n if player_pieces == 2 and available_position == 1:\n adjacent_rows += 1\n\n # get vertical adjacent pieces\n for x in range(len(self._game_board[0])):\n player_pieces = 0\n available_position = 0\n for y in range(len(self._game_board)):\n curr_sign = self._game_board[y][x]\n if curr_sign == player_sign:\n player_pieces += 1\n elif curr_sign == available_sign:\n available_position += 1\n elif curr_sign not in connector_signs:\n player_pieces = 0\n\n if player_pieces == 2 and available_position == 1:\n adjacent_rows += 1\n\n # get diagonally right bot\n for x in range(len(self._game_board[0])):\n player_pieces = 0\n available_position = 0\n curr_sign = self._game_board[x][x]\n if curr_sign == player_sign:\n player_pieces += 1\n elif curr_sign == available_sign:\n available_position += 1\n elif curr_sign not in connector_signs:\n player_pieces = 0\n\n if player_pieces == 2 and available_position == 1:\n adjacent_rows += 1\n\n # get diagonally left top\n for x in range(len(self._game_board[0])):\n player_pieces = 0\n available_position = 0\n curr_sign = self._game_board[12 - x][x]\n if curr_sign == player_sign:\n player_pieces += 1\n elif curr_sign == available_sign:\n available_position += 1\n elif curr_sign not in connector_signs:\n player_pieces = 0\n\n if player_pieces 
== 2 and available_position == 1:\n adjacent_rows += 1\n\n return adjacent_rows", "title": "" }, { "docid": "49621e22ca4e38ff1ca07b7b3c5a53bd", "score": "0.5670544", "text": "def legalMoves(self, board, debug=False):\n return []", "title": "" }, { "docid": "6a3fc661b0910dcb40e466f1e61b7204", "score": "0.5663099", "text": "def get_valid_moves(self, row_count, col_count, squares, src_square):\n valid_moves = []\n\n src_row = src_square.get_row()\n src_col = src_square.get_col()\n\n # Move horizontally right\n for dx in range(1, col_count-src_col):\n dest_square = squares[src_row][src_col+dx]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move horizontally left\n for dx in range(1, src_col+1):\n dest_square = squares[src_row][src_col-dx]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move vertically down\n for dy in range(1, row_count-src_row):\n dest_square = squares[src_row+dy][src_col]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move vertically up\n for dy in range(1, src_row+1):\n dest_square = squares[src_row-dy][src_col]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally down right\n dx = col_count-1 - src_col\n dy = row_count-1 - src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row+step+1][src_col+step+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally down left\n dx = src_col\n dy = row_count-1 - src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row+step+1][src_col-step-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally up right\n dx = col_count-1 - src_col\n dy = src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row-step-1][src_col+step+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally up left\n dx = src_col\n dy = src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row-step-1][src_col-step-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n return valid_moves", "title": "" }, { "docid": "3102449fb8da28da717fd1a8df017991", "score": "0.5661757", "text": "def available_mutations(self, player: str) -> list:\r\n \r\n if player == 'x':\r\n total_sticks = self.x_left + self.x_right\r\n left = self.x_left\r\n right = self.x_right \r\n \r\n \r\n if player == 'o':\r\n total_sticks = self.o_left + self.o_right \r\n left = self.o_left\r\n right = self.o_right 
\r\n combos = [(x, total_sticks - x) for x in range(total_sticks + 1)]\r\n legal_combos = []\r\n \r\n for item in combos:\r\n if self.mutate(player, item[0], item[1]) == None: \r\n legal_combos.append(item)\r\n if player == 'x':\r\n self.x_left = left\r\n self.x_right = right\r\n else:\r\n self.o_left = left\r\n self.o_right = right\r\n \r\n return legal_combos", "title": "" }, { "docid": "6f5e612e5d498d20147387629811f2c7", "score": "0.5660252", "text": "def move_list(self,player):\n\t\tmove_list = []\n\t\tif player == \"X\":\n\t\t\tfor row,col in self.blackPos:\n\t\t\t\tif (row < self.rowsNum-1) and (col > 0) and ((row+1,col-1) not in self.blackPos):\n\t\t\t\t\tmove_list.append(((row,col),(row+1,col-1)))\n\t\t\t\tif (row < self.rowsNum-1) and ((row+1,col) not in self.whitePos) and ((row+1,col) not in self.blackPos):\n\t\t\t\t\tmove_list.append(((row,col),(row+1,col)))\n\t\t\t\tif (row < self.rowsNum-1) and (col < self.colsNum-1) and ((row+1,col+1) not in self.blackPos):\n\t\t\t\t\tmove_list.append(((row,col),(row+1,col+1)))\n\t\t\t\t\n\t\tif player == \"O\":\n\t\t\tfor row,col in self.whitePos:\n\t\t\t\tif (row > 0) and (col > 0) and ((row-1,col-1) not in self.whitePos):\n\t\t\t\t\tmove_list.append(((row,col),(row-1,col-1)))\n\t\t\t\tif (row > 0) and ((row-1,col) not in self.whitePos) and ((row-1,col) not in self.blackPos):\n\t\t\t\t\tmove_list.append(((row,col),(row-1,col)))\n\t\t\t\tif (row > 0) and (col < self.colsNum-1) and ((row-1,col+1) not in self.whitePos):\n\t\t\t\t\tmove_list.append(((row,col),(row-1,col+1)))\n\t\n\t\treturn move_list", "title": "" }, { "docid": "76ad9b0ab1f6d389d36806a80e170453", "score": "0.5653445", "text": "def getValidMoves(board):\n valid = []\n for move in range(0,7):\n # try to find an x in each column, if an exception isnt thrown, add it to the valid list\n try:\n board[move].index(\"x\")\n valid.append(move)\n except ValueError:\n continue\n return valid", "title": "" }, { "docid": "a3b9acbe74d7207eec69953c418ae037", "score": "0.56528574", "text": "def get_available_moves(self, board):\n\n if self.color == 'black':\n direction = 1\n else:\n direction = -1\n\n available_moves = []\n \n i, j = int(self.position[1]), int(self.position[0])\n try:\n if board[i + 1 * direction][j] == 0:\n available_moves.append(pygame.Vector2(self.position[1] + 1 * direction, self.position[0]))\n except Exception as e:\n \n pass\n\n # takes move\n try:\n if board[i + 1 * direction][j - 1] != 0 and board[i + 1 * direction][j - 1].color != self.color:\n available_moves.append(pygame.Vector2(self.position[1] + 1 * direction, self.position[0] - 1))\n if board[i + 1 * direction][j + 1] != 0 and board[i + 1 * direction][j + 1].color != self.color:\n available_moves.append(pygame.Vector2(self.position[1] + 1 * direction, self.position[0] + 1))\n except Exception as e:\n \n pass\n try:\n if self.never_moved and board[self.index[0] + 1 * direction][self.index[1]] == 0 and board[self.index[0] + 2 * direction][self.index[1]] == 0:\n available_moves.append(pygame.Vector2(self.position[1] + 2 * direction, self.position[0]))\n except:\n pass\n return available_moves", "title": "" }, { "docid": "0afddef387954e50f2426ca92cae688d", "score": "0.5649345", "text": "def next_players(self):\n\n x_count = len([cell for pos, cell in self._board.iteritems() if cell == Board.X])\n o_count = len([cell for pos, cell in self._board.iteritems() if cell == Board.O])\n\n if not -1 <= x_count - o_count <= 1:\n return [] # not a valid board, maybe raise?\n\n if self._winner() is not None:\n return []\n\n if 
x_count == o_count:\n return [Board.X, Board.O]\n\n elif x_count > o_count:\n return [Board.O]\n else:\n return [Board.X]", "title": "" }, { "docid": "5a185813b1537ed8c528cfaef5553aab", "score": "0.56444705", "text": "def get_valid_moves(self, row_count, col_count, squares, src_square):\n valid_moves = []\n\n src_row = src_square.get_row()\n src_col = src_square.get_col()\n\n # Move diagonally down right\n dx = col_count-1 - src_col\n dy = row_count-1 - src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row+step+1][src_col+step+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally down left\n dx = src_col\n dy = row_count-1 - src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row+step+1][src_col-step-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally up right\n dx = col_count-1 - src_col\n dy = src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row-step-1][src_col+step+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n # Move diagonally up left\n dx = src_col\n dy = src_row\n dmin = dx if dx <= dy else dy\n for step in range(0, dmin):\n dest_square = squares[src_row-step-1][src_col-step-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n break\n\n return valid_moves", "title": "" }, { "docid": "b5409f6c1bce9586d3f48ece952dad49", "score": "0.563594", "text": "def get_legal_moves(self, board):\n legal_moves = []\n\n for piece in enumerate(self.pieces):\n # every rotation\n for rotation in range(4):\n piece[1].rotation = rotation\n # every flip\n for flip in [True, False]:\n piece[1].flipped = flip\n # every square\n for row in range(board.height):\n for column in range(board.length):\n if board.is_legal_move(piece[1], [row, column]):\n legal_moves.append(\n [piece[1], [row, column], rotation, flip])\n # moves are represented as:\n # [index in self.pieces, coords, rotation, flip]\n return legal_moves", "title": "" }, { "docid": "7edb547fe47a5e09a64460b4961b37a8", "score": "0.5620858", "text": "def get_valid_moves(self, row_count, col_count, squares, src_square):\n valid_moves = []\n\n src_row = src_square.get_row()\n src_col = src_square.get_col()\n\n # Color is white\n if self.color == Color.WHITE:\n\n # First move\n if self.first_move:\n dest_square = squares[src_row-2][src_col]\n # Not occupied\n if not (dest_square.is_occupied() or squares[src_row-1][src_col].is_occupied()):\n valid_moves.append(dest_square)\n\n # Forwards\n # Within bounds\n if (0 <= src_row-1 <= row_count-1):\n dest_square = squares[src_row-1][src_col]\n # Not occupied\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n\n # Diagonal\n # Within bounds\n if (0 <= src_row-1 <= row_count-1 and 0 <= src_col-1 <= col_count-1):\n dest_square = squares[src_row-1][src_col-1]\n # Occupied\n if dest_square.is_occupied():\n # Not the same color\n if not self.color == dest_square.get_piece().get_color():\n 
valid_moves.append(dest_square)\n if (0 <= src_row-1 <= row_count-1 and 0 <= src_col+1 <= col_count-1):\n dest_square = squares[src_row-1][src_col+1]\n # Occupied\n if dest_square.is_occupied():\n # Not the same color\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Color is black\n else:\n\n # First move\n if self.first_move:\n dest_square = squares[src_row+2][src_col]\n # Not occupied\n if not (dest_square.is_occupied() or squares[src_row+1][src_col].is_occupied()):\n valid_moves.append(dest_square)\n\n # Forwards\n # Within bounds\n if (0 <= src_row+1 <= row_count-1):\n dest_square = squares[src_row+1][src_col]\n # Not occupied\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n\n # Diagonal\n # Within bounds\n if (0 <= src_row+1 <= row_count-1 and 0 <= src_col-1 <= col_count-1):\n dest_square = squares[src_row+1][src_col-1]\n # Occupied\n if dest_square.is_occupied():\n # Not the same color\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n if (0 <= src_row+1 <= row_count-1 and 0 <= src_col+1 <= col_count-1):\n dest_square = squares[src_row+1][src_col+1]\n # Occupied\n if dest_square.is_occupied():\n # Not the same color\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n return valid_moves", "title": "" }, { "docid": "bfdb99a63a328028e829ef6b3328ec94", "score": "0.5615623", "text": "def actions(board):\n available_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == None:\n available_moves.add((i, j))\n return available_moves", "title": "" }, { "docid": "16fc0e3bd6c37d4a616ae02da03a6e97", "score": "0.5614708", "text": "def build_best_responses(self, board=None, player=None):\r\n\r\n # Initialize\r\n if board is None:\r\n board = tuple((0,)*self.WIDTH for _ in range(self.WIDTH))\r\n player = 1\r\n \r\n if board in self.best_responses: \r\n return\r\n\r\n current_outcome = self.check_win(board, player)\r\n # If win/loss/draw has been determined, the game is over.\r\n if current_outcome is not None:\r\n for board2 in self.symmetries(board):\r\n self.best_responses[board2] = (None, current_outcome) # None => no move needed\r\n return\r\n \r\n # If we don't know the best response yet, compute it.\r\n best_value = -2 \r\n for i in range(self.WIDTH):\r\n for j in range(self.WIDTH):\r\n # This is guaranteed to execute at least once since current_outcome is None\r\n if board[i][j] == 0:\r\n board2 = self.next_board(board, i, j, player)\r\n \r\n # If board2 is already in best_responses, this does nothing.\r\n # Otherwise, it ensures board2 is added to best_responses.\r\n self.build_best_responses(board2, -1 * player)\r\n # player's value given board2 is the reverse of the next \r\n # player's value\r\n value = -1 * self.best_responses[board2][1]\r\n if value > best_value:\r\n best_value, best_move = value, (i, j)\r\n \r\n # ROTATIONS/REFLECTIONS: All 8 are added to best_responses at once.\r\n \r\n # Represent best_move in board form so it can be rotated/reflected.\r\n # This is a bit silly...but at least we only do it once for each\r\n # equivalence class of board configurations.\r\n best_move_board = self.move_to_board(best_move, self.WIDTH)\r\n for board2, best_move2 in zip(self.symmetries(board), self.symmetries(best_move_board)):\r\n best_move2 = self.board_to_move(best_move2, self.WIDTH)\r\n self.best_responses[board2] = (best_move2, best_value)", "title": "" }, { "docid": 
"a34ec935770c258d010a51c87b0b404f", "score": "0.56145763", "text": "def get_blank_board():\n return [(0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0),\n (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0),\n (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0),\n (0,0)]", "title": "" }, { "docid": "d8eb502304cbedc761b5ec30801ba93a", "score": "0.56099343", "text": "def adjacent_moves(self, board, player, plays, his_moves):\n import itertools\n # ava = [(i, j) for i, j in itertools.product(range(pp.width), range(pp.height)) if board[i][j]==0]\n moved = his_moves\n # moved = [(i, j) for i, j in itertools.product(range(pp.width), range(pp.height)) if board[i][j]==player]\n # pp.pipeOut(\"moved {}\".format(moved))\n\n adjacents = set()\n width = pp.width\n height = pp.height\n\n for (h, w) in moved:\n if h < width - 1:\n adjacents.add((h+1, w)) # right\n if h > 0:\n adjacents.add((h-1, w)) # left\n if w < height - 1:\n adjacents.add((h, w+1)) # upper\n if w > 0:\n adjacents.add((h, w-1)) # lower\n if w < width - 1 and h < height - 1:\n adjacents.add((h+1, w+1)) # upper right\n if h > 0 and w < height - 1:\n adjacents.add((h-1, w+1)) # upper left\n if h < width - 1 and w > 0:\n adjacents.add((h+1, w-1)) # lower right\n if w > 0 and h > 0:\n adjacents.add((h-1, w-1)) # lower left\n\n adjacents = list(set(adjacents) - set(moved))\n for move in adjacents:\n if plays.get((player, move)):\n adjacents.remove(move)\n return adjacents", "title": "" }, { "docid": "6f7fadc7b04b2a08be73cd0781ebc0bb", "score": "0.56015635", "text": "def build_board(queens_positions):", "title": "" }, { "docid": "a4b37bf33cb27da36b6b1d2d5309fbd0", "score": "0.56003416", "text": "def board1():\n return [\n ['*', '*', '*'],\n ['*', '*', '*'],\n ['X', '*', 'X'],\n ]", "title": "" }, { "docid": "bea596bec5257b51ff7ccacdecb23aea", "score": "0.5591424", "text": "def knight_moves(position, board):\n\n column, row = position\n row = int(row) - 1\n column = ord(column) - ord('a')\n i, j = row, column\n \n addMove(i, j, 2, 1)\n addMove(i, j, 2, -1)\n addMove(i, j, 1, -2)\n addMove(i, j, 1, 2)\n addMove(i, j, -1, -2)\n addMove(i, j, -1, 2)\n addMove(i, j, -2, -1)\n addMove(i, j, -2, 1)\n return possible_moves", "title": "" }, { "docid": "a9a11bc66f92ad4c59d9de016e0c6558", "score": "0.5589961", "text": "def get_possible_moves(self) -> list:\n map_ = self.map\n list_of_states = map_.extract_state()\n winning_count = 0.5 * len(list_of_states)\n count1 = 0\n count2 = 0\n for item in list_of_states:\n if item == '1':\n count1 += 1\n elif item == '2':\n count2 += 1\n if count1 >= winning_count or count2 >= winning_count:\n return []\n x = []\n large_str = self.map.__str__()\n for char in large_str:\n if char in LETTERS:\n x.append(str(char))\n return x", "title": "" }, { "docid": "8a4879cef597e2060158fffd8369bc85", "score": "0.5587735", "text": "def board6():\n return [\n ['X', 'X', 'O', 'X'],\n ['X', 'O', 'X', 'O'],\n ['X', 'O', 'O', 'O'],\n ['O', '*', 'O', '*'],\n ]", "title": "" }, { "docid": "3b8c558d1a3a4ce0b27eb09c6015a10e", "score": "0.55826116", "text": "def getAvailableMovement(pos_x, pos_y, chess, chessboard):\n available_movement = []\n if chess == State.BLACK:\n enemy_chess = State.WHITE\n else:\n enemy_chess = State.BLACK\n \n count_ver = sum([1 for i in chessboard if i[pos_x] != State.EMPTY])\n count_hor = sum([1 for i in chessboard[pos_y] if i != State.EMPTY])\n start = min(pos_x, pos_y)\n diff = abs(pos_x - pos_y)\n count_diagonal = sum([1 for i in range(7 - diff + 1) \\\n if chessboard[pos_y - 
start + i][pos_x - start + i] != State.EMPTY])\n start = pos_x + pos_y\n diff = max(start - 7, 0)\n start -= diff\n count_paradiagonal = sum([1 for i in range(start - diff + 1) \\\n if chessboard[start - i][diff + i] != State.EMPTY])\n\n # Vertical (x-direction).\n if pos_y - count_ver >= 0 and chessboard[pos_y - count_ver][pos_x] != chess:\n count_invalid = sum([1 for i in range(pos_y - count_ver + 1, pos_y) \\\n if chessboard[i][pos_x] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x, pos_y - count_ver])\n if pos_y + count_ver <= 7 and chessboard[pos_y + count_ver][pos_x] != chess:\n count_invalid = sum([1 for i in range(pos_y + 1, pos_y + count_ver + 1 - 1) \\\n if chessboard[i][pos_x] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x, pos_y + count_ver])\n \n # Horizontal (y-direction).\n if pos_x - count_hor >= 0 and chessboard[pos_y][pos_x - count_hor] != chess:\n count_invalid = sum([1 for i in range(pos_x - count_hor + 1, pos_x) \\\n if chessboard[pos_y][i] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x - count_hor, pos_y]) \n if pos_x + count_hor <= 7 and chessboard[pos_y][pos_x + count_hor] != chess:\n count_invalid = sum([1 for i in range(pos_x + 1, pos_x + count_hor + 1 - 1) \\\n if chessboard[pos_y][i] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x + count_hor, pos_y]) \n \n # Diagonal.\n if min(pos_x, pos_y) - count_diagonal >= 0 and chessboard[pos_y - count_diagonal][pos_x - count_diagonal] != chess:\n count_invalid = sum([1 for i in range(1, count_diagonal + 1 - 1) \\\n if chessboard[pos_y - i][pos_x - i] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x - count_diagonal, pos_y - count_diagonal])\n if max(pos_x, pos_y) + count_diagonal <= 7 and chessboard[pos_y + count_diagonal][pos_x + count_diagonal] != chess:\n count_invalid = sum([1 for i in range(1, count_diagonal + 1 - 1) \\\n if chessboard[pos_y + i][pos_x + i] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x + count_diagonal, pos_y + count_diagonal])\n \n # Paradiagonal.\n if pos_y - count_paradiagonal >= 0 and pos_x + count_paradiagonal <= 7 \\\n and chessboard[pos_y - count_paradiagonal][pos_x + count_paradiagonal] != chess:\n count_invalid = sum([1 for i in range(1, count_paradiagonal + 1 - 1) \\\n if chessboard[pos_y - i][pos_x + i] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x + count_paradiagonal, pos_y - count_paradiagonal])\n if pos_x - count_paradiagonal >= 0 and pos_y + count_paradiagonal <= 7 \\\n and chessboard[pos_y + count_paradiagonal][pos_x - count_paradiagonal] != chess:\n count_invalid = sum([1 for i in range(1, count_paradiagonal + 1 - 1) \\\n if chessboard[pos_y + i][pos_x - i] == enemy_chess])\n if count_invalid == 0:\n available_movement.append([pos_x - count_paradiagonal, pos_y + count_paradiagonal])\n\n return available_movement", "title": "" }, { "docid": "cea36c76b2ebf902a97ad6641b750ae1", "score": "0.55754834", "text": "def find_winners(self):\n p1_units = 0\n p2_units = 0\n winners = []\n\n for y in range(self.height):\n for x in range(self.width):\n if self._board[y][x] == Position.PLAYER1:\n p1_units += 1\n elif self._board[y][x] == Position.PLAYER2:\n p2_units += 1\n if p1_units == 0:\n winners.append(Position.PLAYER1)\n if p2_units == 0:\n winners.append(Position.PLAYER2)\n\n return winners", "title": "" }, { "docid": "280d1bc42523c2c87b9132f0ba0f6296", "score": "0.5574005", "text": "def 
get_possible_moves(self, turn) -> list:\n # check turn\n # if buffalo's, append B to markers\n # if hunter's append H, D to markers\n # if None, append all\n\n actions = []\n # loop all state\n # for each (r, c), inc\n return actions", "title": "" }, { "docid": "ef06eb57a2418efbfd8b10e61ce8b1bc", "score": "0.55735594", "text": "def getValidMoves(self, Board):\n Columns = []\n for Col in range(self.BOARD_WIDTH):\n if self.isColumnValid(Board, Col):\n Columns.append(Col)\n return Columns", "title": "" }, { "docid": "2165b9edd9a3c5f43c096cee0ecf0d00", "score": "0.55715", "text": "def calculate_possible_moves(self, board: [[Piece]]) -> None:\n self.possible_moves.clear()\n\n if self.row > 0 and self.col < 7:\n Piece.explore_upper_right_diagonal(self, board)\n if self.row > 0 and self.col > 0:\n Piece.explore_upper_left_diagonal(self, board)\n if self.row < 7 and self.col < 7:\n Piece.explore_lower_right_diagonal(self, board)\n if self.row < 7 and self.col > 0:\n Piece.explore_lower_left_diagonal(self, board)\n\n if self.row > 0:\n Piece.explore_up(self, board)\n if self.row < 7:\n Piece.explore_down(self, board)\n if self.col < 7:\n Piece.explore_right(self, board)\n if self.col > 0:\n Piece.explore_left(self, board)", "title": "" }, { "docid": "07f11a092d0f7ebe68e7450715274e84", "score": "0.55666476", "text": "def calculate_possible_moves(self, board: [[Piece]]) -> None:\n self.possible_moves.clear()\n possibles = {(self.row + 1, self.col - 2), (self.row - 1, self.col - 2),\n (self.row + 1, self.col + 2), (self.row - 1, self.col + 2),\n (self.row + 2, self.col - 1), (self.row + 2, self.col + 1),\n (self.row - 2, self.col - 1), (self.row - 2, self.col + 1)}\n for row, col in possibles:\n if _in_bounds(row, col):\n if _is_space_empty(board, row, col) or board[row][col].color is not self.color:\n self.add_move_to_possibles(board, row, col)", "title": "" }, { "docid": "ddb52b9271059a1297217e720e740ca0", "score": "0.5556269", "text": "def actions(board):\n\n # store a (row, col) tuple where a move can be made\n possible_moves = set()\n\n for row in range(len(board)):\n for col in range(len(board[row])):\n # if current cell is empty then add it as a possible move\n if board[row][col] == EMPTY:\n possible_moves.add((row, col))\n\n return possible_moves", "title": "" }, { "docid": "075df295a706e167a628b236fa04ae42", "score": "0.5555025", "text": "def get_winning_moves(self, board, my_position):\n for m in self.get_possible_moves(board, my_position):\n if m.is_winning_move and m.beg.x != m.end.x:\n yield m", "title": "" }, { "docid": "5736e680e132373004663bdeb6068e71", "score": "0.5553397", "text": "def generate_valid_moves(position):\n x, y = position\n moves = [(x + 1, y + 2),\n (x + 1, y - 2),\n (x - 1, y + 2),\n (x - 1, y - 2),\n (x + 2, y + 1),\n (x - 2, y + 1),\n (x + 2, y - 1),\n (x - 2, y - 1)]\n\n return [pos for pos in moves if is_valid_move(pos)]", "title": "" }, { "docid": "d1e8955fe050ff80b3880053ea8aef3a", "score": "0.55510044", "text": "def winning_moves(self, player):\n if player == Player.RED:\n return list(self._top_boundary.intersection(self._bottom_boundary))\n if player == Player.BLUE:\n return list(self._left_boundary.intersection(self._right_boundary))\n raise Exception(\"Player must be an element of the Player enum.\")", "title": "" }, { "docid": "6253522a0c90f402b682222f7c44a236", "score": "0.5548748", "text": "def colsToWin(self, ox):\n compiler = []\n for i in range(self.width):\n if self.allowsMove(i) == True:\n self.addMove(i, ox)\n if self.winsFor(ox) == True:\n compiler 
+= [i]\n self.delMove(i)\n else: self.delMove(i)\n return compiler", "title": "" }, { "docid": "0f8db261e36fda5063ec24e99117592f", "score": "0.5546417", "text": "def get_valid_moves(self, row_count, col_count, squares, src_square):\n valid_moves = []\n\n src_row = src_square.get_row()\n src_col = src_square.get_col()\n\n # Move down\n if (0 <= src_row+1 <= row_count-1):\n dest_square = squares[src_row+1][src_col]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move up\n if (0 <= src_row-1 <= row_count-1):\n dest_square = squares[src_row-1][src_col]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move right\n if (0 <= src_col+1 <= col_count-1):\n dest_square = squares[src_row][src_col+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move left\n if (0 <= src_col-1 <= col_count-1):\n dest_square = squares[src_row][src_col-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move down right\n if (0 <= src_row+1 <= row_count-1 and 0 <= src_col+1 <= col_count-1):\n dest_square = squares[src_row+1][src_col+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move down left\n if (0 <= src_row+1 <= row_count-1 and 0 <= src_col-1 <= col_count-1):\n dest_square = squares[src_row+1][src_col-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move up right\n if (0 <= src_row-1 <= row_count-1 and 0 <= src_col+1 <= col_count-1):\n dest_square = squares[src_row-1][src_col+1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n # Move up left\n if (0 <= src_row-1 <= row_count-1 and 0 <= src_col-1 <= col_count-1):\n dest_square = squares[src_row-1][src_col-1]\n if not dest_square.is_occupied():\n valid_moves.append(dest_square)\n else:\n if not self.color == dest_square.get_piece().get_color():\n valid_moves.append(dest_square)\n\n return valid_moves", "title": "" }, { "docid": "85f5a6f3cc2711bc8d8c6ad7e7b723ad", "score": "0.5542709", "text": "def get_valid_swings(axial_coord, player_n_pieces):\n valid_swings = set()\n for coord in get_adjacent_friendlies(axial_coord, player_n_pieces):\n valid_swings |= get_adjacent_hexes(coord)\n\n # Valid swings are any hex adjacent to an adjacent friendly which isn't blocked, slideable, or current.\n return valid_swings - get_valid_slides(axial_coord) - {axial_coord}", "title": "" }, { "docid": "46b4722512c16d92e545a863e7e8b03f", "score": "0.55372757", "text": "def calculate_possible_moves(self, board: [[Piece]]) -> None:\n self.possible_moves.clear()\n possibles = {(self.row + 1, self.col - 1), (self.row - 1, self.col - 1),\n (self.row + 1, self.col + 1), (self.row - 1, self.col + 1),\n (self.row, self.col - 1), (self.row, self.col + 1),\n (self.row - 1, self.col), (self.row + 1, 
self.col)}\n for row, col in possibles:\n if _in_bounds(row, col):\n if _is_space_empty(board, row, col) or board[row][col].color is not self.color:\n self.add_move_to_possibles(board, row, col)\n\n if self.can_castle:\n self._explore_castles(board)", "title": "" }, { "docid": "b36a8b6647c4c8ef0dc7e2ed238be643", "score": "0.5531392", "text": "def possibleMoves(self, turns = 0):\n\t\tpossible_moves = []\n\t\tif turns == 0:\n\t\t\tfor tile in self.tiles:\n\t\t\t\tif (tile.movement_cost <= self.current_character_turn.cur_AP) and (tile.movement_cost > 0):\n\t\t\t\t\tpossible_moves.append(tile)\n\t\telse:\n\t\t\tfor tile in self.tiles:\n\t\t\t\tif (tile.movement_cost <= (self.current_character_turn.max_AP * turns)) and (tile.movement_cost > (self.current_character_turn.max_AP * (turns - 1))):\n\t\t\t\t\tpossible_moves.append(tile)\n\t\treturn possible_moves", "title": "" }, { "docid": "d60200859a22c0af0ffd4d2a6336f3b8", "score": "0.5529382", "text": "def calculate_possible_moves(self, board: [[Piece]]) -> None:\n self.possible_moves.clear()\n\n if self.row > 0 and self.col < 7:\n Piece.explore_upper_right_diagonal(self, board)\n if self.row > 0 and self.col > 0:\n Piece.explore_upper_left_diagonal(self, board)\n if self.row < 7 and self.col < 7:\n Piece.explore_lower_right_diagonal(self, board)\n if self.row < 7 and self.col > 0:\n Piece.explore_lower_left_diagonal(self, board)", "title": "" }, { "docid": "ed5085356fa24c82d7ffbcb17a3e2cd7", "score": "0.5517977", "text": "def calculate_possible_moves(self, board: [['Piece']]) -> None:\n pass", "title": "" }, { "docid": "348f590803d6491ef4793c8b969cf78d", "score": "0.5515336", "text": "def all_moves(self,states,player):\n black_moves = dict()\n white_moves = dict()\n is_jump = dict()\n state = deepcopy(states)\n if player == 'black':\n for i in range(0,len(states)):\n for j in range(0,len(states[i])):\n if states[i][j] == 'b':\n black_moves[(i,j)],is_jump[(i,j)]= self.valid_moves(piece.Piece([i,j],'b'),states)\n if not black_moves[(i,j)]:\n del black_moves[(i,j)]\n if states[i][j] == 'B':\n black_moves[(i,j)],is_jump[(i,j)]= self.valid_moves(piece.Piece([i,j],'b',king=True),states)\n if not black_moves[(i,j)]:\n del black_moves[(i,j)]\n if is_jump[(i,j)]:\n for moves in black_moves[(i,j)]:\n board,state = self.resultState(state,moves,piece.Piece([i,j],'b',king=True),jump=True)\n endx = moves[0][0]\n endy = moves[0][1]\n rem_val = self.valid_moves(piece.Piece([endx,endy],'b',king=True),board)\n \n if rem_val[1]:\n for items in rem_val[0]:\n black_moves[(i,j)].append(items[:len(items)-1] + moves)\n black_moves[(i,j)].remove(moves)\n \n \n if player == 'white':\n for i in range(0,len(states)):\n for j in range(0,len(states[i])):\n \n if states[i][j] == 'w':\n white_moves[(i,j)],is_jump[(i,j)]= self.valid_moves(piece.Piece([i,j],'w'),states)\n if not white_moves[(i,j)]:\n del white_moves[(i,j)]\n if states[i][j] == 'W':\n white_moves[(i,j)],is_jump[(i,j)]= self.valid_moves(piece.Piece([i,j],'w',king=True),states)\n if not white_moves[(i,j)]:\n del white_moves[(i,j)]\n if is_jump[(i,j)]:\n for moves in white_moves[(i,j)]:\n board,state = self.resultState(state,moves,piece.Piece([i,j],'w',king=True),jump=True)\n endx = moves[0][0]\n endy = moves[0][1]\n rem_val = self.valid_moves(piece.Piece([endx,endy],'w',king=True),board)\n \n if rem_val[1]:\n for items in rem_val[0]:\n white_moves[(i,j)].append(items[:len(items)-1] + moves)\n white_moves[(i,j)].remove(moves)\n \n \n \n\n\n return black_moves,white_moves,is_jump", "title": "" }, { "docid": 
"60153a3624264ff02e1a84460c1a147b", "score": "0.5514616", "text": "def getDests(self,piece):\n possible = []\n \n jumper= []\n row = piece[0]\n col = piece[1]\n\n queue = FIFO_Queue()\n visited = set()\n\n queue.add(piece)\n neighbors = self.getNeighbors(piece[0],piece[1])\n\n \"\"\"\n adds valid neighbors into list ofpossible\n adds blocked neighbors to check \n \"\"\"\n for (r, c) in neighbors:\n neighbor = self.board[r][c]\n if neighbor == -1:\n possible.append((r, c))\n\n while len(queue) != 0:\n curr = queue.get()\n visited.add(curr)\n jumper = []\n currNeighs = self.getNeighbors(curr[0],curr[1])\n\n for (r,c) in currNeighs:\n if self.board[r][c] != -1:\n jumper.append((r,c))\n\n #print \"jumper\",jumper\n for jump in jumper:\n dest = self.findJump(curr,jump)\n if dest[0] == -1 or dest in visited:\n continue\n possible.append(dest)\n queue.add(dest)\n #print \"possible: \",possible\n #print \"current: \", piece\n return possible", "title": "" }, { "docid": "ceb921bf040b6910429b84581798fff2", "score": "0.55102104", "text": "def assigned_board(board: List[int]):\n return [i for i in board if i is not None]", "title": "" }, { "docid": "21d82caf4ef8621c727cef435823259c", "score": "0.55096424", "text": "def random_to_win(self, player=None):\r\n moves = []\r\n b = Board() # create a local board\r\n self.board_serials.append(b.serial)\r\n if player is None:\r\n player = random.choice([tokens[1],tokens[2]])\r\n\r\n while True:\r\n row = random.randint(0,2)\r\n col = random.randint(0,2)\r\n if b.move(player, row, col):\r\n moves.append((row,col,player))\r\n self.board_serials.append(b.serial)\r\n player = b.next_token(player)\r\n\r\n # check for a winner\r\n winner = b.winner\r\n if winner:\r\n self.moves = moves\r\n self.winner = winner\r\n return b.serial, moves\r\n\r\n # check for a draw\r\n if b.draw:\r\n moves = []\r\n b.clear()", "title": "" }, { "docid": "c6102b9694030881e0982fb38198b1cc", "score": "0.5494881", "text": "def calculate_moves(self, b):\n available_moves = []\n row, col = get_row_col_from_index(self.board_index)\n\n if self.piece_type == \"p\":\n if self.color == \"w\":\n if row == 2:\n if square_is_empty(b, (3, col)):\n available_moves.append((3, col))\n if square_is_empty(b, (4, col)):\n available_moves.append((4, col))\n elif row < 8:\n if square_is_empty(b, (row + 1, col)):\n available_moves.append((row + 1, col))\n attacks = [(row + 1, col - 1), (row + 1, col + 1)]\n for move in attacks:\n if move[0] < 1 or move[0] > 8 or move[1] > 8 or move[1] < 1:\n continue\n else:\n if is_enemy(b, move, self.color):\n available_moves.append(move)\n if self.color == \"b\":\n if row == 7:\n if square_is_empty(b, (6, col)):\n available_moves.append((6, col))\n if square_is_empty(b, (5, col)):\n available_moves.append((5, col))\n elif row > 1:\n if square_is_empty(b, (row - 1, col)):\n available_moves.append((row - 1, col))\n attacks = [(row - 1, col - 1), (row - 1, col + 1)]\n for move in attacks:\n if move[0] < 1 or move[0] > 8 or move[1] > 8 or move[1] < 1:\n continue\n else:\n if is_enemy(b, move, self.color):\n available_moves.append(move)\n\n if self.piece_type == \"b\":\n for i, c in enumerate(range(col, 8)):\n if square_is_empty(b, (row + i + 1, c + 1)):\n available_moves.append((row + i + 1, c + 1))\n elif is_enemy(b, (row + i + 1, c + 1), self.color):\n available_moves.append((row + i + 1, c + 1))\n break\n else:\n break\n for i, c in enumerate(range(col, 8)):\n if square_is_empty(b, (row - (i + 1), c + 1)):\n available_moves.append((row - (i + 1), c + 1))\n elif is_enemy(b, 
(row - (i + 1), c + 1), self.color):\n available_moves.append((row - (i + 1), c + 1))\n break\n else:\n break\n for i, c in reversed(list(enumerate(range(1, col)))):\n if square_is_empty(b, (row - (col - (i + 1)), c)):\n available_moves.append((row - (col - (i + 1)), c))\n elif is_enemy(b, (row - (col - (i + 1)), c), self.color):\n available_moves.append((row - (col - (i + 1)), c))\n break\n else:\n break\n for i, c in reversed(list(enumerate(range(1, col)))):\n if square_is_empty(b, (row + (col - (i + 1)), c)):\n available_moves.append((row + (col - (i + 1)), c))\n elif is_enemy(b, (row + (col - (i + 1)), c), self.color):\n available_moves.append((row + (col - (i + 1)), c))\n break\n else:\n break\n\n if self.piece_type == \"r\":\n for i in range(col+1, 9):\n if square_is_empty(b, (row, i)):\n available_moves.append((row, i))\n elif is_enemy(b, (row, i), self.color):\n available_moves.append((row, i))\n break\n else:\n break\n for i in reversed(range(1, col)):\n if square_is_empty(b, (row, i)):\n available_moves.append((row, i))\n elif is_enemy(b, (row, i), self.color):\n available_moves.append((row, i))\n break\n else:\n break\n for i in range(row+1, 9):\n if square_is_empty(b, (i, col)):\n available_moves.append((i, col))\n elif is_enemy(b, (i, col), self.color):\n available_moves.append((i, col))\n break\n else:\n break\n for i in reversed(range(1, row)):\n if square_is_empty(b, (i, col)):\n available_moves.append((i, col))\n elif is_enemy(b, (i, col), self.color):\n available_moves.append((i, col))\n break\n else:\n break\n\n if self.piece_type == \"q\":\n for i, c in enumerate(range(col, 8)):\n if square_is_empty(b, (row + i + 1, c + 1)):\n available_moves.append((row + i + 1, c + 1))\n elif is_enemy(b, (row + i + 1, c + 1), self.color):\n available_moves.append((row + i + 1, c + 1))\n break\n else:\n break\n for i, c in enumerate(range(col, 8)):\n if square_is_empty(b, (row - (i + 1), c + 1)):\n available_moves.append((row - (i + 1), c + 1))\n elif is_enemy(b, (row - (i + 1), c + 1), self.color):\n available_moves.append((row - (i + 1), c + 1))\n break\n else:\n break\n for i, c in reversed(list(enumerate(range(1, col)))):\n if square_is_empty(b, (row - (col - (i + 1)), c)):\n available_moves.append((row - (col - (i + 1)), c))\n elif is_enemy(b, (row - (col - (i + 1)), c), self.color):\n available_moves.append((row - (col - (i + 1)), c))\n break\n else:\n break\n for i, c in reversed(list(enumerate(range(1, col)))):\n if square_is_empty(b, (row + (col - (i + 1)), c)):\n available_moves.append((row + (col - (i + 1)), c))\n elif is_enemy(b, (row + (col - (i + 1)), c), self.color):\n available_moves.append((row + (col - (i + 1)), c))\n break\n else:\n break\n for i in range(col + 1, 9):\n if square_is_empty(b, (row, i)):\n available_moves.append((row, i))\n elif is_enemy(b, (row, i), self.color):\n available_moves.append((row, i))\n break\n else:\n break\n for i in reversed(range(1, col)):\n if square_is_empty(b, (row, i)):\n available_moves.append((row, i))\n elif is_enemy(b, (row, i), self.color):\n available_moves.append((row, i))\n break\n else:\n break\n for i in range(row + 1, 9):\n if square_is_empty(b, (i, col)):\n available_moves.append((i, col))\n elif is_enemy(b, (i, col), self.color):\n available_moves.append((i, col))\n break\n else:\n break\n for i in reversed(range(1, row)):\n if square_is_empty(b, (i, col)):\n available_moves.append((i, col))\n elif is_enemy(b, (i, col), self.color):\n available_moves.append((i, col))\n break\n else:\n break\n\n if self.piece_type == 
\"k\":\n for y in range(row-1, row+2):\n for x in range(col-1, col+2):\n if square_is_empty(b, (y, x)) or is_enemy(b, (y, x), self.color):\n available_moves.append((y, x))\n\n if self.piece_type == \"n\":\n n_moves = [(row + 2, col - 1), (row + 2, col + 1),\n (row + 1, col + 2), (row - 1, col + 2),\n (row - 2, col + 1), (row - 2, col - 1),\n (row - 1, col - 2), (row + 1, col - 2)]\n for move in n_moves:\n if square_is_empty(b, move) or is_enemy(b, move, self.color):\n available_moves.append(move)\n\n # Remove all moves that fall off the board\n for i, move in reversed(list(enumerate(available_moves))):\n # print(i, move)\n if move[0] < 1 or move[0] > 8 or move[1] > 8 or move[1] < 1:\n available_moves.pop(i)\n\n # Remove all moves that land on another piece of same color\n for s in b.squares:\n if s:\n pos = get_row_col_from_index(s.board_index)\n if pos in available_moves:\n if s.color == self.color:\n available_moves.remove((s.row, s.col))\n\n self.possible_moves = available_moves", "title": "" }, { "docid": "5aa26f0140475d5e71e0a91a7a765a02", "score": "0.54937863", "text": "def get_possible_moves(self) -> list:\n moves = []\n for x in self.cells:\n if isinstance(x, str):\n moves.append(x)\n return [] if self.is_over() else moves", "title": "" }, { "docid": "e59ed739a5deea6ad4488bab20029259", "score": "0.5492927", "text": "def player(board):\n empty_board = True\n count_x = 0\n count_o = 0\n f_list = []\n\n \"\"\"\n for cell in board:\n empty_board = all(c is None for c in cell)\n print(all(c is None for c in cell))\n \"\"\"\n\n # Check every item in board.\n # Flatten list, and put all elements in new list, for later use\n # Check if any of the cells contain X or O. If so, the board is not empty,\n # game has began, so we switch empty_board to False\n for cell in board:\n for i in cell:\n f_list.append(i)\n if i == X or i == O:\n empty_board = False\n\n # If board is empty, return X as the first player\n # else, figure out who's turn is next, by counting how many X and O's on the game board.\n if empty_board == True:\n return X\n else:\n count_x = f_list.count(\"X\")\n count_o = f_list.count(\"O\")\n if count_x > count_o:\n return O\n else:\n return X", "title": "" }, { "docid": "d39c7646b67f774de7ff93f05d1e44c7", "score": "0.54854065", "text": "def create_board(self):\n if self.board_type == 'Normal chessboard':\n self.board = [Pawn('Black', [\"A\", 7]), Pawn('White', [\"A\", 2]), Pawn('Black', [\"B\", 7]),\n Pawn('White', [\"B\", 2]), Pawn('Black', [\"C\", 7]), Pawn('White', [\"C\", 2]),\n Pawn('Black', [\"D\", 7]), Pawn('White', [\"D\", 2]), Pawn('Black', [\"E\", 7]),\n Pawn('White', [\"E\", 2]), Pawn('Black', [\"F\", 7]), Pawn('White', [\"F\", 2]),\n Pawn('Black', [\"G\", 7]), Pawn('White', [\"G\", 2]), Pawn('Black', [\"H\", 7]),\n Pawn('White', [\"H\", 2]), Queen('Black', ['D', 8]), Queen('White', ['D', 1]),\n King('Black', ['E', 8]), King('White', ['E', 1]), Rook('Black', [\"A\", 8]),\n Rook('Black', [\"H\", 8]), Rook('White', [\"A\", 1]), Rook('White', [\"H\", 1]),\n Bishop('Black', [\"C\", 8]), Bishop('Black', [\"F\", 8]), Bishop('White', [\"C\", 1]),\n Bishop('White', [\"F\", 1]), Knight('Black', [\"B\", 8]), Knight('Black', [\"G\", 8]),\n Knight('White', [\"B\", 1]), Knight('White', [\"G\", 1])]\n\n elif self.board_type == 'Hexagon chessboard':\n self.board = [Pawn('Black', [\"H\", 11]), Pawn('White', [\"A\", 2]), Pawn('Black', [\"G\", 11]),\n Pawn('White', [\"B\", 2]), Pawn('Black', [\"F\", 11]), Pawn('White', [\"C\", 2]),\n Pawn('Black', [\"E\", 11]), Pawn('White', [\"D\", 
2]), Pawn('Black', [\"I\", 11]),\n Pawn('White', [\"E\", 2]), Pawn('Black', [\"J\", 11]), Pawn('White', [\"F\", 2]),\n Pawn('Black', [\"K\", 11]), Pawn('White', [\"G\", 2]), Pawn('Black', [\"L\", 11]),\n Pawn('White', [\"H\", 2]), Pawn('Red', [\"G\", 7]), Pawn('Red', [\"A\", 7]),\n Pawn('Red', [\"B\", 7]), Pawn('Red', [\"C\", 7]), Pawn('Red', [\"D\", 7]), Pawn('Red', [\"L\", 7]),\n Pawn('Red', [\"I\", 7]), Pawn('Red', [\"K\", 7]), Queen('Black', ['I', 12]),\n Queen('White', ['E', 1]), Queen('Red', ['D', 8]), King('Black', ['E', 12]),\n King('White', ['D', 1]), King('Red', ['I', 8]), Rook('Black',[\"H\", 12]),\n Rook('Black',[\"L\", 12]), Rook('White',[\"A\", 1]), Rook('White',[\"H\", 1]),\n Rook('Red',[\"A\", 8]), Rook('Red',[\"L\", 8]), Bishop('Black', [\"J\", 12]),\n Bishop('Black', [\"F\", 12]), Bishop('White', [\"C\", 1]), Bishop('White', [\"F\", 1]),\n Bishop('Red', [\"C\", 8]), Bishop('Red', [\"G\", 8]), Knight('Black', [\"G\", 12]),\n Knight('Black', [\"K\", 12]), Knight('White', [\"B\", 1]), Knight('White', [\"G\", 1]),\n Knight('Red', [\"B\", 8]), Knight('Red', [\"K\", 8])]\n\n elif self.board_type == 'Glinsky chessboard':\n self.board = [Pawn('Black', [\"B\", 7]), Pawn('White', [\"B\", 1]), Pawn('Black', [\"C\", 7]),\n Pawn('White', [\"C\", 2]), Pawn('Black', [\"D\", 7]), Pawn('White', [\"D\", 3]),\n Pawn('Black', [\"E\", 7]), Pawn('White', [\"E\", 4]), Pawn('Black', [\"F\", 7]),\n Pawn('White', [\"F\", 5]), Pawn('Black', [\"G\", 7]), Pawn('White', [\"G\", 4]),\n Pawn('Black', [\"H\", 7]), Pawn('White', [\"H\", 3]), Pawn('Black', [\"I\", 7]),\n Pawn('White', [\"I\", 2]), Pawn('Black', [\"K\", 7]), Pawn('White', [\"K\", 1]),\n Pawn('White', [\"K\", 5]), Queen('White', ['E', 1]),\n Queen('Black', ['E', 10]), King('Black', ['G', 10]), King('White', ['G', 1]),\n Rook('Black', [\"I\", 8]), Rook('Black', [\"C\", 8]), Rook('White', [\"I\", 1]),\n Rook('White', [\"C\", 1]), Bishop('Black', [\"F\", 9]), Bishop('Black', [\"F\", 10]),\n Bishop('White', [\"F\", 1]), Bishop('White', [\"F\", 2]), Bishop('Black', [\"F\", 11]),\n Bishop('White', [\"F\", 3]), Knight('Black', [\"H\", 9]), Knight('Black', [\"D\", 9]),\n Knight('White', [\"H\", 1]), Knight('White', [\"D\", 1])]\n\n elif self.board_type == 'Quad chessboard':\n self.board = [Pawn('Black', [\"I\", 13]), Pawn('White', [\"I\", 2]), Pawn('Black', [\"J\", 13]),\n Pawn('White', [\"J\", 2]), Pawn('Black', [\"K\", 13]), Pawn('White', [\"K\", 2]),\n Pawn('Black', [\"D\", 13]), Pawn('White', [\"D\", 2]), Pawn('Black', [\"E\", 13]),\n Pawn('White', [\"E\", 2]), Pawn('Black', [\"F\", 13]), Pawn('White', [\"F\", 2]),\n Pawn('Black', [\"G\", 13]), Pawn('White', [\"G\", 2]), Pawn('Black', [\"H\", 13]),\n Pawn('White', [\"H\", 2]), Pawn('Red', [\"M\", 4]), Pawn('Blue', [\"B\", 4]), Pawn('Red', [\"M\", 5]),\n Pawn('Blue', [\"B\", 5]), Pawn('Red', [\"M\", 6]), Pawn('Blue', [\"B\", 6]),\n Pawn('Red', [\"M\", 7]), Pawn('Blue', [\"B\", 7]), Pawn('Red', [\"M\", 8]),\n Pawn('Blue', [\"B\", 8]), Pawn('Red', [\"M\", 9]), Pawn('Blue', [\"B\", 9]),\n Pawn('Red', [\"M\", 10]), Pawn('Blue', [\"B\", 10]), Pawn('Red', [\"M\", 11]),\n Pawn('Blue', [\"B\", 11]), Queen('Black', ['G', 14]), Queen('White', ['G', 1]),\n Queen('Red', ['N', 7]), Queen('Blue', ['A', 7]), King('Black', ['H', 14]),\n King('White', ['H', 1]), King('Red', ['N', 8]), King('Blue', ['A', 7]),\n Rook('Black', [\"D\", 14]), Rook('Black', [\"K\", 14]), Rook('White', [\"D\", 1]),\n Rook('White', [\"K\", 1]), Rook('Blue', [\"A\", 11]), Rook('Blue', [\"A\", 4]),\n Rook('Red', [\"N\", 11]), Rook('Red', 
[\"N\", 4]), Bishop('Black', [\"F\", 14]),\n Bishop('Black', [\"I\", 14]), Bishop('White', [\"E\", 1]), Bishop('White', [\"J\", 1]),\n Bishop('Red', [\"N\", 9]), Bishop('Red', [\"N\", 6]), Bishop('Blue', [\"A\", 9]),\n Bishop('Blue', [\"A\", 6]), Knight('Black', [\"E\", 14]), Knight('Black', [\"J\", 14]),\n Knight('White', [\"E\", 1]), Knight('White', [\"J\", 1]), Knight('Red', [\"N\", 5]),\n Knight('Red', [\"N\", 10]), Knight('Blue', [\"A\", 5]), Knight('Blue', [\"A\", 10])]\n\n elif self.board_type == 'Double chessboard':\n self.board = [Pawn('Black', [\"A\", 11]), Pawn('White', [\"A\", 2]), Pawn('Black', [\"B\", 11]),\n Pawn('White', [\"B\", 2]), Pawn('Black', [\"C\", 11]), Pawn('White', [\"C\", 2]),\n Pawn('Black', [\"D\", 11]), Pawn('White', [\"D\", 2]), Pawn('Black', [\"E\", 11]),\n Pawn('White', [\"E\", 2]), Pawn('Black', [\"F\", 11]), Pawn('White', [\"F\", 2]),\n Pawn('Black', [\"G\", 11]), Pawn('White', [\"G\", 2]), Pawn('Black', [\"H\", 11]),\n Pawn('White', [\"H\", 2]), Pawn('Black', [\"I\", 11]), Pawn('White', [\"I\", 2]),\n Pawn('Black', [\"J\", 11]),\n Pawn('White', [\"J\", 2]), Pawn('Black', [\"K\", 11]), Pawn('White', [\"K\", 2]),\n Pawn('Black', [\"L\", 11]), Pawn('White', [\"L\", 2]), Pawn('Black', [\"M\", 11]),\n Pawn('White', [\"M\", 2]), Pawn('Black', [\"N\", 11]), Pawn('White', [\"N\", 2]),\n Pawn('Black', [\"O\", 11]), Pawn('White', [\"O\", 2]), Pawn('Black', [\"P\", 11]),\n Pawn('White', [\"P\", 2]), Queen('Black', ['D', 12]), Queen('White', ['D', 1]),\n Queen('Black', ['L', 12]), Queen('White', ['L', 1]), King('Black', ['E', 12]),\n King('White', ['E', 1]), King('Black', ['M', 12]), King('White', ['M', 1]),\n Rook('Black', [\"A\", 12]), Rook('Black', [\"H\", 12]), Rook('White', [\"A\", 1]),\n Rook('White', [\"H\", 1]), Rook('Black', [\"I\", 12]), Rook('Black', [\"P\", 12]),\n Rook('White', [\"I\", 1]), Rook('White', [\"P\", 1]), Bishop('Black', [\"C\", 12]),\n Bishop('Black', [\"F\", 12]), Bishop('White', [\"C\", 1]), Bishop('White', [\"F\", 1]),\n Bishop('Black', [\"N\", 12]), Bishop('White', [\"N\", 1]), Bishop('Black', [\"K\", 12]),\n Bishop('White', [\"K\", 1]), Knight('Black', [\"B\", 12]), Knight('Black', [\"G\", 12]),\n Knight('White', [\"B\", 1]), Knight('White', [\"G\", 1]), Knight('Black', [\"J\", 12]),\n Knight('Black', [\"O\", 12]), Knight('White', [\"J\", 1]), Knight('White', [\"O\", 1])]\n\n for objective in self.board:\n self.positions.append(Position(objective.position[0], objective.position[1]))", "title": "" } ]
fb480e403a4d5dd341c51577f1b55220
This is the normalization step generating dbt models files from the destination_catalog.json taken as input.
[ { "docid": "e11d0077aeb8e9e1aae12b5f7489774a", "score": "0.71015024", "text": "def generate_dbt_models(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):\n catalog_processor = CatalogProcessor(os.path.join(test_root_dir, \"models\", \"generated\"), destination_type)\n catalog_processor.process(os.path.join(\"resources\", test_resource_name, \"data_input\", \"catalog.json\"), \"_airbyte_data\", target_schema)", "title": "" } ]
[ { "docid": "ce965d0bf5e58e59351a5e541e0e9128", "score": "0.5778649", "text": "def build_cm_models(output_dir):\n\n for roots,dirs,files in os.walk(output_dir):\n path_to_sto_list=[]\n path_to_cm_list=[]\n path_to_dir=[]\n db_exist=[]\n for name in dirs:\n path_to_dir.append(os.path.join(roots,name))\n for name in files:\n fileName, fileExtension = os.path.splitext(name)\n if fileExtension=='.sto':\n path_to_sto_list.append(os.path.join(roots,name))\n path_to_cm_list.append(os.path.join(roots,fileName+'.cm'))\n elif fileName=='db':\n db_exist.append('True')\n path_to_cm_db=os.path.join(roots,'db')\n if db_exist!=[]:\n del db_exist[0]\n elif (db_exist==[] and path_to_sto_list!=[]):\n for i in range(len(path_to_sto_list)):\n new_path_to_sto=check_path(path_to_sto_list[i])\n new_path_to_cm=check_path(path_to_cm_list[i])\n stdout,stderr,return_value = qcli_system_call('cmbuild --noss -F '+path_to_cm_list[i]+' '+path_to_sto_list[i])\n if return_value != 0:\n print 'Stdout:\\n%s\\nStderr:%s\\n' % (stdout,stderr)\n exit(1)\n\t f=open(path_to_cm_db,'w')\n f.close()\n else:\n pass", "title": "" }, { "docid": "e37cf80606231141947f7aa109934cb7", "score": "0.5734499", "text": "def _generate_driver_data_model(\n client,\n cloudshell_config,\n destination_path,\n package_full_path,\n shell_filename,\n shell_name,\n ):\n url = \"http://{0}:{1}/API/ShellDrivers/Generate\".format(\n cloudshell_config.host, cloudshell_config.port\n )\n token = client.token\n response = post(\n url,\n files={path.basename(shell_filename): open(package_full_path, \"rb\")},\n headers={\"Authorization\": \"Basic \" + token},\n )\n\n if response.status_code != 200:\n error_message = \"Code generation failed with code {0} and error {1}\".format(\n response.status_code, response.text\n )\n click.echo(message=error_message, err=True)\n return\n\n click.echo(\"Extracting data model ...\")\n with TempDirContext(remove_dir_on_error=False, prefix=shell_name) as temp_dir:\n generated_zip = path.join(temp_dir, shell_filename)\n click.echo(\"Writing temporary file {0}\".format(generated_zip))\n with open(generated_zip, \"wb\") as driver_file:\n driver_file.write(response.content)\n\n click.echo(\"Extracting generated code at {0}\".format(destination_path))\n with zipfile.ZipFile(generated_zip) as zf:\n zf.extractall(destination_path)", "title": "" }, { "docid": "4295dbcfdc7e4868206099874a775297", "score": "0.55181944", "text": "def main() -> None:\n\n global MODELS\n\n # Retrieve models.yml from call-parameter for testing purposes, local file or GitHub\n if len(sys.argv) > 1 and sys.argv[1] != \"check\":\n file = sys.argv[1]\n else:\n file = SOURCE\n\n if os.path.isfile(file):\n with open(file, \"rb\") as x:\n models_yml = x.read()\n else:\n models_yml = requests.get(file).content\n\n # calc checksum to assert the models.py is up-to-date\n checksum = hashlib.md5(models_yml).hexdigest()\n\n if len(sys.argv) > 1 and sys.argv[1] == \"check\":\n from openslides_backend.models.models import MODELS_YML_CHECKSUM\n\n assert checksum == MODELS_YML_CHECKSUM\n print(\"models.py is up to date (checksum-comparison)\")\n sys.exit(0)\n\n # Fix broken keys\n models_yml = models_yml.replace(\" yes:\".encode(), ' \"yes\":'.encode())\n models_yml = models_yml.replace(\" no:\".encode(), ' \"no\":'.encode())\n\n # Load and parse models.yml\n MODELS = yaml.safe_load(models_yml)\n with open(DESTINATION, \"w\") as dest:\n dest.write(FILE_TEMPLATE)\n dest.write(\"\\nMODELS_YML_CHECKSUM = \" + repr(checksum) + \"\\n\")\n for collection, fields in 
MODELS.items():\n if collection == \"_migration_index\":\n continue\n model = Model(collection, fields)\n dest.write(model.get_code())\n\n print(f\"Models file {DESTINATION} successfully created.\")", "title": "" }, { "docid": "c881e5c8a4cf12b25ae6150be4bef000", "score": "0.54629475", "text": "def dedup_command(model_directory, output_directory):\n results = util.dedup_model_dir(model_directory)\n output_path = Path(output_directory)\n for protocol, models in results.items():\n for index, (model, versions) in enumerate(models.items()):\n model_name = f\"model-{index + 1}\"\n model_dir = output_path / protocol / model_name\n\n # Create the directory\n model_dir.mkdir(parents=True, exist_ok=True)\n\n # Write the model to this directory, both in Graphviz and JSON\n # format.\n with open(model_dir / \"model.gv\", \"w\") as f:\n f.write(model)\n\n graph = _dot_to_networkx(model)\n graph = util.prefix_nodes(graph, f\"{model_name}_\")\n converted = util.convert_graph(graph, add_resets=True)\n with open(model_dir / \"model.json\", \"w\") as f:\n json.dump(converted, f, indent=4)\n\n # Add the version list\n with open(model_dir / \"versions.json\", \"w\") as f:\n json.dump(versions, f, indent=4)", "title": "" }, { "docid": "4f64a1c3b106124036de56b9e358278b", "score": "0.54433626", "text": "def run(self, schema_files, field_mappings=None, model_renames=None):\n field_mapping = dict((m.key, m.field_class)\n for m in field_mappings or [])\n\n model_renames = dict((m.old, m.new)\n for m in model_renames or [])\n\n schemas = [json.load(schema_fp) for schema_fp in schema_files]\n self.stdout.write(self.generate_models(\n schemas,\n field_mapping=field_mapping,\n model_renames=model_renames))", "title": "" }, { "docid": "1633881c0733a5c94a636d9ee0715dc1", "score": "0.54346806", "text": "def task_convert():\n # Load settings\n wakeword_id = pydash.get(_CONFIG, \"wake_word.id\")\n assert wakeword_id, \"wake_word.id is required\"\n\n model_dir = _DIR / pydash.get(_CONFIG, \"model_directory\", \"models\")\n model_path = model_dir / wakeword_id / f\"{wakeword_id}.net\"\n converted_path = model_path.with_suffix(\".pb\")\n\n yield {\n \"name\": \"convert\",\n \"file_dep\": [model_path],\n \"targets\": [converted_path],\n \"actions\": [\"precise-convert {dependencies}\"],\n }", "title": "" }, { "docid": "4b6bad993a318e5772d731c0fa53afbb", "score": "0.5395181", "text": "def generate_converter(json_filename: str) -> None:\n from os.path import join, dirname, abspath\n from json import load\n\n try:\n with open(json_filename, 'r') as f:\n config = load(f)\n except FileNotFoundError as e:\n raise Exception(f'File {json_filename} is not found.') from e\n\n fileDir = dirname(abspath(__file__))\n output_file = join(fileDir, 'converter_generated.py')\n\n data_sparse = []\n data_other = []\n data_determinant = []\n for group,attr in config.items():\n for data,specs in attr.items():\n name = f'{group}_{data}'\n if 'sparse' in specs[0]:\n data_sparse.append(name)\n elif 'determinant' in group:\n data_determinant.append(name)\n else:\n data_other.append(name)\n\n with open(output_file, 'w') as f_out:\n f_out.write('import trexio \\n')\n f_out.write('def data_handler(trexio_file_from, trexio_file_to) -> None : \\n')\n f_out.write(' buffer_size = 20000 \\n')\n\n # Process the normal data first\n for attr in data_other:\n if 'package_version' in attr:\n continue\n block = f'\\n\\\n if trexio.has_{attr}(trexio_file_from): \\n\\\n data = trexio.read_{attr}(trexio_file_from) \\n\\\n trexio.write_{attr}(trexio_file_to, data) 
\\n\\\n '\n f_out.write(block)\n\n # Now process the sparse data\n for attr in data_sparse:\n block = f'\\n\\\n offset_file = 0 ; eof = False \\n\\\n if trexio.has_{attr}(trexio_file_from): \\n\\\n while(not eof): \\n\\\n indices, values, read_size, eof = trexio.read_{attr}( \\n\\\n trexio_file_from, offset_file, buffer_size \\n\\\n ) \\n\\\n trexio.write_{attr}( \\n\\\n trexio_file_to, offset_file, read_size, indices, values \\n\\\n ) \\n\\\n offset_file += read_size \\n\\\n '\n f_out.write(block)\n\n # Finally process the determinant data\n for attr in data_determinant:\n if 'determinant_num' in attr:\n continue\n block = f'\\n\\\n offset_file = 0 ; eof = False \\n\\\n if trexio.has_{attr}(trexio_file_from): \\n\\\n while(not eof): \\n\\\n data, read_size, eof = trexio.read_{attr}( \\n\\\n trexio_file_from, offset_file, buffer_size \\n\\\n ) \\n\\\n trexio.write_{attr}( \\n\\\n trexio_file_to, offset_file, read_size, data \\n\\\n ) \\n\\\n offset_file += read_size \\n\\\n '\n f_out.write(block)", "title": "" }, { "docid": "a4cde2e02b244c93ea67e13f04c224c4", "score": "0.5393562", "text": "def base_command_generator(source_name: str, table_mappings: list, destination_folder_path: str):\n table_mappings_with_base_models = list()\n\n for table_mapping in table_mappings:\n for table, mapping_data in table_mapping.items():\n os.makedirs(destination_folder_path, exist_ok=True)\n base_cmd = f\"dbt run-operation generate_base_model --args '{{\\\"source_name\\\": \\\"{source_name}\\\",\" \\\n f\" \\\"table_name\\\": \\\"{table}\\\"}}'\"\n\n mapping_data.update({\"base_command\": base_cmd})\n table_mapping[table] = mapping_data\n\n table_mappings_with_base_models.append(table_mapping)\n\n return table_mappings_with_base_models", "title": "" }, { "docid": "24383fc060f30bd0a4eeaac9bc4b4566", "score": "0.5333433", "text": "def build_existing_dynamic_models():\r\n # To avoid circular imports, the model is retrieved from the model cache\r\n MetaModel = models.get_model('dynamo', 'MetaModel')\r\n for meta_model in MetaModel.objects.all():\r\n DynamicModel = meta_model.get_model()\r\n # Create the table if necessary, shouldn't be necessary anyway\r\n create_db_table(DynamicModel)\r\n # While we're at it...\r\n add_necessary_db_columns(DynamicModel)", "title": "" }, { "docid": "dfe279dbe709d33ec2514af570e807e7", "score": "0.53046435", "text": "def prepare_for_export(self) -> None:\n self._model = self.strip_model(self._model)", "title": "" }, { "docid": "5018d2eb44533c31e22038baaf024b48", "score": "0.52425355", "text": "def convert_model(args):\n config, _ = export_helper.initialize_config(args)\n\n eos_token_id = config.eos_token_id\n pad_token_id = config.pad_token_id\n decoder_start_token_id = config.decoder_start_token_id\n\n encoder_path = os.path.join(args.output, \"edinit.onnx\")\n decoder_path = os.path.join(args.output, \"decoder_past.onnx\")\n final_path = os.path.join(args.output, \"model_final.onnx\")\n\n encoder_model = onnx.load(encoder_path, load_external_data=True)\n encoder_model.graph.name = \"encoderdecoderinit subgraph\"\n make_dim_proto_numeric(encoder_model, config)\n\n decoder_model = onnx.load(decoder_path, load_external_data=True)\n decoder_model.graph.name = \"decoder subgraph\"\n make_dim_proto_numeric(decoder_model, config)\n\n inputs = [\n \"input_ids\",\n \"max_length\",\n \"min_length\",\n \"num_beams\",\n \"num_return_sequences\",\n \"length_penalty\",\n \"repetition_penalty\",\n \"\",\n \"\",\n \"attention_mask\",\n ]\n outputs = [\"sequences\"]\n\n node = 
helper.make_node(\"BeamSearch\", inputs=inputs, outputs=outputs, name=\"BeamSearch_zcode\")\n node.domain = \"com.microsoft\"\n # NOTE: take value from args or config\n node.attribute.extend(\n [\n helper.make_attribute(\"eos_token_id\", eos_token_id),\n helper.make_attribute(\"pad_token_id\", pad_token_id),\n helper.make_attribute(\"decoder_start_token_id\", decoder_start_token_id),\n helper.make_attribute(\"no_repeat_ngram_size\", args.no_repeat_ngram_size),\n helper.make_attribute(\"early_stopping\", args.early_stopping),\n helper.make_attribute(\"model_type\", 1),\n helper.make_attribute(\"decoder\", decoder_model.graph),\n helper.make_attribute(\"encoder\", encoder_model.graph),\n ]\n )\n\n # graph inputs\n input_ids = helper.make_tensor_value_info(\"input_ids\", TensorProto.INT32, [\"batch_size\", \"sequence_length\"])\n max_length = helper.make_tensor_value_info(\"max_length\", TensorProto.INT32, [1])\n min_length = helper.make_tensor_value_info(\"min_length\", TensorProto.INT32, [1])\n num_beams = helper.make_tensor_value_info(\"num_beams\", TensorProto.INT32, [1])\n num_return_sequences = helper.make_tensor_value_info(\"num_return_sequences\", TensorProto.INT32, [1])\n length_penalty = helper.make_tensor_value_info(\"length_penalty\", TensorProto.FLOAT, [1])\n repetition_penalty = helper.make_tensor_value_info(\"repetition_penalty\", TensorProto.FLOAT, [1])\n attention_mask = helper.make_tensor_value_info(\n \"attention_mask\", TensorProto.INT32, [\"batch_size\", \"sequence_length\"]\n )\n\n graph_inputs = [\n input_ids,\n max_length,\n min_length,\n num_beams,\n num_return_sequences,\n length_penalty,\n repetition_penalty,\n attention_mask,\n ]\n\n # graph outputs\n sequences = helper.make_tensor_value_info(\n \"sequences\", TensorProto.INT32, [\"batch_size\", \"num_return_sequences\", \"max_length\"]\n )\n initializers = []\n graph_outputs = [sequences]\n new_graph = helper.make_graph([node], \"beam-search-test\", graph_inputs, graph_outputs, initializers)\n\n opset_import = helper.make_opsetid(domain=\"com.microsoft\", version=1)\n # Create the model\n decoder_model.opset_import.append(opset_import)\n new_model = helper.make_model(\n new_graph, producer_name=\"onnxruntime.transformers\", opset_imports=decoder_model.opset_import\n )\n # https://github.com/onnx/onnx/blob/main/onnx/helper.py\n onnx.save(new_model, final_path, save_as_external_data=True, all_tensors_to_one_file=False, convert_attribute=True)\n # check model > 2GB\n print(f\"--- Check the model with path: {final_path} ---\")\n onnx.checker.check_model(final_path, full_check=True)\n onnx.shape_inference.infer_shapes_path(final_path, strict_mode=True)", "title": "" }, { "docid": "629c324b3ba5b602159efa14e394b715", "score": "0.5233622", "text": "def convert_data(src: str, dest: str) -> str:\n utterance_files = glob(os.path.join(src, \"utterances_*\"))\n for filename in utterance_files:\n intent_basename = \"intent_{intent_name}.yaml\".format(\n intent_name=os.path.basename(filename).split(\"_\")[1]\n )\n with open(os.path.join(src, intent_basename)) as f:\n # XXX: assert that the intent is the last yaml \"document\" in the file and we\n # only need to append the \"utterances:\" key to it. 
Otherwise, we could use a\n # YAML library.\n base_yaml = f.read().strip() + \"\\nutterances:\"\n with open(filename) as f:\n expanded_utterances = \"\\n\".join(\n str(' - \"') + line + '\"' for line in expand_file(f)\n )\n with open(os.path.join(dest, intent_basename), \"w\") as f:\n print(base_yaml, file=f)\n print(expanded_utterances, file=f)\n\n # Copy entity yaml files to destination so that the dataset can be generated from\n # that folder only.\n for filename in glob(os.path.join(src, \"entity_*\")):\n shutil.copy(filename, os.path.join(dest, os.path.basename(filename)))\n\n return dest", "title": "" }, { "docid": "3710162f5625ca1cb39692694d6f74d5", "score": "0.52211887", "text": "def make_model(data_model, bases, module, app_label):\n\n output_models = []\n\n for table_json in data_model['tables']:\n\n table_cons = {}\n\n for con_type, con_list in data_model['schema']['constraints'].items():\n\n table_cons[con_type] = []\n\n for con in con_list:\n table_name = con.get('table') or con.get('source_table')\n if table_name == table_json['name']:\n table_cons[con_type].append(con)\n\n table_idxs = []\n\n for index in data_model['schema']['indexes']:\n if index['table'] == table_json['name']:\n table_idxs.append(index)\n\n output_models.append(make_table(table_json, table_cons, table_idxs,\n bases, module, app_label))\n\n return output_models", "title": "" }, { "docid": "33f5799a2459546776e078235c0c4923", "score": "0.5204638", "text": "def generate_odoo(\n service_name, version, schema_dir, force, dest_dir, file_filter):\n version = version.replace('.', '_')\n os.makedirs(dest_dir, exist_ok=True)\n\n prepare(service_name, version, dest_dir, force)\n\n output_dir = os.path.join(\n dest_dir, 'l10n_br_%s_spec/models/%s' % (service_name, version)\n )\n\n filenames = []\n if file_filter:\n for pattern in file_filter.strip('\\'').split('|'):\n filenames += [file for file in Path(schema_dir + '/%s/%s' % (\n service_name, version\n )).rglob(pattern + '*.xsd')]\n else:\n filenames = [file for file in Path(schema_dir + '/%s/%s' % (\n service_name, version\n )).rglob('*.xsd')]", "title": "" }, { "docid": "09751b86bdb7404ef54daa9aad6b8991", "score": "0.5200863", "text": "def prepare_output(ingestion_request_id=None):\n\n ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)\n ingestion_request.update_status(\"WAIT\", \"Creating output products...\")\n\n cmd = \"pg_dump -U dc_user -h {} -n agdc {} > {}\".format(settings.MASTER_NODE,\n ingestion_request.get_database_name(),\n ingestion_request.get_database_dump_path())\n os.system(cmd)\n cmd = \"dropdb -U dc_user -h {} {}\".format(settings.MASTER_NODE, ingestion_request.get_database_name())\n os.system(cmd)\n\n ingestion_request.download_script_path = ingestion_request.get_base_data_path() + \"/bulk_downloader.py\"\n\n with open(ingestion_request.download_script_path, \"w+\") as downloader:\n file_list = \",\".join('\"{}\"'.format(path) for path in glob(ingestion_request.get_base_data_path() + '/*.nc'))\n download_script = base_downloader_script.format(\n file_list=file_list,\n database_dump_file=ingestion_request.get_database_dump_path(),\n base_host=settings.BASE_HOST,\n base_data_path=ingestion_request.get_base_data_path()) + static_script\n downloader.write(download_script)\n\n ingestion_request.update_status(\"OK\", \"Please follow the directions on the right side panel to download your cube.\")", "title": "" }, { "docid": "09fd2c39b62c3bdf1b7b6f50cd1b53bc", "score": "0.5193261", "text": "def run(self, 
disconnect_dbs=True):\n\n self.setup_dir()\n self.prep_data(transformation=self.trans)\n\n self.fit_data()\n\n self.save()\n\n if disconnect_dbs:\n self.disconnect_dbs()", "title": "" }, { "docid": "9ad14048e9d0e506fbba0461313e900a", "score": "0.51871336", "text": "def populate_output_dir(out_dir):\n #copying model generator file to dir\n shutil.copy(model.__file__, os.path.join(out_dir, \"model.py\"))\n #copying this file to dir\n shutil.copy(cfg.__file__, os.path.join(out_dir, \"config.py\"))\n #info file\n with open(os.path.join(out_dir, \"info.txt\"), \"w\") as f:\n print(\"date created (y-m-d):\", util.date_str(), file=f)\n print(\"time created:\", util.time_str(), file=f)\n print(\"git commit hash:\", util.git_hash(), file=f)", "title": "" }, { "docid": "7ae4765f7228c363b5f5466178004a7b", "score": "0.51619124", "text": "def main(\n src_lang, \n tgt_lang, \n output_dir,\n train_sets=list,\n valid_sets=list,\n system_name=None, \n marian_args=None,\n force=False,\n dry_run=False\n ):\n #Gather the settings we used from the original model to\n #get the correct system name for the new traindir we want to create\n user_settings = {'SRC': src_lang, 'TGT': tgt_lang} \n if system_name is not None:\n user_settings['SYSTEM'] = system_name\n decoder_settings = get_decoder_settings(\n src_lang, tgt_lang, user_settings=user_settings, extra_args=marian_args\n )\n output_system_dir = os.path.join(output_dir, decoder_settings.system)\n os.makedirs(output_system_dir, exist_ok=True)\n with open(\n os.path.join(CONFIG.SYSTEMS_DIR, decoder_settings.system, 'config.yml'), \n 'r', encoding='utf-8'\n ) as infile:\n decode_config = yaml.safe_load(infile)\n orig_model_name = os.path.basename(decode_config['models'][0])\n orig_model = os.path.join(\n CONFIG.SYSTEMS_DIR, decoder_settings.system, orig_model_name)\n\n #this will be the new traindir in which we will finetune the model\n #everything will go into the output_dir/system_name (where the system_name\n #will be the same name as our original system, so the user only has to \n #define SYSTEMS_DIR, and the code will use the same settings)\n this_outdir = os.path.join(output_dir, decoder_settings.system)\n\n #these will be the filepaths that we put in the various marian configs\n pretrained_model = os.path.join(this_outdir, 'pretrained.npz')\n finetuned_model = os.path.join(this_outdir, 'model.npz')\n\n #update the settings with the correct traindir that we'll run marian in\n decoder_settings = get_decoder_settings(\n src_lang, tgt_lang, \n user_settings=user_settings, \n extra_args=marian_args, \n traindir=this_outdir\n )\n\n #copy the entire pretrained system (incl bpe models, vocabs, etc.)\n if os.path.exists(this_outdir) and not force:\n logger.info(f\"Using previously existing {this_outdir} (use --force to destructively delete it and start over instead)\")\n else:\n logger.info(f\"Preparing system (copying to {this_outdir})...\")\n shutil.rmtree(this_outdir)\n shutil.copytree(\n os.path.join(CONFIG.SYSTEMS_DIR, decoder_settings.system),\n this_outdir,\n dirs_exist_ok=True\n )\n \n #copy the pretrained model (more for recordkeeping purposes) \n if os.path.exists(pretrained_model):\n logger.info(f\"Using previously existing model for pretraining: {pretrained_model}\")\n else:\n logger.info(f\"Copying {orig_model} to {pretrained_model}\")\n shutil.copy(orig_model, pretrained_model)\n\n #Remove spurrious \\r, etc. 
which mess with line numbering\n logger.info(f\"Preparing data (fixing fake line breaks)...\")\n train_src, train_tgt = train_sets \n valid_src, valid_tgt = valid_sets \n cleaned_train_src, cleaned_train_tgt, train_length = clean_pair(\n this_outdir, train_src, train_tgt, 'train')\n cleaned_valid_src, cleaned_valid_tgt, valid_length = clean_pair(\n this_outdir, valid_src, valid_tgt, 'valid')\n\n #Use the TextProcessor on the data\n if os.path.exists(os.path.join(this_outdir, 'train.INPUT')) and not force:\n logger.info(f\"Using previously generated {this_outdir}/train.* and {this_outdir}/valid.*\")\n else:\n logger.info(\"Preprocessing data (moses, bpe, etc.)...\")\n tp = decoder_settings.text_processor\n train_data = tp.prepare_training_data(\n this_outdir,\n src=cleaned_train_src,\n tgt=cleaned_train_tgt\n )\n valid_data = tp.prepare_training_data(\n this_outdir,\n src=cleaned_valid_src,\n tgt=cleaned_valid_tgt\n )\n #get the filenames in line with the names in the marian train config\n shutil.move(train_data['src'], os.path.join(this_outdir, 'train.INPUT'))\n shutil.move(train_data['tgt'], os.path.join(this_outdir, 'train.OUTPUT'))\n shutil.move(valid_data['src'], os.path.join(this_outdir, 'valid.INPUT'))\n shutil.move(valid_data['tgt'], os.path.join(this_outdir, 'valid.OUTPUT'))\n shutil.copyfile(valid_tgt, os.path.join(this_outdir, 'valid.REF'))\n\n #edit the train config to use our pretrained model (otherwise keep it the same)\n train_config = os.path.join(this_outdir, 'train.yml')\n logger.info(f\"Creating train config {train_config}\")\n with open(decoder_settings.train_config, 'r', encoding='utf-8') as infile, \\\n open(train_config, 'w', encoding='utf-8') as outfile:\n marian_config = yaml.safe_load(infile)\n marian_config['pretrained-model'] = os.path.basename(pretrained_model)\n yaml.dump(marian_config, outfile)\n\n #copy decode config for later use; just change it to the finetuned model\n decode_config_file_1 = os.path.join(this_outdir, 'config.yml')\n decode_config_file_2 = os.path.join(this_outdir, 'config-fast.yml')\n logger.info(f\"Creating decode configs {decode_config_file_1}\")\n with open(decode_config_file_1, 'w', encoding='utf-8') as outfile1, \\\n open(decode_config_file_2, 'w', encoding='utf-8') as outfile2:\n decode_config['models'] = [os.path.basename(finetuned_model)]\n yaml.dump(decode_config, outfile1)\n yaml.dump(decode_config, outfile2)\n\n #change directory because the marian train config has relative paths;\n #update the path of the config in the command to be just train.yml\n cwd = os.getcwd()\n os.chdir(this_outdir)\n idx = decoder_settings.cmd.index('-c') + 1\n decoder_settings.cmd[idx] = 'train.yml'\n\n #prepare the environment variables for the marian subprocess\n my_env = os.environ.copy()\n attrs = all_members(CONFIG)\n for attr in attrs:\n my_env[attr] = str(attrs[attr])\n\n cmd = ' '.join(decoder_settings.cmd)\n logger.info(f'IN {this_outdir}; RUNNING: {cmd}')\n\n if dry_run:\n logger.info(f\"Dry run done.\")\n else: \n try:\n subprocess.check_output(cmd, stderr=sys.stderr, shell=True, env=my_env)\n except subprocess.CalledProcessError as e:\n return e.returncode\n \n os.chdir(cwd)\n logger.info(f\"Finished training in {this_outdir}\")", "title": "" }, { "docid": "8c29aef4d2b5649ed260cc6ef25c4d89", "score": "0.5160901", "text": "def convert(indir=indir,\n defdir=defdir,\n targeturl=migratieurl,\n baseurl=baseurl,\n namespace=namespace,\n ns=ns\n ): \n ingforms = recursive_glob(indir,'*.xml')\n print (\"forms read from %s\" %indir)\n \n for item in 
ingforms:\n try:\n converted = JsonForm(indir,\n item,\n baseurl,\n namespace,\n )\n infl = os.path.split(item)[1]\n outfl = os.path.splitext(infl)[0] + '.json'\n outdir = indir.replace('ingforms', 'json')\n if not os.path.exists(outdir):\n os.makedirs(outdir, mode=0777)\n outf = os.path.join(outdir, outfl)\n outfl = open(outf, 'w')\n dump(converted.jsonfl, outfl, indent=4)\n\n except IOError:\n pass\n print (\"json written to %s\" %outdir)\n print (\"number of files %s\" % len(ingforms))", "title": "" }, { "docid": "016e898ef881b9951d70c326e9e4b62a", "score": "0.5154462", "text": "def main():\r\n parser = OptionParser()\r\n mode= MODE_CREATE\r\n parser.add_option(\"-m\", \"--model\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"model\",\r\n help=\"defines the model for this migration.\",\r\n default =\"None\")\r\n\r\n parser.add_option(\"-c\", \"--comment\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"comment\",\r\n help=\"defines a comment for this migration.\",\r\n default =\"No Comment\")\r\n\r\n parser.add_option(\"-d\", \"--column-definitions\",\r\n action=\"store\",\r\n type=\"string\", dest=\"col_defs\",\r\n help=\"\"\"column definitions.Form: d- 'NAME TYPE opt, NAME2 TYPE2 opt2'\r\n Name, type, options (all SQLAlchemy style).\"\"\",\r\n default=\"None\")\r\n\r\n parser.add_option(\"-f\", \"--force\",\r\n action=\"store_true\",\r\n dest=\"force\",\r\n help=\"forces overrides of existing files\",\r\n default=False)\r\n #\r\n # column definition format: NAME TYPE opt1 opt2 optn, NAME2 TYPE2 opt1 opt2 optn\r\n #\r\n start = datetime.datetime.now()\r\n\r\n\r\n (options, args) = parser.parse_args()\r\n\r\n #if no model given and no parameter at all, then quit with error\r\n if options.model == \"None\" and len(args) < 1:\r\n parser.error(\"You must at least specify an migration name by giving -n <name>.\")\r\n return\r\n else:\r\n # if no model given but a parameter, than assume that the first parameter\r\n # is the model\r\n if options.model == \"None\":\r\n options.model = args[0]\r\n\r\n print \"generate_mvc for model:\", options.model\r\n # generate the model\r\n generate_model.render_model(modelname = options.model,\r\n force = options.force,\r\n comment = options.comment\r\n )\r\n print\r\n # generate the Controller\r\n generate_controller.render_controller( name = options.model,\r\n force = options.force\r\n )\r\n\r\n print\r\n # generate the views\r\n generate_scaffold.scaffold(modelname = options.model,\r\n force = options.force,\r\n actions = [\"list\", \"show\",\"create\", \"edit\", \"message\"]\r\n )\r\n\r\n print\r\n # generate the migration\r\n # ooptions_col_defs has to be a comma separated list of column names and SQLAlchemy DB types:\r\n # example: lastname String(100), email String(100)\r\n col_defs = options.col_defs\r\n\r\n generate_migration.render_migration( modelname = options.model,\r\n comment = options.comment,\r\n col_defs = options.col_defs\r\n )\r\n print\r\n\r\n\r\n end = datetime.datetime.now()\r\n duration = end - start\r\n print \"generated_mvc in(\"+ str(duration) +\")\"\r\n print\r\n return", "title": "" }, { "docid": "61ac81b3331ded85fdb60d54c236f08b", "score": "0.51474655", "text": "def stanza_dep_model(model: ModelOutput = ModelOutput(\"stanza/dep/sv_talbanken_parser.pt\"),\n pretrain: ModelOutput = ModelOutput(\"stanza/dep/sv_talbanken.pretrain.pt\")):\n zip_model = Model(\"stanza/dep/synt_stanza_full.zip\")\n zip_model.download(\"https://svn.spraakdata.gu.se/sb-arkiv/pub/stanza/synt_stanza_full.zip\")\n zip_model.unzip()\n 
zip_model.remove()", "title": "" }, { "docid": "5647d3fbfc9f3f00bbee6cfaa8410c16", "score": "0.5140143", "text": "def main(args=None):\n\n if args is None:\n parser = create_parser()\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n input_module = input_mapping[args.input_reader]\n output_module = output_mapping[args.output_format]\n\n templates = []\n # Load templates from external folder if set.\n if args.template_folder:\n templates += read_templates(os.path.abspath(args.template_folder))\n\n # Load internal templates, if not disabled.\n if not args.exclude_built_in_templates:\n templates += read_templates()\n output = []\n for f in args.input_files:\n res = extract_data(f.name, templates=templates, input_module=input_module)\n if res == 'pdf seperated':\n continue\n re = None\n if res:\n logger.info(res)\n output.append(res)\n if args.dbpass is not None:\n re = output_module.write_to_db(res, f.name, args.output_date_format, \n args.dbhost, args.dbuser, args.dbpass, args.dbname,\n args.azure_account, args.azure_key, args.pdf_path)\n\n if args.copy:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.copyfile(f.name, join(args.copy, filename))\n if args.move:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.move(f.name, join(args.move, filename))\n f.close()\n if args.dbpass is not None:\n #move source pdf\n pdfdirectory = os.path.dirname(f.name) #failedTemp\n pdfpath = f.name\n pdfname = os.path.basename(f.name)\n if re == 'succeed':\n\n #move to successful\n #succeed_path = join(pdfdirectory, 'successful')\n #move to public successful folder where clients can access\n succeed_path = os.path.abspath(os.path.join(pdfdirectory, os.pardir))\n succeed_path = join(succeed_path, 'successful')\n\n from datetime import date\n succeed_path = join(succeed_path, date.today().strftime('%d-%m-%Y'))\n\n if not os.path.exists(succeed_path):\n os.makedirs(succeed_path)\n destinateFile = join(succeed_path, pdfname)\n shutil.move(pdfpath, destinateFile)\n pass\n elif re == 'link db failed':\n pass\n elif re == 'exists':\n #delete\n os.remove(pdfpath)\n pass\n\n else:\n #move to failed\n failed_path = join(pdfdirectory, 'failed')\n if not os.path.exists(failed_path):\n os.makedirs(failed_path)\n destinateFile = join(failed_path, pdfname)\n shutil.move(pdfpath, destinateFile)\n pass\n\n\n \n if output_module is not None:\n if args.dbpass is not None:\n pass #for data base output, do it in loop of extracting\n else:\n output_module.write_to_file(output, args.output_name, args.output_date_format)", "title": "" }, { "docid": "eb9ff348ed08b4b8a3d5b6d2705e12a2", "score": "0.51338387", "text": "def create_VDJ_tables(dir, dir_out):\n for file in os.listdir(dir):\n filename = os.fsdecode(file)\n if filename.startswith(\"metadata\"):\n os.system(VDJTOOLS_EXECUTABLE+\"Convert -S imgthighvquest -m \"+dir+'/'+filename+\" vdjtools_tmp\")\n with open(dir+'/'+filename, 'r') as meta_file:\n with open (\"vdjtools_tmp.metadata.tsv\", 'w') as new_metafile:\n for line in meta_file.read().strip().split('\\n'):\n line = line.replace(\"sample.id\", \"Sample\")\n values = line.split('\\t')\n new_metafile.write('\\t'.join(values[1:])+'\\n')\n \n move_vdj_files(dir_out)", "title": "" }, { "docid": "c84912c406a3aa2f9cd118cb346c3d19", "score": 
"0.51328206", "text": "def convert_model(self, backend, model, weight, **kwargs):\n om_save_path = kwargs[\"save_dir\"]\n input_shape = kwargs[\"input_shape\"]\n precision = kwargs['precision']\n log_save_path = os.path.dirname(model)\n\n command_line = [\"bash\", self.current_path + \"/model_convert.sh\", self.davinci_environment_type, backend, model,\n weight, om_save_path, log_save_path, input_shape, precision]\n try:\n subprocess.check_output(command_line)\n except subprocess.CalledProcessError as exc:\n logging.error(\"convert model to om model failed. the return message is : {}.\".format(exc))", "title": "" }, { "docid": "b719858e4842073378a6a8ba5396dee1", "score": "0.51145583", "text": "def generate_mappers() -> None:\n\n # import or define all models here to ensure they are attached to the\n # Base.metadata prior to any initialization routines\n import c2cgeoportal_commons.models.main # noqa: F401\n\n # run configure_mappers after defining all of the models to ensure\n # all relationships can be setup\n configure_mappers()", "title": "" }, { "docid": "7d0c8c62cb736263e87cec58d19d1453", "score": "0.51118916", "text": "def inference(args) -> NoReturn:\n\n # Arguments & parameters\n config_yaml = args.config_yaml\n checkpoint_path = args.checkpoint_path\n audios_dir = args.audios_dir\n output_dir = args.output_dir\n scale_volume = args.scale_volume\n device = (\n torch.device('cuda')\n if args.cuda and torch.cuda.is_available()\n else torch.device('cpu')\n )\n\n configs = read_yaml(config_yaml)\n sample_rate = configs['train']['sample_rate']\n input_channels = configs['train']['channels']\n target_source_types = configs['train']['target_source_types']\n target_sources_num = len(target_source_types)\n model_type = configs['train']['model_type']\n mono = input_channels == 1\n\n segment_samples = int(30 * sample_rate)\n batch_size = 1\n device = \"cuda\"\n\n models_contains_inplaceabn = True\n\n # Need to use torch.distributed if models contain inplace_abn.abn.InPlaceABNSync.\n if models_contains_inplaceabn:\n\n import torch.distributed as dist\n\n dist.init_process_group(\n 'gloo', init_method='file:///tmp/somefile', rank=0, world_size=1\n )\n\n print(\"Using {} for separating ..\".format(device))\n\n # paths\n os.makedirs(output_dir, exist_ok=True)\n\n # Get model class.\n Model = get_model_class(model_type)\n\n # Create model.\n model = Model(input_channels=input_channels, target_sources_num=target_sources_num)\n\n # Load checkpoint.\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(checkpoint[\"model\"])\n\n # Move model to device.\n model.to(device)\n\n # Create separator.\n separator = Separator(\n model=model,\n segment_samples=segment_samples,\n batch_size=batch_size,\n device=device,\n )\n\n audio_names = sorted(os.listdir(audios_dir))\n\n for audio_name in audio_names:\n audio_path = os.path.join(audios_dir, audio_name)\n\n # Load audio.\n audio, _ = librosa.load(audio_path, sr=sample_rate, mono=mono)\n\n if audio.ndim == 1:\n audio = audio[None, :]\n\n input_dict = {'waveform': audio}\n\n # Separate\n separate_time = time.time()\n\n sep_wav = separator.separate(input_dict)\n # (channels_num, audio_samples)\n\n print('Separate time: {:.3f} s'.format(time.time() - separate_time))\n\n # Write out separated audio.\n if scale_volume:\n sep_wav /= np.max(np.abs(sep_wav))\n\n soundfile.write(file='_zz.wav', data=sep_wav.T, samplerate=sample_rate)\n\n output_path = os.path.join(\n output_dir, '{}.mp3'.format(pathlib.Path(audio_name).stem)\n )\n 
os.system('ffmpeg -y -loglevel panic -i _zz.wav \"{}\"'.format(output_path))\n print('Write out to {}'.format(output_path))", "title": "" }, { "docid": "6494b7367eb2b2e9fffe080afb951264", "score": "0.50934845", "text": "def _import_data_models(model_dir, verbose=False):\n gen_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), model_dir)\n\n if os.path.exists(gen_dir) and os.path.isdir(gen_dir):\n if verbose > 0:\n print 'The base code-gen directory is %s' % gen_dir\n\n # Walk all the files in the generated code directory and look for YIN XM files\n # and use that to determine the python code-generated names\n\n xml_files = [f for f in os.listdir(gen_dir)\n if os.path.isfile(os.path.join(gen_dir, f)) and\n f.split('.')[-1].lower() == 'xml'\n ]\n\n if verbose > 0:\n print 'The list of XML files in the generated directory is: %s' % xml_files\n\n for filename in xml_files:\n # The class name for the model is the same as the first part of the filename\n\n model = YangModel(gen_dir, filename, model_dir, verbose=verbose)\n\n if verbose > 0:\n print \"Found model '%s' in '%s'\" % (model.name, filename)\n\n data_models.append(model)\n\n if verbose:\n print('_import_models found %d YANG models', len(data_models))", "title": "" }, { "docid": "2d847e48878929b223ed5a299a22b1a4", "score": "0.50885826", "text": "def handle(self, *args, **options): \n ## Cycle through command line arguments\n if len(args) <> 1:\n print(\"Only a single argument should be passed to this command, the model file path.\")\n sys.exit(1)\n else:\n file_in = args[0] \n \n source_filename = file_in.split(\"/\")[-1]\n \n if source_filename in Source.objects.all().values_list(\"name\",flat=True):\n source = Source.objects.get(name=source_filename)\n else:\n source = Source(\n name=source_filename\n )\n source.save()\n \n ## Create reaction IDs list\n \n mnxrxn_id_list = Reaction.objects.all().values_list(\"name\", flat=True)\n \n# ## Create id to reaction dictionary from Reaction and to metabolite dictionary from Metabolite\n# \n# print(\"Creating metabolite and reaction dictionaries ...\")\n# \n# rxns = Reaction.objects.all()\n# rxn_dict = {}\n# for rxn in rxns:\n# rxn_dict[rxn.id] = rxn\n# \n# mets = Metabolite.objects.all()\n# met_dict = {}\n# for met in mets:\n# met_dict[met.id] = met\n \n ## Create Compartment dictionary\n \n print(\"Creating compartment and direction dictionaries ...\")\n \n comps = Compartment.objects.all()\n comp_dict = {}\n for comp in comps:\n comp_dict[comp.id] = comp\n \n directions = Direction.objects.all()\n direction_dict = {}\n for direction in directions:\n direction_dict[direction.direction] = direction\n \n \n ## Count incompletely compartmentalised reactions\n \n init_num_incomplete = Reaction.objects.filter(stoichiometry__compartment__isnull=True).distinct().count()\n \n \n ## Create Stoichiometry dictionary, using reaction, metabolite and stoichiometry as \n \n print(\"Creating stoichiometry dictionary ...\")\n\n stos = Stoichiometry.objects.filter(source__name=\"metanetx\").prefetch_related()\n sto_dict = {}\n \n loop = loop_counter(len(stos))\n \n for sto in stos:\n sto_dict[(sto.reaction.name, sto.metabolite.id, sto.stoichiometry)] = sto \n loop.step()\n\n loop.stop()\n\n \n f_in = open(file_in, 'r')\n num_lines = 0\n for line in f_in:\n num_lines += 1\n f_in.close()\n f_in = open(file_in, 'r')\n \n# ## Initiate counter\n# num_tot = num_lines\n# num_done = 0\n# next_progress = 1\n# sys.stdout.write(\"\\r - %d %%\" % num_done)\n# sys.stdout.flush()\n \n print(\"Importing 
stoichiometries from model file ...\")\n \n for line in f_in:\n if line[0] != \"#\":\n line_old = line[:]\n cols = line.strip().split(\"\\t\")\n if (len(cols) >= 4):\n if (len(cols[3]) > 0):\n# print(\"Analysing reaction details ...\")\n equation = cols[1]\n mnxrxn_id = cols[3]\n \n #print(\"- %s\" % mnxrxn_id)\n #print(\"- '%s'\" % line_old)\n \n if mnxrxn_id not in mnxrxn_id_list:\n# print(\"No MetaNetX reaction ID (%s), skipping ...\" % mnxrxn_id)\n continue\n \n \n ## Determine reaction directionality\n try:\n if \"-->\" in equation:\n model_direction = \"ltr\"\n lhs, rhs = equation.split(\" --> \")\n elif \"<--\" in equation:\n model_direction = \"rtl\"\n lhs, rhs = equation.split(\" <-- \")\n elif \"<==>\" in equation:\n model_direction = \"both\"\n lhs, rhs = equation.split(\" <==> \")\n else:\n# print(\"Reaction did not split correctly ...\")\n# print(\" - %s\" % equation)\n continue\n except:\n# print(\"Reaction did not split correctly ...\")\n# print(\" - %s\" % equation)\n continue\n \n ## Determine way round of reaction and correct if different from MetaNetX\n \n sto_met_comps = lhs.split(\" + \")\n sto_met_comp_1 = sto_met_comps.pop(0)\n \n try:\n first_sto, first_met_comp = sto_met_comp_1.split(\" \")\n first_sto = -1*float(first_sto)\n first_met, _ = first_met_comp.split(\"@\")\n except:\n# print(\"Reaction did not conform to required format, skipping ...\")\n# print(\" - %s\" % equation)\n continue\n \n sto_tuple = (mnxrxn_id, first_met, first_sto)\n \n ### Single tuple used to determine direction is not robust due to transport. Therefore, create list of all tuples and see which way round yields the most hits and choose that\n \n ## Create all tuples, including reverse ones\n sto_met_comps_lhs = lhs.split(\" + \")\n sto_met_comps_rhs = rhs.split(\" + \")\n sto_tuples_model = []\n sto_tuples_reverse = []\n sto_tuple_comp_dict = {}\n for sto_met_comp in sto_met_comps_lhs:\n sto, met_comp = sto_met_comp.split(\" \")\n sto = -1*float(sto)\n met_id, comp_id = met_comp.split(\"@\")\n \n sto_tuple = (mnxrxn_id, met_id, sto)\n sto_tuples_model.append(sto_tuple)\n sto_tuple_comp_dict[(sto_tuple, \"model\")] = comp_id\n \n sto_tuple = (mnxrxn_id, met_id, -1*sto)\n sto_tuples_reverse.append(sto_tuple)\n sto_tuple_comp_dict[(sto_tuple, \"reverse\")] = comp_id\n \n for sto_met_comp in sto_met_comps_rhs:\n sto, met_comp = sto_met_comp.split(\" \")\n sto = float(sto)\n met_id, comp_id = met_comp.split(\"@\")\n \n sto_tuple = (mnxrxn_id, met_id, sto)\n sto_tuples_model.append(sto_tuple)\n sto_tuple_comp_dict[(sto_tuple, \"model\")] = comp_id\n \n sto_tuple = (mnxrxn_id, met_id, -1*sto)\n sto_tuples_reverse.append(sto_tuple)\n sto_tuple_comp_dict[(sto_tuple, \"reverse\")] = comp_id\n \n \n ## Test which orientation fits best with the database\n correct_count = 0\n backward_count = 0\n \n for sto_tuple in sto_tuples_model:\n if sto_tuple in sto_dict:\n correct_count += 1\n \n for sto_tuple in sto_tuples_reverse:\n if sto_tuple in sto_dict:\n backward_count += 1\n \n if correct_count >= backward_count:\n equation_orientation = \"model\"\n# print(\"Order is correct ...\")\n sto_tuples = sto_tuples_model\n else:\n equation_orientation = \"reverse\"\n# print(\"Order is reverse ...\")\n if model_direction != \"both\":\n model_direction = model_direction[::-1]\n sto_tuples = sto_tuples_reverse\n \n# print(\"Assigning compartments ...\")\n \n ## Use the relevant direction to assign compartments and directionality to all metabolites and reactions respectively\n for sto_tuple in sto_tuples:\n try:\n 
stoichiometry = sto_dict[sto_tuple]\n \n stoichiometry.compartment = comp_dict[sto_tuple_comp_dict[(sto_tuple, equation_orientation)]]\n \n stoichiometry.save()\n \n #stoichiometry.direction = direction_dict[model_direction]\n \n #stoichiometry.save()\n except:\n# print(\"%s tuple not found, adding to DB ...\" % str(sto_tuple))\n \n rxn = Reaction.objects.get(name=sto_tuple[0], source__name=\"metanetx\")\n met = Metabolite.objects.get(id=sto_tuple[1])\n sto = sto_tuple[2]\n try:\n compartment = comp_dict[sto_tuple_comp_dict[(sto_tuple, equation_orientation)]]\n except:\n compartment = None\n #direction = direction_dict[model_direction]\n \n if Stoichiometry.objects.filter(reaction=rxn, metabolite=met, source__name=\"metanetx\").count() > 0:\n# print(\" - Metabolite found in reaction, replacing with new stoichiometry ...\")\n Stoichiometry.objects.filter(reaction=rxn, metabolite=met, source__name=\"metanetx\").delete()\n \n stoichiometry = Stoichiometry(\n reaction=rxn,\n metabolite=met,\n source=source,\n compartment=compartment,\n #direction=direction,\n stoichiometry=sto\n )\n stoichiometry.save()\n \n# print(\"Checking completeness ...\")\n \n ##! For each reaction affected, if there are still uncompartmentalised metabolites, print reaction\n \n stoichiometries = Stoichiometry.objects.filter(reaction__name=sto_tuple[0], source__name=\"metanetx\")\n num_stos = len(stoichiometries)\n num_with_comps = 0\n \n for sto in stoichiometries:\n if sto.compartment:\n num_with_comps += 1\n rxn_name = sto.reaction.name\n \n if num_with_comps < num_stos:\n print(\"-> Reaction '%s' has incomplete compartmentalisation ...\" % rxn_name)\n# else:\n# print(\"-> Reaction '%s' has complete compartmentalisation ...\" % rxn_name)\n \n final_num_incomplete = Reaction.objects.filter(stoichiometry__compartment__isnull=True).distinct().count()\n \n num_completed = init_num_incomplete - final_num_incomplete\n \n print(\"This model completed the compartmentalisation of %d reactions.\" % num_completed)", "title": "" }, { "docid": "5056ce3950e74f904841c826a6cd1855", "score": "0.5066268", "text": "def execute(self):\n config = self.get_config()\n\n self.create_views(sources=config['sources'])\n df = self.transform(config=config)\n df = self.add_meta_data_and_primary_key(data_frame=df)\n\n self.save(data_frame=df, config=config)", "title": "" }, { "docid": "969e1a0d0180917e14505c66b3c6ed1f", "score": "0.50577337", "text": "def test5_normalize(self):\n self.trainer._normalize()\n self.assertFileExists('normproto')", "title": "" }, { "docid": "caddba5ca673f4f0002283313417838e", "score": "0.5049235", "text": "def _generateCRUDImplForModel(self, appname, domain, modelSpec ):\n\n modelSpecFile = None\n if not q.system.fs.isFile(modelSpec):\n model_spec_dir = q.system.fs.joinPaths(q.dirs.pyAppsDir, appname, \"interface\", \"model\")\n modelSpecFile = q.system.fs.joinPaths (model_spec_dir, domain, \"%s.py\"%modelSpec )\n self._generator = CloudApiGenerator(appname)\n self._generator._template_path = self._template_path\n\n modelFiles = self._generator._generateModelImpl(modelSpecFile, appname, domain )\n\n print \"Generated Files are :%s\"%modelFiles\n return modelFiles", "title": "" }, { "docid": "f41464a336ce2ca39b246aa9770dc334", "score": "0.50451416", "text": "def prepare_for_export(self) -> None:\n stripped_model = self._model\n for ctrl in self.child_ctrls:\n stripped_model = ctrl.strip_model(stripped_model)\n self._model = stripped_model", "title": "" }, { "docid": "a499e8bb0bcd79461edf976d11f711b9", "score": 
"0.50428534", "text": "def export_model():\n\n # TODO: implementation\n\n return", "title": "" }, { "docid": "d8f1bbcb049662112d5f03c64cc31d34", "score": "0.50319195", "text": "def main():\r\n \r\n parser = OptionParser()\r\n mode= MODE_CREATE\r\n parser.add_option( \"-n\", \"--name\", \r\n action=\"store\", \r\n type=\"string\", \r\n dest=\"name\", \r\n help=\"creates model named model-name\", \r\n default =\"None\")\r\n parser.add_option( \"-a\", \"--attributes\", \r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"actions\",\r\n help=\"defines the attributes included in the model.\",\r\n default =\"None\")\r\n parser.add_option( \"-f\", \"--force\",\r\n action=\"store_true\",\r\n dest=\"force\",\r\n help=\"forces overrides of existing files\",\r\n default=\"False\")\r\n parser.add_option( \"-c\", \"--comment\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"comment\",\r\n help=\"defines a comment for this model.\",\r\n default =\"No Comment\")\r\n parser.add_option( \"-p\", \"--path\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"path\",\r\n help=\"sets the model output psth.\",\r\n default =\"./\")\r\n \r\n (options, args) = parser.parse_args()\r\n #print options\r\n if options.name == \"None\":\r\n if len(args) > 0:\r\n # if no option flag (like -n) is given, it is assumed that the \r\n # first argument is the model name. (representing -n arg1)\r\n options.name = args[0]\r\n else:\r\n parser.error(\"You must at least specify an appname by giving -n <name>.\")\r\n\r\n #model_dir = os.path.normpath(\"./models/\")\r\n model_dir = os.path.normpath(options.path)\r\n modelname = options.name\r\n start = None\r\n end = None\r\n start = datetime.datetime.now()\r\n \r\n render_model(modelname, options.force, options.comment, model_dir)\r\n \r\n end = datetime.datetime.now()\r\n duration = None\r\n duration = end - start \r\n print \"generated_model in(\"+ str(duration) +\")\"\r\n print \"\"\r\n return", "title": "" }, { "docid": "673b106c5863839d9621c320107b5e23", "score": "0.5021565", "text": "def dbt_run(test_root_dir: str):\n # Perform sanity check on dbt project settings\n assert run_check_dbt_command(\"debug\", test_root_dir)\n final_sql_files = os.path.join(test_root_dir, \"final\")\n shutil.rmtree(final_sql_files, ignore_errors=True)\n # Compile dbt models files into destination sql dialect, then run the transformation queries\n dbt_run_succeeded = run_check_dbt_command(\"run\", test_root_dir)\n # Copy final SQL files to persist them in git\n # shutil.copytree(os.path.join(test_root_dir, \"..\", \"build\", \"run\", \"airbyte_utils\", \"models\", \"generated\"), final_sql_files)\n assert dbt_run_succeeded", "title": "" }, { "docid": "736318c25d7dfb5cb80b75b91dd9e930", "score": "0.501846", "text": "def migrate_and_import(dictionary_dir):\n call_command(\"migrate\", \"API\")\n import_xmls(dictionary_dir)", "title": "" }, { "docid": "f3a439792aa33b82f161d5651dc57ef1", "score": "0.50175977", "text": "def generate_models(self, schemas, field_mapping={}, model_renames={}):\n first, remainder = schemas[0], schemas[1:]\n first_chunk = self.generate_model(first, field_mapping, model_renames)\n remainder_chunk = u''.join([\n self.generate_model(subsequent,\n field_mapping,\n model_renames,\n include_header=False)\n for subsequent in remainder])\n return u'\\n'.join([\n first_chunk,\n remainder_chunk,\n ])", "title": "" }, { "docid": "f11b5058a51dce00f5435ad2ed0028ac", "score": "0.5011621", "text": "def finalmodel(outfolder): # type: (str) -> None\n\n rawdata = 
helpers.refdf.copy(deep=True)\n print('Raw data: ' + str(rawdata.shape))\n rawdata.set_index('Clause ID', inplace=True)\n # sourcedata = helpers.dedupdf.copy(deep=True)\n # print('Deduped data: ' + str(sourcedata.shape))\n sourcedata = helpers.refdf.copy(deep=True)\n print('Raw data: ' + str(sourcedata.shape))\n sourcedata = sourcedata[sourcedata['Clause Text'].map(helpers.goodsize)]\n print('Sized data: ' + str(sourcedata.shape))\n sourcedata.set_index('Clause ID', inplace=True)\n\n traindata = pd.DataFrame({\n 'text': sourcedata['Clause Text'],\n 'labels': sourcedata['Classification']\n }, index=sourcedata.index)\n\n evaldata = pd.DataFrame({\n 'text': rawdata['Clause Text'],\n 'labels': rawdata['Classification']\n }, index=rawdata.index)\n\n print('Data for BERT: ' + str(traindata.shape))\n\n accargs = buildbertargs()\n accargs.output_dir = outfolder\n accmodel = ClassificationModel('roberta', 'roberta-base', args=accargs, weight=[2, 1])\n accmodel.train_model(traindata)\n\n print('---------------')\n print('Training Data Eval:')\n\n result, model_outputs, wrong_predictions = accmodel.eval_model(traindata)\n print(result)\n\n print('---------------')\n print('Full Data Eval:')\n\n result, model_outputs, wrong_predictions = accmodel.eval_model(evaldata)\n # {'mcc': 0.9062028924099057, 'tp': 4835, 'tn': 1368, 'fp': 74, 'fn': 140, 'eval_loss': 0.18330956540325125}\n print(result)", "title": "" }, { "docid": "50e395bbd17639e0b77571a20233c59f", "score": "0.49742517", "text": "def main(datatype, outputdir, inputdir):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n outputdir = os.path.realpath(outputdir)\n inputdir = os.path.realpath(inputdir)\n datadir = os.path.join(outputdir,datatype)\n os.system(\"mkdir \" + datadir)\n if datatype == \"qm9mmff\":\n logger.info('QM9_MMFF')\n os.chdir(datadir)\n ### split data ###\n partition = {\"train\":99000,\"validation\":1000,\"test_live\":1000}\n total_num = 133885\n if not os.path.isfile(\"split_infor.npz\"):\n train_id,val_id,test_live_id,test_id = split_data_qm9(partition, total_num, False)\n else:\n split = np.load(\"split_infor.npz\")\n train_id,val_id,test_live_id,test_id = split[\"train\"],split[\"validation\"],split[\"test_live\"],split[\"test\"]\n logger.info(\"Finish split\")\n #tar = tarfile.open(os.path.join(inputdir,\"qm9_mmff.tar.bz2\"))\n tfrecord = os.path.join(inputdir, \"qm9_mmff.tfrecord\")\n ### training ###\n outfile = \"train.tfrecord\"\n write_tfrecord(tfrecord,outfile,train_id)\n logger.info(\"Finish train\")\n ### validation ###\n outfile = \"validation.tfrecord\"\n write_tfrecord(tfrecord,outfile,val_id)\n logger.info(\"Finish validation\")\n ### test_live ###\n outfile = \"testlive.tfrecord\"\n write_tfrecord(tfrecord,outfile,test_live_id)\n logger.info(\"Finish testlive\")\n ### test ###\n outfile = \"test.tfrecord\"\n write_tfrecord(tfrecord,outfile,test_id)\n logger.info(\"Finish test\")\n #tar.close()\n ### new_molecules ###\n os.system(\"cp \" + os.path.join(inputdir,\"new_molecules_new.tfrecord\") + \" new_molecules.tfrecord\")\n ### get mu, std ###\n train = \"train.tfrecord\"\n size = len(train_id)\n position = \"positions\"\n reference = os.path.join(inputdir,\"atomrefs.txt.npz\")\n outfile = open(\"mu_std.csv\",\"w\")\n getmustd_final(train,position,size,reference,outfile)\n ### get atom ref ###\n os.system(\"cp \" + reference + \" .\")\n\n logger.info('Done')\n\n elif datatype == \"emol9mmff\":\n logger.info(\"eMol9_MMFF\")\n os.chdir(datadir)\n ### split_data ###\n 
total_file = os.path.join(inputdir,\"eMol9_mmff_index.csv\")\n partition = {\"test\":1348,\"validation\":500,\"test_live\":500}\n if not os.path.isfile(\"split_infor.npz\"):\n traindf, valdf, testdf, testlivedf = split_data_eMol(total_file, partition[\"test\"], partition[\"validation\"], partition[\"test_live\"])\n traindf,valdf,testdf,testlivedf = traindf[\"index\"].values,valdf[\"index\"].values,testdf[\"index\"].values,testlivedf[\"index\"].values\n else:\n data = np.load(\"split_infor.npz\")\n traindf,valdf,testdf,testlivedf = data[\"train_idx\"], data[\"validation_idx\"],data[\"test_idx\"],data[\"test_live_idx\"]\n ## write set ###\n #tar = tarfile.open((os.path.join(inputdir,\"eMol9_MMFF.tar.bz2\")))\n tar = os.path.join(inputdir,\"eMol9_MMFF/\")\n ### training ###\n outtfile = 'train.tfrecord'\n processeMol(tar,outtfile,traindf)\n ### validation ###\n outtfile = 'validation.tfrecord'\n processeMol(tar,outtfile,valdf)\n ### test_live ###\n outtfile = 'testlive.tfrecord'\n processeMol(tar,outtfile,testlivedf)\n ### test ###\n outtfile = 'test.tfrecord'\n processeMol(tar,outtfile,testdf)\n tar.close()\n logger.info('Done')\n ### get mu,std ###\n train = \"train.tfrecord\"\n size = len(traindf)\n position = \"positions1\"\n reference = os.path.join(inputdir,\"atomref.B3LYP_631Gd.npz\")\n outfile = open(\"mu_std.csv\",\"w\")\n getmustd_final(train,position,size,reference,outfile)\n ### get atom ref ###\n os.system(\"cp \" + reference + \" .\")\n logger.info('Done')\n\n\n elif datatype == \"platinummmff\":\n logger.info(\"Platinum_MMFF\")\n ### no split for PlatinumMMFF dataset ###\n ### all structures have been used as test ###\n os.chdir(datadir)\n tar = tarfile.open(os.path.join(inputdir,\"Platinum_MMFF.tar.bz2\"))\n rmsd = os.path.join(inputdir,\"RMSD.csv\")\n index_list = [line.split(\",\")[1].rstrip() for line in open(rmsd) if line.split(\",\")[1].rstrip() != \"index\"]\n pro = os.path.join(inputdir,\"Gaussian_properties_allRMSD.csv\")\n outtf = \"platinum.tfrecord\"\n processPlatinum(tar,pro,outtf,index_list)\n tar.close()\n logger.info('Done')", "title": "" }, { "docid": "00b0cdafec01d1e1b1586bec72626b0f", "score": "0.49667493", "text": "def main():\n\n # check mode\n # \"development\": mode == True\n # \"evaluation\": mode == False\n mode = com.command_line_chk() # constant: True or False\n if mode is None:\n sys.exit(-1)\n\n # make output directory\n os.makedirs(PARAM[\"model_directory\"], exist_ok=True)\n\n # load base_directory list\n dir_list = com.select_dirs(param=PARAM, mode=mode)\n\n # loop of the base directory (for each machine)\n dir_list = ['/work/tamamori/dcase2020/dcase2020_task2_baseline/dev_data/ToyCar']\n for idx, target_dir in enumerate(dir_list):\n com.logger.info(\"===========================\")\n com.logger.info(\"[%d/%d] %s\", idx + 1, len(dir_list), target_dir)\n\n com.logger.info(\"============== DATASET_GENERATOR ==============\")\n dcase_dataset = DcaseDataset(target_dir)\n n_samples = len(dcase_dataset) # total number of frames\n train_size = int(n_samples * (1.0 - PARAM[\"training\"][\"validation_split\"]))\n dataset = {\"train\": None, \"val\": None}\n dataset[\"train\"] = Subset(dcase_dataset, list(range(0, train_size)))\n dataset[\"val\"] = Subset(dcase_dataset, list(range(train_size, n_samples)))\n\n com.logger.info(\"============== DATALOADER_GENERATOR ==============\")\n data_loader = {\"train\": None, \"val\": None}\n data_loader[\"train\"] = torch.utils.data.DataLoader(\n dataset[\"train\"], batch_size=PARAM[\"training\"][\"batch_size\"],\n 
shuffle=PARAM[\"training\"][\"shuffle\"], drop_last=True)\n\n data_loader[\"val\"] = torch.utils.data.DataLoader(\n dataset[\"val\"], batch_size=PARAM[\"training\"][\"batch_size\"],\n shuffle=False, drop_last=False)\n\n com.logger.info(\"============== MODEL TRAINING ==============\")\n model = {}\n model[\"Generator\"] = Generator(\n x_dim=PARAM[\"feature\"][\"n_mels\"] * PARAM[\"feature\"][\"frames\"],\n h_dim=PARAM[\"model\"][\"hidden_dim\"],\n z_dim=PARAM[\"model\"][\"latent_dim\"]).to(DEVICE)\n\n model[\"Discriminator\"] = Discriminator(\n x_dim=PARAM[\"feature\"][\"n_mels\"] * PARAM[\"feature\"][\"frames\"],\n h_dim=PARAM[\"model\"][\"hidden_dim\"],\n z_dim=PARAM[\"model\"][\"latent_dim\"],).to(DEVICE)\n\n optimizer = {}\n optimizer[\"Generator\"] = optim.Adam(\n model[\"Generator\"].parameters(),\n lr=PARAM[\"training\"][\"learning_rate\"],\n eps=0.0001,\n weight_decay=PARAM[\"training\"][\"weight_decay\"])\n optimizer[\"Discriminator\"] = optim.Adam(\n model[\"Discriminator\"].parameters(),\n lr=PARAM[\"training\"][\"learning_rate\"],\n eps=0.0001,\n weight_decay=PARAM[\"training\"][\"weight_decay\"])\n\n scheduler = {}\n scheduler[\"Generator\"] = optim.lr_scheduler.StepLR(\n optimizer[\"Generator\"],\n step_size=PARAM[\"training\"][\"lr_step_size\"],\n gamma=PARAM[\"training\"][\"lr_gamma\"])\n scheduler[\"Discriminator\"] = optim.lr_scheduler.StepLR(\n optimizer[\"Discriminator\"],\n step_size=PARAM[\"training\"][\"lr_step_size\"],\n gamma=PARAM[\"training\"][\"lr_gamma\"])\n\n criterion = {}\n criterion[\"Generator\"] = nn.BCELoss()\n criterion[\"Discriminator\"] = nn.BCELoss()\n\n loss = {\"train_G\": 0.0, \"train_D\": 0.0, \"val_G\": 0.0, \"val_D\": 0.0}\n\n for epoch in range(1, PARAM[\"training\"][\"epochs\"] + 1):\n loss[\"train_D\"], loss[\"train_G\"] = training(\n model, data_loader[\"train\"], optimizer, criterion)\n\n scheduler[\"Generator\"].step()\n scheduler[\"Discriminator\"].step()\n\n loss[\"val_D\"], loss[\"val_G\"] = validation(\n model, data_loader, criterion)\n\n com.logger.info(\"Epoch %2d: train_loss(D): %.6f, \"\n \"train_loss(G): %.6f, \"\n \"val_loss(D): %.6f, \"\n \"train_loss(G): %.6f\",\n epoch, loss[\"train_D\"], loss[\"train_G\"],\n loss[\"val_D\"], loss[\"val_G\"])\n\n com.logger.info(\"============== SAVE MODEL ==============\")\n torch.save(model[\"Generator\"].state_dict(),\n \"%s/model_generator_%s.pt\" % (PARAM[\"model_directory\"],\n os.path.split(target_dir)[1]))\n torch.save(model[\"Discriminator\"].state_dict(),\n \"%s/model_discriminator_%s.pt\" % (PARAM[\"model_directory\"],\n os.path.split(target_dir)[1]))\n com.logger.info(\"save_model -> %s\", \"%s/model_generator_%s.pt\"\n % (PARAM[\"model_directory\"], os.path.split(target_dir)[1]))\n com.logger.info(\"save_model -> %s\", \"%s/model_discriminator_%s.pt\"\n % (PARAM[\"model_directory\"], os.path.split(target_dir)[1]))\n com.logger.info(\"============== END TRAINING ==============\")", "title": "" }, { "docid": "b47ebd42115ee4d131966eee4dcfc959", "score": "0.49589032", "text": "def convert(source_files_path, output_path):\n for child in Path(source_files_path).iterdir():\n if child.suffix == '.deft':\n write_converted(child, Path.joinpath(output_path, task_name + child.name))\n elif child.is_dir():\n convert(child, output_path)", "title": "" }, { "docid": "db70f7e9a994c27693b476e5dc241dde", "score": "0.49579793", "text": "def transfer(model_dir, output, templates_file, epoch, num_samples, cuda_device):\n if output is None:\n fname = os.path.basename(os.path.normpath(model_dir))\n 
output = os.path.join('data/outputs', fname + '.tsv')\n model = load_model(model_dir, epoch, cuda_device)\n if templates_file and os.path.exists(templates_file):\n # Path exists so we use it as templates\n logger.info(f'Reading templates from file {templates_file}')\n num_lines = file_utils.get_num_lines(templates_file)\n semantic_templates = []\n syntactic_templates = []\n with open(templates_file) as tf:\n with tqdm(tf, total=num_lines, desc='Extracting template info') as pbar:\n for line in pbar:\n sentences = line.rstrip().split('\\t')\n semantic_templates.append(Template(sentences[0]))\n syntactic_templates.append(Template(sentences[1]))\n instances1 = [template.instance for template in semantic_templates]\n instances2 = [template.instance for template in syntactic_templates]\n\n et1 = get_encodings(instances1, model.vocab, lambda x: model._encode(x, model._task_encoder, 0.0), cuda_device)\n eg2 = get_encodings(instances2, model.vocab, lambda x: model._encode(x, model._gen_encoder, 0.0), cuda_device)\n latent = combine_batch_encodings(et1, eg2)\n sentences1 = [template.sentence for template in semantic_templates]\n sentences2 = [template.sentence for template in syntactic_templates]\n df = pd.DataFrame({\n 'sentence1': sentences1,\n 'sentence2': sentences2,\n 'sem1syn2': batch_decode(latent, model)\n })\n else:\n logger.info(f'Templates file not provided or does not exist, sampling from prior...')\n\n batch_size = 100\n num_iter = int(num_samples/batch_size)\n remainder = batch_size % num_iter\n\n sentences1 = []\n sentences2 = []\n sem1syn2 = []\n for _ in tqdm(range(num_iter), desc='Generating Samples'):\n l11, l22, l12 = get_latent_triplets(batch_size,\n model._task_latent_dim, model._gen_latent_dim,\n cuda_device)\n sentences1.extend(model.get_samples(l11))\n sentences2.extend(model.get_samples(l22))\n sem1syn2.extend(model.get_samples(l12))\n\n # Write the remainder samples\n if remainder:\n l11, l22, l12 = get_latent_triplets(remainder,\n model._task_latent_dim, model._gen_latent_dim,\n cuda_device)\n sentences1.extend(model.get_samples(l11))\n sentences2.extend(model.get_samples(l22))\n sem1syn2.extend(model.get_samples(l12))\n df = pd.DataFrame({\n 'sentence1': sentences1,\n 'sentence2': sentences2,\n 'sem1syn2': sem1syn2\n })\n logger.info(f'saving to path {output}')\n df.to_csv(output, sep='\\t', index=None)", "title": "" }, { "docid": "c59854ffff02c7221d4863f80dbadbee", "score": "0.49546635", "text": "def convert_dir(input_dir: str, output_dir: str, svcschema: SchemaForTable):\n\n defaults = {\n pa.string(): \"\",\n pa.int32(): 0,\n pa.int64(): 0,\n pa.float32(): 0.0,\n pa.float64(): 0.0,\n pa.date64(): 0.0,\n pa.bool_(): False,\n pa.list_(pa.string()): ['-'],\n pa.list_(pa.int64()): [],\n }\n\n df = pd.read_parquet(input_dir, use_legacy_dataset=True)\n sqschema = svcschema.get_raw_schema()\n arrow_schema = svc_schema.get_arrow_schema()\n\n for column in filter(lambda x: x['name'] not in df.columns, sqschema):\n df[column['name']] = column.get('default', defaults[column['type']])\n\n # convert all dtypes to whatever is desired\n for column in df.columns:\n if column in arrow_schema:\n df[column] = df[column].astype(arrow_schema.field(column)\n .type.to_pandas_dtype())\n\n # If there's the original ifname saved up, then eliminate this unnecessary\n # field as this model is no longer necessary\n\n if 'origIfname' in df.columns:\n if 'ifname' in df.columns:\n df = df.drop(columns=['ifname']) \\\n .rename(columns={'origIfname': 'ifname'})\n elif 'oif' in df.columns:\n df = 
df.drop(columns=['oif']) \\\n .rename(columns={'origIfname': 'oif'})\n\n table = pa.Table.from_pandas(df, schema=arrow_schema,\n preserve_index=False)\n partition_cols = svcschema.get_partition_columns()\n\n if 'norifcnReason' in df.columns:\n df.rename({'notifcnReason': 'notificnReason'}, inplace=True)\n\n pq.write_to_dataset(\n table,\n root_path=output_dir,\n partition_cols=partition_cols,\n version=\"2.0\",\n compression='ZSTD',\n row_group_size=100000,\n )\n\n logger.info(f'Wrote converted {input_dir}')", "title": "" }, { "docid": "852f1ca235aa730f0ab13eabbddc3a0b", "score": "0.49477792", "text": "def main2(args=None):\n\n if args is None:\n parser = create_parser()\n args = parser.parse_args()\n\n args['output_date_format'] = '%Y-%m-%d'\n input_module = input_mapping['pdftotext']\n output_module = output_mapping[args['output_format']]\n\n templates = []\n # Load templates from external folder if set.\n # if args['template_folder']:\n # templates += read_templates(os.path.abspath(args['template_folder']))\n\n # Load internal templates, if not disabled.\n # if not args['exclude_built_in_templates']:\n if 'template_folder' in args:\n templates += read_templates(os.path.abspath(args['template_folder']))\n else:\n templates += read_templates()\n output = []\n for fs in args['input_files']:\n f = open(fs, 'r') \n res = extract_data(f.name, templates=templates, input_module=input_module)\n if res == 'pdf seperated':\n continue\n re = None\n if res:\n logger.info(res)\n output.append(res)\n if args['dbpass'] is not None:\n re = output_module.write_to_db(res, f.name, args['output_date_format'], \n args['dbhost'], args['dbuser'], args['dbpass'], args['dbname'],\n args['azure_account'], args['azure_key'], args['pdf_path'])\n\n f.close()\n if args['dbpass'] is not None:\n #move source pdf\n pdfdirectory = os.path.dirname(f.name)\n pdfpath = f.name\n pdfname = os.path.basename(f.name)\n if re == 'succeed':\n #move to successful\n if args['pdf_succeed']:\n succeed_path = args['pdf_succeed']\n else:\n #succeed_path = join(pdfdirectory, 'successful')\n #move to public successful folder where clients can access\n succeed_path = os.path.abspath(os.path.join(pdfdirectory, os.pardir))\n succeed_path = join(succeed_path, 'successful')\n\n from datetime import date\n succeed_path = join(succeed_path, date.today().strftime('%d-%m-%Y'))\n try:\n if not os.path.exists(succeed_path):\n os.makedirs(succeed_path)\n destinateFile = join(succeed_path, pdfname)\n shutil.move(pdfpath, destinateFile)\n except:\n if args['pdf_moved_failed']:\n succeed_path = args['pdf_moved_failed']\n else:\n succeed_path = join(pdfdirectory, 'failedToMove')\n succeed_path = join(succeed_path, date.today().strftime('%d-%m-%Y'))\n if not os.path.exists(succeed_path):\n os.makedirs(succeed_path)\n destinateFile = join(succeed_path, pdfname)\n shutil.move(pdfpath, destinateFile)\n pass\n elif re == 'link db failed':\n pass\n elif re == 'exists':\n #delete\n print('data already exists in edms: ' + pdfname)\n os.remove(pdfpath)\n pass\n else:\n #move to failed\n if args['pdf_failed']:\n failed_path = args['pdf_failed']\n else:\n father_path = os.path.abspath(os.path.join(pdfdirectory, os.pardir))\n failed_path = join(father_path, 'failed')\n if not os.path.exists(failed_path):\n os.makedirs(failed_path)\n destinateFile = join(failed_path, pdfname)\n shutil.move(pdfpath, destinateFile)\n pass\n\n\n \n if output_module is not None:\n if args['dbpass'] is not None:\n pass #for data base output, do it in loop of extracting\n else:\n 
logger.warning(output)\n output_module.write_to_file(output, args['output_name'], args['output_date_format'])\n return res", "title": "" }, { "docid": "72fc9150f93a1e6624652cebec5d3487", "score": "0.49379867", "text": "def prepare_model_summaries(self, document, model_directory):\n model_source = os.path.join(self.dataset_directory, document)\n\n summaries_filename_pattern = re.compile(SUMMARIES_FILE_PATTERN)\n summary_id = 0\n for file in os.listdir(model_source):\n if not summaries_filename_pattern.match(file):\n continue\n\n dest_filename = MODEL_SUMMARIES_FORMAT.format(model_id=document,\n text_id=chr(65 + summary_id))\n file_dest_path = os.path.join(model_directory, dest_filename)\n file_source_path = os.path.join(model_source, file)\n\n if summaries_filename_pattern.match(file):\n copyfile(file_source_path, file_dest_path)\n\n summary_id += 1", "title": "" }, { "docid": "8326868383c5551356bae5e1c4a7c047", "score": "0.4933102", "text": "def GenerateAllModels(self):\n\n # ________________________ DEFINE THE QUOTES & PAIRS TO CREATE MODELS FOR ________________________________________\n\n # Work on the quotes that have a directory in 'Historical_data/'\n ExistingQuoteassets = next(os.walk('Historical_data/'))[1] # ['ETH', 'BTC']\n timeframe = next(os.walk('Historical_data/' + ExistingQuoteassets[0]))[2][0].split('_')[1] # '1h'\n\n # Work on the pairs that have a file in the sub-directory, ie pairs that we have data for.\n pairs = dict() # {'ETH': ['QTUMETH', 'EOSETH',..], 'BTC':[]}\n for quoteasset in ExistingQuoteassets:\n pairs[quoteasset] = [f.replace('_' + timeframe + '_raw', '') for f in listdir('Historical_data/' + quoteasset + '/') if isfile(join('Historical_data/' + quoteasset + '/', f))]\n\n for pair in pairs[quoteasset]:\n self.GenerateAndSaveModel(quote=quoteasset, pair=pair, timeframe=timeframe)", "title": "" }, { "docid": "dfa24027c5201b7e3b78eae4c7b370c9", "score": "0.49281424", "text": "def main():\n logging.basicConfig(level=logging.DEBUG)\n print(f'Running batch: {lib.get_batch_name()}, with output folder: {lib.get_batch_output_folder()}')\n logging.info(f'Running batch: {lib.get_batch_name()}, with output folder: {lib.get_batch_output_folder()}')\n\n observations = extract()\n observations = transform(observations)\n observations, trained_model = model(observations)\n load(observations, trained_model)\n pass", "title": "" }, { "docid": "bddae3829ce5bd26e574de9c8f54e1d0", "score": "0.49239868", "text": "def generate_adapter(self, local_path=None, overwrite=False):\n self.models.generate(src_path=local_path, entry_point=self.entry_point, overwrite=overwrite)", "title": "" }, { "docid": "39095bf6eb69df3b7a0499d99fdb4535", "score": "0.49201447", "text": "def fill_output(output: Dict[str, object], options: object):\n dept_graph = load_op_dep_graph(options.dep_graph_yaml_path)\n\n model_versions = (\n options.model_versions.split(\",\") if options.model_versions is not None else []\n )\n model_assets = (\n options.model_assets.split(\",\") if options.model_assets is not None else None\n )\n\n all_models_yaml = []\n if options.models_yaml_path:\n for yaml_path in options.models_yaml_path:\n with open(yaml_path, \"rb\") as f:\n all_models_yaml.append(yaml.safe_load(f))\n\n model_filter_func = make_filter_from_options(\n options.model_name, model_versions, model_assets, options.model_backends\n )\n\n selected_models_yaml = list(filter(model_filter_func, all_models_yaml))\n\n verify_all_specified_present(\n model_assets=model_assets,\n model_versions=model_versions,\n 
selected_models_yaml=selected_models_yaml,\n rule_name=options.rule_name,\n model_name=options.model_name,\n new_style_rule=is_new_style_rule(options.model_name, options.model_versions),\n )\n\n create_debug_info_from_selected_models(\n output,\n selected_models_yaml,\n is_new_style_rule(options.model_name, options.model_versions),\n )\n\n # initialize variables for static build from the pt_operator_library rule\n if options.root_ops is not None:\n static_root_ops = set(filter(lambda x: len(x) > 0, options.root_ops.split(\",\")))\n else:\n static_root_ops = set()\n\n static_training_root_ops = set(\n filter(\n lambda x: len(x) > 0,\n (options.training_root_ops or \"\").split(\",\"),\n )\n )\n if len(static_training_root_ops) > 0:\n static_root_ops = static_root_ops | static_training_root_ops\n # end if\n\n root_ops_unexpand = set()\n traced_ops = set()\n training_root_ops_unexpand = set()\n traced_training_ops = set()\n all_kernel_metadata = []\n all_custom_classes = set()\n all_build_features = set()\n\n # Go through each yaml file and retrieve operator information.\n for model_info in selected_models_yaml:\n if \"traced_operators\" not in model_info:\n # If this YAML file doesn't specify any traced operators, then it is using\n # the static analysis selective build approach of finding transitively\n # used operators, and we should update root_ops with the set of root\n # operators, all of whose overloads must be included. In addition, these\n # root_ops will be further expanded using the transitive closure of\n # operator dependencies.\n static_root_ops = static_root_ops | set(model_info[\"root_operators\"])\n else:\n # If this YAML file specifies traced operators, then it is using\n # the tracing based selective build approach of finding used\n # operators, and we should update root_ops_unexpand with the set of root\n # operators whose overloads don't need to be included. In addition, these\n # root_ops_unexpand will NOT be further expanded. 
If the train flag is\n # set then the ops will be used for training, so we put them in a separate\n # set\n if model_info[\"train\"]:\n training_root_ops_unexpand = training_root_ops_unexpand | set(\n model_info[\"root_operators\"]\n )\n traced_training_ops = traced_training_ops | set(\n model_info[\"traced_operators\"]\n )\n else:\n root_ops_unexpand = root_ops_unexpand | set(\n model_info[\"root_operators\"]\n )\n traced_ops = traced_ops | set(model_info[\"traced_operators\"])\n\n if \"kernel_metadata\" in model_info:\n all_kernel_metadata.append(model_info[\"kernel_metadata\"])\n\n if \"custom_classes\" in model_info:\n all_custom_classes = all_custom_classes | set(model_info[\"custom_classes\"])\n\n if \"build_features\" in model_info:\n all_build_features = all_build_features | set(model_info[\"build_features\"])\n\n # This following section on transitive closure is relevant to static build only\n canonical_root_ops = canonical_opnames(static_root_ops)\n # If no canonical_root_ops exist, don't compute the transitive closure\n # otherwise, we will include __BASE__ and __ROOT__ ops and mark them as required\n # for inference.\n if len(canonical_root_ops) > 0:\n closure_op_list = gen_transitive_closure(dept_graph, canonical_root_ops)\n else:\n closure_op_list = set()\n\n canonical_training_root_ops = canonical_opnames(static_training_root_ops)\n # If no canonical_training_root_ops exist, don't compute the transitive closure\n # otherwise, we will include __BASE__ and __ROOT__ ops and mark them as required\n # for training.\n if len(canonical_training_root_ops) > 0:\n closure_training_op_list = gen_transitive_closure(\n dept_graph, canonical_training_root_ops, train=True\n )\n else:\n closure_training_op_list = set()\n\n # bucketed_ops holds sets of operators that correspond to specific semantic buckets. For\n # example:\n #\n # 1. Root Operators not used for training w/o full overload inclusion\n # 2. Root Operators not used for training w/ full overload inclusion\n # 3. Root Operators used for training w/o full overload inclusion\n # 4. Root Operators used for training w/ full overload inclusion\n # 5. 
Non-root Operators not used for training w/o full overload inclusion\n # etc...\n #\n # Basically for each of the 3 boolean conditional, there are 2\n # options (True/False).\n #\n bucketed_ops = []\n\n # START STATIC BUILD OPS\n static_root_ops_bucket = {}\n for op_name in static_root_ops:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": True,\n \"is_used_for_training\": False,\n \"include_all_overloads\": True,\n \"debug_info\": [options.model_name],\n },\n )\n static_root_ops_bucket[op_name] = op\n bucketed_ops.append(static_root_ops_bucket)\n\n closure_ops_bucket = {}\n for op_name in closure_op_list:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": False,\n \"is_used_for_training\": False,\n \"include_all_overloads\": True,\n \"debug_info\": [options.model_name],\n },\n )\n closure_ops_bucket[op_name] = op\n bucketed_ops.append(closure_ops_bucket)\n\n static_training_root_ops_bucket = {}\n for op_name in static_training_root_ops:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": True,\n \"is_used_for_training\": True,\n \"include_all_overloads\": True,\n \"debug_info\": [options.model_name],\n },\n )\n static_training_root_ops_bucket[op_name] = op\n bucketed_ops.append(static_training_root_ops_bucket)\n\n closure_training_ops_bucket = {}\n for op_name in closure_training_op_list:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": False,\n \"is_used_for_training\": True,\n \"include_all_overloads\": True,\n \"debug_info\": [options.model_name],\n },\n )\n closure_training_ops_bucket[op_name] = op\n bucketed_ops.append(closure_training_ops_bucket)\n # END STATIC BUILD OPS\n\n # START TRACING BASED BUILD OPS\n root_ops_unexpand_bucket = {}\n for op_name in root_ops_unexpand:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": True,\n \"is_used_for_training\": False,\n \"include_all_overloads\": False,\n \"debug_info\": [options.model_name],\n },\n )\n root_ops_unexpand_bucket[op_name] = op\n bucketed_ops.append(root_ops_unexpand_bucket)\n\n traced_ops_bucket = {}\n for op_name in traced_ops:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": False,\n \"is_used_for_training\": False,\n \"include_all_overloads\": False,\n \"debug_info\": [options.model_name],\n },\n )\n traced_ops_bucket[op_name] = op\n bucketed_ops.append(traced_ops_bucket)\n\n training_root_ops_unexpand_bucket = {}\n for op_name in training_root_ops_unexpand:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": True,\n \"is_used_for_training\": True,\n \"include_all_overloads\": False,\n \"debug_info\": [options.model_name],\n },\n )\n training_root_ops_unexpand_bucket[op_name] = op\n bucketed_ops.append(training_root_ops_unexpand_bucket)\n\n traced_training_ops_bucket = {}\n for op_name in traced_training_ops:\n op = SelectiveBuildOperator.from_yaml_dict(\n op_name,\n {\n \"is_root_operator\": False,\n \"is_used_for_training\": True,\n \"include_all_overloads\": False,\n \"debug_info\": [options.model_name],\n },\n )\n traced_training_ops_bucket[op_name] = op\n bucketed_ops.append(traced_training_ops_bucket)\n # END TRACING BASED BUILD OPS\n\n # Merge dictionaries together to remove op duplication\n operators: Dict[str, SelectiveBuildOperator] = {}\n for ops_dict in bucketed_ops:\n operators = merge_operator_dicts(operators, ops_dict)\n\n # Loop over all operators, and if any of the 
them specifies that\n # all overloads need to be included, then set include_all_non_op_selectives\n # to True, since it indicates that this operator list came from something\n # other than a traced operator list.\n include_all_non_op_selectives = False\n for op_name, op_info in operators.items():\n include_all_non_op_selectives = (\n include_all_non_op_selectives or op_info.include_all_overloads\n )\n\n operators_as_dict = {}\n for k, v in operators.items():\n operators_as_dict[k] = v.to_dict()\n\n output[\"operators\"] = operators_as_dict\n\n output[\"custom_classes\"] = all_custom_classes\n\n output[\"build_features\"] = all_build_features\n\n output[\"include_all_non_op_selectives\"] = include_all_non_op_selectives\n if len(all_kernel_metadata) > 0:\n kernel_metadata = {}\n for kt in all_kernel_metadata:\n kernel_metadata = merge_kernel_metadata(kernel_metadata, kt)\n output[\"kernel_metadata\"] = kernel_metadata", "title": "" }, { "docid": "581d8b61495c6cc539fff5d3d065a6ed", "score": "0.48995677", "text": "def export_model(self, destination_path):\n all_paths = [v for k, v in self.info.items() if \"path\" in k]\n if \"train_path\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"train_path\"])\n\n if \"validation_path\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"validation_path\"])\n\n if \"original_train_path\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"original_train_path\"])\n all_paths.extend(self.hyperparams[\"additional_data_paths\"])\n\n if \"split_and_train_params\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"split_and_train_params\"][\"df_path\"])\n copy_all(all_paths, destination_path)", "title": "" }, { "docid": "9a7fe842094f0d5e55f339def44abaf0", "score": "0.48976243", "text": "def post_model_update(): \n import shutil\n drc = dynamic_filenames()\n slurm = os.path.join(drc[\"output_files\"], \"SLURM\") \n edit_par_file(fid=os.path.join(drc[\"data\"], \"Par_file\"), choice=\"update\")\n \n # move slurm files into a separate folder\n print(\"moving slurm* files\")\n if not os.path.exists(slurm):\n os.mkdir(slurm)\n slurmfiles = glob.glob(os.path.join(drc[\"runfolder\"], \"slurm-*.out\"))\n for sfile in slurmfiles:\n sfile_new = os.path.join(slurm, os.path.basename(sfile))\n shutil.move(sfile, sfile_new)\n \n # change the name of OUTPUT_FILES, if M00_OUTPUT_FILES doesn't exist\n # if it exists, assume that this has already been run and continue\n old_output_files = os.path.join(drc[\"runfolder\"], \"M00_OUTPUT_FILES\")\n if not os.path.exists(old_output_files):\n print(\"moving OUTPUT_FILES/ to M00_OUTPUT_FILES/\")\n shutil.move(drc[\"output_files\"], old_output_files)\n \n # set up new OUTPUT_FILES/\n os.mkdir(drc[\"output_files\"])\n surface_h = os.path.join(old_output_files, \"surface_from_mesher.h\")\n values_h = os.path.join(old_output_files, \"values_from_mesher.h\")\n for fid in [surface_h, values_h]: \n shutil.copyfile(fid, os.path.join(\n drc[\"output_files\"], os.path.basename(fid))\n )\n else:\n query = input(\"M00_OUTPUT_FILES exists, \"\n \"continue to populate new OUTPUT_FILES? 
[y/(n)]\")\n if query != \"y\":\n sys.exit()\n \n # create new local_path in output_files\n print(\"creating new local_path\")\n old_local_path = os.path.join(\n old_output_files, os.path.basename(drc[\"local_path\"])\n )\n local_path = drc[\"local_path\"]\n try:\n os.mkdir(local_path)\n except OSError:\n pass\n \n # mv relevant mesh files from old local_path to new local_path\n print(\"copying *attenuation, *Database, and q* files to new local_path\")\n for tag in [\"*attenuation.bin\", \"*Database\", \"*qkappa.bin\", \"*qmu.bin\"]:\n fids = glob.glob(os.path.join(old_local_path, tag))\n for fid in fids:\n new_fid = os.path.join(local_path, os.path.basename(fid))\n shutil.copyfile(fid, new_fid)\n \n # mv vp_new.bin, vs_new.bin and rho_new.bin files to new local_path\n print(\"moving and renaming mesh files to new local_path\")\n mesh_files = os.path.join(old_output_files, \"mesh_files_m01\")\n for tag in [\"*vp_new.bin\", \"*vs_new.bin\", \"*rho_new.bin\"]:\n fids = glob.glob(os.path.join(mesh_files, tag))\n for fid in fids:\n # get rid of the _new tag when renaming\n new_tag = os.path.basename(fid).split('_')\n new_tag = \"{}_{}.bin\".format(new_tag[0], new_tag[1])\n new_fid = os.path.join(local_path, new_tag)\n shutil.copyfile(fid, new_fid)\n \n print(\"ready for xgenerate_databases\")", "title": "" }, { "docid": "689835806611fd6ae40b73df79f73f4d", "score": "0.48929298", "text": "def deploy(models=None, silent=False):\n config = dg.Config()\n production_dir = config.get_model_dir(production=True)\n models_dir = os.path.dirname(production_dir)\n\n models = models or config.models.keys()\n\n files = [\n os.path.basename(x) for x in\n glob.glob(os.path.join(models_dir, '*'))\n # Remove production and tensorflow from the list\n if os.path.basename(x) not in (\n 'production', 'tensorflow', 'metrics.db'\n )\n ]\n\n latest = os.path.join(models_dir, sorted(\n files, key=lambda x: datetime.strptime(x[:19], '%Y.%m.%dT%H:%M:%S')\n )[-1])\n\n ensure_dir(production_dir, directory=True)\n\n bar(silent=silent)\n for model in models:\n if not silent:\n print('Deploying model:', model)\n source = os.path.join(latest, model)\n # If the model is trained in the latest training batch\n if os.path.isdir(source):\n destination = os.path.join(production_dir, model)\n if os.path.isdir(destination):\n shutil.rmtree(destination)\n shutil.copytree(source, destination)\n bar(silent=silent)", "title": "" }, { "docid": "201ba5d73412210eb7b6d74bfd9b306e", "score": "0.48919654", "text": "def load_models(path, prefix):\n model = EncodeProcessDecode(f_dict)", "title": "" }, { "docid": "6104ee86b6fbbc73d382933098c15fa3", "score": "0.48836023", "text": "def _create_model_directory(self):\n\n if not os.path.isdir(self.main_dir):\n print(\"Created dir: \", self.main_dir)\n os.mkdir(self.main_dir)", "title": "" }, { "docid": "3f53ec28d2b2c42fad7fe35b92f117ca", "score": "0.48830682", "text": "def _convert(reporter, model, output_dir, namespace, mo_props, requested_precisions):\n if model.mo_args is None:\n reporter.print_section_heading(\"Skipping {} (no conversions defined)\", model.name)\n reporter.print()\n return True\n\n model_precisions = requested_precisions & model.precisions\n if not model_precisions:\n reporter.print_section_heading(\"Skipping {} (all conversions skipped)\", model.name)\n reporter.print()\n return True\n\n (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True)\n\n if not _run_pre_convert(reporter, model, output_dir, namespace):\n return False\n\n model_format = model.framework\n 
mo_extension_dir = mo_props.base_dir / \"extensions\"\n if not mo_extension_dir.exists():\n mo_extension_dir = mo_props.base_dir\n\n template_variables = {\n \"config_dir\": _common.MODEL_ROOT / model.subdirectory_ori,\n \"conv_dir\": output_dir / model.subdirectory,\n \"dl_dir\": namespace.download_dir / model.subdirectory,\n \"mo_dir\": mo_props.base_dir,\n \"mo_ext_dir\": mo_extension_dir,\n }\n\n if model.conversion_to_onnx_args:\n if not convert_to_onnx(reporter, model, output_dir, namespace, template_variables):\n return False\n model_format = \"onnx\"\n\n expanded_mo_args = [string.Template(arg).substitute(template_variables) for arg in model.mo_args]\n\n for model_precision in sorted(model_precisions):\n data_type = model_precision.split(\"-\")[0]\n layout_string = \",\".join(f\"{input.name}({input.layout})\" for input in model.input_info if input.layout)\n shape_string = \",\".join(str(input.shape) for input in model.input_info if input.shape)\n\n if layout_string:\n expanded_mo_args.append(f\"--layout={layout_string}\")\n if shape_string:\n expanded_mo_args.append(f\"--input_shape={shape_string}\")\n\n mo_cmd = [\n *mo_props.cmd_prefix,\n f\"--framework={model_format}\",\n f\"--output_dir={output_dir / model.subdirectory / model_precision}\",\n f\"--model_name={model.name}\",\n f\"--input={','.join(input.name for input in model.input_info)}\".format(),\n *expanded_mo_args,\n *mo_props.extra_args,\n ]\n if \"FP16\" in data_type:\n mo_cmd.append(\"--compress_to_fp16\")\n\n reporter.print_section_heading(\n \"{}Converting {} to IR ({})\",\n \"(DRY RUN) \" if namespace.dry_run else \"\",\n model.name,\n model_precision,\n )\n\n reporter.print(\"Conversion command: {}\", _common.command_string(mo_cmd))\n\n if not namespace.dry_run:\n reporter.print(flush=True)\n\n if not reporter.job_context.subprocess(mo_cmd):\n # NOTE: mo returns non zero return code (245) even though it successfully generate IR\n cur_time = time.time()\n time_threshold = 5\n xml_path = output_dir / model.subdirectory / model_precision / f\"{model.name}.xml\"\n bin_path = output_dir / model.subdirectory / model_precision / f\"{model.name}.bin\"\n if not (\n os.path.exists(xml_path)\n and os.path.exists(bin_path)\n and os.path.getmtime(xml_path) - cur_time < time_threshold\n and os.path.getmtime(bin_path) - cur_time < time_threshold\n ):\n return False\n\n reporter.print()\n\n return True", "title": "" }, { "docid": "b6f7c7f1e68c5e9ead2c649d18f18d61", "score": "0.4876343", "text": "async def makemigrations(args):\n name = args.name\n await Tortoise.init(config=settings.TORTOISE_ORM)\n # ๅฏผๅ‡บๅฝ“ๅ‰model sql\n new_sql = get_schema_sql(Tortoise.get_connection('default'), safe=False)\n with open(NEW_SCHEMA_FILE, 'w') as f:\n f.write(new_sql)\n if not os.path.exists(OLD_SCHEMA_FILE):\n with open(OLD_SCHEMA_FILE, 'w') as f:\n f.write(new_sql)\n # ็”Ÿๆˆๅ‡็บงsqlๅ’Œ้™็บงsql\n up_sql = os.popen(f'/usr/local/bin/schemalex {OLD_SCHEMA_FILE} {NEW_SCHEMA_FILE}').read()\n down_sql = os.popen(f'/usr/local/bin/schemalex {NEW_SCHEMA_FILE} {OLD_SCHEMA_FILE}').read()\n if up_sql == down_sql:\n os.unlink(NEW_SCHEMA_FILE)\n print(Fore.BLUE + 'No changes detected')\n return\n # ๅ‡็บงsqlๅ’Œ้™็บงsqlๅ†™ๅ…ฅdbmateๆ ผๅผ\n if not os.path.exists(MIGRATIONS_DIR):\n os.mkdir(MIGRATIONS_DIR)\n\n up_sql_file = os.path.join(MIGRATIONS_DIR, f'{datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")}_{name}.sql')\n with open(up_sql_file, 'w') as f:\n migrate_sql = f\"\"\"-- migrate:up\\n{up_sql}\\n\\n-- migrate:down\\n{down_sql}\"\"\"\n 
f.write(migrate_sql)\n\n with open(OLD_SCHEMA_FILE, 'w') as f:\n f.write(new_sql)\n\n os.unlink(NEW_SCHEMA_FILE)\n\n print(Fore.GREEN + f'Generate sql file {up_sql_file}.')", "title": "" }, { "docid": "d7e064e20711688a299b13964f711393", "score": "0.48724288", "text": "def models_generator_train(fieldtype_dict, domain_descriptor):\n pass\n return None", "title": "" }, { "docid": "6a477ea910238c8f14280ca85471fa84", "score": "0.486957", "text": "def map(ctx,rules,inputs,format_level,\n output_folder,output_database,\n csv_separator,use_profiler,log_file,\n no_mask_person_id,indexing_conf,\n person_id_map,max_rules,merge_output,\n objects,tables,db,write_mode,split_outputs,\n dont_automatically_fill_missing_columns,\n number_of_rows_per_chunk,allow_missing_data,\n number_of_rows_to_process):\n\n if output_folder is None:\n output_folder = f'{os.getcwd()}{os.path.sep}output_data{os.path.sep}'\n\n #if log_file == 'auto' and carrot.params['log_file'] is None:\n if log_file == 'auto':\n log_file = f\"{output_folder}{os.path.sep}logs{os.path.sep}carrot.log\"\n carrot.params['log_file'] = log_file\n elif log_file == 'none':\n pass\n else:\n carrot.params['log_file'] = log_file\n\n #load the json loads\n if type(rules) == dict:\n config = rules\n else:\n config = tools.load_json(rules)\n\n if tables:\n tables = list(set(tables))\n config = carrot.tools.filter_rules_by_destination_tables(config,tables)\n\n if objects:\n objects = list(set(objects))\n config = carrot.tools.filter_rules_by_object_names(config,objects)\n\n if max_rules:\n i = 0\n n = max_rules\n new = {}\n for destination_table,rule_set in config['cdm'].items():\n if destination_table == 'person':\n new[destination_table] = rule_set\n else:\n for name,_rules in rule_set.items():\n if i>=n:\n break\n if destination_table not in new:\n new[destination_table] = {}\n new[destination_table][name] = _rules\n i+=1\n\n config['cdm'] = new\n\n name = config['metadata']['dataset']\n\n if indexing_conf is not None:\n if isinstance(indexing_conf,dict):\n pass\n elif indexing_conf.endswith(\".json\") and os.path.exists(indexing_conf):\n indexing_conf = tools.load_json(indexing_conf)\n elif indexing_conf.endswith(\".csv\") and os.path.exists(indexing_conf):\n try:\n indexing_conf = pd.read_csv(indexing_conf,header=None,index_col=0)[1].to_dict()\n except pd.errors.EmptyDataError:\n indexing_conf = None\n pass\n\n\n #automatically calculate the ideal chunksize\n if number_of_rows_per_chunk == 'auto':\n #get the fields that are going to be used/loaded\n used_fields = tools.get_mapped_fields_from_rules(config)\n #calculate the number of fields that are to be used per dataset\n n_used_fields = [ len(sublist) for sublist in used_fields.values() ]\n #find what's the largest number of fields loaded by any dataset\n max_n_used_fields = max(n_used_fields)\n #get the number of files used\n n_files = len(n_used_fields)\n\n # If there is one dataset and one column being used, the max loaded to memory\n # is 2million rows (this is fairly arbitrary)\n # it is an approximation assuming the data in the values is relatively small\n # this should keep the memory usage down\n # When there is more fields and more files loaded, reduce the of rows per chunk\n max_n_rows = 2e6\n number_of_rows_per_chunk = int(max_n_rows/(max_n_used_fields*n_files))\n else:\n try:\n number_of_rows_per_chunk = int(number_of_rows_per_chunk)\n except ValueError:\n raise ValueError(f\"number_of_rows_per_chunk must be an Integer or 'auto', you inputted '{number_of_rows_per_chunk}'\")\n\n #turn off 
chunking if 0 or negative chunksizes are given\n if number_of_rows_per_chunk <= 0 :\n number_of_rows_per_chunk = None\n\n #check if exists\n if any('*' in x for x in inputs):\n data_dir = os.path.dirname(carrot.__file__)\n data_dir = f'{data_dir}{os.path.sep}data{os.path.sep}'\n\n new_inputs = []\n for i,x in enumerate(inputs):\n if not os.path.exists(x):\n new_inputs.extend(glob.glob(f\"{data_dir}{os.path.sep}{x}\"))\n else:\n new_inputs.append(x)\n inputs = new_inputs\n\n inputs = list(inputs)\n\n for x in inputs:\n if os.path.isdir(x):\n inputs.remove(x)\n inputs.extend(glob.glob(f'{x}{os.path.sep}*.csv'))\n\n #convert the list into a map between the filename and the full path\n inputs = {\n os.path.basename(x):x\n for x in inputs\n }\n\n if db:\n inputs = tools.load_sql(connection_string=db,chunksize=number_of_rows_per_chunk,nrows=number_of_rows_to_process)\n else:\n if allow_missing_data:\n config = carrot.tools.remove_missing_sources_from_rules(config,inputs)\n\n inputs = tools.load_csv(inputs,\n rules=config,\n chunksize=number_of_rows_per_chunk,\n nrows=number_of_rows_to_process)\n\n #do something with\n #person_id_map\n\n if isinstance(output_database,dict):\n if 'bclink' in output_database:\n outputs = carrot.tools.create_bclink_store(bclink_settings=output_database['bclink'],\n output_folder=output_database['cache'],\n sep=csv_separator,\n write_separate=split_outputs,\n write_mode=write_mode)\n else:\n raise NotImplementedError(f\"dont know how to configure outputs... {output_database}\")\n elif output_database == None:\n outputs = carrot.tools.create_csv_store(output_folder=output_folder,\n sep=csv_separator,\n write_separate=split_outputs,\n write_mode=write_mode)\n else:\n outputs = carrot.tools.create_sql_store()\n\n #build an object to store the cdm\n cdm = carrot.cdm.CommonDataModel(name=name,\n inputs=inputs,\n format_level=format_level,\n do_mask_person_id=not no_mask_person_id,\n outputs = outputs,\n #output_folder=output_folder,\n #output_database=output_database,\n automatically_fill_missing_columns=not dont_automatically_fill_missing_columns,\n use_profiler=use_profiler)\n #allow the csv separator to be changed\n #the default is tab (\\t) separation\n #if not csv_separator is None:\n # cdm.set_csv_separator(csv_separator)\n cdm.create_and_add_objects(config)\n\n cdm.process(conserve_memory=True)\n cdm.close()\n\n if merge_output:\n ctx.invoke(merge,\n inputs=glob.glob(f\"{output_folder}{os.path.sep}*\"),\n output_folder=output_folder)", "title": "" }, { "docid": "a8020d4f185e00c2db59a69e98d0f569", "score": "0.48653716", "text": "def spdx_model(self, task, repo_id):\n with open(\"../../augur.config.json\") as json_file:\n config = json.load(json_file)\n dbname = config[\"Database\"][\"database\"]\n user = config[\"Database\"][\"user\"]\n password = config[\"Database\"][\"password\"]\n host = config[\"Database\"][\"host\"]\n port = config[\"Database\"][\"port\"]\n dsfile = config[\"Workers\"][\"license_worker\"][\"tagfile\"]\n depth = config[\"Workers\"][\"license_worker\"][\"search_depth\"]\n ipath = config[\"Workers\"][\"facade_worker\"][\"repo_directory\"]\n home = expanduser(\"~\")\n\n configtools = 'postgresql://{}:{}@{}:{}/{}'.format(\n user, password, host, port, dbname\n )\n\n with open(\"dosocs2-example.conf\") as configfile:\n content = configfile.read()\n content_new = re.sub('(connection_uri = .*)\\n', \"connection_uri = \" + configtools + \"?options=--search_path=spdx\\n\", content)\n with open(\"dosocs2.conf\",\"w+\") as outfile:\n 
outfile.write(content_new)\n with open(home + \"/.config/dosocs2/dosocs2.conf\",\"w+\") as coreconfig:\n coreconfig.write(content_new)\n\n wd = os.getcwd()\n\n def depthwalk(ipath, depth, match):\n k = 0\n #print(\"IPATH \" + ipath)\n if depth > 0:\n for dir in os.listdir(ipath):\n if not ipath.endswith(\"/\"):\n usedir = ipath + \"/\" + dir\n else:\n usedir = ipath + dir\n if os.path.isdir(usedir):\n if dir == match:\n print(\"FOLDER FOUND: \" + str(usedir))\n pathtot.append(usedir)\n break\n depthwalk(usedir, depth - 1, match)\n\n def initscan(dbname, user, password, host, port, dsfile, ipath, depth):\n connection = psycopg2.connect(\n user = user,\n password = password,\n database = dbname,\n host = host,\n port = port,\n )\n print(\"********************\")\n cur = connection.cursor()\n r = cur.execute(\"set search_path to augur_data; select repo_path, repo_id, repo_group_id, repo_name from repo order by repo_group_id;\")\n rec = cur.fetchall()\n for sector in rec:\n global pathtot\n pathtot = []\n print(sector)\n repo_id = sector[1]\n print(\"****************\")\n print(repo_id)\n cur.execute(\"set search_path to spdx;\")\n cur.execute(\"select sbom_scan from augur_data.repo_sbom_scans where repo_id = \" + str(repo_id) + \" LIMIT 1;\")\n determin = cur.fetchall()\n if not determin:\n cur.execute(\"select dosocs_pkg_id from spdx.augur_repo_map where repo_id = \" + str(repo_id) + \" LIMIT 1;\")\n records = cur.fetchall()\n print(\"****************\")\n print(str(sector[0]))\n if not ipath.endswith(\"/\"):\n ipath = ipath + \"/\"\n #path = ipath + str(sector[3])\n os.chdir(ipath)\n print(\"---------------\")\n #need to make this a config parameter\n depthwalk(ipath, depth, sector[3])\n time.sleep(0.2)\n print(\"INSIDE: \" + str(pathtot))\n print(\"---------------\")\n if pathtot != []:\n path = pathtot[0]\n print(\"PATH: \" + str(path))\n print(\"SELECT repo_path FROM spdx.augur_repo_map WHERE \" + chr(39) + path + chr(39) + \" \" + chr(61) + \" repo_path;\")\n cur.execute(\"SELECT repo_path FROM spdx.augur_repo_map WHERE \" + chr(39) + path + chr(39) + \" \" + chr(61) + \" repo_path;\")\n if str(len(cur.fetchall())) == \"0\":\n print(\"ALL CHECKS PASSED\") #Create a new record in \"packages\" table. #dosocs will determine whether the entry has already been made\n print(\"Creating Record for \" + str(sector[1]))\n #cur.execute(\"INSERT INTO spdx.augur_repo_map(repo_id, repo_path) VALUES (\" + str(sector[1]) + \",\" + chr(39) + str(sector[0]) + str(sector[3]) + chr(39) + \");\")\n cur.execute(\"INSERT INTO spdx.augur_repo_map(repo_id, repo_path) VALUES (\" + str(sector[1]) + \",\" + chr(39) + path + chr(39) + \");\")\n connection.commit()\n #Attempt to create new DoSOCS entry\n print(\"CREATING NEW DOSOCS DOCUMENT\")\n print(path)\n p = subprocess.Popen(['dosocs2', 'scan', str(path)], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(str(p.communicate()))\n (output) = p\n print(\"####################\")\n print(output)\n print(\"RECORD CREATED\")\n else:\n print(\"RECORD EXISTS IN MAP TABLE\")\n else:\n print(\"NO DIRECTORY, SKIPPING\")\n else:\n print(\"DUPLICATE RECORD FOUND IN REPO_SBOM_SCANS. 
SKIPPING...\")\n cur.execute(\"update augur_repo_map a set dosocs_pkg_name = b.name from packages b where a.repo_path = b.download_location;\")\n cur.execute(\"update augur_repo_map a set dosocs_pkg_id = b.package_id from packages b where a.repo_path = b.download_location;\")\n connection.commit()\n cur.close()\n connection.close()\n return\n\n def parse_json(doc_1, cre_1, pac_1, pac_lif_1, pac_2, fil_dat_1, fil_rel_1, bas_rel_1, cov_1, cur, repo_id):\n license_information = {}\n temp_1 = {}\n for i in range(0, int(len(doc_1[0])/2)):\n j = i*2\n temp_1[doc_1[0][j]] = doc_1[0][j+1]\n\n doc_1_temp = {**temp_1}\n\n temp_1 = {}\n for i in range(0, int(len(cre_1[0])/2)):\n j = i*2\n temp_1[cre_1[0][j]] = cre_1[0][j+1]\n\n cre_1_temp = {**temp_1}\n\n temp_1 = {}\n for i in range(0, int(len(pac_1[0])/2)):\n j = i*2\n temp_1[pac_1[0][j]] = pac_1[0][j+1]\n temp_2 = {}\n for i in range(0, int(len(pac_lif_1[0])/2)):\n j = i*2\n temp_2[pac_lif_1[0][j]] = pac_lif_1[0][j+1]\n temp_3 = {}\n for i in range(0, int(len(pac_2[0])/2)):\n j = i*2\n temp_3[pac_2[0][j]] = pac_2[0][j+1]\n\n pac_temp = {**temp_1, **temp_2, **temp_3}\n\n temp_2 = {}\n for g in range(0, int(len(fil_dat_1))):\n temp_1 = {}\n for i in range(0, int(len(fil_dat_1[g])/2)):\n j = i*2\n temp_1[fil_dat_1[g][j]] = fil_dat_1[g][j+1]\n temp_1['File Relationship'] = fil_rel_1[g][2].split(\": \")[1]\n temp_2[\"File \" + str(g)] = temp_1\n fil_temp = {**temp_2}\n\n temp_2 = {}\n for k in range(0, int(len(bas_rel_1))):\n temp_2[\"Relationship \" + str(k)] = bas_rel_1[k][1]\n bas_rel_temp = {**temp_2}\n\n temp_1 = {}\n for i in range(0, int(len(cov_1[0])/2)):\n j = i*2\n temp_1[cov_1[0][j]] = cov_1[0][j+1]\n\n cov_temp = {**temp_1}\n\n license_information['Document Information'] = doc_1_temp\n license_information['Creation Information'] = cre_1_temp\n license_information['Package Information'] = pac_temp\n license_information['File Information'] = fil_temp\n license_information['Package Relationships'] = bas_rel_temp\n license_information['License Coverage'] = cov_temp\n\n cur.execute(\"insert into augur_data.repo_sbom_scans(repo_id, sbom_scan) VALUES(\" + str(repo_id) + \",\" + chr(39) + str(json.dumps(license_information)).replace(\"'\", \"\") + chr(39) + \");\")\n\n def grabreg(records, repo_id, dsfile):\n print(\"DETAILS FOUND. CREATING DOCUMENT\")\n proc = subprocess.Popen(\"dosocs2 generate \" + str(records[0][0]), shell=True, stdout=PIPE, stderr=PIPE)\n varerr = str(str(proc.stderr.read()).split(\" \")[3])\n charvarerr = varerr.split(\"\\\\\")[0]\n print(\"Document_id: \" + str(charvarerr))\n #f = open(\"/home/sean/dosocs2/accessDB/scans-tv/\" + repo_name + \"-full.txt\",\"w\")\n #proc = subprocess.call(\"dosocs2 print \" + str(charvarerr) + \" -T 2.0.tag.coverage\", shell=True, stdout=f, stderr=f)\n pope = subprocess.Popen(\"dosocs2 print \" + str(charvarerr) + \" -T \" + dsfile, shell=True, stdout=PIPE, stderr=PIPE)\n out, err = pope.communicate()\n #if out: #with open('ex-raw.txt', 'w+') as example:\n # example.write(out.decode('UTF-8'))\n if err: print(err.decode('UTF-8'))\n #print (out) #package_sr_1 = re.findall(r'(PackageName): (.*)\\n(SPDXID): (.*)\\n(PackageVersion|)? ?(.*|)\\n?(PackageFileName): (.*)\\n(PackageSupplier): (.*)\\n(PackageOriginator): (.*)\\n(PackageDownloadLocation): (.*)\\n(PackageVerificationCode):? 
?(.*|)\\n?(PackageHomePage): (.*)\\n(PackageLic> doc_1 = re.findall(r'(DataLicense): (.*)\\n(SPDXID): (.*)\\n(DocumentNamespace): (.*)\\n(DocumentName): (.*)\\n(DocumentComment|): ?(.*|)\\n?(LicenseListVersion):(.*)', out.decode('UTF-8'))\n cre_1 = re.findall(r'(Creator): (.*)\\n(Created): (.*)\\n(CreatorComment|): ?(.*|)', out.decode('UTF-8'))\n pac_1 = re.findall(r'(PackageName): (.*)\\n(SPDXID): (.*)\\n(PackageFileName): (.*)\\n(PackageDownloadLocation): (.*)\\n(PackageVerificationCode): (.*)\\n(PackageHomePage): (.*)\\n(PackageLicenseConcluded): (.*)\\n(PackageLicenseDeclared): (.*)', out.decode('UTF-8'))\n pac_lif_1 = re.findall(r'(PackageLicenseInfoFromFiles): (.*)', out.decode('UTF-8'))\n pac_2 = re.findall(r'(PackageCopyrightText): (.*)', out.decode('UTF-8'))\n fil_dat_1 = re.findall(r'(FileName): (.*)\\n(SPDXID): (.*)\\n(FileType): (.*)\\n(FileChecksum): (.*)\\n(LicenseConcluded): (.*)\\n(LicenseInfoInFile): (.*)\\n(LicenseComments|): ?(.*|)\\n(FileCopyrightText): (.*)\\n(FileComment|): ?(.*|)\\n(FileNotice|): ?(.*|)\\n', out.decode('UTF-8'))\n fil_rel_1 = re.findall(r'(## Relationships)\\n((\\w.*)\\n)*', out.decode('UTF-8'))\n bas_rel_1 = re.findall(r'## --------------- Relationship ---------------\\n(Relationship): (.*?)\\n', out.decode('UTF-8'))\n cov_1 = re.findall(r'(TotalFiles): (.*)\\n(DeclaredLicenseFiles): (.*)\\n(PercentTotalLicenseCoverage): (.*)\\n', out.decode('UTF-8'))\n return (doc_1, cre_1, pac_1, pac_lif_1, pac_2, fil_dat_1, fil_rel_1, bas_rel_1, cov_1)\n\n def docscan(dbname, user, password, host, port, dsfile, ipath):\n connection = psycopg2.connect(\n user = user,\n password = password,\n database = dbname,\n host = host,\n port = port,\n )\n print(\"********************\")\n cur = connection.cursor()\n r = cur.execute(\"set search_path to augur_data; select repo_path, repo_id, repo_group_id, repo_name from repo order by repo_group_id;\")\n rec = cur.fetchall()\n for sector in rec:\n print(sector)\n repo_id = sector[1]\n print(\"****************\")\n print(repo_id)\n cur.execute(\"set search_path to spdx;\")\n cur.execute(\"select sbom_scan from augur_data.repo_sbom_scans where repo_id = \" + str(repo_id) + \" LIMIT 1;\")\n determin = cur.fetchall()\n if not determin:\n cur.execute(\"select dosocs_pkg_id from spdx.augur_repo_map where repo_id = \" + str(repo_id) + \" LIMIT 1;\")\n records = cur.fetchall()\n print(\"****************\")\n if records and records[0][0] != None:\n (doc_1, cre_1, pac_1, pac_lif_1, pac_2, fil_dat_1, fil_rel_1, bas_rel_1, cov_1) = grabreg(records, repo_id, dsfile)\n parse_json(doc_1, cre_1, pac_1, pac_lif_1, pac_2, fil_dat_1, fil_rel_1, bas_rel_1, cov_1, cur, repo_id)\n connection.commit()\n else:\n print(\"ERROR: RECORD DOES NOT EXIST IN MAPPING TABLE\")\n else:\n print(\"DUPLICATE RECORD FOUND. 
SKIPPING\")\n return\n\n print(\"---------------------\")\n print(\"INITIAL SCANS RUNNING\")\n print(\"---------------------\")\n initscan(dbname, user, password, host, port, dsfile, ipath, depth)\n #print(os.getcwd())\n os.chdir(wd)\n #print(os.getcwd())\n print(\"------------------\")\n print(\"SBOM SCANS RUNNING\")\n print(\"------------------\")\n docscan(dbname, user, password, host, port, dsfile, ipath)\n # Collection and insertion of data happens here\n\n # ...\n\n # Register this task as completed.\n # This is a method of the worker class that is required to be called upon completion\n # of any data collection model, this lets the broker know that this worker is ready\n # for another task\n self.register_task_completion(task, repo_id, 'spdx')", "title": "" }, { "docid": "5bcfca7dbc8aeeb5e7151df22a8fcf27", "score": "0.4858701", "text": "def write_model_structures(self, out_dir, top=0, suffix=\".model.atm\", idfilter=[]):\n \n if self.no_structure_output:\n self.die(\"Cannot write model structures as structure output is disabled\")\n \n if out_dir:\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n else:\n out_dir = \"\"\n \n for i, decoy in enumerate(self.results):\n if top > 0 and i >= top:\n break\n \n decoyname = \"%s_%d%s_%d\"%(decoy.struc, decoy.startres, decoy.startinscode, decoy.length)\n \n if idfilter and (decoyname not in idfilter):\n continue\n \n decoyfile = os.path.join(out_dir, decoyname+suffix)\n \n model = self.assemble_model(decoy)\n \n f = open(decoyfile, \"w\")\n try:\n f.write(str(model))\n finally:\n f.close()", "title": "" }, { "docid": "c9c29b3d11e6154011963470aa2e5027", "score": "0.48545107", "text": "def post_process(model_output: object) -> object:\n pass", "title": "" }, { "docid": "541e42991a59a34cb277e301acc9be10", "score": "0.48377407", "text": "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n input_filepath = Path(input_filepath)\n output_filepath = Path(output_filepath)\n domains(input_filepath/'../external'/'domains'/'data', output_filepath/'domains'/'data')", "title": "" }, { "docid": "915898a925ed52e7366a9f9a6f1e393f", "score": "0.48340023", "text": "def generate_model(Tool, cpacs_path):\n\n # Check for user-specified file to add to model\n if Tool.user_file.is_file():\n log.info(\"Using normal entries\")\n xd, yd = extract_data_set(Tool)\n if Tool.aeromap_case:\n log.info(\"Using aeromap entries\")\n xd_am, yd_am = extract_am_data(Tool, cpacs_path)\n xd = np.concatenate((xd_am, xd), axis=0)\n yd = np.concatenate((yd_am, yd), axis=0)\n elif Tool.aeromap_case:\n log.info(\"Using aeromap entries\")\n xd, yd = extract_am_data(Tool, cpacs_path)\n else:\n raise (FileNotFoundError(\"No aeromap or SM file has been given !\"))\n create_surrogate(Tool, xd, yd)", "title": "" }, { "docid": "82fdb2a1e9b2bd48c204f38386cc3888", "score": "0.4826749", "text": "def mergeModels(source_rpsbml, target_rpsbml, logger=None):\n logger = logger or logging.getLogger(__name__)\n #target_rpsbml.model = target_document.getModel()\n #Find the ID's of the similar target_rpsbml.model species\n ################ MODEL FBC ########################\n if not target_rpsbml.getModel().isPackageEnabled('fbc'):\n rpSBML.checklibSBML(target_rpsbml.getModel().enablePackage(\n 'http://www.sbml.org/sbml/level3/version1/fbc/version2',\n 'fbc',\n True),\n 'Enabling the FBC package')\n if not source_rpsbml.getModel().isPackageEnabled('fbc'):\n 
rpSBML.checklibSBML(source_rpsbml.getModel().enablePackage(\n 'http://www.sbml.org/sbml/level3/version1/fbc/version2',\n 'fbc',\n True),\n 'Enabling the FBC package')\n target_fbc = target_rpsbml.getModel().getPlugin('fbc')\n source_fbc = source_rpsbml.getModel().getPlugin('fbc')\n # note sure why one needs to set this as False\n rpSBML.checklibSBML(source_rpsbml.document.setPackageRequired('fbc', False), 'enabling FBC package')\n ################ UNITDEFINITIONS ######\n # return the list of unit definitions id's for the target to avoid overwritting\n # WARNING: this means that the original unit definitions will be prefered over the new one\n target_unitDefID = [i.getId() for i in target_rpsbml.getModel().getListOfUnitDefinitions()]\n for source_unitDef in source_rpsbml.getModel().getListOfUnitDefinitions():\n if not source_unitDef.getId() in target_unitDefID: # have to compare by ID since no annotation\n # create a new unitDef in the target\n target_unitDef = target_rpsbml.getModel().createUnitDefinition()\n rpSBML.checklibSBML(target_unitDef, 'fetching target unit definition')\n #copy unitDef info to the target\n rpSBML.checklibSBML(target_unitDef.setId(source_unitDef.getId()),\n 'setting target unit definition ID')\n rpSBML.checklibSBML(target_unitDef.setAnnotation(source_unitDef.getAnnotation()),\n 'setting target unit definition Annotation')\n for source_unit in source_unitDef.getListOfUnits():\n #copy unit info to the target unitDef\n target_unit = target_unitDef.createUnit()\n rpSBML.checklibSBML(target_unit, 'creating target unit')\n rpSBML.checklibSBML(target_unit.setKind(source_unit.getKind()),\n 'setting target unit kind')\n rpSBML.checklibSBML(target_unit.setExponent(source_unit.getExponent()),\n 'setting target unit exponent')\n rpSBML.checklibSBML(target_unit.setScale(source_unit.getScale()),\n 'setting target unit scale')\n rpSBML.checklibSBML(target_unit.setMultiplier(source_unit.getMultiplier()),\n 'setting target unit multiplier')\n target_unitDefID.append(source_unitDef.getId()) #add to the list to make sure its not added twice\n ################ COMPARTMENTS ###############\n # Compare by MIRIAM annotations\n #Note that key is source and value is target conversion\n comp_source_target = {}\n for source_compartment in source_rpsbml.getModel().getListOfCompartments():\n found = False\n target_ids = [i.getId() for i in target_rpsbml.getModel().getListOfCompartments()]\n source_annotation = source_compartment.getAnnotation()\n if not source_annotation:\n logger.warning('No annotation for the source of compartment '+str(source_compartment.getId()))\n continue\n # compare by MIRIAM first\n for target_compartment in target_rpsbml.getModel().getListOfCompartments():\n target_annotation = target_compartment.getAnnotation()\n if not target_annotation:\n logger.warning('No annotation for the target of compartment: '+str(target_compartment.getId()))\n continue\n if source_rpsbml.compareMIRIAMAnnotations(source_annotation, target_annotation):\n found = True\n comp_source_target[source_compartment.getId()] = target_compartment.getId()\n break\n if not found:\n #if the id is not found, see if the ids already exists\n if source_compartment.getId() in target_ids:\n comp_source_target[source_compartment.getId()] = source_compartment.getId()\n found = True\n #if there is not MIRIAM match and the id's differ then add it\n else:\n target_compartment = target_rpsbml.getModel().createCompartment()\n rpSBML.checklibSBML(target_compartment, 'Creating target compartment')\n 
rpSBML.checklibSBML(target_compartment.setMetaId(source_compartment.getMetaId()),\n 'setting target metaId')\n #make sure that the ID is different\n if source_compartment.getId()==target_compartment.getId():\n rpSBML.checklibSBML(target_compartment.setId(source_compartment.getId()+'_sourceModel'),\n 'setting target id')\n else:\n rpSBML.checklibSBML(target_compartment.setId(source_compartment.getId()),\n 'setting target id')\n rpSBML.checklibSBML(target_compartment.setName(source_compartment.getName()),\n 'setting target name')\n rpSBML.checklibSBML(target_compartment.setConstant(source_compartment.getConstant()),\n 'setting target constant')\n rpSBML.checklibSBML(target_compartment.setAnnotation(source_compartment.getAnnotation()),\n 'setting target annotation')\n rpSBML.checklibSBML(target_compartment.setSBOTerm(source_compartment.getSBOTerm()),\n 'setting target annotation')\n comp_source_target[target_compartment.getId()] = target_compartment.getId()\n # self.logger.debug('comp_source_target: '+str(comp_source_target))\n ################ PARAMETERS ###########\n # WARNING: here we compare by ID\n targetParametersID = [i.getId() for i in target_rpsbml.getModel().getListOfParameters()]\n for source_parameter in source_rpsbml.getModel().getListOfParameters():\n if source_parameter.getId() not in targetParametersID:\n target_parameter = target_rpsbml.getModel().createParameter()\n rpSBML.checklibSBML(target_parameter, 'creating target parameter')\n rpSBML.checklibSBML(target_parameter.setId(source_parameter.getId()), 'setting target parameter ID')\n rpSBML.checklibSBML(target_parameter.setSBOTerm(source_parameter.getSBOTerm()),\n 'setting target parameter SBO')\n rpSBML.checklibSBML(target_parameter.setUnits(source_parameter.getUnits()),\n 'setting target parameter Units')\n rpSBML.checklibSBML(target_parameter.setValue(source_parameter.getValue()),\n 'setting target parameter Value')\n rpSBML.checklibSBML(target_parameter.setConstant(source_parameter.getConstant()),\n 'setting target parameter ID')\n ################ FBC GENE PRODUCTS ########################\n #WARNING: here we compare by ID\n targetGenProductID = [i.getId() for i in target_fbc.getListOfGeneProducts()]\n for source_geneProduct in source_fbc.getListOfGeneProducts():\n if not source_geneProduct.getId() in targetGenProductID:\n target_geneProduct = target_fbc.createGeneProduct()\n rpSBML.checklibSBML(target_geneProduct, 'creating target gene product')\n rpSBML.checklibSBML(target_geneProduct.setId(source_geneProduct.getId()),\n 'setting target gene product id')\n rpSBML.checklibSBML(target_geneProduct.setLabel(source_geneProduct.getLabel()),\n 'setting target gene product label')\n rpSBML.checklibSBML(target_geneProduct.setName(source_geneProduct.getName()),\n 'setting target gene product name')\n rpSBML.checklibSBML(target_geneProduct.setMetaId(source_geneProduct.getMetaId()),\n 'setting target gene product meta_id')\n ############### FBC OBJECTIVES ############\n #WARNING: here we compare by ID\n #TODO: if overlapping id's need to replace the id with modified, as for the species\n targetObjectiveID = [i.getId() for i in target_fbc.getListOfObjectives()]\n sourceObjectiveID = [i.getId() for i in source_fbc.getListOfObjectives()]\n for source_objective in source_fbc.getListOfObjectives():\n if not source_objective.getId() in targetObjectiveID:\n target_objective = target_fbc.createObjective()\n rpSBML.checklibSBML(target_objective, 'creating target objective')\n 
rpSBML.checklibSBML(target_objective.setId(source_objective.getId()), 'setting target objective')\n rpSBML.checklibSBML(target_objective.setName(source_objective.getName()), 'setting target objective')\n rpSBML.checklibSBML(target_objective.setType(source_objective.getType()),\n 'setting target objective type')\n for source_fluxObjective in source_objective.getListOfFluxObjectives():\n target_fluxObjective = target_objective.createFluxObjective()\n rpSBML.checklibSBML(target_fluxObjective, 'creating target flux objective')\n rpSBML.checklibSBML(target_fluxObjective.setName(source_fluxObjective.getName()),\n 'setting target flux objective name')\n rpSBML.checklibSBML(target_fluxObjective.setCoefficient(source_fluxObjective.getCoefficient()),\n 'setting target flux objective coefficient')\n rpSBML.checklibSBML(target_fluxObjective.setReaction(source_fluxObjective.getReaction()),\n 'setting target flux objective reaction')\n rpSBML.checklibSBML(target_fluxObjective.setAnnotation(source_fluxObjective.getAnnotation()),\n 'setting target flux obj annotation from source flux obj')\n rpSBML.checklibSBML(target_objective.setAnnotation(source_objective.getAnnotation()),\n 'setting target obj annotation from source obj')\n # self.logger.debug('targetObjectiveID: '+str(targetObjectiveID))\n # self.logger.debug('sourceObjectiveID: '+str(sourceObjectiveID))\n ################ SPECIES ####################\n species_source_target = rpSBML.compareSpecies(comp_source_target, source_rpsbml, target_rpsbml, logger=logger)\n # self.logger.debug('species_source_target: '+str(species_source_target))\n target_species_ids = [i.id for i in target_rpsbml.getModel().getListOfSpecies()]\n for source_species in species_source_target:\n list_target = [i for i in species_source_target[source_species]]\n if source_species in list_target:\n logger.warning('The source ('+str(source_species)+') and target species ids ('+str(list_target)+') are the same')\n #if match, replace the annotation from the source to the target\n if not species_source_target[source_species]=={}:\n list_species = [i for i in species_source_target[source_species]]\n #self.logger.debug('list_species: '+str(list_species))\n if len(list_species)==0:\n continue\n #self.logger.warning('Source species '+str(member.getIdRef())+' has been created in the target model')\n elif len(list_species)>1:\n logger.warning('There are multiple matches to the species '+str(source_species)+'... 
taking the first one: '+str(list_species))\n #TODO: loop throught the annotations and replace the non-overlapping information\n target_member = target_rpsbml.getModel().getSpecies(list_species[0])\n source_member = source_rpsbml.getModel().getSpecies(source_species)\n rpSBML.checklibSBML(target_member, 'Retraiving the target species: '+str(list_species[0]))\n rpSBML.checklibSBML(source_member, 'Retreiving the source species: '+str(source_species))\n rpSBML.checklibSBML(target_member.setAnnotation(source_member.getAnnotation()), 'Replacing the annotations')\n #if no match then add it to the target model\n else:\n # self.logger.debug('Creating source species '+str(source_species)+' in target rpsbml')\n source_species = source_rpsbml.getModel().getSpecies(source_species)\n if not source_species:\n logger.error('Cannot retreive model species: '+str(source_species))\n else:\n rpSBML.checklibSBML(source_species, 'fetching source species')\n targetModel_species = target_rpsbml.getModel().createSpecies()\n rpSBML.checklibSBML(targetModel_species, 'creating species')\n rpSBML.checklibSBML(targetModel_species.setMetaId(source_species.getMetaId()),\n 'setting target metaId')\n ## need to check if the id of the source species does not already exist in the target model\n if source_species.getId() in target_species_ids:\n target_species_id = source_rpsbml.getModel().id+'__'+str(source_species.getId())\n if not source_species.getId() in species_source_target:\n species_source_target[source_species.getId()] = {}\n species_source_target[source_species.getId()][source_rpsbml.getModel().id+'__'+str(source_species.getId())] = 1.0\n else:\n target_species_id = source_species.getId()\n rpSBML.checklibSBML(targetModel_species.setId(target_species_id),\n 'setting target id')\n rpSBML.checklibSBML(targetModel_species.setCompartment(comp_source_target[source_species.getCompartment()]),\n 'setting target compartment')\n rpSBML.checklibSBML(targetModel_species.setInitialConcentration(\n source_species.getInitialConcentration()),\n 'setting target initial concentration')\n rpSBML.checklibSBML(targetModel_species.setBoundaryCondition(\n source_species.getBoundaryCondition()),\n 'setting target boundary concentration')\n rpSBML.checklibSBML(targetModel_species.setHasOnlySubstanceUnits(\n source_species.getHasOnlySubstanceUnits()),\n 'setting target has only substance units')\n rpSBML.checklibSBML(targetModel_species.setBoundaryCondition(\n source_species.getBoundaryCondition()),\n 'setting target boundary condition')\n rpSBML.checklibSBML(targetModel_species.setConstant(source_species.getConstant()),\n 'setting target constant')\n rpSBML.checklibSBML(targetModel_species.setAnnotation(source_species.getAnnotation()),\n 'setting target annotation')\n ################ REACTIONS ###################\n # TODO; consider the case where two reactions have the same ID's but are not the same reactions\n reactions_source_target = {}\n for source_reaction in source_rpsbml.getModel().getListOfReactions():\n is_found = False\n for target_reaction in target_rpsbml.getModel().getListOfReactions():\n score, match = rpSBML.compareReaction(species_source_target, source_reaction, target_reaction, logger=logger)\n if match:\n # self.logger.debug('Source reaction '+str(source_reaction)+' matches with target reaction '+str(target_reaction))\n # source_reaction[source_reaction.getId()] = target_reaction.getId()\n reactions_source_target[source_reaction.getId()] = target_reaction.getId()\n is_found = True\n break\n if not is_found:\n # 
self.logger.debug('Cannot find source reaction: '+str(source_reaction.getId()))\n rpSBML.checklibSBML(source_reaction, 'fetching source reaction')\n target_reaction = target_rpsbml.getModel().createReaction()\n rpSBML.checklibSBML(target_reaction, 'create reaction')\n target_fbc = target_reaction.getPlugin('fbc')\n rpSBML.checklibSBML(target_fbc, 'fetching target FBC package')\n source_fbc = source_reaction.getPlugin('fbc')\n rpSBML.checklibSBML(source_fbc, 'fetching source FBC package')\n source_upperFluxBound = source_fbc.getUpperFluxBound()\n rpSBML.checklibSBML(source_upperFluxBound, 'fetching upper flux bound')\n rpSBML.checklibSBML(target_fbc.setUpperFluxBound(source_upperFluxBound),\n 'setting upper flux bound')\n source_lowerFluxBound = source_fbc.getLowerFluxBound()\n rpSBML.checklibSBML(source_lowerFluxBound, 'fetching lower flux bound')\n rpSBML.checklibSBML(target_fbc.setLowerFluxBound(source_lowerFluxBound),\n 'setting lower flux bound')\n rpSBML.checklibSBML(target_reaction.setId(source_reaction.getId()), 'set reaction id')\n rpSBML.checklibSBML(target_reaction.setName(source_reaction.getName()), 'set name')\n rpSBML.checklibSBML(target_reaction.setSBOTerm(source_reaction.getSBOTerm()),\n 'setting the reaction system biology ontology (SBO)') # set as process\n # TODO: consider having the two parameters as input to the function\n rpSBML.checklibSBML(target_reaction.setReversible(source_reaction.getReversible()),\n 'set reaction reversibility flag')\n rpSBML.checklibSBML(target_reaction.setFast(source_reaction.getFast()),\n 'set reaction \"fast\" attribute')\n rpSBML.checklibSBML(target_reaction.setMetaId(source_reaction.getMetaId()), 'setting species meta_id')\n rpSBML.checklibSBML(target_reaction.setAnnotation(source_reaction.getAnnotation()),\n 'setting annotation for source reaction')\n # Reactants\n # self.logger.debug('Setting reactants')\n for source_reaction_reactantID in [i.species for i in source_reaction.getListOfReactants()]:\n # self.logger.debug('\\tAdding '+str(source_reaction_reactantID))\n target_reactant = target_reaction.createReactant()\n rpSBML.checklibSBML(target_reactant, 'create target reactant')\n if source_reaction_reactantID in species_source_target:\n if not species_source_target[source_reaction_reactantID]=={}:\n if len(species_source_target[source_reaction_reactantID])>1:\n logger.warning('Multiple matches for '+str(source_reaction_reactantID)+': '+str(species_source_target[source_reaction_reactantID]))\n logger.warning('Taking one the first one arbitrarely: '+str([i for i in species_source_target[source_reaction_reactantID]][0]))\n # WARNING: taking the first one arbitrarely\n rpSBML.checklibSBML(target_reactant.setSpecies(\n [i for i in species_source_target[source_reaction_reactantID]][0]), 'assign reactant species')\n else:\n rpSBML.checklibSBML(target_reactant.setSpecies(source_reaction_reactantID),\n 'assign reactant species')\n else:\n rpSBML.checklibSBML(target_reactant.setSpecies(source_reaction_reactantID),\n 'assign reactant species')\n source_reactant = source_reaction.getReactant(source_reaction_reactantID)\n rpSBML.checklibSBML(source_reactant, 'fetch source reactant')\n rpSBML.checklibSBML(target_reactant.setConstant(source_reactant.getConstant()),\n 'set \"constant\" on species '+str(source_reactant.getConstant()))\n rpSBML.checklibSBML(target_reactant.setStoichiometry(source_reactant.getStoichiometry()),\n 'set stoichiometry ('+str(source_reactant.getStoichiometry)+')')\n # Products\n # self.logger.debug('Setting products')\n for 
source_reaction_productID in [i.species for i in source_reaction.getListOfProducts()]:\n # self.logger.debug('\\tAdding '+str(source_reaction_productID))\n target_product = target_reaction.createProduct()\n rpSBML.checklibSBML(target_product, 'create target reactant')\n if source_reaction_productID in species_source_target:\n if not species_source_target[source_reaction_productID]=={}:\n if len(species_source_target[source_reaction_reactantID])>1:\n logger.warning('Multiple matches for '+str(source_reaction_productID)+': '+str(species_source_target[source_reaction_productID]))\n logger.warning('Taking one arbitrarely')\n # WARNING: taking the first one arbitrarely\n rpSBML.checklibSBML(target_product.setSpecies(\n [i for i in species_source_target[source_reaction_productID]][0]), 'assign reactant product')\n else:\n rpSBML.checklibSBML(target_product.setSpecies(source_reaction_productID),\n 'assign reactant product')\n else:\n rpSBML.checklibSBML(target_product.setSpecies(source_reaction_productID),\n 'assign reactant product')\n source_product = source_reaction.getProduct(source_reaction_productID)\n rpSBML.checklibSBML(source_product, 'fetch source reactant')\n rpSBML.checklibSBML(target_product.setConstant(source_product.getConstant()),\n 'set \"constant\" on product '+str(source_product.getConstant()))\n rpSBML.checklibSBML(target_product.setStoichiometry(source_product.getStoichiometry()),\n 'set stoichiometry ('+str(source_product.getStoichiometry)+')')\n #### GROUPS #####\n # TODO loop through the groups to add them\n if not target_rpsbml.getModel().isPackageEnabled('groups'):\n rpSBML.checklibSBML(target_rpsbml.getModel().enablePackage(\n 'http://www.sbml.org/sbml/level3/version1/groups/version1',\n 'groups',\n True),\n 'Enabling the GROUPS package')\n #!!!! 
must be set to false for no apparent reason\n rpSBML.checklibSBML(source_rpsbml.document.setPackageRequired('groups', False), 'enabling groups package')\n source_groups = source_rpsbml.getModel().getPlugin('groups')\n rpSBML.checklibSBML(source_groups, 'fetching the source model groups')\n target_groups = target_rpsbml.getModel().getPlugin('groups')\n rpSBML.checklibSBML(target_groups, 'fetching the target model groups')\n # # self.logger.debug('species_source_target: '+str(species_source_target))\n # # self.logger.debug('reactions_source_target: '+str(reactions_source_target))\n source_groups_ids = [i.id for i in source_groups.getListOfGroups()]\n target_groups_ids = [i.id for i in target_groups.getListOfGroups()]\n #NOTE: only need to update the source species since these are the ones that are replaced with their equivalent\n for source_group in source_groups.getListOfGroups():\n #overwrite in the group the reaction members that have been replaced\n for member in source_group.getListOfMembers():\n if member.getIdRef() in reactions_source_target:\n if reactions_source_target[member.getIdRef()]:\n member.setIdRef(reactions_source_target[member.getIdRef()])\n #overwrite in the group the species members that have been replaced\n for member in source_group.getListOfMembers():\n if member.getIdRef() in species_source_target:\n if species_source_target[member.getIdRef()]:\n list_species = [i for i in species_source_target[member.getIdRef()]]\n logger.debug('species_source_target: '+str(species_source_target))\n logger.debug('list_species: '+str(list_species))\n if len(list_species)==0:\n continue\n #self.logger.warning('Source species '+str(member.getIdRef())+' has been created in the target model')\n elif len(list_species)>1:\n logger.warning('There are multiple matches to the species '+str(member.getIdRef())+'... 
taking the first one: '+str(list_species))\n rpSBML.checklibSBML(member.setIdRef(list_species[0]), 'Setting name to the groups member')\n #create and add the groups if a source group does not exist in the target\n if not source_group.id in target_groups_ids:\n rpSBML.checklibSBML(target_groups.addGroup(source_group),\n 'copy the source groups to the target groups')\n #if the group already exists in the target then need to add new members\n else:\n target_group = target_groups.getGroup(source_group.id)\n target_group_ids = [i.getIdRef() for i in target_group.getListOfMembers()]\n for member in source_group.getListOfMembers():\n if member.getIdRef() not in target_group_ids:\n new_member = target_group.createMember()\n rpSBML.checklibSBML(new_member, 'Creating a new groups member')\n rpSBML.checklibSBML(new_member.setIdRef(member.getIdRef()), 'Setting name to the groups member')\n ###### TITLES #####\n target_rpsbml.getModel().setId(target_rpsbml.getModel().getId()+'__'+source_rpsbml.getModel().getId())\n target_rpsbml.getModel().setName(target_rpsbml.getModel().getName()+' merged with '+source_rpsbml.getModel().getId())\n rpSBML._checkSingleParent(target_rpsbml, logger=logger)\n return species_source_target, reactions_source_target", "title": "" }, { "docid": "bbeb6c6bed7cc0c53a7841e4b1d85390", "score": "0.48244053", "text": "def _build_and_restore_models(self, dataset: tf.data.Dataset):", "title": "" }, { "docid": "d986fe8d0ee9a8afdf48bbbb6519caa6", "score": "0.48167214", "text": "def _merge_models(self):\n\n if len(self.model_list) > 1:\n gesamt = Gesamt(mode='alignment', pdbin=self.modified_model_list, pdbout=self.modified_pdbfname,\n workdir=self.workdir, logger=self.logger)\n gesamt.run()\n\n else:\n shutil.copyfile(self.modified_model_list[0], self.modified_pdbfname)", "title": "" }, { "docid": "8523055a34f8e7a7a69f95dbbe35bc2b", "score": "0.48127815", "text": "def generate_models_and_admin(dia_path, app_dir, project_name, app_name):\r\n\r\n def format_text(string, indent=False):\r\n \"\"\"format string in lines of 80 or less characters\"\"\"\r\n retval = ''\r\n while string:\r\n line = string[:77]\r\n last_space = line.rfind(' ')\r\n if last_space != -1 and len(string) > 77:\r\n retval += \"%s \\\\\\n\" % string[:last_space]\r\n string = string[last_space + 1:]\r\n else:\r\n retval += \"%s\\n\" % string\r\n string = ''\r\n if string and indent:\r\n string = ' %s' % string\r\n return retval\r\n\r\n model_path = os.path.join(app_dir, 'models.py')\r\n admin_path = os.path.join(app_dir, 'admin.py')\r\n\r\n models_txt = 'from django.db import models\\n' + dia2django(dia_path)\r\n open(model_path, 'w').write(models_txt)\r\n\r\n classes = re.findall('class (\\w+)', models_txt)\r\n admin_txt = 'from django.contrib.admin import site, ModelAdmin\\n' + format_text('from %s.%s.models import %s' % (project_name, app_name, ', '.join(classes)), indent=True)\r\n admin_txt += format_text('\\n\\n%s' % '\\n'.join(map((lambda t: 'site.register(%s)' % t), classes)))\r\n open(admin_path, 'w').write(admin_txt)", "title": "" }, { "docid": "426ec087a2f3ce00b6aff225f7da24f8", "score": "0.4799731", "text": "def finalize_model(self,):\n X, y, _, _ = prepare_data_for_training(\n df=self.data,\n target=self.target,\n index_column=self.index_column,\n validation_test_size=0,\n )\n self.logger.info(\"Finalzing model\")\n Model = load_model(self.path_to_model)\n self.logger.info(\"Training model on all data\")\n final_model = Model()\n final_model.fit(X, y)\n final_model.save(\"playground\")\n self.final_model = 
Model()\n self.final_model.fit(X, y)", "title": "" }, { "docid": "a0c03ba7415f563349a39120120f07f6", "score": "0.4794456", "text": "def convert_model(\n model,\n download_dir=OMZ_CACHE,\n output_dir=OMZ_CACHE,\n precisions=None,\n force=False,\n *args,\n): # pylint: disable=keyword-arg-before-vararg\n download_dir = Path(\"\") if download_dir is None else Path(download_dir)\n output_dir = Path(\"\") if output_dir is None else Path(output_dir)\n precisions = precisions if precisions else {\"FP32\"}\n\n out = _get_ir_path(output_dir / model.subdirectory)\n if out and not force:\n return out\n\n namespace = NameSpace(\n python=shutil.which(\"python\"),\n dry_run=False,\n download_dir=download_dir,\n )\n\n mo_executable = shutil.which(\"mo\")\n\n if mo_executable:\n mo_path = Path(mo_executable)\n else:\n try:\n mo_path = Path(os.environ[\"INTEL_OPENVINO_DIR\"]) / \"tools/mo/openvino/tools/mo/mo.py\"\n if not mo_path.exists():\n mo_path = Path(os.environ[\"INTEL_OPENVINO_DIR\"]) / \"tools/model_optimizer/mo.py\"\n except KeyError:\n sys.exit(\n \"Unable to locate Model Optimizer. \"\n + \"Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.\"\n )\n\n mo_path = mo_path.resolve()\n mo_cmd_prefix = [namespace.python, \"--\", str(mo_path)]\n\n if str(mo_path).lower().endswith(\".py\"):\n mo_dir = mo_path.parent\n else:\n mo_package_path, stderr = _common.get_package_path(namespace.python, \"openvino.tools.mo\")\n mo_dir = mo_package_path\n\n if mo_package_path is None:\n mo_package_path, stderr = _common.get_package_path(args.python, \"mo\")\n if mo_package_path is None:\n sys.exit(f\"Unable to load Model Optimizer. Errors occurred: {stderr}\")\n mo_dir = mo_package_path.parent\n\n reporter = _reporting.Reporter(_reporting.DirectOutputContext())\n mo_props = ModelOptimizerProperties(\n cmd_prefix=mo_cmd_prefix,\n extra_args=[],\n base_dir=mo_dir,\n )\n shared_convert_args = (output_dir, namespace, mo_props, precisions)\n\n results = []\n models = []\n if model.model_stages:\n for model_stage in model.model_stages:\n results.append(_convert(reporter, model_stage, *shared_convert_args))\n models.append(model_stage)\n else:\n results.append(_convert(reporter, model, *shared_convert_args))\n models.append(model)\n\n failed_models = [model.name for model, successful in zip(models, results) if not successful]\n\n if failed_models:\n reporter.print(\"FAILED:\")\n for failed_model_name in failed_models:\n reporter.print(failed_model_name)\n sys.exit(1)\n\n return _get_ir_path(output_dir / model.subdirectory)", "title": "" }, { "docid": "b276de41b74da859fe73dbb28b8c30f0", "score": "0.4788148", "text": "def main(mode):\n if not os.path.exists(CONFIG['models_path']):\n os.mkdir(CONFIG['models_path'])\n if not os.path.exists(CONFIG['figures_path']):\n os.mkdir(CONFIG['figures_path'])\n AnnotationTransformer().create_dataset_jsons()\n ImageCreator(mode=mode).create_dataset()\n MaskCreator(mode=mode).create_dataset()\n FeatureExtractor(mode=mode, use_cuda=CONFIG['use_cuda']).create_dataset()", "title": "" }, { "docid": "d0e772dfe88b2292b5d7fb5d0eb0acb3", "score": "0.4781762", "text": "def preprocess(self):\r\n\r\n\t\tinitial_file_ages = {}\r\n\t\t\t\t\r\n\t\t# All the files within the data input directories after filtering out known artifacts\r\n\t\tfiles = [file for file in glob.glob(os.path.join(self.DATA_INPUTS_FOLDER_PATH, \"**\"), recursive=True) if not any(ignore in file for ignore in self.IGNORE_FILES) and os.path.isfile(file)]\r\n\t\t\r\n\t\tfor file in 
files:\r\n\t\t\tinitial_file_ages[file] = datetime.datetime.utcnow().isoformat()\r\n\r\n\t\twith open(self.DATABASE_FILE_PATH, 'w') as f:\r\n\t\t\tprint(\"Existing Files Detected:\")\r\n\t\t\tprint(json.dumps(initial_file_ages))\r\n\t\t\tf.write(json.dumps(initial_file_ages))", "title": "" }, { "docid": "7e9cf4a234d19fc760a36069ffe62865", "score": "0.47799337", "text": "def configure_model_dir():\n\n # create model dir if needed\n if not os.path.isdir('Model'):\n print('making a new model dir...')\n os.mkdir('Model')\n\n # delete old model prior to training\n if os.path.exists('Model/model.h5'):\n if os.path.exists('Model/model.h5'):\n print('removing old model...')\n os.remove('Model/model.h5')\n\n # remove old alphabet if making a new one\n if cfg.getboolean('args', 'make_alphabet'):\n if os.path.exists('Model/tokenizer.p'):\n print('removing old alphabet...')\n os.remove('Model/tokenizer.p')", "title": "" }, { "docid": "5fdd32048f6cd49d394fb6cb6365707b", "score": "0.4773355", "text": "def render_relation_migration(name, PARTS_DIR = powlib.PARTS_DIR, prefix_dir = \"./\"):\r\n\r\n splittxt = string.split(name, \"_\")\r\n model1 = splittxt[1]\r\n model2 = splittxt[2]\r\n \r\n print \" -- generate_migration: relation migration for models: \" + model1 + \" & \" + model2\r\n print \" -- following the naming convention rel_model1_model2\"\r\n print \" -- you gave:\", name\r\n \r\n # add the auto generated (but can be safely edited) warning to the outputfile\r\n infile = open (os.path.normpath(PARTS_DIR + \"/db_relation_migration_stub.part\"), \"r\")\r\n ostr = infile.read()\r\n infile.close()\r\n \r\n # add a creation date\r\n ostr = ostr.replace( \"#DATE\", str(datetime.date.today() ))\r\n # add model1 import\r\n ostr = ostr.replace( \"#IMPORT_MODEL1\", \"import \" + model1)\r\n # add model2 import\r\n ostr = ostr.replace( \"#IMPORT_MODEL2\", \"import \" + model2)\r\n \r\n # add the example migration for this models\r\n ostr = ostr.replace( \"#MODEL1\", model1)\r\n ostr = ostr.replace( \"#MODEL2_has_many\", powlib.pluralize(model2))\r\n ostr = ostr.replace( \"#MODEL2\", model2)\r\n \r\n filename = write_migration( name, \r\n \"relation between %s and %s\" % (model1, model2),\r\n prefix_dir,\r\n ostr\r\n )\r\n print \" -- created file:\" + str(os.path.normpath(os.path.join(prefix_dir,filename)))\r\n return", "title": "" }, { "docid": "78c61d848eb74b7dc70a9ab846ad4a03", "score": "0.47726065", "text": "def main():\r\n \r\n parser = OptionParser()\r\n mode = MODE_CREATE \r\n parser.add_option( \"-m\", \"--model\", \r\n action=\"store\", \r\n type=\"string\", \r\n dest=\"model\", \r\n help=\"defines the model for this migration.\", \r\n default =\"None\")\r\n parser.add_option( \"-f\", \"--force\", \r\n action=\"store_true\", \r\n dest=\"force\", \r\n help=\"forces overrides of existing files\",\r\n default=False)\r\n parser.add_option( \"-t\", \"--template\", \r\n action=\"store\", \r\n type=\"string\", \r\n dest=\"template\", \r\n help=\"forces a special mako template for these views\",\r\n default=\"/${context.get('template')}\")\r\n\r\n start = None\r\n end = None\r\n start = datetime.datetime.now()\r\n \r\n (options, args) = parser.parse_args()\r\n print options\r\n if options.model == \"None\":\r\n if len(args) > 0:\r\n # if no option flag (like -n) is given, it is \r\n # assumed that the first argument is the model name. 
(representing -n arg1)\r\n options.model = args[0]\r\n else:\r\n parser.error(\"You must at least specify an appname by giving -n <name>.\")\r\n \r\n scaffold(options.model, options.force, options.template)\r\n end = datetime.datetime.now()\r\n duration = None\r\n duration = end - start \r\n print \"generated_scaffold in(\"+ str(duration) +\")\"\r\n print\r\n return", "title": "" }, { "docid": "1907a127294507cf1a642eece050ced5", "score": "0.47628924", "text": "def generate_models(data_path, output_path, gpu, iterations, python_path,\n filelist=None,\n convert_images=True,\n keep_temp_directories=True,\n **kwargs\n ):\n if not filelist:\n assert len(os.listdir(data_path)) % 2 == 0,\\\n 'Odd number of folders in data_path, please provide a filelist ' +\\\n 'or delete a file'\n input_files = os.listdir(data_path)\n random.shuffle(input_files)\n else:\n # Open filelist and add them all to one list, ordered pairs\n with open(filelist, 'r') as f:\n filelist = json.load(f)\n input_files = []\n for pair in filelist:\n input_files.append(pair[0])\n input_files.append(pair[1])\n\n print('-'*80)\n print('Starting main')\n print('-'*80)\n for i in tqdm(range(0, len(input_files), 2)):\n start_time = time.time()\n\n # File names for input and output\n path1 = input_files[i]\n path2 = input_files[i+1]\n output_fn = str(path1) + '_' + str(path2)\n tqdm.write('Starting {}'.format(output_fn))\n # Output folder\n output_folder_path = join(output_path, output_fn)\n os.makedirs(output_folder_path, exist_ok=True)\n\n # 1. Copy images for safety\n for apath in [path1, path2]:\n tqdm.write('Copying {} images'.format(apath))\n copy_tree(join(data_path, apath), join(output_folder_path, apath))\n\n # 2. Prepare images for training\n for apath in [path1, path2]:\n tqdm.write('Prepare {} images for training'.format(apath))\n convert_frames_to_data(join(output_folder_path, apath),\n join(output_folder_path, apath + '_faces'),\n gpu=gpu, python_path=python_path,\n alignments_path=join(output_folder_path,\n '{}_alignment.txt'.format(apath)))\n\n # Time\n prep_finished_time = time.time()\n time_taken = time.time() - start_time\n tqdm.write('Finished preparation in {}'.format(\n str(datetime.timedelta(0, time_taken))))\n\n # 3. Train deepfakes model\n tqdm.write('Start training with {} iterations on gpu {}'.format(\n iterations, gpu))\n train(data_path1=join(output_folder_path, path1 + '_faces'),\n data_path2=join(output_folder_path, path2 + '_faces'),\n model_path=join(output_folder_path, 'models'),\n gpu=gpu, iterations=iterations, python_path=python_path)\n\n # Time\n time_taken = time.time() - prep_finished_time\n tqdm.write('Finished training in {}'.format(\n str(datetime.timedelta(0, time_taken))))\n\n # 4. 
Convert images with trained model\n folders_to_keep = ['models']\n if keep_temp_directories and convert_images:\n for apath in [path1, path2]:\n tqdm.write('Converting images: {}'.format(apath))\n out_path = path1 + '_' + path2 if apath == path1 else \\\n path2 + '_' + path1\n folders_to_keep.append(out_path)\n convert(data_path=join(data_path, path1),\n output_path=join(output_folder_path, out_path),\n model_path=join(output_folder_path, 'models'),\n gpu=gpu,\n python_path=python_path,\n alignments_path=join(output_folder_path,\n '{}_alignment.txt'.format(apath)),\n write_image_masks=join(output_folder_path,\n out_path + '_mask')\n )\n\n # Cleaning up\n if not keep_temp_directories:\n tqdm.write('Cleaning up')\n for folder in os.listdir(output_folder_path):\n if folder not in folders_to_keep:\n folder_path = join(output_folder_path, folder)\n if os.path.isfile(folder_path):\n os.remove(folder_path)\n else:\n shutil.rmtree(folder_path)\n\n # Time\n time_taken = time.time() - start_time\n tqdm.write('Finished in {}'.format(\n str(datetime.timedelta(0, time_taken))))", "title": "" }, { "docid": "2f318d2c72b8ab54b4492091da46b5f4", "score": "0.475867", "text": "def seql_mkmodel(input_name):\n location, fn = os.path.split(input_name)\n model_dir = os.path.join(location, '../', 'models', fn.split('.')[1])\n\n\n cmd = [\n 'mkdir', '-p', model_dir\n ]\n\n subprocess.call(cmd)\n\n binary_model_file = os.path.join(model_dir, 'model.bin')\n predictors_file = os.path.join(model_dir, 'model.predictors')\n\n cmd = [\n seql_mkmodel_cmd, '-i', input_name, '-o' , binary_model_file, '-O', predictors_file\n ]\n\n\n subprocess.call(cmd)", "title": "" }, { "docid": "e973a142d3dd43aff173a0e6f0776f0e", "score": "0.4754228", "text": "def process_args(args):\n\n args.input_dir = args.input_dir.strip()\n if args.input_dir == '' or not os.path.exists(os.path.join(args.input_dir, 'model.meta')):\n raise Exception(\"This scripts expects the input model was exist in '{0}' directory.\".format(args.input_dir))\n\n if args.tar_file == '':\n if args.ranges_file == '' or not os.path.exists(args.ranges_file):\n raise Exception(\"The specified range file '{0}' not exist.\".format(args.ranges_file))\n\n if args.scp_file == '' or not os.path.exists(args.scp_file):\n raise Exception(\"The specified scp file '{0}' not exist.\".format(args.scp_file))\n else:\n if not os.path.exists(args.tar_file):\n raise Exception(\"The specified tar file '{0}' not exist.\".format(args.tar_file))\n if not os.path.exists(args.tar_file.replace('.tar', '.npy')):\n raise Exception(\"There is no corresponding npy label file for tar file '{0}'.\".format(args.tar_file))\n\n if args.dropout_proportion > 1.0 or args.dropout_proportion < 0.0:\n raise Exception(\"The value of dropout-proportion must be in range [0 - 1].\")\n\n return args", "title": "" }, { "docid": "16e09c81aeac38cae013817bb477e21e", "score": "0.47513807", "text": "def _make_preparations(self):\n # concat the input wikifier file with generated wikifier file from output_df_dict\n wikifier_df = pd.concat([pd.read_csv(self.wikifier_file), self.output_df_dict[\"wikifier.csv\"]])\n temp_wikifier_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".csv\")\n wikifier_filepath = temp_wikifier_file.name\n wikifier_df.to_csv(wikifier_filepath, index=False)\n if self._debug:\n wikifier_df.to_csv(os.path.join(self.debug_dir, \"consolidated-wikifier.csv\"), index=False)\n _ = temp_wikifier_file.seek(0)\n\n # use t2wml api to add properties file to t2wml database\n all_properties_df = 
self.kgtk_properties_df\n all_properties_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".tsv\")\n all_properties_df.to_csv(all_properties_file.name, sep=\"\\t\", index=False)\n _ = all_properties_file.seek(0)\n add_entities_from_file(all_properties_file.name)\n\n # generate temp yaml file\n temp_yaml_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".yaml\")\n yaml_filepath = temp_yaml_file.name\n yaml.dump(self.t2wml_script, temp_yaml_file)\n temp_yaml_file.seek(0)\n\n # generate temp input dataset file\n temp_data_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".csv\")\n # data_filepath = \"{}.csv\".format(self.dataset_id)\n data_filepath = \"{}.csv\".format(f'{self.dataset_id}-{shortuuid.uuid()}')\n\n if os.path.islink(data_filepath) or os.path.exists(data_filepath):\n os.remove(data_filepath)\n os.symlink(temp_data_file.name, data_filepath)\n\n self.annotated_spreadsheet.to_csv(data_filepath, header=None, index=False)\n _ = temp_data_file.seek(0)\n\n # generate knowledge graph\n sheet_name = data_filepath\n output_kgtk_main_content = tempfile.NamedTemporaryFile(mode='r+', suffix=\".tsv\")\n t2wml_output_filepath = output_kgtk_main_content.name\n try:\n s = time()\n kg = KnowledgeGraph.generate_from_files(data_filepath, sheet_name, yaml_filepath, wikifier_filepath)\n kg.save_kgtk(t2wml_output_filepath)\n print(f'time take to get t2wml output: {time() - s} seconds')\n except:\n traceback.print_exc()\n raise ValueError(\"Generating kgtk knowledge graph file failed!\")\n finally:\n os.remove(data_filepath)\n\n t2wml_kgtk_df = pd.read_csv(t2wml_output_filepath, sep=\"\\t\", quoting=csv.QUOTE_NONE)\n if len(t2wml_kgtk_df) == 0:\n raise ValueError(\"An empty kgtk file was generated from t2wml! Please check!\")\n\n _ = output_kgtk_main_content.seek(0)\n\n # generate imploded file\n kgtk_imploded_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".tsv\")\n kgtk_imploded_file_name = kgtk_imploded_file.name\n shell_code = \"\"\"\n kgtk implode -i \"{}\" --allow-lax-qnodes --remove-prefixed-columns True --without si_units language_suffix > \"{}\"\n \"\"\".format(t2wml_output_filepath, kgtk_imploded_file_name)\n s = time()\n return_res = execute_shell_code(shell_code)\n print(f'time take to run kgtk implode: {time() - s} seconds')\n if return_res != \"\":\n print(return_res)\n raise ValueError(\"Running kgtk implode failed! Please check!\")\n _ = kgtk_imploded_file.seek(0)\n\n # concat metadata file\n metadata_df = pd.DataFrame()\n for name, each_df in self.output_df_dict.items():\n if each_df is not None and name.endswith(\".tsv\"):\n if name.strip() != 'datamart_schema_properties.tsv':\n metadata_df = pd.concat([metadata_df, each_df])\n metadata_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".tsv\")\n exploded_file = tempfile.NamedTemporaryFile(mode='r+', suffix=\".tsv\")\n metadata_file_name = metadata_file.name\n exploded_file_name = exploded_file.name\n metadata_df.to_csv(metadata_file_name, sep=\"\\t\", index=False, quoting=csv.QUOTE_NONE)\n _ = metadata_file.seek(0)\n\n # combine and explode the results\n shell_code = \"\"\"\n kgtk cat -i {} {} \\\n / explode --allow-lax-qnodes True --overwrite True \\\n > {}\n \"\"\".format(kgtk_imploded_file_name, metadata_file_name, exploded_file_name)\n s = time()\n return_res = execute_shell_code(shell_code)\n print(f'time take to run kgtk cat and explode: {time() - s} seconds')\n if return_res != \"\":\n print(return_res)\n raise ValueError(\"Running kgtk explode failed! 
Please check!\")\n # _ = metadata_file.seek(0)\n # _ = exploded_file.seek(0)\n\n # validate the exploded file\n # shell_code = \"\"\"\n # kgtk validate --allow-lax-qnodes True {}\n # \"\"\".format(exploded_file_name)\n # s = time()\n # res = execute_shell_code(shell_code)\n # print(f'time take to run kgtk validate: {time() - s} seconds')\n # if res != \"\":\n # print(res)\n # raise ValueError(\"The output kgtk file is invalid!\")\n\n return exploded_file, metadata_file", "title": "" }, { "docid": "ff56b15d21f2be951910d5260afd5787", "score": "0.47473937", "text": "def model_to_torchserve():\n\n parser = argparse.ArgumentParser(description='Path to model that has to be formatted to fit Torchserve')\n parser.add_argument('--model_path', default=None)\n parser.add_argument('--output_dim', default=64, type=int)\n parser.add_argument('--dropout', default=0.25, type=float)\n args = parser.parse_args(sys.argv[1:])\n print(args)\n \n if args.model_path is None:\n raise ValueError('Please give a path to the model.')\n else:\n if os.path.isfile(args.model_path):\n os.makedirs('./models/torchserve_models', exist_ok=True) # Create torchserve_models folder if it doesn't exist\n \n model = TumorClassifier.load_from_checkpoint(args.model_path, output_dim=args.output_dim, dropout=args.dropout)\n script_model = torch.jit.script(model)\n script_model.save(f'models/torchserve_models/deployable_model.pt')\n \n else:\n raise ValueError(f'Could not find file at given path: {args.model_path}')", "title": "" }, { "docid": "60aabe38f987037806ef3631d9953727", "score": "0.47465363", "text": "def main(data_path, meta_file, train_file, test_file):\n\n # Calculate path to files\n data_directory = Path(data_path) if data_path else default_data_directory\n train_csv = data_directory.joinpath(train_file)\n meta_csv = data_directory.joinpath(meta_file)\n test_csv = data_directory.joinpath(test_file)\n\n f.print_header(\"Running all models\")\n\n print(f\"Reading {train_csv} ...\")\n df_train = pd.read_csv(train_csv)\n print(f\"Reading {meta_csv} ...\")\n df_meta = pd.read_csv(meta_csv)\n print(f\"Reading {test_csv} ...\")\n df_test = pd.read_csv(test_csv)\n\n model_paras = {\n 'gbm_rank': [\n ModelGbmRank(), \n df_train, \n df_test,\n ],\n 'log_reg': [\n ModelLogReg(), \n df_train, \n df_test,\n ],\n 'nn_interaction': [\n ModelNNInteraction(), \n df_train, \n df_test,\n ],\n 'nn_item': [\n ModelNNItem(), \n df_meta, \n df_test,\n ],\n 'pop_abs': [\n ModelPopAbs(), \n df_train, \n df_test,\n ],\n 'pop_user': [\n ModelPopUsers(), \n df_train, \n df_test,\n ],\n 'position': [\n ModelPosition(), \n None, \n df_test,\n ],\n 'random': [\n ModelRandom(), \n None, \n df_test,\n ]\n }\n\n for model in model_paras.keys():\n print()\n print(f\"Running model {model} ...\")\n print()\n subm_file = f\"submission_{model}.csv\"\n subm_csv = data_directory.joinpath(subm_file)\n model, df_train, df_test = model_paras[model]\n\n print(f\"Fit model ...\")\n model.fit(df_train)\n\n print(f\"Calculate recommendations ...\")\n df_rec = model.predict(df_test)\n\n print(f\"Writing {subm_csv}...\")\n df_rec.to_csv(subm_csv, index=False)\n\n print()\n print(\"Finished calculating recommendations.\")", "title": "" }, { "docid": "5041dc79f42cd3052548509b848482ec", "score": "0.47461647", "text": "def to_backend(cls,\n ir_files: Sequence[str],\n work_dir: str,\n deploy_cfg: Any,\n log_level: int = logging.INFO,\n device: str = 'cpu',\n **kwargs) -> Sequence[str]:\n\n import copy\n\n from mmdeploy.apis.tvm import get_library_ext\n from mmdeploy.utils 
import (get_calib_filename, get_model_inputs,\n get_partition_config)\n from .onnx2tvm import from_onnx\n model_inputs = get_model_inputs(deploy_cfg)\n\n if device.startswith('cuda'):\n target = 'cuda'\n else:\n target = 'llvm'\n\n lib_ext = get_library_ext()\n\n tvm_files = []\n for model_id, onnx_path in enumerate(ir_files):\n model_input = copy.deepcopy(model_inputs[model_id])\n use_vm = model_input.get('use_vm', False)\n if 'target' not in model_input['tuner']:\n model_input['tuner']['target'] = target\n lib_path = osp.splitext(onnx_path)[0] + lib_ext\n code_path = osp.splitext(\n onnx_path)[0] + '.code' if use_vm else None\n model_input['output_file'] = lib_path\n model_input['onnx_model'] = onnx_path\n model_input['bytecode_file'] = code_path\n\n # create calibration dataset\n if 'qconfig' in model_input:\n from .quantize import HDF5Dataset\n calib_filename = get_calib_filename(deploy_cfg)\n calib_path = osp.join(work_dir, calib_filename)\n partition_cfgs = get_partition_config(deploy_cfg)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n partition_type = 'end2end' if partition_cfgs is None \\\n else onnx_name\n dataset = HDF5Dataset(\n calib_path,\n model_input['shape'],\n model_type=partition_type,\n device=target)\n model_input['dataset'] = dataset()\n\n from_onnx(**model_input)\n\n tvm_files += [lib_path, code_path]\n\n return tvm_files", "title": "" }, { "docid": "76166d2ac8a7abe4d0eb60adcb09926a", "score": "0.47459185", "text": "def ConvertFiles(import_files):\n temp_dir = tempfile.mkdtemp()\n # A map of filename -> count to avoid filename conflicts.\n filename_root_dict = {}\n for import_file in import_files:\n basename = os.path.basename(import_file.local_copy)\n filename_root, filename_ext = os.path.splitext(basename)\n output_jsonl = filename_root + '.jsonl'\n # Generates a unique output file name if it already exists\n num_occur = 1\n if filename_root in filename_root_dict:\n num_occur = filename_root_dict[filename_root] + 1\n output_jsonl = filename_root + str(num_occur) + '.jsonl'\n filename_root_dict[filename_root] = num_occur\n\n full_output_jsonl = os.path.join(temp_dir, output_jsonl)\n if filename_ext == '.jsonl':\n # For jsonl, we assume it is already converted and simply copy it.\n shutil.copyfile(import_file.local_copy, full_output_jsonl)\n elif _IsExternalFile(import_file.original_filepath):\n if not _IsGcsPattern(import_file.local_copy):\n filesize = os.path.getsize(import_file.local_copy)\n if filesize > MAX_EXTERNAL_FILE_SIZE_IN_BYTES:\n print('{} is skipped as it exceeds the max size limit for PDF files '\n '({} bytes). Please split the file.\\n'\n 'This max size limit also applies when using the PDF files in '\n 'GCS as inputs.'.format(import_file.local_copy,\n MAX_EXTERNAL_FILE_SIZE_IN_BYTES))\n continue\n with io.open(full_output_jsonl, 'w', encoding='utf-8') as output_file:\n output_file.writelines([\n unicode(\n _ConvertOneExampleWithExternalFile(\n import_file.original_filepath))])\n logging.info('Converted %s (localpath: %s) to %s',\n import_file.original_filepath, import_file.local_copy,\n full_output_jsonl)\n else:\n filesize = os.path.getsize(import_file.local_copy)\n if filesize > MAX_EXAMPLE_SIZE_IN_BYTES and not FLAGS.split:\n print(\n '{} is skipped as it exceeds the max size limit ({} bytes). '\n 'Please truncate or split it. 
Or rerun with \"-s\" to auto split it.'\n .format(import_file.original_filepath, MAX_EXAMPLE_SIZE_IN_BYTES))\n continue\n ConvertOneFile(import_file, FLAGS.split, full_output_jsonl)\n\n import_file.local_output_jsonl = full_output_jsonl\n\n return import_files", "title": "" }, { "docid": "2f0372f5efefdb1a24b9d0595f465d9b", "score": "0.47442362", "text": "def powerbi_postprocess(source_db_path: str, target_db_path: str, run_id: str):\n from autumn.core.project import get_project\n\n source_db = get_database(source_db_path)\n target_db = get_database(target_db_path)\n tables_to_copy = [t for t in source_db.table_names() if t != \"outputs\"]\n for table_name in tables_to_copy:\n logger.info(\"Copying %s\", table_name)\n table_df = source_db.query(table_name)\n if table_name == \"uncertainty\":\n # Rename \"time\" field to \"times\"\n table_df.rename(columns={\"time\": \"times\"})\n\n target_db.dump_df(table_name, table_df)\n\n app_name, region_name, timestamp, git_commit = read_run_id(run_id)\n\n # Add build metadata table\n build_key = f\"{timestamp}-{git_commit}\"\n logger.info(\"Adding 'build' metadata table with key %s\", build_key)\n build_df = pd.DataFrame.from_dict(\n {\"build_key\": [build_key], \"app_name\": [app_name], \"region_name\": [region_name]}\n )\n target_db.dump_df(\"build\", build_df)\n\n # Add scenario metadata table\n logger.info(\"Adding 'scenario' metadata table\")\n\n project = get_project(app_name, region_name)\n basline_params = project.param_set.baseline.to_dict()\n sc_params = [sc.to_dict() for sc in project.param_set.scenarios]\n\n # Add default scenario\n scenario_data = [\n {\n \"scenario\": 0,\n \"start_time\": int(basline_params[\"time\"][\"start\"]),\n \"description\": basline_params.get(\"description\", \"\"),\n }\n ]\n for sc_idx, sc_params in enumerate(sc_params):\n sc_datum = {\n \"scenario\": int(sc_idx + 1),\n \"start_time\": int(sc_params[\"time\"][\"start\"]),\n \"description\": sc_params.get(\"description\", \"\"),\n }\n scenario_data.append(sc_datum)\n\n scenario_df = pd.DataFrame(scenario_data)\n target_db.dump_df(\"scenario\", scenario_df)\n\n # Add calibration targets\n logger.info(\"Adding 'targets' table\")\n targets_data = []\n for target in project.calibration.targets:\n targets_data += [{'key': target.data.name, 'times': idx, 'value': v} for idx,v in target.data.iteritems()]\n\n targets_df = pd.DataFrame(targets_data)\n target_db.dump_df(\"targets\", targets_df)\n\n logger.info(\"Finished creating PowerBI output database at %s\", target_db_path)", "title": "" }, { "docid": "0fb5026260c40b32deb7afe11036a18a", "score": "0.47437307", "text": "def clean(args, opts):\n opts = opts or (lambda : 0)\n app = pug.nlp.db.get_app(args[0] or getattr(opts, 'app', None) or nlp.db.get_app()[0])\n model_names = args[1:] or getattr(opts, 'model_names', None) or django.db.models.get_models(app)\n for model_name in model_names:\n print(repr(model_name))\n \n pug.nlp.db.get_models(app)", "title": "" }, { "docid": "eb8dfc7c091c4ab241eccc36746048a1", "score": "0.47377154", "text": "def preprocess(input_dir: str, output_dir):\r\n data = pd.read_csv(os.path.join(input_dir, \"data.csv\"))\r\n target = pd.read_csv(os.path.join(input_dir, \"target.csv\"))\r\n\r\n os.makedirs(output_dir, exist_ok=True)\r\n\r\n train_data = pd.concat([data, target], axis=1)\r\n train_data.to_csv(os.path.join(output_dir, \"train_data.csv\"), index=False)", "title": "" }, { "docid": "89161db208dba717de122119dbd33a2e", "score": "0.47327262", "text": "def 
workflow_generate_from_mdl_then_build(main_name):\n models = Project_Support.get_models(main_name)\n\n if not models:\n GPS.Console().write(\n \"No models specified for %s: use the 'Target' property \"\n \"in the project file to fix. See QGen Model Debugger \"\n \"user guide for more detail.\\n\" % main_name)\n\n status = yield CLI.__compile_files_to_source_code(models)\n\n if status == 0:\n w = TargetWrapper(target_name='Build Main')\n yield w.wait_on_execute(main_name=main_name)", "title": "" }, { "docid": "f9fdbabace7bd5625ddc5d0dbffa8a5c", "score": "0.4726778", "text": "def _update_model(model):\n m_hash = hashlib.sha256()\n for file in model.files:\n url = file.source.url\n etag = _get_etag(url)\n if etag is not None:\n m_hash.update(bytes(etag, \"utf-8\"))\n model.subdirectory_ori = model.subdirectory\n model.subdirectory = Path(m_hash.hexdigest())\n\n # FIXME: a bug from openvino-dev==2022.3.0\n # It has been fixed on master branch.\n # After upgrading openvino-dev, we can remove this temporary patch\n if getattr(model, \"conversion_to_onnx_args\") and not [\n arg for arg in model.conversion_to_onnx_args if arg.startswith(\"--model-path\")\n ]:\n model.conversion_to_onnx_args.append(\"--model-path=\")", "title": "" }, { "docid": "61b2ca80a8675d4ece061569680c5888", "score": "0.47233257", "text": "def export_database():\n # export tables\n for table in tables:\n table.export_model()", "title": "" }, { "docid": "c3278367e7c359704ead5b3e0fc22687", "score": "0.47227478", "text": "def do_import(self):\n finalize_writes_callback = self._import_static_files()\n\n if not self.dry_run:\n self._delete_all_imported_root_usage_dtos()\n\n cu_mapper = Chapter2UnitMapper(self)\n for chapter in self.course_root:\n chapter_usage_id = chapter.attrib['usage_id']\n unit = cu_mapper.bindings.get(chapter_usage_id)\n if unit:\n self._update_unit(chapter, unit)\n else:\n unit = self._create_unit(chapter)\n\n cu_mapper.bind(chapter, unit)\n\n sl_mapper = Sequential2LessonMapper(self, chapter, unit)\n for sequential in chapter:\n sequential_usage_id = sequential.attrib['usage_id']\n lesson = sl_mapper.bindings.get(sequential_usage_id)\n if lesson:\n self._update_lesson(sequential, lesson)\n else:\n lesson = self._create_lesson(sequential, unit)\n\n sl_mapper.bind(sequential, lesson)\n self._update_lesson_xblock_content(sequential, unit, lesson)\n\n for lesson in sl_mapper.orphans:\n self.journal.append('Delete lesson \\'%s\\'' % lesson.title)\n self.course.delete_lesson(lesson)\n\n for unit in cu_mapper.orphans:\n self.journal.append('Delete unit \\'%s\\'' % unit.title)\n self.course.delete_unit(unit)\n\n # Wait for async db operations to complete\n finalize_writes_callback()", "title": "" }, { "docid": "caace22accae8d44f69e9e07a06cdd4e", "score": "0.471762", "text": "def build_model(self):", "title": "" }, { "docid": "c0c79c691831eb1375d1f53b5a6de69e", "score": "0.4716262", "text": "def unpack_model(model_file: Text, working_directory: Optional[Text] = None\n ) -> Text:\n import tarfile\n\n if working_directory is None:\n working_directory = tempfile.mkdtemp()\n\n tar = tarfile.open(model_file)\n # All files are in a subdirectory.\n tar.extractall(working_directory)\n tar.close()\n logger.debug(\"Extracted model to '{}'.\".format(working_directory))\n\n return working_directory", "title": "" }, { "docid": "e704f188bd1df5cf6f39eb69757db6c1", "score": "0.47145543", "text": "def render_model(modelname = \"NO_MODELNAME_GIVEN\", \r\n force = False, \r\n comment=\"\", \r\n prefix_path=\"./\", \r\n properties=None, 
\r\n parts_dir= powlib.PARTS_DIR ):\r\n print \"generate_model: \" + modelname\r\n # new model filename\r\n classname = string.capitalize(modelname) \r\n baseclassname = \"Base\" + classname\r\n filename = classname + \".py\"\r\n filename = os.path.normpath( prefix_path+ \"/models/\" + filename)\r\n if os.path.isfile( os.path.normpath( filename ) ) and force != True:\r\n print filename + \" (exists)...(Use -f to force override)\"\r\n else:\r\n infile = None\r\n infile = open (os.path.normpath( parts_dir + \"model_stub.part\"), \"r\")\r\n ostr = \"\"\r\n ostr = ostr + infile.read()\r\n infile.close()\r\n \r\n ostr = ostr.replace(\"#DATE\", str(datetime.date.today()) )\r\n ostr = ostr.replace(\"#MODELCLASS\", classname)\r\n \r\n ostr = ostr.replace(\"#BASECLASS\", baseclassname)\r\n ostr = ostr.replace( \"#MODELTABLE\", powlib.plural(string.lower(modelname)) ) \r\n \r\n # write the output file to disk\r\n ofile = open( filename , \"w+\") \r\n print \" --\", filename + \" (created)\"\r\n ofile.write( ostr )\r\n ofile.close()\r\n \r\n ### generate BaseModel if neccessary\r\n filename = \"Base\" + classname + \".py\"\r\n if os.path.isfile( os.path.normpath( filename ) ) and force != True:\r\n print filename + \" (exists)...(Use -f to force override)\"\r\n else:\r\n infile = None\r\n ### generate the BaseClass\r\n infile = open (os.path.normpath( PARTS_DIR + \"basemodel_stub.part\"), \"r\")\r\n ostr = infile.read()\r\n infile.close()\r\n # Add Class declaration and Table relation for sqlalchemy\r\n ostr = ostr.replace( \"#BASECLASSNAME\", baseclassname )\r\n ostr = ostr.replace( \"#MODELTABLE\", powlib.plural(string.lower(modelname)) ) \r\n \r\n ### adding the properties list\r\n # TODO: Needs to be tested. \r\n if properties == None:\r\n ostr = ostr.replace(\"#PROPERTIES_LIST\", \"[]\")\r\n else:\r\n ostr = ostr.replace(\"#PROPERTIES_LIST\", properties )\r\n \r\n ostr = ostr.replace(\"#MODELNAME\" , string.capitalize(modelname) ) \r\n \r\n filename = os.path.normpath( prefix_path + \"/models/basemodels/\" + filename)\r\n \r\n ofile = open( filename , \"w+\") \r\n print \" --\", filename + \" (created)\"\r\n ofile.write( ostr )\r\n ofile.close()\r\n \r\n # render a basic testcase \r\n render_test_stub(modelname, classname, prefix_path, PARTS_DIR)\r\n return", "title": "" }, { "docid": "595ff435136660113c49f68bac70440d", "score": "0.47142863", "text": "def test_dosdp_import(self):\n ie = DOSDPImportEngine()\n files = glob.glob(os.path.join(DOSDP_DIR, '*.yaml'))\n print(f'LOADING: {files}')\n schema = ie.convert(files,\n id='https://example.org/mondo/',\n name='mondo', range_as_enums=False)\n #print(schema)\n sd = minify_schema(schema)\n model_path = os.path.join(OUTPUT_DIR, 'mondo_dps.yaml')\n with open(model_path, 'w') as stream:\n yaml.safe_dump(sd, stream, sort_keys=False)\n with open(META_OWL_OUTPUT, 'w') as stream:\n stream.write(OwlSchemaGenerator(model_path, type_objects=False, metaclasses=False).serialize())", "title": "" }, { "docid": "279f5f4b763dad1333c9114301f4e694", "score": "0.47064254", "text": "def detaw(fname_main_yaml: str) -> None:\n logging.info(f\"Reading the main YAML input: {fname_main_yaml}\")\n with open(fname_main_yaml, 'r') as file_in:\n model_params = yaml.safe_load(file_in)\n\n # FIXME For now, passing the yaml information to the current model\n # parameters. 
They can be passed as a dict directly.\n detaw_params = model_params[\"detaw\"]\n streamlinemodel = detaw_params[\"target_model\"]\n idayoutput = detaw_params[\"daily_output\"]\n imonthoutput = detaw_params[\"monthly_output\"]\n iyearoutput = detaw_params[\"yearly_output\"]\n itotaloutput = detaw_params[\"delta_output\"]\n dailyunit = detaw_params[\"daily_output_unit\"]\n forDSM2_daily = detaw_params[\"for_dsm2_only\"]\n start_water_year = detaw_params[\"start_water_year\"]\n end_water_year = detaw_params['end_water_year']\n fn_input_pcp = detaw_params['input_pcp']\n fn_input_temperature = detaw_params['input_temperature']\n fn_landuse = detaw_params['landuse']\n fn_et_correction = detaw_params['et_correction']\n fn_calendar = detaw_params['calendar']\n fn_detaw_output = detaw_params['detaw_output']\n fn_precip_output = detaw_params['precip_output']\n fn_et_output = detaw_params['et_output']\n\n # FIXME Avoid to use a current directory for jobs\n filepath = os.getcwd()\n\n model_start_year = int(start_water_year)-1\n # FIXME the start water year date of start_water_year-09-30 is a carry forward from the old code\n # setting this to year-10-01 and removing the extra input data results in numpy errors. I tried fixing\n # this by changing the dimensions of the declared variables but the results are different and hence I\n # rolled back to the old version.\n start_date_str = str(model_start_year) + '-09-30'\n end_date_str = str(end_water_year) + '-09-30'\n # convert string to datetime\n start_water_year_dt = pd.to_datetime(start_date_str)\n end_water_year_dt = pd.to_datetime(end_date_str)\n # mod_date_range = pd.DataFrame()\n # mod_date_range['dates'] = pd.date_range(start_date_str,end_date_str,freq='D')\n # mod_date_range['water_year'] = mod_date_range.dates.dt.year.where(mod_date_range.dates.dt.month < 10, mod_date_range.dates.dt.year + 1)\n # water_years = numpy.unique(mod_date_range['water_year'])\n\n water_years = numpy.arange(start_water_year,end_water_year+1)\n # get endyear for the model run\n endyear = end_water_year_dt.year\n # get length in days for the model run\n idates = len(pd.date_range(start_water_year_dt, end_water_year_dt, freq='D'))\n # print(\"endyear =\",endyear)\n print(\"idates =\", idates)\n\n start1 = numpy.array([start_water_year_dt.year, start_water_year_dt.month, start_water_year_dt.day, 23, 0], dtype='i4')\n iyears = endyear-start1[0]+1\n print(\"iyears =\", iyears)\n\n # Setting the value of ilands from the landuse file\n tmp_landuse_df = pd.read_csv(fn_landuse,header=[0])\n ilands = len(tmp_landuse_df['area_id'].unique())\n\n # Reading the input pcp file to get the number of pcp stations (isites). 
Reading the pcp values is performed in read_pcp\n tmp_df = pd.read_csv(fn_input_pcp,header=[0])\n # subtract columns by 4 to ingnore year, month, day, doy to get the number of pcp stations\n isites = tmp_df.shape[1]-4\n\n NumDay = numpy.array([0, 31, 28, 31, 30, 31, 30, 31,\n 31, 30, 31, 30, 31], dtype='i4')\n NI = numpy.array([31, 59, 90, 120, 151, 181, 212,\n 243, 273, 304, 334, 365], dtype='i4')\n NII = numpy.array([31, 60, 91, 121, 152, 182, 213,\n 244, 274, 305, 335, 366], dtype='i4')\n # pcplocs = [\"Brentwood\", \"Davis\", \"Galt\",\n # \"Lodi\", \"RioVista\", \"Stockton\", \"Tracy\"]\n # perclocs = [\"Brentwood\", \"Davis\", \"Galt\",\n # \"Lodi\", \"Rio Vista\", \"Stockton\", \"Tracy_Carbona\"]\n # cropname = [\"Urban\", \"Irrig pasture\", \"Alfalfa\", \"All Field\", \"Sugar beets\",\n # \"Irrig Grain\", \"Rice\", \"Truck Crops\", \"Tomato\", \"Orchard\",\n # \"Vineyard\", \"Riparian Vegetation\", \"Native Vegetation\",\n # \"Non-irrig Grain\", \"Water Surface\"]\n # icroptype = len(cropname)\n\n # XXX Need to fix hardwired stations\n #? all the above are hardwired. why?#\n ts_pcp = zeros((isites, idates), float)\n ts_per = zeros((isites, ilands), float)\n ETo_corrector = zeros((ilands), float)\n Region = zeros((ilands), int)\n ts_year = zeros((idates), int)\n ts_mon = zeros((idates), int)\n ts_days = zeros((idates), int)\n ts_LODI_tx = zeros((idates), float)\n ts_LODI_tn = zeros((idates), float)\n\n ts_pcp = read_pcp(start_date_str, end_date_str, fn_input_pcp)\n\n [ts_per, ETo_corrector, Region] = read_et_correction_factors(fn_et_correction)\n\n [ts_year, ts_mon, ts_days, ts_LODI_tx, ts_LODI_tn] = read_temperature(streamlinemodel, start_date_str, end_date_str, fn_input_temperature)\n\n [yearType, HAcre, icroptype] = read_landuse(fn_landuse, iyears, water_years, ilands)\n\n daysofyear = read_calendar(streamlinemodel, model_start_year, endyear, water_years, fn_calendar)\n\n if DEBUG_TIMING:\n st = timeit.default_timer()\n (pcp, ET0) = weatheroutput(ts_pcp, ts_per, ts_mon, ts_days, ts_LODI_tx,\n ts_LODI_tn, ilands, idates, isites, ETo_corrector, filepath, start1)\n weatheroutput_to_netcdf(pcp, ET0, model_start_year, fn_precip_output, fn_et_output)\n if DEBUG_TIMING:\n print('weather output took', timeit.default_timer()-st, ' seconds')\n pcp = pcp.T\n ET0 = ET0.T\n if DEBUG_TIMING:\n st = timeit.default_timer()\n # output dimensioned by (var, island,landuse,time)\n (DETAWOUTPUT) = historicalETAW(ts_per, ETo_corrector, Region, pcp, ET0, ts_LODI_tx, ts_LODI_tn,\n ilands, idates, isites, ts_year, ts_mon, ts_days, start1, filepath, NI, NII, NumDay, iyears,\n idayoutput, imonthoutput, iyearoutput, itotaloutput, dailyunit, forDSM2_daily, streamlinemodel,model_start_year,\n yearType,HAcre,icroptype)\n if DEBUG_TIMING:\n print('historical etaw calculations took ',\n timeit.default_timer()-st, ' seconds')\n if DEBUG_TIMING:\n st = timeit.default_timer()\n dx = write_to_netcdf(DETAWOUTPUT,model_start_year,fn_detaw_output)\n if DEBUG_TIMING:\n print('detaw output to netcdf4 took',\n timeit.default_timer()-st, ' seconds')\n\n run_for_dcd = False\n if run_for_dcd:\n # output dimensioned by (var, island, time)\n (DETAWISL168) = timeseries_combine(\n DETAWOUTPUT, ilands, ilands, 15, idates-1, \"\")\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"\",daysofyear)\n if streamlinemodel == \"CALSIM3\":\n #print(\"in the double-counting process\", idates)\n tempfile = filepath+\"/Input/planning_study/\"+\"CS3_DCD_rate1.txt\"\n (DETAWISL168) = timeseries_combine(\n 
DETAWOUTPUT, ilands, ilands, 15, idates-1, tempfile)\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"_ex1\",daysofyear)\n\n tempfile = filepath+\"/Input/planning_study/\"+\"CS3_DCD_rate2.txt\"\n (DETAWISL168) = timeseries_combine(\n DETAWOUTPUT, ilands, ilands, 15, idates-1, tempfile)\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"_ex2\",daysofyear)\n\n tempfile = filepath+\"/Input/planning_study/\"+\"CS3_DCD_rate3.txt\"\n (DETAWISL168) = timeseries_combine(\n DETAWOUTPUT, ilands, ilands, 15, idates-1, tempfile)\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"_ex3\",daysofyear)\n else:\n tempfile = filepath+\"/Input/historical_study/\"+\"CS3_DCD_rate1.txt\"\n (DETAWISL168) = timeseries_combine(\n DETAWOUTPUT, ilands, ilands, 15, idates-1, tempfile)\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"_ex1\",daysofyear)\n\n tempfile = filepath+\"/Input/historical_study/\"+\"CS3_DCD_rate2.txt\"\n (DETAWISL168) = timeseries_combine(\n DETAWOUTPUT, ilands, ilands, 15, idates-1, tempfile)\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"_ex2\",daysofyear)\n\n tempfile = filepath+\"/Input/historical_study/\"+\"CS3_DCD_rate3.txt\"\n (DETAWISL168) = timeseries_combine(\n DETAWOUTPUT, ilands, ilands, 15, idates-1, tempfile)\n forNODCU(DETAWISL168, streamlinemodel, model_start_year, endyear, ilands, \"_ex3\",daysofyear)\n print(\"done\")", "title": "" }, { "docid": "7f1cd1aa52396aed480aa9a52d58de57", "score": "0.4700407", "text": "def generate(package_version, rules_dir, local_kibana):\n get_release_rules(package_version, local_kibana, rules_dir)\n create_json_from_docs(package_version)\n update_current_text(package_version)\n get_rule_diff(package_version)\n create_documentation(package_version)\n\n click.echo('Files staged to generated-ascii-files folder - move these over to docs/detections/prebuilt-rules')", "title": "" }, { "docid": "54c0bbf36dfa6336799bdda69f614fae", "score": "0.4698261", "text": "def main(_):\n encoder = text_encoder.SubwordTextEncoder(FLAGS.vocab_file)\n\n in_files = tf.gfile.Glob(FLAGS.in_filepattern)\n assert in_files, \"No matching input files\"\n for in_file in in_files:\n convert_file(in_file, encoder)", "title": "" }, { "docid": "3d37e3813f07723f4db55807797fe1c9", "score": "0.46966526", "text": "def main(inputs_dir):\n # build model\n model = Cartoon()\n\n img_suffix = ['.jpg', '.jpeg', '.png']\n\n # load model weights\n style = input('Image style.choose [`hayao`, `hosoda`, `paprika`, `shinkai`]: ')\n\n output_dir = input('Image save path: ')\n # Create if the output save directory does not exist.\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n model_file = os.path.join(args.model, style + '.pth')\n model.load_state_dict(torch.load(model_file))\n\n # set model mode is eval\n model.eval()\n\n # check mode status\n if args.mode == 'gpu':\n if torch.cuda.is_available():\n print('Use GPU mode!')\n model.cuda()\n else:\n raise Exception('Please check if your system is properly installed with CUDA'\n 'and if PyTorch`s GPU version is installed.')\n else:\n print('Use CPU mode!')\n model.float()\n\n # Get all the files in the specified directory\n for img_path in os.listdir(inputs_dir):\n # Intercept file suffix\n suffix = os.path.splitext(img_path)[1]\n if suffix not in img_suffix:\n continue\n # load image\n file_path = os.path.join(inputs_dir, img_path)\n\n raw_image = preprocess(file_path)\n\n raw_image = 
np.asarray(raw_image)\n\n # RGB -> BGR\n raw_image = raw_image[:, :, [2, 1, 0]]\n raw_image = transforms.ToTensor()(raw_image).unsqueeze(0)\n\n # preprocess, (-1, 1)\n raw_image = -1 + 2 * raw_image\n\n with torch.no_grad():\n if args.mode == 'gpu':\n raw_image = Variable(raw_image).cuda()\n else:\n raw_image = Variable(raw_image).float()\n\n # forward\n cartoon_image = model(raw_image)\n cartoon_image = cartoon_image[0]\n # BGR -> RGB\n cartoon_image = cartoon_image[[2, 1, 0], :, :]\n\n # deprocess, (0, 1)\n cartoon_image = cartoon_image.data.cpu().float() * 0.5 + 0.5\n\n img_path = os.path.join(output_dir,\n img_path[:-4] + '_' + style + '.png')\n vutils.save_image(cartoon_image, img_path)", "title": "" }, { "docid": "8e0054bc4f496e924a0cb3d96e2b1d98", "score": "0.46943778", "text": "def generate_from_existing(self):", "title": "" }, { "docid": "79296eea1afc57ac0d026bff4cb18906", "score": "0.46915203", "text": "def prepare_data():\n # Loads data\n tf.logging.info(\"Loading data\")\n\n # task_datasets_rename = {\n # \"SST\": \"E2E\",\n # }\n\n data_dir = 'bert/{}'.format('E2E')\n # if FLAGS.task.upper() in task_datasets_rename:\n # data_dir = 'data/{}'.format(\n # task_datasets_rename[FLAGS.task])\n\n if FLAGS.tfrecords_output_dir is None:\n tfrecords_output_dir = data_dir\n else:\n tfrecords_output_dir = FLAGS.tfrecords_output_dir\n tx.utils.maybe_create_dir(tfrecords_output_dir)\n\n processors = {\n 'SST': data_utils.SSTProcessor\n }\n processor = processors[FLAGS.task]()\n\n num_classes = len(processor.get_labels())\n num_train_data = len(processor.get_train_examples(data_dir))\n\n tf.logging.info(\n 'num_classes:%d; num_train_data:%d' % (num_classes, num_train_data))\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file,\n do_lower_case=FLAGS.do_lower_case)\n # TO DO:Prepare data for the transformer classifier\n # i.e. 
Concat x' with y and see whether x' was compressed in y\n ref = refs[1]\n with open(os.path.join(e2e_data_dir, \"x{}_type.valid.txt\".format(ref)), 'r') as f_type:\n lines_type = f_type.readlines()\n with open(os.path.join(e2e_data_dir, \"x{}_value.valid.txt\".format(ref)), 'r') as f_entry:\n lines_entry = f_entry.readlines()\n with open(os.path.join(e2e_data_dir, \"x{}_value.valid.txt\".format(refs[0])), 'r') as f_entry_x:\n lines_entry_x = f_entry_x.readlines()\n with open(\"e2ev14_output_new/{}/ckpt/hypos.step{}.val.txt\".format(expr_name, step), 'r') as f_sent:\n lines_sent = f_sent.readlines()\n for (idx_line, line_type) in enumerate(lines_type):\n line_type = line_type.strip('\\n').split(' ')\n for (idx_val, attr) in enumerate(line_type):\n entry_list = lines_entry[idx_line].strip('\\n').split(' ')\n if (lines_entry_x[idx_line].find(entry_list[idx_val]) == -1):\n neg_samp = attr + ' : ' + entry_list[idx_val] + ' | ' + lines_sent[idx_line]\n with open(\"bert/E2E/{}.step{}.2.tsv\".format(expr_name, step), 'a') as f_w:\n f_w.write(neg_samp)\n\n # Concat x with y and see whether x was compressed in y\n ref = refs[0]\n with open(os.path.join(e2e_data_dir, \"x{}_type.valid.txt\".format(ref)), 'r') as f_type:\n lines_type = f_type.readlines()\n with open(os.path.join(e2e_data_dir, \"x{}_value.valid.txt\".format(ref)), 'r') as f_entry:\n lines_entry = f_entry.readlines()\n with open(\"e2ev14_output_new/{}/ckpt/hypos.step{}.val.txt\".format(expr_name, step), 'r') as f_sent:\n lines_sent = f_sent.readlines()\n for (idx_line, line_type) in enumerate(lines_type):\n line_type = line_type.strip('\\n').split(' ')\n for (idx_val, attr) in enumerate(line_type):\n entry_list = lines_entry[idx_line].strip('\\n').split(' ')\n pos_samp = attr + ' : ' + entry_list[idx_val] + ' | ' + lines_sent[idx_line]\n with open(\"bert/E2E/{}.step{}.1.tsv\".format(expr_name, step), 'a') as f_w:\n f_w.write(pos_samp)\n\n # Produces TFRecords files\n data_utils.prepare_TFRecord_data(\n processor=processor,\n tokenizer=tokenizer,\n data_dir=data_dir,\n max_seq_length=FLAGS.max_seq_length,\n output_dir=tfrecords_output_dir,\n expr_name=expr_name,\n step=step)\n modify_config_data(FLAGS.max_seq_length, num_train_data, num_classes)", "title": "" }, { "docid": "f045fd405f2f83d7b62c904b84023c5b", "score": "0.46890363", "text": "def process(self):\n try:\n make_dirs(os.path.dirname(self.output_abspath))\n with open(self.output_abspath, mode='w', encoding='utf-8') as outputfile:\n for input_abspath in self.input_abspaths:\n with open(input_abspath, encoding='utf-8') as inputfile:\n for line in inputfile:\n outputfile.write(line)\n except IOError as e:\n raise DerivaDownloadError(\"Concatenate transform failed\", e)\n\n return super(ConcatenateTransformProcessor, self).process()", "title": "" } ]
7b214326c84de19606bbaaccfe707110
returns a dictionary of
[ { "docid": "94d2ec278189c510866bc3aeee035468", "score": "0.0", "text": "def predict(self, at=5):\n recs = {}\n for i in range(0, self.R_hat.shape[0]):\n pl_id = self.pl_id_list[i]\n pl_row = self.R_hat.data[self.R_hat.indptr[i]:\n self.R_hat.indptr[i + 1]]\n # get top 5 indeces. argsort, flip and get first at-1 items\n sorted_row_idx = np.flip(pl_row.argsort(), axis=0)[0:at]\n track_cols = [self.R_hat.indices[self.R_hat.indptr[i] + x]\n for x in sorted_row_idx]\n tracks_ids = [self.tr_id_list[x] for x in track_cols]\n recs[pl_id] = tracks_ids\n return recs", "title": "" } ]
[ { "docid": "0f82d3748c9bac7976eba45ff1d4a511", "score": "0.7530778", "text": "def _asdict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.7360245", "text": "def to_dict(self):", "title": "" }, { "docid": "41dd663a994d01c8d5fee9465aafca83", "score": "0.71872026", "text": "def dictionary(self) -> dict:\n return {}", "title": "" }, { "docid": "49f8060045ddc855ff2daec8b0fbeaf6", "score": "0.7078897", "text": "def to_dict(self):\n\t\treturn {key: self[key] for key in self.keys()}", "title": "" }, { "docid": "ee14636a7c0db1370c460190ebce5b0e", "score": "0.7075524", "text": "def return_dict(self):\n return dict(N=self.N, p=self.p, bc=self.bc, H=self.H)", "title": "" }, { "docid": "ac0fc0997a05570500f5496767f9446d", "score": "0.70632726", "text": "def _dict(self) -> Dict[str, str]:\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"hostname\": self.hostname,\n \"type\": self.type.value,\n \"version\": self.version,\n }", "title": "" }, { "docid": "a85d60a88a3c9ecc180c01d22e225f56", "score": "0.7052195", "text": "def get_dict(self):\n d_header = self.get_header_dict()\n data_list = self.get_data_list()\n d_fileheader = self.get_fileheader_dict()\n comment_list = self.get_comments_list()\n illegals_list = self.get_illegals_list()\n return {\n \"header\": d_header,\n \"fileheader\": d_fileheader,\n \"data\": data_list,\n \"comments\": comment_list,\n \"illegals\": illegals_list,\n }", "title": "" }, { "docid": "e9b3dc73a0868f0830b2ca9e5b5154b9", "score": "0.6979714", "text": "def _dict(self) -> Dict[str, Union[Optional[str], List[Optional[str]]]]:\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"title\": self.title,\n \"type\": self.type.value,\n \"host\": self.host,\n \"database\": self.database,\n \"schema\": self.schema,\n \"workspace\": [self.workspace.airtable_id],\n }", "title": "" }, { "docid": "042a8385b5d96e356fcd92ee6b232c68", "score": "0.6957291", "text": "def asDict( self ):\r\n return dict( self.items() )", "title": "" }, { "docid": "eed4fe1279a8a109a279efaa60af282b", "score": "0.69138646", "text": "def get_info(self):\n return {}", "title": "" }, { "docid": "04fea07a73e7a71e32655c6141036c47", "score": "0.6868344", "text": "def _dict(self) -> Dict[str, str]:\n return {\"id\": self.id, \"name\": self.name, \"title\": self.title, \"server\": self.server.id}", "title": "" }, { "docid": "0d02f27e84c829d160347c1395682927", "score": "0.68285567", "text": "def dict(self):\r\n return dict((key, self[key]) for key in self)", "title": "" }, { "docid": "b5de5c864c9c4255e51a6ee3268cb33c", "score": "0.68176454", "text": "def getdictionary(self):\n d = dict()\n for x,y in self.getcards():\n if y not in d:\n d[y]= list()\n if y in d:\n d[y].append(x)\n return d", 
"title": "" }, { "docid": "3390d5ec9f774ef696d118a8c1ecf39b", "score": "0.6812373", "text": "def as_dict(self) -> Dict:\n pass", "title": "" }, { "docid": "6ebfbea2daf588b807bb4ddf308b4b89", "score": "0.67840016", "text": "def _dict(self) -> Dict[str, Union[str, List[Optional[str]]]]:\n _services = []\n for service in self.services:\n _services.append(service.value)\n _layers = []\n for layer in self.layers:\n _layers.append(layer.airtable_id)\n _styles = []\n for style in self.styles:\n _styles.append(style.airtable_id)\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"title\": self.title,\n \"services\": _services,\n \"workspace\": [self.workspace.airtable_id],\n \"layers\": _layers,\n \"styles\": _styles,\n }", "title": "" }, { "docid": "9fe8d4b6767db086d2bf0631bae6a666", "score": "0.6782678", "text": "def asdict(self):\n return {key: getattr(self, key) for key in self.detectorNames + self.workNames}", "title": "" }, { "docid": "bed9cd6e715f05bb6c014a2bdf3b3c13", "score": "0.6768926", "text": "def to_dict(self) -> Dict:\n pass", "title": "" }, { "docid": "3bd9e6f6309095ee6d7d88fcbb59ab01", "score": "0.67039824", "text": "def stats(self) -> dict:", "title": "" }, { "docid": "0a251f4331962262a9ad5abf5e8118f7", "score": "0.6693318", "text": "def ToDictionary(self):\n w = DictionaryWriter(self)\n w.Write('frequency')\n w.Write('command_regex')\n return w.Dictionary()", "title": "" }, { "docid": "111f680eaf442bba3a06959a76fb7240", "score": "0.6684197", "text": "def get_dict(self):\n\t\td = {}\n\t\td['rawText'] = self.rawText\n\t\td['creationDate'] = self.creationDate.isoformat()\n\t\td['entryNum'] = self.entryNum\n\t\td['treeRoot'] = self.treeRoot.get_dict()\n\t\treturn d", "title": "" }, { "docid": "ae9b1d5a9efa09c1f548aadc82e81627", "score": "0.66624606", "text": "def to_dict(self): # pylint: disable=no-self-use\n return {\n }", "title": "" }, { "docid": "ae9b1d5a9efa09c1f548aadc82e81627", "score": "0.66624606", "text": "def to_dict(self): # pylint: disable=no-self-use\n return {\n }", "title": "" }, { "docid": "ae9b1d5a9efa09c1f548aadc82e81627", "score": "0.66624606", "text": "def to_dict(self): # pylint: disable=no-self-use\n return {\n }", "title": "" }, { "docid": "abe678f77159fb78e04a208bda356d3a", "score": "0.6659592", "text": "def to_dict(self) -> dict:\n pass", "title": "" }, { "docid": "52aa8b73b5d61df2d1de8a0edd0b1e4e", "score": "0.66592336", "text": "def get_data(self):\n return dict([(key, getattr(self, key)) for key in self.get_raw()])", "title": "" }, { "docid": "e5b3d96f482a7a37ee481e62d4ff99bf", "score": "0.66360325", "text": "def _dict(self) -> Dict[str, Union[str, List[str]]]:\n _services = []\n for service in self.services:\n _services.append(service.value)\n _styles = []\n for style in self.styles:\n _styles.append(style.airtable_id)\n _dict = {\n \"id\": self.id,\n \"name\": self.name,\n \"title\": self.title,\n \"type\": self.type.value,\n \"geometry\": None,\n \"services\": _services,\n \"table-view\": self.table_view,\n \"workspace\": [self.workspace.airtable_id],\n \"store\": [self.store.airtable_id],\n \"styles\": _styles,\n }\n if self.geometry is not None:\n _dict[\"geometry\"] = self.geometry.value\n\n return _dict", "title": "" }, { "docid": "2556341922eca69570866d707a8827af", "score": "0.66286236", "text": "def info(self):\r\n return {}", "title": "" }, { "docid": "2556341922eca69570866d707a8827af", "score": "0.66286236", "text": "def info(self):\r\n return {}", "title": "" }, { "docid": "712da7b2124ab91035311ad7680146cf", "score": "0.66215855", 
"text": "def dict(self) -> dict:\n return {key: self[key] for key in self.keys()}", "title": "" }, { "docid": "3b8df0e7a7bb38cd19fd90a62c8085f4", "score": "0.66070217", "text": "def to_dictionary(self):\n keys = [\"id\", \"size\", \"x\", \"y\"]\n return {a: getattr(self, a) for a in keys}", "title": "" }, { "docid": "16ac03f7247523748d68fa96d0d32f91", "score": "0.6606796", "text": "def _asdict(self) -> dict:\n\n return {\n field: self[field]\n for field in self\n }", "title": "" }, { "docid": "ba45ec377e1b02d0da4a11054b259772", "score": "0.6606489", "text": "def as_dict(self) -> Dict[str, Dict[str, Any]]:\n dict_ = dict()\n for idx, sta in enumerate(self.data[\"station\"]):\n dict_.update(\n {\n sta.lower(): {\n \"num\": self.data[\"num\"][idx],\n \"domes\": self.data[\"domes\"][idx],\n \"pos_x\": self.data[\"pos_x\"][idx],\n \"pos_y\": self.data[\"pos_y\"][idx],\n \"pos_z\": self.data[\"pos_z\"][idx],\n \"flag\": self.data[\"flag\"][idx],\n }\n }\n )\n\n return dict_", "title": "" }, { "docid": "0109d196a154a7457fa7c2e8834c8584", "score": "0.6595959", "text": "def todict(self):\n\t\treturn super().todict()", "title": "" }, { "docid": "25d660fa95d33c6ddfa102aeda2b313e", "score": "0.6594771", "text": "def get_dict(self):\n\t\tres_dict = {'name': self.name, 'type': self.type}\n\t\tres_dict['children'] = self._get_id_list(self.children)\n\t\tres_dict['parents'] = self._get_id_list(self.parents)\n\t\treturn res_dict", "title": "" }, { "docid": "3c0133f08acad4482409523fecacc510", "score": "0.6586613", "text": "def _activity_to_dict(self) -> Dict:", "title": "" }, { "docid": "d78805932a256e7a17be21d4f1011335", "score": "0.65745074", "text": "def asdict(self):\n return dict([(k, getattr(self, k))\n for k in self.registered])", "title": "" }, { "docid": "c770cba24017ac1457d4c8b39fd81093", "score": "0.65733784", "text": "def to_dict(self):\n # output = []\n # output.append(self.meta_data())\n output = self.meta_data()\n return output", "title": "" }, { "docid": "c12716ab500a560248339a88b0cec6e7", "score": "0.65535444", "text": "def to_dict(self):\n return {\n name: getattr(self, name)\n for name in self._PROPERTIES.keys()}", "title": "" }, { "docid": "7c0bd53e648572c390b883cbce07bf50", "score": "0.654184", "text": "def to_dict(self) -> Dict[str, Any]:\n return {\n 'uid': self.uid,\n 'bug': self.bug,\n 'tools': self.tools.copy()\n }", "title": "" }, { "docid": "7d15b8f7f60e362a5030375dd4e9fd80", "score": "0.65305084", "text": "def get_user_defined_mapping() -> Dict:\n return {}", "title": "" }, { "docid": "c78044a08b979c5f22a56f114688491a", "score": "0.6512679", "text": "def to_dict(self):\n retval = {}\n for key in self:\n retval[key] = self[key]\n return retval", "title": "" }, { "docid": "a71b8d0b51a9de3c1e30aaa6dddd57d5", "score": "0.65121305", "text": "def to_dict(self) -> dict:\r\n\r\n return {k: v for k, v in self.__dict__.items() if k.endswith('_')}", "title": "" }, { "docid": "7cca4b66a36fd7194f781ac7ea15ae41", "score": "0.6496758", "text": "def params(self):\n return dict()", "title": "" }, { "docid": "309b566ef0e17e3efebfd90e3e603fea", "score": "0.6489883", "text": "def data(self):\n d = {}\n for name in self.key_names:\n d[name] = self.__dict__.get(name, None)\n return d", "title": "" }, { "docid": "d112545c2988e709bddeabb08cbc2bfe", "score": "0.6479215", "text": "def dict(self):\n ret = {\"name\": self.name, \"start\": self.start, \"end\": self.end}\n return ret", "title": "" }, { "docid": "2934a4539f715aa10b988f8c080e5534", "score": "0.6475107", "text": "def get100dict(self):\r\n dict 
= {}\r\n for line in self.get100result()[1]:\r\n key, val = line.split(' ', 1)\r\n dict[key] = dequote(val)\r\n return dict", "title": "" }, { "docid": "ce9347ad031d4f31c7ab0e9b2928ddb0", "score": "0.6473517", "text": "def _dict(self) -> Dict[str, Union[str, List[str]]]:\n _dict = {\n \"id\": self.id,\n \"name\": self.name,\n \"title\": self.title,\n \"type\": self.type.value,\n }\n if self.workspace is not None:\n _dict[\"namespace\"] = [self.workspace.airtable_id]\n\n return _dict", "title": "" }, { "docid": "7037000da02367bd57cfac62bddfe956", "score": "0.64703023", "text": "def return_as_dictionary(self):\n out_put_dict = {}\n out_put_dict['productCode'] = self.product_code\n out_put_dict['description'] = self.description\n out_put_dict['marketPrice'] = self.market_price\n out_put_dict['rentalPrice'] = self.rental_price\n out_put_dict['brand'] = self.brand\n out_put_dict['voltage'] = self.voltage\n\n return out_put_dict", "title": "" }, { "docid": "6fbc48ca667e638b85f19b2c536ba70a", "score": "0.646598", "text": "def params(self) -> dict:", "title": "" }, { "docid": "f106ac64bed986c8f6697dabbbeaadaf", "score": "0.6458572", "text": "def _to_dict(self):\n return self.data", "title": "" }, { "docid": "3ebd8c1fe820e141a30b6635efd5972b", "score": "0.6453693", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'type') and self.type is not None:\n _dict['type'] = self.type\n if hasattr(self, 'frequency') and self.frequency is not None:\n _dict['frequency'] = self.frequency\n if hasattr(self, 'arguments') and self.arguments is not None:\n _dict['arguments'] = [x._to_dict() for x in self.arguments]\n if hasattr(self, 'evidence') and self.evidence is not None:\n _dict['evidence'] = [x._to_dict() for x in self.evidence]\n return _dict", "title": "" }, { "docid": "83f1b5f014843d04ebc7518ba2e6ea86", "score": "0.6448701", "text": "def getDataDict(self):\n return dict(zip(self.keys,[getattr(self,k) for k in self.keys]))", "title": "" }, { "docid": "c7d3b9c3a99ba2db60acfcec840df01d", "score": "0.6435676", "text": "def ToDictionary(self):\n w = DictionaryWriter(self)\n w.Write('start_version')\n w.Write('end_version')\n w.Write('version_regex')\n w.Write('age')\n w.Write('check_components')\n return w.Dictionary()", "title": "" }, { "docid": "530f485a823b245bbc833718a0fadc15", "score": "0.6425314", "text": "def to_dict(self):\r\n return dict(self)", "title": "" }, { "docid": "2bc908da7bc5db270b8406af0b7933ff", "score": "0.64241284", "text": "def get_map(self):\n return_object = {}\n return_object[\"message_id\"] = self.mid\n return_object[\"user_id\"] = self.user_id\n return_object[\"other_id\"] = self.other_id\n return_object[\"message\"] = self.msg\n return_object[\"time_user_seen\"] = self.time_user_seen\n return_object[\"users_notified\"] = self.users_notified\n return_object[\"created_at\"] = self.created_at.__str__()\n return return_object", "title": "" }, { "docid": "a087172ccacc4dabe1a189a264fee8dc", "score": "0.6420896", "text": "def as_dict(self) -> dict:\n d = {\n \"member_no\": self.member_no,\n \"hardware_id\": self.hardware_id,\n \"priority\": self.priority,\n }\n return d", "title": "" }, { "docid": "8186ccef166352214a32f8069fb6582d", "score": "0.64178544", "text": "def to_dict(self) -> Dict[str, Any]:\n return asdict(self)", "title": "" }, { "docid": "250d9f4df6a295a60f0490efae1eaa47", "score": "0.6414717", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'fonts') and self.fonts is not None:\n _dict['fonts'] = [x._to_dict() for x in self.fonts]\n if hasattr(self, 
'styles') and self.styles is not None:\n _dict['styles'] = [x._to_dict() for x in self.styles]\n return _dict", "title": "" }, { "docid": "f15d399d0fc21f99a3ea84e7350bb226", "score": "0.6413072", "text": "def get_dict(self):\n ret = dict((k, v[AVM.VALUE]) for k, v in self.__data.iteritems())\n ret['__NAME__'] = self.name\n return ret", "title": "" }, { "docid": "e87d510dfd8efe17ee1cd36ca4374e7f", "score": "0.6412388", "text": "def to_dict(self) -> Dict[Any, Any]:\n return dict(\n first=self._first.to_dict(),\n second=self.second.to_dict(),\n evidence=self.evidence.to_dict(),\n number=self.number,\n is_consensus=self.is_consensus,\n software=self.software,\n mmej=self.mmej,\n )", "title": "" }, { "docid": "4fb4ba90bad59091fa2bdedfed1ecbf3", "score": "0.6406319", "text": "def to_dict(self) -> Dict:\n return {n: getattr(self, n, None) for n in self.__slots__}", "title": "" }, { "docid": "6e3d9e01e75f99633270db87b7e938a2", "score": "0.6402351", "text": "def to_dictionary(self):\n ret_dict = {}\n for k, v in vars(self).items():\n if k is \"id\":\n ret_dict[k] = v\n elif \"width\" in k or \"height\" in k:\n ret_dict[\"size\"] = v\n else:\n ret_dict[k[12:]] = v\n return ret_dict", "title": "" }, { "docid": "87de55187bc00d9c0bc251ec2059d338", "score": "0.6400135", "text": "def to_dict(self):\n return {self: self.proba}", "title": "" }, { "docid": "4350fe683fdbcc1ba8551a7e88faf501", "score": "0.6395758", "text": "def get_as_dict(self):\n return {self.beautify_key(k): v for k, v in vars(self).items()}", "title": "" }, { "docid": "8967d2d6cf58c722a3135cb759eb990e", "score": "0.6392126", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'document_id') and self.document_id is not None:\n _dict['document_id'] = self.document_id\n if hasattr(self,\n 'cross_reference') and self.cross_reference is not None:\n _dict['cross_reference'] = self.cross_reference\n if hasattr(self, 'relevance') and self.relevance is not None:\n _dict['relevance'] = self.relevance\n return _dict", "title": "" }, { "docid": "31049cf06048bb3aab9fc4884f260bca", "score": "0.6390035", "text": "def dictionary(self):\n dicti = dict()\n dicti[\"name\"] = self.name\n dicti[\"provision_type\"] = self.provision_type\n dicti[\"commands\"] = self.commands\n return dicti", "title": "" }, { "docid": "3a7834da9aed63dec19523e5e9c3da7c", "score": "0.6379093", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'key') and self.key is not None:\n _dict['key'] = self.key\n if hasattr(self,\n 'matching_results') and self.matching_results is not None:\n _dict['matching_results'] = self.matching_results\n if hasattr(self, 'event_rate') and self.event_rate is not None:\n _dict['event_rate'] = self.event_rate\n return _dict", "title": "" }, { "docid": "fbda710dc5e0762323c9984bc32c13a2", "score": "0.63718134", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'exclude') and self.exclude is not None:\n _dict['exclude'] = self.exclude\n if hasattr(self, 'include') and self.include is not None:\n _dict['include'] = self.include\n return _dict", "title": "" }, { "docid": "03d49b1d7fcc6caaa54a7d2e133e7fe1", "score": "0.6368818", "text": "def _meta(self) -> dict:\n return {}", "title": "" }, { "docid": "7e92f82cbed26ebb24477823f3a07bfd", "score": "0.63673794", "text": "def get_metadata(self):\n return {}", "title": "" }, { "docid": "a6706574fe17811efd3b066993f37d4d", "score": "0.6367091", "text": "def parameters(self) -> Dict:\n return {}", "title": "" }, { "docid": "8a71f7c6482d703344b220ed8e85b10c", "score": "0.63606036", "text": 
"def to_dict(self) -> dict:\n\t\treturn dataclasses.asdict(self)", "title": "" }, { "docid": "49c8c34c1552b58aefc92426a9567163", "score": "0.63592243", "text": "def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"price\": self.price\n }", "title": "" }, { "docid": "71623f41457160d4a047f623195e118f", "score": "0.63567865", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'match') and self.match is not None:\n _dict['match'] = self.match\n return _dict", "title": "" }, { "docid": "ee0b0ce4f5187c3f1d5a4a711f8baf46", "score": "0.6352033", "text": "def to_dictionary(self):\n temp = {}\n temp['id'] = self.id\n temp['size'] = self.size\n temp['x'] = self.x\n temp['y'] = self.y\n return temp", "title": "" }, { "docid": "b933c2f5c5e3e4439b4ed0cc8880e1d4", "score": "0.6350928", "text": "def ToDictionary(self):\n w = DictionaryWriter(self)\n w.Write('id')\n w.Write('condition', func=Condition.ToDictionary)\n w.Write('trigger', func=Trigger.ToDictionary)\n w.Write('notification', func=Notification.ToDictionary)\n return w.Dictionary()", "title": "" }, { "docid": "25053642c3776dd6a524de2c21254135", "score": "0.6345036", "text": "def _get_params(self):\n\n return {}", "title": "" }, { "docid": "0f2105df152584bbc6f35122b8ddab0a", "score": "0.63437384", "text": "def get_dict(self):\n source_dict = self.__dict__\n source_dict['file'] = self.get_mini_file_dict(self.file)\n source_dict['crossref_source'] = self.get_mini_source_dict(self.crossref_source)\n return source_dict", "title": "" }, { "docid": "42c66636a4c93ae4153df70e8b087509", "score": "0.6341441", "text": "def toDict(self) :\n aDict = {}\n\n # first required keys\n try:\n aDict[self.STATION_KEY] = self.station\n aDict[self.NETWORK_KEY] = self.network\n except NameError:\n print (\"Missing data error\")\n\n # second optional keys\n try:\n if self.channel != '':\n aDict[self.CHANNEL_KEY] = self.channel\n except:\n pass\n\n try:\n if self.location != '':\n aDict[self.LOCATION_KEY] = self.location\n except:\n pass\n\n return aDict", "title": "" }, { "docid": "b62db89bf032f040efc269b8bbf08aaf", "score": "0.6339664", "text": "def generate_dictionary(self):\n\n\n\t\tdictionary = {}\n\t\t#count = 0\n\t\t#for vsid in self.vs_ids[0:num_people]:\n\t\t#\tvariants = list(self.client.search_variants(vsid, call_set_ids= self.cs_ids,\n\t\t#\t\tstart=begin, end=stop, reference_name = num_chrom))\n\t\t#\ti = 0\n\t\t#\tfor v in variaints:\n\t\t#\t\tfor call in v.calls:\n\t\t#\t\t\ti += 1\n\t\t#\t\t\tdicitionary[call.call_set_id] = i\n\t\t#return (dictionary)\n\t\t#i = 0\n\t\t#for csid in self.cs_ids:\n\t\t#\ti += 1\n\t\t#\tdictionary[csid] = i\n\n\t\t#dictionary = {}\n\n\t\t#for variant_set_id in self.vs_ids:\n\t\tvariants = self.client.search_variants(self.vs_ids[0], call_set_ids= self.cs_ids[0:self.num_people],\n\t\t \t\tstart=self.start, end=self.end, reference_name = self.chrom )\n\n\t\ti = 0\n\t\tfor v in variants:\n\t\t\tfor call_inner in v.calls:\n\t\t\t\ti += 1\n\t\t\t\tdictionary[call_inner.call_set_id] = i\n\n\t\treturn (dictionary)", "title": "" }, { "docid": "389b472d330f501a91ca2498aa64017d", "score": "0.6335126", "text": "def as_dict(self) -> Dict[str, Any]:\n return {\n \"app_display_name\": self.app_display_name,\n \"app_id\": self.app_id,\n \"data\": self.data,\n \"device_display_name\": self.device_display_name,\n \"kind\": self.kind,\n \"lang\": self.lang,\n \"profile_tag\": self.profile_tag,\n \"pushkey\": self.pushkey,\n \"enabled\": self.enabled,\n \"device_id\": self.device_id,\n }", "title": "" }, 
{ "docid": "0ffd64e3672bef76674010f3f36fd254", "score": "0.63319045", "text": "def state_dict(self) -> Dict[str, Any]:\n ...", "title": "" }, { "docid": "41e5b876d25ee6dda1d73921ba2fefc9", "score": "0.6330441", "text": "def read(self):\n\n return {self.name: {'value': self.value,\n 'timestamp': self.timestamp}}", "title": "" }, { "docid": "8458c919685a8f9f98418cc9d0a7b425", "score": "0.63301283", "text": "def epgdict(self):\n\n return {\n \"callsign\": self.dict[\"callsign\"],\n \"name\": self.dict[\"name\"],\n \"number\": self.number,\n \"id\": self.dict[\"origin_id\"],\n \"thumbnail\": self.thumbnail,\n \"listing\": [],\n }", "title": "" }, { "docid": "db6161892ee1dd35ce76df3105bb0288", "score": "0.63284093", "text": "def to_dictionary(self):\n final = {}\n attrib = ['size', 'x', 'y', 'id']\n for i in attrib:\n final[i] = getattr(self, i)\n return final", "title": "" }, { "docid": "bdf953c9e93c02f381f1d46af43a0ebe", "score": "0.6324076", "text": "def ToDictionary(self):\n w = DictionaryWriter(self)\n w.Write('annotation')\n w.Write('update_to_version')\n w.Write('custom_message')\n return w.Dictionary()", "title": "" }, { "docid": "d8f919c2e4c4c216bc74452ad4b38f48", "score": "0.6318973", "text": "def retrieve_properties(self):\n return {}", "title": "" }, { "docid": "df28b1a221cb4be152a86e7290c39fde", "score": "0.63189274", "text": "def asdict(self) -> Dict[str, Any]:\n return dataclasses.asdict(self)", "title": "" }, { "docid": "cb3652ca20680b7db624b92770e8a43a", "score": "0.63175184", "text": "def _interp_dict(self):\n i_dict = {}\n for section in self.sections():\n for item in self.items(section, raw=True):\n i_dict[section + \".\" + item[0]] = item[1] \n return i_dict", "title": "" }, { "docid": "49ba4d8697efe195bbb24b7ddd0fcd98", "score": "0.63133293", "text": "def _as_dict(self):\n d = {}\n for k, vals in self._store.values():\n d[k] = vals[0] if len(vals) == 1 else vals\n return d", "title": "" }, { "docid": "3a1ee8efdcd2bffdc866a7a7eb336347", "score": "0.6312363", "text": "def get_map(self):\n return_object = {}\n return_object[\"user_id\"] = self.user_id\n return_object[\"username\"] = self.username\n return_object[\"email\"] = self.email\n return_object[\"profile_picture\"] = self.profile_picture\n return_object[\"image_index\"] = self.image_index\n return_object[\"images_visited\"] = self.images_visited\n return_object[\"friends\"] = self.friends\n return_object[\"security_level\"] = self.security_level\n return_object[\"info\"] = self.info\n return_object[\"created_at\"] = self.created_at.__str__()\n return return_object", "title": "" }, { "docid": "91bc02f9c6549521367d2a118040f843", "score": "0.6308826", "text": "def to_dictionary(self):\n attrs = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n return {k: getattr(self, k) for k in attrs}", "title": "" }, { "docid": "46d1d820fa751ed1977ed905e66843df", "score": "0.63048875", "text": "def _toDict(self):\n\t\toutputDict = dict()\n\t\toutputDict[\"Name\"] = self.name\n\t\toutputDict[\"Description\"] = self.description\n\t\toutputDict[\"n Value\"] = self.nValue\n\t\t\n\t\tftDefs = dict()\n\t\tfor ftDef in self.filetypeDefinitions:\n\t\t\tftDefs[ftDef.name] = ftDef._toDict()\n\n\t\toutputDict[\"Filetype Definitions\"] = ftDefs\n\t\treturn outputDict", "title": "" }, { "docid": "1dbdf30708e2c918f07f85d532c5a354", "score": "0.6304459", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'limit') and self.limit is not None:\n _dict['limit'] = 
self.limit\n return _dict", "title": "" } ]
5e06fab16bb0d071c255737bfb52f140
Return pulls tags from a given username and repository ID.
[ { "docid": "01255111111eef23e1ba5ba8a38f9085", "score": "0.6062889", "text": "def pulls_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"pulls\", access_token)", "title": "" } ]
[ { "docid": "182350f0750dd0612243d00a837739d5", "score": "0.71279055", "text": "def tags_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"tags\", access_token)", "title": "" }, { "docid": "af2378e9a1fc72aa89a9e5790a8eb95b", "score": "0.6983423", "text": "def tags_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(repository_id, \"tags\", access_token)", "title": "" }, { "docid": "4905c932d66d264eddfa123374bd5846", "score": "0.6430474", "text": "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "title": "" }, { "docid": "f90977a0db08b93f20bef7121e01822d", "score": "0.63649845", "text": "def test_get_repositories_by_username_by_repo_slug_pullrequests_by_pull_request_id_statuses(self):\n pass", "title": "" }, { "docid": "b4976e40f01d2b92304630deb8e86c75", "score": "0.630853", "text": "def pulls_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"pulls\", access_token)", "title": "" }, { "docid": "095e2c73a385476887ac0f99191505e4", "score": "0.62459356", "text": "def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result", "title": "" }, { "docid": "d2bbf82dc23c6d9d6fbed9a249b0c15b", "score": "0.6162564", "text": "def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response", "title": "" }, { "docid": "4d1c74f8b37b6d999a4e8f50b208f6a0", "score": "0.60822016", "text": "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "title": "" }, { "docid": "4e9a5ca43f0bc22e38b7dd20eeb80c6c", "score": "0.59965533", "text": "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "title": "" }, { "docid": "0c186bd9862873a363fae480bb4f38a9", "score": "0.58527094", "text": "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n 
print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "title": "" }, { "docid": "7bd1173c0ab8a78f01eb0eb3b103b6db", "score": "0.57975936", "text": "def userlog_tags(self):\n url = (yield self.get_sitemap())['userlogs'] + '/tags'\n response = yield self._http_client.fetch(url)\n raise tornado.gen.Return(json.loads(response.body))", "title": "" }, { "docid": "46b742dacd110b59d8951fbcd93c7749", "score": "0.5736162", "text": "def tag_list(request):\r\n rdict = request.matchdict\r\n username = rdict.get(\"username\", None)\r\n if username:\r\n username = username.lower()\r\n\r\n tags_found = TagMgr.find(username=username)\r\n\r\n return {\r\n 'tag_list': tags_found,\r\n 'tag_count': len(tags_found),\r\n 'username': username,\r\n }", "title": "" }, { "docid": "f274ea95039878d251de9682ca315922", "score": "0.5723681", "text": "def tags(request):\n return Tag.objects.filter(user=request.user)", "title": "" }, { "docid": "84b6e65d8efd28a47576fad46c6713fe", "score": "0.57109725", "text": "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "title": "" }, { "docid": "dc32c904161af396b766673836efbc2c", "score": "0.57017225", "text": "def test_get_repositories_by_username_by_repo_slug_commit_by_node_statuses(self):\n pass", "title": "" }, { "docid": "f3c95bfd772f3ebad406d6810c0a462e", "score": "0.5649908", "text": "def by_user(user):\n return Tag.objects.all().filter(owner=user)", "title": "" }, { "docid": "1c7ba89b1bfc166e8329960302c2730c", "score": "0.5628508", "text": "def get_repositories(github_user):\n\n if not github_user:\n return [1, {\"message\": \"GitHub username missing\"}]\n else:\n\n # build Request object\n request = urllib2.Request(\"https://api.github.com/users/\"\n + str(github_user) + \"/repos\")\n request.get_method = lambda: 'GET'\n try:\n '''try to send the request to the GitHub API and\n create Python dictionary from JSON response'''\n repositories = urllib2.urlopen(request)\n repositories = json.loads(\"\\n\".join(repositories.readlines()))\n\n return [0, repositories]\n\n except urllib2.HTTPError as e:\n\n # return HTTP error and the message from the API\n return [1, {\"message\": str(e) + \": \"\n + json.loads('\\n'.join(e.readlines()))['message']}]", "title": "" }, { "docid": "6af12ab71f8075ec10fa5d9a622e71e8", "score": "0.5627698", "text": "def get_for(user):\n projects = Project.get_for(user)\n return Tag.objects.filter(\n project__in=list(projects)\n )", "title": "" }, { "docid": "6c6a1429bcece4b7e47ca2a016d0548b", "score": "0.5606637", "text": "def get_interests_each_member(self, username):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n cursor.execute(\"SELECT tag FROM \" + ENV_DB + \".UserTags WHERE username='\" + username + \"'\")\r\n data = cursor.fetchall()\r\n database.close()\r\n return set([i[0] for i in data])", "title": "" }, { "docid": "2bf3bfa3bcfe122def949ffcc36ca727", "score": "0.55482996", "text": "def api_get_tags(request):\n\n # TODO Get favorite tags for the given user ID\n\n tags = Tag.objects.get_not_empty_tags()\n tag_names = []\n for tag in tags:\n tag_names.append(tag.name)\n\n return HttpResponse(content=json.dumps(tag_names))", 
"title": "" }, { "docid": "752eb7e00c8bc26c2db54819723b382b", "score": "0.553462", "text": "def find_tags(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n tags = []\n for tag, tag_id in [(t, ref_dict[t]) for t in repo.tags]:\n obj, obj_id = repo.repo[tag_id], None\n if isinstance(obj, Tag):\n _, obj_id = obj.object\n if isinstance(obj, Commit):\n obj_id = obj.id\n if commit.id == obj_id:\n tags.append((tag, obj))\n return tags", "title": "" }, { "docid": "e3f9970d0c83dae5ca9c16064364e73a", "score": "0.55273724", "text": "def test_get_repositories_by_username_by_repo_slug_commit_by_node_statuses_build_by_key(self):\n pass", "title": "" }, { "docid": "0cab6e5148bb4367b9cae035f486c9db", "score": "0.5513745", "text": "def get_runners_by_tags(self, tags, project_id):\n\n runners = []\n if tags:\n for tag in tags:\n url = f\"{self.server}/api/v4/projects/{project_id}/runners/all?tag_list={tag}&per_page=100\"\n runners += self.handle_pagination(url)\n else:\n url = f\"{self.server}/api/v4/runners/all?per_page=100\"\n runners = self.handle_pagination(url)\n return runners", "title": "" }, { "docid": "0f2c778de98f8ce6bb9422eaf1601545", "score": "0.5504753", "text": "def get_repos_user(user='xmonader'):\n u = ghclient.get_user(login=user)\n repos = u.get_repos()\n repos_list = []\n for i in range(20):\n page = repos.get_page(i)\n if len(page) == 0:\n break\n repos_list.extend(repos.get_page(i))\n return repos_list", "title": "" }, { "docid": "4b469c34e426a124de779b2677b0923c", "score": "0.54852825", "text": "def get_asset_tags(user_id):\n result = db.session.query(Tag).filter(\n NotificationAssetTag.user_id == user_id,\n NotificationAssetTag.tag_id == Tag.id\n ).all()\n return result if result else []", "title": "" }, { "docid": "c8974c8dccf010f73da0b235e8fdc8a3", "score": "0.54614776", "text": "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "title": "" }, { "docid": "78e9e0cc09df43be5e9ed7ff21965659", "score": "0.5450847", "text": "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "title": "" }, { "docid": "73017c6433db402a57e2965a4b4a6a8a", "score": "0.544119", "text": "def do_the_pulls(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp_dir:\n path = os.path.join(tmp_dir, \"{}_{}_pulls.txt\".format(repo_id, user_id)\n )\n\n # the first request for pull\n the_url = get_initial_url_pulls(user_id, repo_id)\n resp_obj = requests.get(the_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n\n # prase the initial request.\n rsp_json = json.loads(resp_obj.text)\n print(\"the len of resp is {}\".format(len(rsp_json)))\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n\n # subsequent requests for pull\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, 
basename(path))", "title": "" }, { "docid": "c5a1c411aec45bd48d199a778f8ec908", "score": "0.5434369", "text": "def get_repos(github_id):\r\n\r\n url = 'https://api.github.com/users/{}/repos'.format(github_id)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n repo_list = []\r\n \r\n for data in todos:\r\n repo_list.append(data['name'])\r\n\r\n return repo_list", "title": "" }, { "docid": "07ba7f9d3d62f406ef68648c3c3c6bea", "score": "0.5430206", "text": "def do_list_tags(cs, args):\n resp, tags = cs.repositories.list_tags(args.repository)\n tags = [{\"Tag\": t} for t in tags]\n utils.print_list(tags, [\"Tag\"], sortby=\"Tag\")", "title": "" }, { "docid": "fbb627625786afb6452bc6f7923f295e", "score": "0.5416489", "text": "def get_tags(self):\n tags = []\n for image in self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags", "title": "" }, { "docid": "5788fb21babe44d803548555844ba7d3", "score": "0.5408225", "text": "def read_tag(\n *,\n db: Session = Depends(get_db),\n id: int,\n current_user: DBUser = Depends(get_current_active_user),\n):\n tag = crud.tag.get(db_session=db, id=id)\n if not tag:\n raise HTTPException(status_code=404, detail=\"Tag not found\")\n if not crud.user.is_superuser(current_user) and (tag.owner_id != current_user.id):\n raise HTTPException(status_code=400, detail=\"Not enough permissions\")\n return tag", "title": "" }, { "docid": "638389e68b0a893673c3b596944b2117", "score": "0.54075545", "text": "def pull(connection, rid=None, repo=None):\n\n if repo is None:\n repo = Repository(connection, rid)\n\n return repo.pull()", "title": "" }, { "docid": "e1238912981afc2e0d4891dc3c139956", "score": "0.53965735", "text": "def projects_with_tag(request, tag):\n return tag.project_set.filter(user=request.user)", "title": "" }, { "docid": "1eb9bebbe1458cd918cbab7970effdde", "score": "0.5380765", "text": "def getTagsUsingId(self,resourceId):\n response = requests.get('https://api.imagga.com/v1/tagging?content=%s' % resourceId,\n auth=(self.apikey, self.secret))\n #print ('printing response')\n #print (response.json())", "title": "" }, { "docid": "b206e2d721b97cae048d21064b4f893d", "score": "0.53694683", "text": "def get_tags(requests_session: object, project_id: str, server_id: str) -> tuple:\n\n tags_response = requests_session.get(\n ECS_ENDPOINT_URL + ECS_API_URIS.get('tags_get').format(project_id=project_id, server_id=server_id))\n status_code = tags_response.status_code\n tags = []\n if status_code == 200:\n tags = tags_response.json()['tags']\n message = \"success\"\n else:\n message = tags_response.content\n return tags, status_code, message", "title": "" }, { "docid": "f73f09b263443285a02a2f3cb3ebc98c", "score": "0.53600097", "text": "def AddRepositoryTags(prefix='', rid=True, date=True, revision=True,\n wrap=False):\n tags = []\n wrapper = '\"' if wrap else ''\n\n # Format print the '$' into the RCS tags in order prevent the tags from\n # being interpolated here.\n p4_id = '%s%sId:%s%s' % (wrapper, '$', '$', wrapper)\n p4_date = '%s%sDate:%s%s' % (wrapper, '$', '$', wrapper)\n p4_revision = '%s%sRevision:%s%s' % (wrapper, '$', '$', wrapper)\n if rid:\n tags.append('%s%s' % (prefix, p4_id))\n if date:\n tags.append('%s%s' % (prefix, p4_date))\n if revision:\n tags.append('%s%s' % (prefix, p4_revision))\n return tags", "title": "" }, { "docid": "90252b2912bbf6b4b4bb7f93c6cce00d", "score": "0.535491", "text": "def get_tags(self, *args, 
**kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]", "title": "" }, { "docid": "7029e92a73c1da4e77582c9394d9010d", "score": "0.53532547", "text": "def get_usernames_from_tag(tag):\n try:\n tag_url = 'https://www.instagram.com/explore/tags/{0}/?__a=1'.format(tag)\n tag_res = requests.get(tag_url).json()\n\n nodes = tag_res['tag']['media']['nodes']\n\n new_urls = [node['code'] for node in nodes]\n new_req = [requests.get('https://www.instagram.com/p/{0}/?__a=1'.format(url)).json() for url in new_urls]\n\n usernames = [req['graphql']['shortcode_media']['owner']['username'] for req in new_req]\n\n return usernames\n\n except Exception as err:\n print('{0} - This account was not classified...'.format(str(err)))\n return []", "title": "" }, { "docid": "6ca6172c5f506422581c8b2740d13c2f", "score": "0.534942", "text": "def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)", "title": "" }, { "docid": "43a0f8b5a8397c6a713f197314ccaaae", "score": "0.53262275", "text": "def get_repository(self, user, repository):\n\n # With this request I can url to request the list of all user repositories.\n repos_url = requests.get('https://api.github.com/users/' + user, auth=self.authentication).json()['repos_url']\n\n # This request will obtain the list of all repositories.\n repositories_response = requests.get(repos_url, auth=self.authentication).json()\n\n # I __get_repo_url_by_name to obtain url of repository I want to use.\n commits_url = self.__get_repo_url_by_name(repository, repositories_response)\n\n # Request to obtain all commits to the repository.\n response_commits = requests.get(commits_url, auth=self.authentication).json() # all commits\n\n # Get url to final commit version of repository.\n actual_repo_url = response_commits[0]['commit']['tree']['url']\n\n # Get the list of all files in repository.\n actual_repo_response = requests.get(actual_repo_url+\"?recursive=1\", auth=self.authentication).json()\n\n # iterate over list of files in repository and find .py ones.\n for element in actual_repo_response['tree']:\n if element['path'].endswith('.py'):\n # Creation of File object.\n new_file = github.file.File(element['path'], self.__get_file_code(element['url']))\n self.files[element['path']] = new_file", "title": "" }, { "docid": "d423eaefe5c2d627f8447b937b707c88", "score": "0.53172404", "text": "def get_repo_prs(owner, repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/pulls'\n query_params = {'state': 'all'}\n return get_one_item_at_a_time(url, query_params, session)", "title": "" }, { "docid": "48f797ae981b6700200e8a6697f7748b", "score": "0.52883494", "text": "def pull(self, repo, tag):\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)", "title": "" }, { "docid": "833c861eef6235cae69713d7f628b162", "score": "0.5264566", "text": "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "title": "" }, { "docid": "833c861eef6235cae69713d7f628b162", "score": "0.5264566", "text": "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "title": "" }, { "docid": "62a067ea066eb3de7b8c4cb3075d9a3e", "score": "0.52637863", "text": "def get_tags(filter, api_site_parameter, page = 1, pagesize = 10, 
sort = 'popular'):\n path = \"tags\"\n \n query_filter = ')(Yb(vlSfU'\n \n results = __fetch_results(path, api_site_parameter, inname= filter, page = page, pagesize = pagesize, filter = query_filter, sort = sort)\n return results", "title": "" }, { "docid": "e74d4ed76e399ba277061a98895ad8fb", "score": "0.52289623", "text": "def fetch_repos(connection):\n\n try:\n response = connection.get_json('repository')\n except HTTPRequestError as ex:\n raise exception_from_http_error(ex) from ex\n\n result = response.get('result', [])\n return [Repository(connection, repo['rid'], data=repo) for repo in result]", "title": "" }, { "docid": "f7ad03934c05b16c69b2439a870e37df", "score": "0.5207001", "text": "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "title": "" }, { "docid": "149d2ac5a2486204c82d2d1ba4d6afd6", "score": "0.51926845", "text": "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "title": "" }, { "docid": "cabbce04a8c843c1d2329e6a84802516", "score": "0.5181648", "text": "def get_tags(x):\n poster = x['_source']['user']['id']\n for user in x['_source']['users_in_photo']:\n if user['user']['id'] != poster:\n yield (poster, user['user']['id'])", "title": "" }, { "docid": "af6c6eea66787d4e3b57cc27d386646b", "score": "0.5174223", "text": "def find_by_id(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.get(path, params, **options)", "title": "" }, { "docid": "edeb6aa8f904ec2534c9bd0802e30cfb", "score": "0.51695865", "text": "def pull_image_then_retag(docker_client=None, repository=None, tag='latest', retag_repository=None):\n try:\n Logger.debug(f\"ready to pull the image {repository}:{tag}\")\n image = docker_client.images.pull(repository, tag)\n Logger.info(f\"pull the image {image.attrs['RepoTags'][0]} completed,ready to re-tag.\")\n return image.tag(retag_repository, tag)\n except docker.errors.APIError as error:\n Logger.error(error)\n return False", "title": "" }, { "docid": "588c6c43516b1467782d7057d3d41c93", "score": "0.5154211", "text": "def repository_by_name(self, username, repository_name, access_token=None):\n url = \"{0}/repos/{1}/{2}{3}\"\n access_token = self.get_token(access_token)\n token_arg = ''\n\n if access_token != '':\n token_arg = \"?access_token={}\".format(access_token)\n\n response = requests.get(\n url.format(\n self.ROOT_API_URL, username, repository_name, token_arg)\n )\n\n self._check_common_status_code(response, access_token)\n\n if response.status_code == requests.codes.not_found:\n raise RepositoryNameNotFoundError(\n {'repository_name': repository_name, 'username': username})\n\n return response.json()", "title": "" }, { "docid": "1660086234ccecf5fb276a1e8ef52f32", "score": "0.51532847", "text": "def get_tags(directory=None):\n out = check_output('git tag -l', shell=True, cwd=directory)\n return [l.strip() for l in out.splitlines()]", "title": "" }, { "docid": "6c54d9b5927e1ccc3f9dee33a361335b", "score": "0.5152293", "text": "def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)", "title": "" }, { "docid": "5824ec83cf0613bdb0db1866a710be7d", "score": "0.51511586", "text": "def get_recent_tags(self, auth_userid, owner_userid, limit):\n\t\ttry:\n\t\t\tif 
auth_userid:\n\t\t\t\tauth_userid = validation.cast_integer(auth_userid, 'auth_userid')\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\tlimit = validation.cast_integer(limit, 'limit')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\t\tlimit_clause = \"\"\n\t\tif limit:\n\t\t\tlimit_clause = \"LIMIT %s\" % limit\n\n\t\tif not auth_userid:\n\t\t\td = Deferred()\n\t\t\td.callback([])\n\t\t\treturn d\n\t\telse:\n\t\t\tquery = \"\"\"\n\t\t\t\tSELECT\n\t\t\t\t\tt1.tag_name,\n\t\t\t\t\tCOUNT(t2.image_id) AS cnt_images,\n\t\t\t\t\tMAX(t1.date_added) as last_used\n\t\t\t\tFROM\n\t\t\t\t\tuser_image_tags t1\n\t\t\t\t\tJOIN user_images t2 using(image_id)\n\t\t\t\tWHERE\n\t\t\t\t\tt2.owner_userid = %%s\n\t\t\t\tGROUP BY\n\t\t\t\t\tt1.tag_name\n\t\t\t\tORDER BY\n\t\t\t\t\tlast_used desc\n\t\t\t\t%s\t\t\t\t\n\t\t\t\t\"\"\" % limit_clause\n\t\t\n\t\t\treturn self.app.db.query(query, (auth_userid, ))", "title": "" }, { "docid": "28723bef635da223312a952f6ae3c9c6", "score": "0.5146321", "text": "def tags(self) -> List[str]:\n if \"RepoTags\" in self.attrs:\n return [tag for tag in self.attrs[\"RepoTags\"] if tag != \"<none>:<none>\"]\n return []", "title": "" }, { "docid": "5477074b9e6ec32f424c6d0bef54e41b", "score": "0.51358074", "text": "def tags(self, uuid):\n return self._backend.tags(uuid)", "title": "" }, { "docid": "2e035a45991c33e3865029a39c9b6cb0", "score": "0.5132972", "text": "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "title": "" }, { "docid": "5f3b438c43ac9f590a2bd0b9e4933383", "score": "0.51197255", "text": "def user_repositories(self, host: (str), user: (str)) -> Any:\n return search_api(\"user_repositories\", host, user)", "title": "" }, { "docid": "51f5550227e3369451f2f8dab0ba23b2", "score": "0.50912994", "text": "def get_pull_requests():\n pull_requests = []\n url_base = f\"https://github.com/{GITHUB_OWNER}/{GITHUB_REPO}/pull/\"\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n pulls = repo.get_pulls(base=\"main\", state=\"closed\")\n last_release_date = repo.get_latest_release().published_at\n for pull in pulls:\n if not pull.draft and pull.closed_at > last_release_date and pull.merged:\n log_line = f\"* {pull.title} [#{pull.number}]({url_base}{pull.number})\"\n pull_requests.append(log_line)\n return pull_requests", "title": "" }, { "docid": "91f334879584adf9cbce103c5cf9fdb0", "score": "0.509017", "text": "def get_trunk_tag(case_dict, username, password):\n # ---------------------------------------------------------------------\n logger.debug(\"get_trunk_tag\")\n\n tag = 0\n svn_repo = \"{0}/trunk_tags\".format(case_dict[\"svn_repo_url\"])\n cmd = [\"svn\", \"list\", svn_repo, \"--username\", username, \"--password\", password]\n result = \"\"\n try:\n result = subprocess.check_output(cmd)\n except subprocess.CalledProcessError as error:\n cmd_nopasswd = [\n \"svn\",\n \"list\",\n svn_repo,\n \"--username\",\n username,\n \"--password\",\n \"******\",\n ]\n msg = _call_template.substitute(\n function=\"get_trunk_tag\",\n cmd=cmd_nopasswd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)\n\n if result:\n last_tag = [i for i in result.split(\"\\n\") if i][-1]\n last_tag = last_tag[:-1].split(\"_\")[-1]\n tag = int(last_tag.lstrip(\"0\"))\n\n return tag", "title": "" }, { "docid": "776a7c112fa876eaa7775a22c736f2ec", 
"score": "0.50711644", "text": "def get_image_tags(self, owner_userid, image_id, tag_type='owner'):\n\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tvalidation.oneof(tag_type, ('owner', 'public', 'all'), 'tag_type')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tif tag_type == 'owner':\n\t\t\tinclude_clause = \"AND tag_userid = t2.owner_userid\"\n\t\telif tag_type == 'public':\n\t\t\tinclude_clause = \"AND tag_userid != t2.owner_userid\"\n\t\telif tag_type == 'all':\n\t\t\tinclude_clause = \"\"\n\n\t\treturn self.app.db.query(\"\"\"\n\t\t\t\tSELECT\n\t\t\t\t\ttag_name,\n\t\t\t\t\tt3.userid AS tag_userid,\n\t\t\t\t\ttag_userid,\n\t\t\t\t\tdate_added\n\t\t\t\tFROM\n\t\t\t\t\tuser_image_tags t1\n\t\t\t\t\tJOIN user_images t2 USING (image_id)\n\t\t\t\t\tJOIN users t3 ON (t1.tag_userid = t3.userid)\n\t\t\t\tWHERE\n\t\t\t\t\timage_id = %%s\n\t\t\t\t\t%s\n\t\t\t\tORDER BY tag_name asc\n\t\t\t\t\"\"\" % include_clause, (image_id, ))", "title": "" }, { "docid": "8ab18cdd9553273c1eee18fa8df8ae16", "score": "0.5070949", "text": "def listTags(self, authenticationToken):\r\n pass", "title": "" }, { "docid": "6b33e96f52d20e7d8a6a0fcfeb28fdea", "score": "0.50558114", "text": "def show_tags(config, args):\n for item in lib.input_json_lines():\n yield config.repo.tag(item)", "title": "" }, { "docid": "bb0a7b02214a994349929738e8cb7a4a", "score": "0.50509053", "text": "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "title": "" }, { "docid": "13789fd4c58419798f51483ff2a04515", "score": "0.5050538", "text": "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "title": "" }, { "docid": "de78c45394fc4fb8dce06c00bf61b7a9", "score": "0.50461024", "text": "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), 
parsers.parse_json", "title": "" }, { "docid": "c2d86aa88298e717c4027ab2e7e9bba9", "score": "0.50459886", "text": "def tagging(self, text):\n\n response = self._send_request(\"tagging\", dict(text=text))\n return response[self._layer]['tags']", "title": "" }, { "docid": "ae546f3284dab00eacc941224d7d40b4", "score": "0.5019375", "text": "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "title": "" }, { "docid": "ae546f3284dab00eacc941224d7d40b4", "score": "0.5019375", "text": "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "title": "" }, { "docid": "7d7adc4bb042167b0b59064a7b35b177", "score": "0.5015816", "text": "def tag_cmd(context, json, name):\n store: Store = context.obj[\"store\"]\n LOG.info(\"Fetch tags\")\n tag_objs = store.get_tags()\n template = schema.TagSchema()\n result = []\n for tag_obj in tag_objs:\n if name and (tag_obj.name not in name):\n continue\n LOG.debug(\"Use tag %s\", tag_obj.name)\n result.append(template.dump(tag_obj))\n if not result:\n LOG.info(\"Could not find any of the specified tags [%s]\", \", \".join(name))\n return\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_tags_table(result))", "title": "" }, { "docid": "a206e3c5935c9fe24e5d6e077a6be2de", "score": "0.50145274", "text": "def multi_get_image_tags(self, owner_userid, image_ids, tag_type='owner'):\n\t\tif owner_userid:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\tif tag_type == 'owner':\n\t\t\tinclude_clause = \"AND tag_userid = t2.owner_userid\"\n\t\telif tag_type == 'public':\n\t\t\tinclude_clause = \"AND tag_userid != t2.owner_userid\"\n\t\telif tag_type == 'all':\n\t\t\tinclude_clause = \"\"\n\t\t\t\n\t\timage_list = []\n\t\tfor id in image_ids:\n\t\t\timage_list.append(\"%s\" % id)\n\n\t\towner_clause = \"\"\n\t\tif owner_userid:\n\t\t\towner_clause = \"AND t2.owner_userid = %(owner_userid)s\"\n\t\tquery_args = {'owner_userid': owner_userid}\n\t\t\t\n\t\treturn self.app.db.query(\"\"\"\n\t\t\t\tSELECT\n\t\t\t\t\ttag_name,\n\t\t\t\t\tcount(*) AS cnt_images\n\t\t\t\tFROM\n\t\t\t\t\tuser_image_tags t1\n\t\t\t\t\tJOIN user_images t2 USING (image_id)\n\t\t\t\tWHERE\n\t\t\t\t\tt1.image_id in (%s)\n\t\t\t\t\t%s\n\t\t\t\t\t%s\n\t\t\t\tGROUP BY\n\t\t\t\t\ttag_name\n\t\t\t\tORDER BY\n\t\t\t\t\ttag_name asc\n\t\t\t\t\"\"\" % (','.join(image_list), owner_clause, include_clause), query_args)", "title": "" }, { "docid": "42179cf731a5d0eaeb6b6d70c87eb7bc", "score": "0.4983276", "text": "def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])", "title": "" }, { "docid": "24bdf46da0d1bc15ef4d9313ed6ae4fc", "score": "0.4979126", "text": "def get_tagname(tags, tagid):\n for tag in tags:\n if tag['id'] == tagid:\n return tag['name']", "title": "" }, { "docid": "174f217102344e240119315eb5fd9e14", "score": "0.49761644", "text": "def test_pull_multiple_tags(self):\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.public,\n 'remotetype': 'dockerv2'\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.public}\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n # Track through transistions\n state = 
self.time_wait(id)\n self.assertEqual(state, 'READY')\n\n # Now reppull with a different tag for the same image\n newtag = self.public.replace('latest', '1')\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': newtag,\n 'remotetype': 'dockerv2'\n }\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Track through transistions\n state = self.time_wait(id)\n # Requery the original record\n mrec = self.images.find_one(q)\n self.assertIn(self.public, mrec['tag'])\n self.assertIn(newtag, mrec['tag'])", "title": "" }, { "docid": "7be6e8a3e8dd85ffbb01fa7be27a1c62", "score": "0.4970902", "text": "def tags(table='None',record_id=None):\n\n return LOAD('plugin_wiki','tags',\n args=(table,record_id or 0),ajax=True)", "title": "" }, { "docid": "1b4cb501ec5d75429efccc157c9f2346", "score": "0.4963289", "text": "def get_remote_tags(self, remote: str):\n\n return tuple(\n line.split(\"/\", 1)[-1]\n for line in self.obj.git.ls_remote(\n \"--tags\", \"--refs\", \"--sort=v:refname\", remote\n ).splitlines()\n )", "title": "" }, { "docid": "c61fe6ed960da7f0a4f71882605855b6", "score": "0.49454686", "text": "def listTagsByNotebook(self, authenticationToken, notebookGuid):\r\n pass", "title": "" }, { "docid": "c5fff7450192140b8230e0d1776f9f11", "score": "0.4939746", "text": "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "title": "" }, { "docid": "ba7e863fec916a68e3228d153b802eca", "score": "0.49353454", "text": "def repo(self, user, repo):\r\n return repos.Repo(self, user, repo)", "title": "" }, { "docid": "9e9cf13f12c45123e613124c3c245b25", "score": "0.49340457", "text": "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "title": "" }, { "docid": "64f9102b722fa629c1cc7818aa1127b4", "score": "0.4927678", "text": "def get_all(user_id, tag_text=None) -> Query:\n query = Snippet.query.filter_by(user_id=user_id)\n if tag_text:\n tag = Tag.query.filter_by(text=tag_text).first()\n tag_id = tag and tag.id\n query = query.join(tagged_snippets).filter_by(tag_id=tag_id)\n return query.order_by(Snippet.year.desc(), Snippet.week.desc())", "title": "" }, { "docid": "1b2e752fd8bde01654d781fcfc4daca9", "score": "0.49262363", "text": "def find(user_id):\n return repository.find(user_id)", "title": "" }, { "docid": "9a95bcb3ba51973f4ca97a635934fb6e", "score": "0.49206075", "text": "def call(self, username):\n profile_response = requests.get(GIT_HUB_API_USER_URL.format(username))\n profile_response.raise_for_status()\n repos_response = requests.get(GIT_HUB_API_REPOS_URL.format(username))\n repos_response.raise_for_status()\n starred_response = requests.get(GIT_HUB_API_STARRED_URL.format(username))\n starred_response.raise_for_status()\n languages, topics = [], []\n stars_received_count, open_issues_count, total_size, = 0, 0, 0\n original_repo_count, forked_repo_count = 0, 0\n for repo in repos_response.json():\n stars_received_count += repo['stargazers_count']\n open_issues_count += repo['open_issues_count']\n total_size += repo['size']\n if repo['language']:\n languages.append(repo['language'].lower())\n # TODO:\n # topics.append(repo['topics'])\n if 
repo['fork']:\n forked_repo_count += 1\n else:\n original_repo_count += 1\n languages = list(set(languages))\n # TODO:\n # topics = list(set(topics))\n git_hub_profile = {\n 'public_repos_count': {\n 'original': original_repo_count,\n 'forked': forked_repo_count,\n },\n 'followers_count': profile_response.json()['followers'],\n 'stars_received_count': stars_received_count,\n 'stars_given_count': len(starred_response.json()),\n 'open_issues_count': open_issues_count,\n 'original_repo_commits_count': -1, # TODO\n 'size_of_account': total_size,\n 'languages_used': {\n 'count': len(languages),\n 'languages': languages\n },\n 'repo_topics': {\n 'count': -1,\n 'topics': []\n },\n }\n return git_hub_profile", "title": "" }, { "docid": "7707c5ff5c71d59cf946198eb381e552", "score": "0.49134362", "text": "def tags():", "title": "" }, { "docid": "3476c2e8ff7c584a0eccefc698f2f95d", "score": "0.49111128", "text": "def read_owners_of_dog(self, dog_id):\n users = list()\n owners = self.__dog_owner_repository.search(f\"dog_id=={dog_id}\")\n for dog_owner in owners.to_list():\n try:\n user = self.read(dog_owner.owner_id)\n users.append(user)\n except NotFoundError:\n pass\n return users", "title": "" }, { "docid": "8942131ca248080f5b1413c5bd0516a4", "score": "0.4899881", "text": "def get_image_tags(self):\n current_images = self.images()\n tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}\n return tags", "title": "" }, { "docid": "94a4e9f1b5b7140db082a0d697256228", "score": "0.48947862", "text": "def get_repos(self):\n\t\tsession = self.login()\n\t\titems = session.query(Repos)\n\t\tresponse = [row2dict(item) for item in items]\n\n\t\tself.logout(session)\n\t\treturn response", "title": "" }, { "docid": "a5aa92c49db754d2488e02876f271f9b", "score": "0.4894571", "text": "def get_tag_list(self, auth_userid, owner_userid, limit, offset, sort, count_flag):\n\t\tvalid_sorts = {\n\t\t\t'title-asc': (\"t1.tag_name\", \"asc\"),\n\t\t\t'title-desc': (\"t1.tag_name\", \"desc\"),\n\t\t\t'date_asc': (\"date_added\", \"asc\"),\n\t\t\t'date_desc': (\"date_added\", \"desc\"),\n\t\t\t'count-asc': (\"cnt_images\", \"asc\"),\n\t\t\t'count-desc': (\"cnt_images\", \"desc\")\n\t\t}\n\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\tif auth_userid:\n\t\t\t\tauth_userid = validation.cast_integer(auth_userid, 'auth_userid')\n\t\t\tlimit = validation.cast_integer(limit, 'limit')\n\t\t\toffset = validation.cast_integer(offset, 'offset')\n\t\t\tvalidation.oneof(sort, valid_sorts.keys(), 'sort')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tselect = [\n\t\t\t\"t1.tag_name\",\n\t\t\t\"MAX(t1.date_added) AS date_added\",\n\t\t\t\"count(t2.image_id) AS cnt_images\"\n\t\t]\n\t\tjoins = [\n\t\t\t\"user_image_tags t1\",\n\t\t\t\"LEFT JOIN user_images t2 USING (image_id)\"\n\t\t]\n\n\t\twhere = [\n\t\t\t\"t2.owner_userid = %(owner_userid)s\",\n\t\t\t\"zoto_user_can_view_media(t2.owner_userid, t1.image_id, %(auth_userid)s)\"\n\t\t]\n\t\tquery_args = {\n\t\t\t'owner_userid': owner_userid,\n\t\t\t'auth_userid': auth_userid\n\t\t}\n\n\t\tlimit_sql = \"\"\n\t\toffset_sql = \"\"\n\t\torder_by_sql = \"\"\n\t\tgroup_by_sql = \"\"\n\t\t\n\t\t#if we only need the count...\n\t\tif count_flag == True:\n\t\t\tselect = [\n\t\t\t\t\"count(distinct(t1.tag_name)) AS count\"\n\t\t\t]\n\t\telse:\n\t\t\torder_by_sql = \"ORDER BY %s %s\" % (valid_sorts[sort][0], valid_sorts[sort][1])\n\t\t\tgroup_by_sql = \"GROUP BY t1.tag_name\"\n\t\t\tif 
limit:\n\t\t\t\tlimit_sql = \"LIMIT %s\" % limit\n\n\t\t\tif offset:\n\t\t\t\toffset_sql = \"OFFSET %s\" % offset\n\t\t\n\t\tquery = \"\"\"\n\t\t\tSELECT\n\t\t\t\t%s\n\t\t\tFROM\n\t\t\t\t%s\n\t\t\tWHERE\n\t\t\t\t%s\n\t\t\t%s -- order_by\n\t\t\t%s -- group_by\n\t\t\t%s -- limit\n\t\t\t%s -- offset\n\t\t\"\"\" % (\",\\n \".join(select), \"\\n\".join(joins), \" AND\\n\".join(where), group_by_sql, order_by_sql, limit_sql, offset_sql)\n\t\t\t\n\t\tself.log.debug(\"*\"*80)\n\t\tself.log.debug(query)\n\t\t\t\n\t\treturn self.app.db.query(query, query_args)", "title": "" }, { "docid": "a644a524bdea23256db2a2b65f39cc1f", "score": "0.48886272", "text": "async def refs(self, user, repo):\n ref_types = (\"branches\", \"tags\")\n ref_data = [None, None]\n\n for i, ref_type in enumerate(ref_types):\n with self.catch_client_error():\n response = await getattr(self.github_client, \"get_%s\" % ref_type)(\n user, repo\n )\n ref_data[i] = json.loads(response_text(response))\n\n return ref_data", "title": "" }, { "docid": "a288b2d11c5cef690c4048f8f6453136", "score": "0.48829398", "text": "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "title": "" }, { "docid": "bee9b5747aa36b94f49915f8667ebb23", "score": "0.4882586", "text": "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "title": "" }, { "docid": "4605939f6adaed043ade66c84b2326ce", "score": "0.4880918", "text": "def repo(self, user, repo):\r\n return repositories.Repo(self, user, repo)", "title": "" }, { "docid": "32c7716f4d7ce86919c230638ccca6bc", "score": "0.48716825", "text": "def _list_tags(self, expression):\n try:\n for tag in self.dockerioapi.get_tags(expression):\n Msg().out(tag)\n return self.STATUS_OK\n except (KeyError, TypeError, ValueError):\n return self.STATUS_ERROR", "title": "" }, { "docid": "ba5468f07084f174652508de8429f5ca", "score": "0.48701033", "text": "def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "title": "" }, { "docid": "f7d0da44d52f12adf25c1415649b7fb8", "score": "0.48691803", "text": "def labels_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"labels\", access_token)", "title": "" }, { "docid": "973bcd4f9d5162829978872c1d170cbe", "score": "0.48653847", "text": "def get_repo(repo_id):\n if repo_id == \"orphans\":\n pkgs = Database().db.get_orphans()\n else:\n pkgs = Database().db.get_repo_pkgs(repo_id)\n return render_template(\"repo.html\", \n title=\" - \"+repo_id,\n repos=Database().db.get_repos_names(),\n pkgs=pkgs,\n repo=repo_id)", "title": "" }, { "docid": "5fb4521cf7c0601d598229cdc58e9d77", "score": "0.48642635", "text": "def get_repositories(self) -> None:\n\n self.log.info(\"Fetching repositories for %s\", self.name)\n\n catalog = self.raw_client.get_catalog().json()\n self.log.info(\"Found the following repositories in registry %s:\", self.name)\n for repo in 
catalog['repositories']:\n tags = self.raw_client.get_tags(repo).json()['tags']\n if tags is None:\n tags = []\n self.log.debug(\"\\t%s with %s tags\", repo, len(tags))\n self.repositories[repo] = Repository(name=repo, registry=self, tags=tags)\n self.log.info(self.repositories[repo])", "title": "" }, { "docid": "5538bafa51108f03442978fe8bc9bd73", "score": "0.48626122", "text": "def get_pulls(self):\n url = self.base_url + 'pulls'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "title": "" } ]
0be3860b5d6da42b962f0c7443225204
Constructor for Datacenter data structure. self.name -> str self.clusters -> list(Cluster)
[ { "docid": "216bee2b06cd3fac63d0b176ae82f434", "score": "0.7706204", "text": "def __init__(self, name, cluster_dict):\n\n self.name = name\n self.clusters = cluster_dict", "title": "" } ]
[ { "docid": "64611b4547d39b69e6d2ea2437c484bc", "score": "0.682774", "text": "def __init__(self,\r\n X=None,\r\n y=None,\r\n AttributeNames=None,\r\n classNames=None,\r\n N=None,\r\n M=None,\r\n C=None,\r\n n_components = 7,\r\n covariance_type = 'full',\r\n n_init = 10,\r\n tolerance = 1e-6):\r\n print('Clustering object initialized')\r\n self.X = X\r\n self.y = y\r\n self.AttributeNames = AttributeNames\r\n self.classNames = classNames\r\n self.N = N\r\n self.M = M\r\n self.C = C\r\n self.K = n_components\r\n self.cov_type = covariance_type\r\n self.reps = n_init\r\n self.tol = tolerance", "title": "" }, { "docid": "c1a67a97bad779008c80a50a5225683c", "score": "0.6752775", "text": "def __init__(self,\n cluster=None,\n container=None,\n name=None,\n mtype=None,\n ):\n\n # Initialize members of the class\n self.cluster = cluster\n self.container = container\n self.name = name\n self.mtype = mtype", "title": "" }, { "docid": "cd3db352b2077fc8f9e43e1c2037e5d9", "score": "0.669899", "text": "def __init__(self, clusters):\n\t\tself.clusters_to_words, self.words_to_clusters = self.getClusterData(clusters)", "title": "" }, { "docid": "74b84157e19757c26b76a140292a6426", "score": "0.65846175", "text": "def __init__(self,\r\n X=None,\r\n y=None,\r\n AttributeNames=None,\r\n classNames=None,\r\n N=None,\r\n M=None,\r\n C=None,\r\n max_cluster=4,\r\n method='single',\r\n metric='euclidean'):\r\n print('Clustering object initialized')\r\n self.X = X\r\n self.y = y\r\n self.AttributeNames = AttributeNames\r\n self.classNames = classNames\r\n self.N = N\r\n self.M = M\r\n self.C = C\r\n self.method = method\r\n self.metric = metric\r\n self.max_cluster = max_cluster\r\n self.Z = linkage(X, method=self.method, metric=self.metric)", "title": "" }, { "docid": "ff37107510d8061caaf0996344588ad4", "score": "0.6495595", "text": "def __init__(self, admin_creds=None,\n mgmt_vip_addr=None,\n name=None):\n if not name:\n name = \"cluster\"\n self.name = name\n self._mgmt_vip_addr = mgmt_vip_addr\n # For now, don't accept a cluster without a management VIP.\n # We may want to relax this in the future, though.\n if mgmt_vip_addr is None:\n raise ValueError(\"Cluster %r has no management VIP\" % name)\n # Credentials\n self._admin_creds = admin_creds\n self._util = None\n\n self._lock = threading.Lock()", "title": "" }, { "docid": "640b9982eaba6853a7d3a7e0f1987a18", "score": "0.6460341", "text": "def __init__(self, n_clusters=120, *args, **kwargs):\n self.engine = KMeans(n_clusters=n_clusters, *args, **kwargs)\n self.method='kmeans'", "title": "" }, { "docid": "75164bb6ffdf6d005f6b98c86fd10b64", "score": "0.6346859", "text": "def __init__(self, dataset, cluster_assignments):\n super(ClusteredDataSet, self).__init__(dataset.get_data_frame(), \n dataset.get_labels())\n self.cluster_assignments = cluster_assignments", "title": "" }, { "docid": "46fa5af05e057a6c01a776acb7cbbb94", "score": "0.6224637", "text": "def __init__(self,\n primary_cluster_id=None,\n primary_cluster_user_sid=None,\n primary_cluster_username=None,\n ):\n\n # Initialize members of the class\n self.primary_cluster_id = primary_cluster_id\n self.primary_cluster_user_sid = primary_cluster_user_sid\n self.primary_cluster_username = primary_cluster_username", "title": "" }, { "docid": "0b7c1a333cdabf35af51bf30059ec79d", "score": "0.6219635", "text": "def __init__(self, data, n_clusters=2, min_coherence = 0.9):\n\n assert data is not None, 'Empty data'\n self._data = data\n (self._I, self._J) = self._data.shape\n self._n_clusters = n_clusters\n 
self._biclusters = list()\n self._objective_function = 0\n self._min_coherence = min_coherence", "title": "" }, { "docid": "910274e5184372a8fde5f25e4a4dafd3", "score": "0.62175065", "text": "def __init__(self, clustering_file):\n self.clustering_file = clustering_file", "title": "" }, { "docid": "ced19e330afd687e3070ccadd3b25fb7", "score": "0.6144237", "text": "def __init__(self,path,idx,clustering_method):\n CreateData.__init__(self,path,idx)\n self.nb_cluster = None\n self.ClusterMethod = clustering_method\n self.cl = None\n self.y_pred = None\n self.cluster_labels = None", "title": "" }, { "docid": "6edc4af305a56e6e9ac7de1ed3857118", "score": "0.61391073", "text": "def clusters_(self):\n pass", "title": "" }, { "docid": "487cab2573f742554b42861c89d1f36f", "score": "0.6122735", "text": "def __init__(self):\n self._cluster_list = []\n self._client_list = []", "title": "" }, { "docid": "e675f3a25353a2aaf7b22a4fa7baa576", "score": "0.60944307", "text": "def cluster(self):\n # TODO", "title": "" }, { "docid": "5dba6910b99a007c5b87fa7a34cbca9c", "score": "0.608139", "text": "def identify_clusters(self, data):\n err = \"Please create a new subclass and implement this method\"\n raise NotImplementedError(err)", "title": "" }, { "docid": "d47a8a1815551431b3926540f4247d3c", "score": "0.6058577", "text": "def __init__(self, cluster_cache_dir, config):\n self.hosts_to_id = ImmutableDictionary({})\n self.components_by_key = ImmutableDictionary({})\n self.hostname = hostname.hostname(config)\n self.current_host_ids_to_cluster = {}\n self.cluster_local_components = {}\n self.cluster_host_info = None\n self.component_version_map = {}\n super(ClusterTopologyCache, self).__init__(cluster_cache_dir)", "title": "" }, { "docid": "8faf1ab990b7ab65bbc3571b398c3a53", "score": "0.6056795", "text": "def __init__(self):\n data = pickle.load(open('vizuka/data/models/clusterizer.pkl', 'rb'))\n self.xs, self.engine = data # self.engine is here a collection of labels\n self.kdtree = cKDTree(self.xs)", "title": "" }, { "docid": "39696f6525fbe953244348545859bc29", "score": "0.605466", "text": "def __init__(self,\n id=None,\n times=None,\n nodes=None,\n active_links=None,\n links=None,\n clusters=None\n ):\n super().__init__(id, times, nodes, active_links, links)\n self.clusters = clusters", "title": "" }, { "docid": "650fa8260d9ddd08ec2ccfaa5a518159", "score": "0.60031825", "text": "def __init__(self, n_clusters,\n dim_subspaces=1):\n # type: (int, int) -> None\n self._dim_subspaces = dim_subspaces\n self._max_iter = 1000\n self._k = n_clusters", "title": "" }, { "docid": "d15702bffa0a1b402c90de340a7a0930", "score": "0.6000481", "text": "def __init__(self, array: np.ndarray, k: int = 3):\n self.points = array\n self.clusters = None\n self.k = k\n self.initialize_clusters()", "title": "" }, { "docid": "b2e66b9d546f9a0cc81480d0562805b7", "score": "0.5987528", "text": "def __init__(self, cluster_name=None, current=None, db_name=None, id=None, index_name=None, jdbc_url=None, my_sql_db_name=None, password=None, type=None, username=None):\n\n self._cluster_name = None\n self._current = None\n self._db_name = None\n self._id = None\n self._index_name = None\n self._jdbc_url = None\n self._my_sql_db_name = None\n self._password = None\n self._type = None\n self._username = None\n\n if cluster_name is not None:\n self.cluster_name = cluster_name\n if current is not None:\n self.current = current\n if db_name is not None:\n self.db_name = db_name\n if id is not None:\n self.id = id\n if index_name is not None:\n self.index_name 
= index_name\n if jdbc_url is not None:\n self.jdbc_url = jdbc_url\n if my_sql_db_name is not None:\n self.my_sql_db_name = my_sql_db_name\n if password is not None:\n self.password = password\n if type is not None:\n self.type = type\n if username is not None:\n self.username = username", "title": "" }, { "docid": "9079009727c1da5fdfa6f9416ed9f6f4", "score": "0.5942966", "text": "def __init__(self, prim, clus, u, clus_dominant):\n self.prim = prim\n self.cluster = clus\n self.u = u\n self.dominant_cluster = clus_dominant\n # print(self.cluster)\n # print(self.dominant_cluster)", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940165", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940165", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940165", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "9751791d2bc7377d211c010ce9cc1280", "score": "0.5940099", "text": "def cluster(self, value):\n Struct._check_type('cluster', value, Cluster)\n self._cluster = value", "title": "" }, { "docid": "dc2ee97c0ee7268013dd48e4461509c0", "score": "0.593609", "text": "def __init__(self, cluster_file: str | PathLike, /) -> None:\n self.cluster_file = str(cluster_file)\n self._gcf_dict = self._parse_gcf(self.cluster_file)\n self._gcf_list = 
list(self._gcf_dict.values())", "title": "" }, { "docid": "f44b248e544fa14b2eaceaeccd3f6a59", "score": "0.5865208", "text": "def get_by_data(self, data):\n\n return OmnistackCluster(self._connection, self._client, data)", "title": "" }, { "docid": "17cf6fa2ad2a38a388efca0802c2b48f", "score": "0.5855749", "text": "def initialize_clusters(self):\n self.clusters = [HierarchicalCluster(self.points[i, :]) for i in range(self.points.shape[0])]", "title": "" }, { "docid": "f70f682bb55f7d66694b82ae6ed300c9", "score": "0.5854909", "text": "def __init__(self,sites,boundaries,N,inhomogeneity=0,box=None,center=None,radius=None,perimeter=None,perimeterToArea=None):\n self.index = GetNewIndexCluster()\n \"\"\"\n Positions of every site in the cluster.\n \"\"\"\n self.sites = SiteList(sites)\n \"\"\"\n All boundaries of the cluster.\n \"\"\"\n self.boundaries = SortedList(boundaries)\n \"\"\"\n Size of the whole system consisting all clusters.\n \"\"\"\n self.N = N\n \"\"\"\n Inhomogeneity of the cluster.\n \"\"\"\n self.inhomogeneity = inhomogeneity\n \"\"\"\n Box of the cluster with periodic boundary conditions.\n \"\"\"\n if box is None:\n if len(sites) == 1:\n self.box = BoxWithPBC([[self.sites[0,0], self.sites[0,0]], \\\n [self.sites[0,1], self.sites[0,1]]], N)\n else:\n self.box = BoxWithPBC([[self.sites[:,0].min(), self.sites[:,0].max()], \\\n [self.sites[:,1].min(), self.sites[:,1].max()]], N)\n else:\n self.box = box\n \"\"\"\n center and radius for dealing with boundary boxes and overlapping clusters.\n\n Position of the center of the cluster.\n \"\"\"\n if center is None:\n self.center = numpy.average(self.sites,axis=0)\n else:\n self.center = center\n \"\"\"\n Maximum size of the cluster.\n \"\"\"\n if radius is None:\n if len(sites) < 3:\n self.radius = len(sites)/2.\n else:\n self.radius = len(sites)\n else:\n self.radius = radius\n \"\"\"\n The perimeter of the cluster.\n \"\"\"\n self.perimeter = perimeter\n \"\"\"\n The ratio of perimeter and area of the cluster.\n \"\"\"\n self.perimeterToArea = perimeterToArea \n \"\"\"\n boundarySites only keep the record of sites near the boundaries.\n Initially they are the same as the sites, but RemoveInternalSites\n method will get rid of some of them to speed up computation.\n \"\"\"\n self.boundarySites = self.sites.copy()\n \"\"\"\n last_checked_size for keeping record of how often you remove internal sites.\n \"\"\"\n self.last_checked_size = len(sites)\n \"\"\"\n Record the global constant of CORESIZE.\n \"\"\"\n self.CORESIZE = CORESIZE", "title": "" }, { "docid": "35ea0f6cfc8486e6d80b8024660b6c1d", "score": "0.58543026", "text": "def __init__(self, results_dir, clusters, verbose = False):\n self.results_dir = results_dir\n self.clusters = clusters\n self.verbose = verbose", "title": "" }, { "docid": "ae7d75b4c485bae6bad6de7820d9b815", "score": "0.58316386", "text": "def __init__(self,\n cassandra_ports_info=None,\n cassandra_security_info=None,\n cassandra_version=None,\n commit_log_backup_location=None,\n config_directory=None,\n data_centers=None,\n dse_config_directory=None,\n is_dse_authenticator=None,\n is_dse_tiered_storage=None,\n is_jmx_auth_enable=None,\n kerberos_principal=None,\n primary_host=None,\n seeds=None,\n solr_nodes=None,\n solr_port=None,\n ):\n\n # Initialize members of the class\n self.cassandra_ports_info = cassandra_ports_info\n self.cassandra_security_info = cassandra_security_info\n self.cassandra_version = cassandra_version\n self.commit_log_backup_location = commit_log_backup_location\n self.config_directory = 
config_directory\n self.data_centers = data_centers\n self.dse_config_directory = dse_config_directory\n self.is_dse_authenticator = is_dse_authenticator\n self.is_dse_tiered_storage = is_dse_tiered_storage\n self.is_jmx_auth_enable = is_jmx_auth_enable\n self.kerberos_principal = kerberos_principal\n self.primary_host = primary_host\n self.seeds = seeds\n self.solr_nodes = solr_nodes\n self.solr_port = solr_port", "title": "" }, { "docid": "61138d073545de3451ccefa1a36fb27c", "score": "0.57975316", "text": "def __init__(__self__, *,\n cluster_name: pulumi.Input[str],\n database_name: pulumi.Input[str],\n principal_id: pulumi.Input[str],\n principal_type: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n role: pulumi.Input[str],\n tenant_id: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n pulumi.set(__self__, \"database_name\", database_name)\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"principal_type\", principal_type)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"role\", role)\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "title": "" }, { "docid": "c88b8db1f74693e211dc503e57b8c497", "score": "0.57732195", "text": "def __init__(self, env, dataset):\n self.env = env\n self.ds = dataset\n self.dc_strings = []\n self.dcs = []", "title": "" }, { "docid": "9fc40551c1815ce4a8e81e30cef3ada5", "score": "0.57638425", "text": "def __init__(self):\n self.cluster_list = None\n self.pi = 0", "title": "" }, { "docid": "c8ecae1844c6e4866584ab73fe828e3f", "score": "0.57289714", "text": "def data_centers(self, value):\n self._data_centers = value", "title": "" }, { "docid": "6f4470bdc9f2b01453a9df186a1305db", "score": "0.57261467", "text": "def cluster(*args, **kwargs):\n pass", "title": "" }, { "docid": "94a3a8a87aa3a252bba1d126be0a60a3", "score": "0.5717505", "text": "def cluster(self) -> pulumi.Input[Union['Cluster', 'CoreDataArgs']]:\n return pulumi.get(self, \"cluster\")", "title": "" }, { "docid": "d45f23aff49a8787262019fcfaf899d5", "score": "0.5700823", "text": "def cluster_name(self, value: str):\n unwrapped = _unwrap(None, value)\n self._dotnet_instance.ClusterName = next(unwrapped)", "title": "" }, { "docid": "d45f23aff49a8787262019fcfaf899d5", "score": "0.5700823", "text": "def cluster_name(self, value: str):\n unwrapped = _unwrap(None, value)\n self._dotnet_instance.ClusterName = next(unwrapped)", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.56969583", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n 
Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696584", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696368", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "ed1a9cceb46ae5a5fcda4b31e0d50d9c", "score": "0.5696368", "text": "def data_center(self, value):\n Struct._check_type('data_center', value, DataCenter)\n self._data_center = value", "title": "" }, { "docid": "278943bcd1f816eb28547cd57f042788", "score": "0.569224", "text": "def __init__(__self__,\n resource_name: str,\n args: EcsDedicatedHostClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "05b0d4bf1533e51563fd42b3545e2813", "score": "0.5690691", "text": "def __init__(__self__, *,\n distribution: pulumi.Input[str],\n fleet: pulumi.Input['AttachedClusterFleetArgs'],\n location: pulumi.Input[str],\n oidc_config: pulumi.Input['AttachedClusterOidcConfigArgs'],\n platform_version: pulumi.Input[str],\n annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n authorization: Optional[pulumi.Input['AttachedClusterAuthorizationArgs']] = None,\n deletion_policy: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n logging_config: Optional[pulumi.Input['AttachedClusterLoggingConfigArgs']] = None,\n monitoring_config: Optional[pulumi.Input['AttachedClusterMonitoringConfigArgs']] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"distribution\", distribution)\n pulumi.set(__self__, \"fleet\", fleet)\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"oidc_config\", oidc_config)\n pulumi.set(__self__, \"platform_version\", platform_version)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if authorization is not None:\n pulumi.set(__self__, \"authorization\", authorization)\n if deletion_policy is not None:\n pulumi.set(__self__, \"deletion_policy\", deletion_policy)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if logging_config is not None:\n pulumi.set(__self__, \"logging_config\", logging_config)\n if monitoring_config is not None:\n pulumi.set(__self__, \"monitoring_config\", monitoring_config)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "title": "" }, { "docid": "ede91572298ffda92df11ec152eb9568", "score": "0.5652854", "text": "def clusters(self, value):\n self._clusters = value", "title": "" }, { "docid": "8c30154157a7ca70523c5fc8b7ddf013", "score": "0.5650433", "text": "def createCluster(base):\n nbcluster=7\n kmeans=KMeans(nbcluster).fit(base)\n return(kmeans.cluster_centers_)", "title": "" }, { "docid": 
"8d31ecc1216d814ac726b78153cde144", "score": "0.5639186", "text": "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n authorization: Optional[pulumi.Input[pulumi.InputType['AttachedClusterAuthorizationArgs']]] = None,\n deletion_policy: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distribution: Optional[pulumi.Input[str]] = None,\n fleet: Optional[pulumi.Input[pulumi.InputType['AttachedClusterFleetArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n logging_config: Optional[pulumi.Input[pulumi.InputType['AttachedClusterLoggingConfigArgs']]] = None,\n monitoring_config: Optional[pulumi.Input[pulumi.InputType['AttachedClusterMonitoringConfigArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n oidc_config: Optional[pulumi.Input[pulumi.InputType['AttachedClusterOidcConfigArgs']]] = None,\n platform_version: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "title": "" }, { "docid": "c4fa12714bbea01f89f4f2f0d17dda0c", "score": "0.56384283", "text": "def __init__(self, # Cluster\n asn=None # Association\n ):\n if asn is None:\n self.As = []\n else:\n self.As = [asn]", "title": "" }, { "docid": "b9159564611a57c0840d92e6e2776d73", "score": "0.5632172", "text": "def create_cluster (self, objects):\n assert set(objects).issubset(self.raw)\n cid0 = 1 + max([-1]+self.keys())\n self[cid0] = Cluster(objects)\n for obj in objects:\n i = self.raw.index(obj)\n self._cids[i] = cid0\n self.unclustered.remove(obj)", "title": "" }, { "docid": "b7855848b4cc6d5cc106f2cef10dc065", "score": "0.55859375", "text": "def __init__(self,\n hdfs_entity_id=None,\n kerberos_principal=None,\n root_data_directory=None,\n zookeeper_quorum=None,\n ):\n\n # Initialize members of the class\n self.hdfs_entity_id = hdfs_entity_id\n self.kerberos_principal = kerberos_principal\n self.root_data_directory = root_data_directory\n self.zookeeper_quorum = zookeeper_quorum", "title": "" }, { "docid": "f6aaba7d2c1ff2270c1d4b217da450f4", "score": "0.55613387", "text": "def get_clusters(self) -> dict:\n return self.clusters", "title": "" }, { "docid": "9fc7d08cd71284125b07b8f9ec7a6ef2", "score": "0.55487084", "text": "def __init__(self, path: str, k: int) -> None:\n self.data = initialize_data(load_path(path))\n self.centroids = [random.choice(self.data) for _ in range(k)]\n self.clusters = self.update_clusters()", "title": "" }, { "docid": "9112bc44b27e5d0f815b81e755e4db9d", "score": "0.55454266", "text": "def __init__(self):\n\n self.trees = []\n self.clust_ids=[]\n self.random_trees=[]\n self.random_clust_ids=[]\n self.available_methods = ['single','complete','average',\n 'weighted','ward','median','centroid'] \n self.available_metrics = ['braycurtis', 'canberra', 'chebyshev', \n 'cityblock', 'correlation', 'cosine', 'dice', \n 'euclidean', 'hamming', 'jaccard', 'jensenshannon', \n 'kulsinski', 'mahalanobis', 'matching', 'minkowski', \n 'rogerstanimoto', 'russellrao', 'seuclidean', \n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']", "title": "" }, { "docid": "7e4475b25fb5d5c427e1afcc33c2c92e", "score": "0.55434626", "text": "def list(self, **kwargs):\n return self.getResourceManager() \\\n .getSdk() \\\n .datacenters \\\n .list(**kwargs)", "title": "" }, { "docid": "3776af3382823a75e179e23a1aa7a7cf", "score": "0.55367583", "text": "def load_cluster(s, infile):\n print(\"Reading 
cluster definition from: {}\".format(infile))\n df = pd.read_csv(infile, skipinitialspace=True)\n s.cluster_def = df # Cluster definition as a dataframe\n print(\"Cluster Centers: \\n\", df)", "title": "" }, { "docid": "100b08a0146ac429f209672af1dc5374", "score": "0.5526176", "text": "def __init__(self, data_path):\n self.data_path = os.path.abspath(data_path)\n\n config_file = os.path.join(self.data_path, \"config.yaml\")\n self.config = config.load_config(config_file)\n\n self._image_list_file_name = \"image_list_with_gps.tsv\"\n self._clusters_file_name = \"clusters.npz\"\n self._clusters_with_neighbors_file_name = \"clusters_with_neighbors.npz\"\n self._clusters_with_neighbors_geojson_file_name = (\n \"clusters_with_neighbors.geojson\"\n )\n self._clusters_geojson_file_name = \"clusters.geojson\"\n\n io.mkdir_p(self._submodels_path())", "title": "" }, { "docid": "62d93cb4471e7ce7b161e9f97078153c", "score": "0.55228287", "text": "def start(self):\n credentials, subscription_id, managed_identity_id = self._get_credentials()\n\n # Fetch metadata about the instance\n metadata = get_instance_metadata()\n\n # Resource group\n resource_group = self.config.get('resourceGroup', None)\n dss_host_resource_group = metadata[\"compute\"][\"resourceGroupName\"]\n if _is_none_or_blank(resource_group):\n resource_group = dss_host_resource_group\n logging.info(\"Using same resource group as DSS: {}\".format(resource_group))\n\n # Location\n location = self.config.get('location', None)\n if _is_none_or_blank(location):\n location = metadata[\"compute\"][\"location\"]\n logging.info(\"Using same location as DSS: {}\".format(location))\n\n # Consistency checks\n if _is_none_or_blank(resource_group):\n raise Exception(\"A resource group to put the cluster in is required\")\n if _is_none_or_blank(location):\n raise Exception(\"A location to put the cluster in is required\")\n\n # AKS Client\n clusters_client = None\n\n # Credit the cluster to DATAIKU\n if os.environ.get(\"DISABLE_AZURE_USAGE_ATTRIBUTION\", \"0\") == \"1\":\n logging.info(\"Azure usage attribution is disabled\")\n clusters_client = ContainerServiceClient(credentials, subscription_id)\n else:\n policy = UserAgentPolicy()\n policy.add_user_agent('pid-fd3813c7-273c-5eec-9221-77323f62a148')\n clusters_client = ContainerServiceClient(credentials, subscription_id, user_agent_policy=policy)\n\n # check that the cluster doesn't exist yet, otherwise azure will try to update it\n # and will almost always fail\n try:\n existing = clusters_client.managed_clusters.get(resource_group, self.cluster_name)\n if existing is not None:\n raise Exception(\"A cluster with name %s in resource group %s already exists\" % (self.cluster_name, resource_group))\n except ResourceNotFoundError:\n logging.info(\"Cluster doesn't seem to exist yet\")\n\n cluster_builder = ClusterBuilder(clusters_client)\n cluster_builder.with_name(self.cluster_name)\n cluster_builder.with_dns_prefix(\"{}-dns\".format(self.cluster_name))\n cluster_builder.with_resource_group(resource_group)\n cluster_builder.with_location(location)\n cluster_builder.add_tags(self.config.get(\"tags\", None))\n cluster_builder.with_linux_profile() # default is None\n cluster_builder.with_network_profile(service_cidr=self.config.get(\"serviceCIDR\", None),\n dns_service_ip=self.config.get(\"dnsServiceIP\", None),\n load_balancer_sku=self.config.get(\"loadBalancerSku\", None),\n outbound_type=self.config.get(\"outboundType\", None),\n network_plugin=self.config.get(\"networkPlugin\"),\n 
docker_bridge_cidr=self.config.get(\"dockerBridgeCidr\"))\n cluster_builder.with_custom_config(self.config.get(\"customConfig\", None))\n\n if self.config.get(\"useCustomNodeResourceGroup\", False):\n cluster_builder.with_node_resource_group(self.config.get(\"nodeResourceGroup\"))\n\n # Cluster identity\n connection_info = self.config.get(\"connectionInfo\", None)\n cluster_idendity_legacy_use_distinct_sp = self.config.get(\"useDistinctSPForCluster\", False)\n cluster_idendity_legacy_sp = self.config.get(\"clusterServicePrincipal\", None)\n cluster_identity_type = None\n cluster_identity = None\n if not _is_none_or_blank(connection_info) or cluster_idendity_legacy_use_distinct_sp:\n logging.warn(\"Using legacy options to configure cluster identity. Clear them to use the new ones.\")\n if not cluster_idendity_legacy_use_distinct_sp and not _is_none_or_blank(connection_info):\n cluster_sp = connection_info\n elif cluster_idendity_legacy_use_distinct_sp and not _is_none_or_blank(cluster_idendity_legacy_sp):\n cluster_sp = self.config.get(\"clusterServicePrincipal\")\n else:\n raise Exception(\"Legacy options are not complete enough to determine cluster identity settings\")\n cluster_builder.with_cluster_sp_legacy(cluster_service_principal_connection_info=cluster_sp)\n else:\n cluster_identity = self.config.get(\"clusterIdentity\",{\"identityType\":\"managed-identity\"}) \n cluster_identity_type = cluster_identity.get(\"identityType\", \"managed-identity\")\n if cluster_identity_type == \"managed-identity\":\n if cluster_identity.get(\"inheritDSSIdentity\",True):\n logging.info(\"Need to inspect Managed Identity infos from Azure\")\n if metadata is None:\n metadata = get_instance_metadata()\n vm_resource_group = metadata[\"compute\"][\"resourceGroupName\"]\n vm_name = metadata[\"compute\"][\"name\"]\n compute_client = ComputeManagementClient(credentials, subscription_id)\n vm = compute_client.virtual_machines.get(vm_resource_group, vm_name)\n # No choice here but to use the first one\n if managed_identity_id is None:\n managed_identity_id = next(iter(vm.identity.user_assigned_identities.keys()))\n for managed_identity_resource_id, managed_identity_properties in vm.identity.user_assigned_identities.items():\n if managed_identity_id == managed_identity_resource_id or managed_identity_id == managed_identity_properties.client_id:\n break\n logging.info(\"Found managed identity id {}\".format(managed_identity_resource_id))\n cluster_builder.with_managed_identity(managed_identity_resource_id)\n cluster_builder.with_kubelet_identity(managed_identity_resource_id, managed_identity_properties.client_id, managed_identity_properties.principal_id) \n else:\n control_plane_mi = None if cluster_identity.get(\"useAKSManagedIdentity\",True) else cluster_identity[\"controlPlaneUserAssignedIdentity\"]\n cluster_builder.with_managed_identity(control_plane_mi)\n if control_plane_mi is None:\n logging.info(\"Configure cluster with system managed identity.\")\n else:\n logging.info(\"Configure cluster with user assigned identity: {}\".format(control_plane_mi))\n if not cluster_identity.get(\"useAKSManagedKubeletIdentity\",True):\n kubelet_mi = cluster_identity[\"kubeletUserAssignedIdentity\"]\n _,_,mi_subscription_id,_,mi_resource_group,_,_,_,mi_name = kubelet_mi.split(\"/\")\n msiclient = ManagedServiceIdentityClient(credentials, mi_subscription_id)\n mi = msiclient.user_assigned_identities.get(mi_resource_group, mi_name)\n cluster_builder.with_kubelet_identity(kubelet_mi, mi.client_id, mi.principal_id)\n 
logging.info(\"Configure kubelet identity with user assigned identity resourceId=\\\"{}\\\", clientId=\\\"{}\\\", objectId=\\\"{}\\\"\".format(kubelet_mi, mi.client_id, mi.principal_id))\n elif cluster_identity_type == \"service-principal\":\n cluster_builder.with_cluster_sp(cluster_identity[\"clientId\"], cluster_identity[\"password\"])\n logging.info(\"Configure cluster with service principal\")\n else:\n raise Exception(\"Cluster identity type \\\"{}\\\" is unknown\".format(cluster_identity_type))\n\n\n # Fail fast for non existing ACRs to avoid drama in case of failure AFTER cluster is created\n acr_role_id = None\n authorization_client = None\n if cluster_identity_type is not None and cluster_identity is not None:\n if cluster_identity_type == \"managed-identity\" and cluster_identity.get(\"useAKSManagedKubeletIdentity\",True) and not cluster_identity.get(\"inheritDSSIdentity\", True):\n acr_name = cluster_identity.get(\"attachToACRName\", None)\n if not _is_none_or_blank(acr_name):\n # build acr scope\n acr_identifier_splitted = acr_name.split('/')\n acr_subscription_id = subscription_id\n acr_resource_group = resource_group\n if 9 == len(acr_identifier_splitted):\n _,_,acr_subscription_id,_,acr_resource_group,_,_,_,acr_name = acr_identifier_splitted\n elif 2 == len(acr_identifier_splitted):\n acr_resource_group, acr_name = acr_identifier_splitted\n \n authorization_client = AuthorizationManagementClient(credentials, acr_subscription_id)\n acr_scope = \"/subscriptions/{acr_subscription_id}/resourceGroups/{acr_resource_group}/providers/Microsoft.ContainerRegistry/registries/{acr_name}\".format(**locals())\n try:\n acr_roles = list(authorization_client.role_definitions.list(acr_scope,\"roleName eq 'AcrPull'\"))\n except ResourceNotFoundError:\n raise Exception(\"ACR {} not found. Check it exists and you are Owner of it.\".format(acr_scope))\n if 0 == len(acr_roles):\n raise Exception(\"Could not find the AcrPull role on the ACR {}. Check you are Owner of it.\".format(acr_scope))\n else:\n acr_role_id = acr_roles[0].id\n logging.info(\"ACR pull role id: %s\", acr_role_id)\n \n # Try to run a fake role assignment. Depending on the failure type we know if we are Owner or not\n try:\n fake_role_assignment = authorization_client.role_assignments.create(\n scope=acr_scope,\n role_assignment_name=str(uuid.uuid4()),\n parameters= {\n \"properties\": {\n \"role_definition_id\": acr_role_id,\n \"principal_id\": \"00000000-0000-0000-0000-000000000000\",\n },\n },\n )\n except HttpResponseError as e:\n if e.reason == \"Forbidden\" and \"AuthorizationFailed\" in str(e.error):\n raise Exception(\"Cannot create role assignments on ACR {}. Check that your are Owner of it or provide an existing Kubelet identity.\".format(acr_scope))\n elif e.reason == \"Bad Request\" and \"PrincipalNotFound\" in str(e.error):\n logging.info(\"Fake role assignment on ACR looks ok. 
Identity should be allowed to assign roles in further steps.\")\n else:\n raise(e)\n except Exception as e:\n raise(e)\n \n # Sanity check for node pools\n node_pool_vnets = set()\n for idx, node_pool_conf in enumerate(self.config.get(\"nodePools\", [])):\n node_pool_builder = cluster_builder.get_node_pool_builder()\n nodepool_vnet = node_pool_conf.get(\"vnet\", None)\n nodepool_subnet = node_pool_conf.get(\"subnet\", None)\n vnet, _ = node_pool_builder.resolve_network(inherit_from_host=node_pool_conf.get(\"useSameNetworkAsDSSHost\"),\n cluster_vnet=nodepool_vnet,\n cluster_subnet=nodepool_subnet,\n connection_info=connection_info,\n credentials=credentials,\n resource_group=resource_group,\n dss_host_resource_group=dss_host_resource_group)\n node_pool_vnets.add(vnet)\n \n if 1 < len(node_pool_vnets):\n raise Exception(\"Node pools must all share the same vnet. Current node pools configuration yields vnets {}.\".format(\",\".join(node_pool_vnets)))\n elif 0 == len(node_pool_vnets):\n raise Exception(\"You cannot deploy a cluster without any node pool.\")\n \n # Check role assignments for vnet like on ACR for fail fast if not doable\n vnet_id = node_pool_vnets.pop()\n if not vnet_id.startswith(\"/\"):\n vnet_name = vnet_id\n vnet_id = \"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Network/virtualNetworks/{vnet_name}\".format(**locals())\n vnet_role_id = None\n if cluster_identity_type is not None and cluster_identity is not None:\n if cluster_identity_type == \"managed-identity\" and cluster_identity.get(\"useAKSManagedIdentity\",True) and not cluster_identity.get(\"inheritDSSIdentity\", True):\n authorization_client = AuthorizationManagementClient(credentials, subscription_id)\n try:\n vnet_roles = list(authorization_client.role_definitions.list(vnet_id,\"roleName eq 'Contributor'\"))\n except ResourceNotFoundError:\n raise Exception(\"Vnet {} not found. Check it exists and you are Owner of it.\".format(vnet_id))\n if 0 == len(vnet_roles):\n raise Exception(\"Could not find the Contributor role on the vnet {}. Check you are Owner of it.\".format(vnet_id))\n else:\n vnet_role_id = vnet_roles[0].id\n logging.info(\"Vnet contributor role id: %s\", acr_role_id) \n # Try to run a fake role assignment. Depending on the failure type we know if we are Owner or not\n try:\n fake_role_assignment = authorization_client.role_assignments.create(\n scope=vnet_id,\n role_assignment_name=str(uuid.uuid4()),\n parameters= {\n \"properties\": {\n \"role_definition_id\": vnet_role_id,\n \"principal_id\": \"00000000-0000-0000-0000-000000000000\",\n },\n },\n )\n except HttpResponseError as e:\n if e.reason == \"Forbidden\" and \"AuthorizationFailed\" in str(e.error):\n raise Exception(\"Cannot create role assignments on Vnet {}. Check that your are Owner of it or provide an existing Controle Plane identity.\".format(vnet_id))\n elif e.reason == \"Bad Request\" and \"PrincipalNotFound\" in str(e.error):\n logging.info(\"Fake role assignment on Vnet looks ok. 
Identity should be allowed to assign roles in further steps.\")\n else:\n raise(e)\n except Exception as e:\n raise(e)\n\n # Access level\n if self.config.get(\"privateAccess\"):\n cluster_builder.with_private_access(self.config.get(\"privateAccess\"))\n\n cluster_builder.with_cluster_version(self.config.get(\"clusterVersion\", None))\n\n # Node pools\n install_gpu_driver = False\n for idx, node_pool_conf in enumerate(self.config.get(\"nodePools\", [])):\n node_pool_builder = cluster_builder.get_node_pool_builder()\n node_pool_builder.with_idx(idx)\n node_pool_builder.with_vm_size(node_pool_conf.get(\"vmSize\", None))\n vnet = node_pool_conf.get(\"vnet\", None)\n subnet = node_pool_conf.get(\"subnet\", None)\n node_pool_builder.with_network(inherit_from_host=node_pool_conf.get(\"useSameNetworkAsDSSHost\"),\n cluster_vnet=vnet,\n cluster_subnet=subnet,\n connection_info=connection_info,\n credentials=credentials,\n resource_group=resource_group,\n dss_host_resource_group=dss_host_resource_group)\n\n node_pool_builder.with_availability_zones(\n use_availability_zones=node_pool_conf.get(\"useAvailabilityZones\", True))\n\n node_pool_builder.with_node_count(enable_autoscaling=node_pool_conf.get(\"autoScaling\", False),\n num_nodes=node_pool_conf.get(\"numNodes\", None),\n min_num_nodes=node_pool_conf.get(\"minNumNodes\", None),\n max_num_nodes=node_pool_conf.get(\"maxNumNodes\", None))\n\n node_pool_builder.with_mode(mode=node_pool_conf.get(\"mode\", \"Automatic\"),\n system_pods_only=node_pool_conf.get(\"systemPodsOnly\", True))\n\n node_pool_builder.with_disk_size_gb(disk_size_gb=node_pool_conf.get(\"osDiskSizeGb\", 0))\n node_pool_builder.with_node_labels(node_pool_conf.get(\"labels\", None))\n node_pool_builder.with_node_taints(node_pool_conf.get(\"taints\", None))\n node_pool_builder.with_gpu(node_pool_conf.get(\"enableGPU\", False))\n install_gpu_driver |= node_pool_builder.gpu\n node_pool_builder.add_tags(self.config.get(\"tags\", None))\n node_pool_builder.add_tags(node_pool_conf.get(\"tags\", None))\n node_pool_builder.build()\n cluster_builder.with_node_pool(node_pool=node_pool_builder.agent_pool_profile)\n\n\n # Run creation\n logging.info(\"Start creation of cluster\")\n def do_creation():\n cluster_create_op = cluster_builder.build()\n return cluster_create_op.result()\n create_result = run_and_process_cloud_error(do_creation)\n logging.info(\"Cluster creation finished\")\n\n\n # Attach to ACR\n acr_attachment = {}\n if cluster_identity_type is not None and cluster_identity is not None:\n if cluster_identity_type == \"managed-identity\" and cluster_identity.get(\"useAKSManagedKubeletIdentity\",True) and not cluster_identity.get(\"inheritDSSIdentity\", True):\n kubelet_mi_object_id = create_result.identity_profile.get(\"kubeletidentity\").object_id\n logging.info(\"Kubelet Managed Identity object id: %s\", kubelet_mi_object_id)\n if not _is_none_or_blank(acr_role_id):\n logging.info(\"Assign ACR pull role id %s to %s\", acr_role_id, kubelet_mi_object_id)\n role_assignment = authorization_client.role_assignments.create(\n scope=acr_scope,\n role_assignment_name=str(uuid.uuid4()),\n parameters= {\n \"properties\": {\n \"role_definition_id\": acr_role_id,\n \"principal_id\": kubelet_mi_object_id,\n },\n },\n )\n acr_attachment.update({\n \"name\": acr_name,\n \"resource_group\": acr_resource_group,\n \"subscription_id\": acr_subscription_id,\n \"resource_id\": acr_scope,\n \"role_assignment\": role_assignment.as_dict(),\n })\n \n # Attach to VNET to allow LoadBalancers creation\n 
vnet_attachment = {}\n if cluster_identity_type is not None and cluster_identity is not None:\n if cluster_identity_type == \"managed-identity\" and cluster_identity.get(\"useAKSManagedIdentity\",True) and not cluster_identity.get(\"inheritDSSIdentity\", True):\n # And here we are blocked because we cant get the principal id of a System Assigned Managed Id easily\n control_plane_object_id = create_result.identity.principal_id\n logging.info(\"Controle Plane Managed Identity object id: %s\", control_plane_object_id)\n if not _is_none_or_blank(vnet_role_id):\n logging.info(\"Assign Vnet contributolr role id %s to %s\", vnet_role_id, control_plane_object_id)\n vnet_role_assignment = authorization_client.role_assignments.create(\n scope=vnet_id,\n role_assignment_name=str(uuid.uuid4()),\n parameters= {\n \"properties\": {\n \"role_definition_id\": vnet_role_id,\n \"principal_id\": control_plane_object_id,\n },\n },\n )\n vnet_attachment.update({\n \"subscription_id\": subscription_id,\n \"resource_id\": vnet_id,\n \"role_assignment\": vnet_role_assignment.as_dict(),\n })\n\n logging.info(\"Fetching kubeconfig for cluster {} in {}...\".format(self.cluster_name, resource_group))\n def do_fetch():\n return clusters_client.managed_clusters.list_cluster_admin_credentials(resource_group, self.cluster_name)\n get_credentials_result = run_and_process_cloud_error(do_fetch)\n kube_config_content = get_credentials_result.kubeconfigs[0].value.decode(\"utf8\")\n logging.info(\"Writing kubeconfig file...\")\n kube_config_path = os.path.join(os.getcwd(), \"kube_config\")\n with open(kube_config_path, 'w') as f:\n f.write(kube_config_content)\n\n overrides = make_overrides(\n self.config,\n yaml.safe_load(kube_config_content),\n kube_config_path,\n acr_name = None if _is_none_or_blank(acr_attachment) else acr_attachment[\"name\"],\n )\n \n if install_gpu_driver:\n add_gpu_driver_if_needed(kube_config_path)\n\n return [overrides, {\"kube_config_path\": kube_config_path, \"cluster\": create_result.as_dict(), \"acr_attachment\": acr_attachment, \"vnet_attachment\": vnet_attachment}]", "title": "" }, { "docid": "b8f449e42cfecc7726147fce4674793b", "score": "0.55134654", "text": "def __init__(self, absorbers, cluster, cparam=\"HI_Column_Density\", \n cscale=\"log\"):\n self.absorbers = absorbers\n self.cluster = cluster\n \n self.cscale = cscale\n self.cparam = cparam", "title": "" }, { "docid": "2b291c1981130b2e68a33fee8cf03f7a", "score": "0.5506317", "text": "def _add_cluster(self):\n cluster_description = 'This is a good description of the cluster'\n cluster_name = 'strange cluster name'\n vector = [{clustering.DIM_TYPE: clustering.DIM_TYPE_UNIT,\n clustering.DIM_ID: 1,\n clustering.DIM_LOW: 10,\n clustering.DIM_HIGH: 50}]\n new_cluster = clustering.ClusterDTO(None,\n {'name': cluster_name,\n 'description': cluster_description,\n 'version': clustering.ClusterRESTHandler.SCHEMA_VERSIONS[0],\n 'vector': vector})\n return clustering.ClusterDAO.save(new_cluster)", "title": "" }, { "docid": "e682150b8b873c624be25517325c0eba", "score": "0.5503002", "text": "def assign_clusters(self, data):\n \n if data.ndim == 1:\n data = tw_kk['sentiment']\n \n dist_to_centroid = pairwise_distances(data, self.centroids, metric = 'euclidean')\n self.cluster_labels = np.argmin(dist_to_centroid, axis = 1)\n \n return self.cluster_labels", "title": "" }, { "docid": "0e97bcecd9e5773d5b02004eda50f0a8", "score": "0.5498455", "text": "def _add_clusters(self, clusters_number):\n self.clusters_keys = []\n self.description_str = ('This is a 
fairly good description of the'\n ' cluster{}')\n self.name_str = 'strange cluster name{}'\n for index in range(clusters_number):\n new_cluster = clustering.ClusterDTO(None,\n {'name': self.name_str.format(index),\n 'description': self.description_str.format(index),\n 'vector': []})\n self.clusters_keys.append(clustering.ClusterDAO.save(new_cluster))", "title": "" }, { "docid": "f5242248dbd09f51b20e446d99140c23", "score": "0.548669", "text": "def cluster(self, method=KMEANS, **kwargs):\n # The optional documents parameter can be a selective list \n # of documents in the model to cluster.\n documents = kwargs.get(\"documents\", self.documents)\n if not getattr(self, \"lsa\", None):\n # Using document vectors:\n vectors, features = [d.vector for d in documents], self.vector.keys()\n else:\n # Using LSA concept space:\n vectors, features = [self.lsa[d.id] for d in documents], range(len(self.lsa))\n # Create a dictionary of vector.id => Document.\n # We need it to map the clustered vectors back to the actual documents.\n map = dict((v.id, documents[i]) for i, v in enumerate(vectors))\n if method in (KMEANS, \"kmeans\"):\n clusters = k_means(vectors, \n k = kwargs.pop(\"k\", 10),\n iterations = kwargs.pop(\"iterations\", 10),\n features = features, **kwargs)\n if method == HIERARCHICAL:\n clusters = hierarchical(vectors, \n k = kwargs.pop(\"k\", 1),\n iterations = kwargs.pop(\"iterations\", 1000),\n features = features, **kwargs)\n if method in (KMEANS, \"kmeans\"):\n clusters = [[map[v.id] for v in cluster] for cluster in clusters]\n if method == HIERARCHICAL:\n clusters.traverse(visit=lambda cluster: \\\n [cluster.__setitem__(i, map[v.id]) \n for i, v in enumerate(cluster) if not isinstance(v, Cluster)])\n return clusters", "title": "" }, { "docid": "30f62486f3a0a26b385de0f2380f74e9", "score": "0.54840446", "text": "def load_from_json(cluster_name):\n data = Data()\n json_data = data.read_cluster_json(cluster_name)\n if json_data is None:\n return None\n\n ambari_server_vm = []\n service_server_vm_list = []\n ambari_agent_vm_list = []\n\n for vm_json in json_data[\"ambari_server_vm\"]:\n ambari_server_vm.append(VM.load_from_json(vm_json))\n\n for vm_json in json_data[\"service_server_vm_list\"]:\n service_server_vm_list.append(VM.load_from_json(vm_json))\n\n for vm_json in json_data[\"ambari_agent_vm_list\"]:\n ambari_agent_vm_list.append(VM.load_from_json(vm_json))\n\n cluster = Cluster()\n cluster.cluster_name = cluster_name\n cluster.state = json_data[\"state\"]\n cluster.create_time = json_data[\"create_time\"]\n cluster.ambari_server_vm = ambari_server_vm\n cluster.service_server_vm_list = service_server_vm_list\n cluster.ambari_agent_vm_list = ambari_agent_vm_list\n return cluster", "title": "" }, { "docid": "29a4206acf9a35aebb7f6894e5bcd48d", "score": "0.5467023", "text": "def __init__(self, root = None, initialized = None, filename=\"EXAConf\"):\n\n # Version numbers of the current cluster\n # NOTE : the version numbers are somewhat special. The COS\n # and DB version are overwritten by the ones in the EXAConf file\n # (present after initialization). 
The DB version may also be\n # overwritten during initialization (if provided on the CLI\n # or taken from the Docker image).\n # The 'version' parameter is static and denotes the version\n # of the EXAConf python module and EXAConf format\n self.version = \"7.0.10\"\n self.re_version = \"7.0.10\"\n self.set_os_version(self.version)\n self.set_db_version(self.version)\n self.set_re_version(self.re_version)\n self.img_version = self.version\n\n # set root to container_root if omitted\n # --> only true when called from within the container\n if not root:\n self.root = os.path.join(self.container_root, self.etc_dir)\n else:\n self.root = root\n # check if root actually exists\n if not os.path.isdir(self.root):\n raise EXAConfError(\"root directory '%s' does not exist (or is a file)!\" % self.root)\n self.conf_path = os.path.join(self.root, filename)\n # if initialized is true, the given file has to exist!\n if initialized and not os.path.exists(self.conf_path):\n raise EXAConfError(\"EXAConf file '%s' does not exist! Has the cluster been initialized?\" % self.conf_path)\n # read / create configuration\n try:\n self.config = configobj.ConfigObj(self.conf_path,\n list_values = False,\n write_empty_values = True,\n indent_type = ' ')\n except configobj.ConfigObjError as e:\n raise EXAConfError(\"Failed to read '%s': %s\" % (self.conf_path, e))\n\n # update and validate content if EXAConf is already initialized\n # also read current version numbers from config\n if self.initialized():\n self.check_integrity()\n self.validate()\n if \"OSVersion\" in self.config[\"Global\"].scalars:\n self.set_os_version(self.config[\"Global\"][\"OSVersion\"])\n if \"DBVersion\" in self.config[\"Global\"].scalars:\n self.set_db_version(self.config[\"Global\"][\"DBVersion\"])\n if \"REVersion\" in self.config[\"Global\"].scalars:\n self.set_re_version(self.config[\"Global\"][\"REVersion\"])\n # has been introduced later, i. e. 
may be absent\n if \"ImageVersion\" in self.config[\"Global\"].scalars:\n self.img_version = self.config[\"Global\"][\"ImageVersion\"]\n if \"AuthenticationToken\" not in self.config[\"Global\"].scalars:\n self.config[\"Global\"][\"AuthenticationToken\"] = self.generate_authentication_token()\n self.commit()", "title": "" }, { "docid": "48f9a35cb2e3edc63a033591d163e1c9", "score": "0.5465026", "text": "def __init__(\n self,\n dcos_url: str,\n masters: Optional[List[str]],\n slaves: Optional[List[str]],\n public_slaves: Optional[List[str]],\n auth_user: Optional[DcosUser],\n exhibitor_admin_password: Optional[str]=None):\n super().__init__(Url.from_string(dcos_url))\n self.master_list = masters\n self.slave_list = slaves\n self.public_slave_list = public_slaves\n self.auth_user = auth_user\n self.exhibitor_admin_password = exhibitor_admin_password", "title": "" }, { "docid": "d1815365bebaf31cd1c3775d8990513b", "score": "0.54635763", "text": "def __initialize(data, n_clusters):\n # initialize \n data = np.array(data)\n N = data.shape[0]\n\n # initialize the centroids list and add a randomly selected data point to the list\n centroids = []\n centroids.append(data[np.random.randint(N), :])\n\n # compute remaining k - 1 centroids\n for cluster in range(n_clusters - 1):\n\n # initialize a list to store distances of data points from nearest centroid\n distances = []\n \n for data_idx in range(N):\n\n # save current data point's coordinates\n point = data[data_idx, :]\n dist = sys.maxsize\n\n # loop through each centroid to find the minimum distances \n for centroid_idx in range(len(centroids)):\n\n # compute distance of 'point' from each of the previously selected centroid and store the minimum distance\n curr_distance = __euclidean_distance(point, centroids[centroid_idx])\n dist = min(dist, curr_distance)\n \n # add distance to array\n distances.append(dist)\n\n # data point with max distance\n distances = np.array(distances)\n\n # add centroid to array and reset distances\n center = data[np.argmax(distances), :]\n centroids.append(center)\n distances = []\n\n # return array of centroids\n return centroids", "title": "" }, { "docid": "02016390152c7beb4ee7a5d0da6688f7", "score": "0.54546404", "text": "def _init_clusters_centers(self, data_set: np.ndarray, dims: int):\r\n\r\n start = np.mean(data_set) - np.std(data_set)\r\n end = np.mean(data_set) + np.std(data_set)\r\n locs = np.linspace(start, end, self._num_of_clusters)\r\n clusters_centers = np.zeros((dims, self._num_of_clusters))\r\n for i in range(self._num_of_clusters):\r\n clusters_centers[:, i] = (np.ones((dims, 1)) * locs[i])[:, 0]\r\n\r\n return clusters_centers", "title": "" }, { "docid": "94baf63862a151b8c7f4a080b65a9462", "score": "0.5451824", "text": "def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "b7fd7402fdad176b75ff90d786018c54", "score": "0.543329", "text": "def assignCluster(self):\n self.data_notna[\"cluster\"] = [c for c in self.kmeans_labels]\n \n return self.data_notna", "title": "" }, { "docid": "27b5ef8326bf5471361e10591b7186fe", "score": "0.5421967", "text": "def __init__(self, classifier=None, clusterer=None, require_dense=None):\n super(LabelSpacePartitioningClassifier, self).__init__(classifier, require_dense)\n self.clusterer = clusterer\n self.copyable_attrs = ['clusterer', 'classifier', 'require_dense']", "title": "" }, { "docid": "293fb3903f3e0f67ed226c5f42a36987", "score": "0.5411285", "text": "def 
datasetname(self):\n return _pyEMsoft.f90wrap_ebsdclusternamelisttype__get__datasetname(self._handle)", "title": "" }, { "docid": "02cbd90c1d7e58108f82c41c824fe70f", "score": "0.53969055", "text": "def create_cluster(self, cluster_name, cluster_size, services):\n json = self.__proto.create_cluster(cluster_name, cluster_size, services)\n return Cluster(self.__proto, json)", "title": "" }, { "docid": "afae0f379d5195f02aced2b27e270544", "score": "0.53930384", "text": "def cluster(self,data,method=None,metric=None,n_bootstraps=100):\n \n ## try to load the method.\n if method:\n try:\n self.available_methods.index(method)\n except ValueError:\n raise Exception('Selected clustering method not available.')\n else:\n method = 'ward'\n \n \n ## try to load the metric\n if metric:\n try:\n self.available_metrics.index(metric)\n except ValueError:\n raise Exception('Selected distance metric not available.')\n else:\n metric = 'euclidean'\n \n \n rand = data.copy()\n num_zeroes = (data.values==0).sum()\n val_max = data.values.max()\n \n ## Iterate over the number of boostraps\n for x in range(0,n_bootstraps):\n \n ## Cluster on the bootstrapped tree\n boot_tree, boot_clust_id = self.cluster_single(data.sample(data.shape[1],replace=True,axis=1),method,metric)\n \n self.trees.append(boot_tree)\n self.clust_ids.append(boot_clust_id)\n \n ## Cluster on the random tree\n rand_tree, rand_clust_id = self.cluster_single(self.generate_random(rand,val_max,num_zeroes),method,metric)\n \n self.random_trees.append(rand_tree)\n self.random_clust_ids.append(rand_clust_id)\n\n return self.trees, self.random_trees, self.clust_ids, self.random_clust_ids", "title": "" }, { "docid": "01632e2c960b236ca67953a862caae81", "score": "0.5392451", "text": "def _compute_centers(self, data: list):\r\n\r\n for i in range(self._num_of_clusters):\r\n # Initialize location vector for the current center\r\n cluster_center = np.squeeze(np.zeros((data[0].data.shape[0], 1)))\r\n num_of_points = 0\r\n\r\n # Average the location of the cluster center, from the locations of all of the points belonging to it\r\n for point in data:\r\n if point.cluster == i: # The point belongs to the current cluster\r\n for dim in range(point.data.shape[0]):\r\n cluster_center = np.add(cluster_center[:], point.data[:])\r\n\r\n num_of_points += 1\r\n\r\n else:\r\n continue\r\n\r\n if num_of_points != 0:\r\n self._clusters_centers[:, i] = cluster_center * (1/num_of_points)\r\n\r\n else:\r\n self._clusters_centers[:, i] = cluster_center", "title": "" }, { "docid": "cb1f48c190b5d7402cbc533af090cd45", "score": "0.53781056", "text": "def __init__(self, data, name=None):\n \n self.data = data\n self.name = name", "title": "" }, { "docid": "3f1f756252a5626f72bc7663cf72da51", "score": "0.53746456", "text": "def __init__(self, data, name=None):\n \n self.entries = data\n self.length = len(self.entries)\n self.name = name", "title": "" }, { "docid": "58fd67dfee72c446d07415ef5647e35d", "score": "0.5365905", "text": "def populate(self):\n\n name = self.name\n # Assign center\n ctr = self.center\n ctr.assign_cluster(name)\n \n mlist = self.members\n # Assign members\n ctr_nlist = (n for n in ctr.neighbors if not n.cluster)\n for e in ctr_nlist:\n mlist.append(e)\n e.assign_cluster(name)", "title": "" }, { "docid": "2f8a2532061e7fb551794128de9f7223", "score": "0.5360415", "text": "def __init__(__self__, *,\n cluster_id: pulumi.Input[str],\n end_ip_address: pulumi.Input[str],\n start_ip_address: pulumi.Input[str],\n name: Optional[pulumi.Input[str]] = None):\n 
pulumi.set(__self__, \"cluster_id\", cluster_id)\n pulumi.set(__self__, \"end_ip_address\", end_ip_address)\n pulumi.set(__self__, \"start_ip_address\", start_ip_address)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "title": "" }, { "docid": "2434aa9a93256e34733ebf61a7af8494", "score": "0.5356522", "text": "def clusterCommand(args):\n cluster(args, DATA_FILES_PREPARED, MODELS_PATH, RESULTS_PATH)", "title": "" }, { "docid": "ba10a32e7c919be5305e1d1563e9c998", "score": "0.53479135", "text": "def get_clusters(data_url, num_clusters, alg, data_table):\n \n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n \n if alg == 'hier':\n cluster_list = cpc.hierarchical_clustering(singleton_list, num_clusters)\n elif alg == 'kmeans':\n cluster_list = cpc.kmeans_clustering(singleton_list, num_clusters, 5)\t\n \n return cluster_list", "title": "" }, { "docid": "0701d03174de30b41658a3310eba3730", "score": "0.53471804", "text": "def populate(self):\n\n name = self.name\n # Assign center\n ctr = self.center\n ctr.assign_cluster(name)\n\n mlist = self.members\n # Assign members\n ctr_nlist = (n for n in ctr.neighbors if not n.cluster)\n for e in ctr_nlist:\n mlist.append(e)\n e.assign_cluster(name)", "title": "" }, { "docid": "65dd85ac047aa7a3880042c6a20e582a", "score": "0.53469086", "text": "def _specialize_clusters(cls, clusters, **kwargs):\n return clusters", "title": "" } ]
81ee465edfa3c2d3a25c93edc7c36b88
ComplianceViolation a model defined in Swagger
[ { "docid": "ce33712e662119a97a00e7b66fbd10d3", "score": "0.0", "text": "def __init__(self, compliance_type=None, listing_id=None, offer_id=None, sku=None, violations=None): # noqa: E501 # noqa: E501\n self._compliance_type = None\n self._listing_id = None\n self._offer_id = None\n self._sku = None\n self._violations = None\n self.discriminator = None\n if compliance_type is not None:\n self.compliance_type = compliance_type\n if listing_id is not None:\n self.listing_id = listing_id\n if offer_id is not None:\n self.offer_id = offer_id\n if sku is not None:\n self.sku = sku\n if violations is not None:\n self.violations = violations", "title": "" } ]
[ { "docid": "05c207eb191ec3969dc106fb77be54b2", "score": "0.5712235", "text": "def missed_the_point():\n abort(422, messages={'_schema': ['Please, more parameters']})", "title": "" }, { "docid": "824e81f0df0e610efff1bc783f0cb58a", "score": "0.5648701", "text": "def validate(self, apiobj, method, api, param, safe):", "title": "" }, { "docid": "eca383930f91ed797334a884bf386013", "score": "0.55611366", "text": "def validate(self, schema_obj):\n raise NotImplementedError('validate not implemented')", "title": "" }, { "docid": "58a9ccd12e05964ce0fb6226de3c5a85", "score": "0.549453", "text": "def test_schema_bad_prop_01(self):", "title": "" }, { "docid": "582c1d5e117167a3d435334b360922d7", "score": "0.54784805", "text": "def __validate(self):\n pass", "title": "" }, { "docid": "3c9f01ac8a977af31e49fd42d2d9d406", "score": "0.5457327", "text": "def validate(self) -> None:", "title": "" }, { "docid": "df072487cb3c207910c1a5d898b743de", "score": "0.5446801", "text": "def _Validate(vex):\n product_tree = vex.get('product_tree')\n if product_tree is None:\n raise ar_exceptions.InvalidInputValueError(\n 'product_tree is required in csaf document'\n )\n branches = product_tree.get('branches')\n if branches is None:\n raise ar_exceptions.InvalidInputValueError(\n 'branches are required in product tree in csaf document'\n )\n if len(branches) < 1:\n raise ar_exceptions.InvalidInputValueError(\n 'at least one branch is expected in product tree in csaf document'\n )\n for product in branches:\n name = product.get('name')\n if name is None:\n raise ar_exceptions.InvalidInputValueError(\n 'name is required in product tree in csaf document'\n )\n if len(name.split('/')) < 3:\n raise ar_exceptions.InvalidInputValueError(\n 'name of product should be artifact path, showing repository,'\n ' project, and package/image'\n )\n\n vulnerabilities = vex.get('vulnerabilities')\n if vulnerabilities is None:\n raise ar_exceptions.InvalidInputValueError(\n 'vulnerabilities are required in csaf document'\n )\n if len(vulnerabilities) < 1:\n raise ar_exceptions.InvalidInputValueError(\n 'at least one vulnerability is expected in csaf document'\n )\n for vuln in vulnerabilities:\n _ValidateVulnerability(vuln)", "title": "" }, { "docid": "849fed65937422141ec0879a792a5e2b", "score": "0.5438119", "text": "def _validate(self, obj):\n pass", "title": "" }, { "docid": "2ccc034a6395c6a21e604e431bfb14f8", "score": "0.5438075", "text": "def validate_again(self) -> None:\n _, _, validation_error = pydantic.validate_model(type(self), self.dict())\n if validation_error:\n raise validation_error", "title": "" }, { "docid": "c00942a95101738e39d68e1b5f46f7e8", "score": "0.542996", "text": "def test_schema_bad_prop_03(self):", "title": "" }, { "docid": "2de9b24268528d8973426dd2e78e4c1b", "score": "0.540572", "text": "def test_rest_validation_invalid():\n data, errors = AnnoSchema().load({u'bounded': u'uncertain',\n u'invalid': u'false',\n u'extended': u'true',\n u'change': u'true',\n u'stative': u'unsure',\n u'notes': u''})\n assert errors\n assert 'stative' in errors", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.53883135", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.53883135", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e81bc60aad1f367e4047d4be3bab89f", "score": "0.53883135", "text": "def validate(self):\n raise NotImplementedError", "title": "" }, { "docid": 
"820b259d730b7d81bd90fa873d4b4dbf", "score": "0.53837645", "text": "def test_rest_validation_ok():\n data, errors = AnnoSchema().load({u'bounded': u'true',\n u'invalid': u'false',\n u'extended': u'uncertain',\n u'change': u'uncertain',\n u'stative': u'false',\n u'notes': u'something'})\n assert not errors\n assert data == {'bounded': BooleanUnsure.true,\n 'invalid': BooleanUnsure.false,\n 'extended': BooleanUnsure.uncertain,\n 'change': BooleanUnsure.uncertain,\n 'stative': BooleanUnsure.false,\n 'notes': u'something',\n 'annotation_idx': 0}", "title": "" }, { "docid": "e29893c07cfd41b5f5fb820c60cde210", "score": "0.53702796", "text": "def validate(self):\n raise NotImplementedError()", "title": "" }, { "docid": "bd6870841a8dd29039819eefd1f38683", "score": "0.5355786", "text": "def validate(self, obj):\n return", "title": "" }, { "docid": "67fbdc15736367cc75a0c054d95f5549", "score": "0.5355657", "text": "def test_invalid_input():\n\n with pytest.raises(ValidationError):\n ApplicationScalingModelIn(opt_dsl_code=\"\", model={\"name\": \"amdahl\", \"G\": 0.5})\n\n with pytest.raises(ValidationError):\n ApplicationScalingModelIn(opt_dsl_code=\"\", model={\"name\": \"amdahl\"})\n\n with pytest.raises(ValidationError):\n ApplicationScalingModelIn(opt_dsl_code=\"\", model={\"name\": \"noop\", \"F\": 0.5})", "title": "" }, { "docid": "4daa2fe1fbe1be3b49af3fe74ebdae75", "score": "0.5341636", "text": "def validate(spec):\n\n def validate_decorator(func):\n @functools.wraps(func)\n def wrapper_validate(*args, **kwargs):\n try:\n data = request.get_json()\n except BadRequest:\n result = \"The request body is not a well-formed JSON.\"\n log.debug(\"create_circuit result %s %s\", result, 400)\n raise BadRequest(result) from BadRequest\n if data is None:\n result = \"The request body mimetype is not application/json.\"\n log.debug(\"update result %s %s\", result, 415)\n raise UnsupportedMediaType(result)\n\n validator = RequestValidator(spec)\n openapi_request = FlaskOpenAPIRequest(request)\n result = validator.validate(openapi_request)\n if result.errors:\n errors = result.errors[0]\n if hasattr(errors, \"schema_errors\"):\n schema_errors = errors.schema_errors[0]\n error_log = {\n \"error_message\": schema_errors.message,\n \"error_validator\": schema_errors.validator,\n \"error_validator_value\": schema_errors.validator_value,\n \"error_path\": list(schema_errors.path),\n \"error_schema\": schema_errors.schema,\n \"error_schema_path\": list(schema_errors.schema_path),\n }\n log.debug(\"error response: %s\", error_log)\n error_response = f\"{schema_errors.message} for field\"\n error_response += (\n f\" {'/'.join(map(str,schema_errors.path))}.\"\n )\n else:\n error_response = (\n \"The request body mimetype is not application/json.\"\n )\n raise BadRequest(error_response) from BadRequest\n return func(*args, data=data, **kwargs)\n\n return wrapper_validate\n\n return validate_decorator", "title": "" }, { "docid": "fe4a3f293f8a68c771d6b63ec262eb3e", "score": "0.53075016", "text": "def test_validate_api_client(self):\n\n renderer = OpenApiRenderer(self.template_file, api_client=True)\n render = renderer.render()\n self.validate_spec(render)", "title": "" }, { "docid": "7ffd3f218392f5c0ecff8311060fc7ba", "score": "0.52981657", "text": "async def validation_exception_handler(request, exc):\n return JSONResponse(\n status_code=status.HTTP_400_BAD_REQUEST,\n content=jsonable_encoder({'detail': exc.errors(), 'body': exc.body}),\n )", "title": "" }, { "docid": "2fcd007fb79d249894dd80ca11093e8e", "score": 
"0.5293079", "text": "def validate_document(self, payload):", "title": "" }, { "docid": "11be8bb73df3df5b8e6364da1f9ecd4e", "score": "0.5283743", "text": "def validate(self, obj) -> None:\n\n if hasattr(self.validator, 'is_valid') and hasattr(obj, '__dict__'):\n kwargs = obj.__dict__.copy()\n kwargs.pop('_disable_patching', '')\n self._vaa_validation(**kwargs)\n else:\n self._simple_validation(obj)", "title": "" }, { "docid": "fb9926e5338cdf20c607058f6205b3c0", "score": "0.52745473", "text": "def validate_representation(self, value: Any) -> None:\n pass # pragma: no cover", "title": "" }, { "docid": "58057215db77f3dad53dbbc76f856297", "score": "0.52731204", "text": "def test_07_non_scoped_change_reason(self):\n test_pr = {'synopsis': 'foo', 'enum-fld': 'bar',}\n self.vf_out = [[], []]\n out = self.db._validate(test_pr, 'fields-cr')\n self.assertEqual(self.fname_in,\n 'synopsis enum-fld'.split())\n self.assertEqual(out, {})\n self.assertEqual(self.check_cr_in, [True, True])", "title": "" }, { "docid": "e2e88e0b99599ebe74e0c34b7217d64e", "score": "0.52592397", "text": "def _validate(self):\n pass", "title": "" }, { "docid": "5caac75a2db673aa33f8828153ca07db", "score": "0.5255004", "text": "def test_strict_validation_passes(self) -> None:\n self.question.validate()", "title": "" }, { "docid": "582417513f66df4f1728795a376c3c29", "score": "0.52415764", "text": "def custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title='sonoUno Server',\n description=description,\n version=__version__,\n tags=tags_metadata,\n routes=app.routes,\n )\n for path in openapi_schema['paths']:\n for method in openapi_schema['paths'][path]:\n if openapi_schema['paths'][path][method]['responses'].get('422'):\n openapi_schema['paths'][path][method]['responses'][\n '400'\n ] = openapi_schema['paths'][path][method]['responses']['422']\n openapi_schema['paths'][path][method]['responses'].pop('422')\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "title": "" }, { "docid": "2945b04a49e40fd9d7884217fe20a700", "score": "0.5229001", "text": "def validate():\n # TODO(Guodong) not now\n pass", "title": "" }, { "docid": "79e21de7c6c9c52fb00dd5ffe2ce6a23", "score": "0.5211888", "text": "def _ValidateVulnerability(vuln):\n cve_name = vuln.get('cve')\n if cve_name is None:\n raise ar_exceptions.InvalidInputValueError(\n 'cve is required in all vulnerabilities in csaf document'\n )\n product_status = vuln.get('product_status')\n if product_status is None:\n raise ar_exceptions.InvalidInputValueError(\n 'product_status is required in all vulnerabilities in csaf document'\n )\n if len(product_status) < 1:\n raise ar_exceptions.InvalidInputValueError(\n 'at least one status is expected in each vulnerability'\n )\n for status in product_status:\n if status not in POSSIBLE_PRODUCT_STATUS:\n raise ar_exceptions.InvalidInputValueError(\n 'Invalid product status passed in {}. Product status should be one'\n ' of {}'.format(status, POSSIBLE_PRODUCT_STATUS)\n )\n flags = vuln.get('flags')\n if flags is not None:\n for flag in flags:\n label = flag.get('label')\n if label not in POSSIBLE_JUSTIFICATION_FLAGS:\n raise ar_exceptions.InvalidInputValueError(\n 'Invalid flag label passed in {}. 
Label should be one of {}'\n .format(label, POSSIBLE_JUSTIFICATION_FLAGS)\n )\n remediations = vuln.get('remediations')\n if remediations is not None:\n for remediation in remediations:\n category = remediation.get('category')\n if category not in POSSIBLE_REMEDIATION_CATEGORIES:\n raise ar_exceptions.InvalidInputValueError(\n 'Invalid remediation category passed in {}. Label should be one'\n ' of {}'.format(category, POSSIBLE_REMEDIATION_CATEGORIES)\n )", "title": "" }, { "docid": "13d08991bbbd9979194e4eca245bb6e0", "score": "0.521021", "text": "def validate(self):\n BaseSpecification.validate(self)\n\n if not self.params_dict.get('allowed_types', None):\n raise BIValueError('Allowed resources types for collection \"%s\" is not specified!' % self.type_name())", "title": "" }, { "docid": "97fdf9773dc4188912c31131ea10a952", "score": "0.52077186", "text": "def test_validate_invalid_version(self) -> None:\n self.observed_object.version = -2\n with self.assertRaisesRegex(\n utils.ValidationError,\n 'Expected version to be non-negative, received -2'):\n self.observed_object.validate()\n\n self.observed_object.version = 'invalid' # type: ignore[assignment]\n with self.assertRaisesRegex(\n utils.ValidationError,\n 'Expected version to be int, received invalid'):\n self.observed_object.validate()", "title": "" }, { "docid": "67c05fe5420f35181775909037b0b5b4", "score": "0.51926947", "text": "def deltaNeutralValidation(self, reqId, underComp):\n pass", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.51881343", "text": "def validate(self):", "title": "" }, { "docid": "51174fbc5a3a2077f5434b17dafc6ac3", "score": "0.51881343", "text": "def validate(self):", "title": "" }, { "docid": "bbb609ab15fe5dc58d07bcd9dd7c8b89", "score": "0.5179807", "text": "def test_04_non_scoped_errors(self):\n test_pr = {'synopsis': 'foo', 'enum-fld': 'bar',}\n self.vf_out = [[1], []]\n out = self.db._validate(test_pr, 'fields')\n self.assertEqual(self.fname_in,\n 'synopsis enum-fld'.split())\n self.assertEqual(out,\n {'enum-fld': [1],})", "title": "" }, { "docid": "694983f416db61327d280d6cdc65598e", "score": "0.5177091", "text": "def validate(self, data):\n try:\n return jsonschema.validate(data, self.schema)\n except jsonschema.ValidationError as e:\n raise ValidationError(e.message)\n except Exception as e:\n raise ValidationError(e)", "title": "" }, { "docid": "9c8df359113bfbfe9bd4d14f8d37336c", "score": "0.51708364", "text": "def validate(self, data):\n if (data['provision_of_the_act'].provision == 'Section 6 (5) (d) (ii) (A)' and\n ('schedule_d_sheet_index' not in data or\n data['schedule_d_sheet_index'] is None)):\n raise serializers.ValidationError({\n 'schedule_d_index': ComplianceReportValidation.missing\n })\n\n if (data['provision_of_the_act'].provision == 'Section 6 (5) (d) (ii) (B)' and\n ('intensity' not in data or data['intensity'] is None)):\n raise serializers.ValidationError({\n 'intensity': ComplianceReportValidation.missing\n })\n\n if (data['provision_of_the_act'].provision == 'Section 6 (5) (c)' and\n ('fuel_code' not in data or data['fuel_code'] is None)):\n raise serializers.ValidationError({\n 'fuel_code': ComplianceReportValidation.missing\n })\n\n if (('intensity' in data and data['intensity'] is not None) and\n ('fuel_code' in data and data['fuel_code'] is not None)):\n raise serializers.ValidationError(\n {\n 'fuel_code': ComplianceReportValidation.extra_value,\n 'intensity': ComplianceReportValidation.extra_value\n }\n )\n\n if (('intensity' in data and 
data['intensity'] is not None) and\n data['provision_of_the_act'].provision != 'Section 6 (5) (d) (ii) (B)'):\n raise serializers.ValidationError({\n 'intensity': ComplianceReportValidation.extra_value\n })\n\n return data", "title": "" }, { "docid": "1037c9e7dfe5e23f081c180c3c36f105", "score": "0.5169227", "text": "def report_violation(message, raise_on_violation):\n if raise_on_violation:\n raise InterfaceConformanceError(message)\n else:\n logging.warning(message)", "title": "" }, { "docid": "8f38ce3947ea8d27c82c574446278951", "score": "0.5167799", "text": "def validate(self, validation, context):\n pass", "title": "" }, { "docid": "bc06d68d80cbac3f3b25873dd84dd0fb", "score": "0.5166317", "text": "def _validate(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "fa2a43c09ca9941ad4ef37ab2358370a", "score": "0.51634943", "text": "def validate(schema):\n def decorator(method):\n @functools.wraps(method)\n def wrapper(self, req, resp, *args, **kwargs):\n try:\n req.context[\"marshmallow\"] = schema.load(req.media)\n return method(self, req, resp, *args, **kwargs)\n except marshmallow.ValidationError as e:\n return falcon.HTTP_400, {\n \"message\": \"request failed validation\",\n \"fields\": sorted([f.name for f in e.fields]),\n \"errors\": e.messages,\n }\n return wrapper\n return decorator", "title": "" }, { "docid": "4d277904aaed5fcf903c840db9052cb8", "score": "0.51603395", "text": "def _validate(self):\n self._check_planning_interventions_exist()", "title": "" }, { "docid": "169c539958b89514eac181ae66a39651", "score": "0.5153217", "text": "def validation_error(e):\n return bad_request(e.args[0])", "title": "" }, { "docid": "075d61a3e2049ba0034d1e44db2aedc8", "score": "0.51504123", "text": "def validate(self) -> None:\n pass", "title": "" }, { "docid": "9a56a01526589fdc7c0ae4977a731c14", "score": "0.5147306", "text": "def test_parsing_fails(broken):\n with pytest.raises(SpecError):\n spec = OpenAPI(broken)", "title": "" }, { "docid": "57728d1dda8b31ec0644e92533dee439", "score": "0.5138149", "text": "def patch(self):\n self.request.errors.add('body', 'data', 'Complaint addition not implemented')\n self.request.errors.status = 403", "title": "" }, { "docid": "f08f7dc6509078759d132f3221ef1bdb", "score": "0.51345414", "text": "def find_violations(self, project, enabled_apis):\n\n violating_apis = _RULE_MODE_METHODS[self.rule['mode']](\n rule_apis=self.rule['services'],\n enabled_apis=enabled_apis)\n\n if violating_apis:\n yield self.RuleViolation(\n resource_name=project.display_name,\n resource_type=project.type,\n resource_id=project.id,\n full_name=project.full_name,\n rule_name=self.rule_name,\n rule_index=self.rule_index,\n violation_type=VIOLATION_TYPE,\n apis=tuple(violating_apis),\n resource_data=project.data\n )", "title": "" }, { "docid": "aaa9dc94190fc0e48a57e4b0b6804b19", "score": "0.513041", "text": "def Validate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "31dcaccdbc4c760cd53df1df0a0a8720", "score": "0.51172465", "text": "def validate(self, attrs):\n\n if attrs['loan_type'] in (Monthly.VA, Monthly.VA_HB) :\n\n if not attrs.get('va_status') :\n raise serializers.ValidationError(\"va_status is required if loan_type is VA or VA-HB\")\n\n elif attrs.get('va_status') not in (Upfront.DISABLED) :\n\n if attrs.get('va_first_use') is None:\n raise serializers.ValidationError(\"va_first_use is required if va_status is 
not DISABLED\")\n\n elif attrs.get('va_first_use') not in (0, 1):\n raise serializers.ValidationError(\"va_first_use needs to be 0 or 1.\")\n\n if attrs.get('rate_structure') == Monthly.ARM :\n\n if not attrs.get('arm_type') :\n raise serializers.ValidationError(\"arm_type is required if rate_structure is ARM\")\n\n elif attrs.get('arm_type') == self.ARM_3_1:\n raise serializers.ValidationError(\"No mortgage insurance data for 3/1 ARM\")\n\n return attrs", "title": "" }, { "docid": "ed6638d362151e57aad8b2211dc40131", "score": "0.51166135", "text": "def test_bad_visibility_reason(self, client, entities, endpoint):\n # Setup inputs\n inputs = ENTITY_PARAMS['fields'][endpoint].copy()\n model_cls = ENDPOINT_ENTITY_MAP.get(endpoint)\n entity = entities.get(model_cls)[0]\n _add_foreign_keys(inputs, entity)\n url = endpoint\n\n # Send request with bad value\n inputs.update({\"visibility_reason\": \"foobar\"})\n resp = client.post(url, data=json.dumps(inputs),\n headers={'Content-Type': 'application/json'})\n\n body = json.loads(resp.data.decode('utf-8'))\n assert body['_status']['code'] == 400\n assert 'Must be one of' in body['_status']['message']", "title": "" }, { "docid": "26ba9029a0add5fdf6f8538279732af6", "score": "0.51138943", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a7f497ee86aa8e57bde4045657abfbac", "score": "0.5104914", "text": "def validate(self, request):\n raise NotImplementedError()", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "a1f37e07207bfb9e1ad5f622b980dcc8", "score": "0.5099007", "text": "def validate(self):\n pass", "title": "" }, { "docid": "343fff01564dea5849cb5ef138884206", "score": "0.50976413", "text": "def test_xapi_statement_api_with_invalid_payload(self):\n video = VideoFactory()\n jwt_token = UserAccessTokenFactory()\n\n data = {\"foo\": \"bar\"}\n\n response = self.client.post(\n f\"/xapi/video/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n data=json.dumps(data),\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 400)\n content = json.loads(response.content)\n self.assertEqual(\n content,\n {\n \"verb\": [\"This field is required.\"],\n \"context\": [\"This field is required.\"],\n },\n )", 
"title": "" }, { "docid": "77edf6c1659f54e197dbfe4aade5db46", "score": "0.50824386", "text": "def validate(self):\n # TODO create code to validate\n pass", "title": "" }, { "docid": "c85c70f635f83df036636b4d86045253", "score": "0.5071943", "text": "def test_validate():\n schema = {\n 'x': {\n 'items': [{'type': ['integer', 'float']}, {'type': ['integer', 'float']}],\n 'required': True,\n 'type': 'list',\n },\n 'y': {\n 'items': [{'type': ['integer', 'float']}, {'type': ['integer', 'float']}],\n 'required': False,\n 'type': 'list',\n },\n }\n pass_doc_1 = {'x': [3, -4.0], 'y': [1e-6, 1e6]}\n pass_doc_2 = {'x': [-1e6, 1e6]}\n fail_doc_1 = {'x': [1, 2, 3]}\n\n fail_result = {'x': ['length of list should be 2, it is 3']} # act\n\n assert utils_data.validate(pass_doc_1, schema) == {}\n assert utils_data.validate(pass_doc_2, schema) == {}\n assert utils_data.validate(fail_doc_1, schema) == fail_result", "title": "" }, { "docid": "0ea75d97d6bf0d9e8f7d1511e86a17d2", "score": "0.5070296", "text": "def validate(self):\n if not isinstance(self.exp_id, python_utils.BASESTRING):\n raise utils.ValidationError(\n 'Expected exp_id to be a str, received %s' % self.exp_id)\n\n if not isinstance(self.version, int):\n raise utils.ValidationError(\n 'Expected version to be an int, received %s' % self.version)", "title": "" }, { "docid": "7d8a5c7062abc065cab6b6c76f3c39c5", "score": "0.5068768", "text": "def test_rest_validation_missing():\n data, errors = AnnoSchema().load({u'bounded': u'false',\n u'invalid': u'false',\n u'extended': u'false',\n u'change': u'false',\n u'notes': u''})\n assert errors\n assert 'stative' in errors", "title": "" }, { "docid": "a16b8ea383b2b5b9e061b5463d87a31f", "score": "0.50644183", "text": "def validate(self) -> None:\n self.config.validate_annotations()", "title": "" }, { "docid": "be9b2592d3c5e08056d188fc70e46b8f", "score": "0.5042329", "text": "def test_model_validation_with_condition(self):\n obj1 = UniqueConstraintConditionProduct.objects.create(name=\"p1\", color=\"red\")\n obj2 = UniqueConstraintConditionProduct.objects.create(name=\"p2\")\n UniqueConstraintConditionProduct(\n name=obj1.name, color=\"blue\"\n ).validate_constraints()\n msg = \"Constraint โ€œname_without_color_uniqโ€ is violated.\"\n with self.assertRaisesMessage(ValidationError, msg):\n UniqueConstraintConditionProduct(name=obj2.name).validate_constraints()", "title": "" }, { "docid": "7bda36807b6853bbe44c8d8ad5ebb307", "score": "0.50408095", "text": "def test_pydantic_model(cls, is_ok, data):\n\n if not is_ok:\n with pytest.raises(ValidationError):\n cls(**data)\n else:\n cls(**data)", "title": "" }, { "docid": "bb43c71161cc6d5cc0834222a5261e6b", "score": "0.5038348", "text": "def validate(self):\n BaseSpecification.validate(self)\n\n if not self.params_dict.get('connecting_type', None):\n raise BIValueError('Connecting resource type is not specified for connection \"%s\"!' % self.type_name())\n\n if not self.params_dict.get('connected_type', None):\n raise BIValueError('Connected resource type is not specified for connection \"%s\"!' 
% self.type_name())", "title": "" }, { "docid": "1009bb18da587b3b7fa7cfb071712837", "score": "0.50367206", "text": "def compliance(cls, start: int = 0, stop: int = -1, body: int = -1):\n return cls(\n name=\"compliance\",\n analysis_type=\"structural\",\n start=start,\n stop=stop,\n body=body,\n )", "title": "" }, { "docid": "482b01cba688a05b7662f9956f6d2798", "score": "0.50328064", "text": "def test_update_not_permitted(self):\n response = self.update_not_permitted()\n res = response.data[\"detail\"]\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(res, self.detail)", "title": "" }, { "docid": "e44e493123775fe10375544f2855d115", "score": "0.50301397", "text": "def test_payload_missing_scholarly_field_expected_fail(self):\n payload = {\n \"references\": [\n {\n \"id\": \"item2\",\n \"title\": \"Some older article about some domokuns.\",\n \"author\": [\n {\"given\": \"chevy\",\n \"family\": \"chaser\"}\n ],\n \"type\": \"book\"\n },\n {\n \"id\": \"item3\",\n \"title\": \"Some article about some domokun lovers.\",\n \"author\": [\n {\"given\": \"kim\",\n \"family\": \"possibly\"}\n ],\n \"type\": \"book\",\n \"doi\": \"asdflkj209asdlkfj209sadfkj2\"\n }\n ]\n }\n\n # call articles endpoint validation on payload\n self.assertFalse(articles_endpoint_validator.validate(payload))", "title": "" }, { "docid": "27104ec78d134853d881a685ca3e6224", "score": "0.5029942", "text": "def test_create_enforces_required_fields(self):\n service = Service.objects.order_by('?').first()\n self.authenticateAsProjectOwner(service.project)\n response_data = self.assertBadRequest(\n \"/services/{}/requirements/\".format(service.pk),\n \"POST\",\n dict()\n )\n required_fields = {'resource', 'amount'}\n self.assertCountEqual(response_data.keys(), required_fields)\n for name in required_fields:\n self.assertEqual(response_data[name][0]['code'], 'required')", "title": "" }, { "docid": "c457d4e6ef41137fd883ed5a35d7bfa0", "score": "0.50280356", "text": "def test_doc_first():\n\n PositiveInt = vtype('PositiveInt', int, {'should be positive': lambda x: x >= 0})\n\n assert isinstance(1, PositiveInt)\n assert not isinstance(-1, PositiveInt)\n\n with pytest.raises(ValidationError):\n PositiveInt.validate('x', -1)\n\n assert PositiveInt.has_valid_type(-1)\n assert not PositiveInt.has_valid_value(-1)", "title": "" }, { "docid": "3c873a30aceefd9246cf59d524c10db9", "score": "0.5027887", "text": "def validate_response(response_spec, op, response):\n if not op.swagger_spec.config['validate_responses']:\n return\n\n validate_response_body(op, response_spec, response)\n validate_response_headers(op, response_spec, response)", "title": "" }, { "docid": "790292c27072e655e4a0c0b7317af925", "score": "0.50257933", "text": "def sanitation_code_violation(self, request):\n serializer = serializers.SanitationCodeViolationIncidentCreateSerializer(data=self.request.data,\n context={'request': request})\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(status=status.HTTP_201_CREATED)", "title": "" }, { "docid": "c2496c80e4353ee01a3631f1f30416fa", "score": "0.5023065", "text": "def validate(self, ops, attrs):\n raise RuntimeError(f\"'{self.__class__.__name__}.validate' has not been implemented\")", "title": "" }, { "docid": "5367e27eafb6b60ca742293d856e3200", "score": "0.501006", "text": "def test_falcon_exception_formatting(self):\n expected_msg = (\n 'deckhand:create_cleartext_documents is disallowed by policy')\n\n with mock.patch.object(\n policy, '_do_enforce_rbac',\n 
spec_set=policy._do_enforce_rbac) as m_enforce_rbac:\n m_enforce_rbac.side_effect = falcon.HTTPForbidden(\n description=expected_msg)\n resp = self.app.simulate_put(\n '/api/v1.0/buckets/test/documents',\n headers={'Content-Type': 'application/x-yaml'}, body=None)\n\n expected = {\n 'status': 'Failure',\n 'kind': 'Status',\n 'code': '403 Forbidden',\n 'apiVersion': 'v1.0',\n 'reason': 'Unspecified',\n 'retry': False,\n 'details': {\n 'errorType': 'HTTPForbidden',\n 'errorCount': 1,\n 'messageList': [\n {\n 'message': expected_msg,\n 'error': True\n }\n ]\n },\n 'message': expected_msg,\n 'metadata': {}\n }\n body = yaml.safe_load(resp.text)\n\n self.assertEqual(403, resp.status_code)\n self.assertEqual(expected, body)", "title": "" }, { "docid": "3c598efb4d88bad9a5f12a376953acb8", "score": "0.5003827", "text": "def test_validateFieldValidation(self):\n messageType = self.messageType()\n self.assertRaises(\n ValidationError,\n messageType._serializer.validate,\n {\"key\": 1, \"value\": None, \"message_type\": \"myapp:mysystem\"},\n )", "title": "" }, { "docid": "f5d8de5b590c25d8e8b1bc660b736d50", "score": "0.5000737", "text": "def validate(self, data):\n service = self.context['service']\n if service.is_active:\n raise serializers.ValidationError('You need to finish the Service after write a review.')\n data['service'] = service\n return data", "title": "" }, { "docid": "e36c548e38b622fcc508423cf5940dfd", "score": "0.50005066", "text": "def Check(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "db9d296fd3aaaefc70f2113b7aa294ec", "score": "0.49994475", "text": "def validator(self):\n pass", "title": "" }, { "docid": "838ccdc38f4b652f9a3f6d0abe447c57", "score": "0.49872363", "text": "def test_validateStandardFields(self):\n messageType = self.messageType()\n messageType._serializer.validate(\n {\n \"key\": 1,\n \"value\": 2,\n \"message_type\": \"myapp:mysystem\",\n \"task_level\": \"/\",\n \"task_uuid\": \"123\",\n \"timestamp\": \"xxx\",\n }\n )", "title": "" }, { "docid": "a1c8d78f60980f6fa37e0f80b7357583", "score": "0.49865636", "text": "def api_error(self, message):\n self.add_error(NON_FIELD_ERRORS, message)", "title": "" }, { "docid": "138a0164994f187deffeceea078c8777", "score": "0.49849707", "text": "def test_swagger_swagger_json(self):\n pass", "title": "" }, { "docid": "c311dc8b5da728437fa41ee78f5dfeb7", "score": "0.4981666", "text": "def validate_response(self, response):\n openapi.response_validator(\n self.settings).validate(response).raise_for_errors()", "title": "" }, { "docid": "14a33cd1ab6d7b0e9d62e33ce615bc61", "score": "0.49785152", "text": "async def test_validate_mismatched_resource_type(\n workflow: FhirWorkflow, nats_client, monkeypatch\n):\n workflow.message[\"resourceType\"] = \"Encounter\"\n with monkeypatch.context() as m:\n m.setattr(nats, \"get_nats_client\", nats_client)\n with pytest.raises(ValidationError):\n await workflow.validate()", "title": "" }, { "docid": "6742b831f92a1fe0ceb2ebec572c843a", "score": "0.49749914", "text": "def test_validate_only_allowed_fields_can_be_updated(self):\n company = CompanyFactory()\n item = PipelineItemFactory(adviser=self.user)\n url = _pipeline_item_detail_url(item.pk)\n response = self.api_client.patch(url, data={'company': company.pk})\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.json() == {'company': ['field not allowed to be update.']}", "title": "" }, { "docid": "e6c06dc4e67f1cd4e19f7f8d4ebfdecf", "score": "0.49732333", 
"text": "def test_incorrect(self):\n serializer = IncorrectSerializer(data=self.data)\n assert serializer.is_valid()\n with self.assertRaises(me_ValidationError):\n serializer.save()", "title": "" }, { "docid": "3fc79443b0ec38099c589964c01b6b03", "score": "0.49714208", "text": "def test_create(self):\n validator = Required(message='Custom error')\n self.assertIsInstance(validator, Required)", "title": "" }, { "docid": "31c61b56e71d3e2a2abc64861b6b1a3b", "score": "0.49669296", "text": "def test_validation_checking(self):\r\n\r\n nt.assert_raises(ValueError, Axis, type='panda')", "title": "" }, { "docid": "f19bb801e35fc5f450238257753fd24c", "score": "0.49608254", "text": "def validate(obj, schema_version=None, schema_key=None, missing_ok=False):\n schema_key = schema_key or obj.get(\"schemaKey\")\n if schema_key is None:\n raise ValueError(\"Provided object has no known schemaKey\")\n schema_version = schema_version or obj.get(\"schemaVersion\")\n if schema_version not in ALLOWED_VALIDATION_SCHEMAS and schema_key in [\n \"Dandiset\",\n \"PublishedDandiset\",\n \"Asset\",\n \"PublishedAsset\",\n ]:\n raise ValueError(\n f\"Metadata version {schema_version} is not allowed. \"\n f\"Allowed are: {', '.join(ALLOWED_TARGET_SCHEMAS)}.\"\n )\n klass = getattr(models, schema_key)\n try:\n klass(**obj)\n except pydantic.ValidationError as exc:\n if not missing_ok:\n raise\n reraise = False\n messages = []\n for el in exc.errors():\n if el[\"msg\"] != \"field required\":\n reraise = True\n messages.append(el[\"msg\"])\n if reraise:\n ValueError(messages)", "title": "" }, { "docid": "c5d94e381ba55e957e6afe5b9f26b0c4", "score": "0.4960678", "text": "def _validate_instance(self, model, instance):\n try:\n validate_model_instance(model, instance)\n except MissingFields as fields:\n raise tornado.web.HTTPError(400, \"Missing Fields %s\" % fields)\n except ValidationError:\n raise tornado.web.HTTPError(400, \"Validation Error\")\n self._create_instance(instance)", "title": "" }, { "docid": "98979b107d9abdb9a6696be73cb67e8f", "score": "0.49596843", "text": "def test_rest_create_v2_400(self, *args):\n api = get_test_client(self.napp.controller, self.napp)\n url = \"%s/v2/namespace/123\" % self.API_URL\n response = api.open(url, method='POST')\n\n self.assertEqual(response.status_code, 400)", "title": "" }, { "docid": "c704f67f7017a3c519f9c4c3a6bf0a4b", "score": "0.49593464", "text": "def test_custom_excepetion_422():\n expected = {\n 'message': 'test',\n 'code': 422,\n 'data': None\n }\n instance_test = ValidationError('test')\n instance = instance_test.__str__()\n assert instance == repr(expected)", "title": "" }, { "docid": "0435b5c9410a5ca3f9597e2bd42c5a5a", "score": "0.49524593", "text": "def fail(self):\n raise AccountControlViolation(constraint=repr(self))", "title": "" } ]
5da92a368f8d5dedef47a25c95e01cc5
Fetches all incidents not marked as 'Resolved'
[ { "docid": "c01976be6a59d05bb2960bf2c8ce3f02", "score": "0.6302394", "text": "def get_all_open_incidents(self):\n self.open_incidents = []\n try:\n response = self.session.get(f'{self.base_url}/incidents')\n except RequestException as err:\n sys.exit(f'REQUEST ERROR! Unable to get incidents.\\n{err}')\n for incident in response.json()['data']:\n if incident['status'] != 4:\n self.open_incidents.append(incident)", "title": "" } ]
[ { "docid": "625033ffb356b956860c15ebe156935a", "score": "0.66342324", "text": "def unresolved_issues(self):\n return self.issue_set.filter(resolved_state__isnull=True)", "title": "" }, { "docid": "8aca38791acd7bc07aab9efcffe76b20", "score": "0.5845197", "text": "def get_all_unresolved_corefering_entities(self):\n articles = []\n for article in self.db_article_collection.find({'coref_entities': {'$exists': True}}):\n entities = [{'main': ent['main'],\n 'mentions': [mention for mention in ent['mentions'] if mention['resolved'] is False]}\n for ent in article['coref_entities']]\n entities = [ent for ent in entities if len(ent['mentions']) > 0]\n articles.append({\n 'source': article['source'],\n 'coref_entities': entities\n })\n return articles", "title": "" }, { "docid": "0bc6edabdf9162926f7e36d98f15795a", "score": "0.58078194", "text": "def get_partly_resolved_observations(self):\n items = []\n for obj in self.observations:\n if obj.get('observation_question_status', '') in [\n 'phase1-closed',\n 'phase2-closed'] and \\\n obj.get('observation_finalisation_reason', '') == 'partly-resolved':\n items.append(obj)\n return items", "title": "" }, { "docid": "67327ce2c2745898e47a2b865849b5e7", "score": "0.554834", "text": "def filter_issues(self, issues):\n return self.filter_on_committed(issues)", "title": "" }, { "docid": "3baf14084c09a508980aa92e37f03c4b", "score": "0.5497551", "text": "def fetch_incidents():\n last_run = demisto.getLastRun()\n\n last_updated = (datetime(1999, 1, 1, 0, 0, 0, 0), '1999-01-01T00:00:00.0Z')\n if last_run and 'createdOn' in last_run:\n ts_str = last_run.get('createdOn')\n last_updated = (datetime.strptime(ts_str, '%Y-%m-%dT%H:%M:%S.%fZ'), ts_str)\n\n severities = get_alert_severity()\n\n events = []\n incidents = []\n tmp_time = last_updated\n\n for severity in severities:\n events = get_alerts(severity, None, last_updated[1])\n\n for event in events:\n event_ts = datetime.strptime(event['createdOn'], '%Y-%m-%dT%H:%M:%S.%fZ')\n if event_ts > tmp_time[0]:\n tmp_time = (event_ts, event['createdOn'])\n\n incident = {\n 'name': event['type'],\n 'occurred': event['createdOn'],\n 'severity': SCADAFENCE_ALERT_SEVERITY_LEVEL[event['severity']],\n 'rawJSON': json.dumps(event)\n }\n incidents.append(incident)\n if tmp_time[0] > last_updated[0]:\n\n demisto.setLastRun({\n 'createdOn': tmp_time[1]\n })\n\n demisto.incidents(incidents)", "title": "" }, { "docid": "486ab24a30843c4e4229f18809a4987b", "score": "0.54935455", "text": "def filter_on_committed(self, issues):\n filtered_issues = [i for i in issues if i.ended and (i.committed['entered_at'] >= self.start_date and i.committed['entered_at'] <= self.end_date)]\n return filtered_issues", "title": "" }, { "docid": "be9e35982532ce590f9f486a2e02aadb", "score": "0.5411216", "text": "def get_issues_without_epic(\n self,\n board_id,\n jql=\"\",\n validate_query=\"\",\n fields=\"*all\",\n expand=\"\",\n start=0,\n limit=50,\n ):\n url = \"/rest/agile/1.0/board/{boardId}/epic/none/issue\".format(boardId=board_id)\n params = {}\n if jql:\n params[\"jql\"] = jql\n if validate_query:\n params[\"validateQuery\"] = validate_query\n if fields:\n params[\"fields\"] = fields\n if expand:\n params[\"expand\"] = expand\n if start:\n params[\"startAt\"] = start\n if limit:\n params[\"maxResults\"] = limit\n return self.get(url, params=params)", "title": "" }, { "docid": "82af16afb8dfffbcad1431cea5b97a9d", "score": "0.540656", "text": "def _FetchUntriagedAnomalies(self):\n # Previous code process anomalies by sheriff with LIMIT. 
It prevents some\n # extreme cases that anomalies produced by a single sheriff prevent other\n # sheriff's anomalies being processed. But it introduced some unnecessary\n # complex to system and considered almost impossible happened.\n future = anomaly.Anomaly.QueryAsync(\n keys_only=True,\n limit=_MAX_UNTRIAGED_ANOMALIES,\n recovered=False,\n is_improvement=False,\n bug_id='',\n )\n future.wait()\n anomalies = future.get_result()[0]\n return anomalies", "title": "" }, { "docid": "f839cd0cc9db6411f26a622835000ae5", "score": "0.5363145", "text": "def filter_issues(self, issues):\n return self.filter_on_ended(issues)", "title": "" }, { "docid": "f839cd0cc9db6411f26a622835000ae5", "score": "0.5363145", "text": "def filter_issues(self, issues):\n return self.filter_on_ended(issues)", "title": "" }, { "docid": "f839cd0cc9db6411f26a622835000ae5", "score": "0.5363145", "text": "def filter_issues(self, issues):\n return self.filter_on_ended(issues)", "title": "" }, { "docid": "8523b9f9a48bb170f449d708859cdedc", "score": "0.5361128", "text": "def fetch_incidents(client: Client, last_run, first_fetch_time):\n last_fetch = last_run.get('last_fetch', None)\n last_run_rids = last_run.get('last_run_rids', {})\n page_to_query = last_run.get('page_to_query', 1)\n\n if not last_fetch:\n last_fetch, _ = parse_date_range(first_fetch_time, date_format=DATE_FORMAT, utc=True)\n\n current_rids = []\n incidents = []\n response, field_list = get_list_incidents(client, last_fetch, page_to_query)\n items = _parse_alerts_result(response, field_list)\n\n # Check last queried item's timestamp\n latest_created_time = None\n if items:\n parsed_date = dateparser.parse(items[-1]['Timestamp'])\n assert parsed_date is not None, f\"failed parsing {items[-1]['Timestamp']}\"\n latest_created_time = parsed_date.replace(tzinfo=None).strftime(DATE_FORMAT)\n\n # If timestamp stayed the same than get next 10\n if last_fetch == latest_created_time:\n page_to_query += 1\n else:\n page_to_query = 1\n\n for item in items:\n # Make datetime object unaware of timezone for comparison\n parsed_date = dateparser.parse(item['Timestamp'])\n assert parsed_date is not None, f\"failed parsing {item['Timestamp']}\"\n incident_created_time = parsed_date.replace(tzinfo=None)\n\n # Don't add duplicated incidents\n # if item[\"ResourceID\"] not in last_run_rids:\n incident = {\n 'name': item.get('Description', None),\n 'occurred': incident_created_time.strftime(DATE_FORMAT),\n 'severity': CTD_TO_DEMISTO_SEVERITY.get(item.get('Severity', None), None),\n 'rawJSON': json.dumps(item)\n }\n\n incidents.append(incident)\n current_rids.append(item[\"ResourceID\"])\n\n # If there were no items queried, latest_created_time is the same as last run\n if latest_created_time is None:\n latest_created_time = last_fetch\n\n # If no new items were retrieved, last_run_rids stay the same\n if not current_rids:\n current_rids = last_run_rids\n\n next_run = {'last_fetch': latest_created_time, 'last_run_rids': current_rids, \"page_to_query\": page_to_query}\n return next_run, incidents", "title": "" }, { "docid": "a915676d254592d19d2f8e9b2b705917", "score": "0.5345372", "text": "def getRemainingAssetsList(self): \n try:\n self.cur.execute(\"select id, latitude, longitude \\\n from assets \\\n where is_deleted = 'f' \\\n and installation_date is not null and commissioning_date is not null \\\n and id not in ( \\\n select asset_id from streets_reverse_geocoded \\\n )\")\n except:\n print(\"I am unable to get data\")\n\n rows = self.cur.fetchall() \n\n assets_list = []\n for 
row in rows:\n asset_id = row[0]\n asset_latitude = row[1]\n asset_longitude = row[2]\n assets_list.append((asset_id, asset_latitude, asset_longitude))\n\n return assets_list", "title": "" }, { "docid": "65652e02fdde96b3ce6c13b0f1871773", "score": "0.533216", "text": "def _LookupUncommittedChanges(self, deps, limit_to=None):\n unsatisfied = []\n for dep in deps:\n if dep in self._committed_cache:\n continue\n\n try:\n self._LookupHelper(dep)\n except GerritHelperNotAvailable:\n # Internal dependencies are irrelevant to external builders.\n logging.info(\"Skipping internal dependency: %s\", dep)\n continue\n\n dep_change = self._lookup_cache[dep]\n\n if dep_change is None:\n dep_change = self._GetGerritPatch(dep)\n if dep_change is None:\n continue\n if getattr(dep_change, 'IsAlreadyMerged', lambda: False)():\n continue\n elif limit_to is not None and dep_change not in limit_to:\n if self._is_submitting:\n raise PatchRejected(dep_change)\n else:\n raise dep_change.GetMergeException() or PatchNotEligible(dep_change)\n\n unsatisfied.append(dep_change)\n\n # Perform last minute custom filtering.\n return [x for x in unsatisfied if self.deps_filter_fn(x)]", "title": "" }, { "docid": "393909784623c7ff286f3ab499b3baca", "score": "0.5312973", "text": "def _get_clients_non_drug_users(self):\n filtering = self.__default_encounter_filtering()\n encounters = Encounter.objects.filter(**filtering)\n clients = encounters.values_list('person', flat=True)\n return Client.objects.filter(pk__in=clients).filter(close_person=True)", "title": "" }, { "docid": "080f07df611f70a1c793fa5929c8cf99", "score": "0.53024465", "text": "def getNonFetchedAddresses(self):\n return self.dbpool.runQuery(\n \"select \\\n hosts.address, \\\n services.port, \\\n services.name \\\n from hosts \\\n join services on (hosts.id = services.host_id) \\\n left join urls_resolved_to_hosts on (hosts.id = urls_resolved_to_hosts.host_id) \\\n left join www_page_located_at_urls on (urls_resolved_to_hosts.url_id = www_page_located_at_urls.url_id) \\\n left join www_pages on (www_page_located_at_urls.page_id = www_pages.id) \\\n where (services.name ilike 'http' or services.name ilike 'https') and urls_resolved_to_hosts.host_id is null\")", "title": "" }, { "docid": "3b56214c1dfe7c34a8424d2aa05a3449", "score": "0.5292883", "text": "def get_partly_resolved_observations(self):\n return self.get_observations(\n observation_question_status=[\n 'phase1-closed',\n 'phase2-closed'],\n observation_finalisation_reason='partly-resolved',\n )", "title": "" }, { "docid": "001c748f3b272e28afd95b60c449fc7c", "score": "0.52875745", "text": "def fetch_incidents(client: Client, fetch_time: str, fetch_limit: int, last_run: dict, incident_types: List[str] = None,\n incident_status_id: List[str] = None, incident_severities: List[str] = None, is_test=False):\n incidents = []\n if incident_severities:\n incident_severities = [INCIDENT_SEVERITY_MAPPING[severity] for severity in incident_severities] # type: ignore\n if incident_types:\n incident_types = [INCIDENT_TYPE_MAPPING[incident_type] for incident_type in incident_types]\n\n if last_run:\n last_update_time = last_run.get('last_incident_creation_time')\n\n else:\n # In first run\n last_update_time = parse_creation_date(fetch_time)\n\n incidents_data_res = client.get_incidents_request(status_id=incident_status_id,\n severity=incident_severities, # type: ignore\n incident_type=incident_types, limit=fetch_limit,\n creation_date=last_update_time, order_by=True)\n\n incidents_data_list = 
incidents_data_res.get('incidents', [])\n\n for incident_data in incidents_data_list:\n incident_id = incident_data.get('incidentId')\n incident_creation_time = incident_data.get('creationDate')\n\n if is_incident_already_fetched_in_previous_fetch(last_update_time, incident_creation_time):\n # Skipping last incident from last cycle if fetched again\n continue\n\n incident_details = get_incident_details_fetch(client, incident_data)\n incident: dict = {\n 'rawJSON': json.dumps(incident_details),\n 'name': f'Symantec DLP Incident ID {incident_id}',\n 'occurred': parse_creation_date(incident_creation_time)\n }\n incidents.append(incident)\n if incident_creation_time == incidents_data_list[-1].get('creationDate'):\n last_update_time = incident_creation_time\n\n if is_test:\n return None\n\n demisto.setLastRun(\n {\n 'last_incident_creation_time': last_update_time\n }\n )\n # Sort the incidents list because the incident's ID and creation date are not synchronize\n sorted_incidents = sorted(incidents, key=lambda d: d['name'])\n return sorted_incidents", "title": "" }, { "docid": "ce605fb84cb95ee7b6ed4fd59bd15cde", "score": "0.52874345", "text": "def getPendingReportDates():\n # Filter the unreported derivatives\n query = Derivative.query.filter_by(reported=False)\n # Query the distinct dates of unreported derivatives\n query = query.with_entities(Derivative.date_of_trade).distinct()\n # Execute query and extract dates from sqlalchemy.util._collections.result\n return [d[0] for d in query.all()]", "title": "" }, { "docid": "cd82025d61951e15f416d37d97fb6137", "score": "0.52727735", "text": "def list(self, request):\n take_all = bool(self.request.query_params.get('take_all'))\n logger.info('List incidents request received. [take_all: {}]'.format(take_all))\n requester = self.request.user\n if requester.role == Role.ADMIN and take_all is True:\n queryset = Incident.objects.all()\n else:\n queryset = Incident.objects.filter(owner=requester)\n serializer = SimpleIncidentSerializer(queryset, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "9bb45e50d6803f158bafb57d57d69bac", "score": "0.5260496", "text": "def list_incidents(engine):\n query_all = \"SELECT incident, COUNT(*) FROM public.incident_tweets WHERE incident IS NOT NULL AND misinfo_type != 'not misinformation' GROUP BY incident;\"\n incidents =pd.read_sql(query_all, con=engine)\n return incidents", "title": "" }, { "docid": "bd7a92e901435c07dde2a9e3b20f26ee", "score": "0.52349764", "text": "def get_unused(self):\n return self.filter(used=False)", "title": "" }, { "docid": "d49a70dd7b76234a09906065dd2f0c82", "score": "0.5217671", "text": "def list_uncompleted(self):\n pass", "title": "" }, { "docid": "eb3b1ffaeef2afc12f5f9fb68b4633c6", "score": "0.52048784", "text": "def unrejected_requests(self, user):\r\n key = make_key('unrejected_requests', user.pk)\r\n unrejected_requests = cache.get(key)\r\n if unrejected_requests is None:\r\n unrejected_requests = self.filter(Q(rejected__isnull=True) | Q(rejected=False), from_user=user)\r\n cache.set(key, unrejected_requests)\r\n return unrejected_requests", "title": "" }, { "docid": "087a5ce0ec3ca1e0584255dbe20289ed", "score": "0.5201605", "text": "def listIncomingTasksWithoutClosedReport( self, REQUEST, **query ):\n kw = {}\n member_id = REQUEST.get('member_id')\n involved_users = REQUEST.get('involved_users')\n kw['member_id' ] = member_id\n\n if involved_users:\n kw['InvolvedUsers'] = { 'query':involved_users, 'operator':'OR' }\n\n kw = self._extp('incoming_total', **kw)\n if 
query: kw = updateQuery( kw, query, restricted=['InvolvedUsers'] )\n\n kw['BrainsType'] = REQUEST.get('brains_type')\n kw['sort_limit'] = None\n\n total_objects, tasks = self.searchTasks( REQUEST=REQUEST, IsCatalog=1, **kw )\n\n IsExpired = REQUEST.has_key('IsExpired')\n IsNotAnswered = REQUEST.has_key('IsNotAnswered')\n\n if IsExpired or IsNotAnswered:\n res = []\n for r in tasks:\n if IsExpired and self._IsExpired( member_id, r ):\n res.append( r )\n if IsNotAnswered and self._IsNotAnswered( member_id, r ):\n res.append( r )\n return ( len(res), res, 1, )\n\n return ( total_objects, tasks, 1, )", "title": "" }, { "docid": "ea1ea92a12867e11eb7ab82c0289e6ba", "score": "0.5183953", "text": "def fetch_all_resolved_tickets(self):\n self.fetch_auth_token(partial(BugzillaFetcher.fetch_all_resolved_tickets, self))", "title": "" }, { "docid": "13846ee0ec8464f2736569cc5a680250", "score": "0.51825416", "text": "def get_incident_owners(incident_ids) -> list:\n\n res = demisto.executeCommand('GetIncidentsByQuery', {\n 'query': \"id:({})\".format(' '.join(incident_ids))\n })\n\n if isError(res):\n return_error(f'Error occurred while trying to get incidents by query: {get_error(res)}')\n\n incidents_from_query = json.loads(res[0]['Contents'])\n\n incident_owners = set([incident['owner'] for incident in incidents_from_query])\n incident_owners.add(demisto.incident()[\"owner\"]) # Add the campaign incident owner\n incident_owners_res = list(filter(lambda x: x, incident_owners))\n\n return incident_owners_res", "title": "" }, { "docid": "c6e0b88a98a183200ef40b013f97f6da", "score": "0.51687443", "text": "def vaccines_missing(aggregated: bool = False, verbose: bool = False):\n if aggregated:\n # Get tracked vaccines\n vax_tracked = vaccines_tracked(as_list=True)\n client = TrackVaccinesClient()\n vax_approved = client.vaccines_approved()\n return {\n \"vaccines_untracked\": [v for v in vax_approved if v not in vax_tracked],\n \"vaccines_unapproved\": [v for v in vax_tracked if v not in vax_approved]\n }\n else:\n vax_tracked = vaccines_tracked()\n vax_approved = vaccines_approved(verbose=True)\n # Build result dataframe\n df = vax_tracked.merge(vax_approved, on=\"location\", suffixes=(\"_tracked\", \"_approved\"))\n df = df[df.vaccines_tracked != df.vaccines_approved].dropna()\n df = df.assign(\n unapproved=(\n df.apply(lambda x: [xx for xx in x[\"vaccines_tracked\"] if xx not in x[\"vaccines_approved\"]], axis=1)\n ),\n untracked=(\n df.apply(lambda x: [xx for xx in x[\"vaccines_approved\"] if xx not in x[\"vaccines_tracked\"]], axis=1)\n )\n )\n df = df.assign(\n num_unapproved=df.unapproved.apply(len),\n num_untracked=df.untracked.apply(len)\n )\n df = df[[\"location\", \"unapproved\", \"num_unapproved\", \"untracked\", \"num_untracked\"]]\n df = df.sort_values(by=\"num_untracked\", ascending=False)\n return df", "title": "" }, { "docid": "218dc930afd76d4ee88fb398f24c5d3d", "score": "0.51548684", "text": "def _escalate(self):\n self.log.info(u\"Getting list of open approval requests\")\n\n try:\n # This just queries for all requests that match the escalation conditions.\n # For cases with many thousands of open requests, a more scalable approach could\n # use \"paged\" queries, i.e. 
send a \"limit\" and then process each page of results.\n results = self.bit9_client.query_approval_request(self.escalation_query)\n\n # Query results should be a list\n if not isinstance(results, list):\n self.log.warn(u\"Query produced unexpected value: %s\", results)\n return\n\n self.log.info(\"%d results\", len(results))\n self.log.debug(results)\n\n r_incidents = []\n if len(results) > 0:\n # Some (many!) of these approval requests will already have been escalated to Resilient.\n # For efficiency, find them and filter them out from this batch.\n # Then we're left only with \"un-escalated\" incidents.\n req_ids = [result[\"id\"] for result in results]\n query_uri = u\"/incidents/query?return_level=normal&field_handle={}\".format(REQUEST_ID_FIELDNAME)\n query = {\n 'filters': [{\n 'conditions': [\n {\n 'field_name': 'properties.{}'.format(REQUEST_ID_FIELDNAME),\n 'method': 'in',\n 'value': req_ids\n },\n {\n 'field_name': 'plan_status',\n 'method': 'equals',\n 'value': 'A'\n }\n ]\n }]\n }\n self.log.debug(query)\n try:\n r_incidents = self.rest_client().post(query_uri, query)\n except SimpleHTTPException:\n # Some versions of Resilient 30.2 onward have a bug that prevents query for numeric fields.\n # To work around this issue, let's try a different query, and filter the results. (Expensive!)\n query_uri = u\"/incidents/query?return_level=normal&field_handle={}\".format(REQUEST_ID_FIELDNAME)\n query = {\n 'filters': [{\n 'conditions': [\n {\n 'field_name': 'properties.{}'.format(REQUEST_ID_FIELDNAME),\n 'method': 'has_a_value'\n },\n {\n 'field_name': 'plan_status',\n 'method': 'equals',\n 'value': 'A'\n }\n ]\n }]\n }\n self.log.debug(query)\n r_incidents_tmp = self.rest_client().post(query_uri, query)\n r_incidents = [r_inc for r_inc in r_incidents_tmp\n if r_inc[\"properties\"].get(REQUEST_ID_FIELDNAME) in req_ids]\n\n escalated_ids = [r_inc[\"properties\"].get(REQUEST_ID_FIELDNAME) for r_inc in r_incidents]\n\n unescalated_requests = [result for result in results if str(result[\"id\"]) not in escalated_ids]\n\n # Process each approval-request in the batch\n for req in unescalated_requests:\n self.fire(ProcessApprovalRequest(request=req))\n\n self.log.info(u\"Processed all approval requests\")\n\n except Exception as err:\n raise err\n finally:\n # We always want to reset the timer to wake up, no matter failure or success\n self.fire(PollCompleted())", "title": "" }, { "docid": "4ac868fd903631d210744e9b29e08b57", "score": "0.51518637", "text": "def retrieve_all_verified_events(self):\n unverified_events = []\n select_query = \"\"\"SELECT * FROM social_distancing\"\"\"\n retrieved_events = self.off_chain.select(select_query)\n events_df = pd.DataFrame(retrieved_events, columns=self.off_chain.cursor.column_names)\n for row, event in events_df.iterrows():\n verified = self.on_chain.verify_event(event[\"Event_ID\"], event[\"Location\"], event[\"Local_Time\"], event[\"Violations\"])\n if not verified:\n print(\"Event ID {} is not verifiable!\".format(event[\"Event_ID\"]))\n unverified_events.append(event[\"Event_ID\"])\n events_df = events_df.drop([row])\n return events_df, unverified_events", "title": "" }, { "docid": "08f9c924bf51950af360bf4332ab391d", "score": "0.512804", "text": "def incident_list(self) -> 'outputs.IncidentListResponse':\n return pulumi.get(self, \"incident_list\")", "title": "" }, { "docid": "523961d4e70186c4f0ecc39410834851", "score": "0.51268965", "text": "def filter_on_ended(self, issues):\n filtered_issues = [i for i in issues if i.ended and 
(i.ended['entered_at'] >= self.start_date and i.ended['entered_at'] <= self.end_date)]\n return filtered_issues", "title": "" }, { "docid": "4a1c8720a60e268a00a7a749b6a4b591", "score": "0.5083646", "text": "def find_missing_rows(self):\n\n self.cur.execute(Database.Sql.find_missing_rows)\n return [(issue, epiweek) for (issue, epiweek) in self.cur]", "title": "" }, { "docid": "6fbc830da9f34d07868ff0175c7569a5", "score": "0.5055957", "text": "def get_no_response_needed_observations(self):\n items = []\n for obj in self.observations:\n if obj.get('observation_question_status', '') in [\n 'phase1-closed',\n 'phase2-closed'] and \\\n obj.get('observation_finalisation_reason', '') == 'no-response-needed':\n items.append(obj)\n return items", "title": "" }, { "docid": "dbac1b70f81e843aaa29e60e626b5079", "score": "0.5049629", "text": "def get(self):\n return get_all_incidents()", "title": "" }, { "docid": "b444b72e7cda3347a49b0efb98a87e65", "score": "0.50394773", "text": "def ignored(self) -> List[Link]:\n\n lmbd = lambda x: x.status == Status.IGNORED\n return self._filter(lmbd)", "title": "" }, { "docid": "87f2a5fefe1f4949628834f0397b6096", "score": "0.50391793", "text": "def ignored(self):\n if not self.id:\n raise InvalidObjectError(\"missing IOC ID\")\n if not self._report_id:\n raise InvalidObjectError(\"ignore status only applies to watchlist IOCs\")\n\n url = \"/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/iocs/{}/ignore\".format(\n self._cb.credentials.org_key,\n self._report_id,\n self.id\n )\n resp = self._cb.get_object(url)\n return resp[\"ignored\"]", "title": "" }, { "docid": "455a1611be9d1b37f0a5b3ba0f2fda65", "score": "0.5009249", "text": "def unresolveds(self):\n # by default, empty\n return ()", "title": "" }, { "docid": "888a192d0a9bfe4df05ca3f47b3559c3", "score": "0.5002682", "text": "def fetch_all_statuses(issues: List[models.Scheme], dispatcher: Dispatcher, max_workers):\n raw_url_to_issues = group_issues_by_url(issues)\n\n # Remove all issues whose tracker contains a URL segment listed in `EXCEPTIONS`\n url_to_issues = {}\n for url, issues in raw_url_to_issues.items():\n exclude, reason = should_exclude(url)\n if exclude:\n LOG.warning(f\"Excluding {url!r} due to exception segment {reason!r}\")\n else:\n url_to_issues[url] = issues\n\n len_trackers = len(url_to_issues)\n LOG.debug(f\"Unique issue trackers found: {len_trackers}\")\n\n # When `max_workers` is None, `ThreadPoolExecutor` uses a sensible default value based on the number of cores\n all_issues = []\n with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n future_to_url = {executor.submit(dispatcher.dispatch, url): url for url in url_to_issues}\n for i, future in enumerate(futures.as_completed(future_to_url)):\n url = future_to_url[future]\n log_prefix = f\"{i + 1}/{len_trackers} -\"\n issues_with_status = _handle_completed_future(future, log_prefix, url, url_to_issues)\n all_issues.extend(issues_with_status)\n return all_issues", "title": "" }, { "docid": "7ada12d11b56fca48c5c2a9351e40f60", "score": "0.49931666", "text": "def get_incident_ids() -> list:\n incidents = demisto.get(demisto.context(), \"EmailCampaign.incidents\")\n return [incident[\"id\"] for incident in incidents]", "title": "" }, { "docid": "360462d9d355b6549ae4ee275e02be31", "score": "0.49851692", "text": "def __get_critical_activities(self):\n critical_activities = []\n for node1, node2 in self.graph.edges():\n if self.graph.edge[node1][node2]['total_float'] == 0:\n critical_activities.append((node1, node2))\n return 
critical_activities", "title": "" }, { "docid": "55600039a7c08d2d571e529c5f34aaa3", "score": "0.49712437", "text": "def listIncomingTasksWithClosedReport( self, REQUEST, **query ):\n kw = {}\n member_id = REQUEST.get('member_id') or _getAuthenticatedUser(self).getUserName()\n kw['member_id'] = member_id\n\n kw = self._extp('incoming_closed', **kw)\n if query: kw = updateQuery( kw, query, restricted=['InvolvedUsers'] )\n\n return self.searchTasks( REQUEST=REQUEST, IsCatalog=1, **kw )", "title": "" }, { "docid": "d49b60bb9b464a39b013c2824b43f429", "score": "0.4970701", "text": "def clear_closed(self):\n self.visits = []", "title": "" }, { "docid": "13a5ff553d5220524fa27be6be43e241", "score": "0.4964416", "text": "def getUnfinished(self, pid):\r\n \r\n self.c.execute(\"SELECT id FROM links WHERE package=? AND status NOT IN (0, 4, 13) LIMIT 3\", (str(pid),))\r\n return [r[0] for r in self.c]", "title": "" }, { "docid": "593588a347a7dc36917dc2065acf5d5c", "score": "0.49566975", "text": "def final_activities(self):\n return [act for act in self.activities if len(self.outset[act]) == 0]", "title": "" }, { "docid": "df9474fad7bcfb5ad1c43e739e5b49eb", "score": "0.49396974", "text": "def _FetchOpenBugs(self):\n issue_tracker = issue_tracker_service.IssueTrackerService(\n utils.ServiceAccountHttp())\n bugs = issue_tracker.List(\n can='open',\n q='Performance=Sheriff OR Performance=Sheriff-V8',\n maxResults=1000)\n return bugs['items']", "title": "" }, { "docid": "6f254839caf8e211a53fe9ea1ceae767", "score": "0.4935863", "text": "def get_unassociated_eips(self):\n # Disallow unrestricted association attempts,\n # filter addresses must be provided to prevent\n # over-greedy attempts on capturing EIP's\n if len(self.filter_addresses) < 1:\n self.logger.critical(\"No eip addresses specified, aborting...\")\n self.safe_exit(1)\n try:\n eips = self.ec2_connection.get_all_addresses(\n addresses=self.filter_addresses)\n\n unassociated_eips = \\\n [eip for eip in eips if eip.association_id is None]\n\n self.logger.info(\"Found {} eips: {}\".format(len(eips), eips))\n self.logger.info(\"Found {} unassociated eips: {}\"\n .format(len(unassociated_eips),\n unassociated_eips)\n )\n\n return unassociated_eips\n\n except EC2ResponseError as e:\n # This prints a user-friendly error with stacktrace\n self.logger.critical(\"Error getting EIPS: {}\".format(e.message))\n # If we're not associated already, we need to go into standby\n if len(self.get_instance_association()) < 1:\n self.update_standby_mode(True)\n self.safe_exit(exit_code=1)", "title": "" }, { "docid": "7fd4a8ab9632bb9738ea76aca7ad0dff", "score": "0.49283215", "text": "def db_get_all_not_closed_translations():\n translation = Translation.query.filter(Translation.status != config.GetConfig.TRANSLATED_STATUS).all()\n if (len(translation) > 0):\n return translation\n else:\n return 'Not Found.'", "title": "" }, { "docid": "b8e2486b1fc19b76da3092e2d9472d71", "score": "0.49207306", "text": "def allUnreconciledTransactionsForAccount(self, account):\n unreconciledTransactions = []\n with self.session() as session:\n unreconciledTransactions_False = session.query(self.table_class).filter_by(reconciled=False, account=account)\n unreconciledTransactions_None = session.query(self.table_class).filter_by(reconciled=None, account=account)\n unionOfUnreconciledTransactions = unreconciledTransactions_False.union(unreconciledTransactions_None)\n unreconciledTransactions = unionOfUnreconciledTransactions.order_by(desc(Transaction.date)).all()\n return 
unreconciledTransactions", "title": "" }, { "docid": "33fc3a2712176c3c687de18b5242809a", "score": "0.49156427", "text": "def get_stale_endpoints(self):", "title": "" }, { "docid": "10ed5fb9843de91c0ae9b954dd0c7438", "score": "0.48890817", "text": "def received_requests(self):\n type_ = RelationshipType.pending_incoiming\n return [rs for rs in self.relationships.values() if rs.type is type_]", "title": "" }, { "docid": "e3ff5a14720678f356cea62e1754b0e6", "score": "0.48830652", "text": "def get_not_processed_notifications(self, notification_username):\n try:\n db = self.mongodb_handler.get_reports_state_db()\n collection = db[NOTIFICATION_COLLECTION]\n\n cursor = collection.find({\"status\": \"not_sent\", \"user_id\": notification_username})\n except Exception as e:\n self.logger_m.log_error('DatabaseManager.get_not_processed_notifications', '{0}'.format(repr(e)))\n raise e\n\n return list(cursor)", "title": "" }, { "docid": "dc002ca976cca5ce2104912f0eace2d8", "score": "0.4881742", "text": "def revert(self):\n self.conflict.resolution = None\n return []", "title": "" }, { "docid": "099690bbb83c31e86eaa252fb0e5033c", "score": "0.48759815", "text": "def get_all(request):\n\n serializer = InternalReportListRequestSerializer(\n data=request.query_params\n )\n if not serializer.is_valid():\n raise WrongParameterError(description=serializer.errors)\n\n data = serializer.data\n\n context = request.user.appcontextmembers.context\n\n raw_filters = dict(\n context=context,\n generated__gte=serializer.get_start_date(),\n generated__lte=serializer.get_end_date(),\n type=data.get('type', None),\n status=data.get('status', None)\n )\n\n filters = [Q(**{k: v}) for k, v in raw_filters.items() if v is not None]\n\n reports = InternalReport.objects.filter(*filters).order_by('-generated')\n\n broken_reports = reports.filter(\n status=INTERNAL_REPORT_STATUS_GENERATING,\n generated__lt=datetime.now() - timedelta(hours=24))\n\n if len(broken_reports):\n broken_reports.update(status=INTERNAL_REPORT_STATUS_FAILED)\n\n return Response(\n data=InternalReportSerializer(reports, many=True).data,\n status=status.HTTP_200_OK\n )", "title": "" }, { "docid": "49b245f37a3f9141545936c56a0106db", "score": "0.4874684", "text": "def non_safe_guarding_offences(self) -> List[OffenceRecordsFound]:\n return self._non_safe_guarding_offences", "title": "" }, { "docid": "98066b1ff26bf686ecff257d0153dac6", "score": "0.48740458", "text": "def test_get_incidents_non_existent_mine_guid(self, test_client, db_session, auth_headers):\n\n batch_size = 3\n fake_guid = uuid.uuid4()\n MineIncidentFactory.create_batch(size=batch_size)\n\n get_resp = test_client.get(\n f'/mines/{fake_guid}/incidents', headers=auth_headers['full_auth_header'])\n get_data = json.loads(get_resp.data.decode())\n assert get_resp.status_code == 404", "title": "" }, { "docid": "94c0b523b67ac5f6c08411f5abab4017", "score": "0.48734936", "text": "def all(self, status):\n\t\tif 1 == 0:\n\t\t\tdata = memcache.get(\"issues_all\")\n\t\t\tif data is not None:\n\t\t\t\treturn data, True\n\n\t\tquery = gdata.projecthosting.client.Query(max_results=200,status=status)\n\t\tfeed = self.client.get_issues(conf.GOOGLE_PROJECT, query=query)\n\t\tdata = []\n\t\tfor issue in feed.entry:\n\t\t\tdic = self.process_entry(issue)\n\t\t\tdata.append(dic)\n\t\t\t\n\t\tif not memcache.set(\"issues_all\", data, 10):\n\t\t\tprint \"error\"\n\t\treturn data, False", "title": "" }, { "docid": "fea23cafd88a82205807d6587114f136", "score": "0.48561874", "text": "def list_notseen(request):\n profile = 
request.user.my_userprofile\n ticket_vus = profile.tickets_vus or {}\n\n def my_get_context(request):\n ctx = get_context(request)\n ctx[\"show_ticket_seen\"] = True\n return ctx\n\n def postfiltercallback(qs):\n for k, v in ticket_vus.items():\n if k == \"all\":\n q = models.Q(last_modification__gt=datetime.datetime.fromtimestamp(int(v)))\n qs = qs.filter(q)\n else:\n q = models.Q(pk=int(k)) & models.Q(last_modification__lt=datetime.datetime.fromtimestamp(int(v)))\n qs = qs.exclude(q)\n return qs\n\n return list_all(request,\n export_link=reverse('ticket_export_notseen'),\n postfiltercallback=postfiltercallback,\n get_context_callback=my_get_context)", "title": "" }, { "docid": "cf85bbdc6a02d2795136bda37d5b6108", "score": "0.48543656", "text": "def undefined(self) -> List[Link]:\n\n lmbd = lambda x: x.status == Status.UNDEFINED\n return self._filter(lmbd)", "title": "" }, { "docid": "f8af520b2c0abaf960825a8c9fca752c", "score": "0.48529136", "text": "def get_additional_occurrences(self, start, end):\n return [occ for key,occ in self.lookup.items() if (occ.start < end and occ.end >= start and not occ.cancelled)]", "title": "" }, { "docid": "ab2516668a94e56877a45ca61818f301", "score": "0.4850754", "text": "def _remove_unfulfilled(self, records):\n new_records = ModelList()\n for record in records:\n if record.noncontextual_required_fulfilled:\n new_records.append(record)\n return new_records", "title": "" }, { "docid": "14b845cd8fd63d7741a78f7ed9856f01", "score": "0.48475298", "text": "def deferred(self):\n return self.filter(status=STATUS_DEFERRED)", "title": "" }, { "docid": "e2856b79e41e3b6090121e3160f1f0c4", "score": "0.48429787", "text": "def continuing(self):\n index_round = self.nbr_round - 1\n continuing_candidates = set([candidate\n for candidate in self.status\n if self.status[candidate].status == K.STATUS_CONTINUING])\n return continuing_candidates", "title": "" }, { "docid": "78796c5bae138fa048c058312501e92d", "score": "0.4832315", "text": "def fetch_incidents():\n _url = 'http://help.sparrowsms.com/api?task=incidents'\n help_offered_incidents = [] \n help_required_incidents = [] \n\n\n response = requests.get(_url)\n json_resp = response.json()\n incident_list = json_resp[\"payload\"][\"incidents\"]\n for incident in incident_list:\n\tcategory_list = incident.get(\"categories\", [])\n for categories in category_list:\n\t category = categories.get(\"category\", {})\n\t if 'Help Offered' == category.get(\"title\", None):\n\t help_offered_incidents.append(incident)\n\t elif 'Help Needed' == category.get(\"title\", None): \n\t help_required_incidents.append(incident)\n \n print help_required_incidents", "title": "" }, { "docid": "f6b6bceff9dec183b955e60f26278ef9", "score": "0.4830252", "text": "def get_no_response_needed_observations(self):\n return self.get_observations(\n observation_question_status=[\n 'phase1-closed',\n 'phase2-closed'],\n observation_finalisation_reason='no-response-needed',\n )", "title": "" }, { "docid": "dab26fb1a744ac7a19b4426585a47506", "score": "0.48281777", "text": "def is_unresolveable(self):\n return not bool(self.resolved)", "title": "" }, { "docid": "897f716fd1beaf8f6f89a6190a593813", "score": "0.48195097", "text": "def unviewed_requests(self, user):\r\n key = make_key('unviewed_requests', user.pk)\r\n unviewed_requests = cache.get(key)\r\n if unviewed_requests is None:\r\n unviewed_requests = self.filter(Q(viewed__isnull=True) | Q(viewed=False), from_user=user)\r\n cache.set(key, unviewed_requests)\r\n return unviewed_requests", "title": "" }, { 
"docid": "64836688b34ec9d56931c3d5f7338727", "score": "0.48188648", "text": "def aggregate_resolved_issues(resolved_issues: [int]):\n return Counter(resolved_issues)", "title": "" }, { "docid": "e9dcc9f52e637eac1f76f8a2ba80a074", "score": "0.4809288", "text": "def _get_issues(self, rec):\n try:\n return rec.issue_ids\n except AttributeError:\n return None", "title": "" }, { "docid": "6d45174c874d12db75f431601850c3ee", "score": "0.48055607", "text": "def missing_refs(self):\n return [ref for ref in self.find(CFN_REF) if not self.has_ref(ref)]", "title": "" }, { "docid": "a7b46a9760b79258e35e3d9c08c563c1", "score": "0.4801549", "text": "def fetch_incidents(\n client: Client,\n last_run: Dict,\n first_fetch_time: str,\n max_fetch: int,\n mirror_direction: Optional[str],\n integration_instance: str,\n):\n if last_run:\n oldest_alert_time = dateparser.parse(last_run[\"last_run\"])\n existing_ids = last_run.get(\"existing_ids\", [])\n else:\n oldest_alert_time = dateparser.parse(\n first_fetch_time,\n settings={\"TIMEZONE\": \"UTC\", \"RETURN_AS_TIMEZONE_AWARE\": True},\n )\n existing_ids = []\n\n assert oldest_alert_time\n org_name = client.active_user[\"organization\"][\"name\"]\n org_psa_id = client.active_user[\"organization\"][\"psa_id\"]\n start_time_iso = oldest_alert_time.isoformat()\n now_iso = datetime.now().isoformat() + \"Z\"\n params = {\n \"sort by\": \"last time org assigned\",\n \"last time org assigned\": f\"{start_time_iso}&{now_iso}\",\n \"incident status\": [\"open\", \"reviewing\"],\n \"assigned organization\": org_psa_id,\n \"limit\": max_fetch,\n }\n alerts = client.get_alerts(params=params)\n\n incidents = []\n newest_ids = []\n escalation_time = oldest_alert_time\n for alert in alerts:\n alert_orig_escalation_time = get_alert_org_escalation_time(alert)\n assert alert_orig_escalation_time is not None and alert_orig_escalation_time is not None\n escalation_time = max(escalation_time, alert_orig_escalation_time)\n alert_id = str(alert[\"id\"])\n\n if alert_id in existing_ids:\n newest_ids.append(alert_id)\n continue\n\n if not was_alert_first_escalated(client, alert_id, org_name, oldest_alert_time):\n continue\n\n newest_ids.append(alert_id)\n\n trigger_events = client.get_events(alert_id)\n alert[\"xsoar_trigger_events\"] = trigger_events\n\n # Parse trigger event as key/value pairs for ease of parsing\n alert[\"xsoar_trigger_kv\"] = extract_trigger_kv(trigger_events)\n\n # Mirroring fields\n alert[\"xsoar_mirror_direction\"] = mirror_direction\n alert[\"xsoar_mirror_instance\"] = integration_instance\n alert[\"xsoar_mirror_id\"] = alert_id\n alert[\"xsoar_mirror_tags\"] = [client.comment_tag, client.escalate_tag]\n alert[\"xsoar_input_tag\"] = client.input_tag\n\n # Link back to ZTAP\n alert[\"url\"] = alert.get(\"url\")\n\n incident = alert_to_incident(alert)\n incidents.append(incident)\n\n new_last_run = {\n \"last_run\": escalation_time.isoformat().replace(\"+00:00\", \"Z\"),\n \"existing_ids\": newest_ids,\n }\n\n return incidents, new_last_run", "title": "" }, { "docid": "e26e2ea4d0538f174dace802d37589b1", "score": "0.48008007", "text": "def destroyed_ancients(self):\n return [ancient for ancient in self.ancients.values()\n if not ancient.alive]", "title": "" }, { "docid": "bd5b53c484899e50782380f07628e1cf", "score": "0.47948232", "text": "def get_maintenance(objects):\n now = datetime.datetime.now()\n so = set(objects)\n r = set()\n for m in Maintenance._get_collection().find(\n {\"is_completed\": False, \"start\": {\"$lte\": now}}, {\"_id\": 0, 
\"affected_objects\": 1}\n ):\n mo = set(r[\"object\"] for r in m[\"affected_objects\"])\n r |= so & mo\n return r", "title": "" }, { "docid": "0be93e49162356cbf744ebabf245e1c9", "score": "0.4782589", "text": "def unlisted(self):\n return self.then(iterators.unlisted)", "title": "" }, { "docid": "62651651f82b5952f005f99ac60e71f0", "score": "0.47796255", "text": "def fetch_incidents(\n client: Client,\n last_run: Dict[str, Any],\n first_fetch: str,\n max_fetch: int,\n alert_types: List[str] | None,\n alert_severities: List[str] | None,\n source_types: List[str] | None,\n fetch_csv: bool | None,\n is_closed: bool,\n fetch_attachments: bool | None,\n) -> tuple[Dict[str, Any], List[dict]]:\n incidents = []\n offset = None\n if (offset_time := last_run.get(\"time\")) and (offset_id := last_run.get(\"last_id\")):\n try:\n datetime.fromisoformat(offset_time.replace(\"Z\", \"+00:00\"))\n offset = f\"{offset_time}::{offset_id}\"\n except ValueError:\n offset = None\n\n list_response = client.list_alert(\n offset=offset,\n limit=max_fetch,\n last_updated_from=first_fetch,\n alert_type=alert_types,\n severity=alert_severities,\n source_type=source_types,\n is_closed=is_closed,\n )\n if not list_response.get(\"content\"):\n return last_run, []\n\n alert_ids = [alert[\"_id\"] for alert in list_response[\"content\"]]\n for alert_id in alert_ids:\n alert_details = client.get_alert(alert_id=alert_id)\n attachments = []\n if fetch_csv is True:\n csv_response = client.get_alert_csv(alert_id=alert_id)\n attachments.append(\n fetch_csv_handler(csv_response=csv_response, alert_id=alert_id)\n )\n if fetch_attachments is True:\n for img in alert_details.get(\"Details\", {}).get(\"Images\", []):\n attachments.append(\n fetch_attachment_parser(\n file_name=f\"{img}.png\", data=client.get_alert_image(img).content\n )\n )\n incident = client.parser.alert_fetch_parser(\n alert_details, attachments=attachments\n )\n incidents.append(\n client.parser.parse_incident(\n alert=incident, attachments=remove_empty_elements(attachments)\n )\n )\n offset_date = list_response[\"content\"][-1].get(\"updateDate\")\n offset_id = list_response[\"content\"][-1].get(\"_id\")\n next_run = {\"time\": offset_date, \"last_id\": offset_id}\n return next_run, incidents", "title": "" }, { "docid": "00b492884835552b6551150b0e036ad4", "score": "0.47786146", "text": "def get_all_causes():\r\n\r\n return Cause.query.all()", "title": "" }, { "docid": "472d94544344ffe1a3cc6599dc15a6d4", "score": "0.47786132", "text": "def describe_pending_maintenance_actions(ResourceIdentifier=None, Filters=None, Marker=None, MaxRecords=None):\n pass", "title": "" }, { "docid": "472d94544344ffe1a3cc6599dc15a6d4", "score": "0.47786132", "text": "def describe_pending_maintenance_actions(ResourceIdentifier=None, Filters=None, Marker=None, MaxRecords=None):\n pass", "title": "" }, { "docid": "54922c089af2703299b547bd7aa0ad90", "score": "0.47731617", "text": "def get_unfailed_interface_objects(self):\n\n unfailed_interface_objects = set()\n\n interface_iter = (interface for interface in self.interface_objects)\n\n for interface in interface_iter:\n if not interface.failed:\n unfailed_interface_objects.add(interface)\n\n return unfailed_interface_objects", "title": "" }, { "docid": "5529314859704dbe7a048913dcfc9adb", "score": "0.47608733", "text": "def get_untracked(self):\n raise NotImplementedError", "title": "" }, { "docid": "ff04bc0bceff875571db1a25195fd050", "score": "0.47519696", "text": "def test_get_referrals_without_total_count(self):\n pass", "title": "" }, { 
"docid": "9c5b7300cbc8733e2d81bd24743b4e39", "score": "0.47508052", "text": "def find_not_online(self):\n unavails = []\n for key in ('data', 'cache', 'spares', 'logs'):\n if getattr(self, key):\n unavails.extend(getattr(self, key).find_not_online())\n return unavails", "title": "" }, { "docid": "b51ad55007123a31fc358579f1a3b22d", "score": "0.47391158", "text": "def filter_old_reports(reports):\n new_reports = []\n for report in reports:\n if not models.Attack.objects.filter(guid=report.guid):\n new_reports.append(report)\n\n return new_reports", "title": "" }, { "docid": "7a7060400b9d7e837cf3d8f83f199842", "score": "0.4738517", "text": "def not_connected(self):\n return self.exclude(source__relation_type='connected')", "title": "" }, { "docid": "533c44164966c775799aeab24cacd4f9", "score": "0.4738046", "text": "def cancel_pending(self):\n for pending in self.get_pending():\n pending.cancel()", "title": "" }, { "docid": "950adddc9a0442b893811e876f556ff2", "score": "0.47272253", "text": "def prune(self):\n return self.identity_map.prune()", "title": "" }, { "docid": "c826f9fd46853f890fa85e37933ae21e", "score": "0.47254625", "text": "def test_notification_get_all_route_unauthorized(self) -> None:\n test_route_auth(\n self, self.client, \"GET\", \"/v2/notifications/\", AuthVariant.UNAUTHORIZED\n )", "title": "" }, { "docid": "72dd1841b4084e531c691392ead05b6d", "score": "0.47226864", "text": "def get_missing_report_sundays(\n existing_sundays: Set[str], eligible_sundays: Set[str]\n) -> Set[str]:\n missing_sundays = eligible_sundays.difference(existing_sundays)\n print(f\"Missing report Sundays: {missing_sundays}\")\n return missing_sundays", "title": "" }, { "docid": "c513d7d7d062fab58b333bcad8d36d87", "score": "0.47216296", "text": "def get_resolved(self):\n return [ i for i in self.get_comp() if i.fields.customfield_10504 ]", "title": "" }, { "docid": "97e298c107d13f8485416f43b5bb882c", "score": "0.4719916", "text": "def get_unavailable_actions(self):\n self._verify_obj()\n actions = []\n for suds_action in self.session.tracker_client.service. 
\\\n getUnavailableActions(self.uri):\n actions.append(WorkflowAction(suds_object=suds_action))\n return actions", "title": "" }, { "docid": "375bd4fda75a6b8467a60c007e51d883", "score": "0.47173196", "text": "def get_issues(epic_id):\n todo_epic = work_item_tracking_client.get_work_items([epic_id], project='simplic-framework', expand='Relations')[0]\n if not todo_epic.relations:\n return []\n else:\n issue_ids = [relation.url.split('/workItems/')[1] for relation in todo_epic.relations if relation.attributes['name'] == 'Child']\n issues = work_item_tracking_client.get_work_items(issue_ids, project='simplic-framework', expand='Relations')\n \n return issues", "title": "" }, { "docid": "e068029864d3a2cff98bd3bdb9e98e11", "score": "0.47133988", "text": "def rejected_requests(self, user):\r\n key = make_key('rejected_requests', user.pk)\r\n rejected_requests = cache.get(key)\r\n if rejected_requests is None:\r\n rejected_requests = self.filter(Q(rejected__isnull=False) | Q(rejected=True), from_user=user)\r\n cache.set(key, rejected_requests)\r\n return rejected_requests", "title": "" }, { "docid": "2f5220d9e387661294bf2eeb7a630e06", "score": "0.47110215", "text": "def Flag_Old_Entries( self ):\n statement = \"SELECT DISTINCT n.nid FROM nrdb AS n LEFT JOIN cross_references AS c ON n.nid = c.nid \"+\\\n \" WHERE c.nid IS NULL\"\n nids = map(lambda x: x[0], self.Execute( statement ).fetchall())\n\n for nid in nids:\n self.Execute( \"UPDATE nrdb SET filter = 0 WHERE nrdb.nid = \" + str(nid) )\n\n return len(nids)", "title": "" }, { "docid": "b25a2e9d991f2e7e37ca7a197aaab985", "score": "0.4708497", "text": "def rest_of_infos(self, uuid):\n return [info for info in self.infos() if info.UID != uuid]", "title": "" }, { "docid": "5c9857dee1b7541492eca6e5fdba96ac", "score": "0.47077146", "text": "def get_reports_noop_query():\n noop_event_query = EqualsOperator('status', 'noop')\n noop_subquery = SubqueryOperator('events')\n noop_subquery.add_query(noop_event_query)\n noop_extract = ExtractOperator()\n noop_extract.add_field(str('certname'))\n noop_extract.add_query(noop_subquery)\n noop_in_query = InOperator('certname')\n noop_in_query.add_query(noop_extract)\n\n other_event_query = NotOperator()\n other_event_query.add(EqualsOperator('status', 'noop'))\n other_subquery = SubqueryOperator('events')\n other_subquery.add_query(other_event_query)\n other_extract = ExtractOperator()\n other_extract.add_field(str('certname'))\n other_extract.add_query(other_subquery)\n other_in_query = InOperator('certname')\n other_in_query.add_query(other_extract)\n other_not_in = NotOperator()\n other_not_in.add(other_in_query)\n\n result = AndOperator()\n result.add([noop_in_query, other_not_in])\n return result", "title": "" }, { "docid": "6258e811947c1b10af9f310f92b94e3c", "score": "0.4701314", "text": "def required_inspections(self) -> Iterable[Inspection]:\n raise NotImplementedError", "title": "" }, { "docid": "8bb37c879e85845f234bb250c5f38c69", "score": "0.46973693", "text": "def my_undocked_ships(self):\n return [ship for ship in self.get_me().all_ships()\n if ship.docking_status == Ship.DockingStatus.UNDOCKED]", "title": "" }, { "docid": "17887889764838ab9ae8220472085f35", "score": "0.4680867", "text": "def inactive(self):\n return self.filter(Q(status=Slide.INACTIVE_STATUS) | Q(~self.get_date_query()))", "title": "" }, { "docid": "e31f48acd2f52976182c25192071cb1e", "score": "0.4678257", "text": "def get_pending_friend_requests(self):\n return self.do_request(\n ASIConnection.people_url + '/' + self.user_id +\n 
'/@pending_friend_requests', method='GET')", "title": "" }, { "docid": "70b30fc6170e85fade87126702f0eb82", "score": "0.46768433", "text": "def test_fetch_incidents(mocker, client, requests_mock, demisto_mocker, last_run):\n from SaasSecurity import main\n\n get_incidents = util_load_json('test_data/get-incidents.json')\n incidents_for_fetch = util_load_json('test_data/fech_incident_data.json')\n mocker.patch.object(demisto, 'command', return_value='fetch-incidents')\n mocker.patch.object(demisto, 'getLastRun', return_value={'last_run_time': last_run})\n requests_mock.get('http://base_url/incident/api/incidents/delta', json=get_incidents)\n\n main()\n\n assert demisto.incidents.call_count == 1\n incidents = demisto.incidents.call_args[0][0]\n if last_run:\n assert len(incidents) == 8\n assert incidents[0]['occurred'] == '2021-08-03T20:25:13Z'\n assert incidents[1]['occurred'] == '2021-08-03T20:25:15Z'\n assert incidents_for_fetch == incidents\n else:\n assert not incidents", "title": "" } ]
751e3123e3738fb33e4824f9c5d67a85
Get status of latest submission for sample sets in the workspace
[ { "docid": "1339076c31977b1205ad36d02b201592", "score": "0.5348673", "text": "def get_sample_set_status(self, configuration):\n return self.get_entity_status('sample_set', configuration)", "title": "" } ]
[ { "docid": "3c7aeddc0da95b45b75c63b6011e29da", "score": "0.614393", "text": "def last_success(cls):\n qs = cls.objects.filter(complete=True, has_error=False)\n return qs.order_by(\"-start_run\")[0] if qs.count() else None", "title": "" }, { "docid": "c4aa6f29437eb3be907e23d96394ae46", "score": "0.6100703", "text": "def get_last_successful_workflow(feature_branch_name):\n recent_workflows = get_recent_workflows_data_request(feature_branch_name).get('items')\n for workflow in recent_workflows:\n if workflow.get('status') == \"success\":\n return workflow.get('id')", "title": "" }, { "docid": "c8e8afacec83a012cad5701c24d5d39f", "score": "0.59169877", "text": "def get_submission_status(self, show_namespaces=False):\n dfs = []\n for i in self.workspace_list:\n df = i.get_submission_status(show_namespaces=show_namespaces)\n if show_namespaces:\n df['workspace'] = '{}/{}'.format(i.namespace, i.workspace)\n else:\n df['workspace'] = i.workspace\n dfs.append(df)\n return pd.concat(dfs, axis=0)", "title": "" }, { "docid": "231eedfd4dbf6664386b00c18d809f1c", "score": "0.5885867", "text": "def status():\n meta, projects, records = load()\n project = get_current_project(meta, projects)\n if not project:\n print(\"Current project not set\")\n return 0\n last_record = get_last_record(project, records)\n print(\"Currently working on project {0}\".format(project.name))\n print(\"Last record {0}\".format(last_record))", "title": "" }, { "docid": "3992c3e59331107853b94c922fc01253", "score": "0.58106935", "text": "def get_entity_status(self, etype, config):\n\n # filter submissions by configuration\n submissions = self.list_submissions(config=config)\n\n # get status of last run submission\n entity_dict = {}\n for k,s in enumerate(submissions, 1):\n print('\\rFetching submission {}/{}'.format(k, len(submissions)), end='')\n if s['submissionEntity']['entityType']!=etype:\n print('\\rIncompatible submission entity type: {}'.format(\n s['submissionEntity']['entityType']))\n print('\\rSkipping : '+ s['submissionId'])\n continue\n r = self.get_submission(s['submissionId'])\n ts = datetime.timestamp(iso8601.parse_date(s['submissionDate']))\n for w in r['workflows']:\n entity_id = w['workflowEntity']['entityName']\n if entity_id not in entity_dict or entity_dict[entity_id]['timestamp']<ts:\n entity_dict[entity_id] = {\n 'status':w['status'],\n 'timestamp':ts,\n 'submission_id':s['submissionId'],\n 'configuration':s['methodConfigurationName']\n }\n if 'workflowId' in w:\n entity_dict[entity_id]['workflow_id'] = w['workflowId']\n else:\n entity_dict[entity_id]['workflow_id'] = 'NA'\n print()\n status_df = pd.DataFrame(entity_dict).T\n status_df.index.name = etype+'_id'\n\n return status_df[['status', 'timestamp', 'workflow_id', 'submission_id', 'configuration']]", "title": "" }, { "docid": "fa2aa9cc3f1038f6b1c00b6799f3b407", "score": "0.57996917", "text": "def getLatestJobs():\n pass", "title": "" }, { "docid": "8fa7f295ae582f7f339263206b23a7c8", "score": "0.57746893", "text": "def _last_test_result(self):\n return self._test_results[self._test_names_in_order[-1]]", "title": "" }, { "docid": "94997ea484bf3e64cb74e2351ae278c7", "score": "0.5731888", "text": "def status(self):\n print(self.last_result.__dict__)\n print(self.last_result.testsRun)\n print(self.last_result.errors)\n print(self.last_result.failures)\n print(self.last_result.skipped)\n print(self.last_result.expectedFailures)\n print(self.last_result.unexpectedSuccesses)", "title": "" }, { "docid": "9f228f27b2116a994317d0cce35d8276", "score": "0.57154024", 
"text": "def summarize_latest(self, sb):\n return self._summarize_execution(-1, sb)", "title": "" }, { "docid": "b3916d428bb3978db32b135f7c95b816", "score": "0.5692652", "text": "def get_last_submitted_changeset(self, namespace=None):\n changeset_root_key = _Changeset.get_root_key(namespace=namespace)\n # Use an ancestor query to maintain strong consistency.\n changeset_query = _Changeset.query(\n ancestor=changeset_root_key, namespace=namespace)\n changeset_query = changeset_query.filter(\n _Changeset.status == ChangesetStatus.submitted)\n changeset_query = changeset_query.order(-_Changeset.num)\n latest_changeset = list(changeset_query.fetch(1))\n if not latest_changeset:\n return None\n return Changeset(num=latest_changeset[0].num, namespace=namespace)", "title": "" }, { "docid": "2bfc27a18674fd7aaa4100f38d738662", "score": "0.5654942", "text": "def get_submission_history(self, sample_id, config=None):\n\n # filter submissions by configuration\n submissions = self.list_submissions(config=config)\n\n # filter by sample\n submissions = [s for s in submissions\n if s['submissionEntity']['entityName']==sample_id\n and 'Succeeded' in list(s['workflowStatuses'].keys())\n ]\n\n outputs_df = []\n for s in submissions:\n r = self.get_submission(s['submissionId'])\n\n metadata = self.get_workflow_metadata(s['submissionId'], r['workflows'][0]['workflowId'])\n\n outputs_s = pd.Series(metadata['outputs'])\n outputs_s.index = [i.split('.',1)[1].replace('.','_') for i in outputs_s.index]\n outputs_s['submission_date'] = iso8601.parse_date(s['submissionDate']).strftime('%H:%M:%S %m/%d/%Y')\n outputs_df.append(outputs_s)\n\n outputs_df = pd.concat(outputs_df, axis=1).T\n # sort by most recent first\n outputs_df = outputs_df.iloc[np.argsort([datetime.timestamp(iso8601.parse_date(s['submissionDate'])) for s in submissions])[::-1]]\n outputs_df.index = ['run_{}'.format(str(i)) for i in np.arange(outputs_df.shape[0],0,-1)]\n\n return outputs_df", "title": "" }, { "docid": "28da2835583e74f4ff7c875f0e6bdfab", "score": "0.5640707", "text": "def get_most_relevant_run(runs):\n\n # if there is a current run in the set pick it\n most_relevant_run = next(\n (run for run in runs if run.availability == AvailabilityType.current.value),\n None,\n )\n\n if not most_relevant_run:\n # if there a future runs in the set, pick the one with earliest start date\n runs = runs.order_by(\"start_date\")\n most_relevant_run = next(\n (\n run\n for run in runs\n if run.availability\n in [\n AvailabilityType.upcoming.value,\n AvailabilityType.starting_soon.value,\n ]\n ),\n None,\n )\n\n if not most_relevant_run:\n # get latest past run by start date\n most_relevant_run = next((run for run in runs.reverse()))\n\n return most_relevant_run", "title": "" }, { "docid": "b3d3e9751c098c823ca753284685c7ee", "score": "0.5608866", "text": "def all_latest_results(self):\n controllers.connect()\n runres = controllers.TestResultsController.latest_run_for_testcase(self.test_name)\n self.resultslocation = runres.resultslocation\n return controllers.TestResultsController.all_results_for_run(runres, self.test_name)", "title": "" }, { "docid": "a83d7430e3f641f4d92eed77258eade8", "score": "0.56025296", "text": "def getLastRun(suite):\n\n outdir = suite.testTopDir + suite.suiteName + \"-tests/\"\n \n dirs = []\n for dir in os.listdir(outdir):\n # this will work through 2099\n if os.path.isdir(outdir + dir) and dir.startswith(\"20\"):\n dirs.append(dir)\n\n dirs.sort()\n\n return dirs[-1]", "title": "" }, { "docid": "e18a3d2759b51dea16ff3f5acca7d6e5", 
"score": "0.55732614", "text": "def display_status(self, configuration, entity='sample', filter_active=True):\n # workflow status for each sample (from latest/current run)\n status_df = self.get_sample_status(configuration)\n\n # get workflow details from 1st submission\n metadata = self.get_workflow_metadata(status_df['submission_id'][0], status_df['workflow_id'][0])\n\n workflow_tasks = list(metadata['calls'].keys())\n\n print(status_df['status'].value_counts())\n if filter_active:\n ix = status_df[status_df['status']!='Succeeded'].index\n else:\n ix = status_df.index\n\n state_df = pd.DataFrame(0, index=ix, columns=workflow_tasks)\n for k,i in enumerate(ix, 1):\n print('\\rFetching metadata for sample {}/{}'.format(k, len(ix)), end='')\n metadata = self.get_workflow_metadata(status_df.loc[i, 'submission_id'], status_df.loc[i, 'workflow_id'])\n state_df.loc[i] = [metadata['calls'][t][-1]['executionStatus'] if t in metadata['calls'] else 'Waiting' for t in workflow_tasks]\n print()\n state_df.rename(columns={i:i.split('.')[1] for i in state_df.columns}, inplace=True)\n summary_df = pd.concat([state_df[c].value_counts() for c in state_df], axis=1).fillna(0).astype(int)\n print(summary_df)\n state_df[['workflow_id', 'submission_id']] = status_df.loc[ix, ['workflow_id', 'submission_id']]\n\n return state_df, summary_df", "title": "" }, { "docid": "762b2054738ce5332120f8b8315b353e", "score": "0.55556893", "text": "def GetLastOutputs(self):\n return self.lastResults", "title": "" }, { "docid": "974b6afca33097fee66a83077ba8e306", "score": "0.5540115", "text": "def status(self):\n return bee_client.query(self.wf_id)[0]", "title": "" }, { "docid": "642ae0635474a29ef607fb8f2779afcf", "score": "0.5530286", "text": "def test_get_submission_history(self):\n pass", "title": "" }, { "docid": "78a201a5d027fa3efb5d67688fc00079", "score": "0.55020255", "text": "def latest_result(self):\n controllers.connect()\n result = controllers.TestResultsController.latest_result_for(self.test_name)\n self.set_resultslocation_from_testresult(result)\n return result", "title": "" }, { "docid": "61d26edd0fd9aeee7cc44b27b284e16b", "score": "0.548871", "text": "def last_job(self):\n self.ensure_logged_in()\n try:\n jobs = self.job_list(False, 0, 1)\n return jobs[0]\n except xmlrpc.client.Fault as e:\n raise BoaException() from e", "title": "" }, { "docid": "b801c05cefd5e838bf798e2685a855fb", "score": "0.54785585", "text": "def test_last_known_status(self):\n with mock.patch('clinical_trials.clinical_study.get_schema') as donk:\n donk.return_value = self.schema\n for study_id, overall_status in (('NCT02348489', ''),\n ('NCT01565668', ''),\n ('NCT02536534', '')):\n with mock.patch(\"clinical_trials.clinical_study.get_study\") as dink:\n dink.return_value = self.cache.get(study_id)\n study = ClinicalStudy.from_nctid(study_id)\n self.assertEqual(study.last_known_status, overall_status)", "title": "" }, { "docid": "36f9ddcf2f33d2a64a14895b98ee5fb0", "score": "0.54498476", "text": "def _get_last_coverage_build(self):\n self.ensure_one()\n return self.env['runbot.build'].search([\n ('branch_id.id', '=', self.id),\n ('local_state', 'in', ['done', 'running']),\n ('coverage_result', '>=', 0.0),\n ], order='sequence desc', limit=1)", "title": "" }, { "docid": "31eb55df5d782f53b4c91b85e402bc8e", "score": "0.5442207", "text": "def get_latest_succesful_test_results():\n\n url = env.test_results_api\n resp = requests.get(url,\n timeout=env.timeout,\n headers=env.header)\n\n env.set_return_header(resp.headers)\n\n if resp.status_code != 
200:\n LOG.debug(\"Request for test descriptor returned with \" +\n (str(resp.status_code)))\n return False, json.loads(resp.text)\n\n data = json.loads(resp.text)\n \n match = next(d for d in data if d['status'] == 'PASSED')\n \n if (str(date.today()) not in match['started_at']):\n return False, json.loads(resp.text)\n \n return True, match", "title": "" }, { "docid": "f550e331c66640f192eacd730790cd24", "score": "0.54297316", "text": "def get_stack_set_last_operation_status(stack_set):\n latestOperationTime = datetime(1970, 1, 1, 00, 00, 00, 0, tzinfo=tzutc())\n response = cf.list_stack_set_operations(StackSetName=stack_set)\n operations = response['Summaries']\n status = 'NONE'\n for operation in operations:\n if operation['CreationTimestamp'] > latestOperationTime:\n status = operation['Status']\n latestOperationTime = operation['CreationTimestamp']\n\n return status", "title": "" }, { "docid": "51415cfba75f5869738aac3357cbfbc9", "score": "0.54249287", "text": "def get_latest_checkpoint(self):\n\n checkpoint = -1\n experiment_config = self.master_config.get('experiment_config')\n run_generations = int(experiment_config.get('num_generations'))\n\n self.seen_checkpoint_ids = self.checkpoint_persistence.restore()\n num_seen = len(self.seen_checkpoint_ids)\n if num_seen == 0:\n return checkpoint, run_generations\n\n checkpoint = self.seen_checkpoint_ids[num_seen - 1]\n run_generations = run_generations - num_seen\n\n return checkpoint, run_generations", "title": "" }, { "docid": "52b6ac333f3c6ea0f51656141bc58d7b", "score": "0.53906155", "text": "def get_last_successful(logger, config_name):\n last_successful_file = set_last_successful_file_name(config_name, \"audits\")\n\n if os.path.isfile(last_successful_file):\n with open(last_successful_file, \"r+\") as last_run:\n check_for_rows = last_run.readlines()\n if check_for_rows:\n last_successful = check_for_rows[0]\n last_successful = last_successful.strip()\n else:\n last_successful = \"2000-01-01T00:00:00.000Z\"\n\n else:\n beginning_of_time = \"2000-01-01T00:00:00.000Z\"\n last_successful = beginning_of_time\n create_directory_if_not_exists(logger, \"last_successful\")\n with open(last_successful_file, \"w\") as last_run:\n last_run.write(last_successful)\n logger.info(\n \"Searching for audits since the beginning of time: \" + beginning_of_time\n )\n return last_successful", "title": "" }, { "docid": "8adfd9087db3690d84383ad9195a099f", "score": "0.53901136", "text": "def get_last_submission_date(self, obj):\n xform_ids = obj.projectxform_set.values_list('xform', flat=True)\n last_submission = Instance.objects.\\\n order_by('-date_created').\\\n filter(xform_id__in=xform_ids).values_list('date_created',\n flat=True)\n\n # Force explicit serialization to a list as it used to rely on\n # an implicit one.\n last_submission = list(last_submission)\n return last_submission and last_submission[0]", "title": "" }, { "docid": "78c9d42c78c88d58fac5f02810a795ab", "score": "0.53626305", "text": "def get_last_ingestion_jobs_by_status(self, status):\n results = self.mssql_db_mgr.execute_query(self.LAST_JOBS_BY_STATUS_QUERY.format(status))\n return results", "title": "" }, { "docid": "983dd80f8397e6b67416d76485bdb988", "score": "0.53563493", "text": "def last_job_run_id(self):\n recent_jobs = self._data.get('summary_fields', {}).get('recent_jobs')\n ids = [v for job in recent_jobs for k, v in job.items() if k == 'id']\n ids.sort()\n return ids[-1]", "title": "" }, { "docid": "3f3db2ab76fa397a1f3b9ae566aa6c18", "score": "0.5341646", "text": "def 
get_experiment_status(self, name, namespace=None):\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n katibexp = self.get_experiment(name, namespace=namespace)\n last_condition = katibexp.get(\"status\", {}).get(\"conditions\", [])[-1]\n return last_condition.get(\"type\", \"\")", "title": "" }, { "docid": "bbbe1244922fc6a2642f2016ef8a6a3e", "score": "0.5334172", "text": "def test4_final_workflow_status(self):\n\n self.assertFalse(self.wf.is_running)\n self.assertFalse(self.wf.is_completed)\n self.assertTrue(self.wf.has_failed)\n\n self.assertIsNotNone(self.wf.starttime)\n self.assertIsNone(self.wf.finishtime)\n self.assertIsNotNone(self.wf.updatetime)\n self.assertLessEqual(self.wf.runtime, 9)\n\n bp = self.wf.workflow.query_nodes(key='test3')\n self.assertTrue(bp.status == 'aborted')", "title": "" }, { "docid": "0c466efb8c9f94ec57a84f16251a671c", "score": "0.53323126", "text": "def get_most_recent_workout_data(self):\n response = requests.get(\n \"https://api.runningahead.com/rest/logs/me/workouts\",\n params=self.payload,\n timeout=(20, 20)\n )\n # print json.dumps(response.json()) # Debug\n # raw_input()\n return response.json()['data']['entries']", "title": "" }, { "docid": "f1a1e32d3982786c4f9d3c8646a2f898", "score": "0.5304187", "text": "def get_newest_status(self):\n site_ids = SiteStatus.objects.values_list('site', flat=True).distinct()\n tracked_sites = []\n for id in site_ids:\n tracked_sites.append(TrackedSite.objects.get(id=id))\n newest_status_list = []\n for tracked_site in tracked_sites:\n newest_status_list.append(SiteStatus.objects.filter(\n site=tracked_site).order_by(\"-timestamp\")[0])\n return newest_status_list", "title": "" }, { "docid": "40e1c37e80437a543e9def49fea1c843", "score": "0.5284763", "text": "def getLastResult(self):\r\n return self.lastResult", "title": "" }, { "docid": "4ce4c1a2c04e15c5ae16ad243872569f", "score": "0.52835965", "text": "async def get_latest_data_generation_status(\n current_user: User = Depends(auth.get_current_user_and_bot),\n):\n latest_data_generation_status = TrainingDataGenerationProcessor.fetch_latest_workload(current_user.get_bot(), current_user.get_user())\n return {\"data\": latest_data_generation_status}", "title": "" }, { "docid": "1298f2ec14bada0559169ffd5c62c1e1", "score": "0.5271266", "text": "def last_activity_all(self):\r\n return self.command(WORKER_LAST_ACTION)", "title": "" }, { "docid": "b6500b88611c5d0cc95eae853002c6a2", "score": "0.5270354", "text": "def post_get_latest_analysis():\n content = request.json\n analysis_obj = store.get_latest_analysis(case_id=content.get(\"case_id\"))\n if analysis_obj:\n data = stringify_timestamps(analysis_obj.to_dict())\n return jsonify(**data), 200\n return jsonify(None), 200", "title": "" }, { "docid": "082bf8c117a380d464fac9169819e3e8", "score": "0.52424496", "text": "def test4_final_workflow_status(self):\n\n self.assertFalse(self.wf.is_running)\n self.assertFalse(self.wf.is_completed)\n self.assertTrue(self.wf.has_failed)\n\n self.assertIsNotNone(self.wf.starttime)\n self.assertIsNone(self.wf.finishtime)\n self.assertIsNotNone(self.wf.updatetime)\n self.assertLessEqual(self.wf.runtime, 9)\n\n bp = self.wf.workflow.query_nodes(key='test3')\n\n self.assertEqual(bp.task_metadata.retry_count(), 0)\n self.assertEqual(self.wf.failed_tasks, [bp])", "title": "" }, { "docid": "a880a83b061dd2c97fe3a095efa346b4", "score": "0.5215724", "text": "def latest_run(self) -> pulumi.Output['outputs.ScanRunResponse']:\n return pulumi.get(self, \"latest_run\")", "title": 
"" }, { "docid": "a55a39bd476b22efcfb96b00c0245784", "score": "0.5201623", "text": "def get(self):\n AnomalyEngine = app.config['AnomalyEngine']\n res = []\n for anomaly in AnomalyEngine.current_running_anomalies():\n tmp = {\n \"name\": anomaly.name,\n \"parameter\": AnomalyEngine.last_parameters[anomaly.name],\n \"default_param\": anomaly.default_parameters == AnomalyEngine.last_parameters[anomaly.name]\n }\n res.append(tmp)\n res_code = 201 if res else 200\n return res, res_code", "title": "" }, { "docid": "814a43bfcd4d4eb5e8c56d2108231fb4", "score": "0.51984334", "text": "def get_best_score(version):\n logger = logging.getLogger(__name__)\n logger.info(\"Request best submission evaluation for version {0}\".format(version))\n from src.common_paths import get_submissions_version_path, get_project_path\n filepath = os.path.join(get_submissions_version_path(version), \"upload_history.jl\")\n if not os.path.exists(filepath):\n return None, np.Inf\n with open(filepath) as f:\n upload_history = [json.loads(x) for x in f.read().strip().split(\"\\n\")]\n best_submission = min(upload_history, key=lambda x:x[\"score\"])\n alias, score = best_submission[\"alias\"], best_submission[\"score\"]\n logger.info(\"Best submission found: {0}, {1}, {2}\".format(version, alias, score))\n return alias, score", "title": "" }, { "docid": "47f45c74e2fe26e861b3f6be6805ef8a", "score": "0.51923555", "text": "def last_request():\n # If there is at least 1 request, return the last one\n latest_requests = HTTPretty.latest_requests\n if latest_requests:\n return latest_requests[len(latest_requests) - 1]", "title": "" }, { "docid": "65c971f408b97fc29c032044c8f500bd", "score": "0.51917636", "text": "def fetch_latest_resource():", "title": "" }, { "docid": "e2331d0e6874f804aca19a5d776d30c8", "score": "0.51896393", "text": "def get_last_complete_cluster_backup(medusa_config):\n backup = medusa.report_latest.get_latest_complete_cluster_backup(medusa_config)\n if backup is not None:\n print(backup.name)\n else:\n print(\"Could not find any full backup for the cluster\")", "title": "" }, { "docid": "0cacc1ecc36bc54b643cd0344c19c6b4", "score": "0.5188825", "text": "def get_latest_run_dir(self):\n latest_run = None\n if tf.gfile.Exists(self.workspace):\n run_dir = os.path.join(self.workspace, self.model_dir)\n if tf.gfile.Exists(run_dir):\n all_runs = sorted(os.listdir(run_dir), key=lambda x: datetime.datetime.strptime(x, DATETIME_FORMAT),\n reverse=True)\n if len(all_runs) > 0:\n print(\"Found\", len(all_runs), \"runs. Looking for one with a matching TensorFlow configuration.\")\n current_config = FLAGS.__dict__['__flags']\n for run in all_runs:\n current_config_path = os.path.join(run_dir, run, FLAG_CONFIG_FILE)\n if tf.gfile.Exists(current_config_path):\n with open(current_config_path, 'r') as f:\n run_config = json.load(f)\n if current_config == run_config:\n print(\"Run\", run,\n \"is the latest run with a matching configuration. 
Selecting this one.\")\n latest_run = run\n break\n return latest_run", "title": "" }, { "docid": "f758eca54cae9e77cc15079435cf962a", "score": "0.51808053", "text": "def save_job_status(self) -> None:\n condor_q_data = self.get_condor_q_data()\n\n summary_keys = ['AcctGroup', 'AccountingGroup', 'ClusterId', 'JobBatchName', 'kb_app_id',\n 'QDate', 'JobStartDate', 'JobCurrentStartDate',\n 'JobCurrentStartExecutingDate', 'RemoteWallClockTime',\n 'CpusProvisioned', 'CPUsUsage', 'CumulativeRemoteSysCpu',\n 'CumulativeRemoteUserCpu', 'MemoryUsage', 'JobStatus',\n 'kb_function_name', 'kb_module_name', 'CLIENTGROUP', 'KB_CLIENTGROUP', 'RemoteHost',\n 'LastRemoteHost'\n 'LastRejMatchReason', 'HoldReason', 'HoldReasonCode', 'LastHoldReason',\n 'LastHoldReasonCode', 'NumSystemHolds']\n\n queue_stats = self.get_queue_stats()\n\n rows = []\n for job in condor_q_data:\n job_info = {}\n\n for key in summary_keys:\n job_info[key] = job.get(key, '')\n\n if job_info['JobStatus'] == 4:\n continue\n\n job_info[\"AcctGroup\"] = job_info[\"AccountingGroup\"]\n del job_info[\"AccountingGroup\"]\n\n job_info['CLIENTGROUP'] = job_info.get('KB_CLIENTGROUP', 'Unknown Job Client Group')\n cg = job_info['CLIENTGROUP']\n\n # Find jobs ahead for queued jobs\n job_info['JobsAhead'] = 0\n if job_info['JobStatus'] == 1:\n job_info['JobsAhead'] = queue_stats[cg]['Idle']\n\n # Possibly remove these to save space in json return object\n job_info['QDateHuman'] = str(datetime.datetime.utcfromtimestamp(job_info['QDate']))\n job_info['JobStatusHuman'] = job_status_codes[job_info['JobStatus']]\n\n for key in job_info:\n job_info[key] = str(job_info[key])\n\n rows.append(job_info)\n\n condor_q_lookup = {'rows': rows,\n 'created': datetime.datetime.utcnow()}\n\n self.jobs.save(condor_q_lookup)", "title": "" }, { "docid": "2a0a0248e759cce6a9148455438373ac", "score": "0.51800025", "text": "def getBatchJobStatusBySite(self):\n args = {}\n callname = 'batchjobstatusbysite'\n return self._getResult(callname, args = args, verb = \"GET\")", "title": "" }, { "docid": "421c02d1fb5967eb5140495951eaa9d0", "score": "0.5179048", "text": "def test_get_submitters_lead_submission():\n sub = synapseclient.Submission(evaluationId=\"2\", entityId=\"2\", versionNumber=\"3\")\n temp = tempfile.NamedTemporaryFile()\n sub.filePath = temp.name\n\n with patch.object(\n utils, \"evaluation_queue_query\", return_value=[{\"objectId\": 1}]\n ) as patch_query, patch.object(\n SYN, \"getSubmission\", return_value=sub\n ) as patch_getsub:\n dl_file = submission.get_submitters_lead_submission(\n SYN, SUBMISSIONID, QUEUEID, \"key\", verbose=False\n )\n patch_query.assert_called_once()\n patch_getsub.assert_called_once_with(1, downloadLocation=\".\")\n assert dl_file == \"previous_submission.csv\"\n os.unlink(\"previous_submission.csv\")", "title": "" }, { "docid": "50704446bab3d34ac05bc8788e378f98", "score": "0.51778156", "text": "def find_latest_report(self):\n\n local_path = self.information.get_framework_local_path()\n report_path = os.path.join(local_path, 'report')\n files, name = self.find(report_path, '\\\\d+-\\\\d+')\n files.sort()\n latest_report = files[-1]\n\n return latest_report", "title": "" }, { "docid": "5ea6c2499116e2b27bb777e65e015d88", "score": "0.51690185", "text": "def get_submission_status(self, config=None, filter_active=True, show_namespaces=False):\n # filter submissions by configuration\n submissions = self.list_submissions(config=config)\n\n statuses = ['Succeeded', 'Running', 'Failed', 'Aborted', 'Aborting', 'Submitted', 'Queued']\n df = 
[]\n for s in submissions:\n d = {\n 'entity_id':s['submissionEntity']['entityName'],\n 'status':s['status'],\n 'submission_id':s['submissionId'],\n 'date':iso8601.parse_date(s['submissionDate']).strftime('%H:%M:%S %m/%d/%Y'),\n }\n d.update({i:s['workflowStatuses'].get(i,0) for i in statuses})\n if show_namespaces:\n d['configuration'] = s['methodConfigurationNamespace']+'/'+s['methodConfigurationName']\n else:\n d['configuration'] = s['methodConfigurationName']\n df.append(d)\n df = pd.DataFrame(df)\n df.set_index('entity_id', inplace=True)\n df['date'] = pd.to_datetime(df['date'])\n df = df[['configuration', 'status']+statuses+['date', 'submission_id']]\n if filter_active:\n df = df[(df['Running']!=0) | (df['Submitted']!=0)]\n return df.sort_values('date')[::-1]", "title": "" }, { "docid": "0f29b940b17e43616519eb0ff1ebb24c", "score": "0.51634496", "text": "def run(cimi_api_url=\"https://nuv.la/api\", last_run=None):\n cl = CIMIClient(cimi_api_url)\n\n # --- Specific to this job\n resource_name = \"user\"\n query = '$orderby=created:desc&$filter=created>\"%s\"&$last=100' % last_run\n # --- //\n\n collection = cl.local_get(resource_name, query=query)\n collection_name = resource_name + \"s\"\n\n events = collection[collection_name]\n if events:\n # This means the collection is NOT empty\n # Since these are ordered desc, the first entry is the newest one\n last_run = events[0][\"created\"]\n\n print(events)\n return events, last_run", "title": "" }, { "docid": "4bd9e8cd41cff8ec5a25a8002389745d", "score": "0.5157653", "text": "def mode_lastlog(args, logger, fail_status=False):\n _jobs = sorted(_get_job_results(args, logger), key=lambda x: x.start_time)\n jobs_by_name = {}\n for job in _jobs:\n if job.name not in jobs_by_name:\n jobs_by_name[job.name] = []\n jobs_by_name[job.name].append(job)\n\n if len(jobs_by_name) > 0:\n view_jobs = []\n for (name, jobs) in jobs_by_name.items():\n job = jobs[-1] # last job (any status)\n if job.output_filename:\n if fail_status and job.exit_status != 0:\n view_jobs.append(job)\n elif not fail_status:\n view_jobs.append(job)\n\n if view_jobs:\n for job in view_jobs:\n if os.path.isfile(job.output_filename):\n with open(job.output_filename, 'r') as f:\n print '=== Script output of {!r}'.format(job)\n shutil.copyfileobj(f, sys.stdout)\n print '=== End of script output\\n'\n else:\n print('No script output found for {!s} with fail_status={!s}'.format(', '.join(jobs_by_name.keys()), fail_status))\n else:\n print \"No jobs found\"", "title": "" }, { "docid": "067856c62c37ccfd69f1e9cfe22e5b2a", "score": "0.51561004", "text": "def status_from_test_obj(pav_cfg: dict, test: TestRun):\n\n status_f = test.status.current()\n\n if status_f.state == STATES.BUILDING:\n # When building, the update time comes from the build log\n last_update = test.builder.log_updated()\n status_f.note = ' '.join([\n status_f.note, '\\nLast updated: ',\n str(last_update) if last_update is not None else '<unknown>'])\n elif status_f.state in STATES.RUNNING:\n # When running check for recent run log updates, and check the\n # scheduler if things have gone on too long.\n\n log_path = test.path/'run.log'\n if log_path.exists():\n mtime = log_path.stat().st_mtime\n else:\n mtime = None\n\n if mtime is None or time.time() - mtime > RUNNING_UPDATE_TIMEOUT:\n sched = schedulers.get_plugin(test.scheduler)\n sched_status_f = sched.job_status(pav_cfg, test)\n if sched_status_f.state != STATES.SCHED_STARTUP:\n status_f = sched_status_f\n else:\n last_update = format_mtime(mtime)\n status_f.note = ' 
'.join([\n status_f.note, '\\nLast updated:', last_update])\n\n elif status_f.state in STATES.SCHEDULED:\n # When the state is scheduled, get the real status from the scheduler.\n sched = schedulers.get_plugin(test.scheduler)\n status_f = sched.job_status(pav_cfg, test)\n\n try:\n # Use the actual node count one the test is running.\n nodes = test.var_man.get('sched.test_nodes', '')\n except DeferredError:\n # Otherwise use the chunk size when requesting all nodes\n # or the requested size otherwise.\n nodes = '({})'.format(test.var_man.get('sched.requested_nodes', '?'))\n\n result = test.results.get('result', '') or ''\n series_id = test.series or ''\n\n return {\n 'job_id': str(test.job) if test.job is not None else '',\n 'name': test.name,\n 'nodes': nodes,\n 'note': status_f.note,\n 'part': test.var_man.get('sched.partition'),\n 'result': result,\n 'series': series_id,\n 'state': status_f.state,\n 'test_id': test.id if test.full_id.startswith('main') else test.full_id,\n 'time': status_f.when,\n }", "title": "" }, { "docid": "f61d4d711696f7ee8ffd3e1b6f671d56", "score": "0.5149837", "text": "def get_recently_submitted_training_jobs_list(self):\n self.logger.debug('getting list of recently submitted training jobs')\n response = self.session.get(self.path_to_url(TRAINING_JOBS_ENDPOINT))\n response.raise_for_status()\n return response", "title": "" }, { "docid": "9f4603056e301047f9acbbf046bf44f2", "score": "0.51423335", "text": "def last_status(self) -> StatusCode:\n return self._last_status", "title": "" }, { "docid": "a7a8b3092a89c6c4c1e4b2264c7543ec", "score": "0.5140409", "text": "def get_last_changeset(self):\n c = self.repository.changesets.filter(\n date__lte=self.last_changed)#.exclude(revision=self.revision)\n if c.count():\n return c[0]\n else:\n return self.repository.changesets.get(date=self.last_changed)", "title": "" }, { "docid": "dc10ff7e60ef1fc50658acc3c06a43a0", "score": "0.51385325", "text": "def get_benchmark_run(self):\n\t\treturn self.__result_object['benchmark_info']['run']", "title": "" }, { "docid": "156279c5f95f6790c14057d15bdbbffa", "score": "0.5126038", "text": "def get_latest_log(self):\n return [self.last_success, self.last_attempt, self.total_attempts, self.save_directory]", "title": "" }, { "docid": "35637478ff092b2c4ba27a30d82f80c1", "score": "0.5125694", "text": "def getBatchJobStatus(self):\n args = {}\n callname = 'batchjobstatus'\n return self._getResult(callname, args = args, verb = \"GET\")", "title": "" }, { "docid": "11eb537c09f41104adadd9cb398edf80", "score": "0.5122107", "text": "def retrieve_output(n, submitted):\n if n and submitted:\n try:\n job = Job.fetch(submitted[\"id\"], connection=conn)\n print(job.get_status())\n if job.get_status() == \"failed\":\n print(job.exc_info)\n if job.get_status() == \"finished\":\n # job is finished, return result, and store id\n return job.result[0], job.result[1], {\"id\": submitted[\"id\"]}\n\n # job is still running, get progress and update progress bar\n return dash.no_update, dash.no_update, dash.no_update\n\n except NoSuchJobError:\n # something went wrong, display a simple error message\n return dash.no_update, dash.no_update, dash.no_update\n # nothing submitted yet, return nothing.\n return dash.no_update, None, {}", "title": "" }, { "docid": "06806cbcad28d463b22902e559e09211", "score": "0.51113087", "text": "def _getlast(self):\n return self.last_call", "title": "" }, { "docid": "a6857cf7ee4efb99308c382cd1cd5b29", "score": "0.5110251", "text": "def getStatus(jobid):", "title": "" }, { "docid": 
"170f8554fb25d0682567eb88bb1cb760", "score": "0.5105953", "text": "def get_status(self) -> str:\n\n if self.db_status == AssignmentState.CREATED:\n return super().get_status()\n elif self.db_status in AssignmentState.final_unit():\n # These statuses don't change with a get_status call\n return self.db_status\n\n # These statuses change when we change an existing agent\n agent = self.get_assigned_agent()\n if agent is None:\n if self.db_status in AssignmentState.completed():\n logger.warning(f\"Agent for completed unit {self} is None\")\n\n return self.db_status\n\n # Get API client\n requester: \"ProlificRequester\" = self.get_requester()\n client = self._get_client(requester.requester_name)\n\n # time.sleep(2) # Prolific servers may take time to bring their data up-to-date\n\n # Get Study from Prolific, record status\n study = prolific_utils.get_study(client, self.get_prolific_study_id())\n if study is None:\n return AssignmentState.EXPIRED\n self.datastore.update_study_status(study.id, study.status)\n study_is_completed = study.status in [\n StudyStatus.COMPLETED,\n StudyStatus.AWAITING_REVIEW,\n ]\n\n # Get Submission from Prolific, record status\n datastore_unit = self.datastore.get_unit(self.db_id)\n prolific_submission_id = datastore_unit[\"prolific_submission_id\"]\n prolific_submission = None\n if prolific_submission_id:\n prolific_submission = prolific_utils.get_submission(client, prolific_submission_id)\n self.datastore.update_submission_status(\n prolific_submission_id,\n prolific_submission.status,\n )\n\n # Check Unit status\n local_status = self.db_status\n external_status = self.db_status\n\n if study_is_completed:\n # Note: Prolific cannot expire a study while there are incomplete Submissions\n # so we always expire the unit here (without checking for Submissions status).\n\n # Checking for NULL worker_id to avoid labeling not-yet-worked-on units as \"COMPLETED\"\n if self.worker_id is None:\n external_status = AssignmentState.EXPIRED\n else:\n external_status = AssignmentState.COMPLETED\n\n if not study_is_completed and prolific_submission:\n if prolific_submission.status == SubmissionStatus.ACTIVE:\n if self.worker_id is None:\n # Check for NULL worker_id to prevent accidental reversal of unit's progress\n if external_status != AssignmentState.LAUNCHED:\n logger.debug(\n f\"{self.log_prefix}Moving Unit {self.db_id} status from \"\n f\"`{external_status}` to `{AssignmentState.LAUNCHED}`\"\n )\n external_status = AssignmentState.LAUNCHED\n elif prolific_submission.status == SubmissionStatus.PROCESSING:\n # This is just Prolific's transient status to move Submission between 2 statuses\n pass\n else:\n external_status = SUBMISSION_STATUS_TO_ASSIGNMENT_STATE_MAP.get(\n prolific_submission.status,\n )\n if not external_status:\n raise Exception(f\"Unexpected Submission status {prolific_submission.status}\")\n\n if external_status != local_status:\n self.set_db_status(external_status)\n\n return self.db_status", "title": "" }, { "docid": "eae884eb68261814199438f850569034", "score": "0.5096964", "text": "def get_status(last_timestamp: float, force: bool = False) -> dict:\n project = cauldron.project.get_internal_project(timeout=0)\n\n if project:\n project_data = project.kernel_serialize()\n step_changes = _utils.get_step_changes_after(\n project=project,\n timestamp=last_timestamp - 1,\n write_running=True\n )\n else:\n project_data = None\n step_changes = []\n\n response = environ.Response().update(\n version=environ.version,\n python_version=environ.python_version,\n 
notebook_version=environ.notebook_version,\n ui_server_version=environ.version,\n ui_python_version=environ.python_version,\n remote=environ.remote_connection.serialize(),\n project=project_data,\n step_changes=step_changes,\n view=environ.view,\n is_active_async=ui_configs.is_active_async(),\n )\n\n results = response.serialize()\n results['hash'] = _utils.get_digest_hash(results, force)\n return results", "title": "" }, { "docid": "442c88f0a4bdadc3549498358c54d511", "score": "0.50963163", "text": "def retrievePipelineToolStatus():\n global submissionInfo\n\n integrationDir = submissionInfo[ \"RepoDirs\" ][ \"submission/Integration/Main\" ].strip()\n jobWriterPath = os.path.join( integrationDir, \"JobWriter.py\" )\n\n scenePath = hou.hipFile.path()\n argArray = [\"-ExecuteScript\", jobWriterPath, \"Houdini\", \"--status\", \"--scene-path\", scenePath]\n statusMessage = CallDeadlineCommand( argArray )\n\n return statusMessage", "title": "" }, { "docid": "1a4627286efa9876a2b9b2f82f6bcebd", "score": "0.509126", "text": "def max_staleness(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"max_staleness\")", "title": "" }, { "docid": "e4d01a2e23365f2d358e4c0bc4b89f34", "score": "0.5089037", "text": "def status(self):\n statuses = [future.status for future in self]\n for stat in ['PENDING', 'STARTED', 'RETRY', 'REVOKED', 'FAILURE', 'SUCCESS']:\n if stat in statuses:\n return stat\n return 'SUCCESS'", "title": "" }, { "docid": "420c2a6bf27b787bd0a1eed0809b2c26", "score": "0.5084934", "text": "def last_job_run_at(self):\n return self._to_datetime(self._data.get('last_job_run'))", "title": "" }, { "docid": "2ad9fb6662e4bad69347bbe8b6d51b32", "score": "0.50824106", "text": "def get_last_sample(self):\n return self.chain[-1]", "title": "" }, { "docid": "f0daff50d453aab0cfd91dfae9206fbc", "score": "0.5070897", "text": "def get_last_recorded_tweet():\n _ensure_folder()\n frames = recorded_tweet_list()\n return frames[-2]", "title": "" }, { "docid": "a72771a338d55c491ff5c43aee6f94e9", "score": "0.5064276", "text": "def get_last(self):\n if self.sample_count > 0:\n return self.history[self.sample_count - 1]\n else:\n raise Exception('No recorded value')", "title": "" }, { "docid": "9bc394de20fed52fa042e3f9434406cb", "score": "0.5062719", "text": "def status(self):\n algorithm_tasks = {}\n\n for _, experiment in self.get_experiments():\n trials = experiment.fetch_trials()\n\n algorithm_name = list(experiment.configuration[\"algorithm\"].keys())[0]\n\n if algorithm_tasks.get(algorithm_name, None) is None:\n task_state = {\n \"algorithm\": algorithm_name,\n \"experiments\": 0,\n \"assessment\": self.assess_name,\n \"task\": self.task_name,\n \"completed\": 0,\n \"trials\": 0,\n }\n else:\n task_state = algorithm_tasks[algorithm_name]\n\n task_state[\"experiments\"] += 1\n task_state[\"trials\"] += len(trials)\n if experiment.is_done:\n task_state[\"completed\"] += 1\n\n algorithm_tasks[algorithm_name] = task_state\n\n return list(algorithm_tasks.values())", "title": "" }, { "docid": "2cd70c22ca44d0ddbc58f7fa1891fa0c", "score": "0.50455207", "text": "def test_empty_get_status() -> None:\n bugs = bts.get_status(0)\n assert isinstance(bugs, list)\n assert len(bugs) == 0", "title": "" }, { "docid": "8ed6c064f68aea8f93b5b90bf8e59d78", "score": "0.50430536", "text": "def get_latest_tested_opset_version(self):\n return self.onx_op.get_latest_tested_opset_version()", "title": "" }, { "docid": "b1421861d8fd05deb5746006eabe2bf6", "score": "0.50381243", "text": "def getTestResult( self, elementName, vo, jobID, 
submissionTime ):\n\n isFinish = False\n\n res = self.__getJobOutput( jobID, vo )\n if not res[ 'OK' ]:\n return res\n output = res[ 'Value' ]\n status = res[ 'Status' ]\n\n resDict = { 'CompletionTime' : None, 'Status' : None, 'Log' : None, 'ApplicationTime' : None }\n utcNow = datetime.utcnow().replace( microsecond = 0 )\n\n if output:\n isFinish = True\n resDict[ 'CompletionTime' ] = utcNow\n log = output[ 'Log' ]\n if not output[ 'Download' ]:\n resDict[ 'Status' ] = 'Unknown'\n resDict[ 'Log' ] = 'Fail to download log file for job %s: %s' % ( jobID, log )\n else:\n resDict[ 'Log' ] = log\n resDict[ 'Status' ] = self._judge( log )\n resDict[ 'AppliactionTime' ] = self.__getAppRunningTime( log )\n\n else:\n if utcNow - submissionTime >= timedelta( seconds = self.timeout ):\n isFinish = True\n if elementName.split( '.' )[ 0 ] == 'CLOUD':\n site = elementName\n else:\n site = BESUtils.getSiteForCE( elementName )\n jobCount = self.wmsAdmin.getSiteSummaryWeb( { 'Site' : site }, [], 0, 0 )\n if not jobCount[ 'OK' ]:\n return jobCount\n params = jobCount[ 'Value' ][ 'ParameterNames' ]\n records = jobCount[ 'Value' ][ 'Records' ][ 0 ]\n run = records[ params.index( 'Running' ) ]\n done = records[ params.index( 'Done' ) ]\n if status == 'Waiting' and run == 0 and done == 0:\n resDict[ 'Status' ] = 'Bad'\n resDict[ 'Log' ] = 'The test job is waiting for %d seconds, but no running and done jobs at this site.' % self.timeout\n else:\n if run != 0:\n resDict[ 'Status' ] = 'Busy'\n resDict[ 'Log' ] = 'Site %s is too busy to execute this test job, job status is %s' % ( site, status )\n else:\n resDict[ 'Status' ] = 'Unknown'\n resDict[ 'Log' ] = 'Test did not complete within the timeout of %d seconds, job status is %s' % ( self.timeout, status )\n self.dirac.killJob( jobID )\n\n if not isFinish:\n return S_OK()\n else:\n return S_OK( resDict )", "title": "" }, { "docid": "5b2d8aa7b44d0d6b6d231080e2292fc3", "score": "0.50364333", "text": "def last_minute_submissions(grades):\n\n return ...", "title": "" }, { "docid": "75060b73d2afb2fb53d99862516cfb3b", "score": "0.5034814", "text": "def last_update_status(self):\n return self._last_update_status", "title": "" }, { "docid": "ef5a3f3d2ee37ab930aae3c8e4e6cc3d", "score": "0.5032052", "text": "def get_run_status(self) -> str:\n\n if self._current_run_id is None:\n raise FitNeeded(reason=\"Firstly schedule a fit job by using the fit() method.\")\n\n return self._wml_client.training.get_status(training_uid=self._current_run_id).get('state')", "title": "" }, { "docid": "ce4f02c4882eb5abbd49b8ff1d5a0fdc", "score": "0.50282854", "text": "def local_latest(results):\n\n if len(results.index) <= 1:\n return results\n # separate all the attributes which could be different between two versions\n separate = ['path', 'version', 'time_complete', 'filename','fdate', 'tdate', 'periods']\n cols = [k for k in results.columns if k not in separate]\n results = results.sort_values('version').drop_duplicates(subset=cols, keep='last')\n return results", "title": "" }, { "docid": "e2b936705ec92efd304015cbbd7c5e9e", "score": "0.50254935", "text": "def test4_final_workflow_status(self):\n\n self.assertFalse(self.wf.is_running)\n self.assertFalse(self.wf.is_completed)\n self.assertTrue(self.wf.has_failed)\n\n self.assertIsNotNone(self.wf.starttime)\n self.assertIsNone(self.wf.finishtime)\n self.assertIsNotNone(self.wf.updatetime)\n self.assertLessEqual(self.wf.runtime, 5)\n\n self.assertEqual(self.wf.failed_tasks, [self.wf.workflow.query_nodes(key='test3')])", "title": "" }, { 
"docid": "e2b936705ec92efd304015cbbd7c5e9e", "score": "0.50254935", "text": "def test4_final_workflow_status(self):\n\n self.assertFalse(self.wf.is_running)\n self.assertFalse(self.wf.is_completed)\n self.assertTrue(self.wf.has_failed)\n\n self.assertIsNotNone(self.wf.starttime)\n self.assertIsNone(self.wf.finishtime)\n self.assertIsNotNone(self.wf.updatetime)\n self.assertLessEqual(self.wf.runtime, 5)\n\n self.assertEqual(self.wf.failed_tasks, [self.wf.workflow.query_nodes(key='test3')])", "title": "" }, { "docid": "1107f0e2271ca23113ad78a4643ed321", "score": "0.5023025", "text": "def get_overall_status(self):\n if self.task.status in self.final_statuses:\n return self.task.status\n else:\n return self.PENDING", "title": "" }, { "docid": "e1e8d05cce021ca17bffccc7985e5260", "score": "0.5021598", "text": "def recent_jobs(self):\n return self._data.get('summary_fields', {}).get('recent_jobs')", "title": "" }, { "docid": "818c01b05718bccf915ed0e56433e35f", "score": "0.5020584", "text": "def latest_run(self) -> Optional[pulumi.Input['ScanRunArgs']]:\n return pulumi.get(self, \"latest_run\")", "title": "" }, { "docid": "a381c0391cebef6fa0f40314b9bba622", "score": "0.5020056", "text": "def _get_last_action(self):\r\n return {'id': 0, 'mtime': 0, 'snippet': ''}", "title": "" }, { "docid": "acf0a0ab20c94383e8e9c63c2abf2da7", "score": "0.50178033", "text": "def _test_submission__previous_submission__inactive(self, platform):\n # Do a valid submission\n self._login()\n status, obj = self._submit(TEST_APP_ID, platform)\n self.assertEqual(Submission.objects.all().count(), 1)\n # Make sure its inactive\n previous = Submission.objects.get(submission_id=obj['id'])\n previous.is_active = False\n previous.is_failed = False\n previous.save()\n #ย Do another subission, it should fail.\n status, obj = self._submit(TEST_APP_ID, platform)\n self.assertEqual(status, 400)\n self.assertTrue('already in the publishing queue' in obj['detail'])\n self.assertEqual(Submission.objects.all().count(), 1)", "title": "" }, { "docid": "cf8370c3a66b8c4390fb56281302ef7f", "score": "0.5015243", "text": "def print_submission_status():\n table = [['Capture MD5',\n 'Capture Name',\n 'Capture Start',\n 'Capture End',\n 'Upload Start',\n 'Upload End',\n 'Size',\n 'Queued',\n 'Analysis Started',\n 'Analysis Completed',\n 'Malicious',\n 'Link']]\n table.extend(get_submissions_status())\n print(terminaltables.AsciiTable(table).table)", "title": "" }, { "docid": "15003843a836e0a07e385a59949fb02a", "score": "0.5011535", "text": "def last_updated(self):\n latest = self.updated\n for block in self.blocks.all():\n if block.updated > latest:\n latest = block.updated\n\n return latest", "title": "" }, { "docid": "68c5f6aa063a79058307fd952bbc68cf", "score": "0.5009846", "text": "def print_latest_status(self, total_tests):\n result = self._last_test_result()\n passed, failed, warned = self._split()\n if result.passed:\n self._printer.write(\"pass\", fg='gi')\n self.fp.write(\"%s = pass\\n\" % result.name)\n elif result.failed:\n self._printer.write(\"fail\", fg='ri')\n self.fp.write(\"%s = fail\\n\" % result.name)\n elif result.warned:\n self._printer.write(\"warn\", fg='rgi')\n self.fp.write(\"%s = warn\\n\" % result.name) \n else:\n self.fp.write(\"%s = unknown\\n\" % result.name)\n assert False\n\n args = []\n args.append(\"P=%i\" % len(passed))\n args.append(\"W=%i\" % len(warned))\n args.append(\"F=%i\" % len(failed))\n args.append(\"T=%i\" % total_tests)\n\n if result.fail_message != '':\n self._printer.write(\" (%s) %s (%.1f seconds)\\n 
FailMsg: %s\\n LogFile: %s\" %\n (\" \".join(args), result.name, result.time, result.fail_message, result.log_file))\n else:\n self._printer.write(\" (%s) %s (%.1f seconds)\\n LogFile: %s\" %\n (\" \".join(args), result.name, result.time, result.log_file))", "title": "" }, { "docid": "0fbff94f391f7be23f336b126762cccc", "score": "0.5003849", "text": "def last_good_build(self):\n data = self._controller.get_api_data()\n\n lgb = data['lastSuccessfulBuild']\n\n if lgb is None:\n return None\n\n temp_data_io = self._controller.clone(lgb['url'])\n return Build(temp_data_io)", "title": "" }, { "docid": "6be6f5b20155f2fc135e2c6e72971530", "score": "0.49976906", "text": "def _get_last_checkpoint(self, config_dir):\n ckpt_files = [os.path.join(config_dir, 'train', f)\n for f in os.listdir(os.path.join(config_dir, 'train'))\n if f.endswith('.index')]\n ckpt_files = [x[:-6] for x in ckpt_files]\n ckpt_files = [(x, int(x[x.find('ckpt-')+5:])) for x in ckpt_files]\n ckpt_files = sorted(ckpt_files, key=lambda x: x[1], reverse=True)\n return ckpt_files[0]", "title": "" }, { "docid": "036ed1f13de2ff02734169c43907acdf", "score": "0.49944264", "text": "def submission_statistics(self):\r\n return self._submission_statistics", "title": "" }, { "docid": "37274d057ffbf1cf040f5a9b4c648649", "score": "0.49934176", "text": "def get_job_status():\n\n # Create a client that will talk to the web API\n client = WebAPIClient(\n hostname=\"https://umcradanonp11.umcn.nl/p01\",\n username=\"z123sandbox\",\n token=\"token\",\n )\n\n # get the status for 3 specific jobs\n job_status_list = client.get(\n \"get_jobs_list_extended\", job_ids=[53769, 53770, 53771]\n )\n print(f\"found status for {len(job_status_list)} jobs in list:\")\n print(job_status_list)", "title": "" }, { "docid": "9f5ae9ef7cf3a69166016cfacca44a2a", "score": "0.49932545", "text": "def get_latest_revision(self):\n revision = 0\n if self.changesets.count():\n revision = self.changesets.all()[0].revision\n return revision", "title": "" }, { "docid": "257648a32631363443194430dacd9333", "score": "0.4977236", "text": "def get_current_statistics():\n results = Submission.objects.values(\"user\", \"exercise\").annotate(\n rating=Max(\"testresult__success_count\")\n )\n final_result = defaultdict(int)\n for result in results:\n if result[\"rating\"] and result[\"rating\"] > 5:\n final_result[result[\"user\"]] += result[\"rating\"]\n return sorted(final_result.values())", "title": "" }, { "docid": "5de1856da43e9715ff8f2d32ac2aad63", "score": "0.4976961", "text": "def get_last_changeset(self):\n return self.repository.changesets.get(date=self.last_changed)", "title": "" }, { "docid": "5877361a902d0145c6ec78f4b66de553", "score": "0.49766994", "text": "def test_last(self) -> None:\n response = self.cve.last()\n self.assertIsNotNone(response)\n self.assertIsInstance(response, list)\n self.assertEqual(len(response), 30)", "title": "" }, { "docid": "781369418af805c60e2f3a35aa102d67", "score": "0.49723783", "text": "def last_status(self) -> StatusCode:\n return self.visalib.get_last_status_in_session(self.session)", "title": "" }, { "docid": "91f5959e88dfb395cfa9e07df503ee6c", "score": "0.49681935", "text": "def get_check_runs(self, commit = None):\n if commit is None:\n commit = self._ref\n result = self._GAI.get_check_runs(commit)\n print(\"get_check_runs\")\n print(result)\n return result['check_runs']", "title": "" }, { "docid": "e80d7e122e22291ee40a4668498f8143", "score": "0.496475", "text": "def test_sample_get_status() -> None:\n bugs = bts.get_status(486212)\n 
assert len(bugs) == 1\n bug = bugs[0]\n assert bug.bug_num == 486212\n assert bug.date == datetime.datetime(2008, 6, 14, 10, 30, 2)\n assert bug.subject.startswith(\"[reportbug-ng] segm\")\n assert bug.package == \"reportbug-ng\"\n assert bug.severity == \"normal\"\n assert bug.tags == [\"help\"]\n assert bug.blockedby == []\n assert bug.blocks == []\n assert bug.summary == \"\"\n assert bug.location == \"archive\"\n assert bug.source == \"reportbug-ng\"\n assert bug.log_modified == datetime.datetime(2008, 8, 17, 7, 26, 22)\n assert bug.pending == \"done\"\n assert bug.done\n assert bug.done_by == \"Bastian Venthur <[email protected]>\"\n assert bug.archived\n assert bug.found_versions == [\"reportbug-ng/0.2008.06.04\"]\n assert bug.fixed_versions == [\"reportbug-ng/1.0\"]\n assert bug.affects == []", "title": "" } ]
33f1864dad6aa245ab722088c583c125
Anonymous function node. Inline the function definition, and return the function identifier.
[ { "docid": "cff250350e2e20c429b64454aca71c7f", "score": "0.5156287", "text": "def _(node, inline, bindings=()):\n name = next(name_generator('lambda'))\n inline_children = []\n\n # bindings in local scope\n binding_assignments = [\n python.Assign(\n targets=[\n python.Tuple(\n elts=[\n python.Subscript(\n value=python.Name(id='scope', ctx=python.Load()),\n slice=python.Index(\n value=python.Str(s=identifier.value),\n ),\n ctx=python.Store(),\n ) for identifier in (identifier.expressions if (isinstance(identifier, tscl.List)) else [identifier])\n ],\n ctx=python.Store(),\n )\n ],\n value=(\n # expression: assume iterable if identifier expects destructuring\n generate(expression, inline_children) if (\n isinstance(identifier, (\n tscl.List,\n )) and isinstance(expression, (\n tscl.Let,\n tscl.Call,\n tscl.Identifier,\n ))\n ) else\n\n # reference: may or may not evaluate to an iterable\n generate(expression, inline_children) if isinstance(expression, (\n tscl.Identifier,\n )) else\n\n # list: wrap nested expressions in tuple if identifier expects destructuring\n python.Tuple(\n elts=[generate(expression, inline_children) for expression in expression.expressions],\n ctx=python.Load(),\n ) if (isinstance(identifier, tscl.List) and isinstance(expression, tscl.List)) else\n\n # value or expression: wrap in tuple\n python.Tuple(\n elts=[generate(expression, inline_children)],\n ctx=python.Load(),\n )\n )\n )\n for identifier, expression in zip(bindings[::2], bindings[1::2])\n ]\n children = [wrap(generate(node, inline_children)) for node in node.expressions]\n inline.append(python.FunctionDef(\n name=name,\n args=python.arguments(\n args=[\n python.arg(arg=param.value, annotation=None) for param in node.parameters.expressions\n ],\n defaults=[],\n vararg=None,\n # capture current scope and create a new child scope to pass into the function\n kwonlyargs=[\n python.arg(arg='scope', annotation=None),\n ],\n kw_defaults=[\n python.Call(\n func=python.Attribute(\n value=python.Name(\n id='scope', ctx=python.Load()\n ),\n attr='new_child', ctx=python.Load(),\n ),\n args=[], keywords=[], starargs=None, kwargs=None,\n ),\n ],\n kwarg=None,\n ),\n body=(\n [\n # update scope with function locals\n wrap(python.Call(\n func=python.Attribute(\n value=python.Name(id='scope', ctx=python.Load()),\n attr='update', ctx=python.Load(),\n ),\n args=[\n python.Call(\n func=python.Name(id='locals', ctx=python.Load()),\n args=[], keywords=[], starargs=None, kwargs=None)\n ],\n keywords=[], starargs=None, kwargs=None\n ))\n # function body with in-lined statements and bindings (let ...), returning the last expression\n ] + inline_children + binding_assignments + children[:-1] + [python.Return(value=children[-1].value)]\n # or pass if no body\n ) if node.expressions else [python.Pass()],\n decorator_list=[],\n returns=None,\n ))\n # the function itself will be inlined in a parent scope; return an identifier for the function\n return python.Name(\n id=name,\n ctx=python.Load(),\n )", "title": "" } ]
[ { "docid": "0de17cbc78992eb9c121abcf78ba8ae1", "score": "0.67060685", "text": "def _(node, inline):\n name = generate(\n tscl.Function(\n parameters=tscl.List(expressions=()),\n expressions=node.expressions,\n ),\n inline,\n node.bindings.expressions,\n )\n return python.Call(\n func=name, args=[], keywords=[], starargs=None, kwargs=None,\n )", "title": "" }, { "docid": "43807da552f1c294714e760aac49dd9a", "score": "0.62891203", "text": "def identity():\n def f(x):\n return x\n return f", "title": "" }, { "docid": "c0dce060825bf31745d049e63aa5c784", "score": "0.60872465", "text": "def identity():\n f = lambda x: x\n return f", "title": "" }, { "docid": "16cdf98dbf804f8b88576f98d90a28e0", "score": "0.5902344", "text": "def inline(fn):\n if is_macro(fn):\n raise ValueError(\"A function cannot be marked for inlining and expansion at the same time\")\n fn._peval_inline = True\n return fn", "title": "" }, { "docid": "39669ab2cb837d4d2086baabd44ed63d", "score": "0.5879168", "text": "def _(node, inline):\n return python.Call(\n func=generate(node.expression, inline),\n args=[\n generate(node, inline) for node in node.expressions\n ],\n keywords=[],\n starargs=None,\n kwargs=None,\n )", "title": "" }, { "docid": "7caed24da765d84a8398463da82d1e8b", "score": "0.5834416", "text": "def full_function_code(self):\n from .utils import build_function\n\n return build_function(self.function_signature, self.docstring, self.function_body)", "title": "" }, { "docid": "000b3701c1a96ee717a758122646367a", "score": "0.5738888", "text": "def visit_function(self, node):\r\n decorate = node.decorators and node.decorators.accept(self) or ''\r\n docs = node.doc and '\\n%s\"\"\"%s\"\"\"' % (INDENT, node.doc) or ''\r\n return '\\n%sdef %s(%s):%s\\n%s' % (decorate, node.name, node.args.accept(self),\r\n docs, self._stmt_list(node.body))", "title": "" }, { "docid": "e4400eace0a1a240d4163a1c43cca20b", "score": "0.5678608", "text": "def visit_FunctionDef(self_r, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "64146eb3fe4658a4dfc80e0cc62061aa", "score": "0.5596656", "text": "def 
visit_FunctionDef(self_t, node):\n return node", "title": "" }, { "docid": "eb5bd173abb866b44c40710d514a6525", "score": "0.5575674", "text": "def _function_definition(self):\n self._match(tokens.DEF)\n\n node = AST(tokens.FUNC_DEF)\n id_token = self._lookahead_token(0)\n node.add_child(AST(id_token))\n\n func_symbol = FunctionSymbol(id_token.text, self.current_scope)\n func_symbol.scope = self.current_scope\n self.current_scope.define(func_symbol)\n self.current_scope = func_symbol\n\n self._match(tokens.ID)\n self._match(tokens.LPAREN)\n\n if self._lookahead_type(0) == tokens.ID:\n node.add_child(AST(self._lookahead_token(0)))\n\n variable_symbol = VariableSymbol(self._lookahead_token(0).text)\n variable_symbol.scope = self.current_scope\n self.current_scope.define(variable_symbol)\n\n self._match(tokens.ID)\n\n while self._lookahead_type(0) == tokens.COMMA:\n self._match(tokens.COMMA)\n node.add_child(AST(self._lookahead_token(0)))\n\n variable_symbol = VariableSymbol(self._lookahead_token(0).text)\n self.current_scope.define(variable_symbol)\n variable_symbol.scope = self.current_scope\n\n self._match(tokens.ID)\n\n self._match(tokens.RPAREN)\n\n self.current_scope = LocalScope(self.current_scope)\n\n block_ast = self._slist()\n func_symbol.block_ast = block_ast\n node.add_child(block_ast)\n\n # pop LocalScope\n self.current_scope = self.current_scope.get_enclosing_scope()\n\n # pop FunctionSymbol\n self.current_scope = self.current_scope.get_enclosing_scope()\n\n return node", "title": "" }, { "docid": "2c8b250b6f1226a7bac36a563ef989c3", "score": "0.54948366", "text": "def _extract_expression_to_function(self, node):\n checkpoint = self._checkpoint_rw_vars()\n return_stmt = ast.Return(value=self.visit(node))\n func_name = self._allocator.globalsym()\n arg_names = list(self._loaded_vars.values()) + list(self._stored_vars.values())\n\n func = function_def(func_name, arg_names, [return_stmt])\n self._extra_predicates.append(func)\n self._restore_checkpoint_rw_vars(checkpoint)\n return function_call(func_name, arg_names)", "title": "" }, { "docid": "4579ab4a7207fe82079b1c83519ddc0f", "score": "0.5483696", "text": "def identity():\n return lambda x: x", "title": "" }, { "docid": "f03f2f28b8e4e4879b8fbaa6104939a7", "score": "0.54697037", "text": "def visit_FunctionDef(self, node: ast.FunctionDef):\n for func_node in ast.iter_child_nodes(node):\n if isinstance(func_node, ast.Expr) and type(func_node.value) in [ast.Str, ast.Call]:\n next\n elif isinstance(func_node, ast.arguments):\n next\n else:\n self.__inlineable_cache__[node.name] = False\n return\n self.__inlineable_cache__[node.name] = True\n self.__function_cache__[node.name] = {'args': node.args}\n if isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Str):\n # skip docstring\n call_list = node.body[1:]\n else:\n call_list = node.body\n call_list = map(lambda x: x.value, call_list)\n self.__function_cache__[node.name]['body'] = call_list", "title": "" }, { "docid": "491edf6925ebcdb88fc5065fcdbce473", "score": "0.5461149", "text": "def func_id(self):\n return self._func_id", "title": "" }, { "docid": "178fe3507eeb10ad94427fc1587d4d70", "score": "0.5453419", "text": "def visit_fun(self, fun_node, env):\n return Closure(fun_node.args, fun_node.body, env, self)", "title": "" }, { "docid": "311bf052e23e7598e194a9547fdf9014", "score": "0.5436296", "text": "def identity_function(x, name=None):\n return x", "title": "" }, { "docid": "e9462c6cb6f0ac47f232950baf4dea5c", "score": "0.5416859", "text": "def _function(\n 
self,\n node: relay.Expr,\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n node_details = []\n name = \"\"\n func_attrs = node.attrs\n if func_attrs:\n node_details = [f\"{k}: {func_attrs.get_str(k)}\" for k in func_attrs.keys()]\n # \"Composite\" might from relay.transform.MergeComposite\n if \"Composite\" in func_attrs.keys():\n name = func_attrs[\"Composite\"]\n node_id = node_to_id[node]\n\n # Body -> FunctionNode\n viz_node = VizNode(node_id, f\"Func {name}\", \"\\n\".join(node_details))\n viz_edges = [VizEdge(node_to_id[node.body], node_id)]\n return viz_node, viz_edges", "title": "" }, { "docid": "6525cbc0cc608fd92ec1bc93106d74d7", "score": "0.5414807", "text": "def macro(fn):\n if is_inline(fn):\n raise ValueError(\"A function cannot be marked for inlining and expansion at the same time\")\n fn._peval_macro = True\n return fn", "title": "" }, { "docid": "36981eeacf1f7397ea7a981018d5cf43", "score": "0.5413922", "text": "def sin_function():\n def f(x):\n return math.sin(x)\n return f", "title": "" }, { "docid": "450115e7adf41e20822d504537b80016", "score": "0.5394574", "text": "def function(self, name, args=[], xml=\"\"):\n return 'function %s(){%s};' % (name, xml)", "title": "" }, { "docid": "1c5020e1aa524ce2b461b5616a17a128", "score": "0.5386749", "text": "def getId(self):\n return _libsbml.FunctionDefinition_getId(self)", "title": "" }, { "docid": "7657a96baf6a0454e48d07aa935b4275", "score": "0.5324209", "text": "def greet2():\n def func():\n return 5\n return func", "title": "" }, { "docid": "46b16628f0c08590173c4651d489c66e", "score": "0.5314426", "text": "def function(self):\n if self._function is None:\n self._extract_function()\n return self._function", "title": "" }, { "docid": "db87ef5a3c32d26e71a2f738c7879259", "score": "0.5274775", "text": "def function_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_arn\")", "title": "" }, { "docid": "feb536e104881e848c8ca0e4ca52a9a7", "score": "0.5248866", "text": "def math_funcdef_handle(tokens):\n funcdef, suite = tokens\n return funcdef + (\"\" if suite.startswith(\"\\n\") else \" \") + suite", "title": "" }, { "docid": "c4f6afe3c97760cf608d006389766814", "score": "0.5238953", "text": "def call_fn_name(token):\n return 'CALL_FUNCTION_%i' % token.attr", "title": "" }, { "docid": "069d3203d4c93178e391ded55a533aca", "score": "0.52328146", "text": "def visit_FunctionDef(self, node):\n self.result[node.name] = node", "title": "" }, { "docid": "e95f7233cd82fe8aae3f9f62bdb03b14", "score": "0.5222923", "text": "def a_function():\n return \"1+1\"", "title": "" }, { "docid": "4e16ddd1a6c7af908c6efd3b753e7694", "score": "0.5206077", "text": "def from_fn(fn: Callable, name: str = None) -> 'Node':\n if name is None:\n name = fn.__name__\n sig = inspect.signature(fn)\n return Node(name, sig.return_annotation, fn.__doc__ if fn.__doc__ else '', callabl=fn)", "title": "" }, { "docid": "0c2ffdb6718cc59a7e94712465a31c56", "score": "0.5202213", "text": "def visit_FunctionDef(self, node):\n # Can't have decorators or a returns annotation.\n if node.decorator_list or node.returns:\n return node\n # Body must be of length 1 and consist of a Return node;\n # can't translate a body consisting of only an Expr node as that would\n # lead to an improper return value (i.e., something other than None\n # potentially).\n if len(node.body) > 1 or not isinstance(node.body[0], ast.Return):\n return node\n args = node.args\n # No annotations for *args or **kwargs.\n if ((args.vararg and 
args.vararg.annotation) or\n (args.kwarg and args.kwarg.annotation)):\n return node\n # No annotations on any other parameters.\n if any(arg.annotation for arg in args.args):\n return node\n # In the clear!\n return_ = node.body[0].value\n if return_ is None:\n return_ = ast.Name('None', ast.Load())\n lambda_ = ast.Lambda(args, return_)\n return ast.Assign([ast.Name(node.name, ast.Store())], lambda_)", "title": "" }, { "docid": "205ee508e886fe397c7558ef6e07cefc", "score": "0.5183396", "text": "def get_function_name(self, line_data):\n return helpers.get_string_between(\"def \", \"(\", line_data)", "title": "" }, { "docid": "9ed926726fabf96c14ab1e44f5787f2c", "score": "0.5162544", "text": "def visit_functiondef(self, node: ast.FunctionDef):\n funsymbol = symbols.FunctionSymbol(name=node.name, params=node.params, block=node.block)\n self.scope.put(funsymbol)\n pass", "title": "" }, { "docid": "00e321a981368f1cd767d807c78a59f0", "score": "0.51403373", "text": "def f():", "title": "" }, { "docid": "c8adde38dc45d816b60de68b375d527e", "score": "0.5135819", "text": "def f_factory(i):\n def f():\n print i\n return f", "title": "" }, { "docid": "9910cc59c50459c9541f09ba1eba082d", "score": "0.5128793", "text": "def visit(self, node): \n if not self.funcNeedDefine(node): \n return None\n\n doxy_comment = ''\n if Config.DOXYGEN: doxy_comment = node['doxygen'] + os.linesep\n\n fun_def = str(Template(Config.FUNCTION, searchList=[{\n 'function': node\n }]))\n if node['const']:\n fun_def = fun_def.replace(')', ') const')\n self._stream.write( doxy_comment + fun_def)", "title": "" }, { "docid": "41b3bc5d3e90cbf14b5c6efc5ba9c988", "score": "0.5125502", "text": "def f():\n pass", "title": "" }, { "docid": "41b3bc5d3e90cbf14b5c6efc5ba9c988", "score": "0.5125502", "text": "def f():\n pass", "title": "" }, { "docid": "41b3bc5d3e90cbf14b5c6efc5ba9c988", "score": "0.5125502", "text": "def f():\n pass", "title": "" }, { "docid": "1fdac2af3c664504ab9cc90ebd75c042", "score": "0.50958574", "text": "def function(self, returns=None, name=None):\r\n\r\n def _(func, name=name):\r\n if name is None:\r\n name = func.__name__\r\n self.add_function(name, func, returns)\r\n return func\r\n return _", "title": "" }, { "docid": "829da6ad7ee60ac6d146f6cc7fcc6ddd", "score": "0.5089088", "text": "def code(self) -> pulumi.Output['outputs.FunctionCode']:\n return pulumi.get(self, \"code\")", "title": "" }, { "docid": "ad1729d2b129083744bab451bd0ba4a5", "score": "0.5089038", "text": "def n_funccall(self, node): \n self.make_func(node, funccall_name(node))", "title": "" }, { "docid": "4bedbf9f76d6dfcc8d4efafcb577b7c7", "score": "0.50436354", "text": "def visit_lambda(self, node):\r\n return 'lambda %s: %s' % (node.args.accept(self), node.body.accept(self))", "title": "" }, { "docid": "c016d02e1707b0702c3df60fc9713f3a", "score": "0.5041464", "text": "def gen_simple(code, f_globals):\n bp_code = Code.from_code(code)\n optimize_locals(bp_code.code)\n bp_code.newlocals = False\n new_code = bp_code.to_code()\n return FunctionType(new_code, f_globals)", "title": "" }, { "docid": "c9ad935e0bc01233ac29ba19015ee19c", "score": "0.50265723", "text": "def function(self):\n return self._func", "title": "" }, { "docid": "56dd15cd72d02523bacfd77fa2fd88aa", "score": "0.500752", "text": "def visit_FunctionDef(self, node):\n # TODO: Use PyPosAST\n self.contexts.append(Context(node.name))\n self.generic_visit(node)\n code_hash = persistence.put(\n '\\n'.join(self.code[node.lineno - 1:self.lineno]).encode('utf-8'))\n self.functions[self.namespace] = 
self.contexts[-1].to_tuple(code_hash)\n self.contexts.pop()", "title": "" }, { "docid": "9af4897cfd6f63150cb7dfc795e960e7", "score": "0.5004543", "text": "def createFunctionDefinition(self):\n return _libsbml.Model_createFunctionDefinition(self)", "title": "" }, { "docid": "ca83192d00ccfeb4687ef93347e0b67c", "score": "0.4999975", "text": "def EvaluateFunctionTree(self):\n self.functionEvaluation = dict(list=list(), length=list(), body=list(), address=list());\n nextStart = 0;\n # \".interrupt\" is optionally required (and is sure to exist by this\n # function call if it is required). The interrupt handler always starts at\n # address 3 so that address 0 can be a jump to \".main\".\n if self.interrupt:\n nextStart = 3;\n self.functionEvaluation['list'].append('.interrupt');\n self.functionEvaluation['length'].append(self.interrupt['length']);\n self.functionEvaluation['body'].append(self.interrupt['tokens']);\n self.functionEvaluation['address'].append(nextStart);\n nextStart = nextStart + self.functionEvaluation['length'][-1];\n # \".main\" is always required.\n self.functionEvaluation['list'].append('.main');\n self.functionEvaluation['length'].append(self.main['length']);\n self.functionEvaluation['body'].append(self.main['tokens']);\n self.functionEvaluation['address'].append(nextStart);\n nextStart = nextStart + self.functionEvaluation['length'][-1];\n # Loop through the required function bodies as they are identified.\n ix = 0;\n while ix < len(self.functionEvaluation['body']):\n for token in self.functionEvaluation['body'][ix]:\n if (token['type'] == 'macro') and (token['value'] in ('.call','.callc',)):\n callName = token['argument'][0]['value'];\n if callName not in self.functionEvaluation['list']:\n if not self.IsSymbol(callName):\n raise asmDef.AsmException('Function \"%s\" not defined for function \"%s\"' % (callName,self.functionEvaluation['list'][ix],));\n ixName = self.symbols['list'].index(callName);\n if self.symbols['type'][ixName] != 'function':\n raise asmDef.AsmException('Function \"%s\" called by \"%s\" is not a function' % (callName, self.functionEvaluation['list'][ix],));\n self.functionEvaluation['list'].append(callName);\n self.functionEvaluation['length'].append(self.symbols['body'][ixName]['length']);\n self.functionEvaluation['body'].append(self.symbols['body'][ixName]['tokens']);\n self.functionEvaluation['address'].append(nextStart);\n nextStart = nextStart + self.functionEvaluation['length'][-1];\n ix = ix + 1;\n # Within each function, compute the list of label addresses and then fill in\n # the address for all jumps and calls.\n for ix in range(len(self.functionEvaluation['list'])):\n startAddress = self.functionEvaluation['address'][ix];\n labelAddress = dict(list=list(), address=list());\n for token in self.functionEvaluation['body'][ix]:\n if token['type'] == 'label':\n labelAddress['list'].append(token['value']);\n labelAddress['address'].append(startAddress + token['offset']);\n for token in self.functionEvaluation['body'][ix]:\n if token['type'] != 'macro':\n continue;\n if token['value'] in ('.jump','.jumpc',):\n ix = labelAddress['list'].index(token['argument'][0]['value']);\n token['address'] = labelAddress['address'][ix];\n elif token['value'] in ('.call','.callc',):\n ix = self.functionEvaluation['list'].index(token['argument'][0]['value']);\n token['address'] = self.functionEvaluation['address'][ix];\n # Sanity checks for address range\n if self.functionEvaluation['address'][-1] + self.functionEvaluation['length'][-1] >= 2**13:\n raise 
asmDef.AsmException('Max address for program requires more than 13 bits');", "title": "" }, { "docid": "3e3d2f2a63476e11a902dfac7de27b20", "score": "0.4998105", "text": "def expand_function(*, func, sess_ctx):\n assert parametrized(func)\n assert next(filter(lambda x: isinstance(x, IgniteVersionParametrize), func.marks), None)\n\n return MarkedFunctionExpander(session_context=sess_ctx, function=func).expand()", "title": "" }, { "docid": "b8efea478506ad59e21d7014d76973e1", "score": "0.49929282", "text": "def identity_function(x):\n return x", "title": "" }, { "docid": "5ef791fea8f7bac5bb162f0922e5698f", "score": "0.49912837", "text": "def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'", "title": "" }, { "docid": "93f2934d29921d0aab8f39befd63fe29", "score": "0.49909526", "text": "def _assure_identity(fnc):\n def _wrapped(*args, **kwargs):\n if identity is None:\n _create_identity()\n return fnc(*args, **kwargs)\n return _wrapped", "title": "" }, { "docid": "e5219379b8969bd43d9bfaeb04ae4042", "score": "0.49844617", "text": "def sin_function():\n f = lambda x: math.sin(x)\n return f", "title": "" }, { "docid": "3e1a7051ec70a6d76581c820391d3312", "score": "0.49794906", "text": "def function_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_arn\")", "title": "" }, { "docid": "30bb5d2676d79d7f1b034294879f44e2", "score": "0.49692062", "text": "async def eval(self, ctx, *, body):\n to_exec = textwrap.indent(body, \" \")\n exec(f\"async def func(ctx):\\n\\t{to_exec}\", globals())\n await func(ctx)", "title": "" }, { "docid": "1f1a9c0216fbed469b2af33857ec4304", "score": "0.4968637", "text": "def main(code_block: 'Index of desired function'):\n func = funcs[int(code_block)]\n print(func.__doc__)\n func()", "title": "" }, { "docid": "59411b65560d56694489ec35010fb35d", "score": "0.49652815", "text": "def closure(expr, **kwargs):\n return ArithTreeTransformer(expr, **kwargs).closure()", "title": "" }, { "docid": "a4448220212072eb680cef5a144e66fd", "score": "0.49639612", "text": "def eval_func(self):\n return self._eval_func", "title": "" }, { "docid": "298500122b415a971081cde991141a81", "score": "0.49524316", "text": "def resource(self) -> str:\n return f\"${{LambdaFunction{self.task_definition_name}}}\"", "title": "" }, { "docid": "53878aa72606763c232085429476fd3d", "score": "0.49487305", "text": "def gen_function(self) -> str:\n target = backend.target.Target.current()\n func_key = \"{target}.{op}.gen_function\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n func = registry.get(func_key)\n return func(\n self._attrs,\n self.exec_cond_template,\n self._extract_dims(),\n )", "title": "" }, { "docid": "58d3b5a04903d9f185888aed4a48704e", "score": "0.4943776", "text": "def remove_functions(source, all_inline=False):\n global INLINE_COUNT\n inline = {}\n hoisted = {}\n n = 0\n limit = len(source) - 9 # 8 is length of 'function'\n res = ''\n last = 0\n while n < limit:\n if n and source[n - 1] in IDENTIFIER_PART:\n n += 1\n continue\n if source[n:n + 8] == 'function' and source[n +\n 8] not in IDENTIFIER_PART:\n if source[:n].rstrip().endswith(\n '.'): # allow function as a property name :)\n n += 1\n continue\n if source[n + 8:].lstrip().startswith(\n ':'): # allow functions inside objects...\n n += 1\n continue\n entered = n\n res += source[last:n]\n name = ''\n n = pass_white(source, n + 8)\n if source[n] in IDENTIFIER_START: # hoisted function\n name, n = parse_identifier(source, n)\n args, n = pass_bracket(source, n, '()')\n if not args:\n raise 
SyntaxError('Function misses bracket with argnames ()')\n args = args.strip('() \\n')\n args = tuple(parse_identifier(e, 0)[0]\n for e in argsplit(args)) if args else ()\n if len(args) - len(set(args)):\n # I know its legal in JS but python does not allow duplicate argnames\n # I will not work around it\n raise SyntaxError(\n 'Function has duplicate argument names. Its not legal in this implementation. Sorry.'\n )\n block, n = pass_bracket(source, n, '{}')\n if not block:\n raise SyntaxError(\n 'Function does not have any code block to execute')\n mixed = False # named function expression flag\n if name and not all_inline:\n # Here I will distinguish between named function expression (mixed) and a function statement\n before = source[:entered].rstrip()\n if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):\n #print 'Ended ith keyword'\n mixed = True\n elif before and before[-1] not in PRE_ALLOWED and not before[\n -2:] in INCREMENTS:\n #print 'Ended with'+repr(before[-1]), before[-1]=='}'\n mixed = True\n else:\n #print 'FUNCTION STATEMENT'\n #its a function statement.\n # todo remove fucking label if present!\n hoisted[name] = block, args\n if not name or mixed or all_inline: # its a function expression (can be both named and not named)\n #print 'FUNCTION EXPRESSION'\n INLINE_COUNT += 1\n iname = INLINE_NAME % INLINE_COUNT # inline name\n res += ' ' + iname\n inline['%s@%s' % (\n iname, name\n )] = block, args #here added real name at the end because it has to be added to the func scope\n last = n\n else:\n n += 1\n res += source[last:]\n return res, hoisted, inline", "title": "" }, { "docid": "e4c07da359a77cad4b7c8391333b224c", "score": "0.49415165", "text": "def visit_FunctionDef(self, node):\n self.funcname = node.name\n\n if self.classname is None:\n print \"Function: %s\" % (self.funcname, )\n else:\n print \"Method: %s in class %s\" % (self.funcname, self.classname)\n\n # if self.funcname == 'getSentinelLogLeftNav':\n if self.funcname == 'showActionPageForAssessment':\n print ast.dump(node, annotate_fields=True, include_attributes=True)\n\n rc = ast.NodeVisitor.generic_visit(self, node)\n self.funcname = None\n return rc", "title": "" }, { "docid": "8391ed7782b5d738cd38c570e85dc6f0", "score": "0.4941419", "text": "def __function_call (self, node):\n\n funcName = \"\"\n expressions = []\n for i in range(len(node.children)):\n if isinstance(node.children[i], tree.Tree):\n if node.children[i].data == 'name':\n funcName = self.__name(node.children[i].children[0])\n else:\n expressions.append(self.__expression(node.children[i]))\n functionCall = FunctionCall(funcName, expressions, node.line)\n return functionCall", "title": "" }, { "docid": "1014ce34bdd5b91e873c1c526a0b8af1", "score": "0.4934191", "text": "def generate_ascii_function(function_name):\n random_var_par = utils.get_random_var(vars)\n vars.add(random_var_par)\n\n random_var_list = utils.get_random_var(vars)\n vars.add(random_var_list)\n\n block = 'def ' + function_name + '(' + random_var_par + '):\\n'\n\n block += ' ' * utils.SPACE_NUM\n block += random_var_list + '=[]\\n'\n\n random_var_for = utils.get_random_var(vars)\n vars.add(random_var_for)\n block += ' ' * utils.SPACE_NUM\n block += 'for ' + random_var_for + ' in str(' + random_var_par + '):\\n'\n\n block += ' ' * (utils.SPACE_NUM * 2)\n block += random_var_list + '.append(str(ord(' + random_var_for + ')))\\n'\n\n random_var_count = utils.get_random_var(vars)\n vars.add(random_var_count)\n block += ' ' * utils.SPACE_NUM\n block += random_var_count + '=len(' + 
random_var_list + ')-1\\n'\n\n random_var_res = utils.get_random_var(vars)\n vars.add(random_var_res)\n block += ' ' * utils.SPACE_NUM\n block += random_var_res + '=0\\n'\n\n random_var_for_2 = utils.get_random_var(vars)\n vars.add(random_var_for_2)\n block += ' ' * utils.SPACE_NUM\n block += 'for ' + random_var_for_2 + ' in ' + random_var_list + ':\\n'\n\n block += ' ' * (utils.SPACE_NUM * 2)\n block += random_var_res + '+=(10**' + random_var_count + ')*int(chr(int(' + random_var_for_2 + ')))\\n'\n\n block += ' ' * (utils.SPACE_NUM * 2)\n block += random_var_count + '-=1\\n'\n\n block += ' ' * utils.SPACE_NUM\n block += 'return ' + random_var_res + '\\n'\n\n return block", "title": "" }, { "docid": "684298caa070a740938f1a44dc3faf39", "score": "0.49226224", "text": "def visit_FunctionDef(self, node):\n docstring = self._parse_docstring(node)\n if docstring:\n self._str_codes.append((docstring, node.lineno + self._lineno + 2))", "title": "" }, { "docid": "a6089387bc02e1f0d2b4fe971578e67d", "score": "0.491576", "text": "def get_parse_function( node_name ):\r\n return _PARSE_FUNCTIONS.get(node_name,None)", "title": "" }, { "docid": "2e2411bc49a2ab0df0e2ba3b269a6a57", "score": "0.48968667", "text": "def add_simple_node(self, function, namespace, name):\n node = function, namespace, name\n if node not in self:\n self.add_node(node, **{FUNCTION: function, NAMESPACE: namespace, NAME: name})", "title": "" }, { "docid": "5acdcb63e8ba4ac80e64e36b87b80b8e", "score": "0.4882044", "text": "def __call__(self, func):\n name = func.__name__\n if name == \"<lambda>\":\n e = Exception(\"No anonymous functions here, boy. Get out!\")\n raise e\n if name in link._table:\n e = Exception(\n \"Oh no! You've got two functions with the same name, \\\"{}\\\"\".format(name))\n raise e\n\n return link._proxy(func, **self.kwargs)", "title": "" }, { "docid": "b0cf23cf36399505e2061a3665793dd0", "score": "0.48773316", "text": "def function(self) -> Callable:\n return self.env.function", "title": "" }, { "docid": "b0cf23cf36399505e2061a3665793dd0", "score": "0.48773316", "text": "def function(self) -> Callable:\n return self.env.function", "title": "" }, { "docid": "e28d3395671941a512598039a1a6e122", "score": "0.48668605", "text": "def inline_help(func):\n fun_name = '%s%s' % (func.__name__, inspect.signature(func))\n fun_doc = func.__doc__.strip().split('\\n')[0] if func.__doc__ else \"\"\n return \"%s\\n\\t%s\" % (fun_name, fun_doc)", "title": "" }, { "docid": "d737ba6aa5bb363aaacf8cbaf6b977af", "score": "0.48638412", "text": "def python_function(self):\n return self._python_function", "title": "" }, { "docid": "7014b172a9320516a6ba6f8d28a19bae", "score": "0.4863401", "text": "def identity():\n identity = _f2py_UT.f90wrap_identity()\n return identity", "title": "" }, { "docid": "a5cdda077b02e1e5ffa883bfb6a9c424", "score": "0.48528546", "text": "def write_function(self, name, n_locals):\n self.output_file.write(\"function \" + name + \" \" + str(n_locals) + \"\\n\")", "title": "" }, { "docid": "e4d7e7c9fc12a76c3bb65941c64fde7f", "score": "0.4841498", "text": "def fn():", "title": "" }, { "docid": "20d630e0664d0158ea0f7cc2f7352d07", "score": "0.48400342", "text": "def fn(self):\n return self._fn", "title": "" }, { "docid": "c68c9c156cfd1ba7f5b6afd4dd1d4f94", "score": "0.48385906", "text": "def sin_function():\n return lambda x: math.sin(x)", "title": "" }, { "docid": "31387a759a016e2c8e38a08696aadc9e", "score": "0.48342398", "text": "def lambda_function_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"lambda_function_arn\")", "title": "" }, { "docid": "39dfd056dd6ac577c54bec37d4f25a38", "score": "0.48338312", "text": "def function(fname):\n def _f(func):\n class WrapFunction(Function):\n name = fname\n\n def __call__(self, *args, **kwargs):\n return func(*args, **kwargs)\n\n return WrapFunction\n\n return _f", "title": "" }, { "docid": "b3272de240172978cb28f6d0b51695d1", "score": "0.4830331", "text": "def identity_function(value):\n return value", "title": "" }, { "docid": "41aed38eb5f00734121f5e251fbe8fb4", "score": "0.48209944", "text": "def func():\n pass", "title": "" }, { "docid": "25af187a1615e35b64b81b3896af05b2", "score": "0.48144448", "text": "def create_function_of_function(outer_function, inner_function):\n return lambda time: outer_function(inner_function(time), time)", "title": "" }, { "docid": "1478217b3df0d24fa95b53370a9b3e09", "score": "0.4812441", "text": "def parse(node):\n rtype = FunctionDefinition.parse_rtype(node) \n args = FunctionDefinition.parse_args(node)\n body = FunctionDefinition.body_contains_double_definition(node)\n return FunctionDefinition(node, rtype, args, body)", "title": "" }, { "docid": "0c2353c2c7971ebc724ea69482de48c3", "score": "0.480409", "text": "def _function(self, fctname, nVars):\n self._fct_name = fctname\n code = '('+fctname+')'+'\\n'\n if int(nVars) != 0:\n code += '@SP'+'\\n'\n code += 'A=M'+'\\n'\n for _ in range(int(nVars)):\n code += 'M=0'+'\\n'\n code += 'A=A+1'+'\\n'\n code += '@'+nVars+'\\n'\n code += 'D=A'+'\\n'\n code += '@SP'+'\\n'\n code += 'M=D+M'+'\\n'\n return code", "title": "" }, { "docid": "e59a333c3aff3bd5f2e539e5f3c73bf5", "score": "0.48010913", "text": "def visit_FunctionDef(self, node: ast.FunctionDef):\n super().generic_visit(node)\n\n node.body = self.cleanBody(node.body)\n return node", "title": "" }, { "docid": "466c74c8231e803853ced26414dc870d", "score": "0.47908738", "text": "def ident(x):\n return intrinsic('ident', x)", "title": "" }, { "docid": "2263c7410dff54bc3fdf72e9dc658550", "score": "0.47871947", "text": "def make_anonymous_factorial():\n return \"YOUR EXPRESSION HERE\"", "title": "" }, { "docid": "b849c6c3970e0bc647eb0fd382b33886", "score": "0.4776685", "text": "def next_position(self, fn):\n\n self._fn = fn\n \n lines, num = getsourcelines(self._fn)\n \n \n if (lines[0][:3] != 'def'):\n raise SyntaxError('unrecognized syntax, the function must begin with \"def\"')\n if (lines[-1].strip()[:6] != 'return'):\n raise SyntaxError( 'unrecognized syntax, the function must end with \"return\"')\n \n self._function = True\n var_name = getargspec(self._fn).args[0]\n self._next = \"\"\"function next(x) {\\n\"\"\"\n \n \n for line in lines[1:-1]:\n line = line.strip()\n if(line == ''):\n continue\n if(line[:len(var_name)+1] == var_name+ '['):\n self._next += '\\t' + line + ';\\n'\n else:\n self._next += '\\t' + 'var ' + line + ';\\n'\n \n self._next += '\\t' + lines[-1].strip() +';\\n'+ '}\\n'", "title": "" }, { "docid": "e62dbb75c9ea88964fd694ccbabedf58", "score": "0.47722575", "text": "def make_anonymous_factorial():\n return lambda x: x if x == 1 else mul(x, (lambda: make_anonymous_factorial)()()(x - 1))", "title": "" }, { "docid": "78e33815d207e4aa34d4e52320435483", "score": "0.4768945", "text": "def func():", "title": "" }, { "docid": "78e33815d207e4aa34d4e52320435483", "score": "0.4768945", "text": "def func():", "title": "" } ]
53708b31bc8ffe4495009b081e518f75
Return a particular LVNF in this tenant.
[ { "docid": "6d8e99eae8651813c00b8bc5eb1e953f", "score": "0.756228", "text": "def lvnf(self, addr):\n\n if self.tenant_id not in RUNTIME.tenants:\n return None\n\n if addr not in RUNTIME.tenants[self.tenant_id].lvnfs:\n return None\n\n return RUNTIME.tenants[self.tenant_id].lvnfs[addr]", "title": "" } ]
[ { "docid": "243aa44a45f3fde3c4066a7040d4bcc6", "score": "0.68816304", "text": "def lvnfs(self):\n\n if self.tenant_id not in RUNTIME.tenants:\n return None\n\n return RUNTIME.tenants[self.tenant_id].lvnfs.values()", "title": "" }, { "docid": "ea2ba440f1a9f95c41ee03746ae96484", "score": "0.5860856", "text": "def getVnfr(self, vnfrId):\n osm_url = f\"https://{self.ip}:9999/osm/nslcm/v1/vnf_instances/{vnfrId}\"\n # Get the VNFR from VNF ID in json format\n while True:\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {self.token}\",\n }\n response = requests.get(osm_url, headers=headers, verify=False)\n if response.status_code != 401:\n vnfr = response.json()\n break\n else:\n self.getToken()\n return vnfr", "title": "" }, { "docid": "76cda294a18046792584cb23f0d84e6e", "score": "0.5695049", "text": "def lvap(self, addr):\n\n if self.tenant_id not in RUNTIME.tenants:\n return None\n\n if addr not in RUNTIME.tenants[self.tenant_id].lvaps:\n return None\n\n return RUNTIME.tenants[self.tenant_id].lvaps[addr]", "title": "" }, { "docid": "4a05bad46b4d736a7dc40828af505537", "score": "0.54886276", "text": "def getVNFInfo (self, vnf_id=None):\n log.debug(\n \"Call getVNFInfo - VNF id: %s\" % vnf_id if vnf_id is not None else \"all\")\n return self.call_RPC('getVNFInfo', vnf_id=vnf_id)", "title": "" }, { "docid": "fd9ef0c4f0da0de88abdecb85482e995", "score": "0.5438796", "text": "def GetNeighborLDofTable(self, nbr_ldof):\n return _communication.GroupCommunicator_GetNeighborLDofTable(self, nbr_ldof)", "title": "" }, { "docid": "36a6e3208076751b9f6439808c3322c8", "score": "0.5435041", "text": "def _get_vrf(self):\n return self.__vrf", "title": "" }, { "docid": "d8474f4e265e5e058b6d767392507d8a", "score": "0.5430317", "text": "def get_nglview(self):\n import nglview as nv\n return nv.show_ase(self.to_ase_atoms())", "title": "" }, { "docid": "f03eb500fe1605f56d39f30a920595b8", "score": "0.54283935", "text": "def GetNeighborLTDofTable(self, nbr_ltdof):\n return _communication.GroupCommunicator_GetNeighborLTDofTable(self, nbr_ltdof)", "title": "" }, { "docid": "c4cddfac9460a86bebe02c0c5cea50f2", "score": "0.54209614", "text": "def get_ldif(self, dn):\n entry = self.get(dn)\n lines = self._data_as_ldif({dn: entry})\n return '\\n'.join(lines)", "title": "" }, { "docid": "8a906b6817c7cfc495c856e7913f72e7", "score": "0.5359037", "text": "def vlan(self):\n if self.is_nexus:\n return self.split_entry[1]\n return self.split_entry[0]", "title": "" }, { "docid": "1ba6f2b4adaa4a2d34671f699bea4f63", "score": "0.527446", "text": "def vlr_id(self):\n return self._vlr_id", "title": "" }, { "docid": "bd078c8abe838891574cbe68b6806ce3", "score": "0.5189612", "text": "def get(self, profilid):\n adm = AppAdministration()\n lernfaecher = adm.get_lernfaecher_by_profil_id(profilid)\n return lernfaecher", "title": "" }, { "docid": "673e25c93e077ac83e18709d8bb1aaec", "score": "0.5173184", "text": "def tno_get_view ( self, node ):\r\n return node.view", "title": "" }, { "docid": "e35eb2815cd9ac4e22d32c478ed61ad4", "score": "0.51564693", "text": "def _get_vlan_id(self):\n return self.__vlan_id", "title": "" }, { "docid": "6998931cd886df43418778e3b2188de4", "score": "0.51441157", "text": "def nfev(self):\n return self._nfev[0]", "title": "" }, { "docid": "57a8f79c4a22fdf2bf40a1348f91a791", "score": "0.5102128", "text": "def getLFU(self):\n idx = self.cache[['Frequency']].idxmin()[0] \n return self.cache['Address'][idx]", "title": "" }, { "docid": 
"f044387f430884b02da5ea8dff34b76d", "score": "0.5100537", "text": "def _get_ldp_local_lblspid(self):\n return self.__ldp_local_lblspid", "title": "" }, { "docid": "1000638b7d2582e72e308191945e65d8", "score": "0.50944155", "text": "def get_dni(self):\n return self.dni", "title": "" }, { "docid": "1000638b7d2582e72e308191945e65d8", "score": "0.50944155", "text": "def get_dni(self):\n return self.dni", "title": "" }, { "docid": "1f45e9c9e337f3bce43725a5b135febd", "score": "0.50878876", "text": "def serv_nf_id(self):\n return self._serv_nf_id", "title": "" }, { "docid": "7e744f2a1d9ec2c7cc19836226656845", "score": "0.50828636", "text": "def getIPs(self, vnfr):\n vnf_name = vnfr[\"vnfd-ref\"]\n mgmt_ip = vnfr[\"ip-address\"]\n vdu_ips = []\n vm_list = []\n for i in vnfr[\"vdur\"]:\n for ip in i[\"interfaces\"]:\n vdu_ips.append(ip[\"ip-address\"])\n vm_list.append(i[\"name\"])\n vnf_info = {\n \"vnf_name\": vnf_name,\n \"mgmt_ip\": mgmt_ip,\n \"vdu_ips\": vdu_ips,\n \"vm_list\": vm_list,\n }\n return vnf_info", "title": "" }, { "docid": "6919e7f1bb49da2ab9805890208dc1c2", "score": "0.5028536", "text": "def vlan(self):\n return self._vlan", "title": "" }, { "docid": "19745da950545a59803281d1850e8f8a", "score": "0.50152117", "text": "def v_net_id(self) -> Optional[str]:\n return pulumi.get(self, \"v_net_id\")", "title": "" }, { "docid": "8095bb40290fff12206187cbc65f1957", "score": "0.5006313", "text": "def vnf_instance_id(self) -> str:\n return self._vnf_instance_id", "title": "" }, { "docid": "1ebe93555c5ca876fa2cefd93d7fd98d", "score": "0.49913597", "text": "def linode(self):\n\n if self.entity and self.entity.type == \"linode\":\n return Instance(self._client, self.entity.id)\n return None", "title": "" }, { "docid": "76fa08b6828f81d27f84ec1ea700198a", "score": "0.4958983", "text": "def vnFx(self):\n return self.vnC", "title": "" }, { "docid": "e877918b9f2b7aa3e6ddc4d7ddae6696", "score": "0.49566045", "text": "def nfvi_host(self):\n return self._nfvi_host", "title": "" }, { "docid": "a7826ba1d7003e523ab6f4f0a890d5cb", "score": "0.4944396", "text": "def _get_isns_vrf_instance(self):\n return self.__isns_vrf_instance", "title": "" }, { "docid": "a3d2415f1ed45cc03db7e4712b563a87", "score": "0.49424034", "text": "def _get_vnic_data(self, index, val, field):\n vnic_fields = ['state', 'link', 'status', 'ipaddress', 'vnic', 'mac', 'hostname', 'subnet',\n 'routerip', 'namespace', 'index', 'vlantag', 'vlan']\n cmd = [self.oci_network_path, 'show', '--details', '--output-mode', 'parsable']\n all_vnic_data = subprocess.check_output(cmd).decode('utf-8').splitlines()\n\n for vnic in all_vnic_data:\n vnic_list = vnic.split('#')\n if index not in vnic_fields or field not in vnic_fields:\n return None\n if vnic_list[vnic_fields.index(index)] == val:\n return vnic_list[vnic_fields.index(field)]\n return None", "title": "" }, { "docid": "d8670fa39c2b963f8394be746705c70b", "score": "0.4936702", "text": "def get_DNI(self):\n return self.dni", "title": "" }, { "docid": "653204eef12289825d4052facd76a8e1", "score": "0.49224713", "text": "def _get_ldp_entity_index(self):\n return self.__ldp_entity_index", "title": "" }, { "docid": "28a60120e8fd4dcefd7aa610998ebd08", "score": "0.49170497", "text": "def getNLLAlt(self):\n return self._gp.LML()", "title": "" }, { "docid": "208ec5f26b5b01f74eebdc22223f381d", "score": "0.49153084", "text": "def vnetid(self) -> str:\n return pulumi.get(self, \"vnetid\")", "title": "" }, { "docid": "3b9222ccffd9ae7fb6f3d876320cb38a", "score": "0.49005568", "text": "def _get_lsp(self):\n 
return self.__lsp", "title": "" }, { "docid": "3b9222ccffd9ae7fb6f3d876320cb38a", "score": "0.49005568", "text": "def _get_lsp(self):\n return self.__lsp", "title": "" }, { "docid": "e663573d5ff47ca62de1c0c2533381f5", "score": "0.48792836", "text": "def getNDVI(self):\n return self.spectral._ndvi(\n nir=self.getBand(\"nir\"),\n red=self.getBand(\"red\")\n )", "title": "" }, { "docid": "83f1947befa0d2ce33ed554b5da23e42", "score": "0.48479906", "text": "def getVlan(self, vlanid, namespace=None):\n if namespace == None:\n namespace = self.getNamespace()\n identifier = VlanIdentifier(vlanid)\n vlan = pynt.xmlns.GetRDFObject(identifier=identifier, namespace=namespace, klass=Vlan, \n initfunction=self.setNewVlanProperties, vlanid=vlanid)\n return vlan", "title": "" }, { "docid": "6b2801a161b8afcdab9675d37d81860e", "score": "0.48442903", "text": "def _get_ldp_peer_lblspid(self):\n return self.__ldp_peer_lblspid", "title": "" }, { "docid": "073e2d53f2d4d3c7ee09778afd1ce892", "score": "0.48399863", "text": "def basedn(self) :\n\t\ttry :\n\t\t\treturn self._basedn\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "ec308aa940bdfd922563ced63d238c60", "score": "0.4827459", "text": "def getDN(self):\n return self.dn", "title": "" }, { "docid": "8d44da5cc513275c5ca9ecb1ddd76ef9", "score": "0.48246044", "text": "def vlan(self) -> Optional[int]:\n return pulumi.get(self, \"vlan\")", "title": "" }, { "docid": "36d8002751d27d46bdd8722a940550dd", "score": "0.48087344", "text": "def get_lview():\n clients = parallel.Client()\n clients.direct_view().use_dill() # Testing, stackoverflow.com/a/24316222\n lview = clients.load_balanced_view()\n lview.block = False\n return lview", "title": "" }, { "docid": "be3408d58909dc3e24d0766d37413cc5", "score": "0.47956523", "text": "def get_vendortype(self, index):\n return self.data().get_vendor()", "title": "" }, { "docid": "72b11602414689867dcfc0557d53d066", "score": "0.47932208", "text": "def name(self) -> Optional[str]:\n return self._vaillant_name", "title": "" }, { "docid": "4eafe12637a58a426e63cf1bb4da83e4", "score": "0.47763222", "text": "def get_view ( self, object ):\r\n return object.tno_get_view( self )", "title": "" }, { "docid": "8fef5ecf72274d14af706a305576ab96", "score": "0.47684586", "text": "def vnic_type(self) -> str:\n return pulumi.get(self, \"vnic_type\")", "title": "" }, { "docid": "89d7fe2b640816b82b664aca8f66cc69", "score": "0.4765389", "text": "def name(self):\n return self._nfvi_host.name", "title": "" }, { "docid": "f9afbd283bbef19c0dafd3e043558c10", "score": "0.47543162", "text": "def get_node(nid):\n with nipyapi.utils.rest_exceptions():\n return nipyapi.nifi.ControllerApi().get_node(nid)", "title": "" }, { "docid": "c2db03f4ce09496e100272c855092087", "score": "0.4739983", "text": "def get_node(nid):\n return swagger_client.ControllerApi.get_node(nid)", "title": "" }, { "docid": "a888348f6534422e4df8be01639dbe3d", "score": "0.47362578", "text": "def _get_ldp_gr(self):\n return self.__ldp_gr", "title": "" }, { "docid": "97052cb316f43d929358ffced11d3c92", "score": "0.4727837", "text": "def vnic_type(self) -> Optional[str]:\n return pulumi.get(self, \"vnic_type\")", "title": "" }, { "docid": "1047760cdfa42cef0898c56fcc04d164", "score": "0.47187257", "text": "def lunar_module(self):\n return self._lunar_module", "title": "" }, { "docid": "2ba0cbf8b81eaaf2145d6f4115219b41", "score": "0.47133198", "text": "def get_is(self, nh):\n return _RMF.DiffuserFactory_get_is(self, nh)", "title": "" }, { "docid": 
"1ea2776e81940e58b221cf58d79497d8", "score": "0.4711832", "text": "def _get_lldp(self):\n return self.__lldp", "title": "" }, { "docid": "0491286d0aba653457421c0fd52835d5", "score": "0.47114003", "text": "def lun(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"lun\")", "title": "" }, { "docid": "b9d494162e911aa85548e9e245e2f3d8", "score": "0.47060302", "text": "def ligand(self):\n return self._ligand", "title": "" }, { "docid": "2ca7feca1cf3966aef37ebaba67439b8", "score": "0.46930537", "text": "def getVnfrId(self, nsr):\n vnfrId_list = nsr[\"constituent-vnfr-ref\"]\n return vnfrId_list", "title": "" }, { "docid": "a5a4e935a9f91c9b7ac282cf6913f971", "score": "0.4691018", "text": "def getWlan(self, nr: int = 3):\n if(nr == 3):\n return self.getWlan3()\n elif(nr == 1):\n return self.getWlan1()", "title": "" }, { "docid": "0ae9e9499efcd54c1f12d08637b02187", "score": "0.4679739", "text": "def _find_lun(self, volume):\n LOG.debug('_find_lun, volume id: %s.', volume['id'])\n volumeinstance = None\n volumename = self._create_volume_name(volume['id'])\n\n try:\n location = ast.literal_eval(volume['provider_location'])\n classname = location['classname']\n bindings = location['keybindings']\n\n if classname and bindings:\n LOG.debug('_find_lun, '\n 'classname: %(classname)s, '\n 'bindings: %(bindings)s.',\n {'classname': classname,\n 'bindings': bindings})\n volume_instance_name = (\n self._create_eternus_instance_name(classname, bindings))\n\n LOG.debug('_find_lun, '\n 'volume_insatnce_name: %(volume_instance_name)s.',\n {'volume_instance_name': volume_instance_name})\n\n vol_instance = (\n self._get_eternus_instance(volume_instance_name))\n\n if vol_instance['ElementName'] == volumename:\n volumeinstance = vol_instance\n except Exception:\n volumeinstance = None\n LOG.debug('_find_lun, '\n 'Cannot get volume instance from provider location, '\n 'Search all volume using EnumerateInstanceNames.')\n\n if volumeinstance is None:\n # for old version\n\n LOG.debug('_find_lun, '\n 'volumename: %(volumename)s.',\n {'volumename': volumename})\n\n # get volume instance from volumename on ETERNUS\n try:\n namelist = self._enum_eternus_instance_names(\n 'FUJITSU_StorageVolume')\n except Exception:\n msg = (_('_find_lun, '\n 'volumename: %(volumename)s, '\n 'EnumerateInstanceNames, '\n 'cannot connect to ETERNUS.')\n % {'volumename': volumename})\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n for name in namelist:\n try:\n vol_instance = self._get_eternus_instance(name)\n\n if vol_instance['ElementName'] == volumename:\n volumeinstance = vol_instance\n path = volumeinstance.path\n\n LOG.debug('_find_lun, '\n 'volumename: %(volumename)s, '\n 'vol_instance: %(vol_instance)s.',\n {'volumename': volumename,\n 'vol_instance': path})\n break\n except Exception:\n continue\n else:\n LOG.debug('_find_lun, '\n 'volumename: %(volumename)s, '\n 'volume not found on ETERNUS.',\n {'volumename': volumename})\n\n LOG.debug('_find_lun, ret: %s.', volumeinstance)\n return volumeinstance", "title": "" }, { "docid": "e4c9fe3e6a66dd192fc0bbfe9125f31d", "score": "0.4679221", "text": "def get_nt(self, index: int) -> NameTerm:\n if index in self._index_to_nt:\n return self._index_to_nt.get(index) # type: ignore\n else:\n # User error. 
User should not ask for an out-of-bounds index.\n raise IndexError(f\"Index {index} does not exist in index.\")", "title": "" }, { "docid": "0695ef5e000367bb04601022f23b3c31", "score": "0.4673395", "text": "def name(self):\n return self.nrf_device.name", "title": "" }, { "docid": "dec02732132dd76439bce8127cc9c617", "score": "0.4672441", "text": "def get(self, id):\n adm = AppAdministration()\n lernfach = adm.get_lernfach_by_id(id)\n return lernfach", "title": "" }, { "docid": "bca2812cdbed9193716c40c9df08d7b6", "score": "0.4666267", "text": "def nh(self):\n return self.__nh", "title": "" }, { "docid": "478adb4b73f32d5b13fd7487c99adcb3", "score": "0.4664172", "text": "def get_viewer(self):\n if self._napari_viewer is None:\n return self._nd_viewer\n else:\n return self._napari_viewer", "title": "" }, { "docid": "15017e562fb53663211bc7f4a22bba07", "score": "0.46600324", "text": "def show_vlan(self, vlan=None):\n\n if vlan is not None and vlan in self.get_vlans_list():\n print self.vlans[vlan]\n else:\n for v in self.vlans:\n print self.vlans[v]", "title": "" }, { "docid": "15017e562fb53663211bc7f4a22bba07", "score": "0.46600324", "text": "def show_vlan(self, vlan=None):\n\n if vlan is not None and vlan in self.get_vlans_list():\n print self.vlans[vlan]\n else:\n for v in self.vlans:\n print self.vlans[v]", "title": "" }, { "docid": "3b4e7db518180cf0ed0d301abfab68ab", "score": "0.46592137", "text": "def get(self, profilid):\n adm = AppAdministration()\n lerngruppe = adm.get_lerngruppe_by_profil_id(profilid)\n return lerngruppe", "title": "" }, { "docid": "ea91744a814890130d0f005529528f06", "score": "0.46462357", "text": "def get_userVent(self):\n return self.read_register(1003, numberOfDecimals=0, signed=False, functioncode=3)", "title": "" }, { "docid": "a73b7a64f228245edb169cb2824a5ac2", "score": "0.46444726", "text": "def tenant_vrf(self, **kwargs):\r\n\r\n vrf_dict = collections.defaultdict(list)\r\n uri = \"https://{}/api/node/mo/uni/tn-{}.json?query-target=children&target-subtree-class=fvCtx\"\\\r\n .format(self.apic, kwargs[\"tenant\"])\r\n request = self.session.get(uri, verify=False)\r\n response = json.loads(request.text)\r\n\r\n try:\r\n index = 0\r\n for i in range(0, 100):\r\n vrf_dict[\"vrf\"].append(response[\"imdata\"][index][\"fvCtx\"][\"attributes\"][\"name\"])\r\n index = index + 1\r\n except IndexError:\r\n pass\r\n\r\n return vrf_dict", "title": "" }, { "docid": "704fdc9968df4ece009ec7c96833a163", "score": "0.46421972", "text": "def vld_id(self):\n return self._vlr_msg.vld_ref", "title": "" }, { "docid": "24119214c1f6b8c115773108588c5a9f", "score": "0.46403337", "text": "def _get_vector_line(self):\n line_type = vs.FPenPatN()\n return ObjectRepository().get(vs.Index2Name(line_type * -1)) if line_type < 0 else None", "title": "" }, { "docid": "05ce9626c5be21d10dbf16a312c908a5", "score": "0.4637136", "text": "def lookup_nlcd(land_use):\n if land_use not in LAND_USE_VALUES:\n raise KeyError('Unknown land use type: %s' % land_use)\n elif 'nlcd' not in LAND_USE_VALUES[land_use]:\n raise KeyError('Land use type %s does not have an NLCD class defined',\n land_use)\n else:\n return LAND_USE_VALUES[land_use]['nlcd']", "title": "" }, { "docid": "08d93ceed4b52291fb0e4e699b70be9c", "score": "0.46303764", "text": "def oppgi_navn(self):\n return self.name", "title": "" }, { "docid": "072fef8c701a9e97ec621f5a002c4e17", "score": "0.46214223", "text": "def GetLabel(self,n):\n return self.SerialQuery('GRAT%dLABEL?' 
% (n))", "title": "" }, { "docid": "9531bf946a4f29ce34464755b724653d", "score": "0.46186438", "text": "def get(self, flavor):\n return self._get(\"/flavors/%s\" % base.getid(flavor), \"flavor\")", "title": "" }, { "docid": "41dc825e79e31a7c9eed4d8cb3aebd8d", "score": "0.46132043", "text": "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "title": "" }, { "docid": "41dc825e79e31a7c9eed4d8cb3aebd8d", "score": "0.46132043", "text": "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "title": "" }, { "docid": "41dc825e79e31a7c9eed4d8cb3aebd8d", "score": "0.46132043", "text": "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "title": "" }, { "docid": "41dc825e79e31a7c9eed4d8cb3aebd8d", "score": "0.46132043", "text": "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "title": "" }, { "docid": "41dc825e79e31a7c9eed4d8cb3aebd8d", "score": "0.46132043", "text": "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "title": "" }, { "docid": "e4ab101aa9913bd2fa93a066b8824ac0", "score": "0.46070695", "text": "def _get_ni(self, env, node_id):\n instances = env.storage.get_node_instances(node_id=node_id)\n assert len(instances) == 1\n return instances[0]", "title": "" }, { "docid": "e1e53cedc84a6336a5d52913618e0e81", "score": "0.46023428", "text": "def laagtype(self):\n return self._laagtype.get_waarde()", "title": "" }, { "docid": "c26807d118da974d082040cba3765d6c", "score": "0.4601407", "text": "def vm(self) -> str:\n return pulumi.get(self, \"vm\")", "title": "" }, { "docid": "2870a7838e6078777f37324310837527", "score": "0.4600032", "text": "def _get_physnet_tunneled_info(self, context, neutron, net_id):\n if self.has_multi_provider_extension(client=neutron):\n network = neutron.show_network(net_id,\n fields='segments').get('network')\n segments = network.get('segments', {})\n for net in segments:\n # NOTE(vladikr): In general, \"multi-segments\" network is a\n # combination of L2 segments. The current implementation\n # contains a vxlan and vlan(s) segments, where only a vlan\n # network will have a physical_network specified, but may\n # change in the future. 
The purpose of this method\n # is to find a first segment that provides a physical network.\n # TODO(vladikr): Additional work will be required to handle the\n # case of multiple vlan segments associated with different\n # physical networks.\n physnet_name = net.get('provider:physical_network')\n if physnet_name:\n return physnet_name, False\n\n # Raising here as at least one segment should\n # have a physical network provided.\n if segments:\n msg = (_(\"None of the segments of network %s provides a \"\n \"physical_network\") % net_id)\n raise exception.NovaException(message=msg)\n\n net = neutron.show_network(\n net_id, fields=['provider:physical_network',\n 'provider:network_type']).get('network')\n return (net.get('provider:physical_network'),\n net.get('provider:network_type') in constants.L3_NETWORK_TYPES)", "title": "" }, { "docid": "b2aa0439e93451031485512504d8ac21", "score": "0.45960337", "text": "def _get_ldp_advertise_label(self):\n return self.__ldp_advertise_label", "title": "" }, { "docid": "6fa217e4b8a53c5f97cc3b0d6589bb62", "score": "0.45926803", "text": "def linguist_identifier(self):\n return self._linguist.identifier", "title": "" }, { "docid": "9ed4cd060ca758f0971f0c7aadc4008d", "score": "0.45918316", "text": "def access_tier(self) -> str:\n return pulumi.get(self, \"access_tier\")", "title": "" }, { "docid": "e119feb08ebca1a30798e1fb20684a08", "score": "0.4588449", "text": "def get_rfport_voltage(self):\n resp_raw = self._get('-v')\n voltageout = resp_raw.split('-v')[-1].strip(' ')\n self._logger.debug('Got RF port voltage: \"%s\".', voltageout)\n\n return voltageout", "title": "" }, { "docid": "fbb124918785826f07ab9683ee605b0c", "score": "0.4584737", "text": "def view_tenants(self):\r\n\r\n uri = \"https://{}/api/class/fvTenant.json\".format(self.apic)\r\n\r\n request = self.session.get(uri, verify=False)\r\n response_dict = request.json()\r\n total_count = int(response_dict[\"totalCount\"])\r\n\r\n try:\r\n index = 0\r\n for i in range(0, total_count):\r\n self.tenant_array.append(response_dict[\"imdata\"][index][\"fvTenant\"][\"attributes\"][\"name\"])\r\n index = index + 1\r\n except IndexError:\r\n pass\r\n\r\n return self.tenant_array", "title": "" }, { "docid": "16d792bd72671cf8a8a223b6c7176b25", "score": "0.4568982", "text": "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "title": "" }, { "docid": "1ac0c686cdf6f8cba48afb48c17c0f72", "score": "0.4567283", "text": "def vnf_profile_id(self) -> str:\n return self._vnf_profile_id", "title": "" }, { "docid": "df983d65c8eb0fe15cca70c6af488ae0", "score": "0.45633018", "text": "def getTF_Name(ensID, db):\n cmdStr = \"SELECT HGNC_Name from TranscriptionFactors where EnsembleID = \\\"\" + ensID + \"\\\";\"\n cursor=db.cursor()\n cursor.execute(cmdStr)\n rows = cursor.fetchall()\n if len(rows) > 0:\n return rows[0][0]\n else:\n return None", "title": "" }, { "docid": "4205ed15b03c35f4764b838dc3f041bb", "score": "0.45575336", "text": "def uuid(self):\n return self._nfvi_host.uuid", "title": "" }, { "docid": "4b9540098013d90440aef463a0591f5c", "score": "0.45525423", "text": "def lms_instance(self) -> Optional[str]:\n return pulumi.get(self, \"lms_instance\")", "title": "" }, { "docid": "ee2b0a4febba384123fd2accd8c17979", "score": "0.45470187", "text": "def pvlan_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"pvlan_type\")", "title": "" }, { "docid": "4c251c21be245b3095cad497edb2351b", "score": "0.45444003", "text": "def _get_rsf_partner():\n partner = None\n hosts = []\n hostname = 
get_hostname()\n\n try:\n output = execute(\"%s status\" % _rsfcli)\n except Exception, e:\n logger.error(\"Failed to determine appliance RSF partner\")\n logger.debug(str(e), exc_info=True)\n raise RuntimeError(\"Failed to determine appliance RSF partner\")\n\n # Parse the output for the partner hostname\n for l in output.splitlines():\n if l.startswith(\"Host\"):\n hosts.append(l.split()[1].strip())\n\n # Determine which clustered host is the partner\n for h in hosts:\n if h != hostname:\n partner = h\n break\n\n if partner is None:\n logger.error(\"Failed to determine appliance RSF partner\")\n logger.debug(output)\n raise RuntimeError(\"Failed to determine appliance RSF partner\")\n\n logger.debug(\"RSF partner is %s\" % partner)\n\n return partner", "title": "" }, { "docid": "3ff2e8e757565dff6e596dcf70c29437", "score": "0.4542424", "text": "def nalt(self):\n return self._nalt", "title": "" }, { "docid": "505432ed37b99dfa8a7d35601be3c769", "score": "0.45420027", "text": "def get_lsi_snmp_index(self, **kwarg):\n rhandle = kwarg.get(\"rhandle\", None)\n ls_name = kwarg.get(\"logical-system\", None)\n label = kwarg.get(\"label\", None)\n vrf_name = kwarg.get(\"vrf_name\", None)\n if ls_name is None:\n rt_output = rhandle.cli(\n command=\"show route label %s extensive active-path\"%label).response()\n else:\n rt_output = rhandle.cli(\n command=\"show route label %s extensive active-path \"\\\n \"logical-system %s\"%(label, ls_name)).response()\n lsi_if = re.search(r'Next hop: via lsi.(\\d+) [(]%s[)], selected'%vrf_name, rt_output)\n if lsi_if:\n lsi_intf = 'lsi.' + lsi_if.group(1)\n lsi_snmp_index = self.get_snmp_index_value(\n dhandle=rhandle, interface=lsi_intf)\n else:\n lsi_snmp_index = None\n return lsi_snmp_index", "title": "" }, { "docid": "9b61dad0cfc8fec916356aa238b02bd7", "score": "0.45320916", "text": "def vlan_id(self):\n return self._vlan_id", "title": "" }, { "docid": "297087e9e9c3531f81b33995e41e0367", "score": "0.45268595", "text": "def vdisk_by_uid(self, vdisk_uid):\n\n vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)\n\n if len(vdisks) == 0:\n return None\n\n if len(vdisks) != 1:\n msg = (_('Expected single vdisk returned from lsvdisk when '\n 'filtering on vdisk_UID. %(count)s were returned.') %\n {'count': len(vdisks)})\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n vdisk = vdisks.result[0]\n\n return self.ssh.lsvdisk(vdisk['name'])", "title": "" } ]
28ac0f95cb5d7de56dd58690cd1c22c2
Generates all Pythagorean triples with sum (a+b+c) <= n, m-n odd and m,n coprime, and returns the Pythagorean triples (a,b,c)
[ { "docid": "a822654a02fcadae9ba4ccd9714616f7", "score": "0.7234538", "text": "def pythagorean_triples(L, give_sum=False):\n M = int((L//2)**0.5)+1\n for m, n in coprimes(M):\n # Skip m,n with (m-n) even\n if (m-n) % 2 == 0: continue\n for k in itertools.count(1):\n triple = k*(m*m-n*n), 2*k*m*n, k*(m*m+n*n)\n perim = sum(triple)\n if perim <= L:\n if give_sum: yield perim, tuple(sorted(triple))\n else: yield tuple(sorted(triple))\n else: break", "title": "" } ]
[ { "docid": "270dba8f27498fcb432a46b9affb1de2", "score": "0.7959152", "text": "def pythagorean_triplet(n):\n\n max_value = n // 3 + 1 # set maximum for a, not exceed 1/3 of n, to prevent b < a or c < a\n for a in range(1, max_value):\n rest = n - a\n for b in range((rest-a)//2 + 1, rest//2 + 1): # set the condition tighter to improve algorithm\n c = n - a - b\n if a < b and a + b > c and a**2 + b**2 == c**2:\n print(a, \"*\", b, \"*\", c)\n return a * b * c\n else:\n return None # In case can't find any.", "title": "" }, { "docid": "6626cb5bacd54c6a8fd58c9f871691cc", "score": "0.7457681", "text": "def pythagorean_triplet(n: int) -> tuple:\r\n # the lowest number in the triplet can't be higher than n/3\r\n a_high = int(n/3)\r\n\r\n # start with a_high, go down, check possible triplets\r\n # that sum to n to see if they're Pythagorean triplets\r\n for a in range(a_high, 0, -1):\r\n # b has to be higher than a, but lower than c\r\n b_high = int((n-a)/2)\r\n for b in range(a+1, b_high + 1):\r\n c = n - a - b\r\n if a**2 + b**2 == c**2:\r\n return a, b, c\r\n\r\n return 0, 0, 0", "title": "" }, { "docid": "89c7fe16228a245b38183bb8bd10bb45", "score": "0.739192", "text": "def PythagoreanTriples(N):\n\n description = \"Pythagorean triples problem on 1...{}\".format(N)\n ptn = CNF(description=description)\n\n def V(i):\n return \"x_{{{}}}\".format(i)\n\n # Variables represent the coloring of the number\n for i in range(1, N + 1):\n ptn.add_variable(V(i))\n\n for x, y in combinations(range(1, N + 1), 2):\n z = int(sqrt(x**2 + y**2))\n if z <= N and z**2 == x**2 + y**2:\n ptn.add_clause([(True, V(x)), (True, V(y)), (True, V(z))],\n strict=True)\n ptn.add_clause([(False, V(x)), (False, V(y)), (False, V(z))],\n strict=True)\n\n return ptn", "title": "" }, { "docid": "a5437e3d4697682dfff7f1023e8d35f2", "score": "0.73530793", "text": "def problem_nine():\n target = 1000\n combinations = [(a,b,target-b-a) for b in xrange(1, target/2) for a in xrange(b,target/2)]\n triples = filter(is_pythagorean, combinations)\n a,b,c = first(filter(lambda t: sum(t) == target, triples))\n return a*b*c", "title": "" }, { "docid": "6d95f5cc2bbab8dac5422fa956aaf818", "score": "0.7251694", "text": "def special_pythagorean_triplet(number):\n triplet = []\n flag = True\n\n for index_i in range(1, number):\n for index_ii in range(1, index_i):\n for index_iii in range(1,index_ii):\n if (index_iii*index_ii)+(index_i*number) == (number**2)/2 and index_i + index_ii + index_iii == number:\n triplet.append(index_i)\n triplet.append(index_ii)\n triplet.append(index_iii)\n \n\n return triplet", "title": "" }, { "docid": "59b72554f5a7c1220a504e16ddd588e7", "score": "0.7155664", "text": "def case_gen():\n\t# See http://en.wikipedia.org/wiki/Pythagorean_triplets#Generating_a_triple\n\tfor m in count(2):\n\t\t# required conditions for uniquness m > n, m - n is odd, and m & n are coprime (gcd == 1)\n\t\tfor n in range(1 if m % 2 == 0 else 2, m, 2):\n\t\t\tif gcd(m,n) == 1:\n\t\t\t\ta = m**2 - n**2\n\t\t\t\tb = 2 * m * n\n\t\t\t\tc = m**2 + n**2\n\t\t\t\tif c % abs(b - a) == 0:\n\t\t\t\t\tyield (a,b,c)", "title": "" }, { "docid": "3bb45fa4abffcb353d4bffe73d9dfce4", "score": "0.63608825", "text": "def pythagorean(perimeter):\n limit = int(sqrt(perimeter/2))+1\n result = defaultdict(list)\n for m in range(2, limit):\n for n in range(1,m):\n if gcd(m,n) == 1 and (m-n)%2 == 1:\n a = m**2-n**2\n b = 2*m*n\n c = m**2+n**2\n p = a+b+c\n s = p\n while p <= perimeter:\n result[p].append((p*a/s,p*b/s,p*c/s))\n p += a+b+c\n return result", "title": "" 
}, { "docid": "58d962ba8830350e57d5409d40afc62a", "score": "0.6356062", "text": "def main_logic(*args, **kwargs) -> list:\n num: int = args[0]\n res = []\n for m_var in range(2, ceil(sqrt(num))):\n for n_var in range(1, m_var):\n # m and n are coprime and not both odd\n if gcd(m_var, n_var) == 1 and (m_var - n_var) % 2 and (m_var ** 2 + n_var ** 2) < num:\n a_var = m_var ** 2 - n_var ** 2\n b_var = 2 * m_var * n_var\n c_var = m_var ** 2 + n_var ** 2\n if a_var > b_var:\n a_var, b_var = b_var, a_var\n k_var = 1\n while k_var * c_var < num:\n res.append(\n [k_var * a_var, k_var * b_var, k_var * c_var])\n k_var += 1\n return res", "title": "" }, { "docid": "806161448d600352e6c49a3b7fff6d0d", "score": "0.6235959", "text": "def generate_triples(max_side):\n for a in range(1, max_side):\n for b in range(a, max_side):\n # If a^2 + b^2 is a perfect square, then this triple is valid\n c_double = math.sqrt(a*a + b*b)\n c = int(c_double)\n if c == c_double:\n yield [a, b, c]", "title": "" }, { "docid": "2ad4cf6fa18b338af0f73692393b6a7a", "score": "0.61713284", "text": "def is_pythagorean_triple(a, b, c):\n triple = None\n if a**2 + b**2 == c**2:\n triple = True\n else:\n triple = False\n ### EXERCISE 1 -- Replace pass with your code\n return triple", "title": "" }, { "docid": "e38d64828a7c6dd05365363344204b8b", "score": "0.6164699", "text": "def triplets(n):\n return [(a, b, a+b) for a in range(1, n)for b in range(a, n-1) if a+b < n]", "title": "" }, { "docid": "95fa6a4b1ecc7cb60c8dc3792da66119", "score": "0.6155254", "text": "def genPrimitivePythagorianTriplesWithPerimeter(p):\n ret = []\n \n for m in range(1, int(math.sqrt((p // 2 + 1))) + 1):\n for n in range(1, min(int(p // (2 * m)) + 1 - m, m)):\n if ((m - n) % 2 == 1):\n if (gcd(m, n) == 1):\n a , b = min(m * m - n * n, 2 * m * n) , max(m * m - n * n, 2 * m * n)\n c = m * m + n * n\n\n if ((a + b + c) <= p):\n ret += [(a, b, c)]\n return ret", "title": "" }, { "docid": "38a0e429ded446d83995e10322f807a3", "score": "0.60694003", "text": "def test_sum_3_combinations():\n assert solve([1721, 979, 366, 299, 675, 1456], 3) == 241861950", "title": "" }, { "docid": "e26d197250c2e60a200d715c83acfaf4", "score": "0.60614014", "text": "def problem009a():\n \n for c in range(3, 1000):\n for b in range(2, c):\n a = math.sqrt(c**2 - b**2)\n if a > b:\n continue\n if a + b + c != 1000:\n continue\n print a,b,c\n print a*b*c\n return", "title": "" }, { "docid": "1c1c802b5410d72f6be86710075953b9", "score": "0.6046664", "text": "def primitive_triple_perimeters(limit):\n\n triples = set()\n\n for m in range(3, int(limit ** 0.5) + 1, 2):\n for n in range(m - 2, 0, -2):\n if gcd(m, n) == 1:\n\n a = (m * m - n * n) // 2\n b = m * n\n c = (m * m + n * n) // 2\n\n wire = a + b + c\n\n if wire <= limit:\n triples.add(wire)\n\n return triples", "title": "" }, { "docid": "5bd5041ff1f406d901bf8107b2540606", "score": "0.60312676", "text": "def problem0009():\n candidates = ((a, b, 1000 - a - b)\n for a in xrange(1, 999)\n for b in xrange(a + 1, 999))\n return first(a * b * c for (a, b, c) in candidates\n if a * a + b * b == c * c)", "title": "" }, { "docid": "b5c0686a461af0ca72a4b4f4a464d8c8", "score": "0.6026654", "text": "def main():\n\tTARGET_SUM = 1000\n\t# a < b\n\tfor a in range(1, TARGET_SUM-2):\n\t\tfor b in range(a, TARGET_SUM-1):\n\t\t\t# c^2 = a^2 + b^2\n\t\t\tc_sq = a**2 + b**2\n\t\t\tif is_square(c_sq):\n\t\t\t\tc = int(math.sqrt(c_sq))\n\t\t\t\tif a + b + c == 1000:\n\t\t\t\t\treturn a*b*c\n\treturn -1", "title": "" }, { "docid": 
"5278bd0dfcbb141bd0c846311f0258df", "score": "0.60257745", "text": "def a002113(n):\n yield 0\n for i in range(1, n + 1):\n for j in range(i, n + 1):\n ij = i * j\n if _is_palindrome(ij):\n yield ij", "title": "" }, { "docid": "73da651a78967e2a54c54f93319c4271", "score": "0.6017696", "text": "def coprimes(N):\n def _coprime_gen(n, a=1, b=1):\n # the actual generating function. We don't use directly because\n # the first tuple is (1,1) which voilate b < a.\n yield (a, b)\n k = 1\n while a*k + b <= n:\n for coprimes in _coprime_gen(n, a*k+b, a):\n yield coprimes\n k += 1\n # Skip the first item which is always (1,1)\n cg = _coprime_gen(N)\n next(cg)\n for pair in cg:\n yield pair", "title": "" }, { "docid": "e9b8fae2787f6310d2ad0fbb1442cb9f", "score": "0.6010343", "text": "def problem0032():\n def get_permutation(ndigits):\n return ((num_from_digits(ds), list(ds))\n for ds in permutations(range(1, 10), ndigits))\n def get_multiplicands(ndigits1, ndigits2):\n return cartesian_product(get_permutation(ndigits1),\n get_permutation(ndigits2))\n candidates = chain(get_multiplicands(1, 4), get_multiplicands(2, 3))\n return sum(iunique(a*b for ((a, adigits), (b, bdigits)) in candidates\n if a*b < 1e4\n and is_pandigital(adigits + bdigits + digits_from_num(a*b))))", "title": "" }, { "docid": "752bdbdc952a4a22a50d50f310c152cf", "score": "0.59748834", "text": "def get_palindromes_created_from_three_digit_nums():\n for i in range(100, 1000):\n for j in range(100, 1000):\n g = i * j\n if is_palindrome(g):\n yield g", "title": "" }, { "docid": "fa2122a06de302555ddedac880b01826", "score": "0.5873229", "text": "def crt(a: List[int], m: List[int]) -> int:\n M = 1\n for mi in m:\n M *= mi\n\n x = 0\n for i in range(len(a)):\n a_i = a[i]\n m_i = m[i]\n\n M_i = M // m_i\n y_i = pow(M_i, -1, m_i)\n\n x = (x + a_i * M_i * y_i) % M\n return x", "title": "" }, { "docid": "6e9f9c4bc1288b9318dbfc9a726cbd39", "score": "0.5867013", "text": "def two_of_three(a, b, c):\n # return a * a + b * b + c * c - pow(min(a, b, c), 2)\n # return max(a * a + b * b, b * b + c * c, a * a + c * c)\n return pow(sorted((a, b, c))[1], 2) + pow(sorted((a, b, c))[2], 2)", "title": "" }, { "docid": "a8652d04bbe3df5d81f31cb3ae2ff2fd", "score": "0.58470976", "text": "def brute(lim):\n\t# Pythgorean triplets formula from http://en.wikipedia.org/wiki/Pythagorean_triples#Generating_a_triple\n\t# Limit for m is based on the equation 2 * m**2 + 2*m - 1 < lim\n\tL = int(((2 * lim + 3)**0.5 - 1) // 2 + 1)\n\t# Set checks triangle uniquness\n\tl = set()\n\tfor m in range(2, int((lim/2 + 1)**0.5 - 1)):\n\t\tfor n in range(1 if m % 2 == 0 else 2, m, 2):\n\t\t\tif gcd(m,n) == 1:\n\t\t\t\ta = m**2 - n**2\n\t\t\t\tb = 2 * m * n\n\t\t\t\tc = m**2 + n**2\n\t\t\t\tif c % abs(b - a) == 0:\n\t\t\t\t\t# multiples of the base case\n\t\t\t\t\tfor k in range(1, lim/sum((a,b,c)) + 1):\n\t\t\t\t\t\tl.add(tuple(sorted((k * a, k * b, k * c))))\n\treturn len(l)", "title": "" }, { "docid": "2103148a8fef5a2b9f6a73b06235be13", "score": "0.58390677", "text": "def problem9():\n first_a = 0\n # the largest value of c that satisfies a < b < c is 999\n for c in xrange(2, 999):\n largest_b = c - 1\n for b in xrange(1, largest_b+1):\n largest_c = b - 1\n for a in xrange(0, largest_c+1):\n if a + b + c == 1000:\n if (a**2)+(b**2) == (c**2):\n return a*b*c\n return -1", "title": "" }, { "docid": "862fb25222e3a97ba51bb9ce9acae799", "score": "0.58278406", "text": "def test_sum_2_combinations():\n assert solve([1721, 979, 366, 299, 675, 1456], 2) == 514579", "title": "" }, { "docid": 
"8ebbb859734c8e3d382c29d37eac9348", "score": "0.5821808", "text": "def McNuggets(n):\n for a in range(n):\n for b in range(n):\n for c in range(n):\n if 6 * a + 9 * b + 20 * c == n:\n return True\n return False", "title": "" }, { "docid": "12e5e44886f8c23b83f8bb1943d1ecde", "score": "0.5814165", "text": "def pytriple_gen(max_c: int) -> Iterator[Tuple[int, int, int]]:\n for real_pts in range(2, int(sqrt(max_c)) + 1, 1):\n for imag_pts in range(real_pts % 2 + 1, real_pts, 2):\n comp = complex(real_pts, imag_pts)\n sqrd = comp * comp\n real = int(sqrd.real)\n imag = int(sqrd.imag)\n if abs(real - imag) % 2 == 1 and gcd_it(imag, real) == 1:\n sea = int((comp * comp.conjugate()).real)\n if sea > max_c:\n break\n else:\n yield (imag, real, sea) if real > imag else (real, imag, sea)", "title": "" }, { "docid": "4088a8efac72d196a1ddae51cb906869", "score": "0.5812202", "text": "def pythagorean(a, b):\n c = a * a + b * b\n return math.sqrt(c)", "title": "" }, { "docid": "0759f4ec42aa212b9c5c634ed777a516", "score": "0.5808998", "text": "def calc_triples(self, side = 1):\n triple = []\n self.set_corner_side(side)\n if side % 2 == 0:\n squares = self.get_even_squares()\n else:\n squares = self.get_odd_squares()\n #print \"calc_triples corner {side}, squares {squares}\".format(side=side, squares=squares)\n stop = False;\n for sq in squares:\n [a, b, c] = self.calc_a_b_c(sq)\n\n if a != b:\n t = [a, b, c]\n alts = self.find_alternatives(t)\n t.sort()\n #t.append(alts)\n if not stop:\n t_str = ', '.join(map(lambda x: str(x), t))\n t_str = t_str + '; diff ' + str(b-a)\n triple.append(t_str)\n\n stop = b - a < 0\n\n return triple", "title": "" }, { "docid": "4c1e50e077df37af22c5f2666b6a3f03", "score": "0.5779319", "text": "def ck(j,l,m,a,b):\n coefficient = 0.0\n \n for k in range(0,l+1):\n for i in range(0,m+1):\n if i + k == j:\n coefficient += special.binom(l,k) * special.binom(m,i) * a**(l-k) * b**(m-i)\n\n return coefficient", "title": "" }, { "docid": "aa0eabfc23dd4140b0ef9a8e86de7587", "score": "0.577721", "text": "def pythagGiveC(a, b):\n return root(add(square(a), square(b)))", "title": "" }, { "docid": "c64970bd718dee383307258df2327f29", "score": "0.5752363", "text": "def combination(n):\n\n return np.array([[int(c) for c in [*('{0:0' + str(n) + 'b}').format(i)]] for i in range(2**n)])", "title": "" }, { "docid": "1fa61049731a763ff580ef046f4375f0", "score": "0.5752217", "text": "def problem_4():\n first, second = 0, 0\n for i in range(999, 99, -1):\n for j in range(999, 99, -1):\n if is_palindrome(str(i * j)) and i * j > first * second:\n first, second = i, j\n return first * second", "title": "" }, { "docid": "f54b784bb9a5ee4500731043c10d0745", "score": "0.57488984", "text": "def pentagonal_sequence(n=1):\n while True:\n yield n * (3 * n - 1) // 2\n n += 1", "title": "" }, { "docid": "1d50840a8eb6d8c83620f0f1714cfe12", "score": "0.5747412", "text": "def coPrimes(n):\n choices = defaultdict(list)\n for pair in CoPrimePairs[n]:\n choices[pair[0]].append(pair[1])\n if pair[0] != pair[1]:\n choices[pair[1]].append(pair[0])\n return choices", "title": "" }, { "docid": "b2b0b0662a3beb7f393cb04df65f9c0b", "score": "0.5745931", "text": "def problem0039():\n def get_sides_for_perimeter(perimeter):\n return ((a, b, c) for (a, b, c) in candidates if a + b + c == perimeter)\n # Perimeter for 1st pythagorean triplet is 12, so take 84\n # (1000 / 12 = 83.3) rounds of 16 elements, total (84 * 16) 1344.\n candidates = list(take(1344, pythagorean_triplets()))\n return max(xrange(120, 1000), key=compose(ilen, 
get_sides_for_perimeter))", "title": "" }, { "docid": "abe15ec96a632fc2721c0677f1cfe096", "score": "0.5743772", "text": "def coeff(n, m):\r\n\tc = 0\r\n\tif n != m:\r\n\t\tc = 2\r\n\telse:\r\n\t\tc = 1\r\n\treturn c", "title": "" }, { "docid": "157f1ff31db205e2b4e924a7866a89df", "score": "0.5740785", "text": "def solution_9():\n a = xrange(1,1001)\n b = xrange(1000,0,-1)\n for i in a:\n for j in b:\n c = 1000 - i - j\n if c < 0:\n continue\n else:\n if i**2 + j**2 == c**2:\n print('solution found')\n return i*j*c", "title": "" }, { "docid": "3eb5359195cd86ae008e833dbfd5160a", "score": "0.5733927", "text": "def triples(num):\n lst = [i ** 2 for i in range(1, num)]\n ans = []\n\n # Now the question essentially is to find 3 numbers ST 2 of them add up to the third one.\n for num in lst:\n target_sum = num\n candidate_lstoflists = twosum(lst, target_sum)\n for i in candidate_lstoflists:\n i += [num]\n ans.append(i)\n\n ans = return_unique_members(ans)\n ans = [tuple([int(e**0.5) for e in lst]) for lst in ans]\n return ans", "title": "" }, { "docid": "7a9994b1c174b9b8ed105e11f1f114f7", "score": "0.56994784", "text": "def gen_theta_combinations(thetas):\n theta_combinations = [] \n for t0 in thetas[0]:\n for t1 in thetas[1]:\n for t2 in thetas[2]:\n for t3 in thetas[3]:\n theta_combinations.append( (t0,t1,t2,t3) )\n \n return theta_combinations", "title": "" }, { "docid": "eb20baca564da67f370512fa55dfb9e7", "score": "0.5689533", "text": "def __box_combination(self, n):\n if not isinstance(n, int) or n <= 0:\n raise ValueError(\"n has to be positive integer.\")\n\n combinations = list()\n\n for i in range(1, n + 1):\n y = 1\n\n while i * y < n:\n y = y + 1\n \n combinations.append((i, y))\n\n return combinations", "title": "" }, { "docid": "707cae034a7ebf6bb4cfd77bad0ad719", "score": "0.5683392", "text": "def calcsolnat(a, b, c):\n sol1 = (-b + math.sqrt(b*b -4*a*c))/2\n sol2 = (-b - math.sqrt(b*b -4*a*c))/2\n\n return sol1, sol2", "title": "" }, { "docid": "9153480561e4cde30de9a3e07f1743ef", "score": "0.56686175", "text": "def get_integer_pairs_which_product_is_no_more_than_n(self, n):\n if not isinstance(n, int) or n <= 0:\n raise ValueError(\"n has to be positive integer.\")\n\n combinations = list()\n\n for i in range(1, n + 1):\n y = 1\n\n while i * y < n:\n y = y + 1\n \n combinations.append((i, y))\n\n return combinations", "title": "" }, { "docid": "6069c071ac496ee09e0fb2f923702245", "score": "0.563501", "text": "def list_of_squares(n):\n\n for i in n:\n range(n)\n result = (i ** 2)\n return result", "title": "" }, { "docid": "ffd0257a6e2f36500976433e5be574fe", "score": "0.5627596", "text": "def subsets_turbo(n,p,s):\n from math import floor\n k_max = floor((p-n)/s)\n \n # summation\n result = 0\n for i in range(0, k_max+1): # k_max inclusive..\n \n C_1 = (-1)**i\n C_2 = ncr(n, i)\n C_3 = ncr(p-s*i-1,n-1)\n C_4 = C_1*C_2*C_3\n result += C_4\n \n return result", "title": "" }, { "docid": "86f5c6cd4a180942eb90ae6a76f0598c", "score": "0.5627506", "text": "def problem0044():\n pairs = ((p1, p2) for (n1, p1) in ((n, pentagonal(n)) for n in count(0))\n for p2 in (pentagonal(n) for n in xrange(1, n1))\n if is_pentagonal(p1 - p2) and is_pentagonal(p1 + p2))\n p1, p2 = first(pairs)\n return p1 - p2", "title": "" }, { "docid": "61ba51c101aaf0bb2ef61f4b00a8c747", "score": "0.562661", "text": "def problem0031():\n def recursive(amount, coins):\n \"\"\"Returns the combinations of coins for the given amount\"\"\"\n if len(coins) == 1:\n return 1 if amount % coins[0] == 0 else 0\n return 
sum(recursive(amount - coins[0] * n, coins[1:])\n for n in xrange(amount / coins[0] + 1))\n return recursive(200, [200, 100, 50, 20, 10, 5, 2, 1])", "title": "" }, { "docid": "b7232994a88bfdbee219a9a4460a2e42", "score": "0.56211346", "text": "def ten_pairs(n):\n def count(x, m = n):\n if m%10 == m:\n if x == m:\n return 1\n else:\n return 0\n elif m%10 == x:\n return count(x, m//10) + 1\n else:\n return count(x, m//10)\n return count(1)*count(9) + count(2)*count(8) + count(3)*count(7) + count(4)*count(6) + (count(5)*(count(5)-1))//2", "title": "" }, { "docid": "21654b0a8e2170a55d02a979913f4d6a", "score": "0.5614977", "text": "def solve_congruence(*remainder_modulus_pairs, **hint):\n def combine(c1, c2):\n \"\"\"Return the tuple (a, m) which satisfies the requirement\n that n = a + i*m satisfy n = a1 + j*m1 and n = a2 = k*m2.\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Method_of_successive_substitution\n \"\"\"\n a1, m1 = c1\n a2, m2 = c2\n a, b, c = m1, a2 - a1, m2\n g = gcd(a, b, c)\n a, b, c = [i//g for i in [a, b, c]]\n if a != 1:\n inv_a, _, g = igcdex(a, c)\n if g != 1:\n return None\n b *= inv_a\n a, m = a1 + m1*b, m1*c\n return a, m\n\n rm = remainder_modulus_pairs\n symmetric = hint.get('symmetric', False)\n\n if hint.get('check', True):\n rm = [(as_int(r), as_int(m)) for r, m in rm]\n\n # ignore redundant pairs but raise an error otherwise; also\n # make sure that a unique set of bases is sent to gf_crt if\n # they are all prime.\n #\n # The routine will work out less-trivial violations and\n # return None, e.g. for the pairs (1,3) and (14,42) there\n # is no answer because 14 mod 42 (having a gcd of 14) implies\n # (14/2) mod (42/2), (14/7) mod (42/7) and (14/14) mod (42/14)\n # which, being 0 mod 3, is inconsistent with 1 mod 3. 
But to\n # preprocess the input beyond checking of another pair with 42\n # or 3 as the modulus (for this example) is not necessary.\n uniq = {}\n for r, m in rm:\n r %= m\n if m in uniq:\n if r != uniq[m]:\n return None\n continue\n uniq[m] = r\n rm = [(r, m) for m, r in uniq.items()]\n del uniq\n\n # if the moduli are co-prime, the crt will be significantly faster;\n # checking all pairs for being co-prime gets to be slow but a prime\n # test is a good trade-off\n if all(isprime(m) for r, m in rm):\n r, m = list(zip(*rm))\n return crt(m, r, symmetric=symmetric, check=False)\n\n rv = (0, 1)\n for rmi in rm:\n rv = combine(rv, rmi)\n if rv is None:\n break\n n, m = rv\n n = n % m\n else:\n if symmetric:\n return symmetric_residue(n, m), m\n return n, m", "title": "" }, { "docid": "06598f45a9110379084d377504e94d29", "score": "0.5610802", "text": "def g_iter(n):\n \n \n if n <= 3:\n return n\n a, b, c = 3, 2, 1\n amount, k = 0, 3\n while k < n:\n amount = a + 2*b + 3*c\n a,b,c = total, a, b\n k = k+ 1\n return total", "title": "" }, { "docid": "1b4adda583ea5cde2f3ec01ec0269e4c", "score": "0.56107044", "text": "def problem0037():\n def truncatable_get_primes():\n for ndigits in count(2):\n digit_groups = [[2, 3, 5, 7]] + [[1, 3, 7, 9]] * (ndigits - 2) + [[3, 7]]\n for ds in cartesian_product(*digit_groups):\n x = num_from_digits(ds)\n if is_prime(x) and all(is_prime(num_from_digits(ds[n:])) and\n is_prime(num_from_digits(ds[:-n])) for n in range(1, len(ds))):\n yield x\n return sum(take(11, truncatable_get_primes()))", "title": "" }, { "docid": "afd62e628d4a78ee48020caba2426de4", "score": "0.5609107", "text": "def ncombinations(n, k):\n return cartesian_product(xrange(n-k+1, n+1)) / factorial(k)", "title": "" }, { "docid": "c7fd1ff9f9c3be91b64fec3c40bec2af", "score": "0.5585417", "text": "def find_products(numbers, n):\n\n for c in combinations(numbers, n):\n if sum(c) == 2020:\n prod = reduce(mul, c)\n print(f\"multiply {c} = {prod}\")", "title": "" }, { "docid": "1a92c642d8b9f325f6b01f2f863600be", "score": "0.5573673", "text": "def all_triples(limit):\n\n prim_peris = list(primitive_triple_perimeters(limit))\n non_prims = []\n\n for i in prim_peris:\n if limit % 1 == 0:\n non_prims.extend((i * j for j in range(2, limit // i + 1)))\n else:\n non_prims.extend((i * j for j in range(2, int(ceil(limit // i)))))\n\n count = Counter(prim_peris + non_prims)\n return sum(1 for x in count if count[x] == 1)", "title": "" }, { "docid": "2f0bd4b9c89b1587ae86ee3c044c5966", "score": "0.5565087", "text": "def compsum(m, n):\n return bincoeff(m-1, n-1)", "title": "" }, { "docid": "5fd1367320c76d7a4567d8efde7d40a0", "score": "0.5540594", "text": "def euler0005(n): # n must be 20 in order to solve the problem\n R = 1\n for x in euler.powD(range(1,n),1,0,1,1): R *= x\n return R", "title": "" }, { "docid": "81a866819771d268c8214a4823ed11f0", "score": "0.55400395", "text": "def euler1():\n # Problem :\n \n # Solve:\n result = 0\n for n in range(1000):\n if n%3 == 0 or n%5 == 0:\n result += n\n\n return result", "title": "" }, { "docid": "2fa2578134f137a8cec7f4009d9a323b", "score": "0.55393773", "text": "def problem0035():\n def is_circular_prime(digits):\n return all(is_prime(num_from_digits(digits[r:] + digits[:r]))\n for r in xrange(len(digits)))\n circular_primes = (num_from_digits(ds) for n in xrange(2, 7)\n for ds in cartesian_product([1, 3, 7, 9], repeat=n)\n if is_circular_prime(ds))\n return ilen(chain([2, 3, 5, 7], circular_primes))", "title": "" }, { "docid": "cc03f69f95515f33a2f8974ebc6d5a74", 
"score": "0.552845", "text": "def solutions(perimeter):\n for a in range(1, perimeter / 2):\n for b in range(a, perimeter - a):\n c = sqrt(a ** 2 + b ** 2)\n if a + b + c == perimeter:\n yield a, b, int(c)", "title": "" }, { "docid": "f50da3c11346504ce2fbd756fad97c41", "score": "0.5517696", "text": "def calc_3(n: int) -> List[int]:\n sieve = [True] * (n + 1)\n sieve[0] = sieve[1] = False\n\n # eliminate even numbers\n for i in range(4, n + 1, 2):\n sieve[i] = False\n # go through odd only numbers, up to sqrt of all numbers\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n for j in range(i * i, n + 1, i * 2):\n sieve[j] = False\n return [i for i, v in enumerate(sieve) if v]", "title": "" }, { "docid": "36e04364b301de8f934c93f6d3ed6891", "score": "0.55101866", "text": "def generate_combinations(n, m):\n def make_neighbors(partial_combi):\n for spam in range(partial_combi[-1] + 1, n):\n clone = [x for x in partial_combi]\n clone.append(spam)\n yield tuple(clone)\n\n if m > n:\n raise ValueError(\"n >= m at all times\")\n\n skew = [[x] for x in range(n)]\n\n while skew:\n # Must use the skew as a queue rather than stack to ensure ordering.\n # Call pop with no arguments to perform DFS instead.\n combi = skew.pop(0)\n\n if len(combi) == m:\n yield tuple(combi)\n else:\n for neighbor in make_neighbors(combi):\n skew.append(neighbor)", "title": "" }, { "docid": "409fb2118dc6ed1bf162e76a18500f8f", "score": "0.5502628", "text": "def problem0004():\n # A brute-force solution is a bit slow, let's try to simplify it a little bit:\n # xy = \"abccba\" = 100001a + 10010b + 1100c = 11 * (9091a + 910b + 100c)\n # So at least one of them must be multiple of 11.\n candidates = (x * y\n for x in xrange(110, 1000, 11)\n for y in xrange(x, 1000))\n return max(x for x in candidates if is_palindromic(x))", "title": "" }, { "docid": "2d1bd8db9d0fd9632b3cca0be3ff0161", "score": "0.5498557", "text": "def problem_32():\n\n prod_set = set()\n perms = __lex_permutations({i for i in range(1, 10)})\n for perm in perms:\n s = str().join(str(i) for i in perm)\n if int(s[:2]) * int(s[2:5]) == int(s[5:]) or int(s[:3]) * int(s[3:5]) == int(s[5:]) or int(s[:1]) * int(s[1:5]) == int(s[5:]) or int(s[:4]) * int(s[4:5]) == int(s[5:]):\n prod_set.add(int(s[5:]))\n\n return sum(prod_set)", "title": "" }, { "docid": "f320225ff56629afdd2fd9f41d5f8f68", "score": "0.54763013", "text": "def generate_primes( n ):\n sieve = [ True ] * n\n for i in range( 3 , int( sqrt( n ) ) + 1 , 2 ):\n if sieve[ i ]:\n sieve[ ( i * i ) : : ( 2 * i ) ] = [ False ] * ( ( n - ( i * i ) - 1 ) // ( 2 * i ) + 1 )\n return [ 2 ] + [ i for i in range( 3 , n , 2 ) if sieve[ i ] ]", "title": "" }, { "docid": "c028949e52aa2061d0cc57918a903910", "score": "0.54679596", "text": "def PolyChaos(cn,alpha,x):\n Sum=0\n for n in range(0,len(cn)):\n Sum=Sum+cn[n]*PSolve(x,n,k=alpha)\n return(Sum)", "title": "" }, { "docid": "d9b5d8e167e1106a9a117dfff6da0ecc", "score": "0.54677635", "text": "def problem0028():\n return sum((2 + l - n) * l + 1\n for n in xrange(4)\n for l in xrange(2, 1001, 2)) + 1", "title": "" }, { "docid": "4452d1f564bd341fa93a53566efd270d", "score": "0.546435", "text": "def compo_product(compo, n):\n total = multinomial(n, compo)\n for i in compo:\n total *= g(i)\n return total", "title": "" }, { "docid": "893732f2cd0c87495dbd18685c44f675", "score": "0.54444885", "text": "def poch(a,n):\n p = 1\n for m in range(0, n):\n p *= a+m\n return p", "title": "" }, { "docid": "7df8c55dcd561f75f2301312636f6394", "score": "0.54391575", "text": "def 
extended_change_making(coins: list, n: int) -> list:\n _pre_conditions(coins, n)\n\n m = _get_change_making_matrix(len(coins), n)\n\n # Matrix used to keep track of which coins are used.\n p = _get_sets_of_coins_matrix(len(coins), n)\n\n for c in range(1, len(coins) + 1):\n\n # In this module's doc-strings, z ranges from 0 to C. However, because\n # of implementation details (see _get_change_making_matrix), here we\n # range from 1 to n (or C).\n for z in range(1, n + 1):\n\n # Just use the coin coins[c - 1].\n if coins[c - 1] == z:\n m[c][z] = 1\n p[c][z].append(coins[c - 1])\n\n # coins[c - 1] cannot be included. We use the previous solution for\n # for totaling z, excluding coins[c - 1].\n elif coins[c - 1] > z:\n m[c][z] = m[c - 1][z]\n p[c][z] = p[c - 1][z]\n\n # We can use coins[c - 1]. We need to decide which one of the\n # following solutions is the best:\n #\n # 1. Using the previous solution for totaling z (without using\n # coins[c - 1]).\n #\n # 2. Using coins[c - 1] + the optimal solution for totaling\n # z - coins[c - 1].\n else:\n if m[c - 1][z] < 1 + m[c][z - coins[c - 1]]:\n p[c][z] = p[c - 1][z]\n m[c][z] = m[c - 1][z]\n else:\n p[c][z] = [coins[c - 1]] + p[c][z - coins[c - 1]]\n m[c][z] = 1 + m[c][z - coins[c - 1]]\n\n assert sum(p[-1][-1]) == n\n\n return p[-1][-1]", "title": "" }, { "docid": "808b00d5fbd7d49938e57ee14d9c0cbb", "score": "0.54376507", "text": "def get_combinations(hyperparameters):\n return list(itertools.product(*hyperparameters))", "title": "" }, { "docid": "659a392c22fce08bb8bd1233d4aaba2c", "score": "0.5431949", "text": "def phi_function(n):\n\tresult = [n]\n\twhile n > 1:\n\t\tif n % 2 == 0:\n\t\t\tn = n / 2\n\t\telse:\n\t\t\tn = 3 * n + 1\n\t\tresult.append(n)\n\n\treturn result\n\t# return len(result)", "title": "" }, { "docid": "0f1f27a7556c73466ea5251028ec87ec", "score": "0.542838", "text": "def pythagoras(a=None, b=None, c=None):\r\n\r\n have_a, have_b, have_c = a is not None, b is not None, c is not None\r\n\r\n if not have_a and have_b and have_c:\r\n return math.sqrt((c**2)-(b**2))\r\n if have_a and not have_b and have_c:\r\n return math.sqrt((c**2)-(a**2))\r\n if have_a and have_b and not have_c:\r\n return math.sqrt((a**2)+(b**2))\r\n\r\n raise Exception(\"You must specify exactly two sides.\")", "title": "" }, { "docid": "99e205a7d700bfdcf1be21452d4aa4b9", "score": "0.54267347", "text": "def two_of_three(a, b, c):\n return pow(max(a, b), 2) + pow(max(min(a, b), c), 2)", "title": "" }, { "docid": "62a55f4bd40fb628ef299f519d306a8e", "score": "0.54216623", "text": "def get_part_02_answer():\n return prod(summation_equals(puzzle_inputs, 2020, 3))", "title": "" }, { "docid": "5f92da55c0a69eb29f04f09ef798daf1", "score": "0.5420268", "text": "def euler0002(n): # n must be 4000000 in order to solve the problem\n x,y,R = 1,0,0\n while R<n:R,y,x=((x%2==0)and[R+x]or[R])[0],x,(x+y)\n return R", "title": "" }, { "docid": "4ec7a818945be6a6da25e7ab3922afb2", "score": "0.5418727", "text": "def newton(n: int) -> np.ndarray:\n # The nodes of the Clenshaw-Curtis rule are x_i = -cos(i * Pi / (n-1)).\n # Here, we calculate the coefficients c_i such that sum_i c_i * x^i\n # = prod_i (x - x_i). The coefficients are thus sums of products of\n # cosines.\n #\n # This routine uses the relation\n # cos(a) cos(b) = (cos(a + b) + cos(a - b)) / 2\n # to efficiently calculate the coefficients.\n #\n # The dictionary 'terms' descibes the terms that make up the\n # monomial coefficients. 
Each item ((d, a), m) corresponds to a\n # term m * cos(a * Pi / n) to be added to prefactor of the\n # monomial x^(n-d).\n\n mod = 2 * (n - 1)\n terms: dict[tuple[int, int], int] = defaultdict(int)\n terms[0, 0] += 1\n\n for i in range(n):\n newterms = []\n for (d, a), m in terms.items():\n for b in [i, -i]:\n # In order to reduce the number of terms, cosine\n # arguments are mapped back to the inteval [0, pi/2).\n arg = (a + b) % mod\n if arg > n - 1:\n arg = mod - arg\n if arg >= n // 2:\n if n % 2 and arg == n // 2:\n # Zero term: ignore\n continue\n newterms.append((d + 1, n - 1 - arg, -m))\n else:\n newterms.append((d + 1, arg, m))\n for d, s, m in newterms:\n terms[d, s] += m\n\n c = (n + 1) * [0]\n for (d, a), m in terms.items():\n if m and a != 0:\n raise ValueError(\"Newton polynomial cannot be represented exactly.\")\n c[n - d] += m\n # The check could be removed and the above line replaced by\n # the following, but then the result would be no longer exact.\n # c[n - d] += m * np.cos(a * np.pi / (n - 1))\n\n cf = np.array(c, float)\n assert all(int(cfe) == ce for cfe, ce in zip(cf, c)), \"Precision loss\"\n\n cf /= 2.0 ** np.arange(n, -1, -1)\n return cf", "title": "" }, { "docid": "77b1c58b07b8d3cea183228b059f0575", "score": "0.5415579", "text": "def triangles(n):\n \n return (n+1)**3- (n+1)", "title": "" }, { "docid": "b3d5f4bb0a685edb0c4b56be4ce6864c", "score": "0.5411504", "text": "def calculate(self):\n\n result = []\n mid = []\n\n for c in sorted(self.pcnt):\n n = self.pcnt[c]\n if n > 0:\n for j in range(n // 2):\n result.append(c)\n if n % 2:\n mid.append(c)\n\n return \"\".join(result + mid + list(reversed(result)))", "title": "" }, { "docid": "971e3a48b5d55817168ec50804d4e0e7", "score": "0.54084426", "text": "def euler(n) :\n\tn = int(n)\n\ttab = [0] * (n+1)\n\ti = 2\n\tn2 = n\n\twhile i <= n : \n\t\tif n2%i == 0 : \n\t\t\ttab[i] += 1\n\t\t\tn2 /= i\n\t\t\ti = 1\n\t\ti += 1;\n\tresultat = 1\n\tfor i in range(2, n+1) :\n\t\tif tab[i] > 0 : \n\t\t\tresultat *= math.pow(i, tab[i]) - math.pow(i, tab[i]-1)\n\treturn resultat", "title": "" }, { "docid": "807041d209035abde2000b73b00a9847", "score": "0.5407959", "text": "def NUMBER_OF_COMBINATIONS() -> int:\n return 13", "title": "" }, { "docid": "701b349045fc76e353d97ca30d80ff34", "score": "0.54052997", "text": "def multiplicationTable(n):\n return [[(i+1)*(j+1) for i in range(n)] for j in range(n)]", "title": "" }, { "docid": "e81b9969aa6b710f28e59b9f7537f384", "score": "0.53984296", "text": "def test(n):\n if n > 23:\n return \"Input size too large. 
Try something smaller than 24.\"\"\"\n\n def powerset(iterable):\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\n\n total = 0\n S = [i**i for i in range(1, n+1)]\n L = []\n for x in powerset(S):\n if sum(list(x)) % 250 == 0 and x:\n total += 1\n return total", "title": "" }, { "docid": "8c06582169c9dbbb48d75f04ec537a46", "score": "0.5385283", "text": "def get_coprimes(n, primes):\n factors = set(prime_factors(n, primes))\n\n # Now sieve out the factors\n coprime = [True for i in range(n)]\n coprime[0] = False\n coprime[1] = False\n for factor in factors:\n for multiplier in range(1, n // factor):\n coprime[factor * multiplier] = False\n\n # And we have the coprimes!\n return [c for c in coprime if c]", "title": "" }, { "docid": "c5c50749b3e10ab2c41c8e19a872bd5f", "score": "0.53812546", "text": "def most_square_product(n: int) -> Tuple[int, int]:\n o = math.ceil(math.sqrt(n))\n m = o - 1\n if o ** 2 == n:\n return o, o\n else:\n i = 0\n while o * m != n and m != 1:\n if i % 2 == 0:\n # Every even iteration\n o += 1\n else:\n # Every odd iteration\n m -= 1\n # Increase iteration count\n i += 1\n return m, o", "title": "" }, { "docid": "86ae5ff87c92739731a0ad84aa01fcca", "score": "0.5367453", "text": "def pentagonal(n):\n return int((3 * n * n - n ) / 2)", "title": "" }, { "docid": "a0058ff1b8a3c28bc70fef3bb5f83101", "score": "0.5364629", "text": "def crt(m, v, symmetric=False, check=True):\n if check:\n m = list(map(as_int, m))\n v = list(map(as_int, v))\n\n result = gf_crt(v, m, ZZ)\n mm = prod(m)\n\n if check:\n if not all(v % m == result % m for v, m in zip(v, m)):\n result = solve_congruence(*list(zip(v, m)),\n check=False, symmetric=symmetric)\n if result is None:\n return result\n result, mm = result\n\n if symmetric:\n return int(symmetric_residue(result, mm)), int(mm)\n return int(result), int(mm)", "title": "" }, { "docid": "61025571c91b6db156ba41a938121042", "score": "0.5364312", "text": "def ten_pairs(n):\n \"*** YOUR CODE HERE ***\"\n if n < 10 :\n return 0\n return ten_pairs(n // 10) + count_time(10 - n % 10, n // 10)", "title": "" }, { "docid": "b9a3b0a267044a6c85605add09c9bcad", "score": "0.53634787", "text": "def _get_coprime_combos(s, limit):\n combos = []\n if s < 1:\n raise ValueError(\"s should be greater than 1\")\n if s % 2 == 0:\n start = 1\n else:\n start = 2\n for t in range(start, s, 2): # s and t must be opposite parity, so step by 2\n if gcd(s, t) == 1 and (s + t) % 2 == 1:\n combos += [(s, t)]\n return combos", "title": "" }, { "docid": "5e353787dce5c5e32e2b304e59c19752", "score": "0.5356333", "text": "def solution_10(n=2000000): \n \n total = 0\n for i in xrange(n):\n if is_prime(i):\n total +=i\n return total", "title": "" }, { "docid": "cc2f7f22d24fe09ddb15cd09f7681a53", "score": "0.534999", "text": "def permcalculator(n):\r\n\r\n permutation = []\r\n count = 0\r\n\r\n #Create list of all possible permutations of +ve integers up to n (1, 2, 3)\r\n for i in itertools.permutations(list(range(1, n + 1))):\r\n\r\n #Create list of all possible +ve & -ve outcomes at each of the 3 positions\r\n for j in itertools.product([-1, 1], repeat=len(list(range(1, n + 1)))):\r\n\r\n #Multiply list 1 by all possible variants of list 2\r\n #zip function generates tuples containing parallel elements from each iterable\r\n perm = [a * sign for a, sign in zip(i, j)]\r\n\r\n permutation.append(perm)\r\n count += 1\r\n\r\n print(count)\r\n\r\n for i in 
range(len(permutation)):\r\n print(*permutation[i]) #Use * to print lists without []\r", "title": "" }, { "docid": "88901e704bb6f2e1ee95ecb2577d8128", "score": "0.5342756", "text": "def problem0029():\n return ilen(iunique((a ** b\n for a in xrange(2, 101)\n for b in xrange(2, 101))))", "title": "" }, { "docid": "9a9a112b92ab4718527f9ef78e4300bc", "score": "0.53424567", "text": "def phi(n):\n return len([x for x in range(1, n) if gcd(x, n) == 1])", "title": "" }, { "docid": "88068416f08962393753ac86d12fad71", "score": "0.53323835", "text": "def calculate_jacobi_symbol(m:int, n:int) -> int:\n if n < 0 or not n % 2:\n raise ValueError(\"n should be an odd positive integer\")\n if m < 0 or m > n:\n m = m % n\n if not m:\n return int(n == 1)\n if n == 1 or m == 1:\n return 1\n if gcd(m, n) != 1:\n return 0\n\n j = 1\n if m < 0:\n m = -m\n if n % 4 == 3:\n j = -j\n while m != 0:\n while m % 2 == 0 and m > 0:\n m >>= 1\n if n % 8 in [3, 5]:\n j = -j\n m, n = n, m\n if m % 4 == 3 and n % 4 == 3:\n j = -j\n m %= n\n if n != 1:\n j = 0\n return j", "title": "" }, { "docid": "f6f53e0c0944bdd820db0a21797d6d98", "score": "0.53295666", "text": "def mccoy(mode, op_a, op_b, m, n):\n new_op = dict()\n for r in range(0, n + 1):\n coeff = binom(n, r) / (2**n)\n new_term = tuple([(mode, op_b)] * r + [(mode, op_a)] * m +\n [(mode, op_b)] * (n - r))\n if new_term not in new_op:\n new_op[tuple(new_term)] = coeff\n else:\n new_op[tuple(new_term)] += coeff\n return new_op", "title": "" }, { "docid": "891f3a2b505a5f9275abcbe68536e295", "score": "0.53290033", "text": "def euler35(n):\n seedPrimes(int(math.sqrt(n)))\n circCount = 0\n for i in xrange(2, n):\n isCircPrime = True\n for ir in rotations(i):\n if not isPrime(ir):\n isCircPrime = False\n break\n if isCircPrime:\n circCount += 1\n return circCount", "title": "" }, { "docid": "f01a03d1e1924c35bb2501dfeae4019a", "score": "0.5324542", "text": "def calc_number_combinations(*args):\n total = 1\n for each in args:\n total *= len(each) # Multiplies lengths of all collections together.\n return total", "title": "" }, { "docid": "ec7a980cd6092410779b069bf7895d55", "score": "0.5323918", "text": "def printTriangle(n):\n for r in range(n):\n for c in range(r+1):\n print(computeCoeff(r,c), end=' ')\n print('\\n')", "title": "" }, { "docid": "95948cbbab086347e7d61dacd81a0497", "score": "0.5320787", "text": "def problem_43():\n def __div_perms(two_digit, digit_set, divisible_by):\n \"\"\"\n Generate 3-digit numbers from two_digit and an additional digit in digit_set\n that are divisible by divisible_by\n \"\"\"\n for digit in digit_set:\n new_num = int(str(two_digit) + str(digit))\n if new_num % divisible_by == 0:\n yield digit\n\n res = 0\n\n # Overall, there's 10 choose 3 = 120 triplets, 3! permutations for each triplet. 
Since we \n # also have to choose a digit for d1, we can do that in 9 choose 1 = 9 ways.\n # Then there are the obvious (easily testable) restrictions for some triplets: \n # d4 has to be pair (for d2d3d4 ro be divisible by 2)\n # d3 + d4 + d5 has to be divisible by 3\n # d6 has to be 0 or 5 (for d4d5d6 to be divisible by 5)\n # \n # There are divisibility tests for 7, 11, 13 and 17, but the given ones should already reduce the \n # possibilities to a small enough set for brute force to be viable.\n digits = {i for i in range(10)}\n triplets = __combinations(digits, 3)\n \n # candidates for d3, d4, d5\n by_3 = [c for c in triplets if sum(c) % 3 == 0]\n \n # since d4 has to be pair, we can eliminate triplets with all odd numbers\n by_3 = [s for s in by_3 if s.intersection({0, 2, 4, 6, 8})]\n\n # TODO: this is fugly, the loops could & should probably be refactored into one\n # separate, that just finds the i-th number of the sequence as needed\n\n for c in by_3:\n for p in __lex_permutations(c):\n # only if d4 is pair\n if p[1] % 2 == 0:\n # at this point, d3, d4 and d5 are fixed, but d6 can only be\n # 0 or 5, so just fix d6 to either and work both cases.\n # With 4 numbers fixed, this theoretically leaves us \n # 6 choose 3 = 20 options, but since we have ALREADY chosen\n # d3, d4, d5 and d6, and d5d6d7 has to be divisible by 7, we just \n # need to find our available options, then, similarly, do the same\n # for d6d7d8 to be divisible by 11, yadda yadda. In the end, if we even\n # get there, slap the remaining two elements up in the front, permute \n # them and live happily ever after\n for d6 in {0, 5}.difference(p):\n for d7 in __div_perms(str(p[2]) + str(d6), digits.difference(p + [d6]), 7):\n for d8 in __div_perms(str(d6) + str(d7), digits.difference(p + [d6, d7]), 11):\n for d9 in __div_perms(str(d7) + str(d8), digits.difference(p + [d6, d7, d8]), 13):\n\n for d10 in __div_perms(str(d8) + str(d9), digits.difference(p + [d6, d7, d8, d9]), 17):\n for front in __lex_permutations(digits.difference(p + [d6, d7, d8, d9, d10])):\n # first digit can't be zero\n if front[0]:\n res += int(str().join(str(i) for i in front + p + [d6, d7, d8, d9, d10]))\n\n return res", "title": "" }, { "docid": "f0ae9c1516adf9f72b49094bb94fc462", "score": "0.5319543", "text": "def compute_h3_pn(S: List[Vec3], n: int) -> Iterable[Vec3]:\n assert n >= 0\n if n == 0:\n return set()\n tuples = it.product(S, repeat=n)\n p_n = map(vectuple_h3_sum, tuples)\n return p_n", "title": "" } ]
c743f7246010cbe6833d12c760ee90cc
Checks whether coordinate (x,y) is in the circle with center s and radius val
[ { "docid": "b5c4a74dcb7087397ca948845c677589", "score": "0.0", "text": "def check_incl(s,x,y, val, norm = l1_norm, use_norm=True):\n \n keki = s.split(' ')\n o = float(keki[0])\n h = float(keki[1])\n if(not use_norm):\n if((abs(o-x)<val) and (abs(h-y))<val*2):\n return True\n else:\n return False\n if(norm(o,h,x,y)<val):\n return True\n else:\n return False", "title": "" } ]
[ { "docid": "96969616fcf6e2eb9efb0ce4cccce6c9", "score": "0.8017797", "text": "def is_in_circle(center, radius, position):\n\n d = math.sqrt(((center[0] - position[0]) ** 2) +\\\n (center[1] - position[1]) ** 2)\n\n return d <= radius", "title": "" }, { "docid": "11ecffe8b8ff1a20181d9ef3cc2180da", "score": "0.79951835", "text": "def in_circle(x, y):\n x, y = abs(x), abs(y)\n return y <= math.sqrt(1.0 - x ** 2.0)", "title": "" }, { "docid": "f8899e8547f1f7705f7d4dd37715563c", "score": "0.78113", "text": "def check_circle(self):\n self.radius=(self.max_x-self.min_x)/2\n self.cir_center_x=(self.max_x+self.min_x)/2\n self.cir_center_y=(self.max_y+self.min_y)/2\n if abs(distance(self.pix_x[10],self.cir_center_x,self.pix_y[10],self.cir_center_y)-self.radius) <=self.tolerance and self.check_is_closed():\n self.circle=True\n return True\n else:\n return False", "title": "" }, { "docid": "db3c1bd3213c09f47c6f54634fe3e3c0", "score": "0.7545162", "text": "def pointCircle(x, y, cx, cy, r):\n dx = abs(x - cx)\n dy = abs(y - cy)\n d = math.sqrt(dx ** 2 + dy ** 2)\n if d <= r:\n return True\n return False", "title": "" }, { "docid": "42631cf55c549ef03c6d4b6763079ec6", "score": "0.72667867", "text": "def circular_boolean(centre, radius):\r\n #define size of image\r\n y = len(hdulist)\r\n x = len(hdulist[0])\r\n Y, X = np.ogrid[:y, :x]\r\n\r\n #calculate distance from each point to centre\r\n disp_from_centre = np.sqrt((X - centre[0])**2 + (Y-centre[1])**2)\r\n \r\n #distances less than radius given value 'True'\r\n circle = disp_from_centre <= radius\r\n return circle", "title": "" }, { "docid": "4866db7ad8f8193dbd1fd35973a18422", "score": "0.72102", "text": "def mask_from_circle_params(center, radius, img_shape=(580,420)):\n# http://stackoverflow.com/questions/8647024/how-to-apply-a-disc-shaped-mask-to-a-numpy-array \n y_vals, x_vals = np.ogrid[0:img_shape[1],0:img_shape[0]]\n mask=(x_vals-center[0])**2 + (y_vals-center[1])**2 <= radius**2\n return mask", "title": "" }, { "docid": "6e059ef4dcea8b332594bcc78b61768c", "score": "0.7195683", "text": "def _check_point_in_circle(px, py):\n distance_to_holds = []\n for h in holds:\n d_sq = (px - h.x) ** 2 + (py - h.y) ** 2\n distance_to_holds.append(round(d_sq, 6))\n\n return all(i <= round(circle_radius ** 2, 6) for i in distance_to_holds)", "title": "" }, { "docid": "2f358b5b9d92eda5f150cea87378a24f", "score": "0.71847004", "text": "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "title": "" }, { "docid": "e70c6e5c7b00715325d3128c4c1af5da", "score": "0.71564794", "text": "def make_circle(circle, cx, cy, r):\n for x in range(cx - r, cx + r): # x-coordinate will belong to range (cx-r,cx+r)\n for y in range(cy - r, cy + r): # y-coordinate will belong to range(cy-r,cy+r)\n if dist(cx, cy, x, y) <= r and dist(cx, cy, x, y) >= r - 3:\n circle[x][y] = 1", "title": "" }, { "docid": "d13418d5c954dd271d126550443b3973", "score": "0.71382385", "text": "def in_radius(radius, latitude_center, longitude_center, gas_latitude, gas_longitude):\n return (gas_latitude <= radius + latitude_center\n and gas_longitude <= radius + longitude_center) or (gas_latitude >= radius - latitude_center\n and gas_longitude <= radius - longitude_center)", "title": "" }, { "docid": "2007843846034fdd51b80503ab58ffcb", "score": "0.7125686", "text": "def IsInsideCircle(self, Center, Radius):\n\n if (self.DistanceToPoint(Center) < Radius):\n return 1\n else:\n return 0", "title": "" }, { "docid": "17b76771ae638613a8a3296c43adc021", 
"score": "0.7094074", "text": "def circleCircle(cx1, cy1, cx2, cy2, r1, r2):\n dx = abs(cx1 - cx2)\n dy = abs(cy1 - cy2)\n d = math.sqrt(dx ** 2 + dy ** 2)\n if d <= r1 + r2:\n return True\n return False", "title": "" }, { "docid": "95d673b15d2ef811fac30e9177873aa2", "score": "0.7042919", "text": "def inSquare(center, point, r):\n\n return abs(center.lat - point.lat) <= r and abs(center.lon - point.lon) <= r", "title": "" }, { "docid": "064337ecbb3df1a081b8dbae482ca6d1", "score": "0.6976825", "text": "def inside(point, center):\n\treturn dist(point, center) < RADIUS", "title": "" }, { "docid": "03c2e2e92fcbb90a863d8c9c7ce89590", "score": "0.6904601", "text": "def monte_carlo_pi_single_point():\n # generate random X, Y pair\n x = np.random.uniform(-1.0, 1.0)\n y = np.random.uniform(-1.0, 1.0)\n\n # if (x, y) in circle\n if x**2 + y**2 < 1.:\n return 1\n else:\n return 0", "title": "" }, { "docid": "2ee49c7c43d8f85320c945264528db82", "score": "0.6872504", "text": "def withinRadius(self, origin, radius):\n spots_in_range = []\n retval = False\n \n #create a coordinate pair for the origin of the radius.\n origin_point = geometry.Point(origin)\n\n #create a circle, based on the radius and its origin, to measure against.\n search_radius_buffer = origin_point.buffer(radius)\n \n spot_point = geometry.Point([self.lon, self.lat])\n\n if spot_point.within(search_radius_buffer): \n retval = True\n\n return retval\n\n # Removeing. Avoid reinventing what already exists in libraries:\n #earthRadius = 6371 \n #distanceLat = math.radians(destinationLat-originLat)\n #distanceLon = math.radians(destinationLon-originLon)\n #a = math.sin(distanceLat/2) * math.sin(distanceLat/2) + math.cos(math.radians(originLat)) \\\n # * math.cos(math.radians(destinationLat)) * math.sin(distanceLon/2) * math.sin(distanceLon/2)\n #c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n #return earthRadius * c", "title": "" }, { "docid": "ad7eed7b67dd7a7d0d2677c7af377f9d", "score": "0.68393546", "text": "def if_close_circle(test_circle, label_circle,center_closeness_thredhold, radius_closeness_threshold):\n center_is_close = False\n similar_radius = False\n cen_dist = np.linalg.norm(np.array(test_circle['Center']) - np.array(label_circle['Center']))\n if cen_dist<center_closeness_thredhold*label_circle['Radius']:\n center_is_close = True\n\n rad_diff = test_circle['Radius'] - label_circle['Radius']\n rad_diff = np.abs(rad_diff)\n if rad_diff < radius_closeness_threshold*label_circle[\"Radius\"]:\n similar_radius = True\n return similar_radius and center_is_close", "title": "" }, { "docid": "a4869cbe4b3f4af61ff60d37875e9ff0", "score": "0.6822965", "text": "def inside_circle(total_count):\n\n x = np.float64(np.random.uniform(size=total_count))\n y = np.float64(np.random.uniform(size=total_count))\n\n radii = np.sqrt(x*x + y*y)\n\n count = len(radii[np.where(radii<=1.0)])\n\n return count", "title": "" }, { "docid": "91c117801cb65467cdd4a71353e84a72", "score": "0.67935705", "text": "def rectangle_circle_collide(corner_rec:tuple, dims:tuple, centre_circ:tuple, radius):\n \n if circle_inside_rectangle(corner_rec,dims,centre_circ,radius):\n return True\n else:\n return rectangle_circle_intersect(corner_rec, dims, centre_circ, radius)", "title": "" }, { "docid": "b3e58c45f4a8b127a3a3877e4666b63a", "score": "0.6791855", "text": "def close_circle(center1, radius1, center2, radius2):\n distance = np.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2)\n return distance < 1.3*(radius1 + radius2)", "title": "" }, { "docid": 
"78624025e429483f670eb7aaaad19c9a", "score": "0.6766206", "text": "def circle(self, center, radius, color = Color.DEFAULT, width = 1, filled = False, alpha = -1, antialias = True):", "title": "" }, { "docid": "e201fb99415906919c7e1342c4f54ff2", "score": "0.67555946", "text": "def rectangle_circle_intersect(corner_rec:tuple, dims:tuple, centre_circ:tuple, radius):\n\n nearest_x,nearest_y = get_rectangle_nearest_circle(corner_rec, dims, centre_circ)\n \n c_x,c_y = centre_circ\n \n del_x = c_x - nearest_x\n del_y = c_y - nearest_y\n return del_x**2 + del_y**2 < radius**2", "title": "" }, { "docid": "11fbc53d2f15f4d350575b7e0fbe62fa", "score": "0.6737856", "text": "def constrain(raven):\n x = raven.x\n y = raven.y\n # circle\n res = np.sqrt(x**2 + y**2)\n if res <= 0.2:\n return False\n # rectangle\n if 0.25 < x < 0.75 and 0 < y < 1:\n return False\n return True", "title": "" }, { "docid": "64751e4cc31a9471e8f004254d63a44a", "score": "0.66789633", "text": "def circle(radius):\n return math.pi * radius**2", "title": "" }, { "docid": "385318fdf354873406cff66f3b9d33d7", "score": "0.66742706", "text": "def isInRadius(boat, gps, rad):\n\treturn (((gps[0] - boat[0]) ** 2 + (gps[1] - boat[1]) ** 2) ** 0.5 < rad)", "title": "" }, { "docid": "b79402e00ff49661e7c05e77784f5b76", "score": "0.6663386", "text": "def particle_circle_collide(centreA, radius, point):\n\n return (centreA[0] - point[0])**2 + (centreA[1] - point[1])**2 <= radius**2", "title": "" }, { "docid": "01b627ed3e45d1be31384f585730eb0f", "score": "0.6648732", "text": "def circle(t, r):\n #print \"inside circle\"\n arc(t, r, 360)", "title": "" }, { "docid": "54a442c3eb85482ea2c93c523798a8fb", "score": "0.66303253", "text": "def cone_in_polysphere(PolesLong,PolesLat,Long,Lat,Radius):\n\n #Longitudes_circle=Long # N longitudes de cercles\n #Latitudes_circle=Lat # N latitudes de cercles\n #Radius_circles=Radius # N radius de cercles\n\n Dist=np.arccos(np.multiply(np.sin(PolesLat),np.sin(Lat))+np.multiply(np.cos(PolesLat),np.cos(Lat))*np.cos(PolesLong-Long))\n Flag=np.zeros(np.shape(Dist)[1])\n for i in range(np.shape(Dist)[1]):#optimize\n Flag[i]=all(Dist[:,i]<=0.5*math.pi+Radius) #1 if all distances are smaller than..\n return Flag", "title": "" }, { "docid": "124dd3ea7984a00dd152d3e0c3d1b0a6", "score": "0.66020936", "text": "def circle(x,d):\n return np.sqrt((d/2)**2 - x*x)", "title": "" }, { "docid": "e546c3a0b19c4809112fcf2047800e8c", "score": "0.6593599", "text": "def __contains__(self, posn):\n return np.power((posn - self.center), 2).sum() < self.radius**2", "title": "" }, { "docid": "09740103524f9602b29137ca2a4e08c5", "score": "0.65845376", "text": "def findCircleIntersection(self, x, y, radius):\r\n x1 = self.x1 - x\r\n y1 = self.y1 - y\r\n x2 = self.x2 - x\r\n y2 = self.y2 - y\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr2 = dx * dx + dy * dy\r\n det = x1 * y2 - x2 * y1\r\n\r\n discrim = dr2 * radius * radius - det * det\r\n if (discrim >= 0):\r\n sqrtDiscrim = sqrt(discrim)\r\n sign = -1 if (dy < 0) else 1\r\n\r\n posX = (det * dy + sign * dx * sqrtDiscrim) / dr2 + x\r\n posY = (-det * dx + abs(dy) * sqrtDiscrim) / dr2 + y\r\n negX = (det * dy - sign * dx * sqrtDiscrim) / dr2 + x\r\n negY = (-det * dx - abs(dy) * sqrtDiscrim) / dr2 + y\r\n\r\n posDot = self.dotProduct(posX, posY)\r\n negDot = self.dotProduct(negX, negY)\r\n\r\n # Return the point on the segment closest to the end\r\n if (posDot < 0 and negDot >= 0):\r\n return (negX, negY)\r\n elif (posDot >= 0 and negDot < 0):\r\n return (posX, posY)\r\n else:\r\n dPos = 
PathSegment.getDistance(self.x2, self.y2, posX, posY)\r\n dNeg = PathSegment.getDistance(self.x2, self.y2, negX, negY)\r\n if (dPos < dNeg):\r\n return (posX, posY)\r\n else:\r\n return (negX, negY)\r\n\r\n else:\r\n return (None, None)", "title": "" }, { "docid": "c855ffb0a9a936dfe798c4b0f5c3cd75", "score": "0.6573531", "text": "def circleRectangle(ccx, ccy, cr, rtlx, rtly, rw, rh):\n testX = ccx\n testY = ccy\n if (ccx < rtlx):\n testX = rtlx\n elif (ccx > rtlx+rw):\n testX = rtlx+rw \n if (ccy < rtly):\n testY = rtly\n elif (ccy > rtly+rh):\n testY = rtly+rh\n \n dx = abs(ccx - testX)\n dy = abs(ccy - testY)\n d = math.sqrt(dx ** 2 + dy ** 2)\n if d <= cr:\n return True\n return False", "title": "" }, { "docid": "b15444a4532c8d3eaf82226aa6f3f56f", "score": "0.656838", "text": "def points_in_circle_np(radius, x0=0, y0=0, ):\n x_ = np.arange(x0 - radius - 1, x0 + radius + 1, dtype=int)\n y_ = np.arange(y0 - radius - 1, y0 + radius + 1, dtype=int)\n x, y = np.where((x_[:,np.newaxis] - x0)**2 + (y_ - y0)**2 <= radius**2)\n # x, y = np.where((np.hypot((x_-x0)[:,np.newaxis], y_-y0)<= radius)) # alternative implementation\n for x, y in zip(x_[x], y_[y]):\n yield x, y", "title": "" }, { "docid": "4eb56725b2f1021119be9c527764599d", "score": "0.6562376", "text": "def collision_check(self, x, radius, obs, clearance=None):\n pass", "title": "" }, { "docid": "275fce312d0300031c55976315d17f2f", "score": "0.6561593", "text": "def circle_coords(r, c, radius, shape=None):\n return ellipse(r, c, radius, radius, shape)", "title": "" }, { "docid": "08d5a5f0f5145cc7169cf46c9f8976f1", "score": "0.6556568", "text": "def __circle_from_edges(self, edges):\n\n if(len(edges) < 4): # not enough edges to be a circle\n #print(\"Not enough edges.\")\n return None\n\n b_box = self.__bb_from_points(edges)\n #print(edges)\n #print(b_box)\n centre = (\n (b_box[0] + b_box[2]) / 2.0,\n (b_box[1] + b_box[3]) / 2.0\n )\n #print(centre)\n # distances of the edges from the centre\n radii = [self.__distance_between(centre, edge) for edge in edges]\n radius_mean = np.mean(radii)\n radius_std = np.std(radii)\n\n if(radius_std < radius_mean * 0.20):\n #print(\"It seems to be a valid circle!\")\n return centre\n else:\n #print(\"Points not circley enough.\")\n #print(\"the std radius is %f and there is the mean %f, which is higher than the condition of %f\" %(radius_std, radius_mean, radius_mean * 0.20))\n return None", "title": "" }, { "docid": "6fd0803d054fb5a09684fba3022575d0", "score": "0.65425324", "text": "def does_intersect(self, a_circle):\n return self.radius + a_circle.radius >= self.center.distance_from_point(a_circle.center)", "title": "" }, { "docid": "e8f4543337368e6c77cf95062ea85284", "score": "0.6539979", "text": "def draw_circle_from_center(radius):\n down()\n circle(radius)\n up()", "title": "" }, { "docid": "387fbaf5604ce0316386d25860ddb6ad", "score": "0.65355396", "text": "def checkRadiusAt(self, pos, radius, frameNo):\n framePart = self.searchFrame(frameNo)\n if framePart == None:\n return False\n elif framePart.pos.dist(pos) > radius:\n return False\n else:\n return True", "title": "" }, { "docid": "dd42480252ca55f313e098375d639441", "score": "0.6530797", "text": "def circunferencia (x,y):\n suma_coordenada = x**2 + y**2\n return suma_coordenada <= 1000", "title": "" }, { "docid": "7fb977b89ad0e4df7451bf1419a93e8b", "score": "0.65249646", "text": "def _collide_circle(pos1: np.array, r1: int, pos2: np.array, r2: int) -> (bool, float, np.array or None):\n # Time for vector math :(\n # print(\"deflect_circle\", 
pos2, r2)\n\n # delta pos\n dp = pos1 - pos2\n dp_sq = dp * dp\n\n # clipping_factor\n cf = (r1 * r1 + r2 * r2) - (dp_sq[0] + dp_sq[1])\n\n # print(\"Clipping Factor\", cf)\n\n if cf < 0:\n # print(\"No clip\")\n # No clipping\n return False, 0, None\n\n cf = math.sqrt(cf)\n\n dp_e = dp / math.sqrt(dp_sq[0] + dp_sq[1])\n # print(\"Hit\", cf, dp_e)\n return True, cf, dp_e", "title": "" }, { "docid": "d02c29791645327a4a41960ca5518c45", "score": "0.6505767", "text": "def circle(img, center, radius, color, thickness=None, lineType=None, shift=None):\n pass", "title": "" }, { "docid": "507c5bd0f01ed18f14eb47f0db7c336e", "score": "0.6496806", "text": "def does_contain(self, a_circle):\n return not self.does_intersect(a_circle) and \\\n max(self.radius, a_circle.radius) < self.center.distance_from_point(a_circle.center)", "title": "" }, { "docid": "be085f755c1bf9c424bb3adfd398b794", "score": "0.64887214", "text": "def circle(self, x, y, r):\n xpt, ypt, rpt = upt(x, y, r)\n self.b.oval(xpt-rpt, ypt-rpt, rpt*2, rpt*2) # Render the unit values", "title": "" }, { "docid": "20d2dc4d6e59657127ea98deafd12477", "score": "0.6484123", "text": "def circle(self, ax, x0, y0, r, c):\n box_size=self.m_dach.shape[0]\n x0, y0, r = np.array(x0), np.array(y0), np.array(r)\n for i in range(x0.size):\n x, y = np.array([]),np.array([])\n for t in np.arange(0,2*np.pi,0.1):\n x = np.append(x, (x0[i] + r[i]*np.cos(t))%(box_size) )\n y = np.append(y, (y0[i] + r[i]*np.sin(t))%(box_size) )\n mask = (x>0)*(x<self.m_dach.shape[0])*(y>0)*(y<self.m_dach.shape[0])\n x, y =x[mask], y[mask]\n ax.scatter(x,y,c=c, s=5, alpha=0.9)", "title": "" }, { "docid": "84033be1b96209195b6223f474b6e4d0", "score": "0.6476044", "text": "def _collides_with_sphere(self, center, radius):\n d_min = 0\n B_mins, B_maxs = zip(*self.ranges)\n for B_min, B_max, C in zip(B_mins, B_maxs, center):\n if C < B_min:\n d_min += (C - B_min) * (C - B_min)\n elif C > B_max:\n d_min += (C - B_max) * (C - B_max)\n return d_min <= (radius * radius)", "title": "" }, { "docid": "6b239736221b430ec75f4bed660636e3", "score": "0.6424822", "text": "def isInside(self,v):\n return self.distance(v) <= self.radius", "title": "" }, { "docid": "7ae0cda6f6315d149767728b867f0a18", "score": "0.64226806", "text": "def circle(img, center, radius, color, thickness=None, lineType=None, shift=None): # real signature unknown; restored from __doc__\r\n pass", "title": "" }, { "docid": "e374f550b9ed0802568d180a0570d2d1", "score": "0.6405278", "text": "def cal_circle(r):\n return math.pi*r*r", "title": "" }, { "docid": "0adeb524343174b2caf0c85d547a4292", "score": "0.6400141", "text": "def great_circle(from_lat, from_lon, to_lat, to_lon):\n dlat = from_lat - to_lat\n dlon = from_lon - to_lon\n\n a = (math.sin(deg2rad(dlat) / 2)) ** 2 + math.cos(deg2rad(from_lat)) * math.cos(\n deg2rad(to_lat)\n ) * (math.sin(deg2rad(dlon) / 2)) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n expected = _EARTH_RADIUS * c\n return expected", "title": "" }, { "docid": "bb6ea22f56b5ea540792761163b69a6d", "score": "0.6395732", "text": "def get_coors_in_ball(coors, centre, radius, inside=True):\n coors = nm.asarray(coors)\n centre = nm.asarray(centre)\n\n vec = coors - centre[None, :]\n\n if inside:\n out = nm.where(norm(vec) <= radius)[0]\n\n else:\n out = nm.where(norm(vec) >= radius)[0]\n\n return out", "title": "" }, { "docid": "4df27612eb07ed282c1b5f4d9ddd4337", "score": "0.6390245", "text": "def circle_filter(arm, x, y):\n #r = arm.DFIBER * arm.FN\n inds = (x**2 + y**2 <= 1.0)\n return x[inds], 
y[inds]", "title": "" }, { "docid": "afe4a78b03224419851d6fec59dfd6a4", "score": "0.63798255", "text": "def get_circle(center, radius, semi=False):\n [x0, y0] = center\n\n if semi:\n theta = _np.arange(0, _np.pi, 1e-4)\n else:\n theta = _np.arange(0, _np.pi * 2, 1e-4)\n\n x = x0 + radius * _np.cos(theta)\n y = y0 + radius * _np.sin(theta)\n return x, y", "title": "" }, { "docid": "c78d57f42036035312b0d662c835e50b", "score": "0.6375737", "text": "def circle(t, r):\n\tarc(t, r, 360)", "title": "" }, { "docid": "58a3ddf951f27dcdf06311957891cb2a", "score": "0.63688344", "text": "def opt_circle(x, y):\n x_m = np.average(x)\n y_m = np.average(y)\n ce = x_m, y_m\n center, ier = optimize.leastsq(_f, ce, args=(x, y))\n xc, yc = center\n Ri = _findR(x, y, *center)\n R = np.average(Ri)\n residual = np.sum((Ri - R)**2)\n return xc, yc, R, residual", "title": "" }, { "docid": "8312dc36d186c6baf5ac21e30ccb7bcf", "score": "0.6363459", "text": "def setCircle(self, xcenter, ycenter, radius, value):\n return self.setArc(xcenter, ycenter, radius, 0.0, 360.0, value)", "title": "" }, { "docid": "5d1521798c89bd8dd08431a857f3b95a", "score": "0.6354438", "text": "def circle_circumference(radius : Number) -> Number:\n return 2*pi*radius", "title": "" }, { "docid": "93807fde963e19104f0d8784b2f71c12", "score": "0.6333789", "text": "def contains(self, point):\n center, radius = (self.center, self.radius)\n return abs(\n (point.x - center.x) ** 2\n + (point.y - center.y) ** 2\n - radius ** 2\n ) < EPSILON", "title": "" }, { "docid": "0044ab45dd6a9646738728558d6953dd", "score": "0.6312699", "text": "def circle(corners=32, center=(0,0), radius=(1,1)):\r\n circle = []\r\n for i in xrange(corners):\r\n circle.append((center[0] + radius[0] * sin(2.0 * i/corners * pi), center[1] + radius[1] * cos(2.0 * i/corners * pi)))\r\n circle.append(circle[0]) # close circle\r\n return circle", "title": "" }, { "docid": "af316ba8e86ae54eda69bdc0ebf77652", "score": "0.6307826", "text": "def center_and_radius(mask):\n top, bottom, left, right = circle_edges(mask)\n vertical_radius = (bottom - top) / 2\n horizontal_radius = (right - left) / 2\n return (int((top + bottom) / 2), # Center\n int((left + right) / 2)),\\\n (vertical_radius + horizontal_radius) / 2 # Radius", "title": "" }, { "docid": "4a6781fecf1df7f01f9f7d22c71b3078", "score": "0.63040984", "text": "def circular_area(radius):\n return np.pi * (radius ** 2)", "title": "" }, { "docid": "4d2d6489559e1b22af8ac42176ed956e", "score": "0.62839717", "text": "def calcCircleArea(r):\n return mult(pi, square(r))", "title": "" }, { "docid": "0dc06cd0a26924011ab88902ec47ee01", "score": "0.62672067", "text": "def test_create_only_radius_input(self):\n expected = ((0, 0), 2.5)\n circle = Circle(radius=2.5)\n self.assertEqual(circle_data(circle), expected)\n # Make sure radius=0 works (edge case of Circles)\n expected = ((0, 0), 0)\n circle = Circle(radius=0)\n self.assertEqual(circle_data(circle), expected)", "title": "" }, { "docid": "cbd1fceffe3dfe1c0935d8a9c7a40a11", "score": "0.62642246", "text": "def circle_coordinates(x, y, radius):\n x1 = x - radius # Left\n x2 = x + radius # Right\n y1 = y - radius # Bottom\n y2 = y + radius # Top\n return (x1, y1, x2, y2)", "title": "" }, { "docid": "0814223dc8da53b1dff1f18377492db1", "score": "0.62609375", "text": "def circleArea(radius):\n return math.pi * radius * radius", "title": "" }, { "docid": "c26ec36732c814f164f2ed91fd1b8f29", "score": "0.62601775", "text": "def is_inside(self,x,y,z):", "title": "" }, { "docid": 
"5b05a1972d5706a4d13a6d682c38c82c", "score": "0.6256146", "text": "def out_of_bounds(self, x, y):\n # circular arena, compute radial position of point and compare to arena radius\n r = np.sqrt(x**2 + y**2)\n return r > self.radius", "title": "" }, { "docid": "543b27aa628863b042b0231e19501422", "score": "0.6254059", "text": "def circleArea(radius):\n return math.pi * radius*radius", "title": "" }, { "docid": "ead0aa2b03be017d51bb9cc5899b377c", "score": "0.62539357", "text": "def draw_circle(self) -> None:\r\n # Define center of the circle\r\n a = self.circle[1][0]\r\n b = self.circle[1][1]\r\n # Define radius of the circle\r\n r = self.circle[0]\r\n # Draw the circle\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if (x - a) ** 2 + (y - b) ** 2 <= r ** 2:\r\n self.obstacle_img[y][x] = (0, 0, 0)", "title": "" }, { "docid": "1898f81c902f055690d83b3faa5b2ec6", "score": "0.6251131", "text": "def inside(coordiantes):\n return (centre[0] - coordiantes[0]) ** 2 / radii[0] ** 2 \\\n + (centre[1] - coordiantes[1]) ** 2 / radii[1] ** 2 <= 1", "title": "" }, { "docid": "6d2d81f58bb8378af8e11b611e592acb", "score": "0.6245794", "text": "def get_rectangle_circle_intersection_points(corner_rec:tuple, dims:tuple, centre_circ:tuple, radius:float):\n \n def get_point(ls,side):\n # given a side, find the first point.\n # 0,1,2,3 ==> left,right,top,bottom\n p1 = ls[2*side]\n if p1 == None:\n return ls[2*side+1]\n else:\n return p1\n \n\n #side 1 (s), side 2 (t)\n s = (0,0)\n t = (0,0)\n \n c_x,c_y = centre_circ\n r_x,r_y = corner_rec\n w,h = dims\n\n\n # everything ordered left,right,top,bottom\n\n #points,sides = get_side_points(corner_rec, dims, centre_circ, radius)\n points = rectangle_circle_intersection(corner_rec, dims, centre_circ, radius)\n \n circle_left, circle_right,circle_above,circle_below = False, False, False, False\n \n \n rx_max = r_x + w-1\n rx_min = r_x\n ry_max = r_y + h-1\n ry_min = r_y\n \n if c_x <= rx_min:\n circle_left = True\n elif c_x >= rx_max:\n circle_right = True\n\n if c_y >= ry_min:\n circle_above = True\n elif c_y <= ry_max:\n circle_below = True\n\n left_side = points[0] != None or points[1] != None\n right_side = points[2] != None or points[3] != None\n top_side = points[4] != None or points[5] != None\n bottom_side = points[6] != None or points[7] != None\n\n\n # test to make sure at most two sides comply:\n val = 0\n if left_side: val += 1\n if right_side: val += 1\n if top_side: val += 1\n if bottom_side: val += 1\n\n if val > 2:\n raise ValueError(\"Circle approximation intersects with more than two sides of screen.\")\n else:\n if val == 1:\n # Maximum two points of intersection.\n p1 = None\n p2 = None\n found = False\n \n i = 0\n while i < 8 and not found:\n tmp = points[i]\n \n if p1 != None:\n found = True\n if tmp != None:\n p2 = tmp\n \n else:\n if tmp != None:\n p1 = tmp\n i += 1\n \n if p2 != None:\n return (p1,p2)\n elif p1 != None:\n return tuple(p1)\n else:\n raise ValueError(\"Single line of rectangle intersects with circle, however, no points on the line intersect.\")\n\n elif val == 2:\n \n if left_side and right_side:\n left = get_point(points,0)\n right = get_point(points,1)\n\n if circle_below:\n return left, right, (rx_max,ry_min), (rx_min,ry_min)\n else:\n return right,left, (rx_min,ry_max), (rx_max,ry_max)\n \n \n elif left_side and top_side:\n left = get_point(points,0)\n top = get_point(points,2)\n \n if not circle_below and not circle_right:\n return left, (rx_min,ry_max), top\n else:\n return left, top,(rx_max,ry_max), 
(rx_max,ry_min), (rx_min,ry_min)\n \n \n elif left_side and bottom_side:\n left = get_point(points,0)\n bottom = get_point(points,3)\n \n if not circle_right and not circle_above:\n return left, bottom, (rx_min,ry_min)\n else:\n return bottom, left, (rx_min,ry_max), (rx_max,ry_max), (rx_max,ry_min)\n \n \n elif right_side and top_side:\n right = get_point(points,1)\n top = get_point(points,2)\n \n if not circle_left and not circle_below:\n return right, top, (rx_max,ry_max)\n else:\n return top, right, (rx_max,ry_min), (rx_min,ry_min), (rx_min,ry_max)\n \n\n elif right_side and bottom_side:\n right = get_point(points,1)\n bottom = get_point(points,3)\n\n if not circle_left and not circle_above:\n return bottom, right, (rx_max,ry_min)\n else:\n\n return right, bottom, (rx_min, ry_min),(rx_min,ry_max), (rx_max,ry_max)\n\n elif top_side and bottom_side:\n top = get_point(points,2)\n bottom = get_point(points,3)\n\n if circle_left:\n return top, bottom, (rx_min,ry_min), (rx_min,ry_max)\n else:\n return bottom, top, (rx_max,ry_max), (rx_max,ry_min)\n \n else:\n raise ValueError(\"Unknown side combinations for Circle Approximation\")\n\n\n elif val == 0:\n raise ValueError(\"Circle approximation intersects with no sides of the screen.\")\n else:\n raise ValueError(\"Circle approximation intersects with too many sides of the screen.\\n side number = {0}\"\\\n .format(val))", "title": "" }, { "docid": "e0122e4cefa134fc4415f715a1aee676", "score": "0.62415147", "text": "def circle_area(r):\n\tif r > 0:\n\t\tA = pi*r**2\n\t\treturn A\n\telse:\n\t\tprint(\"R must be greater than zero!!!\")", "title": "" }, { "docid": "b9a53fb00ed35767f19db579a494f3aa", "score": "0.6240452", "text": "def circle_area(radius):\n a = radius ** 2 * math.pi\n return a", "title": "" }, { "docid": "27105a9568fb4196d1a616e884ea8fed", "score": "0.62371594", "text": "def circle_from_points_in_sphere(points, sphere_r, sphere_ctr):\n # center points\n #print 'Circle from points in sphere'\n ctr = sphere_ctr\n pts = np.array([np.subtract(p, ctr) for p in points])\n\n guess = pts.mean(0)\n guess /= np.linalg.norm(guess)\n distances = [np.dot(guess, p) for p in pts]\n m_dist = min(distances)\n\n if m_dist < 0:\n raise Exception(\"All points should be concentrated in the same side of the sphere\")\n guess *= m_dist * 0.5\n constraints = []\n\n def all_up_plane(candidate):\n vs = pts - candidate\n dots = np.dot(vs, candidate)\n return np.min(dots)\n\n assert all_up_plane(guess) >= 0\n cons = {\"type\": \"ineq\", \"fun\": all_up_plane}\n constraints.append(cons)\n guess_0 = guess.copy()\n constraints.append({\"type\": \"ineq\", \"fun\": lambda x: np.dot(x, guess_0)})\n assert np.dot(guess, guess_0) > 0\n\n def error_fun(x): return sphere_r ** 2 - np.dot(x, x)\n\n ans = optimize.minimize(error_fun, guess, constraints=constraints, method=\"COBYLA\")\n\n #print 'ans %s'%ans\n #assert ans.success\n\n vec = ans.x\n magnitude = np.linalg.norm(vec)\n circle_radius = np.sqrt(sphere_r ** 2 - magnitude ** 2)\n #print 'vec %s'%vec\n #print 'circle_radius %s'%circle_radius\n return vec, circle_radius", "title": "" }, { "docid": "9f084c1caffc52000ecf4097ffe013c1", "score": "0.6235271", "text": "def area_of_circle(radius):\n import math\n\n return math.pi * (radius ** 2)", "title": "" }, { "docid": "be113798b8572376c21211e3b0903cd5", "score": "0.62328017", "text": "def area_of_circle(radius: int or float):\n return math.pi * (radius ** 2)", "title": "" }, { "docid": "a1da4d4dfb140f776b1dd8e88d16605c", "score": "0.62311786", "text": "def circle(t, 
r):\n circumference = r * 3.1415", "title": "" }, { "docid": "f5ca8734a45e79fe3e6d13f9144fb4db", "score": "0.62279606", "text": "def houghCircle(img:np.ndarray,min_radius:float,max_radius:float)->list:\r\n\r\n list = []\r\n imgCanny = cv2.Canny(img.astype(np.uint8), 100, 50)\r\n\r\n # create an 3D matrix\r\n hough_circle = np.zeros((imgCanny.shape[0], imgCanny.shape[1], max_radius - min_radius))\r\n\r\n # checking all circles in the image\r\n hough = help_houghCircle(hough_circle, imgCanny, img, min_radius)\r\n\r\n # if the point > threshold=20 it marked as an center of circle\r\n for r in range(hough.shape[2]):\r\n for x in range(0, img.shape[0]):\r\n for y in range(0, img.shape[1]):\r\n if hough[x, y, r] > 20:\r\n list.append((x, y, min_radius + r))\r\n\r\n return list", "title": "" }, { "docid": "b796acf6d8165a625fa7cdde2f318f86", "score": "0.62270516", "text": "def circle_classification(x):\n # Circle parameters (x-c_1)^2 + (y-c_2)^2 = r^2\n c = 0.5\n N = x.shape[1]\n features = x.shape[0]\n r = N_radius(features, 1)\n y = np.zeros((N,1))\n\n for i in range(N):\n rad_sum = 0\n for j in range(features):\n rad_sum += (x[j,i]-c)**2\n if rad_sum > r**2:\n y[i,0] = 1\n return y", "title": "" }, { "docid": "283902b3502c1d7e2b70cd878e567bd3", "score": "0.6225656", "text": "def getRadius(self, pos, radius):\n # for hexagonal map, only int are allowed\n # for squared map, we can have 1.5 elements\n\n return filter(self.isValid, self.mapTypeInst.getRadius(pos, radius))", "title": "" }, { "docid": "5256d19679074516bf61fd6bcd9920c4", "score": "0.62209445", "text": "def circle(m, centerlon, centerlat, radius, *args, **kwargs):\n\n glon1 = centerlon\n glat1 = centerlat\n X = []\n Y = []\n for azimuth in range(0, 360):\n glon2, glat2, baz = _gccalc(glon1, glat1, azimuth, radius)\n X.append(glon2)\n Y.append(glat2)\n X.append(X[0])\n Y.append(Y[0])\n\n proj_x, proj_y = m(X,Y)\n return zip(proj_x, proj_y)", "title": "" }, { "docid": "7db33e2646741b5de2fc7695ef6b4270", "score": "0.6205012", "text": "def circle_coordinates(x, y, radius):\n\n x1 = x - radius # Left\n x2 = x + radius # Right\n y1 = y - radius # Bottom\n y2 = y + radius # Top\n\n return (x1, y1, x2, y2)", "title": "" }, { "docid": "b7cbc31d3afc3493f30bd6f0b1293756", "score": "0.6183448", "text": "def get_min_enclosed_circle(self):\n hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n # Extract Hue Value from HSV representation ( Taking lemon hue as a feature extractor )\n img = hsv[:, :, 0]\n # Blur image to remove noise\n # im = cv2.blur(im, (3, 3))\n img = cv2.bitwise_not(img)\n # Use Otsu's method for automatic thresholding to convert to b/w image\n _, bwimg = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n bwimg = cv2.blur(bwimg, (4, 4))\n\n # Start finding contour using opencv contour library\n contour, hierarchy = cv2.findContours(bwimg, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)\n cnt = contour[0]\n (x_coord, y_coord), self.radius = cv2.minEnclosingCircle(cnt)\n self.feature_image = bwimg\n return (x_coord, y_coord), self.radius", "title": "" }, { "docid": "e0aab3e6b3b1530530ed7be2e9ba5bee", "score": "0.61824226", "text": "def circle_area(radius: Number) ->Number:\n return math.pi*radius**2", "title": "" }, { "docid": "11ed69e2e37aced38f026580f468cb67", "score": "0.6169531", "text": "def generate_circle(position_x, position_y, radius):\n number_of_angle = 20\n theta = np.linspace(0, 2*np.pi, number_of_angle)\n x = position_x+radius*np.cos(theta)\n y = position_y+radius*np.sin(theta)\n return x, y", "title": "" }, { 
"docid": "f07093fcac224d31f0adca6309180058", "score": "0.6155283", "text": "def create_circle(x, y, radius):\n # if type(x) != int:\n # return None\n if radius < 1:\n return None # NoneType, means the absence of a value\n return [x, y, radius]\n # return {'x': x, 'y': y, 'rad': radius}", "title": "" }, { "docid": "e87c9d8bdfb892bc96e11376d0acaa45", "score": "0.61516964", "text": "def fit_to_circle(x, y, xc=None, yc=None):\n if not xc:\n estimate = numpy.median(x), numpy.median(y) # first guess for centre\n\n ii = 0\n # until convergence\n while True: \n # optimize centre \n (xc, yc), ier = optimize.leastsq(f_2, estimate, args=(x,y)) # fitted xc, yc\n\n # calculate radii of points, median for best value, median absolute \n # deviation (MAD) for error estimates.\n radii_fit = calc_R(x, y, xc, yc) \n radius = numpy.median(radii_fit) \n radius_MAD = numpy.median(numpy.abs(radii_fit-radius)) \n\n # Use only those values within 5 times the median absolute deviation. \n # For a Gaussian distribution this would be > 3 sigma. \n whr_good = numpy.where((radii_fit > radius - 5 * radius_MAD) &\n (radii_fit < radius + 5 * radius_MAD))[0]\n # convergence if all values are good \n if len(whr_good) == len(radii_fit):\n break\n else:\n x = x[whr_good]\n y = y[whr_good]\n ii += 1\n return xc, yc, radius, radius_MAD", "title": "" }, { "docid": "ccd641c5604520bff940c0e12698f303", "score": "0.6143195", "text": "def create_circle(img, center=None, radius=None):\n\n if center is None:\n center = tuple(i // 2 for i in img)\n\n if radius is None:\n radius = min(img) * 3.0 / 8.0\n\n grid = np.mgrid[[slice(i) for i in img]]\n grid = (grid.T - center).T\n phi = radius - np.sqrt(np.sum((grid)**2, 0))\n res = np.int8(phi > 0)\n return res", "title": "" }, { "docid": "17224e5a0fd97f38d34cc33c7ddc4285", "score": "0.61422515", "text": "def circle_points(center, radius, start=0, end=0):\n result = []\n x0 = center[0]\n y0 = center[1]\n x = radius\n y = 0\n err = 1 - x\n\n def append(x, y):\n if start == end: # full circle\n result.append((x, y))\n else: # arc, check if we should add this point\n angle = degrees(atan2(y - y0, x - x0))\n if (angle - start) % 360 <= (end - start) % 360:\n result.append((x, y))\n\n while x >= y:\n append(x + x0, y + y0)\n append(y + x0, x + y0)\n append(-x + x0, y + y0)\n append(-y + x0, x + y0)\n append(-x + x0, -y + y0)\n append(-y + x0, -x + y0)\n append(x + x0, -y + y0)\n append(y + x0, -x + y0)\n\n y = y + 1\n\n if err < 0:\n err = err + 2 * y + 1\n else:\n x = x - 1\n err = err + 2 * (y - x + 1)\n\n return result", "title": "" }, { "docid": "10b9ac63b40e43b348d2099eace31e51", "score": "0.6141477", "text": "def get_circle(r=30):\n image = np.zeros((2*r, 2*r, 3), np.uint8)\n cv2.circle(image,(r,r),r,(255,255,255),-1)\n white = np.array([255, 255, 255])\n mask = image[:, :, :] == white[np.newaxis, np.newaxis, :]\n mask = np.mean(mask, axis=2)\n return mask.astype(np.uint8)", "title": "" }, { "docid": "cbc9093ba4a390e3737397e586581550", "score": "0.6140908", "text": "def filled_circle(self, pos, radius, color):\n pos = round_pos(pos)\n pygame.draw.circle(self._surf, make_color(color), pos, radius, 0)", "title": "" }, { "docid": "cda8cc9e422e7bc47a4b03c255a7c0e0", "score": "0.6138912", "text": "def circle_circumference(radius):\n circumference = 2 * PI * radius\n return circumference", "title": "" }, { "docid": "dead77defca361378cbf05f124d4dd03", "score": "0.61372167", "text": "def circle_intersections(holds):\n def _check_point_in_circle(px, py):\n \"\"\"\n This sub-function checks whether a 
point is in a circle or not\n :param px: X coordinate of the point\n :param py: Y coordinate of the point\n :return: Boolean\n \"\"\"\n distance_to_holds = []\n for h in holds:\n d_sq = (px - h.x) ** 2 + (py - h.y) ** 2\n distance_to_holds.append(round(d_sq, 6))\n\n return all(i <= round(circle_radius ** 2, 6) for i in distance_to_holds)\n\n # Circle Radius same for all circles\n circle_radius = RobotData.l1 + RobotData.l2 + RobotData.r\n\n # Number of holds\n hold_num = len(holds)\n\n # Create of circles as Symbolic because Sympy has solver to find intersection points\n circle_list = []\n for i in range(hold_num):\n circle_list.append(Circle(Point(holds[i].x, holds[i].y), circle_radius))\n\n # Find Intersections\n intersection_list = []\n for i in range(hold_num):\n circle = circle_list[i]\n check_circles = circle_list[i + 1:]\n for check in check_circles:\n intersection_list.append(circle.intersection(check))\n\n # Get Intersection Points\n points = []\n for intersections in intersection_list:\n if len(intersections) == 1:\n intersections.append(intersections[0])\n p0 = intersections[0]\n p1 = intersections[1]\n points.append([(float(p0.x), float(p0.y)), (float(p1.x), float(p1.y))])\n\n # Now all intersection points are found, but we desire only intersections of all circles\n # Get the points only desired\n xs = []\n ys = []\n for inter_points in points:\n p0x = inter_points[0][0]\n p0y = inter_points[0][1]\n p1x = inter_points[1][0]\n p1y = inter_points[1][1]\n\n if _check_point_in_circle(p0x, p0y):\n xs.append(p0x)\n ys.append(p0y)\n\n if _check_point_in_circle(p1x, p1y):\n xs.append(p1x)\n ys.append(p1y)\n\n return xs, ys, sum(xs) / len(xs), sum(ys) / len(ys)", "title": "" }, { "docid": "0e2f957d48ddf64022f88d3c0e48b8b9", "score": "0.61326146", "text": "def circle_points_coordinates(self):\n\n # bmo correction\n bmo_crop = 15\n # import bmo points, scaling, computing of bmo center as mean of all points and projection on x,y plane\n bmo_data = pd.read_csv(self.file_name_bmo, sep=\",\", header=None)\n bmo_points = np.zeros([bmo_data.shape[1], bmo_data.shape[0]], dtype=float)\n bmo_points[:, 0] = bmo_data.iloc[0, :] * self.file_header['Distance']\n bmo_points[:, 1] = (bmo_data.iloc[1, :] + bmo_crop) * self.file_header['ScaleX']\n bmo_points[:, 2] = bmo_data.iloc[2, :] * self.file_header['ScaleZ']\n bmo_center_3d = np.mean(bmo_points, axis=0)\n bmo_center_2d = np.array([bmo_center_3d[0], bmo_center_3d[1]])\n bmo_center_geom_mean_2d = geometric_median(bmo_points[:, 0:2], eps=1e-6)\n\n # compute noe equidistant circle points around bmo center with given radius\n noe = self.number_circle_points\n\n # Scan Position\n scan_pos = str(self.file_header['ScanPosition'])\n scan_pos_input = scan_pos[2:4]\n\n # OD clock wise, OS ccw ring scan interpolation for correct orientation\n if scan_pos_input == 'OS':\n phi = np.linspace(0, - 2 * np.pi, num=noe, endpoint=False)\n else:\n phi = np.linspace(0, 2 * np.pi, num=noe, endpoint=False) - np.pi\n\n # create center from geometric median as vector for broadcasting\n center = np.linspace(bmo_center_geom_mean_2d, bmo_center_geom_mean_2d, num=noe).T\n\n # compute circle points with given center and radius\n circle_points_coordinates = center + self.radius * np.array((np.cos(phi), np.sin(phi)))\n\n # plot to visualize differences between center of mass vs. 
geom median\n plot = False\n if plot:\n radius_bmo = np.mean(np.sqrt((bmo_points[:, 0] - bmo_center_geom_mean_2d[0])**2 +\n (bmo_points[:, 1] - bmo_center_geom_mean_2d[1])**2))\n bmo_circle_points = center + radius_bmo * np.array((np.cos(phi), np.sin(phi)))\n shift_centers = np.sqrt((bmo_center_2d[0] - bmo_center_geom_mean_2d[0])**2 +\n (bmo_center_2d[1] - bmo_center_geom_mean_2d[1])**2)\n print('Difference between center of mass and geometric median of BMO points:', shift_centers)\n fig, ax = plt.subplots(ncols=1)\n ax.plot(bmo_points[:, 0], bmo_points[:, 1], color='black', marker='o', linestyle='None')\n ax.plot(bmo_center_3d[0], bmo_center_3d[1], color='red', marker='o', linestyle='None')\n ax.plot(bmo_center_geom_mean_2d[0], bmo_center_geom_mean_2d[1], color='orange', marker='+')\n ax.plot(bmo_circle_points[0], bmo_circle_points[1], color='black', linestyle='None')\n ax.plot(circle_points_coordinates[0], circle_points_coordinates[1], ':', linewidth=3, color='green')\n ax.set_aspect('equal', 'box')\n ax.axis('off')\n plt.show()\n\n return circle_points_coordinates", "title": "" }, { "docid": "82318d8e0779c771cec22fb211307341", "score": "0.6132364", "text": "def test_circle_10():\n theta, path = spl.circle([0,0],1,npoints=10)\n\n assert theta == approx(np.linspace(0, 2*np.pi, 10))", "title": "" }, { "docid": "a9a8bcf13e8138be665f4340652d050b", "score": "0.6128098", "text": "def radius(circunf) -> float:\n return (circunf / math.pi) / 2", "title": "" }, { "docid": "cd5210fcd8ba4152a23e6ca8c99d2253", "score": "0.6127329", "text": "def circle_area(radius : Number) -> Number:\n return pi*radius*radius", "title": "" }, { "docid": "99e31061c8b3282c1176827aab5b95bc", "score": "0.61269546", "text": "def is_inside(self,x,y,z):\n return (x**2+y**2)/(self.a**2) + (z/self.c)**2 <= 1", "title": "" }, { "docid": "67813eeebbcbc98d3c0c2ee40f9a2613", "score": "0.61251736", "text": "def circle(r):\r\n pi = 3.14\r\n area = pi*r*r\r\n perimeter = 2*pi*r\r\n return area, perimeter\r\n \r\n circle(4)\r\n a,p = circle(6)", "title": "" }, { "docid": "6aa9c53345ecfb89404041f586e1d437", "score": "0.6122784", "text": "def circle_area(radius):\n circle_area = PI * (radius ** 2)\n return circle_area", "title": "" }, { "docid": "e188c89000465a4a27ee3611fe958a94", "score": "0.6117123", "text": "def circle_radius(lat, lon, max_lat, max_lon, min_lat, min_lon):\n points = numpy.array([\n (min_lat, min_lon),\n (min_lat, max_lon),\n (max_lat, min_lon),\n (max_lat, max_lon),\n ], dtype=numpy.double)\n\n radius = _geocalc.max_distance(lat, lon, points)\n return int(round(radius))", "title": "" } ]
c89fd9c6e428416132183bca926ce386
The publisher of the 3rd-party artifact that is being bought, e.g. NewRelic
[ { "docid": "5151abc3e492ac5419ad39fafb658524", "score": "0.65167165", "text": "def publisher(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"publisher\")", "title": "" } ]
[ { "docid": "5edae9fd69ceb12a313846410a240744", "score": "0.69928986", "text": "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "title": "" }, { "docid": "0da235a06b1f437255f5219cd6eb2efb", "score": "0.6917126", "text": "def publisher(self) -> Optional[str]:\n return pulumi.get(self, \"publisher\")", "title": "" }, { "docid": "0da235a06b1f437255f5219cd6eb2efb", "score": "0.6917126", "text": "def publisher(self) -> Optional[str]:\n return pulumi.get(self, \"publisher\")", "title": "" }, { "docid": "36611280296c2bdedd54e8ab973e2c61", "score": "0.6709819", "text": "def publisher_name(self):\n return self._publisher_name", "title": "" }, { "docid": "95d91970197477ce0da1a8f5b052e3ef", "score": "0.65966517", "text": "def publisher(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher\")", "title": "" }, { "docid": "57adbe079a9ea3ba534fdb37ff2f5768", "score": "0.6492988", "text": "def publisher(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher\")", "title": "" }, { "docid": "f9f46a9e80a90bf4ecb6059b6a446d1c", "score": "0.60977715", "text": "def publisher_id(self) -> Optional[str]:\n return pulumi.get(self, \"publisher_id\")", "title": "" }, { "docid": "96aceb39039018c90258a448c4766a5e", "score": "0.6012537", "text": "def get_preferred_publisher(self):\n return self.cfg_cache.preferred_publisher", "title": "" }, { "docid": "7cc24fe5764de4a583142d045f5cbb42", "score": "0.58693236", "text": "def plan_publisher(self) -> str:\n return pulumi.get(self, \"plan_publisher\")", "title": "" }, { "docid": "55a1a33a4356062afc5c0bbf17726242", "score": "0.58691555", "text": "def get_pub(self):\n return self._pub", "title": "" }, { "docid": "9cba9d76899a9455b6eea8812833bfce", "score": "0.5816261", "text": "def marketplace_publisher_id(self) -> str:\n return pulumi.get(self, \"marketplace_publisher_id\")", "title": "" }, { "docid": "65d2c5984dd74e9260e4655be05dd779", "score": "0.5798854", "text": "def publisher(self, publisher_name):\n for publisher in self.publishers:\n if publisher.name == publisher_name:\n return publisher\n return None", "title": "" }, { "docid": "2251713d7388660dca5d5e3ab8d1881d", "score": "0.5742479", "text": "def publisher_indexing(self):\n if self.publisher is not None:\n return self.publisher.name", "title": "" }, { "docid": "842ab67e7f85aaf7f12fa2d8a4a2bee3", "score": "0.566609", "text": "def publication(self) -> str:\n return self.__publication", "title": "" }, { "docid": "bd6c252216ac6ba84a517b60f75e86a2", "score": "0.5454047", "text": "def publisher_profile(self) -> Optional[str]:\n return pulumi.get(self, \"publisher_profile\")", "title": "" }, { "docid": "84370bb8b578beceb2384973f865bee3", "score": "0.544901", "text": "def publisher_catalog(request):\n link = [make_link('up', ContentType.NAVIGATION, '/opds/'),\n make_link('self', ContentType.NAVIGATION, '/opds/publisher')]\n\n entry = []\n try:\n for pub in request.db.publisher.find().sort('title_ascii', ASCENDING):\n if 'links' not in pub:\n pub['links'] = [make_link(LinkRel.SUBSECTION,\n ContentType.ACQUISITION, '/opds/publisher/{}'.format(quote\n (pub['_id'].encode('utf8')))), ]\n entry.append(pub)\n except errors.AutoReconnect as e:\n logging.getLogger(__name__).error('MongoDB: %s' % e.message)\n\n return {\n '_id': 'http://books.scielo.org/opds/publisher',\n 'title': 'SciELO Books - Publishers',\n 'updated': datetime.now(),\n 'links': link,\n 'entry': entry}", "title": "" }, { "docid": "4129331d96074e04b44090a2ee5a8d37", "score": "0.54074764", "text": "def 
pubchem(self):\n return self._pubchem", "title": "" }, { "docid": "7fa4b41f1e641ae035651300cc6f29e6", "score": "0.53414446", "text": "def vendor(self) -> Optional[str]:\n return pulumi.get(self, \"vendor\")", "title": "" }, { "docid": "da3b4b13f29643c0c2cf7b90cdadd6cb", "score": "0.52367115", "text": "def vendor(self):\n return self._vendor", "title": "" }, { "docid": "da3b4b13f29643c0c2cf7b90cdadd6cb", "score": "0.52367115", "text": "def vendor(self):\n return self._vendor", "title": "" }, { "docid": "900b3214bbc3c281dfc0b24d2f9fb459", "score": "0.5204641", "text": "def package(self) -> typing.Optional[str]:\n if self.raw.dist:\n return self.raw.dist.project_name\n return None", "title": "" }, { "docid": "c3aa28d1df3a30b48b36c6c67107a6c8", "score": "0.51274395", "text": "def get_installed_pubs(self):\n\n cat = self.get_catalog(self.IMG_CATALOG_INSTALLED)\n return cat.publishers()", "title": "" }, { "docid": "4a11d24d43dc0a623790d3d2bae6cc87", "score": "0.51015854", "text": "def display_publisher_info(\n username,\n password,\n session_id,\n server=_DEFAULT_SERVER,\n unity_version=DEFAULT_UNITY_VERSION,\n tools_version=DEFAULT_TOOLS_VERSION):\n session = AssetStoreSession(\n username=username,\n password=password,\n session_id=session_id,\n server=server,\n unity_version=unity_version,\n tools_version=tools_version)\n metadta = session.get_metadata()\n print(\"publisher_id={}; publisher_name={}; package_ids=({})\".format(\n metadta['publisher']['id'],\n metadta['publisher']['name'],\n ' '.join(metadta['packages'].keys())))", "title": "" }, { "docid": "aeb97d52599231db4a465aa2569fb043", "score": "0.508652", "text": "def vendor(self) -> str:\n return self.__vendor", "title": "" }, { "docid": "b4add722bf90f002200d5085e6d8404a", "score": "0.5078563", "text": "def getProductURL(self)->str:\n return self.__productURL", "title": "" }, { "docid": "5f8e77548f3f25e137a02ab509c613e3", "score": "0.5067576", "text": "def public_type(self):\n return self.pub_type", "title": "" }, { "docid": "41f93dae877f2ee25aa0a4102bea984f", "score": "0.50508356", "text": "def installed_file_publisher(filepath):\n\n f = file(filepath)\n try:\n flines = f.readlines()\n version, pub = flines\n version = version.strip()\n pub = pub.strip()\n f.close()\n except ValueError:\n # If ValueError occurs, the installed file is of\n # a previous format. For upgrades to work, it's\n # necessary to assume that the package was\n # installed from the preferred publisher. 
Here,\n # the publisher is setup to record that.\n if flines:\n pub = flines[0]\n pub = pub.strip()\n newpub = \"%s_%s\" % (\n pkg.fmri.PREF_PUB_PFX, pub)\n else:\n newpub = \"%s_%s\" % (\n pkg.fmri.PREF_PUB_PFX,\n self.get_preferred_publisher())\n pub = newpub\n assert pub\n return pub", "title": "" }, { "docid": "680ad07046dd71d6a73603109bc7343c", "score": "0.5048657", "text": "def primary_artifact(self):\n try:\n return self.artifacts[0]\n except IndexError:\n return None", "title": "" }, { "docid": "f45722eb483c4ed86e53e5790e01b331", "score": "0.5046064", "text": "def author(self):\r\n return \"Eraser <[email protected]>\"", "title": "" }, { "docid": "f79b2e69cb072c80f35bef89cf445899", "score": "0.5046007", "text": "def get_publisher_dict():\n j = {\n \"id\": 1,\n \"code\": settings.PUBLISHER_CODE,\n \"name\": settings.PUBLISHER_NAME,\n \"ipi_name_number\": settings.PUBLISHER_IPI_NAME,\n \"ipi_base_number\": settings.PUBLISHER_IPI_BASE,\n \"affiliations\": [\n {\n \"organization\": {\n \"code\": settings.PUBLISHER_SOCIETY_PR,\n \"name\": SOCIETY_DICT.get(\n settings.PUBLISHER_SOCIETY_PR, \"\"\n ).split(\",\")[0],\n },\n \"affiliation_type\": {\n \"code\": \"PR\",\n \"name\": \"Performance Rights\",\n },\n \"territory\": WORLD_DICT,\n }\n ],\n }\n\n # append MR data to affiliations id needed\n if settings.PUBLISHER_SOCIETY_MR:\n j[\"affiliations\"].append(\n {\n \"organization\": {\n \"code\": settings.PUBLISHER_SOCIETY_MR,\n \"name\": SOCIETY_DICT.get(\n settings.PUBLISHER_SOCIETY_MR, \"\"\n ).split(\",\")[0],\n },\n \"affiliation_type\": {\n \"code\": \"MR\",\n \"name\": \"Mechanical Rights\",\n },\n \"territory\": WORLD_DICT,\n }\n )\n\n # append SR data to affiliations id needed\n if settings.PUBLISHER_SOCIETY_SR:\n j[\"affiliations\"].append(\n {\n \"organization\": {\n \"code\": settings.PUBLISHER_SOCIETY_SR,\n \"name\": SOCIETY_DICT.get(\n settings.PUBLISHER_SOCIETY_SR, \"\"\n ).split(\",\")[0],\n },\n \"affiliation_type\": {\n \"code\": \"SR\",\n \"name\": \"Synchronization Rights\",\n },\n \"territory\": WORLD_DICT,\n }\n )\n\n return j", "title": "" }, { "docid": "bd020567db73f769611c7a0db85c9220", "score": "0.5028885", "text": "def test_valid_detect_publisher(self):\n pass", "title": "" }, { "docid": "3aa9f5449494f73460cd967ef08107c6", "score": "0.50282943", "text": "def report_repo(self):\n return self.dist", "title": "" }, { "docid": "dc7870236c52c5ff03cf2e4965fb6d7b", "score": "0.50250185", "text": "def publication_name(self) -> str:\n return self._pretty_name", "title": "" }, { "docid": "8dd3c2253c1ba25251cc88176733ae3d", "score": "0.50176525", "text": "def publication_withexternality(self):\n if self.publication.id == self.score.publication.id:\n return 'D'\n else:\n return 'E'", "title": "" }, { "docid": "770a5de4956d31e7cc092f7d2a5b99b6", "score": "0.5009917", "text": "def get_publisher_output(publisher_id: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPublisherResult]:\n ...", "title": "" }, { "docid": "e6cd169e2196d8972e05bea10a76cd88", "score": "0.50075686", "text": "def get_source(self):\n return self.__source_iceberg", "title": "" }, { "docid": "2248b476162f421ecb635702445b24f0", "score": "0.5007129", "text": "def publisher(bookData):\r\n\r\n # if there are books with no listed publisher, then set them to be \"other\"\r\n bookData.publisher.fillna('other', inplace=True)\r\n return bookData", "title": "" }, { "docid": "4debd426bf4ef22a6bf89af2f81c2ecc", "score": "0.49867636", "text": "def 
get_publisher_project(slug):\n try:\n return PublisherProject.objects.get(slug=slug)\n except PublisherProject.DoesNotExist:\n return slug", "title": "" }, { "docid": "0ddb7f82f6cf0fe70163a092d1f8d040", "score": "0.4984217", "text": "def publisher_status(self) -> Optional['PublisherStatus']:\n return pulumi.get(self, \"publisher_status\")", "title": "" }, { "docid": "971e7eb81fbcbd51d6c19cfcc76cfbc5", "score": "0.49787006", "text": "def pub_year(self):\n return self._series_work__dict['work']['original_publication_year']", "title": "" }, { "docid": "b74fde0666f887640b0b1deb9b2ff8b5", "score": "0.48851538", "text": "def get_vendor(self):\n\n return self.VENDOR", "title": "" }, { "docid": "a408c0733101f05bd4af58baedbe39b9", "score": "0.48737052", "text": "def test_invalid_detect_publisher(self):\n pass", "title": "" }, { "docid": "9e87920a4eb1495bc191ac8c22f5a6af", "score": "0.4865376", "text": "def get_publication(refpoint):\n try:\n publication = refpoint.find_next('div', attrs={'class': re.compile(r'^metascore')}) \\\n .find_next('span', attrs={'class': re.compile(r'^source')})\\\n .get_text().lower()\n return publication\n except:\n return None", "title": "" }, { "docid": "dcccf79d9defd528abce2c2e5084f06a", "score": "0.4852364", "text": "def test_get_object_download_presigned_url(self):\n pass", "title": "" }, { "docid": "05697f8ac003fbe42a26ae04bc3f0bec", "score": "0.48511574", "text": "def getDefinitionSource(self):\n return self.definition_source_publ;", "title": "" }, { "docid": "f749c50e4e606bfb07083367d7c18837", "score": "0.4844164", "text": "def validate_publisher(self, publisher):\n if publisher not in [Hero.MARVEL, Hero.DC_COMICS]:\n raise serializers.ValidationError(\"Publisher choice not valid\")\n return publisher", "title": "" }, { "docid": "35740b2143268ad1b8261aa2c48aceb9", "score": "0.48367468", "text": "async def getCombinePublicity(self, ctx):\n public_str = \"public\" if await self._is_public_combine(ctx.guild) else \"private\"\n response = \"Combines are currently **{0}**.\".format(public_str)\n await ctx.send(response)", "title": "" }, { "docid": "28e20be6a0a2dfe38ad68ddc88010ecc", "score": "0.4835145", "text": "def getOSProductName(self):\n return self.os.getProductName()", "title": "" }, { "docid": "6dd12a23e4f74f382ec5f09339dc17d1", "score": "0.48306632", "text": "def author(self):\n return self.__author", "title": "" }, { "docid": "1d75e04ea2f8bedf9e632281b3075ec8", "score": "0.48149613", "text": "def source(self):\n source = self.element.find('.//sources/sourceProduct')\n if source is not None:\n source = source.attrib['refid']\n return source", "title": "" }, { "docid": "23538c34db2ed1ad14ce769c19a3850d", "score": "0.48133975", "text": "def __validate_metadata(self, croot, repo):\n\n c = pkg.catalog.Catalog(meta_root=croot, read_only=True)\n if not c.exists:\n # Nothing to validate.\n return\n if not c.version > 0:\n # Validation doesn't apply.\n return\n if not c.package_count:\n # Nothing to do.\n return\n\n # XXX For now, perform this check using the catalog data.\n # In the future, it should be done using the output of the\n # publisher/0 operation.\n pubs = c.publishers()\n\n if self.prefix not in pubs:\n origins = repo.origins\n origin = origins[0]\n logger.error(_(\"\"\"\nUnable to retrieve package data for publisher '{prefix}' from one\nof the following origin(s):\n\n{origins}\n\nThe catalog retrieved from one of the origin(s) listed above only\ncontains package data for: {pubs}.\n\"\"\").format(origins=\"\\n\".join(str(o) for o in origins), 
prefix=self.prefix,\n pubs=\", \".join(pubs)))\n\n if global_settings.client_name != \"pkg\":\n logger.error(_(\"\"\"\\\nThis is either a result of invalid origin information being provided\nfor publisher '{0}', or because the wrong publisher name was\nprovided when this publisher was added.\n\"\"\").format(self.prefix))\n # Remaining messages are for pkg client only.\n return\n\n logger.error(_(\"\"\"\\\nTo resolve this issue, correct the origin information provided for\npublisher '{prefix}' using the pkg set-publisher subcommand, or re-add\nthe publisher using the correct name and remove the '{prefix}'\npublisher.\n\"\"\").format(prefix=self.prefix))\n\n if len(pubs) == 1:\n logger.warning(_(\"\"\"\\\nTo re-add this publisher with the correct name, execute the following\ncommands as a privileged user:\n\npkg set-publisher -P -g {origin} {pub}\npkg unset-publisher {prefix}\n\"\"\").format(origin=origin, prefix=self.prefix, pub=list(pubs)[0]))\n return\n\n logger.warning(_(\"\"\"\\\nThe origin(s) listed above contain package data for more than one\npublisher, but this issue can likely be resolved by executing one\nof the following commands as a privileged user:\n\"\"\"))\n\n for pfx in pubs:\n logger.warning(_(\"pkg set-publisher -P -g \"\n \"{origin} {pub}\\n\").format(\n origin=origin, pub=pfx))\n\n logger.warning(_(\"\"\"\\\nAfterwards, the old publisher should be removed by executing the\nfollowing command as a privileged user:\n\npkg unset-publisher {0}\n\"\"\").format(self.prefix))", "title": "" }, { "docid": "3680544ef2d2e6c69d5c681759386b5d", "score": "0.48057064", "text": "def test_pmid_to_publisher_link(pmid, publisher_link):\n\n publisher_link = pubtools.pmid_to_publisher_link(pmid)\n assert_equal(publisher_link, publisher_link)", "title": "" }, { "docid": "0d2742f9e0235884af08fd3455594713", "score": "0.48017856", "text": "def __str__(self):\n return '{} bought by {}'.format(self.product.name, self.email)", "title": "" }, { "docid": "ffd71a9280912fd4e814840fd0987668", "score": "0.4798962", "text": "def prefix_lib(self):\n return self.product.lib", "title": "" }, { "docid": "c82bd63a37446ce321ba2fb56c0df2c1", "score": "0.47979903", "text": "def get_pubmed(r):\n if 'references' not in r:\n return None\n \n return r['references'][0].get('pubmed',None)", "title": "" }, { "docid": "e2fdada64fc257d1b5156b1590cc1613", "score": "0.47958702", "text": "def catalog(self) -> Optional[str]:\n return pulumi.get(self, \"catalog\")", "title": "" }, { "docid": "932fea3d3b0c6df760f73b0f6303e1f5", "score": "0.47949535", "text": "def author(self):\n return 'nmenon34'", "title": "" }, { "docid": "d503d0b7efe3f19e4f33bcd8463a3d56", "score": "0.4787381", "text": "def author(self):\n return self._series_work__dict['work']['best_book']['author']", "title": "" }, { "docid": "75ef6e191411c425b2ad12ef1d3c6814", "score": "0.4778886", "text": "def author(self):", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n 
return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "d1ba30b7913e96d8613bfb731e46239b", "score": "0.47706985", "text": "def isLicensed(self):\r\n return True", "title": "" }, { "docid": "b610acaf9c1fde13a5b9ed627597f373", "score": "0.47678876", "text": "def pubkey(self):\n return self.pk and bin2pubkey(self.pk) or 'not made'", "title": "" }, { "docid": "1f54da1967f49e81d028632fc7f84554", "score": "0.47647196", "text": "def item_pubdate(self, item):\n return item.creation_date", "title": "" }, { "docid": "64764ff2b7f5c9ee6955761c23afcca5", "score": "0.4749263", "text": "def get_product_name(self):\n return self.execute(ShdlcCmdGetProductName())", "title": "" }, { "docid": "db1b138c214cd3d396453c094d34b498", "score": "0.47465205", "text": "def owner(self):\n return self.item.owner", "title": "" }, { "docid": "db1b138c214cd3d396453c094d34b498", "score": "0.47465205", "text": "def owner(self):\n return self.item.owner", "title": "" }, { "docid": "dcc7f8bd73db053c08f3c3ba5e55479e", "score": "0.47443122", "text": "def isLicensed(self):\r\n\t\treturn True", "title": "" }, { "docid": "dd0fbb8e550072a4c3b6cb8fc8cb0717", "score": "0.47439843", "text": "def test_get_repository_hosted_property_value(self):\n pass", "title": "" }, { "docid": "862319930b5e16e3bae15c7c0088ce18", "score": "0.47400406", "text": "def author(self):\n return self._author", "title": "" }, { "docid": "862319930b5e16e3bae15c7c0088ce18", "score": "0.47400406", "text": "def author(self):\n return self._author", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": 
"0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" }, { "docid": "ff49f9486328892fd567160a661e3eb4", "score": "0.47375244", "text": "def isLicensed(self):\n return True", "title": "" } ]
8d0ad179f81a20ff6613833cd14acd7b
Return all statistic_ids (or filtered one) and unit of measurement. Queries the database for existing statistic_ids, as well as integrations with a recorder platform for statistic_ids which will be added in the next statistics period.
[ { "docid": "184f0821fec57cb5786e98df8f012a1e", "score": "0.64374584", "text": "async def async_list_statistic_ids(\n hass: HomeAssistant,\n statistic_ids: set[str] | None = None,\n statistic_type: Literal[\"mean\"] | Literal[\"sum\"] | None = None,\n) -> list[dict]:\n instance = get_instance(hass)\n\n if statistic_ids is not None:\n # Try to get the results from the cache since there is nearly\n # always a cache hit.\n statistics_meta_manager = instance.statistics_meta_manager\n metadata = statistics_meta_manager.get_from_cache_threadsafe(statistic_ids)\n if not statistic_ids.difference(metadata):\n result = _statistic_by_id_from_metadata(hass, metadata)\n return _flatten_list_statistic_ids_metadata_result(result)\n\n return await instance.async_add_executor_job(\n list_statistic_ids,\n hass,\n statistic_ids,\n statistic_type,\n )", "title": "" } ]
[ { "docid": "f9a4da3caaebf0150c69dc96a64d5ea5", "score": "0.7760222", "text": "def list_statistic_ids(\n hass: HomeAssistant,\n statistic_ids: set[str] | None = None,\n statistic_type: Literal[\"mean\"] | Literal[\"sum\"] | None = None,\n) -> list[dict]:\n result = {}\n instance = get_instance(hass)\n statistics_meta_manager = instance.statistics_meta_manager\n\n # Query the database\n with session_scope(hass=hass, read_only=True) as session:\n metadata = statistics_meta_manager.get_many(\n session, statistic_type=statistic_type, statistic_ids=statistic_ids\n )\n result = _statistic_by_id_from_metadata(hass, metadata)\n\n if not statistic_ids or statistic_ids.difference(result):\n # If we want all statistic_ids, or some are missing, we need to query\n # the integrations for the missing ones.\n #\n # Query all integrations with a registered recorder platform\n for platform in hass.data[DOMAIN].recorder_platforms.values():\n if not (\n platform_list_statistic_ids := getattr(\n platform, INTEGRATION_PLATFORM_LIST_STATISTIC_IDS, None\n )\n ):\n continue\n platform_statistic_ids = platform_list_statistic_ids(\n hass, statistic_ids=statistic_ids, statistic_type=statistic_type\n )\n\n for key, meta in platform_statistic_ids.items():\n if key in result:\n # The database has a higher priority than the integration\n continue\n result[key] = {\n \"display_unit_of_measurement\": meta[\"unit_of_measurement\"],\n \"has_mean\": meta[\"has_mean\"],\n \"has_sum\": meta[\"has_sum\"],\n \"name\": meta[\"name\"],\n \"source\": meta[\"source\"],\n \"unit_class\": _get_unit_class(meta[\"unit_of_measurement\"]),\n \"unit_of_measurement\": meta[\"unit_of_measurement\"],\n }\n\n # Return a list of statistic_id + metadata\n return _flatten_list_statistic_ids_metadata_result(result)", "title": "" }, { "docid": "36ed46f7a5ac3a2097984019756c95d1", "score": "0.5895717", "text": "def _ws_get_list_statistic_ids(\n hass: HomeAssistant,\n msg_id: int,\n statistic_type: Literal[\"mean\"] | Literal[\"sum\"] | None = None,\n) -> str:\n return JSON_DUMP(\n messages.result_message(msg_id, list_statistic_ids(hass, None, statistic_type))\n )", "title": "" }, { "docid": "c16deb543c7e326b14c43bc9c67a6ff3", "score": "0.5851002", "text": "def all_stats(klass, account, ids, metrics, **kwargs):\n end_time = kwargs.get('end_time', datetime.utcnow())\n start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800))\n granularity = kwargs.get('granularity', GRANULARITY.HOUR)\n segmentation_type = kwargs.get('segmentation_type', None)\n\n params = {\n 'metrics': ','.join(metrics),\n 'start_time': to_time(start_time, granularity),\n 'end_time': to_time(end_time, granularity),\n 'granularity': granularity.upper()\n }\n if segmentation_type is not None:\n params['segmentation_type'] = segmentation_type.upper()\n\n params[klass.ANALYTICS_MAP[klass.__name__]] = ','.join(ids)\n\n resource = klass.RESOURCE_STATS.format(account_id=account.id)\n response = Request(account.client, 'get', resource, params=params).perform()\n return response.body['data']", "title": "" }, { "docid": "457abaf56622a8c15bac2467d4b047b6", "score": "0.57810575", "text": "def _get_measurement_statistics(experiment_proto, statistic):\n mapping = selection.extract_measurement_statistic(experiment_proto, statistic)\n output_names, data = list(zip(*sorted(mapping.items())))\n return lt.constant(data, axes=[('target', list(output_names))])", "title": "" }, { "docid": "764235e8d12797cda5381c941abbcd2b", "score": "0.57596743", "text": "def fetch_sampling_data(self,\n 
smiles,\n num_samples,\n scaled_radius,\n force_unique,\n sanitize):\n logger.debug('Fetching benchmark data...')\n cursor = self.conn.cursor()\n cursor.execute(\n '''\n SELECT id FROM smiles\n WHERE smiles=?\n AND num_samples=?\n AND scaled_radius=?\n AND force_unique=?\n AND sanitize=?\n ''',\n [smiles, num_samples, scaled_radius, force_unique, sanitize])\n id = cursor.fetchone()\n\n if not id:\n return None\n\n cursor.execute('SELECT smiles FROM smiles_samples WHERE input_id=?',\n [id[0]])\n generated_smiles = cursor.fetchall()\n generated_smiles = [x[0] for x in generated_smiles]\n return generated_smiles", "title": "" }, { "docid": "6cf316b530548b1062941b2fe61b0218", "score": "0.573063", "text": "def statistics_during_period(\n hass: HomeAssistant,\n start_time: datetime,\n end_time: datetime | None,\n statistic_ids: set[str] | None,\n period: Literal[\"5minute\", \"day\", \"hour\", \"week\", \"month\"],\n units: dict[str, str] | None,\n types: set[Literal[\"change\", \"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> dict[str, list[StatisticsRow]]:\n with session_scope(hass=hass, read_only=True) as session:\n return _statistics_during_period_with_session(\n hass,\n session,\n start_time,\n end_time,\n statistic_ids,\n period,\n units,\n types,\n )", "title": "" }, { "docid": "177849548be3427840b9212ffbe39e10", "score": "0.570995", "text": "def _ws_get_statistics_during_period(\n hass: HomeAssistant,\n msg_id: int,\n start_time: dt,\n end_time: dt | None,\n statistic_ids: set[str] | None,\n period: Literal[\"5minute\", \"day\", \"hour\", \"week\", \"month\"],\n units: dict[str, str],\n types: set[Literal[\"change\", \"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> str:\n result = statistics_during_period(\n hass,\n start_time,\n end_time,\n statistic_ids,\n period,\n units,\n types,\n )\n for statistic_id in result:\n for item in result[statistic_id]:\n if (start := item.get(\"start\")) is not None:\n item[\"start\"] = int(start * 1000)\n if (end := item.get(\"end\")) is not None:\n item[\"end\"] = int(end * 1000)\n if (last_reset := item.get(\"last_reset\")) is not None:\n item[\"last_reset\"] = int(last_reset * 1000)\n return JSON_DUMP(messages.result_message(msg_id, result))", "title": "" }, { "docid": "3bc75aad5a609e845b6df8041ffffcde", "score": "0.57026625", "text": "def collect_all_metadata(self):\n self.user = self.user.lower()\n with open('{}/all_ids.json'.format(self.tweetdata_output_path)) as f:\n ids = json.load(f)\n self.logger.info('Total ids: {}'.format(len(ids)))\n limit = len(ids)\n for id in range(0, limit, 100):\n self.logger.info('Currently getting {} - {}'.format(id, id + 100))\n sleep(6) # needed to prevent hitting API rate limit\n id_batch = ids[id:id + 100]\n tweets = self.api.statuses_lookup(id_batch)\n for tw in tweets:\n self.all_data.append(dict(tw._json))\n self.logger.info('Metadata collection complete')", "title": "" }, { "docid": "bf874fb144a91c4320c3c0c197089faa", "score": "0.56617945", "text": "def get_multiple_statistics():\n r = DL1Reader(PATH)\n df = r.load_entire_table()\n df['tm'] = df['pixel'] // 64\n df_stats = df[['tm', 'charge']].groupby('tm').agg(['mean', 'min', 'max'])\n print(df_stats)\n print(df_stats['charge']['mean'])", "title": "" }, { "docid": "448c742d7d07a6aef19d3c28697eb8b2", "score": "0.56270593", "text": "def get_stats():\n\n all_statistics = []\n\n # For each Dataset\n for current_dataset in DATASET_OPTIONS:\n\n # Get files paths\n current_dataset_path = OUTPUT_FILE + current_dataset\n 
current_dataset_files = os.listdir(current_dataset_path)\n\n # Fore each file/article\n for file in current_dataset_files:\n\n # Open 'system_answers_eval.txt' in lines\n current_dataset_file_path = current_dataset_path + '/' + file + '/' + 'system_answers_eval.txt'\n current_dataset_file_lines = open(current_dataset_file_path).readlines()\n\n # Get statistics.\n try:\n article_sentences = re.findall(r'\\d+', current_dataset_file_lines[0].strip()) # First Line - # sentences\n article_questions = re.findall(r'\\d+', current_dataset_file_lines[1].strip()) # Second Line - # questions\n article_questions_with_answers = re.findall(r'\\d+', current_dataset_file_lines[2].strip()) # Third Line - # questions with answers\n article_correct = re.findall(r'\\d+', current_dataset_file_lines[3].strip()) # Fourth Line - # correct answers\n article_accuracy = re.findall(r'\\d+\\.\\d+', current_dataset_file_lines[4].strip()) # Fifth Line - # accuracy percent\n except 'Not_updated_file':\n print(current_dataset_file_path)\n\n # Store all statistics.\n quintet = int(article_sentences[0]), int(article_questions[0]), int(article_questions_with_answers[0]), int(article_correct[0]), float(article_accuracy[0])\n all_statistics.append(quintet )\n\n return all_statistics", "title": "" }, { "docid": "831742cd471f6e8c7d75421c3a5a8fd0", "score": "0.5612014", "text": "def stats(self, metrics, **kwargs): # noqa\n return self.__class__.all_stats(self.account, [self.id], metrics, **kwargs)", "title": "" }, { "docid": "1d36641a11f1a9b0827210a11b472b42", "score": "0.5596658", "text": "def instrument_ids(self, s_id):\n sql = select([self.stations.c.thermometer_id,\n self.stations.c.barometer_id,\n self.stations.c.hygrometer_id]).\\\n where(self.stations.c.id == s_id)\n return self.engine.execute(sql).fetchall()[0]", "title": "" }, { "docid": "67d0d3075e6ee78c6aadcf585ebbfdf3", "score": "0.55890805", "text": "def _raw_stats(raw_df, unit_ids, group_stats, unit_stats):\n for unit_id in unit_ids:\n try:\n logger.debug('\\t-- Extracting raw stats')\n unit_df = raw_df[unit_id]\n group_stats['raw_units'] += 1\n cf = unit_df['load'].max()\n pos = unit_df['load'] > 0\n gen = unit_df.loc[pos, 'load'].sum()\n group_stats['raw_cf'] += cf\n group_stats['raw_gen'] += gen\n unit_stats['raw_cf'] += cf\n unit_stats['raw_gen'] += gen\n points = len(unit_df)\n group_stats['total_points'] += points\n unit_stats['total_points'] += points\n non_zero = (unit_df['load'] > 0).sum()\n group_stats['non_zero_points'] += non_zero\n unit_stats['non_zero_points'] += non_zero\n except KeyError:\n logger.debug('- {} is not present in Raw CEMS data'\n .format(unit_id))\n\n return group_stats, unit_stats", "title": "" }, { "docid": "c3881509bd1053da5cab5977591765ce", "score": "0.55822253", "text": "def scheduled_get_statistic_report_objects(\n self, cr, uid, data=None, context=None):\n _logger.warning('Simulation report: generate statistic data')\n self.get_statistic_report_objects(cr, uid, data=data, context=context)\n _logger.warning('End simulation report: generate statistic data')\n return True", "title": "" }, { "docid": "ff128e7cbbbc44ba4026c32648dd365a", "score": "0.5566758", "text": "def get_unit_statistics():\n\n print('--INTEGER STATS--')\n for functional_unit in INTEGER:\n print(functional_unit)\n\n print('--DIVIDER STATS--')\n for functional_unit in DIVIDER:\n print(functional_unit)\n\n print('--MULTIPLIER STATS--')\n for functional_unit in MULTIPLIER:\n print(functional_unit)\n\n print('--LOAD STATS--')\n for functional_unit in LOAD:\n 
print(functional_unit)\n\n print('--STORE STATS--')\n for functional_unit in STORE:\n print(functional_unit)", "title": "" }, { "docid": "d7fd25517a097454abe3267d6e7fb4c6", "score": "0.5564961", "text": "def _statistic_by_id_from_metadata(\n hass: HomeAssistant,\n metadata: dict[str, tuple[int, StatisticMetaData]],\n) -> dict[str, dict[str, Any]]:\n return {\n meta[\"statistic_id\"]: {\n \"display_unit_of_measurement\": get_display_unit(\n hass, meta[\"statistic_id\"], meta[\"unit_of_measurement\"]\n ),\n \"has_mean\": meta[\"has_mean\"],\n \"has_sum\": meta[\"has_sum\"],\n \"name\": meta[\"name\"],\n \"source\": meta[\"source\"],\n \"unit_class\": _get_unit_class(meta[\"unit_of_measurement\"]),\n \"unit_of_measurement\": meta[\"unit_of_measurement\"],\n }\n for _, meta in metadata.values()\n }", "title": "" }, { "docid": "47257d5edeba3fcf2a4aa30174bb6b05", "score": "0.55339485", "text": "def fetch_n_sampling_data(self,\n smiles,\n num_samples,\n scaled_radius,\n force_unique,\n sanitize):\n logger.debug('Fetching benchmark data...')\n cursor = self.conn.cursor()\n cursor.execute(\n '''\n SELECT id FROM smiles\n WHERE smiles=?\n AND scaled_radius=?\n AND force_unique=?\n AND sanitize=?\n ''',\n [smiles, scaled_radius, force_unique, sanitize])\n id = cursor.fetchone()\n\n if not id:\n return None\n\n cursor.execute(\n '''\n SELECT smiles, embedding, embedding_dim\n FROM smiles_samples WHERE input_id=?\n LIMIT ?\n ''',\n [id[0], num_samples])\n generated_smiles = cursor.fetchall()\n # generated_smiles = [x for x in generated_smiles]\n\n return generated_smiles", "title": "" }, { "docid": "9bfac82a53def653f6b2ce74a8158017", "score": "0.5532075", "text": "def writeStatistics(self, statistics):\n\n write = []\n for stat in statistics:\n write.append(dict(statistic=stat, value=float(statistics[stat])))\n\n return self.hdf.appendRows('/%s/statistics' % self.importGroup, write)", "title": "" }, { "docid": "d9f5b443aa9b4169c39caf7dda12c047", "score": "0.55120105", "text": "def statistics():\n data_by_type = db_queries.stats_data_by_type()\n data_by_year_type, data_by_type_year = db_queries.stats_data_by_year()\n data_by_city, data_by_country = db_queries.stats_data_by_location()\n return render_template('statistics.html', session=session,\n data_by_type=json.dumps(data_by_type),\n data_by_year_type=json.dumps(data_by_year_type),\n data_by_type_year=json.dumps(data_by_type_year),\n data_by_city=json.dumps(data_by_city),\n data_by_country=json.dumps(data_by_country))", "title": "" }, { "docid": "9fcfd24cf7bb2441d68f2ee213875f30", "score": "0.55119413", "text": "def _sorted_statistics_to_dict( # noqa: C901\n hass: HomeAssistant,\n session: Session,\n stats: Sequence[Row[Any]],\n statistic_ids: set[str] | None,\n _metadata: dict[str, tuple[int, StatisticMetaData]],\n convert_units: bool,\n table: type[StatisticsBase],\n start_time: datetime | None,\n units: dict[str, str] | None,\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> dict[str, list[StatisticsRow]]:\n assert stats, \"stats must not be empty\" # Guard against implementation error\n result: dict[str, list[StatisticsRow]] = defaultdict(list)\n metadata = dict(_metadata.values())\n # Identify metadata IDs for which no data was available at the requested start time\n field_map: dict[str, int] = {key: idx for idx, key in enumerate(stats[0]._fields)}\n metadata_id_idx = field_map[\"metadata_id\"]\n start_ts_idx = field_map[\"start_ts\"]\n stats_by_meta_id: dict[int, list[Row]] = {}\n seen_statistic_ids: 
set[str] = set()\n key_func = itemgetter(metadata_id_idx)\n for meta_id, group in groupby(stats, key_func):\n stats_list = stats_by_meta_id[meta_id] = list(group)\n seen_statistic_ids.add(metadata[meta_id][\"statistic_id\"])\n\n # Set all statistic IDs to empty lists in result set to maintain the order\n if statistic_ids is not None:\n for stat_id in statistic_ids:\n # Only set the statistic ID if it is in the data to\n # avoid having to do a second loop to remove the\n # statistic IDs that are not in the data at the end\n if stat_id in seen_statistic_ids:\n result[stat_id] = []\n\n # Figure out which fields we need to extract from the SQL result\n # and which indices they have in the result so we can avoid the overhead\n # of doing a dict lookup for each row\n mean_idx = field_map[\"mean\"] if \"mean\" in types else None\n min_idx = field_map[\"min\"] if \"min\" in types else None\n max_idx = field_map[\"max\"] if \"max\" in types else None\n last_reset_ts_idx = field_map[\"last_reset_ts\"] if \"last_reset\" in types else None\n state_idx = field_map[\"state\"] if \"state\" in types else None\n sum_idx = field_map[\"sum\"] if \"sum\" in types else None\n sum_only = len(types) == 1 and sum_idx is not None\n # Append all statistic entries, and optionally do unit conversion\n table_duration_seconds = table.duration.total_seconds()\n for meta_id, stats_list in stats_by_meta_id.items():\n metadata_by_id = metadata[meta_id]\n statistic_id = metadata_by_id[\"statistic_id\"]\n if convert_units:\n state_unit = unit = metadata_by_id[\"unit_of_measurement\"]\n if state := hass.states.get(statistic_id):\n state_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n convert = _get_statistic_to_display_unit_converter(unit, state_unit, units)\n else:\n convert = None\n\n if sum_only:\n # This function is extremely flexible and can handle all types of\n # statistics, but in practice we only ever use a few combinations.\n #\n # For energy, we only need sum statistics, so we can optimize\n # this path to avoid the overhead of the more generic function.\n assert sum_idx is not None\n result[statistic_id] = _fast_build_sum_list(\n stats_list,\n table_duration_seconds,\n convert,\n start_ts_idx,\n sum_idx,\n )\n continue\n\n ent_results_append = result[statistic_id].append\n #\n # The below loop is a red hot path for energy, and every\n # optimization counts in here.\n #\n # Specifically, we want to avoid function calls,\n # attribute lookups, and dict lookups as much as possible.\n #\n for db_state in stats_list:\n row: StatisticsRow = {\n \"start\": (start_ts := db_state[start_ts_idx]),\n \"end\": start_ts + table_duration_seconds,\n }\n if last_reset_ts_idx is not None:\n row[\"last_reset\"] = db_state[last_reset_ts_idx]\n if convert:\n if mean_idx is not None:\n row[\"mean\"] = convert(db_state[mean_idx])\n if min_idx is not None:\n row[\"min\"] = convert(db_state[min_idx])\n if max_idx is not None:\n row[\"max\"] = convert(db_state[max_idx])\n if state_idx is not None:\n row[\"state\"] = convert(db_state[state_idx])\n if sum_idx is not None:\n row[\"sum\"] = convert(db_state[sum_idx])\n else:\n if mean_idx is not None:\n row[\"mean\"] = db_state[mean_idx]\n if min_idx is not None:\n row[\"min\"] = db_state[min_idx]\n if max_idx is not None:\n row[\"max\"] = db_state[max_idx]\n if state_idx is not None:\n row[\"state\"] = db_state[state_idx]\n if sum_idx is not None:\n row[\"sum\"] = db_state[sum_idx]\n ent_results_append(row)\n\n return result", "title": "" }, { "docid": 
"a9ec7e555de8c9d62ad3ecf06bbdfc41", "score": "0.54822034", "text": "async def ws_list_statistic_ids(\n hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]\n) -> None:\n await ws_handle_list_statistic_ids(hass, connection, msg)", "title": "" }, { "docid": "33d19bfd3651fcfd88997c14f4f00010", "score": "0.5473623", "text": "def get_supported_stats(self):\n\n stat_list = []\n\n stat = {}\n stat['stat_id'] = 'api_activity_histogram'\n stat['description'] = self.api_activity_histogram.__doc__\n stat['necessary_params'] = ['task']\n stat_list.append(stat)\n\n stat = {}\n stat['stat_id'] = 'compute_duration_multiline_plot'\n stat['description'] = self.compute_duration_multiline_plot.__doc__\n stat['necessary_params'] = ['task']\n stat_list.append(stat)\n\n stat = {}\n stat['stat_id'] = 'compute_duration_detailed_stacked_area_plot'\n stat['description'] = self.compute_duration_detailed_stacked_area_plot.__doc__\n stat['necessary_params'] = ['task','alg_label']\n stat_list.append(stat)\n\n stat = {}\n stat['stat_id'] = 'response_time_histogram'\n stat['description'] = self.response_time_histogram.__doc__\n stat['necessary_params'] = ['alg_label']\n stat_list.append(stat)\n\n stat = {}\n stat['stat_id'] = 'network_time_histogram'\n stat['description'] = self.response_time_histogram.__doc__\n stat['necessary_params'] = ['alg_label']\n stat_list.append(stat)\n\n\n return stat_list", "title": "" }, { "docid": "9597b4d45ad78bf3f4e5b58a1189bb66", "score": "0.54673237", "text": "def get_stats(self):\n if self.flag_process:\n warnings.warn('data were already processed. Stats will be extracted from the processed data, not the '\n 'original data.')\n if not self.flag_subset:\n warnings.warn('Stats are extracted before subset. Subset groups or times will affect the stats.')\n\n # Different groups of measurements\n colnames = list(self.dataset.columns.values)\n colnames.remove(self.col_id)\n colnames.remove(self.col_class)\n groups = list(OrderedDict.fromkeys([i.split('_')[0] for i in colnames]))\n\n # Initialize all dictionaries;\n # {'mu':{'groups_combined', 'groupA', 'groupB'}, 'sd':{'groups_combined', 'groupA', 'groupB'}, ...}\n stats = ['mu', 'sd', 'mini', 'maxi']\n self.stats = {k1:{k2:{k3:{} for k3 in ['global', 'train']} for k2 in groups+['groups_combined']} for k1 in stats}\n\n # Consider all channels as one single measurement\n ## Global\n # Convert to array otherwise mean can only be row or column-wise\n groups_combined = np.array(self.dataset.drop([self.col_id, self.col_class], axis=1))\n self.stats['mu']['groups_combined']['global'] = np.nanmean(groups_combined)\n self.stats['sd']['groups_combined']['global'] = np.nanstd(groups_combined)\n self.stats['mini']['groups_combined']['global'] = np.nanmin(groups_combined)\n self.stats['maxi']['groups_combined']['global'] = np.nanmax(groups_combined)\n del groups_combined\n ## Training set only\n groups_combined_train = pd.merge(self.dataset, self.id_set, on=self.col_id)\n groups_combined_train = np.array(\n groups_combined_train[groups_combined_train[self.col_set] == 'train'].drop([self.col_id, self.col_class, self.col_set], axis=1))\n self.stats['mu']['groups_combined']['train'] = np.nanmean(groups_combined_train)\n self.stats['sd']['groups_combined']['train'] = np.nanstd(groups_combined_train)\n self.stats['mini']['groups_combined']['train'] = np.nanmin(groups_combined_train)\n self.stats['maxi']['groups_combined']['train'] = np.nanmax(groups_combined_train)\n\n # Extract statistics independently for each channel\n for 
group in groups:\n group_columns = [i for i in colnames if search('^{0}_'.format(group), i)]\n ## Global\n # Class and ID columns are already excluded here\n group_array = np.array(self.dataset[group_columns])\n self.stats['mu'][group]['global'] = np.nanmean(group_array)\n self.stats['sd'][group]['global'] = np.nanstd(group_array)\n self.stats['mini'][group]['global'] = np.nanmin(group_array)\n self.stats['maxi'][group]['global'] = np.nanmax(group_array)\n del group_array\n ## Training set only\n group_array_train = pd.merge(self.dataset, self.id_set, on=self.col_id)\n group_array_train = group_array_train[group_array_train[self.col_set] == 'train']\n group_array_train = np.array(group_array_train[group_columns])\n self.stats['mu'][group]['train'] = np.nanmean(group_array_train)\n self.stats['sd'][group]['train'] = np.nanstd(group_array_train)\n self.stats['mini'][group]['train'] = np.nanmin(group_array_train)\n self.stats['maxi'][group]['train'] = np.nanmax(group_array_train)\n del group_array_train\n\n return None", "title": "" }, { "docid": "b2404aed19f02c7fb77dc830086c8fcc", "score": "0.54560536", "text": "def statistics_during_period(\n hass: HomeAssistant,\n start_time: datetime,\n end_time: datetime | None = None,\n statistic_ids: set[str] | None = None,\n period: Literal[\"5minute\", \"day\", \"hour\", \"week\", \"month\"] = \"hour\",\n units: dict[str, str] | None = None,\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]]\n | None = None,\n) -> dict[str, list[dict[str, Any]]]:\n if statistic_ids is not None and not isinstance(statistic_ids, set):\n statistic_ids = set(statistic_ids)\n if types is None:\n types = {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"}\n return statistics.statistics_during_period(\n hass, start_time, end_time, statistic_ids, period, units, types\n )", "title": "" }, { "docid": "c836c130e25a418db74bfce1b46b7051", "score": "0.545198", "text": "def _statistics_at_time(\n session: Session,\n metadata_ids: set[int],\n table: type[StatisticsBase],\n start_time: datetime,\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> Sequence[Row] | None:\n start_time_ts = start_time.timestamp()\n stmt = _generate_statistics_at_time_stmt(table, metadata_ids, start_time_ts, types)\n return cast(Sequence[Row], execute_stmt_lambda_element(session, stmt))", "title": "" }, { "docid": "e4f1a26c4fa11883f189a53ca38ce028", "score": "0.5422759", "text": "def statistics(self, dsid):\n web.mime.json.set()\n with self.get_dataset(dsid, False) as ds:\n # Get statistics\n mean, std, n = ds.statistics()\n if not n:\n mean = 0.0\n std = 0.0\n # Convert to json\n return web.json_out(dict(mean=mean, std=std, n=n))", "title": "" }, { "docid": "b94ba829c9324578763f4e2f471ce9c9", "score": "0.54051644", "text": "def sample_numbers_summary(self):\n with sql_connection.TRN as TRN:\n sql = \"\"\"SELECT * FROM\n (SELECT COUNT(sample_id) AS num_samples\n FROM qiita.study_sample\n WHERE study_id = %s) ns,\n -- Number of samples plated\n (SELECT COUNT(DISTINCT sample_id) AS number_samples_plated\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n WHERE study_id = %s) nsp,\n -- Number of samples extracted\n (SELECT COUNT(DISTINCT sample_id) AS number_samples_extracted\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n WHERE study_id = %s) nse,\n -- Number of samples prepared for amplicon 
libraries\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_amplicon_libraries\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.library_prep_16s_composition\n USING (gdna_composition_id)\n WHERE study_id = %s) nsal,\n -- Number of samples included in amplicon pools\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_amplicon_pools\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.library_prep_16s_composition lib\n USING (gdna_composition_id)\n JOIN labman.pool_composition_components p\n ON lib.composition_id = p.input_composition_id\n WHERE study_id = %s) nsap,\n -- Number of samples included in amplicon sequencing pools\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_amplicon_sequencing_pools\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.library_prep_16s_composition lib\n USING (gdna_composition_id)\n JOIN labman.pool_composition_components p\n ON lib.composition_id = p.input_composition_id\n JOIN labman.pool_composition pc\n ON p.output_pool_composition_id =\n pc.pool_composition_id\n JOIN labman.pool_composition_components p2\n ON p2.input_composition_id = pc.composition_id\n WHERE study_id = %s) nsasp,\n -- Number of samples amplicon sequenced\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_amplicon_sequencing_runs\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.library_prep_16s_composition lib\n USING (gdna_composition_id)\n JOIN labman.pool_composition_components p\n ON lib.composition_id = p.input_composition_id\n JOIN labman.pool_composition pc\n ON p.output_pool_composition_id =\n pc.pool_composition_id\n JOIN labman.pool_composition_components p2\n ON p2.input_composition_id = pc.composition_id\n JOIN labman.sequencing_process_lanes s\n ON s.pool_composition_id =\n p2.output_pool_composition_id\n WHERE study_id = %s) nsasr,\n -- Number of samples compressed\n (SELECT COUNT(DISTINCT sample_id) AS number_samples_compressed\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.compressed_gdna_composition\n USING (gdna_composition_id)\n WHERE study_id = %s) nsc,\n -- Number of samples normalized\n (SELECT COUNT(DISTINCT sample_id) AS number_samples_normalized\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.compressed_gdna_composition\n USING (gdna_composition_id)\n JOIN labman.normalized_gdna_composition\n USING (compressed_gdna_composition_id)\n WHERE study_id = %s) nsn,\n -- Number of samples prepared for shotgun libraries\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_shotgun_libraries\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.compressed_gdna_composition\n USING (gdna_composition_id)\n JOIN labman.normalized_gdna_composition\n USING (compressed_gdna_composition_id)\n JOIN labman.library_prep_shotgun_composition\n USING (normalized_gdna_composition_id)\n WHERE study_id = %s) nssl,\n -- Number of samples included in a 
shotgun pool\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_shotgun_pool\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.compressed_gdna_composition\n USING (gdna_composition_id)\n JOIN labman.normalized_gdna_composition\n USING (compressed_gdna_composition_id)\n JOIN labman.library_prep_shotgun_composition lib\n USING (normalized_gdna_composition_id)\n JOIN labman.pool_composition_components p\n ON lib.composition_id = p.input_composition_id\n WHERE study_id = %s) nssp,\n -- Number of samples shotgun sequenced\n (SELECT COUNT(DISTINCT sample_id) AS\n number_samples_shotgun_sequencing_runs\n FROM qiita.study_sample\n JOIN labman.sample_composition USING (sample_id)\n JOIN labman.gdna_composition\n USING (sample_composition_id)\n JOIN labman.compressed_gdna_composition\n USING (gdna_composition_id)\n JOIN labman.normalized_gdna_composition\n USING (compressed_gdna_composition_id)\n JOIN labman.library_prep_shotgun_composition lib\n USING (normalized_gdna_composition_id)\n JOIN labman.pool_composition_components p\n ON lib.composition_id = p.input_composition_id\n JOIN labman.sequencing_process_lanes l\n ON p.output_pool_composition_id =\n l.pool_composition_id\n WHERE study_id = %s) nsssr\"\"\"\n # Magic number 12 -> the number of times the study id appears\n # as parameter in the previous query\n TRN.add(sql, [self.id] * 12)\n # Magic number 0 -> the previous query only outputs a single row\n return dict(TRN.execute_fetchindex()[0])", "title": "" }, { "docid": "7acaa9a366e3a226506fd8ac32bf752d", "score": "0.53877896", "text": "def _read_all_stats_for_simpoint(self):\n stats_file_list = glob.glob(os.path.join(self.results_dir, \"*.stat.*.out\"))\n\n # Check to see if any stats were generated\n if len(stats_file_list) == 0:\n if print_warnings:\n warn(\"No stat files for {} : Skipping...\".format(self.results_dir))\n else:\n self.no_stat_files = False\n\n # Get core id from stat filename and parse all stats\n for stats_file in stats_file_list:\n m = re.search('[.]stat[.]([0-9]+)[.]out', stats_file)\n if m:\n stats_file_name = os.path.join(self.results_dir, stats_file)\n core_id = int(m.group(1))\n self._parse_stats_file(stats_file_name, core_id)", "title": "" }, { "docid": "95aa977061d9eac9e1ca505624c4f8c4", "score": "0.53830624", "text": "def get_objects(self, data=None, context=None):\n return self.pool.get('mrp.production').get_statistic_report_objects(\n self.cr, self.uid, data=data, context=context)", "title": "" }, { "docid": "33e62c367e23bcedf166f45fb08c47dd", "score": "0.53810096", "text": "def samples(self):\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT artifact_id, array_agg(\n sample_id ORDER BY sample_id)\n FROM qiita.analysis_sample\n WHERE analysis_id = %s\n GROUP BY artifact_id\"\"\"\n qdb.sql_connection.TRN.add(sql, [self._id])\n return dict(qdb.sql_connection.TRN.execute_fetchindex())", "title": "" }, { "docid": "274a5e0a754c44ab57749fbb71466dd3", "score": "0.53538233", "text": "def test_query_statistics(self):\n stat = MyStats()\n # imitate stat.get_statistics_data().items. 
The order of the array\n # returned by items() should be preserved.\n class DummyDict:\n def items(self):\n return [('Init', 'dummy'), ('Stats', 'dummy'), ('Auth', 'dummy')]\n stat.get_statistics_data = lambda: DummyDict()\n mod = ('Init', 'Auth', 'Auth')\n seq = [('Init', stat._seq + 1),\n ('Auth', stat._seq + 2),\n ('Auth', stat._seq + 2) ]\n self.assertListEqual(seq, stat._query_statistics(mod))", "title": "" }, { "docid": "ed5b982a079389989fa4c2eb804f4a03", "score": "0.5349753", "text": "def _statistics_during_period_with_session(\n hass: HomeAssistant,\n session: Session,\n start_time: datetime,\n end_time: datetime | None,\n statistic_ids: set[str] | None,\n period: Literal[\"5minute\", \"day\", \"hour\", \"week\", \"month\"],\n units: dict[str, str] | None,\n _types: set[Literal[\"change\", \"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> dict[str, list[StatisticsRow]]:\n if statistic_ids is not None and not isinstance(statistic_ids, set):\n # This is for backwards compatibility to avoid a breaking change\n # for custom integrations that call this method.\n statistic_ids = set(statistic_ids) # type: ignore[unreachable]\n # Fetch metadata for the given (or all) statistic_ids\n metadata = get_instance(hass).statistics_meta_manager.get_many(\n session, statistic_ids=statistic_ids\n )\n if not metadata:\n return {}\n\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]] = set()\n for stat_type in _types:\n if stat_type == \"change\":\n types.add(\"sum\")\n continue\n types.add(stat_type)\n\n metadata_ids = None\n if statistic_ids is not None:\n metadata_ids = _extract_metadata_and_discard_impossible_columns(metadata, types)\n\n # Align start_time and end_time with the period\n if period == \"day\":\n start_time = dt_util.as_local(start_time).replace(\n hour=0, minute=0, second=0, microsecond=0\n )\n start_time = start_time.replace()\n if end_time is not None:\n end_local = dt_util.as_local(end_time)\n end_time = end_local.replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + timedelta(days=1)\n elif period == \"week\":\n start_local = dt_util.as_local(start_time)\n start_time = start_local.replace(\n hour=0, minute=0, second=0, microsecond=0\n ) - timedelta(days=start_local.weekday())\n if end_time is not None:\n end_local = dt_util.as_local(end_time)\n end_time = (\n end_local.replace(hour=0, minute=0, second=0, microsecond=0)\n - timedelta(days=end_local.weekday())\n + timedelta(days=7)\n )\n elif period == \"month\":\n start_time = dt_util.as_local(start_time).replace(\n day=1, hour=0, minute=0, second=0, microsecond=0\n )\n if end_time is not None:\n end_time = _find_month_end_time(dt_util.as_local(end_time))\n\n table: type[Statistics | StatisticsShortTerm] = (\n Statistics if period != \"5minute\" else StatisticsShortTerm\n )\n stmt = _generate_statistics_during_period_stmt(\n start_time, end_time, metadata_ids, table, types\n )\n stats = cast(\n Sequence[Row], execute_stmt_lambda_element(session, stmt, orm_rows=False)\n )\n\n if not stats:\n return {}\n\n result = _sorted_statistics_to_dict(\n hass,\n session,\n stats,\n statistic_ids,\n metadata,\n True,\n table,\n start_time,\n units,\n types,\n )\n\n if period == \"day\":\n result = _reduce_statistics_per_day(result, types)\n\n if period == \"week\":\n result = _reduce_statistics_per_week(result, types)\n\n if period == \"month\":\n result = _reduce_statistics_per_month(result, types)\n\n if \"change\" in _types:\n _augment_result_with_change(\n hass, session, 
start_time, units, _types, table, metadata, result\n )\n\n # Return statistics combined with metadata\n return result", "title": "" }, { "docid": "8c74442f6e0816ecad38206f4b497092", "score": "0.5330986", "text": "def _gen_statistics():\n statistics = []\n\n ##accuracy test\n test = {}\n test['name'] = 'acc'\n test['func'] = lambda rts: sum(rts > 0) / len(rts)\n statistics.append(test)\n\n ##quantile statistics of absolute response time\n quantiles = [10, 30, 50, 70, 90]\n for q in quantiles:\n test = {}\n test['name'] = 'q%d' % q\n test['func'] = lambda rts, q=q: scoreatpercentile(np.abs(rts), q)\n statistics.append(test)\n\n return statistics", "title": "" }, { "docid": "3b094297fcd1e9c9695ea657ef5162da", "score": "0.5327607", "text": "async def test_statistics_during_period_empty_statistic_ids(\n recorder_mock: Recorder, hass: HomeAssistant, hass_ws_client: WebSocketGenerator\n) -> None:\n now = dt_util.utcnow()\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"statistic_ids\": [],\n \"end_time\": (now + timedelta(seconds=1)).isoformat(),\n \"period\": \"5minute\",\n }\n )\n response = await client.receive_json()\n assert not response[\"success\"]\n assert response[\"error\"][\"code\"] == \"invalid_format\"", "title": "" }, { "docid": "be1cfad7fbacb198ae6bf5679903de84", "score": "0.5305608", "text": "def do_statistics(cc, args):\n aggregates = []\n for a in args.aggregate:\n aggregates.append(dict(zip(('func', 'param'), a.split(\"<-\"))))\n api_args = {'meter_name': args.meter,\n 'q': options.cli_to_array(args.query),\n 'period': args.period,\n 'groupby': args.groupby,\n 'aggregates': aggregates}\n try:\n statistics = cc.statistics.list(**api_args)\n except exc.HTTPNotFound:\n raise exc.CommandError('Samples not found: %s' % args.meter)\n else:\n fields_display = {'duration': 'Duration',\n 'duration_end': 'Duration End',\n 'duration_start': 'Duration Start',\n 'period': 'Period',\n 'period_end': 'Period End',\n 'period_start': 'Period Start',\n 'groupby': 'Group By'}\n fields_display.update(AGGREGATES)\n fields = ['period', 'period_start', 'period_end']\n if args.groupby:\n fields.append('groupby')\n if args.aggregate:\n for a in aggregates:\n if 'param' in a:\n fields.append(\"%(func)s/%(param)s\" % a)\n else:\n fields.append(a['func'])\n for stat in statistics:\n stat.__dict__.update(stat.aggregate)\n else:\n fields.extend(['max', 'min', 'avg', 'sum', 'count'])\n fields.extend(['duration', 'duration_start', 'duration_end'])\n cols = [fields_display.get(f, f) for f in fields]\n utils.print_list(statistics, fields, cols)", "title": "" }, { "docid": "d3017b49b59435c6e2067a531b4f846d", "score": "0.52841395", "text": "def _get_last_statistics(\n hass: HomeAssistant,\n number_of_stats: int,\n statistic_id: str,\n convert_units: bool,\n table: type[StatisticsBase],\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> dict[str, list[StatisticsRow]]:\n statistic_ids = {statistic_id}\n with session_scope(hass=hass, read_only=True) as session:\n # Fetch metadata for the given statistic_id\n metadata = get_instance(hass).statistics_meta_manager.get_many(\n session, statistic_ids=statistic_ids\n )\n if not metadata:\n return {}\n metadata_ids = _extract_metadata_and_discard_impossible_columns(metadata, types)\n metadata_id = metadata_ids[0]\n if table == Statistics:\n stmt = _get_last_statistics_stmt(metadata_id, number_of_stats)\n else:\n 
stmt = _get_last_statistics_short_term_stmt(metadata_id, number_of_stats)\n stats = cast(\n Sequence[Row], execute_stmt_lambda_element(session, stmt, orm_rows=False)\n )\n\n if not stats:\n return {}\n\n # Return statistics combined with metadata\n return _sorted_statistics_to_dict(\n hass,\n session,\n stats,\n statistic_ids,\n metadata,\n convert_units,\n table,\n None,\n None,\n types,\n )", "title": "" }, { "docid": "c4be901259c8b891c52a5862874bcd53", "score": "0.52807677", "text": "def getMeasurements(self, sessionId, metricName, startTime, endTime):\n dataEntries = self.db.find(\n {\n self.SESSION_KEY: sessionId,\n self.TIME_KEY: {self.FROM: startTime, self.TO: endTime},\n },\n self.DATA_COLL,\n )\n values = [\n [v[metricName], v[self.TIME_KEY]]\n for v in dataEntries\n if metricName in v\n ]\n return values", "title": "" }, { "docid": "5b54c83ea43d339bd42f8607c028ff4b", "score": "0.5264193", "text": "def _get_gpu_stats(self, queries: List[str]) -> List[List[float]]:\n gpu_query = ','.join(queries)\n format = 'csv,nounits,noheader'\n result = subprocess.run(\n [shutil.which('nvidia-smi'), f'--query-gpu={gpu_query}', f'--format={format}', f'--id={self._gpu_ids}'],\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, # for backward compatibility with python version 3.6\n check=True\n )\n\n def _to_float(x: str) -> float:\n try:\n return float(x)\n except ValueError:\n return 0.\n\n stats = result.stdout.strip().split(os.linesep)\n stats = [[_to_float(x) for x in s.split(', ')] for s in stats]\n return stats", "title": "" }, { "docid": "d0cf61ce31ca44a33f72a3f070ddead4", "score": "0.5261076", "text": "def statistics(data, batch_size=32):\n return _statistics(data, batch_size)", "title": "" }, { "docid": "5cd8f39671fb246712809658ab265f5c", "score": "0.52514744", "text": "def get_last_statistics(\n hass: HomeAssistant,\n number_of_stats: int,\n statistic_id: str,\n convert_units: bool,\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> dict[str, list[StatisticsRow]]:\n return _get_last_statistics(\n hass, number_of_stats, statistic_id, convert_units, Statistics, types\n )", "title": "" }, { "docid": "2d827e4e1bdd368642875397d674bbaa", "score": "0.5249488", "text": "def getSamples(processList=None, channelList=None, typeList=None):\n output = []\n dbstore = SAMADhi.DbStore()\n theSamples = filterSamples(samples,processList, channelList, typeList)\n for key,name in samples.iteritems():\n output += dbstore.find(SAMADhi.Sample,SAMADhi.Sample.name==unicode(name)).one()\n return output", "title": "" }, { "docid": "4eaea058a4d96772bc84f95edb0535a3", "score": "0.524822", "text": "def getMetricData(self, metric_name, namespace, dimensions, unit, statistics='Average', minutes=15, period=60):\n end_time = datetime.datetime.utcnow()\n start_time = end_time - datetime.timedelta(minutes=minutes)\n return self.conn.get_metric_statistics(period, start_time, end_time, metric_name, namespace, statistics, dimensions=dimensions, unit=unit)", "title": "" }, { "docid": "f03120cbfdfb5468d1948199fb317ff1", "score": "0.52424026", "text": "async def ws_get_statistics_metadata(\n hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]\n) -> None:\n statistic_ids = msg.get(\"statistic_ids\")\n statistic_ids_set_or_none = set(statistic_ids) if statistic_ids else None\n metadata = await async_list_statistic_ids(hass, statistic_ids_set_or_none)\n connection.send_result(msg[\"id\"], metadata)", "title": "" }, { "docid": 
"e2ac0fd688be09480714ac92d78176e5", "score": "0.5225897", "text": "def stats(self):\n return unit_types.get_unit_stats(self)", "title": "" }, { "docid": "7850595e279674d35ce4d59566143248", "score": "0.5216141", "text": "def get_samples():\n\t\treturn [x.data for x in ImportedData.objects.filter(type='sample')]", "title": "" }, { "docid": "30e5323142f97aa837c593c00bdecb0c", "score": "0.52098334", "text": "async def test_change_statistics_unit(\n recorder_mock: Recorder, hass: HomeAssistant, hass_ws_client: WebSocketGenerator\n) -> None:\n now = dt_util.utcnow()\n\n units = METRIC_SYSTEM\n attributes = POWER_SENSOR_KW_ATTRIBUTES | {\"device_class\": None}\n state = 10\n\n hass.config.units = units\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n hass.states.async_set(\"sensor.test\", state, attributes=attributes)\n await async_wait_recording_done(hass)\n\n do_adhoc_statistics(hass, period=\"hourly\", start=now)\n await async_recorder_block_till_done(hass)\n\n client = await hass_ws_client()\n\n await client.send_json({\"id\": 1, \"type\": \"recorder/list_statistic_ids\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == [\n {\n \"statistic_id\": \"sensor.test\",\n \"display_unit_of_measurement\": \"kW\",\n \"has_mean\": True,\n \"has_sum\": False,\n \"name\": None,\n \"source\": \"recorder\",\n \"statistics_unit_of_measurement\": \"kW\",\n \"unit_class\": \"power\",\n }\n ]\n\n await client.send_json(\n {\n \"id\": 2,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"5minute\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"sensor.test\": [\n {\n \"end\": int((now + timedelta(minutes=5)).timestamp() * 1000),\n \"last_reset\": None,\n \"max\": 10.0,\n \"mean\": 10.0,\n \"min\": 10.0,\n \"start\": int(now.timestamp() * 1000),\n }\n ],\n }\n\n await client.send_json(\n {\n \"id\": 3,\n \"type\": \"recorder/change_statistics_unit\",\n \"statistic_id\": \"sensor.test\",\n \"new_unit_of_measurement\": \"W\",\n \"old_unit_of_measurement\": \"kW\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n await async_recorder_block_till_done(hass)\n\n await client.send_json({\"id\": 4, \"type\": \"recorder/list_statistic_ids\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == [\n {\n \"statistic_id\": \"sensor.test\",\n \"display_unit_of_measurement\": \"kW\",\n \"has_mean\": True,\n \"has_sum\": False,\n \"name\": None,\n \"source\": \"recorder\",\n \"statistics_unit_of_measurement\": \"W\",\n \"unit_class\": \"power\",\n }\n ]\n\n await client.send_json(\n {\n \"id\": 5,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"5minute\",\n \"units\": {\"power\": \"W\"},\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"sensor.test\": [\n {\n \"end\": int((now + timedelta(minutes=5)).timestamp() * 1000),\n \"last_reset\": None,\n \"max\": 10000.0,\n \"mean\": 10000.0,\n \"min\": 10000.0,\n \"start\": int(now.timestamp() * 1000),\n }\n ],\n }\n\n # Changing to the same unit is allowed but does nothing\n await client.send_json(\n {\n \"id\": 6,\n \"type\": \"recorder/change_statistics_unit\",\n 
\"statistic_id\": \"sensor.test\",\n \"new_unit_of_measurement\": \"W\",\n \"old_unit_of_measurement\": \"W\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n await async_recorder_block_till_done(hass)\n\n await client.send_json({\"id\": 7, \"type\": \"recorder/list_statistic_ids\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == [\n {\n \"statistic_id\": \"sensor.test\",\n \"display_unit_of_measurement\": \"kW\",\n \"has_mean\": True,\n \"has_sum\": False,\n \"name\": None,\n \"source\": \"recorder\",\n \"statistics_unit_of_measurement\": \"W\",\n \"unit_class\": \"power\",\n }\n ]", "title": "" }, { "docid": "c4c95f73321db08c8b9afcc34204e1c4", "score": "0.52031976", "text": "def mock_sensor_statistics():\n\n def sensor_stats(entity_id, start):\n \"\"\"Generate fake statistics.\"\"\"\n return {\n \"meta\": {\n \"has_mean\": True,\n \"has_sum\": False,\n \"name\": None,\n \"statistic_id\": entity_id,\n \"unit_of_measurement\": \"dogs\",\n },\n \"stat\": {\"start\": start},\n }\n\n def get_fake_stats(_hass, start, _end):\n return statistics.PlatformCompiledStatistics(\n [\n sensor_stats(\"sensor.test1\", start),\n sensor_stats(\"sensor.test2\", start),\n sensor_stats(\"sensor.test3\", start),\n ],\n get_metadata(\n _hass, statistic_ids={\"sensor.test1\", \"sensor.test2\", \"sensor.test3\"}\n ),\n )\n\n with patch(\n \"homeassistant.components.sensor.recorder.compile_statistics\",\n side_effect=get_fake_stats,\n ):\n yield", "title": "" }, { "docid": "d245b1717321952f37cf4d9ce898a294", "score": "0.519746", "text": "def _ws_get_statistic_during_period(\n hass: HomeAssistant,\n msg_id: int,\n start_time: dt | None,\n end_time: dt | None,\n statistic_id: str,\n types: set[Literal[\"max\", \"mean\", \"min\", \"change\"]] | None,\n units: dict[str, str],\n) -> str:\n return JSON_DUMP(\n messages.result_message(\n msg_id,\n statistic_during_period(\n hass, start_time, end_time, statistic_id, types, units=units\n ),\n )\n )", "title": "" }, { "docid": "cbf538ddf75e1bf4ca7e4eb54d2eec67", "score": "0.51971984", "text": "def _flatten_list_statistic_ids_metadata_result(\n result: dict[str, dict[str, Any]]\n) -> list[dict]:\n return [\n {\n \"statistic_id\": _id,\n \"display_unit_of_measurement\": info[\"display_unit_of_measurement\"],\n \"has_mean\": info[\"has_mean\"],\n \"has_sum\": info[\"has_sum\"],\n \"name\": info.get(\"name\"),\n \"source\": info[\"source\"],\n \"statistics_unit_of_measurement\": info[\"unit_of_measurement\"],\n \"unit_class\": info[\"unit_class\"],\n }\n for _id, info in result.items()\n ]", "title": "" }, { "docid": "a19024ee285b1cafd5a87523805a3f9f", "score": "0.5183593", "text": "def getAllMetrics(self, filtr=None):\n dataEntries = self.findInMeta(filtr)\n response = {}\n for entry in dataEntries:\n for metric in entry[self.METRICS_KEY]:\n response.update(\n {metric[self.METRIC_ID_KEY]: [metric[self.DESCRIPTION_KEY],\n metric[self.UNIT_KEY]]}\n )\n\n return response", "title": "" }, { "docid": "4baa07f1416600499e6c8fd50aaea4ae", "score": "0.51810026", "text": "def metricsWithStats(self, metrics=None):\n if metrics is None:\n metrics = self.metrics\n # Identify metricIds which are also in stats.\n metrics = metrics[np.in1d(metrics['metricId'], self.stats['metricId'])]\n metrics = self.sortMetrics(metrics, order=['displayGroup', 'displaySubgroup', 'slicerName',\n 'displayOrder', 'metricMetadata', 'baseMetricNames'])\n return metrics", "title": "" }, { "docid": 
"b346aec5784f27abd2139af4aecafdc8", "score": "0.51805717", "text": "def get_stats():", "title": "" }, { "docid": "b346aec5784f27abd2139af4aecafdc8", "score": "0.51805717", "text": "def get_stats():", "title": "" }, { "docid": "b346aec5784f27abd2139af4aecafdc8", "score": "0.51805717", "text": "def get_stats():", "title": "" }, { "docid": "b346aec5784f27abd2139af4aecafdc8", "score": "0.51805717", "text": "def get_stats():", "title": "" }, { "docid": "08746f1601d164e2a3ca67c33e5dc054", "score": "0.5174838", "text": "def get_all_device_stats():\n devices = get_devices()\n stats = {}\n for serial in devices:\n model, device_stats = get_device_stats(serial)\n if not stats.get(model):\n stats[model] = {}\n stats[model][serial] = device_stats\n return stats", "title": "" }, { "docid": "fa8496e20f37df9f899d1d9420e9f653", "score": "0.51720697", "text": "def _group_stats(self, group_type):\n group_stats = pd.Series(0, index=['raw_units', 'raw_cf', 'raw_gen',\n 'total_points', 'non_zero_points',\n 'clean_units', 'clean_points',\n 'clean_cf', 'clean_gen',\n 'filtered_units', 'filtered_points',\n 'filtered_cf', 'filtered_gen',\n 'final_units', 'final_cf',\n 'final_gen', 'final_points'])\n stats = group_stats.copy().drop(labels=['raw_units', 'clean_units',\n 'filtered_units',\n 'final_units'])\n group_stats.name = group_type\n\n group_fits = self._get_fits(group_type).set_index('unit_id')\n raw_df = self._get_raw(group_type)\n clean_df = self._get_cleaned(group_type)\n filtered_df = self._get_filtered(group_type)\n\n group_unit_stats = []\n for unit_id, unit_fit in group_fits.iterrows():\n logger.debug('\\t- Extracting stats for unit: {}'.format(unit_id))\n unit_stats = stats.copy()\n unit_stats.name = unit_id\n # Raw Dat Stats\n if \"CC\" in group_type:\n pos = self._cc_map['cc_unit'] == unit_id\n raw_unit_ids = self._cc_map.loc[pos, 'unit_id'].to_list()\n else:\n raw_unit_ids = [unit_id]\n\n group_stats, unit_stats = self._raw_stats(raw_df, raw_unit_ids,\n group_stats, unit_stats)\n # Clean Data Stats\n\n group_stats, unit_stats = self._clean_stats(clean_df, unit_id,\n group_stats,\n unit_stats)\n # Filtered and Final data Stats\n group_stats, unit_stats = self._filter_stats(filtered_df, unit_id,\n unit_fit,\n group_stats,\n unit_stats)\n group_unit_stats.append(unit_stats)\n\n group_unit_stats = pd.concat(group_unit_stats, axis=1).T\n\n return group_stats, group_unit_stats", "title": "" }, { "docid": "d1cc46a37dc2e097a584f3f39479ca7e", "score": "0.5167431", "text": "def statistics(self):\n resp = self._get(endpoint=\"stats\")\n return self.process_response(resp)", "title": "" }, { "docid": "9b6c7e7671accb56dc8a1ca9e6d10f19", "score": "0.51556337", "text": "def _get_statistic_data(\n summary: Dict[str, PredictionResult],\n statistic: str,\n condition_id: str,\n output_id: str,\n) -> Tuple[Sequence[float], Sequence[float]]:\n condition_index = summary[statistic].condition_ids.index(condition_id)\n condition_result = summary[statistic].conditions[condition_index]\n t = condition_result.timepoints\n output_index = condition_result.output_ids.index(output_id)\n y = condition_result.output[:, output_index]\n return (t, y)", "title": "" }, { "docid": "48da05368ebe836453549c71e334a69d", "score": "0.51433253", "text": "def build_stats(self):\n result_i = 0\n last = 0\n for n in range(self.stats.min_number, self.stats.max_number + 1):\n result_i += self.raw_data.get(n, 0)\n self.stats.data.update({n: result_i})\n\n return self.stats", "title": "" }, { "docid": "759d3eff5e99c9206683e00765f74da8", "score": 
"0.51402", "text": "def query(self, **kwargs):\r\n \r\n measurementList = {}\r\n measurementList['no_dim'] = {}\r\n \r\n for url in self.url:\r\n if(self.socketGain):\r\n stats_csv_rows = self.readSocket(url) \r\n \r\n if not ('idle' in measurementList['no_dim']):\r\n measurementList['no_dim']['idle'] = []\r\n measurementList['no_dim']['idle'].append(\r\n self.getIdleFromSocket(url)\r\n )\r\n \r\n else:\r\n stats_csv_rows = self.readHttp(url)\r\n \r\n if not ('idle' in measurementList['no_dim']):\r\n measurementList['no_dim']['idle'] = []\r\n measurementList['no_dim']['idle'].append(\r\n self.getIdleFromHttp(url)\r\n )\r\n \r\n for row in stats_csv_rows:\r\n try:\r\n pxname = row['# pxname']\r\n svname = row['svname']\r\n dimensionName = pxname\r\n \r\n if svname == 'FRONTEND':\r\n metric_prefix = \"fe_\"\r\n prefix_metrics = {'ereq', 'scur', 'susage', 'req_rate', 'bin', 'bout'}\r\n elif svname == 'BACKEND':\r\n continue\r\n else:\r\n metric_prefix = 'be_'\r\n prefix_metrics = {'econ', 'eresp', 'qcur', 'scur', 'susage', 'bin', 'bout', 'rtime'}\r\n except KeyError as ex:\r\n raise ConfigException('Content from \"%s\" does not appear to be in haproxy stats format' % url) from ex\r\n \r\n all_metrics = prefix_metrics.union(self._NO_PREFIX_METRICS)\r\n \r\n if not (dimensionName in measurementList):\r\n measurementList[dimensionName] = {}\r\n \r\n for metric in all_metrics:\r\n if metric in row and metric in prefix_metrics:\r\n metric_value = row[metric]\r\n if metric_value == '' or metric_value == None:\r\n continue\r\n metric_json_name = metric_prefix + metric\r\n if not (metric_json_name in measurementList[dimensionName]):\r\n measurementList[dimensionName][metric_json_name] = []\r\n measurementList[dimensionName][metric_json_name].append(\r\n metric_value\r\n )\r\n if metric in row and metric in self._NO_PREFIX_METRICS:\r\n metric_value = row[metric]\r\n if metric_value == '' or metric_value == None:\r\n continue\r\n metric_json_name = metric\r\n if not (metric_json_name in measurementList[dimensionName]):\r\n measurementList[dimensionName][metric_json_name] = []\r\n measurementList[dimensionName][metric_json_name].append(\r\n metric_value\r\n )\r\n elif metric == 'susage':\r\n try:\r\n scur = row['scur']\r\n slim = row['slim']\r\n if scur == '' or slim == '' or scur == None or slim == None:\r\n continue\r\n metric_json_name = metric_prefix + metric\r\n if not (metric_json_name in measurementList[dimensionName]):\r\n measurementList[dimensionName][metric_json_name] = []\r\n measurementList[dimensionName][metric_json_name].append(\r\n (float(scur)/float(slim))*100\r\n )\r\n except KeyError as ex:\r\n raise ConfigException('Content from \"%s\" does not appear to be in haproxy stats format' % url) from ex\r\n\r\n for dimension, metricsList in measurementList.items():\r\n dimensions = {'service': dimension}\r\n for metricKey, metricValues in metricsList.items():\r\n aggregatedValue = 0\r\n for value in metricValues:\r\n aggregatedValue += float(value)\r\n if (\"susage\" in metricKey):\r\n self.results_builder.add_absolute_result(\r\n PluginMeasurement(dimensions=dimensions, key=metricKey, value=aggregatedValue/len(metricValues))\r\n )\r\n elif (\"idle\" in metricKey):\r\n self.results_builder.add_absolute_result(\r\n PluginMeasurement(key=metricKey, value=aggregatedValue/len(metricValues))\r\n )\r\n elif metricKey in self._ABSOLUTE_METRICS:\r\n self.results_builder.add_absolute_result(\r\n PluginMeasurement(dimensions=dimensions, key=metricKey, value=int(aggregatedValue))\r\n )\r\n else:\r\n 
self.results_builder.add_relative_result(\r\n PluginMeasurement(dimensions=dimensions, key=metricKey, value=int(aggregatedValue))\r\n )", "title": "" }, { "docid": "34f20e95e847a49de7e894700fd79adf", "score": "0.51320755", "text": "def __get_list_metrics(self):\n experiment_dir = os.path.join(config.path_to_output, self.name)\n with open(os.path.join(experiment_dir, 'performance.json'), 'r') as fh:\n data = json.load(fh)\n\n perf_list = list()\n perf_list2 = list()\n for run in data.keys():\n for method in data[run].keys():\n for db in data[run][method].keys():\n for metric in data[run][method][db].keys():\n if \"line\" in metric:\n value = data[run][method][db][metric]\n perf_list2.append([run, method, db, metric, value])\n elif \"drug\" in metric:\n value = data[run][method][db][metric]\n perf_list.append([run, method, db, metric, value])\n column_names = ['Run', 'Method', 'Dataset', 'Metric', 'Value']\n df_lines = pd.DataFrame(perf_list2, columns=column_names)\n df_drugs = pd.DataFrame(perf_list, columns=column_names)\n return df_lines, df_drugs", "title": "" }, { "docid": "78dfd1dad511d45fd01e9d2d97d14303", "score": "0.5127378", "text": "async def test_statistic_during_period_hole(\n recorder_mock: Recorder, hass: HomeAssistant, hass_ws_client: WebSocketGenerator\n) -> None:\n id = 1\n\n def next_id():\n nonlocal id\n id += 1\n return id\n\n now = dt_util.utcnow()\n\n await async_recorder_block_till_done(hass)\n client = await hass_ws_client()\n\n zero = now\n start = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=-18)\n\n imported_stats = [\n {\n \"start\": (start + timedelta(hours=3 * i)),\n \"max\": i * 2,\n \"mean\": i,\n \"min\": -76 + i * 2,\n \"sum\": i,\n }\n for i in range(0, 6)\n ]\n\n imported_metadata = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"recorder\",\n \"statistic_id\": \"sensor.test\",\n \"unit_of_measurement\": \"kWh\",\n }\n\n recorder.get_instance(hass).async_import_statistics(\n imported_metadata,\n imported_stats,\n Statistics,\n )\n await async_wait_recording_done(hass)\n\n # This should include imported_stats[:]\n await client.send_json(\n {\n \"id\": next_id(),\n \"type\": \"recorder/statistic_during_period\",\n \"statistic_id\": \"sensor.test\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"max\": max(stat[\"max\"] for stat in imported_stats[:]),\n \"mean\": fmean(stat[\"mean\"] for stat in imported_stats[:]),\n \"min\": min(stat[\"min\"] for stat in imported_stats[:]),\n \"change\": imported_stats[-1][\"sum\"] - imported_stats[0][\"sum\"],\n }\n\n # This should also include imported_stats[:]\n start_time = \"2022-10-20T13:00:00+00:00\"\n end_time = \"2022-10-21T05:00:00+00:00\"\n assert imported_stats[0][\"start\"].isoformat() == start_time\n assert imported_stats[-1][\"start\"].isoformat() < end_time\n await client.send_json(\n {\n \"id\": next_id(),\n \"type\": \"recorder/statistic_during_period\",\n \"statistic_id\": \"sensor.test\",\n \"fixed_period\": {\n \"start_time\": start_time,\n \"end_time\": end_time,\n },\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"max\": max(stat[\"max\"] for stat in imported_stats[:]),\n \"mean\": fmean(stat[\"mean\"] for stat in imported_stats[:]),\n \"min\": min(stat[\"min\"] for stat in imported_stats[:]),\n \"change\": imported_stats[-1][\"sum\"] - imported_stats[0][\"sum\"],\n }\n\n # This should 
also include imported_stats[:]\n start_time = \"2022-10-20T13:00:00+00:00\"\n end_time = \"2022-10-21T08:20:00+00:00\"\n await client.send_json(\n {\n \"id\": next_id(),\n \"type\": \"recorder/statistic_during_period\",\n \"statistic_id\": \"sensor.test\",\n \"fixed_period\": {\n \"start_time\": start_time,\n \"end_time\": end_time,\n },\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"max\": max(stat[\"max\"] for stat in imported_stats[:]),\n \"mean\": fmean(stat[\"mean\"] for stat in imported_stats[:]),\n \"min\": min(stat[\"min\"] for stat in imported_stats[:]),\n \"change\": imported_stats[-1][\"sum\"] - imported_stats[0][\"sum\"],\n }\n\n # This should include imported_stats[1:4]\n start_time = \"2022-10-20T16:00:00+00:00\"\n end_time = \"2022-10-20T23:00:00+00:00\"\n assert imported_stats[1][\"start\"].isoformat() == start_time\n assert imported_stats[3][\"start\"].isoformat() < end_time\n await client.send_json(\n {\n \"id\": next_id(),\n \"type\": \"recorder/statistic_during_period\",\n \"statistic_id\": \"sensor.test\",\n \"fixed_period\": {\n \"start_time\": start_time,\n \"end_time\": end_time,\n },\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"max\": max(stat[\"max\"] for stat in imported_stats[1:4]),\n \"mean\": fmean(stat[\"mean\"] for stat in imported_stats[1:4]),\n \"min\": min(stat[\"min\"] for stat in imported_stats[1:4]),\n \"change\": imported_stats[3][\"sum\"] - imported_stats[1][\"sum\"],\n }\n\n # This should also include imported_stats[1:4]\n start_time = \"2022-10-20T15:00:00+00:00\"\n end_time = \"2022-10-21T00:00:00+00:00\"\n assert imported_stats[1][\"start\"].isoformat() > start_time\n assert imported_stats[3][\"start\"].isoformat() < end_time\n await client.send_json(\n {\n \"id\": next_id(),\n \"type\": \"recorder/statistic_during_period\",\n \"statistic_id\": \"sensor.test\",\n \"fixed_period\": {\n \"start_time\": start_time,\n \"end_time\": end_time,\n },\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"max\": max(stat[\"max\"] for stat in imported_stats[1:4]),\n \"mean\": fmean(stat[\"mean\"] for stat in imported_stats[1:4]),\n \"min\": min(stat[\"min\"] for stat in imported_stats[1:4]),\n \"change\": imported_stats[3][\"sum\"] - imported_stats[1][\"sum\"],\n }", "title": "" }, { "docid": "b1ec7e900afed96c2fff85b3eb3afe36", "score": "0.51203245", "text": "def test_results_created(self, mock_qutrit_device_extract_stats, monkeypatch, measurement):\n\n qscript = QuantumScript(measurements=[measurement])\n\n with monkeypatch.context() as m:\n dev = mock_qutrit_device_extract_stats()\n results = dev.statistics(qscript)\n\n assert results == [0]", "title": "" }, { "docid": "bf2a817e5ba250eccaa2ff2fda35d6f9", "score": "0.5111268", "text": "def get_analytics(self, stats: List[AnalyticsRequest]) -> Operation:\n ...", "title": "" }, { "docid": "16007f636723c805e673ff5593355d26", "score": "0.5105022", "text": "def statistic_during_period(\n hass: HomeAssistant,\n start_time: datetime | None,\n end_time: datetime | None,\n statistic_id: str,\n types: set[Literal[\"max\", \"mean\", \"min\", \"change\"]] | None,\n units: dict[str, str] | None,\n) -> dict[str, Any]:\n metadata = None\n\n if not types:\n types = {\"max\", \"mean\", \"min\", \"change\"}\n\n result: dict[str, Any] = {}\n\n with session_scope(hass=hass, read_only=True) as session:\n # Fetch 
metadata for the given statistic_id\n if not (\n metadata := get_instance(hass).statistics_meta_manager.get(\n session, statistic_id\n )\n ):\n return result\n\n metadata_id = metadata[0]\n\n oldest_stat = _first_statistic(session, Statistics, metadata_id)\n oldest_5_min_stat = None\n if not valid_statistic_id(statistic_id):\n oldest_5_min_stat = _first_statistic(\n session, StatisticsShortTerm, metadata_id\n )\n\n # To calculate the summary, data from the statistics (hourly) and\n # short_term_statistics (5 minute) tables is combined\n # - The short term statistics table is used for the head and tail of the period,\n # if the period it doesn't start or end on a full hour\n # - The statistics table is used for the remainder of the time\n now = dt_util.utcnow()\n if end_time is not None and end_time > now:\n end_time = now\n\n tail_only = (\n start_time is not None\n and end_time is not None\n and end_time - start_time < timedelta(hours=1)\n )\n\n # Calculate the head period\n head_start_time: datetime | None = None\n head_end_time: datetime | None = None\n if (\n not tail_only\n and oldest_stat is not None\n and oldest_5_min_stat is not None\n and oldest_5_min_stat - oldest_stat < timedelta(hours=1)\n and (start_time is None or start_time < oldest_5_min_stat)\n ):\n # To improve accuracy of averaged for statistics which were added within\n # recorder's retention period.\n head_start_time = oldest_5_min_stat\n head_end_time = oldest_5_min_stat.replace(\n minute=0, second=0, microsecond=0\n ) + timedelta(hours=1)\n elif not tail_only and start_time is not None and start_time.minute:\n head_start_time = start_time\n head_end_time = start_time.replace(\n minute=0, second=0, microsecond=0\n ) + timedelta(hours=1)\n\n # Calculate the tail period\n tail_start_time: datetime | None = None\n tail_end_time: datetime | None = None\n if end_time is None:\n tail_start_time = now.replace(minute=0, second=0, microsecond=0)\n elif end_time.minute:\n tail_start_time = (\n start_time\n if tail_only\n else end_time.replace(minute=0, second=0, microsecond=0)\n )\n tail_end_time = end_time\n\n # Calculate the main period\n main_start_time: datetime | None = None\n main_end_time: datetime | None = None\n if not tail_only:\n main_start_time = start_time if head_end_time is None else head_end_time\n main_end_time = end_time if tail_start_time is None else tail_start_time\n\n if not types.isdisjoint({\"max\", \"mean\", \"min\"}):\n result = _get_max_mean_min_statistic(\n session,\n head_start_time,\n head_end_time,\n main_start_time,\n main_end_time,\n tail_start_time,\n tail_end_time,\n tail_only,\n metadata_id,\n types,\n )\n\n if \"change\" in types:\n oldest_sum: float | None\n if start_time is None:\n oldest_sum = 0.0\n else:\n oldest_sum = _get_oldest_sum_statistic(\n session,\n head_start_time,\n main_start_time,\n tail_start_time,\n oldest_stat,\n tail_only,\n metadata_id,\n )\n newest_sum = _get_newest_sum_statistic(\n session,\n head_start_time,\n head_end_time,\n main_start_time,\n main_end_time,\n tail_start_time,\n tail_end_time,\n tail_only,\n metadata_id,\n )\n # Calculate the difference between the oldest and newest sum\n if oldest_sum is not None and newest_sum is not None:\n result[\"change\"] = newest_sum - oldest_sum\n else:\n result[\"change\"] = None\n\n state_unit = unit = metadata[1][\"unit_of_measurement\"]\n if state := hass.states.get(statistic_id):\n state_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n convert = _get_statistic_to_display_unit_converter(unit, state_unit, 
units)\n\n if not convert:\n return result\n return {key: convert(value) for key, value in result.items()}", "title": "" }, { "docid": "8d7f68743a10db6f90d6b6a2f4a73388", "score": "0.5102899", "text": "def test_measure_statistics(self):\n\n flag = \"user\"\n api = \"measure.statistics\"\n\n result = self.access_api(flag = flag, api = api)\n self.assertTrue('data_list' in result)\n print(result[\"data_list\"])", "title": "" }, { "docid": "a8dd43bd1312273f9d7a3f90e5154e8f", "score": "0.5101307", "text": "def summary_data(self):\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT\n COUNT(DISTINCT study_id) as studies,\n COUNT(DISTINCT artifact_id) as artifacts,\n COUNT(DISTINCT sample_id) as samples\n FROM qiita.study_artifact\n JOIN qiita.analysis_sample USING (artifact_id)\n WHERE analysis_id = %s\"\"\"\n qdb.sql_connection.TRN.add(sql, [self._id])\n return dict(qdb.sql_connection.TRN.execute_fetchindex()[0])", "title": "" }, { "docid": "ea47c5424008f45c10a6b138fd5a0f89", "score": "0.5087127", "text": "def measurements(update: Update, context: CallbackContext):\n try:\n last = list(\n DB[MEASUREMENTS_COL].aggregate(\n [\n {\"$sort\": {\"date\": -1}},\n {\"$limit\": 1},\n ]\n )\n )[0]\n values = last[\"values\"]\n timestamp = (\n datetime.fromisoformat(last[\"date\"])\n .astimezone(DISPLAY_TIMEZONE)\n .strftime(\"%Y\\-%m\\-%d, %H:%M\")\n )\n update.message.reply_text(\n f\"Measurements at {timestamp}:\\n\"\n f\"```\"\n f' Temperature: {values[\"temperature\"]:.1f} ยฐC\\n'\n f' Humidity: {values[\"humidity\"]:.0f} %\\n'\n f' COโ‚‚: {values[\"co2\"]:.0f} ppm'\n f\"```\",\n parse_mode=ParseMode.MARKDOWN_V2,\n )\n except requests.exceptions.RequestException:\n update.message.reply_text(\"Impossible to get the measurements\")", "title": "" }, { "docid": "c103d160ee75a0b3aa6b85477b7ecf9f", "score": "0.5082626", "text": "def get_query_specs(self):\n lookup_queries = []\n sandbox_queries = []\n drop_queries = []\n unconsented_lookup_query = {\n cdr_consts.QUERY:\n UNCONSENTED_PID_QUERY.render(\n project=self.project_id,\n dataset=self.dataset_id,\n sandbox_dataset=self.sandbox_dataset_id,\n unconsented_lookup=EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE\n )\n }\n lookup_queries.append(unconsented_lookup_query)\n\n for table in self.affected_tables:\n\n sandbox_query = {\n cdr_consts.QUERY:\n SANDBOX_ROWS.render(\n project=self.project_id,\n dataset=self.dataset_id,\n sandbox_dataset=self.sandbox_dataset_id,\n sandbox_table=self.sandbox_table_for(table),\n domain_table=table,\n unconsented_lookup=\n EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE,\n mapping_domain_table=f'_mapping_{table}')\n }\n\n sandbox_queries.append(sandbox_query)\n\n drop_query = {\n cdr_consts.QUERY:\n DROP_ROWS.render(\n project=self.project_id,\n dataset=self.dataset_id,\n sandbox_dataset=self.sandbox_dataset_id,\n domain_table=table,\n sandbox_table=self.sandbox_table_for(table))\n }\n\n drop_queries.append(drop_query)\n\n return lookup_queries + sandbox_queries + drop_queries", "title": "" }, { "docid": "4fe0b4d36f9af6e32afcbb842b4cbd9a", "score": "0.5076066", "text": "def queryInfo(self) -> TGProcessorSetStatistics:", "title": "" }, { "docid": "15f8e3c3e80de9dbfca62b6bb6826cf8", "score": "0.50675213", "text": "def all_gather_stats(stat, max_size=4096):\n stats = Statistics.all_gather_stats_list([stat], max_size=max_size)\n return stats[0]", "title": "" }, { "docid": "eb0dc0f770730f0def41a302a93304b0", "score": "0.5054132", "text": "async def ws_handle_list_statistic_ids(\n hass: HomeAssistant, connection: 
websocket_api.ActiveConnection, msg: dict\n) -> None:\n connection.send_message(\n await get_instance(hass).async_add_executor_job(\n _ws_get_list_statistic_ids,\n hass,\n msg[\"id\"],\n msg.get(\"statistic_type\"),\n )\n )", "title": "" }, { "docid": "0e16de0c79b190ca2f132cbd1c9a9abc", "score": "0.5039339", "text": "def get_inventory_steps(self, bulkids, window):\n\n query = \"\"\"\nSELECT * FROM (\n SELECT xacttime AS date,\n bulkid,\n 'sale' AS type,\n -1 AS n\n FROM transactions t INNER JOIN products p\n ON p.barcode = t.barcode\n WHERE xacttime > current_date - interval %s and bulkid is not null\n\n UNION ALL\n\n SELECT date,\n bulk_type_id AS bulkid,\n 'order' AS type,\n quantity*number AS n\n FROM order_items oi INNER JOIN orders o\n ON o.id = oi.order_id\n WHERE date > current_date - interval %s and bulk_type_id is not null\n\n UNION ALL\n\n SELECT date,\n bulkid,\n 'inventory' AS type,\n units AS n\n FROM inventory i\n WHERE\n date > current_date - interval %s\n and bulkid is not null\n and units is not null\n) s\nWHERE bulkid IN %s\nORDER BY date ASC\n\"\"\"\n cursor = self._get_cursor()\n cursor.execute(query, [window, window, window, tuple(bulkids)])\n stats = cursor.fetchall()\n self.db.commit()\n return stats", "title": "" }, { "docid": "74f50f4cf37123dc21ca1c75a25a0730", "score": "0.50389886", "text": "def get_all_sens(self):\n\n results = {gamma: {} for gamma in self.spec_ind}\n for gamma in self.spec_ind:\n for t in self.all_delta_ts:\n try:\n results[gamma][t] = np.load(\n self.trials_base + 'signal_results/'\n + f'{self.sample_str}_delta_t_{t:.2e}_gamma_{gamma}_'\n + '.pkl',\n allow_pickle=True)\n except Exception as e:\n pass\n event_sensitivity = {gamma: np.array([\n results[gamma][ii]['sensitivity']['n_sig']\n for ii in self.all_delta_ts])\n for gamma in self.spec_ind}\n sensitivity = {gamma: np.array([\n results[gamma][ii]['sensitivity']['E2dNdE']\n for ii in self.all_delta_ts])\n for gamma in self.spec_ind}\n event_discovery = {gamma: np.array([\n results[gamma][ii]['discovery']['n_sig']\n for ii in self.all_delta_ts])\n for gamma in self.spec_ind}\n discovery = {gamma: np.array([\n results[gamma][ii]['discovery']['E2dNdE']\n for ii in self.all_delta_ts])\n for gamma in self.spec_ind}\n\n self.all_results = results\n self.all_sensitivity = sensitivity\n self.all_event_sensitivity = event_sensitivity\n self.all_discovery = discovery\n self.all_event_discovery = event_discovery\n disc_dict = results[self.spec_ind[0]][self.all_delta_ts[0]]\n self.disc_cl = disc_dict['discovery']['CL']\n self.discovery_nsigma = disc_dict['discovery']['nsigma']\n self.get_this_sens()", "title": "" }, { "docid": "2a426fa99487dd77e09757fb3ad20715", "score": "0.5034053", "text": "async def test_statistics_during_period_no_statistic_ids(\n recorder_mock: Recorder, hass: HomeAssistant, hass_ws_client: WebSocketGenerator\n) -> None:\n now = dt_util.utcnow()\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"end_time\": (now + timedelta(seconds=1)).isoformat(),\n \"period\": \"5minute\",\n }\n )\n response = await client.receive_json()\n assert not response[\"success\"]\n assert response[\"error\"][\"code\"] == \"invalid_format\"", "title": "" }, { "docid": "1d8daa71a58960515238ed82f6e727a5", "score": "0.5030641", "text": "def get_sample_ids(self):\n print('get_sample_ids called')\n return []", "title": "" }, { "docid": "0ac922712a811364d52d17fb60c7273c", "score": "0.5029728", 
"text": "def get_stats(self):\n return self._api.get_stats(self.id)", "title": "" }, { "docid": "d966ee4c5f1420d87bce899ba78b0e6d", "score": "0.50243616", "text": "def run_stats(self):\n\n try:\n self._process = 'run_stats'\n self._start_time = time.time()\n\n with self.reader as r:\n stats = Stats([(c.name, c.type) for c in r.columns]).run(r, sample_from=r.n_rows)\n\n with self.writer as w:\n w.set_stats(stats)\n\n finally:\n self._process = 'none'\n\n return stats", "title": "" }, { "docid": "2452a2b721bf89cddc41bb117f8accbd", "score": "0.50178534", "text": "def get_metadata_with_session(\n instance: Recorder,\n session: Session,\n *,\n statistic_ids: set[str] | None = None,\n statistic_type: Literal[\"mean\"] | Literal[\"sum\"] | None = None,\n statistic_source: str | None = None,\n) -> dict[str, tuple[int, StatisticMetaData]]:\n return instance.statistics_meta_manager.get_many(\n session,\n statistic_ids=statistic_ids,\n statistic_type=statistic_type,\n statistic_source=statistic_source,\n )", "title": "" }, { "docid": "83a73b9756d678d6fcd624b750605bd5", "score": "0.5011193", "text": "def stats_factory(\n stats: List[DataQueryResult],\n units: UnitDefinition,\n interval: TimeInterval,\n period: Optional[TimePeriod] = None,\n network: Optional[NetworkSchema] = None,\n timezone: Optional[Union[timezone, str]] = None,\n code: Optional[str] = None,\n region: Optional[str] = None,\n include_group_code: bool = False,\n fueltech_group: Optional[bool] = False,\n group_field: Optional[str] = None,\n data_id: Optional[str] = None,\n localize: Optional[bool] = True,\n include_code: Optional[bool] = True,\n) -> Optional[OpennemDataSet]:\n\n if network:\n timezone = network.get_timezone()\n\n group_codes = list(set([i.group_by for i in stats if i.group_by]))\n\n stats_grouped = []\n\n for group_code in group_codes:\n\n data_grouped: Dict[datetime, Any] = dict()\n\n for stat in stats:\n if stat.group_by != group_code:\n continue\n\n if stat.interval not in data_grouped:\n data_grouped[stat.interval] = None\n\n # if stat.result:\n data_grouped[stat.interval] = stat.result\n\n data_sorted = OrderedDict(sorted(data_grouped.items()))\n\n data_value = list(data_sorted.values())\n\n # Skip null series\n if len([i for i in data_value if i]) == 0:\n continue\n\n # @TODO possible bring this back\n # Skip zero series\n # if sum([i for i in data_value if i]) == 0:\n # continue\n\n # Cast trailing nulls\n if not units.name.startswith(\"temperature\") or units.cast_nulls:\n data_value = cast_trailing_nulls(data_value)\n\n # Find start/end dates\n dates = list(data_grouped.keys())\n\n if not dates:\n return None\n\n start = min(dates)\n end = max(dates)\n\n # should probably make sure these are the same TZ\n if localize:\n if timezone and not is_aware(start):\n start = make_aware(start, timezone)\n\n if timezone and not is_aware(end):\n end = make_aware(end, timezone)\n\n if timezone and localize and network and network.offset:\n tz = pytz.FixedOffset(int(network.offset))\n\n start = start.astimezone(tz)\n end = end.astimezone(tz)\n\n # Everything needs a timezone even flat dates\n if network and timezone and not is_aware(start):\n start = start.replace(tzinfo=network.get_fixed_offset())\n\n if network and timezone and not is_aware(end):\n end = end.replace(tzinfo=network.get_fixed_offset())\n\n # free\n dates = []\n\n history = OpennemDataHistory(\n start=start,\n last=end,\n interval=interval.interval_human,\n data=data_value,\n )\n\n data = OpennemData(\n data_type=units.unit_type,\n units=units.unit,\n # 
interval=interval,\n # period=period,\n history=history,\n )\n\n if include_code:\n data.code = group_code\n\n if network:\n data.network = network.code.lower()\n\n # *sigh* - not the most flexible model\n # @TODO fix this schema and make it more flexible\n if fueltech_group:\n data.fuel_tech = group_code\n\n data_comps = [\n # @NOTE disable for now since FE doesn't\n # support it\n network.country if network else None,\n network.code.lower() if network else None,\n region.lower() if region and region.lower() != network.code.lower() else None,\n \"fuel_tech\",\n group_code,\n units.unit_type,\n ]\n\n data.id = \".\".join(i for i in data_comps if i)\n # @TODO make this an alias\n data.type = units.unit_type\n\n if group_field:\n group_fields = []\n\n # setattr(data, group_field, group_code)\n\n if network:\n group_fields.append(network.country.lower())\n group_fields.append(network.code.lower())\n\n if region:\n if region.lower() != network.code.lower():\n group_fields.append(region.lower())\n\n if units.name_alias:\n group_fields.append(units.name_alias)\n\n elif units.unit_type:\n group_fields.append(units.unit_type)\n\n if group_code and include_group_code:\n group_fields.append(group_code)\n group_fields.append(group_field)\n\n data.id = \".\".join([f for f in group_fields if f])\n data.type = units.unit_type\n\n if data_id:\n data.id = data_id\n\n if not data.id:\n _id_list = []\n\n # @NOTE disable for now since FE doesn't\n # support it\n # network.country if network else None,\n\n if network:\n _id_list.append(network.code.lower())\n\n if region and (region.lower() != network.code.lower()):\n _id_list.append(region.lower())\n\n if group_code:\n _id_list.append(group_code.lower())\n\n if units and units.name_alias:\n _id_list.append(units.name_alias)\n elif units and units.name:\n _id_list.append(units.name)\n\n data.id = \".\".join([f for f in _id_list if f])\n data.type = units.unit_type\n\n if region:\n data.region = region\n\n stats_grouped.append(data)\n\n dt_now = datetime.now()\n\n if network:\n dt_now = dt_now.astimezone(network.get_timezone())\n\n # @NOTE this should probably be\n # country.network.region\n if not code:\n if network:\n code = network.code\n\n if region:\n code = region\n\n stat_set = OpennemDataSet(\n type=units.unit_type,\n data=stats_grouped,\n created_at=dt_now,\n version=get_version(),\n )\n\n if include_code:\n stat_set.code = code\n\n if network:\n stat_set.network = network.code\n\n if region:\n stat_set.region = region\n\n return stat_set", "title": "" }, { "docid": "deb5e09d696518bb9ca260b6650b3e58", "score": "0.5003878", "text": "def getStatistics(self) -> JSON:\n\t\treturn self.db.searchStatistics()", "title": "" }, { "docid": "f17044fdae15f605f297f20eddd6896a", "score": "0.49998108", "text": "def _compile_statistics(\n instance: Recorder, session: Session, start: datetime, fire_events: bool\n) -> set[str]:\n assert start.tzinfo == dt_util.UTC, \"start must be in UTC\"\n end = start + timedelta(minutes=5)\n statistics_meta_manager = instance.statistics_meta_manager\n modified_statistic_ids: set[str] = set()\n\n # Return if we already have 5-minute statistics for the requested period\n if execute_stmt_lambda_element(session, _get_first_id_stmt(start)):\n _LOGGER.debug(\"Statistics already compiled for %s-%s\", start, end)\n return modified_statistic_ids\n\n _LOGGER.debug(\"Compiling statistics for %s-%s\", start, end)\n platform_stats: list[StatisticResult] = []\n current_metadata: dict[str, tuple[int, StatisticMetaData]] = {}\n # Collect statistics 
from all platforms implementing support\n for domain, platform in instance.hass.data[DOMAIN].recorder_platforms.items():\n if not (\n platform_compile_statistics := getattr(\n platform, INTEGRATION_PLATFORM_COMPILE_STATISTICS, None\n )\n ):\n continue\n compiled: PlatformCompiledStatistics = platform_compile_statistics(\n instance.hass, start, end\n )\n _LOGGER.debug(\n \"Statistics for %s during %s-%s: %s\",\n domain,\n start,\n end,\n compiled.platform_stats,\n )\n platform_stats.extend(compiled.platform_stats)\n current_metadata.update(compiled.current_metadata)\n\n # Insert collected statistics in the database\n for stats in platform_stats:\n modified_statistic_id, metadata_id = statistics_meta_manager.update_or_add(\n session, stats[\"meta\"], current_metadata\n )\n if modified_statistic_id is not None:\n modified_statistic_ids.add(modified_statistic_id)\n _insert_statistics(\n session,\n StatisticsShortTerm,\n metadata_id,\n stats[\"stat\"],\n )\n\n if start.minute == 55:\n # A full hour is ready, summarize it\n _compile_hourly_statistics(session, start)\n\n session.add(StatisticsRuns(start=start))\n\n if fire_events:\n instance.hass.bus.fire(EVENT_RECORDER_5MIN_STATISTICS_GENERATED)\n if start.minute == 55:\n instance.hass.bus.fire(EVENT_RECORDER_HOURLY_STATISTICS_GENERATED)\n\n return modified_statistic_ids", "title": "" }, { "docid": "96cf28a86f5d4d80c84562fb0349bc2e", "score": "0.4993449", "text": "def _get_statistics(self):\n return self.__statistics", "title": "" }, { "docid": "8e6fcd64d66ccc6bf88004fb3ce9791a", "score": "0.4992436", "text": "def get_all_units(self):\n raise NotImplementedError()", "title": "" }, { "docid": "9bca55984a304dee04da3eb86fa352f2", "score": "0.4991302", "text": "def gather_Device_Statistics(self):\n\t\tself.num_actions = 0\n\t\tself.num_services = 0\n\t\tself.num_state_variables = 0\n\t\tfor entry in self.service_list:\n\t\t\tself.num_actions += entry.num_actions\n\t\t\tself.num_state_variables += entry.num_state_variables\n\t\tself.num_services = len(self.service_list)", "title": "" }, { "docid": "14320aa3aa654ef53339335e021d50fc", "score": "0.49894473", "text": "def ids(self) -> Generator[str, None, None]:\n # TODO: use dataset cache for ids\n if self.is_unit:\n for record_id in self._ids_unit():\n yield record_id\n else:\n units = self.units()\n ids = []\n for unit in units:\n for record_id in unit.ids():\n if not record_id in ids:\n ids.append(record_id)\n yield record_id", "title": "" }, { "docid": "fbe549b0b27035c0f87254b96f4b9fbb", "score": "0.49837592", "text": "def get_latest_short_term_statistics(\n hass: HomeAssistant,\n statistic_ids: set[str],\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n metadata: dict[str, tuple[int, StatisticMetaData]] | None = None,\n) -> dict[str, list[StatisticsRow]]:\n with session_scope(hass=hass, read_only=True) as session:\n # Fetch metadata for the given statistic_ids\n if not metadata:\n metadata = get_instance(hass).statistics_meta_manager.get_many(\n session, statistic_ids=statistic_ids\n )\n if not metadata:\n return {}\n metadata_ids = _extract_metadata_and_discard_impossible_columns(metadata, types)\n stmt = _latest_short_term_statistics_stmt(metadata_ids)\n stats = cast(\n Sequence[Row], execute_stmt_lambda_element(session, stmt, orm_rows=False)\n )\n if not stats:\n return {}\n\n # Return statistics combined with metadata\n return _sorted_statistics_to_dict(\n hass,\n session,\n stats,\n statistic_ids,\n metadata,\n False,\n StatisticsShortTerm,\n None,\n 
None,\n types,\n )", "title": "" }, { "docid": "7433076ea6b7ec3b404b29e91a77f8d4", "score": "0.49756172", "text": "def _all_samples(self):\n for s in self._all_sample_probe_details():\n yield s.sample", "title": "" }, { "docid": "87c15222e13a01111892879a0f07b73d", "score": "0.4966819", "text": "def update_statistics_metadata(\n instance: Recorder,\n statistic_id: str,\n new_statistic_id: str | None | UndefinedType,\n new_unit_of_measurement: str | None | UndefinedType,\n) -> None:\n statistics_meta_manager = instance.statistics_meta_manager\n if new_unit_of_measurement is not UNDEFINED:\n with session_scope(session=instance.get_session()) as session:\n statistics_meta_manager.update_unit_of_measurement(\n session, statistic_id, new_unit_of_measurement\n )\n if new_statistic_id is not UNDEFINED and new_statistic_id is not None:\n with session_scope(\n session=instance.get_session(),\n exception_filter=_filter_unique_constraint_integrity_error(instance),\n ) as session:\n statistics_meta_manager.update_statistic_id(\n session, DOMAIN, statistic_id, new_statistic_id\n )", "title": "" }, { "docid": "0dd5a70488a8d6b04e6c481054c3a671", "score": "0.49645737", "text": "def _generate_statistics_at_time_stmt(\n table: type[StatisticsBase],\n metadata_ids: set[int],\n start_time_ts: float,\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> StatementLambdaElement:\n stmt = _generate_select_columns_for_types_stmt(table, types)\n stmt += lambda q: q.join(\n (\n most_recent_statistic_ids := (\n select(\n func.max(table.start_ts).label(\"max_start_ts\"),\n table.metadata_id.label(\"max_metadata_id\"),\n )\n .filter(table.start_ts < start_time_ts)\n .filter(table.metadata_id.in_(metadata_ids))\n .group_by(table.metadata_id)\n .subquery()\n )\n ),\n and_(\n table.start_ts == most_recent_statistic_ids.c.max_start_ts,\n table.metadata_id == most_recent_statistic_ids.c.max_metadata_id,\n ),\n )\n return stmt", "title": "" }, { "docid": "00e627d60b6172577dcae624a59de6d1", "score": "0.4961656", "text": "def getStatistics(listInts):\n result = []\n df = pd.DataFrame()\n df['data'] = listInts\n\n\n\n result.append(df['data'].min())\n result.append(df['data'].max())\n result.append(df['data'].mean())\n result.append(df['data'].mad())\n result.append(df['data'].std())\n result.append(df['data'].var())\n result.append(df['data'].skew())\n result.append(df['data'].kurtosis())\n for value in [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:\n result.append(df['data'].quantile(q=value))\n result.append(len(listInts))\n\n return result", "title": "" }, { "docid": "d311aa9b0304695c038e0f8884a144ec", "score": "0.4961016", "text": "def get_unit_symbols(qids: List[str]) -> List[Dict]:\n print(datetime.datetime.now(), f\"Starting with unit symbols\")\n print(f\"Total unit symbols to extract: {len(qids)}\")\n item_count = 0\n extract_dicts = []\n chunk_size = 50 # The chunksize 50 is allowed by the wikidata api, bigger numbers need special permissions\n id_chunks = chunks(list(qids), chunk_size)\n for chunk in id_chunks:\n query_result = wikidata_entity_request(chunk, props=[CLAIMS], timeout=10)\n\n if ENTITIES not in query_result:\n logger.error(\"Skipping chunk\")\n continue\n\n for result in query_result[ENTITIES].values():\n try:\n qid = result[ID]\n except Exception as error:\n logger.error(\"Error on qid, skipping item. 
Error: {0}\".format(error))\n continue\n\n unit_symbol = map_wd_attribute.try_get_unit_symbol(\n result, PROPERTY_NAME_TO_PROPERTY_ID[UNIT_SYMBOL], UNIT_SYMBOL\n )\n\n subject_dict = {ID: qid, UNIT_SYMBOL: unit_symbol}\n extract_dicts.append(subject_dict)\n\n item_count += len(chunk)\n print(f\"Status of unit symbols: {item_count}/{len(qids)}\", end=\"\\r\", flush=True)\n\n print(datetime.datetime.now(), f\"Finished with unit symbols\")\n return extract_dicts", "title": "" }, { "docid": "5d428e68f9e87d675d0ec998dc1ac3ba", "score": "0.4960945", "text": "def test_collect_statistics(self):\n stat = MyStats()\n seq = [ ('Init', stat._seq + 1),\n ('Auth', stat._seq + 2),\n ('Auth', stat._seq + 2) ]\n ret = [('Init', 'frominit', {'boot_time': '2013-01-01T00:00:00Z'}),\n ('Auth', 'fromauth1', {'queries.tcp': 100}),\n ('Auth', 'fromauth2', {'queries.udp': 200})]\n stat._answers = [\n (create_answer(0, r[2]), {'from': r[1]}) for r in ret\n ]\n self.assertListEqual(ret, stat._collect_statistics(seq))", "title": "" }, { "docid": "37ab2c39f81f4fae631b4389b9dcf10f", "score": "0.49587792", "text": "def _generate_statistics_during_period_stmt(\n start_time: datetime,\n end_time: datetime | None,\n metadata_ids: list[int] | None,\n table: type[StatisticsBase],\n types: set[Literal[\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"]],\n) -> StatementLambdaElement:\n start_time_ts = start_time.timestamp()\n stmt = _generate_select_columns_for_types_stmt(table, types)\n stmt += lambda q: q.filter(table.start_ts >= start_time_ts)\n if end_time is not None:\n end_time_ts = end_time.timestamp()\n stmt += lambda q: q.filter(table.start_ts < end_time_ts)\n if metadata_ids:\n stmt += lambda q: q.filter(\n # https://github.com/python/mypy/issues/2608\n table.metadata_id.in_(metadata_ids) # type:ignore[arg-type]\n )\n stmt += lambda q: q.order_by(table.metadata_id, table.start_ts)\n return stmt", "title": "" }, { "docid": "70fd2f2eee7e122568c1c4887ef6c506", "score": "0.4951421", "text": "def collect_statistics( self, column_names = None, options = {} ):\n response = self.db.collect_statistics( self.qualified_name,\n column_names, options )\n if not response.is_ok():\n raise GPUdbException( response.get_error_msg() )\n\n return response", "title": "" }, { "docid": "c0b7ddb55460d775fa1d31e67d1d1686", "score": "0.49507153", "text": "def calc_statistics(timings):\n median = statistics.median(timings)\n min_timing = min(timings)\n max_timing = max(timings)\n average = sum(timings) / len(timings)\n stddev = statistics.stdev(timings)\n percentile_10 = np.percentile(timings, 10)\n\n return median, min_timing, max_timing, average, stddev, percentile_10", "title": "" }, { "docid": "1c6088a7c3ccf00dc80ebe080beb6eda", "score": "0.49472648", "text": "def get_stats_id(self, id):\n\n n_filter = 1\n lang = globals()[\"thesis\" + str(id)].language\n study = globals()[\"thesis\" + str(id)].study\n filename = globals()[\"thesis\" + str(id)].name\n\n token_counter, lemma_counter, n_sent, longest_sentences, entity_counter = globals()[\"thesis\" + str(id)].parse()\n\n #Calling the statistics!\n calculate_statistics(lang, study, token_counter, lemma_counter, longest_sentences, entity_counter, n_filter, n_sent, id=id, filename=filename)", "title": "" }, { "docid": "0b6013fe0717ce867ed479e553dfdc5e", "score": "0.49432722", "text": "def getStatistics(self):\n return self._stats", "title": "" }, { "docid": "e0390d2e396bab8bd2becf8977130bfa", "score": "0.49322775", "text": "def _get_all_instruments(self):\n r = 
requests.get(self._url + \"all\")\n self._instruments_list.delete(0, tk.END)\n for s in r.json():\n self._instruments_list.insert(tk.END, s['instrument_type'] + ' ' + str(s['id']))\n if s['instrument_type'] == \"piano\":\n self._instruments_list.itemconfig(tk.END, {'fg': 'blue'})", "title": "" } ]
6e23d6d89d173283541b4f362872abe2
Sums the ASCII character values mod256 and returns the lower byte of the two's complement of that value
[ { "docid": "16e9820e599d012efae3f474349ed2fc", "score": "0.5380465", "text": "def get_checksum(file):\n sum = 0\n with open(file, 'r') as f:\n data = f.read()\n for i in range(len(data)):\n sum = sum + ord(data[i])\n temp = sum % 256\n rem = -temp\n return '%2X' % (rem & 0xFF)", "title": "" } ]
[ { "docid": "5219472d468a1de5c385225eaff14233", "score": "0.65635484", "text": "def SignedChar(value):\n if value < 0:\n # Perform two's complement.\n return value + 256\n return value", "title": "" }, { "docid": "535d30b8d513bdbc8168cea6c4f9e15f", "score": "0.6562197", "text": "def fletcher32(string):\n a = list(map(ord, string))\n b = [sum(a[:i])%65535 for i in range(len(a)+1)]\n return chr((sum(b) >> 8) & 255) + chr((sum(b)) & 255) + chr((max(b) >> 8) & 255) + chr((max(b)) & 255)", "title": "" }, { "docid": "535d30b8d513bdbc8168cea6c4f9e15f", "score": "0.6562197", "text": "def fletcher32(string):\n a = list(map(ord, string))\n b = [sum(a[:i])%65535 for i in range(len(a)+1)]\n return chr((sum(b) >> 8) & 255) + chr((sum(b)) & 255) + chr((max(b) >> 8) & 255) + chr((max(b)) & 255)", "title": "" }, { "docid": "1b491b6d016d0bda43fe16b415547dfd", "score": "0.63879734", "text": "def crc(t):\n sum = 0\n for v in t:\n sum += v\n sum &= 0xff\n return 0xff - sum + 1", "title": "" }, { "docid": "7d818b6a7d1c309eca5cfc7320b16245", "score": "0.6366621", "text": "def twoscomp(hexstring):\n val = int(hexstring, 16)\n binstring = \"{0:0{1}b}\".format(int(hexstring, 16), 32)\n if binstring[0] == \"1\":\n val -= (1 << 32)\n return val", "title": "" }, { "docid": "f46521b9d6fadac5bcbdf200856a33bc", "score": "0.6039973", "text": "def checksum(payload):\n return sum( c for c in payload ) % 256", "title": "" }, { "docid": "c7b98d622e2016698d65f4b3a1f9228f", "score": "0.60043573", "text": "def weakchecksum(data):\n a = b = 0\n l = len(data)\n\n for i in range(l):\n n = ord(data[i])\n a += n\n b += (l - i) * n\n\n return (b << 16) | a, a, b", "title": "" }, { "docid": "8c4f429d7c91767712882805046136e7", "score": "0.5933095", "text": "def str_sum(data):\n if data is None:\n return 0\n sum = 0\n for x in data:\n sum += ord(x)\n return sum", "title": "" }, { "docid": "ebe898a189e0813a6de1333a743d3c30", "score": "0.5917493", "text": "def decrypto(self, c):\n stuff = self.simpleConvertToAscii(c)\n aux = list()\n for x in stuff:\n aux.append(pow(x, self.privateKey, self.publicKey[0]))\n return self.backToChar(aux)", "title": "" }, { "docid": "ef4151eb6094fc558ad79b9572dd2bb5", "score": "0.59016865", "text": "def single_byte_xor_cipher(ciphertext: bytes) -> typing.Tuple[bytes, bytes, bytes]:\n candidates = [(bytes([i]), bytes([i^c for c in ciphertext]), ciphertext) for i in range(256)]\n return max(candidates, key=lambda t: score_english_similarity(t[1]))", "title": "" }, { "docid": "5a85b8635cd61a0f43395a1b78cc670c", "score": "0.58995306", "text": "def byte_cat(items):\r\n items = list(items)\r\n items.reverse()\r\n cauldron = 0\r\n for i in range(len(items)):\r\n cauldron ^= items[i] << (i * 8)\r\n return cauldron", "title": "" }, { "docid": "83462ebb8e047eec071f3da60c5aed01", "score": "0.5895105", "text": "def checksum(string):\n csum = 0\n count_to = (len(string) // 2) * 2\n count = 0\n while count < count_to:\n this_val = string[count + 1] * 256 + string[count]\n csum = csum + this_val\n csum = csum & 0xffffffff\n count = count + 2\n if count_to < len(string):\n csum = csum + string[len(string) - 1]\n csum = csum & 0xffffffff\n csum = (csum >> 16) + (csum & 0xffff)\n csum = csum + (csum >> 16)\n answer = ~csum\n answer = answer & 0xffff\n answer = answer >> 8 | (answer << 8 & 0xff00)\n return answer", "title": "" }, { "docid": "657b92674fc885201ce4f1e88b3e2755", "score": "0.58692515", "text": "def anc_unsignedbytes( data ):\r\n negative = False # data[0] & 0x80 > 0\r\n\r\n if negative:\r\n inverted = bytearray(~d 
% 256 for d in data)\r\n return -signedbytes(inverted) - 1\r\n\r\n encoded = str(data).encode('hex')\r\n return int(encoded, 16)", "title": "" }, { "docid": "1dd00792dd4338ceaf94a362ec0a1404", "score": "0.58569694", "text": "def calc_sha256sum(self):\n return hashlib.sha256(open(self.tmp, 'rb').read()).hexdigest()", "title": "" }, { "docid": "4c76abe0930ccffe8fb51c1a8d6b6531", "score": "0.58474636", "text": "def single_byte_xor(ciphertext):\n\n possibles = []\n for b in range(256):\n key = bytes([b])\n out = decrypt.byte_key_xor(ciphertext, key)\n score = measure.english_frequency_score(out)\n possibles.append((key, out, score))\n\n possibles.sort(key=lambda x: x[2], reverse = True)\n\n return (possibles[0][0], possibles[0][1])", "title": "" }, { "docid": "16a8de5a47f540c5ef12a864cd0691a0", "score": "0.5805788", "text": "def sha256(data: bytes) -> bytes:\n pass", "title": "" }, { "docid": "83eab3a3c8951f4a158e3bb5659cc9ea", "score": "0.5750946", "text": "def checksum(s, m):\n # note, I *think* it's possible to have unicode chars in\n # a twitter handle. That makes it a bit interesting. \n # We don't handle unicode yet, just ASCII\n total = 0\n for ch in s:\n # no non-printable ASCII chars, including space\n # but we want \"!\" to represent an increment of 1, hence 32 below\n total = (total + ord(ch)-32) % m\n return total", "title": "" }, { "docid": "85d68c8bd488d86166747aca0a3a128c", "score": "0.57343954", "text": "def java_string_hashcode(s):\n h = 0\n for c in s:\n h = (31 * h + ord(c)) & 0xFFFFFFFF\n return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000", "title": "" }, { "docid": "ccd56c352dfa35624977f6afe7b727fb", "score": "0.570774", "text": "def compact(number):\n\n base256 = []\n while number:\n number, byte = divmod(long(number), 256)\n base256.insert(0, byte)\n\n if base256[0] > 127:\n base256.insert(0, 0)\n\n # Save original length then zero-pad the end\n length = len(base256)\n while len(base256) < 3:\n base256.append(0)\n\n return bytearray([length] + base256[:3])", "title": "" }, { "docid": "07f825af582814ce5367f43fa8f52a34", "score": "0.56950897", "text": "def f(w):\n a = 97\n l = 26\n c = [0]*l\n for i in w[:k]:\n j = ord(i)-a\n c[j] += 1\n return ''.join([str(x) for x in c])", "title": "" }, { "docid": "5ccb4bd48c4c988195c38cdd8ee0e2b7", "score": "0.56914383", "text": "def calccrc(value):\n crc = 0xffffffff\n for i in range(0, len(value)):\n crc &= 0xffffffff\n #print ((crc >> 24) ^ ord(s[i:i+1]))\n crc = (crc << 8) ^ TABLE[(crc >> 24) ^ ord(value[i:i+1])]\n return 0xffffffff^(crc&0xffffffff)", "title": "" }, { "docid": "e7ce22380a10862e17f931e302e741e0", "score": "0.567827", "text": "def __IntChksum(byte_msg):\r\n\ttotal = 0\r\n\tlength = len(byte_msg)\t#length of the byte message object\r\n\ti = 0\r\n\twhile length > 1:\r\n\t\ttotal += ((byte_msg[i+1] << 8) & 0xFF00) + ((byte_msg[i]) & 0xFF)\r\n\t\ti += 2\r\n\t\tlength -= 2\r\n\r\n\tif length > 0:\r\n\t\ttotal += (byte_msg[i] & 0xFF)\r\n\r\n\twhile (total >> 16) > 0:\r\n\t\ttotal = (total & 0xFFFF) + (total >> 16)\r\n\r\n\ttotal = ~total\r\n\r\n\treturn total & 0xFFFF", "title": "" }, { "docid": "e7ce22380a10862e17f931e302e741e0", "score": "0.567827", "text": "def __IntChksum(byte_msg):\r\n\ttotal = 0\r\n\tlength = len(byte_msg)\t#length of the byte message object\r\n\ti = 0\r\n\twhile length > 1:\r\n\t\ttotal += ((byte_msg[i+1] << 8) & 0xFF00) + ((byte_msg[i]) & 0xFF)\r\n\t\ti += 2\r\n\t\tlength -= 2\r\n\r\n\tif length > 0:\r\n\t\ttotal += (byte_msg[i] & 0xFF)\r\n\r\n\twhile (total >> 16) > 0:\r\n\t\ttotal = (total & 
0xFFFF) + (total >> 16)\r\n\r\n\ttotal = ~total\r\n\r\n\treturn total & 0xFFFF", "title": "" }, { "docid": "7ab9723ffb2cd4d71e64022616b5e158", "score": "0.56654245", "text": "def get_char(bits: List[int]) -> str:", "title": "" }, { "docid": "8f3b90055cc62b401d58a00b89bd862b", "score": "0.5652867", "text": "def getEntropy(data):\n if not data:\n return 0\n\n entropy = 0\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy += - p_x * math.log(p_x, 2)\n\n return entropy", "title": "" }, { "docid": "f294f66bf89c0893fc7f1f70e2982029", "score": "0.56330794", "text": "def convert_to_32(data):\n return data + (32 - len(data) % 32) * chr(32 - len(data) % 32)", "title": "" }, { "docid": "891fe378e9ffa7434a5930adb9c1c29a", "score": "0.56241745", "text": "def secure_compare(val1, val2):\n if len(val1) != len(val2):\n return False\n result = 0\n if isinstance(val1, bytes) and isinstance(val2, bytes):\n for x, y in zip(val1, val2):\n result |= x ^ y\n else:\n for x, y in zip(val1, val2):\n result |= ord(cast(str, x)) ^ ord(cast(str, y))\n return result == 0", "title": "" }, { "docid": "31f3db01217dbfe238599ce93195f833", "score": "0.5617119", "text": "def find_ASCII(s):\n return sum(map(ord, s))", "title": "" }, { "docid": "a98e8357ab0e482360e392a663c12ac4", "score": "0.5611372", "text": "def twocomplement(word):\n\tb = bitstring.Bits(uint=word, length=16)\n\treturn b.int", "title": "" }, { "docid": "7cde583c74929b26ce4108ed6e70c883", "score": "0.5599464", "text": "def complement(s):\n letters = list(s)\n letters = [baseComplement[base] for base in letters]\n return ''.join(letters)", "title": "" }, { "docid": "bbab69954ca68b21534f1a062186dc44", "score": "0.5597621", "text": "def _validateChecksum(self, data):\n total = 0\n for byte in data[:-1]:\n total = total + ord(byte)\n return (256-(total%256)==ord(data[-1]))", "title": "" }, { "docid": "4000f600695f12dbe20c021a07ba07a8", "score": "0.55966353", "text": "def _calculateChecksum(self):\n data = chr(self.destination)+chr(self.length)+chr(self.source)+\\\n repr(self.payload)\n total = 0\n for byte in data:\n total = total + ord(byte)\n return 256-(total%256)", "title": "" }, { "docid": "d1cc7918f2d9ac63049976dd4d330e5d", "score": "0.5587969", "text": "def CryptoLevel1(text, encoding='utf-8', errors='surrogatepass'):\r\n bits = bin(int.from_bytes(text.encode(encoding, errors), 'big'))[2:]\r\n return bits.zfill(8 * ((len(bits) + 7) // 8))", "title": "" }, { "docid": "f03817015f1b7a09a2e6d89933731c3a", "score": "0.55768156", "text": "def getEntropy(data):\n\n if not data:\n return 0\n\n entropy = 0\n for x in range(256):\n p_x = float(data.count(chr(x)))/len(data)\n if p_x > 0:\n entropy += - p_x*math.log(p_x, 2)\n\n return entropy", "title": "" }, { "docid": "fffe3b6aeb549504f2e1c67aa7af6bf9", "score": "0.556637", "text": "def magic_transfer(p):\n return int('{:032b}1'.format(p)[::-1], 2)", "title": "" }, { "docid": "181fba12d562ae32192301e3b84d107c", "score": "0.5546626", "text": "def calcString(self, st, crc):\r\n\t for ch in st:\r\n\t crc = (crc >> 8) ^ self.table[(crc ^ (ch)) & 0xFF] #ord(ch) \r\n\t return crc", "title": "" }, { "docid": "b07f34869d939b58a3438045c4d6fe0d", "score": "0.5541175", "text": "def calc_encryption_weakness(a):\n return min(a) + max(a)", "title": "" }, { "docid": "fedf9cb3109906f91dcd3bc2684058ef", "score": "0.5537733", "text": "def decr_func_orig(a1):\n return (0x9E - 0x11 * ((a1 - 0x22) ^ 0xAD)) & 0xFF", "title": "" }, { "docid": "c02f0b794d4375b1f45630ee3c01a203", "score": 
"0.5531426", "text": "def get_hash(s):\n\n return str(int(hashlib.sha256(s.encode(\"utf-8\")).hexdigest(), 16) % 10**16)", "title": "" }, { "docid": "a862dc63cf8541daa9fd830d9c62901e", "score": "0.552477", "text": "def get_string_val(check_string):\r\n stringval = 0\r\n check_string = check_string.lower()\r\n for char in check_string:\r\n if char in string.lowercase:\r\n stringval += ord(char) - ord('a')\r\n else:\r\n stringval -= 1\r\n return stringval", "title": "" }, { "docid": "3d6d10cfad02aaf7e6548e8f3e75c27b", "score": "0.55132645", "text": "def calcCRC(s):\n\n cs = 0\n\n for c in iterbytes(s):\n cs = crc_table[cs ^ c]\n\n return cs", "title": "" }, { "docid": "4dbb6fcb50eaa25da8bc5c304473ee4b", "score": "0.55126196", "text": "def get_bytes(data):\n data = str(data)\n return int(len(sanatize_hex(data)) / 2)", "title": "" }, { "docid": "2c098e3f666807e6ec86364cbd5c801d", "score": "0.55019987", "text": "def calculate_checksum(data, skip=16):\n acc = 0x6363\n for byte in data:\n if skip > 0:\n skip -= 1\n else:\n acc ^= byte\n acc = ((acc >> 8) | (acc << 8)) & 0xffff\n acc ^= ((acc & 0xff00) << 4) & 0xffff\n acc ^= (acc >> 8) >> 4\n acc ^= (acc & 0xff00) >> 5\n return acc", "title": "" }, { "docid": "807907f29707ca0b94dca53d1d747b68", "score": "0.5497826", "text": "def compute_checksum(line):\n return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10", "title": "" }, { "docid": "92fe069507f3f8c9b4214610db4464a2", "score": "0.5490721", "text": "def checksum(msg: bytes):\n if len(msg) % 2:\n msg += b'0'\n\n s = 0\n for i in range(0, len(msg), 2):\n w = msg[i] + (msg[i + 1] << 8)\n s = carry_around_add(s, w)\n return ~s & 0xffff", "title": "" }, { "docid": "e95e82b4bc91bb2778d36089fe8983e3", "score": "0.54772484", "text": "def _additive_hash(self, key):\n return sum([ord(c) for c in key])", "title": "" }, { "docid": "c51b8ffd7983f33b91ae99ae14937bfa", "score": "0.5473909", "text": "async def sha256Command(self, ctx: commands.Context, *, text: str = \"\"):\n result = sha256(text.encode(\"utf-8\"))\n await ctx.reply(f\"**Hex:** `{result.hexdigest()}`\\n**Base64:** `{base64.b64encode(result.digest()).decode('utf-8')}`\", mention_author=False)", "title": "" }, { "docid": "b42c54a6cf3962cfb3efc277a2771c12", "score": "0.5470983", "text": "def djb2(self, key):\n # Your code here\n hash = 5381\n for character in key:\n hash = ((hash << 5)+hash)+ord(character)\n return hash & 0xFFFFFFFF", "title": "" }, { "docid": "5f93a2a5ab0e385804e69f6350bad158", "score": "0.546272", "text": "def hash_string_256(string):\n return hashlib.sha256(string).hexdigest()", "title": "" }, { "docid": "08c47739109403562ac10378a8232f61", "score": "0.546153", "text": "def hash_function(self, x):\n h = 0\n for char in x:\n h += ord(char)\n return h % self.MAX", "title": "" }, { "docid": "a160d79ca03b293d563b5fcd9c018735", "score": "0.54549515", "text": "def _SHA256(self, string):\n md = hashlib.sha256()\n md.update(string.encode())\n return md.digest()", "title": "" }, { "docid": "c4c4d51e6f169e7c665eb613ee8242ba", "score": "0.54511184", "text": "def hash_character(c):\n h = c.encode()\n for n in range(10000):\n h = hashlib.sha256(h).digest()\n return h.hex()", "title": "" }, { "docid": "1051824eabd2c61814eca2c8f2c1cb19", "score": "0.5434702", "text": "def get_fitness_factor(self,string):\n k = len(string)\n sum = 0\n for i in range(0,k):\n sum += ord(string[i])\n return sum", "title": "" }, { "docid": "51535e66eca41a3f2838c9ae42ea21ca", "score": "0.5432792", "text": "def _crc(string):\n return 
binascii.crc_hqx(bytes(string, 'utf-8'), 0xffff)", "title": "" }, { "docid": "da821c07f9cef8255f582324d88d9d79", "score": "0.54264253", "text": "def hashChar(c):\n\treturn ord(c)", "title": "" }, { "docid": "6c147e57fb125ec29d7962494f71f468", "score": "0.542567", "text": "def decryptBytes(data):", "title": "" }, { "docid": "e2559d38fee410dd4d62784a13485fea", "score": "0.54155725", "text": "def single_char_xor_detection() -> None:\n with open(\"s1c4_in.txt\") as f:\n decrypted = max((single_byte_xor_cipher(bytes.fromhex(line.rstrip())) for line in f), key=lambda t: score_english_similarity(t[1]))", "title": "" }, { "docid": "e3041446c6a46587461fc3b9a25160e6", "score": "0.5415109", "text": "def hash(self, val):\n if type(val) == int:\n return val % len(self.hash_table)\n if type(val) == str:\n sum = 0\n for i in range(len(val)):\n sum += ord(val[i])\n return sum % len(self.hash_table)", "title": "" }, { "docid": "5cc67ce60e92973d393c6d9a996d8716", "score": "0.5389251", "text": "def checksum(self):\n return sum(self.addends()) % 10", "title": "" }, { "docid": "507d95e54766c94c34cdcde10e929b14", "score": "0.5374882", "text": "def hash_function(self, x):\r\n if not x:\r\n return -1\r\n hashed_value = 0\r\n\r\n for char in x:\r\n hashed_value = 181 * hashed_value + ord(char)\r\n\r\n return hashed_value % self.capacity", "title": "" }, { "docid": "670126c66ec57b27d12438201d754ba2", "score": "0.5374865", "text": "def gethash(string, cap=0xffffffff):\n return hash(string) & cap", "title": "" }, { "docid": "1b9d15088f92c7590b12210fbfc5eba7", "score": "0.5373673", "text": "def decode_64(s):\n return sum([(ord(s[idx])-48)\n *(64**(len(s)-idx-1)) for idx in (xrange(len(s)))])", "title": "" }, { "docid": "c31b8ba323098c91f8b82c0fe7b0f048", "score": "0.5373312", "text": "def num_lower(val):\n return hex(int(\"{:032b}\".format(val)[-16:], 2))", "title": "" }, { "docid": "2ef323657b3ca771df7e3f5ccb725c46", "score": "0.53732747", "text": "def ncrypt(string):\n click.echo(hashlib.sha256(string.encode()).hexdigest())", "title": "" }, { "docid": "f5059ae57cb261edf0d59f1ea60b79f1", "score": "0.53718495", "text": "def compute_checksum(self, data): #def compute_checksum(register, data):\n xor = 0 #xor = register\n for data_byte in data:\n xor ^= data_byte\n return xor", "title": "" }, { "docid": "3bff703e45e3e566d3d72ef7ce84906d", "score": "0.5370793", "text": "def combine_characters_threshold(self):\n return 384*8", "title": "" }, { "docid": "24635203da184843a2c67d99dbe4004f", "score": "0.5360275", "text": "def decode(s):\r\n decoded = 0\r\n multi = 1\r\n s = s[::-1]\r\n for char in s:\r\n decoded += multi * alphabet.index(char)\r\n multi = multi * base_count\r\n \r\n return decoded", "title": "" }, { "docid": "2442753499e1689e470b51dd0bf4d391", "score": "0.5359262", "text": "def as_twos_comp(value: int) -> int:\n if value & 0x8000_0000 != 0:\n # negative\n flipped_value = (~value) & 0xFFFF_FFFF\n return -(flipped_value + 1)\n else:\n # positive\n return value", "title": "" }, { "docid": "cc8f969663aa186e7f106ef05b5b9ab1", "score": "0.5350484", "text": "def getimm(hexstring, signed = True):\n if signed: return twoscomp(hexstring)\n else: return int(hexstring, 16)", "title": "" }, { "docid": "e369fad2a883db4bd9d1d1c4dcb91130", "score": "0.5343777", "text": "def hash_string(self, string):\n total = 0\n for c in range(len(string)):\n total += ord(string[c]) * (c+1)\n\n return total % len(self.table)", "title": "" }, { "docid": "ca5a09681dff33951bdc2c7c213fb8c0", "score": "0.5319948", "text": "def sha256(string, 
hex_digest=True):\n return _crypt('sha256', string, hex_digest)", "title": "" }, { "docid": "9cbb46cb51f2dc9124b3d20f0866e468", "score": "0.53187555", "text": "def hexint(mask):\n if mask >= 2**31:\n return int(mask-2**32)\n return mask", "title": "" }, { "docid": "1f15d49110dace361e858327c336c73b", "score": "0.5318015", "text": "def hash(self,key):\n hash = 0\n key = str(key)\n for value in key :\n hash += ord(value)\n return (hash *3) % self.size", "title": "" }, { "docid": "bdaa3ba299b9376e0cbacbd3615714d4", "score": "0.53131187", "text": "def _twos_complement(self, num):\r\n # TODO: Switch to something like the line below\r\n # pv_val = int('{:b}'.format(abs(int(pv_val) - (1 << pv_len)))[-pv_len:], 2)\r\n tmp = '{:b}'.format(num)\r\n tmp = tmp.replace('0', '2')\r\n tmp = tmp.replace('1', '0')\r\n tmp = tmp.replace('2', '1')\r\n\r\n while len(tmp) < self.bit_len:\r\n tmp = '1' + tmp\r\n return int(tmp, 2) + 1", "title": "" }, { "docid": "2fd8c09ebe93b05112231a4a49e75ce8", "score": "0.5311311", "text": "def checksum(byte_string):\n\n if len(byte_string) % 2 == 0:\n # Divided byte_string in to separate parts by 16 bits and put those parts into list.\n bit_block_list = [(('%02x' % x) + ('%02x' % y)) for x, y in zip(byte_string[0::2], byte_string[1::2])]\n first_sum = 0\n for value in bit_block_list:\n first_sum += eval('0x' + value)\n if len(hex(first_sum)) == 6:\n result = ~first_sum & 0xffff # Bitwise complement operation\n return result\n elif len(hex(first_sum)) > 6:\n temp_hex = eval('0x' + hex(first_sum)[-4:]) + eval('0x' + hex(first_sum)[2:-4])\n result = ~temp_hex & 0xffff # Bitwise complement operation\n return result\n else:\n byte_string += b'\\x00'\n # Divided byte_string in to separate parts by 16 bits and put those parts into list.\n bit_block_list = [(('%02x' % x) + ('%02x' % y)) for x, y in zip(byte_string[0::2], byte_string[1::2])]\n first_sum = 0\n for value in bit_block_list:\n first_sum += eval('0x' + value)\n if len(hex(first_sum)) == 6:\n result = ~first_sum & 0xffff # Bitwise complement operation\n return result\n elif len(hex(first_sum)) > 6:\n temp_hex = eval('0x' + hex(first_sum)[-4:]) + eval('0x' + hex(first_sum)[2:-4])\n result = ~temp_hex & 0xffff # Bitwise complement operation\n return result", "title": "" }, { "docid": "235b1dffc51145efc381d48d81d266e8", "score": "0.53020114", "text": "def sum_value(key,message):\n\tkey_idx = 0\n\ttotal = 0\n\tfor item in cipher:\n\t\ttotal+=item^key[key_idx]\n\t\tkey_idx+=1\n\t\tkey_idx=key_idx%3\n\treturn total", "title": "" }, { "docid": "08b05cb2333f82a885b283899a3de950", "score": "0.5298645", "text": "def calcByte(self, ch, crc):\r\n\t if type(ch) == type(\"c\"):\r\n\t by = ord( ch)\r\n\t else:\r\n\t by = ch\r\n\t crc = (crc >> 8) ^ self.table[(crc ^ by) & 0xFF]\r\n\t return (crc & 0xFFFF)", "title": "" }, { "docid": "1bbd16c1667cb13302643c588ff0d899", "score": "0.5298351", "text": "def decrypt(shift):\n erray = []\n myarray = list(const.cipherText)\n for x in myarray:\n a = (ord(x) - (shift % const.alphabet))\n if a < ord('A'):\n a += const.alphabet\n b = chr(a)\n erray.append(b)\n return ''.join(erray)", "title": "" }, { "docid": "1afb9a68c79c1fe107808f0d8180bf64", "score": "0.5296995", "text": "def calculate_checksum(bytes):\n # null pad bytes to ensure 16-bit values\n if len(bytes) % 2:\n bytes += b'\\x00'\n n_values = len(bytes) / 2\n values = struct.unpack('%dH' % n_values, bytes)\n sum = functools.reduce(operator.add, values)\n sum = (sum >> 16) + (sum & 0xFFFF)\n sum += sum >> 16\n return (~sum) & 0xFFFF", 
"title": "" }, { "docid": "9a20b0251a40b22cb0a855c1a9787391", "score": "0.529693", "text": "def int(val):\n sign = -1 if val >> 7 == 1 else 1\n value = val & 0x7F\n if sign == 1:\n return value\n else:\n return -1 * (128 - value)", "title": "" }, { "docid": "41a85d2b9ffaaa01c8d3c1619cb3b06e", "score": "0.52916664", "text": "def knothash(buf):\n\n data = [ord(i) for i in buf]\n\n endseq = [17, 31, 73, 47, 23]\n data.extend(endseq)\n\n pos = 0\n skp = 0\n lst = list(range(0,256))\n llen = len(lst)\n for i in range(64):\n for length in data:\n # Triple list to wrap around freely\n tmp = lst + lst + lst\n side1 = tmp[pos:pos+length]\n side2 = tmp[pos+length:pos+llen]\n side1.reverse()\n # Double result list so we can get it with proper offset\n tmp = side1 + side2 + side1 + side2\n lst = tmp[pos:llen+pos]\n lst = tmp[llen-pos:2*llen-pos]\n pos += length + skp\n while pos >= llen:\n pos -= llen\n skp += 1\n\n res = bytes()\n for sli in [slice(i, i+16) for i in range(0, 256, 16)]:\n val = 0\n for i in lst[sli]:\n val ^= i\n res += bytes([val],)\n\n return(res)", "title": "" }, { "docid": "e269258da4768547a2c7836608ce9ebb", "score": "0.5287815", "text": "def rc4(data, key):\n S, j, out = list(range(256)), 0, []\n\n for i in range(256):\n j = (j + S[i] + ord(key[i % len(key)])) % 256\n S[i], S[j] = S[j], S[i]\n\n i = j = 0\n for ch in data:\n i = (i + 1) % 256\n j = (j + S[i]) % 256\n S[i], S[j] = S[j], S[i]\n out.append(chr(ord(ch) ^ S[(S[i] + S[j]) % 256]))\n\n if PY_VER is 2:\n return \"\".join(out)\n else:\n return bytes(\"\".join(out), 'utf-8')", "title": "" }, { "docid": "0e9ed7e9c7c8573ec475e4ebd82ad57b", "score": "0.5287088", "text": "def let_to_numb(str):\n return ord(str.upper()) - 64", "title": "" }, { "docid": "586322ab15fbeed345fd2e04ad74e0ee", "score": "0.52863634", "text": "def icmp_checksum(message):\n res = 0\n for value in message:\n res += value\n res = (res & 0xffff) + (res >> 16)\n return ~res & 0xffff", "title": "" }, { "docid": "7ebe5bd6a14731964cd91ac1c8f469a0", "score": "0.52784956", "text": "def rc4(data, key):\n S, j, out = list(range(256)), 0, []\n\n for i in range(256):\n j = (j + S[i] + ord(key[i % len(key)])) % 256\n S[i], S[j] = S[j], S[i]\n\n i = j = 0\n for ch in data:\n i = (i + 1) % 256\n j = (j + S[i]) % 256\n S[i], S[j] = S[j], S[i]\n out.append(chr(ch ^ S[(S[i] + S[j]) % 256]))\n\n return \"\".join(out)", "title": "" }, { "docid": "5aab48eeb14d65f764017d84edb851d9", "score": "0.5278245", "text": "def sha256_content_checksum(data):\n sha256 = hashlib.sha256()\n sha256.update(data)\n return base64.urlsafe_b64encode(sha256.digest()).rstrip(b'=')", "title": "" }, { "docid": "8f5ca639a140f3dd897fd9810babd9ac", "score": "0.5275157", "text": "def exp256(e):\n assert isinstance(e, six.integer_types)\n assert e >= 0\n return 1 << (e << 3) # == 2**(e*8) == (2**8)**e == 256**e", "title": "" }, { "docid": "b2069544b73d1d6619017a27c32d80c6", "score": "0.5273176", "text": "def f1(self, s1, s2):\r\n return ''.join(chr(ord(x1) ^ ord(x2)) for x1, x2 in zip(s1, s2))", "title": "" }, { "docid": "c5cc951fd9c83f4fe609df80eec35b24", "score": "0.52717006", "text": "def decrypt(c, x, G):\n \n c1, c2 = c\n s = G.mul(c1, x)\n # m = c2 xor H(c1*x)\n hs = sha256(repr(s).encode('utf-8')).digest()\n m = bytearray([i ^ j for i,j in zip(c2, hs)])\n return bytes(m)", "title": "" }, { "docid": "59fd0cd0475fb995a15e488a48752659", "score": "0.5265779", "text": "def getchar(state):\n res = state.new_symbolic_value(32, \"getchar_res\")\n state.constrain(0 < res)\n state.constrain(res < 256)\n 
return [res]", "title": "" }, { "docid": "a221e595fcf1e4686041fdb5f9ddf1f8", "score": "0.5257562", "text": "def decode(self):\n if abs(self.encoding) >= self.max_int:\n raise OverflowError('Overflow detected in decrypted number')\n return self.encoding * pow(self.BASE, self.exponent)", "title": "" }, { "docid": "bb6476f7e21e7194f00b047dbf023c96", "score": "0.5255482", "text": "def hash_function_1(key: str) -> int:\n hash = 0\n for letter in key:\n hash += ord(letter)\n return hash", "title": "" }, { "docid": "bb6476f7e21e7194f00b047dbf023c96", "score": "0.5255482", "text": "def hash_function_1(key: str) -> int:\n hash = 0\n for letter in key:\n hash += ord(letter)\n return hash", "title": "" }, { "docid": "805cae6aa350140e253a26418396ef49", "score": "0.52539665", "text": "def hash_name(name):\n i = 0\n for char in name:\n i = ord(char.lower()) + 16 * i\n if i & 0xF0000000:\n i ^= i & 0xF0000000 ^ ((i & 0xF0000000) >> 24)\n return i", "title": "" }, { "docid": "180c76898853c094e69de8c66f87cbeb", "score": "0.5250862", "text": "def hashShingling(self, shingle, mod=2 ** 32 - 1):\n\n val = 0\n for c in shingle:\n val = (val * 26 + ord(c)) % mod\n return val", "title": "" }, { "docid": "ef3bda898c0f0f2a1a8b231f3dc93812", "score": "0.52497226", "text": "def hex_amount (self, val):\n res = ''\n for n in range(8):\n rem = val % (1 << 8)\n val >>= 8\n res += '%02x' % rem\n return res", "title": "" }, { "docid": "0acbe90fe3ce502c8e76e7e9f1077ce1", "score": "0.5247698", "text": "def rev_byte(cb, pb, map):\n # Split the hex bytes into their single digit components\n ch , ph = cb >> 4, pb >> 4\n cl , pl = cb % 16, pb % 16\n # Determine the high key value\n for i in range(len(map)):\n if map[ph][i] == ch:\n kl = i\n # Determine the low key value\n for i in range(len(map)):\n if map[pl][i] == cl:\n kh = i\n # Combine the low and high key values\n return (kh << 4) + kl", "title": "" }, { "docid": "e64d395f102b698dc0bad78faa613907", "score": "0.52420557", "text": "def get_byte_map(word):\n arr = bytearray(26)\n for c in word:\n if ord(c) < ord(\"a\") or ord(c) > ord(\"z\"):\n # skip invalid char\n continue\n index = ord(c) % ord(\"a\")\n if arr[index] == 255:\n raise ValueError(\n f\"Char `{c}` in word `{word}` exceed 255, which is a invalid input for this system.\"\n )\n arr[index] += 1\n return arr", "title": "" }, { "docid": "506fbb5f8ca2642f68aa532f525e9b22", "score": "0.52400404", "text": "def invhash32(text:StrBytes, init:int) -> int:\n crc = unsigned_I(init) ^ 0xffffffff # xorout\n for o in reversed(to_bytes(text)): # feed postfix text in reverse\n idx = CRC32_INDEX[crc >> 24]\n crc = (((crc ^ CRC32_TABLE[idx]) & 0x00ffffff) << 8) | (idx ^ o)\n return crc ^ 0xffffffff # xorout or init??", "title": "" }, { "docid": "cd0e7afaa71b9c4cfa79bb8364352f02", "score": "0.52316785", "text": "def single_xor(input_string: str, key_value: int) -> str:\n\n return b\"\".join([bytes([byte ^ key_value]) for byte in input_string])", "title": "" } ]
21bc9a94854948e612cbde5974c136a5
Return a XMLRPC wrapper function which can optionally intercept a call to the given ROS master XMLRPC method. method is the desired ROS master XMLRPC function to call.
[ { "docid": "a32b1731a45187225a9d21080b5d405f", "score": "0.7624827", "text": "def __getWrapper(self, method):\n def wrap(*args):\n \"\"\"Callback function for an XMLRPC function.\n\n * args -- the method input arguments\n\n \"\"\"\n # If this class has a method callback registered, then\n # make sure we call the callback. Otherwise, just pass\n # the call through to the true ROS master server\n try:\n callback = \"_%s\" % method\n if hasattr(self, callback):\n callbackFn = getattr(self, callback)\n callbackFn(*args)\n except Exception, e:\n import traceback\n traceback.print_exc()\n\n # Pass the call through to the ROS master\n masterFn = getattr(self.__client, method)\n return masterFn(*args)\n\n return wrap", "title": "" } ]
[ { "docid": "10d52d7f9b2d1cfc71a797955024df93", "score": "0.7143489", "text": "def rpcmethod(func):\n func.rpcmethod = True\n return func", "title": "" }, { "docid": "3074070b2ffa1480ab2b169d64206e36", "score": "0.68000335", "text": "def rpc_method(func):\n func.rpc_callable = True\n return func", "title": "" }, { "docid": "0b6b615435df4ce200ed88ee0b0f7730", "score": "0.6490059", "text": "def client_member_function(self, method: ProtoServiceMethod) -> None:", "title": "" }, { "docid": "119e6ba33fb3c0dd42e02e3b82c63524", "score": "0.6440268", "text": "def present_rpc_method(method, send_probe):\n svc_path_bk = method[\"rmtSvcIntName\"].split(\".\")[-1]\n if method[\"service\"] is None:\n svc_path_bk = svc_path_bk[0].lower() + svc_path_bk[1:]\n writer(\n f\"Warning: Unable to correlate method to a service path. Guessed /{svc_path_bk}\\n\"\n + \" - Strong name unknown - Use --svc to see options\",\n FORMAT['WARNING']\n )\n if RPC_VERSION != \"7\":\n writer(\n \"Warning: RPC body generation may be invalid - version 7 expected\"\n + f\", version {RPC_VERSION} found\",\n FORMAT['WARNING']\n )\n if len(method[\"complexTypes\"]) != 0:\n writer(\n \"Warning: Unhandled complex type found - RPC likely invalid:\\n - {}\"\n .format('\\n - '.join(method['complexTypes'])),\n FORMAT['WARNING']\n )\n\n service_path = (\n method[\"service\"][\"servicePath\"]\n if method[\"service\"] is not None\n else svc_path_bk\n )\n rpc_call = '|'.join(method[\"methodRpcCall\"]) + \"|\"\n\n writer(\n \"POST /{}{} HTTP/1.1\\r\".format(\n '/'.join(BASE_URL.split(\"/\")[3:]), service_path\n ).replace(\"//\", \"/\")\n )\n writer(f\"Host: {BASE_URL.split('/')[2]}\\r\")\n writer(f\"Content-Type: {CONTENT_TYPE}\\r\")\n writer(f\"X-GWT-Permutation: {GWT_PERMUTATION}\\r\")\n writer(f\"X-GWT-Module-Base: {BASE_URL}\\r\")\n writer(f\"Content-Length: {len(rpc_call.encode('utf-8'))}\\r\\n\\r\")\n writer(f\"{rpc_call}\\n\")\n\n if send_probe:\n url = (BASE_URL + service_path)\n send_rpc_probe(url, rpc_call)", "title": "" }, { "docid": "dc2816ca02e0a86102d6d4a7283da153", "score": "0.63989836", "text": "def ProtoRPCServiceMethod(method):\n\n def wrapper(self, request):\n assert isinstance(request, wrapper.rpc_method_spec.request_type)\n logging.info(\"Request:\\n%s\", request)\n response = method(self, request)\n assert isinstance(response, wrapper.rpc_method_spec.response_type)\n logging.info(\"Response:\\n%s\", response)\n return response\n\n # Since the service's descriptor will be parsed when the class is created,\n # which is later than the invocation time of this decorator, here it just\n # place the placeholder with dummy contents.\n wrapper.rpc_method_spec = _ProtoRPCServiceMethodSpec(None, None)\n return wrapper", "title": "" }, { "docid": "0058ff4995df31b97b3b133b0737fecc", "score": "0.6396937", "text": "def wrap(*args):\n # If this class has a method callback registered, then\n # make sure we call the callback. 
Otherwise, just pass\n # the call through to the true ROS master server\n try:\n callback = \"_%s\" % method\n if hasattr(self, callback):\n callbackFn = getattr(self, callback)\n callbackFn(*args)\n except Exception, e:\n import traceback\n traceback.print_exc()\n\n # Pass the call through to the ROS master\n masterFn = getattr(self.__client, method)\n return masterFn(*args)", "title": "" }, { "docid": "498132a1c2a16ade1d690a8b29198657", "score": "0.6351856", "text": "def xmlrpc_method(returns='string', args=None, name=None):\r\n # Args should be a list\r\n if args is None:\r\n args = []\r\n\r\n def _xmlrpc_func(func):\r\n \"\"\"Inner function for XML-RPC method decoration. Adds a signature to\r\n the method passed to it.\r\n\r\n func\r\n The function to add the signature to\r\n \"\"\"\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func\r\n\r\n return _xmlrpc_func", "title": "" }, { "docid": "c726928aa609d011d5d3ec758881630b", "score": "0.6295389", "text": "def allow_remote_invocation(func, method='auto'):\r\n setattr(func, 'allow_rmi', method)\r\n return func", "title": "" }, { "docid": "94d7fcf6718e5f994bfbe229f818cc07", "score": "0.6163918", "text": "def _process_method(self, method):\n return method", "title": "" }, { "docid": "47e91c6a4669164eb57c03d7e4b85ec8", "score": "0.6139524", "text": "def get_method(self, method): \n for provider in self.method_handlers:\n for candidate in provider.xmlrpc_methods():\n #self.env.log.debug(candidate)\n p = Method(provider, *candidate)\n if p.name == method:\n return p\n raise MethodNotFound('RPC method \"%s\" not found' % method)", "title": "" }, { "docid": "e65bd503b9c19f7e5df08ee57805e4d6", "score": "0.6123122", "text": "def client_member_function(self, method: ProtoServiceMethod) -> None:\n\n if method.type() in (ProtoServiceMethod.Type.CLIENT_STREAMING,\n ProtoServiceMethod.Type.BIDIRECTIONAL_STREAMING):\n self.line('// Nanopb RPC clients for '\n f'{method.type().name.lower().replace(\"_\", \" \")} '\n 'methods are not yet supported.')\n self.line('// See pwbug/428 (http://bugs.pigweed.dev/428).')\n # TODO(pwbug/428): Support client & bidirectional streaming clients.\n return\n\n req = method.request_type().nanopb_name()\n res = method.response_type().nanopb_name()\n method_id = pw_rpc.ids.calculate(method.name())\n\n callbacks, functions, moved_functions = _client_functions(method)\n\n call_alias = f'{method.name()}Call'\n\n moved_functions = list(f'std::move({function.name})'\n for function in functions)\n\n self.line(f'using {call_alias} = {RPC_NAMESPACE}::NanopbClientCall<')\n self.line(f' {callbacks}<{res}>>;')\n self.line()\n\n # TODO(frolv): Deprecate this channel-based API.\n # ======== Deprecated API ========\n self.line('// This function is DEPRECATED. 
Use pw_rpc::nanopb::'\n f'{method.service().name()}::{method.name()}() instead.')\n self.line(f'static {call_alias} {method.name()}(')\n with self.indent(4):\n self.line(f'{RPC_NAMESPACE}::Channel& channel,')\n self.line(f'const {req}& request,')\n\n # Write out each of the callback functions for the method type.\n for i, function in enumerate(functions):\n if i == len(functions) - 1:\n self.line(f'{function}) {{')\n else:\n self.line(f'{function},')\n\n with self.indent():\n self.line(f'{call_alias} call(&channel,')\n with self.indent(len(call_alias) + 6):\n self.line('kServiceId,')\n self.line(f'0x{method_id:08x}, // Hash of \"{method.name()}\"')\n self.line(f'{callbacks}({\", \".join(moved_functions)}),')\n self.line(f'{req}_fields,')\n self.line(f'{res}_fields);')\n self.line('call.SendRequest(&request);')\n self.line('return call;')\n\n self.line('}')\n self.line()\n\n # ======== End deprecated API ========\n\n self.line(f'{call_alias} {method.name()}(')\n with self.indent(4):\n self.line(f'const {req}& request,')\n\n # Write out each of the callback functions for the method type.\n for i, function in enumerate(functions):\n if i == len(functions) - 1:\n self.line(f'{function}) {{')\n else:\n self.line(f'{function},')\n\n with self.indent():\n self.line()\n self.line(f'{call_alias} call(&client(),')\n with self.indent(len(call_alias) + 6):\n self.line('channel_id(),')\n self.line('kServiceId,')\n self.line(f'0x{method_id:08x}, // Hash of \"{method.name()}\"')\n self.line(f'{callbacks}({\", \".join(moved_functions)}),')\n self.line(f'{req}_fields,')\n self.line(f'{res}_fields);')\n\n # Unary and server streaming RPCs send initial request immediately.\n if method.type() in (ProtoServiceMethod.Type.UNARY,\n ProtoServiceMethod.Type.SERVER_STREAMING):\n self.line()\n self.line('if (::pw::Status status = '\n 'call.SendRequest(&request); !status.ok()) {')\n with self.indent():\n self.line('call.callbacks().InvokeRpcError(status);')\n self.line('}')\n self.line()\n\n self.line('return call;')\n\n self.line('}')\n self.line()", "title": "" }, { "docid": "8ef11584e79eca01cb734119b48f4773", "score": "0.60685897", "text": "def __getattr__(self, method):\n def run_callback(func, plus, result):\n \"\"\"Execute the given callback safely.\n Get data and/or error from result and call func passing it\n data, plus (if needed) and error. 
Catch, log and suppress\n all exceptions.\n func (function): the callback to invoke.\n plus (object): optional additional data.\n result (AsyncResult): the result of a (finished) RPC call.\n \"\"\"\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)\n\n def remote_method(**data):\n \"\"\"Forward arguments to execute_rpc.\n \"\"\"\n callback = data.pop(\"callback\", None)\n plus = data.pop(\"plus\", None)\n result = self.execute_rpc(method=method, data=data)\n if callback is not None:\n callback = functools.partial(run_callback, callback, plus)\n result.rawlink(functools.partial(gevent.spawn, callback))\n return result\n\n return remote_method", "title": "" }, { "docid": "37f121660e6e8bc4db3c7ffadbc70572", "score": "0.6067481", "text": "def xmlrpc_methods():", "title": "" }, { "docid": "d28a3ccae4fc0f3881533fe80d2e72cc", "score": "0.6016587", "text": "def xen_rpc_call(ip, method, *args):\n try:\n if not ip:\n return xen_api_error(\"Invalid ip for rpc call\")\n # create\n proxy = ServerProxy(\"http://\" + ip + \":9363/\")\n \n # login \n response = proxy.session.login('root')\n if cmp(response['Status'], 'Failure') == 0:\n log.exception(response['ErrorDescription'])\n return xen_api_error(response['ErrorDescription']) \n session_ref = response['Value']\n \n # excute\n method_parts = method.split('_')\n method_class = method_parts[0]\n method_name = '_'.join(method_parts[1:])\n \n if method.find(\"host_metrics\") == 0:\n method_class = \"host_metrics\"\n method_name = '_'.join(method_parts[2:])\n #log.debug(method_class)\n #log.debug(method_name)\n if method_class.find(\"Async\") == 0:\n method_class = method_class.split(\".\")[1]\n response = proxy.__getattr__(\"Async\").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)\n else:\n response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)\n if cmp(response['Status'], 'Failure') == 0:\n log.exception(response['ErrorDescription'])\n return xen_api_error(response['ErrorDescription'])\n # result\n return response\n except socket.error:\n return xen_api_error('socket error')", "title": "" }, { "docid": "600b4da65e908de5c16b5fe1c75f5441", "score": "0.59741735", "text": "def client_static_function(self, method: ProtoServiceMethod) -> None:", "title": "" }, { "docid": "09646f43417d57d0ca61d9af9d796237", "score": "0.5864757", "text": "def __getattr__(self, method: str):\n @exception_handler\n def func(*args, **kwargs):\n return self._client.PyCall(method, list(args), kwargs,\n self._wait_for_ready, self._call_timeout,\n self._compress)\n\n setattr(self, method, func)\n return func", "title": "" }, { "docid": "535802dcda1770f5817e2e6fe84f823e", "score": "0.5812762", "text": "def make_xml_rpc_api_call(uri, method, args=None, headers=None,\r\n http_headers=None, timeout=None, proxy=None):\r\n if args is None:\r\n args = tuple()\r\n try:\r\n largs = list(args)\r\n largs.insert(0, {'headers': headers})\r\n\r\n payload = xmlrpc_client.dumps(tuple(largs),\r\n methodname=method,\r\n allow_none=True)\r\n session = requests.Session()\r\n req = requests.Request('POST', uri, data=payload,\r\n headers=http_headers).prepare()\r\n LOGGER.debug(\"=== REQUEST ===\")\r\n LOGGER.info('POST %s', uri)\r\n LOGGER.debug(req.headers)\r\n 
LOGGER.debug(payload)\r\n\r\n response = session.send(req,\r\n timeout=timeout,\r\n proxies=_proxies_dict(proxy))\r\n LOGGER.debug(\"=== RESPONSE ===\")\r\n LOGGER.debug(response.headers)\r\n LOGGER.debug(response.content)\r\n response.raise_for_status()\r\n result = xmlrpc_client.loads(response.content,)[0][0]\r\n return result\r\n except xmlrpc_client.Fault as ex:\r\n # These exceptions are formed from the XML-RPC spec\r\n # http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php\r\n error_mapping = {\r\n '-32700': NotWellFormed,\r\n '-32701': UnsupportedEncoding,\r\n '-32702': InvalidCharacter,\r\n '-32600': SpecViolation,\r\n '-32601': MethodNotFound,\r\n '-32602': InvalidMethodParameters,\r\n '-32603': InternalError,\r\n '-32500': ApplicationError,\r\n '-32400': RemoteSystemError,\r\n '-32300': TransportError,\r\n }\r\n raise error_mapping.get(ex.faultCode, SoftLayerAPIError)(\r\n ex.faultCode, ex.faultString)\r\n except requests.HTTPError as ex:\r\n raise TransportError(ex.response.status_code, str(ex))\r\n except requests.RequestException as ex:\r\n raise TransportError(0, str(ex))", "title": "" }, { "docid": "83632d7d098a935af18fc07c6b563dd5", "score": "0.5779396", "text": "def method_with_args_wrapper(method):\n if options['name'] is None:\n options['name'] = method.__name__\n method.__servicemethod__ = options\n\n if options['store'] is not None:\n options['store'].service.add_method(method)\n\n return method", "title": "" }, { "docid": "e50f0708d6ce46cedcc7f399bb5d0377", "score": "0.5766135", "text": "def ServiceMethod(fn):\n\n fn.IsServiceMethod = True\n return fn", "title": "" }, { "docid": "618c7ab5ae8f328593e535d0849db650", "score": "0.5756815", "text": "def override(method):\n method.override = True\n return method", "title": "" }, { "docid": "2a69129e90a9371c576e6a99aa2e8f13", "score": "0.5746238", "text": "def rpc_call(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n return func(*args, **kwargs)\n decorator.rpc_call = True\n return decorator", "title": "" }, { "docid": "a9f662be843926be10e437f9a264b79d", "score": "0.57393026", "text": "def _xmlrpc_func(func):\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func", "title": "" }, { "docid": "fd89492b9edf1da04ddaf205622ef4dd", "score": "0.5719186", "text": "def wrap_method(cls, methodName, newMethod):\n cls[methodName].exclude()\n add_member_function(cls, methodName, newMethod)", "title": "" }, { "docid": "b3229073f906c05c021d50846d1d7cb5", "score": "0.57029563", "text": "def call_method(method, meta):\n try:\n return method()\n except (OSError, NotImplementedError) as e:\n ometa = meta.copy()\n ometa[MK_PAYLOAD] = False\n ometa[MK_ERROR] = type(e).__name__\n return ometa.data, None", "title": "" }, { "docid": "9c83db6ff93b087c76b885ecf35fa7b9", "score": "0.5677541", "text": "def _call(self, rpc_method_name, *args, **kwargs):\n method = getattr(self, rpc_method_name)\n return method(*args, **kwargs)", "title": "" }, { "docid": "ca0625fcdb365e3768fc90f5298ff121", "score": "0.56374776", "text": "def _wrap_method(attr):\n def new_fn_like_old_fn(self, *args, **kwargs):\n value = self._value() # pylint: disable=protected-access\n old_fn = getattr(type(value), attr)\n return old_fn(value, *args, **kwargs)\n return new_fn_like_old_fn", "title": "" }, { "docid": "380fa471dfe8487723c85560b939640d", "score": "0.5602222", "text": "def decorate_HTTP_verb_method(method):\n @functools.wraps(method)\n def wrapper(self, RIC_base_uri, **kwargs):\n 
partition = kwargs.pop('partition', '')\n name = kwargs.pop('name', '')\n sub_path = kwargs.pop('subPath', '')\n suffix = kwargs.pop('suffix', '')\n uri_as_parts = kwargs.pop('uri_as_parts', False)\n if uri_as_parts:\n REST_uri = generate_bigip_uri(RIC_base_uri, partition, name,\n sub_path, suffix, **kwargs)\n else:\n REST_uri = RIC_base_uri\n pre_message = \"%s WITH uri: %s AND suffix: %s AND kwargs: %s\" %\\\n (method.__name__, REST_uri, suffix, kwargs)\n logging.debug(pre_message)\n response = method(self, REST_uri, **kwargs)\n post_message =\\\n \"RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:\"\\\n \" %s\\nText: %r\" % (response.status_code,\n response.headers.get('Content-Type', None),\n response.headers.get('Content-Encoding', None),\n response.text)\n logging.debug(post_message)\n if response.status_code not in range(200, 207):\n error_message = '%s Unexpected Error: %s for uri: %s\\nText: %r' %\\\n (response.status_code,\n response.reason,\n response.url,\n response.text)\n raise iControlUnexpectedHTTPError(error_message, response=response)\n return response\n return wrapper", "title": "" }, { "docid": "0a100c3427f4ee95fa4f539467b91887", "score": "0.553337", "text": "def call_rpc(rpc_user, rpc_pwd, method):\n base_url = 'http://127.0.0.1:37128/'\n try:\n response = post(base_url,\n data=method,\n auth=(rpc_user, rpc_pwd)\n )\n except Exception as e:\n response = e\n return evaluate_response(response)", "title": "" }, { "docid": "19c58a7ee6b3da175739c28b38aaf7cb", "score": "0.55310434", "text": "def remoteboundmethod(func):\n def new_func(self, *args, **kwargs):\n try:\n rpcclt = self.editwin.flist.pyshell.interp.rpcclt\n except AttributeError:\n rpcclt = None\n\n if rpcclt:\n return rpcclt.run_extension_function(self.__class__.__name__, func.__name__, args, kwargs)\n else:\n return func(self, *args, **kwargs)\n new_func.orig_func = func\n return new_func", "title": "" }, { "docid": "30b12d44b5a6900b1c1399e741c4c1a7", "score": "0.549189", "text": "def make_call(self, method, _):\n raise CallNotFound('{}.{} does not exist'.format(self.service_name,\n method))", "title": "" }, { "docid": "c8325f8b7319f0e8f20ada51c1106c9b", "score": "0.54876506", "text": "def _ServerProxy__request(self, methodname, params):\n\n paddedHandler = self._ServerProxy__handler\n\n # add on the methodName\n sep = '&'\n if '?' 
not in paddedHandler:\n sep = '?'\n paddedHandler = paddedHandler + \"%smethod=%s\" % (sep, methodname)\n sep = '&'\n\n # add on the auth token\n if self._authToken:\n paddedHandler = paddedHandler + \"%sauth_token=%s\" % (sep, urllib.quote_plus(self._authToken))\n\n # add on the partnerId\n if self._partnerId:\n paddedHandler = paddedHandler + \"%spartner_id=%s\" % (sep, self._partnerId)\n\n # add on the userId\n if self._userId:\n paddedHandler = paddedHandler + \"%suser_id=%s\" % (sep, self._userId)\n\n EXCLUDED_PAYLOAD_CALLS = ([\n \"auth.partnerLogin\",\n \"test.\",\n \"debug.\",\n \"testability.\"\n ])\n encryptRequest = True\n if self._requestCipher:\n for excludedMethodPattern in EXCLUDED_PAYLOAD_CALLS:\n if methodname.startswith(excludedMethodPattern):\n encryptRequest = False\n break\n else:\n encryptRequest = False\n\n # add the syncTime request\n if encryptRequest and self._sync:\n server_value, sync_time = self._sync\n params[0]['syncTime'] = server_value + int(time.time()) - sync_time\n\n request = xmlrpclib.dumps(params, methodname,\n encoding=self._ServerProxy__encoding,\n allow_none=self._ServerProxy__allow_none)\n\n #print \"------- XML REQUEST --------\"\n #print request\n\n if encryptRequest:\n request = self.encodeRequest(request)\n\n if self.x509:\n response = self._ServerProxy__transport.request(\n (self._ServerProxy__host, self.x509),\n paddedHandler,\n request,\n verbose=self._ServerProxy__verbose\n )\n else:\n response = self._ServerProxy__transport.request(\n self._ServerProxy__host,\n paddedHandler,\n request,\n verbose=self._ServerProxy__verbose\n )\n\n if len(response) == 1:\n response = response[0]\n\n #print \"------ RESPONSE ------\"\n #print response\n\n return response", "title": "" }, { "docid": "f63b8060217ab30f0f30f5cbed2b1ee2", "score": "0.547784", "text": "def ipcmethod(fn):\n def proxy_fn(inst, *args, **kwargs):\n # pylint: disable=protected-access\n return inst._ipc_call(fn.__name__, *args, **kwargs)\n proxy_fn.orig_fn = fn\n return proxy_fn", "title": "" }, { "docid": "ed73b6dddd42f566060a2b5af0b188c6", "score": "0.54724044", "text": "def override(self, method, function: Union[bool, Callable] = False):\n method = method if isinstance(method, str) else method.__name__\n if not hasattr(self, method):\n raise AttributeError(\n f\"Can't override method '{method}' because it's not defined\")\n if function is False:\n # assume it's used as a decorator\n # @train.override('step')\n # def custom_step(trainer, inputs, targets):\n def decorator(f):\n setattr(self, method, types.MethodType(f, self))\n\n return decorator\n else:\n setattr(self, method, types.MethodType(function, self))", "title": "" }, { "docid": "ad29973608dbd18e0c6ac71222560033", "score": "0.54715455", "text": "def xmlrpc_view(wrapped):\n \n def _curried(context, request):\n params, method = parse_xmlrpc_request(request)\n value = wrapped(context, *params)\n return xmlrpc_response(value)\n _curried.__name__ = wrapped.__name__\n _curried.__grok_module__ = wrapped.__module__ \n\n return _curried", "title": "" }, { "docid": "6dd413031360e87aa979589971356c2f", "score": "0.54406196", "text": "def call_history(method: Callable) -> Callable:\n inputs = method.__qualname__ + \":inputs\"\n outputs = method.__qualname__ + \":outputs\"\n\n @wraps(method)\n def wrapper(self, *args, **kwds):\n \"\"\"[wrapper of decorator]\n\n Returns:\n [type]: [description]\n \"\"\"\n # print('Calling decorated function')\n self._redis.rpush(inputs, str(args))\n method_return = method(self, *args, **kwds)\n 
self._redis.rpush(outputs, str(method_return))\n return method_return\n return wrapper", "title": "" }, { "docid": "bc1e78b48eb105f39d06101adb707f7c", "score": "0.5430126", "text": "def make_method_wrapper(servicename, method, delegateattribute):\n #FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations)\n fa = inspect.getfullargspec(method)\n methname = method.__name__\n args = []\n params = []\n\n # Simply copy args names.\n for a in fa.args:\n args.append(a)\n params.append(a)\n\n # Setup default values for latest named arguments.\n # process by reverse order, with same negative index in both lists.\n if fa.defaults is not None:\n for i in range(-1, -len(fa.defaults) - 1, -1):\n args[i] = args[i] + '=' + repr(fa.defaults[i])\n\n # Process sequence of variable arguments.\n if fa.varargs:\n args.append(\"*\" + fa.varargs)\n params.append(\"*\" + fa.varargs)\n\n # Process sequence of named arguments.\n if fa.varkw:\n args.append(\"**\" + fa.varkw)\n params.append(\"**\" + fa.varkw)\n\n # If missing, make basic documentation.\n doc = inspect.getdoc(method)\n if doc is None:\n doc = \"Wrapper for \" + servicename + \" \" + methname + \".\"\n\n # Now, create the wrapper method source.\n src = srctpl.format(\n fctname=methname,\n doc=doc,\n service=servicename,\n delegattrib=delegateattribute,\n args=','.join(args),\n params=','.join(params[1:]) # Omit self !\n )\n\n # print(src)\n\n #wrappercode = compile(src,\"delegator.py\",\"exec\")\n\n methwrapperspace = {}\n exec(src, globals(), methwrapperspace)\n methwrapper = methwrapperspace[methname]\n\n #methwrapper.__name__ = methname\n #methwrapper.__doc__ = doc\n # methwrapper.__code__ = wrappercode # We inject our wrapper code.\n #methwrapper.__defaults__ = fa.defaults\n\n return methwrapper", "title": "" }, { "docid": "b6ed3c7e8083d7da01d7c7909ffe5f84", "score": "0.5426388", "text": "def _call_method(self, module, method, *args, **kwargs):\n return self.invoke_api(module, method, *args, **kwargs)", "title": "" }, { "docid": "9258d0d83808a0fdda1af90f15bc2c20", "score": "0.541891", "text": "def _wrapped_method(self, _meth_name, *args, **kwargs):\n return self._delegate(_meth_name, *args, **kwargs)", "title": "" }, { "docid": "2b4952343f189ea9f62c2bf227f2a10c", "score": "0.541088", "text": "def method():\n pass", "title": "" }, { "docid": "3564b616c3531c58f68e883fb24f7b64", "score": "0.5395659", "text": "def remote_method(**data):\n callback = data.pop(\"callback\", None)\n plus = data.pop(\"plus\", None)\n result = self.execute_rpc(method=method, data=data)\n if callback is not None:\n callback = functools.partial(run_callback, callback, plus)\n result.rawlink(functools.partial(gevent.spawn, callback))\n return result", "title": "" }, { "docid": "b9d4138b2eed9404ce002d10404a5c13", "score": "0.5368383", "text": "def expose_rpc(permission, return_type, *arg_types):\n def decorator(func):\n if not hasattr(func, '_xmlrpc_signatures'):\n func._xmlrpc_signatures = []\n func._xml_rpc_permission = permission\n func._xmlrpc_signatures.append((return_type,) + tuple(arg_types))\n return func\n return decorator", "title": "" }, { "docid": "26a062e17378a5ccff14ed9b0cbeb10d", "score": "0.5358935", "text": "def _call(self,\n context: 'IconScoreContext',\n method: str,\n params: dict) -> Any:\n\n self._push_context(context)\n handler = self._handlers[method]\n ret_val = handler(context, params)\n self._pop_context()\n return ret_val", "title": "" }, { "docid": "bb3a20d9e1aa93f515e3634707d3d340", "score": "0.53551525", "text": "def 
_dispatch(self, method, params):\n logging.debug('Calling %s%s', method, params)\n self._rpc_received_event.set()\n return SimpleJSONRPCServer.SimpleJSONRPCServer._dispatch(\n self, method, params)", "title": "" }, { "docid": "91f1ec57fb4eb6f89bcc15142e0ddb4f", "score": "0.5347537", "text": "def wrap_method(self, fn):\n\n @functools.wraps(fn)\n def wrapper(self_, *args):\n fname = fn.__name__\n if len(args) > len(self._insigs):\n raise TypeError(\n (\"{}() takes {} positional arguments but {} were \" \"given\").format(\n fname, len(self._insigs) + 1, len(args) + 1\n )\n )\n try:\n newargs = [self._input(self._insigs[i], arg) for i, arg in enumerate(args)]\n except Exception as e:\n message = \"{}: converting input arguments for function '{}\".format(e, fname)\n raise type(e)(message) from None\n\n try:\n result = fn(self_, *newargs)\n except Exception as e:\n message = \"{}: raised by {}()\".format(e, fname)\n raise type(e)(message) from e\n try:\n return self._output(self._outsig, result)\n except Exception as e:\n raise type(e)(\"{} for output of {}()\".format(e, fname)) from None\n\n return wrapper", "title": "" }, { "docid": "018f9b74d624d36ce8511849c0927345", "score": "0.5341622", "text": "def unary_stub(self, method: ProtoServiceMethod,\n output: OutputFile) -> None:", "title": "" }, { "docid": "193044d6a6f005186725fc0bd64cb1b2", "score": "0.5329803", "text": "def _new_method(self, *arg, **kws):\n try:\n re_entrant_thread_lock.acquire()\n # call the actual function.\n return function(self, *arg, **kws)\n finally:\n re_entrant_thread_lock.release()", "title": "" }, { "docid": "555da236e9553e39ab5f75ab11a3c16b", "score": "0.52906334", "text": "def custom_method(self, foo):\n pass", "title": "" }, { "docid": "3da65b52d38ea1fc23e7c890a3d358a8", "score": "0.52740806", "text": "def __call__(self):\n params, method = parse_xmlrpc_request(self.request)\n return xmlrpc_response(getattr(self,method)(*params))", "title": "" }, { "docid": "8687223008a316ac3092eba6b687a598", "score": "0.52670044", "text": "def rpc_immediate(func):\n decorator = rpc_call(func)\n decorator.rpc_immediate = True\n return decorator", "title": "" }, { "docid": "ee831aac821a837f8fe1baf9de78b2f0", "score": "0.5266206", "text": "def builtin_wrapper(method_name):\n def wrapper(self, *args, **kwargs):\n if method_name not in self.__dict__:\n raise AttributeError(\n \"object '%s' has no mock method '%s'\" %\n (mock_object_names.get(id(self)), method_name)\n )\n return self.__dict__[method_name](*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "8dab0421c45909cb5099935c2ab398dd", "score": "0.5265916", "text": "def call(self, method=None, args=[]):\n\n client = xmlclient.ServerProxy(uri=self.api_endpoint, encoding='utf-8',\n allow_none=True)\n response = getattr(client, method)(self.username, self.password, *args)\n if response == 'OK':\n return True\n elif 'AUTH_ERROR' in response:\n raise AuthError()\n elif response == 'DOMAIN_OCCUPIED':\n raise DomainOccupiedError()\n elif response == 'RATE_LIMITED':\n raise RateLimitedError()\n elif response == 'BAD_INDATA':\n raise BadInDataError()\n elif response == 'UNKNOWN_ERROR':\n raise UnknownError()\n else:\n return response", "title": "" }, { "docid": "657888dc0cf4443523a73241b8dfaf79", "score": "0.52414155", "text": "def call(self, method, *args):\n flatcall = flatten(\n m(n=method, t=self.groupName)[[\n squish(x) for x in args if x is not None]])\n self.socket.write(flatcall + '\\0')", "title": "" }, { "docid": "12438e3673bc9b6f9aed5cd011624a35", "score": 
"0.5234635", "text": "def process_method(self, method, args, kwargs, request_id=None, **context):\n return method(*([] if args is None else args), **({} if kwargs is None else kwargs))", "title": "" }, { "docid": "64c4d3cc19226818688e25392ac49883", "score": "0.52293706", "text": "def __getattr__(self, cmd):\n\n if hasattr(self._rpc, cmd+'Request'):\n lnfunc = getattr(self._rpc, cmd+'Request')\n elif hasattr(self._rpc, f'Get{cmd}Request'):\n lnfunc = getattr(self._rpc, f'Get{cmd}Request')\n else:\n raise NotImplementedError('Unhandled method self._rpc.(Get)' + cmd + 'Request')\n\n if hasattr(self._stub, cmd):\n stubfunc = getattr(self._stub, cmd)\n\n def rpcCommand(*args,**kwargs):\n return stubfunc(lnfunc(*args, **kwargs))\n return rpcCommand\n\n elif hasattr(self._stub, 'Get'+cmd):\n stubfunc = getattr(self._stub, 'Get'+cmd)\n def rpcCommand(*args,**kwargs):\n if args:\n raise TypeError('Cannot use positional arguments with this command')\n return stubfunc(lnfunc(**kwargs))\n return rpcCommand\n\n else:\n raise NotImplementedError('Unhandled method stub.(Get)' + cmd)", "title": "" }, { "docid": "272e14eb1a22badf32e05032d35f30ae", "score": "0.5229296", "text": "def identify_method(self, func):", "title": "" }, { "docid": "808ff1803f34232767724b4855731cb1", "score": "0.52273256", "text": "def _trace_call(self, method: module_utils._FunctionWrapper, method_name: str):\n\n def call(*args, **kwargs):\n # Pop manually specified tolerances from the kwargs (if any).\n tolerances = {}\n tolerances[\"rtol\"] = kwargs.pop(\"rtol\", None)\n tolerances[\"atol\"] = kwargs.pop(\"atol\", None)\n # Only pass these to ModuleCall if they were specified by the user.\n tolerances = {k: v for k, v in tolerances.items() if v is not None}\n\n # Ensure the inputs are numpy inputs.\n args = tf_utils.convert_to_numpy(args)\n kwargs = tf_utils.convert_to_numpy(kwargs)\n\n # Run the method and record the details of the call.\n outputs = method(*args, **kwargs)\n serialized_inputs, serialized_outputs = method.get_serialized_values()\n self._trace.calls.append(\n ModuleCall(\n method_name,\n args,\n outputs,\n serialized_inputs,\n serialized_outputs,\n **tolerances,\n )\n )\n return outputs\n\n return call", "title": "" }, { "docid": "532bd6eb0a652edc1b4f2505567c955d", "score": "0.5219983", "text": "def _apply_wrappers(method, *args):\n # Local functions\n name = method.__name__\n local = name in ('area', 'areax', 'plotx', 'parametric', 'heatmap', 'scatterx')\n\n for func in args[::-1]:\n # Apply wrapper\n # NOTE: Must assign fucn and method as keywords to avoid overwriting\n # by loop scope and associated recursion errors.\n method = functools.wraps(method)(\n lambda self, *args, _func=func, _method=method, **kwargs:\n _func(self, *args, _method=_method, **kwargs)\n )\n\n # List wrapped methods in the driver function docstring\n if not hasattr(func, '_methods_wrapped'):\n func._methods_wrapped = []\n if not hasattr(func, '_docstring_orig'):\n func._docstring_orig = func.__doc__ or ''\n docstring = func._docstring_orig\n if '{methods}' not in docstring:\n continue\n pkg = 'proplot' if local else 'matplotlib'\n link = f'`~{pkg}.axes.Axes.{name}`'\n methods = func._methods_wrapped\n if link not in methods:\n methods.append(link)\n prefix = ', '.join(methods[:-1])\n modifier = ', and ' if len(methods) > 2 else ' and ' if len(methods) > 1 else ''\n suffix = methods[-1] + '.'\n func.__doc__ = docstring.format(methods=prefix + modifier + suffix)\n\n # Remove documentation\n # NOTE: Without this step matplotlib method 
documentation appears on proplot\n # website! This doesn't affect user experience because help() will search for\n # documentation on superclass matplotlib.axes.Axes above proplot.axes.Axes.\n if not local:\n method.__doc__ = None # let help() function seek the superclass docstring\n\n return method", "title": "" }, { "docid": "b22f64b8a8c4f18bcdbabf0cb227e280", "score": "0.5202471", "text": "def __getattr__(self, method, **arg):\n if not self.__handlerCache.has_key(method):\n def handler(_self = self, _method = method, **arg):\n url = _self.END_POINT\n arg[\"action\"] = _method\n postData = _self.__url_encode_params(params=arg)\n # print \"--url---------------------------------------------\"\n # print url\n # print \"--postData----------------------------------------\"\n # print postData\n\t\tlogging.debug(url+postData)\n f = urllib.urlopen(url + postData)\n data = f.read()\n\n # print \"--response----------------------------------------\"\n \n# print data\n f.close()\n \n xml = XMLNode.parseXML(data, True)\n\t\tif method=='get_account_tree':\n\t\t # logging.debug(xml.tree[0].elementText)\n\t\t decode_data=base64.b64decode(xml.tree[0].elementText)\n\t\t fp=StringIO(decode_data)\n\t\t zfp=zipfile.ZipFile(fp,'r')\n\t\t ftree=zfp.read(zfp.namelist()[0])\n\t\t # logging.debug(ftree)\n\t\t \n\t\t xml.tree[0]=XMLNode.parseXML(ftree)\n #print ecode_data\n _self.check_errors(_method, xml) \n return xml\n\n self.__handlerCache[method] = handler;\n\n return self.__handlerCache[method]", "title": "" }, { "docid": "ce9504d2c2eb6a699330c0aa7c937ccf", "score": "0.5201163", "text": "def tranquilize(method='get', requires_authentication=None):\n\n #just to be safe\n method = method.lower()\n\n def _dart(f):\n f._spec = _prepare(f)\n f._method = method\n f._methods = None\n f._requires_authentication = requires_authentication\n return f\n\n return _dart", "title": "" }, { "docid": "e64e6ebbeb726db52414007e42c68e11", "score": "0.5198447", "text": "def trace(\n method: Callable[['Evaluator', lark.Tree], Any]) -> Callable[['Evaluator', lark.Tree], Any]:\n @wraps(method)\n def concrete_method(self: 'Evaluator', tree: lark.Tree) -> Any:\n self.logger.info(f\"{self.level*'| '}{tree!r}\")\n result = method(self, tree)\n self.logger.info(f\"{self.level*'| '}{tree.data} -> {result!r}\")\n return result\n return concrete_method", "title": "" }, { "docid": "2e2c5f11a2ecdeec19de5ea98583700f", "score": "0.5191245", "text": "def __getattr__(self, attr):\n\n #print \"checking for attribute %r remotely\" % attr\n\n # note that this will fail if it doesn't exist, and that's good\n check_attr = getattr(self.machine, attr)\n\n if callable(check_attr):\n # it's a method, call it and return it\n def wrap_remote_call(*args, **kargs):\n return check_attr(*args, **kargs)\n return wrap_remote_call\n else:\n # it' just an attribute, return it\n return check_attr", "title": "" }, { "docid": "adfd0619f05cc1596045f5bfc2dfe99c", "score": "0.51777977", "text": "def build_method_call(code, line, method_object):\n full_signature = method_object[\"methodSignature\"]\n normalised_signature = normalise_signature(full_signature)\n param_values = get_method_parameter_values(code, line, full_signature)\n string_values, cmplx_types = get_string_values(param_values, full_signature)\n\n rpc_payload_length = str(\n 4 + len(normalised_signature) + len(string_values)\n )\n # Default to stub value if method-to-service correlation failed\n strong_name = (\n method_object[\"service\"][\"strongName\"]\n if method_object[\"service\"] is not None\n else 
\"X\"*32\n )\n rpc_blocks = []\n rpc_blocks.extend([\n RPC_VERSION,\n RPC_FLAGS,\n rpc_payload_length,\n BASE_URL,\n strong_name,\n method_object[\"rmtSvcIntName\"],\n method_object[\"methodName\"],\n ])\n rpc_blocks.extend(normalised_signature)\n rpc_blocks.extend(string_values)\n rpc_blocks.extend([\n \"1\", \"2\", \"3\", \"4\",\n method_object[\"paramCount\"]\n ])\n rpc_blocks.extend(\n generate_parameter_map(\n rpc_blocks,\n full_signature,\n param_values\n )\n )\n return rpc_blocks, cmplx_types", "title": "" }, { "docid": "01f21b4b3f2ba4a4a7cca06d6e5552b1", "score": "0.51528776", "text": "def callable(self, method):\n # Look up method\n if method not in self.all_methods:\n raise PLCInvalidAPIMethod(method)\n\n # Get new instance of method\n try:\n classname = method.split(\".\")[-1]\n if method in self.native_methods:\n fullpath = \"PLC.Methods.\" + method\n else:\n fullpath = self.other_methods_map[method]\n module = __import__(fullpath, globals(), locals(), [classname])\n return getattr(module, classname)(self)\n except (ImportError, AttributeError):\n raise PLCInvalidAPIMethod(\"import error %s for %s\"\n % (AttributeError, fullpath))", "title": "" }, { "docid": "7a9befd2e6721f6a4268effd57cd4ba4", "score": "0.5139309", "text": "def wrapper(func) -> None:\r\n\t\t\tself.paths[method].append((pathnames, func))", "title": "" }, { "docid": "ea1d4a0055ee8d61a11f3f8d47a05365", "score": "0.5136667", "text": "def __getattr__(self, method_name):\n return partial(self.exec, method_name.replace(\"_\", \" \"))", "title": "" }, { "docid": "b2667bccbd5f21df62be9da412db6943", "score": "0.51303196", "text": "def __call__(self, method):\n self._method = method\n\n async def wrapper(*args, **kwargs):\n \"\"\"Wrap the method.\"\"\"\n if self.name is None:\n self.name = str(self._method.__qualname__).lower().replace(\".\", \"_\")\n try:\n self._coresys = args[0].coresys\n except AttributeError:\n return False\n\n if not self._coresys:\n raise JobException(f\"coresys is missing on {self.name}\")\n\n job = self._coresys.jobs.get_job(self.name)\n\n if self.conditions and not self._check_conditions():\n return False\n\n try:\n return await self._method(*args, **kwargs)\n except HassioError as err:\n raise err\n except Exception as err:\n _LOGGER.exception(\"Unhandled exception: %s\", err)\n sentry_sdk.capture_exception(err)\n raise JobException() from err\n finally:\n if self.cleanup:\n self._coresys.jobs.remove_job(job)\n\n return wrapper", "title": "" }, { "docid": "e55167ae56ae67b1dfb386ae5230ac90", "score": "0.513021", "text": "def test_basic_method_call_wrapper():\n my_method = SGMethod(\"test\")\n other_method = SGMethod(\"other\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 0", "title": "" }, { "docid": "2a5d0f507c8a74f9f97e48cd91ce51c9", "score": "0.5129701", "text": "def do_rpc(self, method, **params):\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']", "title": "" }, { "docid": "a4fe38532b15b250ad2e37b7333b7386", "score": "0.5127883", "text": "def call_history(method: Callable) -> Callable:\n i_keys = method.__qualname__ + \":inputs\"\n o_keys = method.__qualname__ + \":outputs\"\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Set list keys to wrapped function\n \"\"\"\n self._redis.rpush(i_keys, 
str(args))\n r = method(self, *args, **kwargs)\n self._redis.rpush(o_keys, str(r))\n return r\n\n return wrapper", "title": "" }, { "docid": "ffad33f54f0244873c92523662506bbc", "score": "0.5126563", "text": "def execute_rpc(self, method, data):\n result = gevent.event.AsyncResult()\n result.set_exception(\n RPCError(\"Called a method of a non-configured service.\"))\n return result", "title": "" }, { "docid": "cc2fa95d85b41c003362676ced8f2e10", "score": "0.51116604", "text": "def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n else:\n return self.pa.request(**request_args)\n return decorator", "title": "" }, { "docid": "6cd35820060acd01323038451d5374b9", "score": "0.5099967", "text": "def add_method(self, method: Callable):\n self._add(method.__name__, method)", "title": "" }, { "docid": "3cd7ea6f6d5255d1574231cdb7ca4528", "score": "0.509522", "text": "def derived_from(original_method):\n\n def wrapper(method):\n doc = original_method.__doc__.replace(\"*,\", \"\\*,\") # noqa\n doc = doc.replace(\n \":ref:`ufunc docs <ufuncs.kwargs>`.\",\n \"`ufunc docs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html#ufuncs-kwargs>`_.\",\n )\n\n # remove examples\n doc = doc.split(\"\\n\\n Examples\\n\")[0]\n\n # remove references\n doc = [a for a in doc.split(\"\\n\\n\") if \"References\\n----------\\n\" not in a]\n\n l1 = \"This docstring was copied from numpy.{}\".format(original_method.__name__)\n l2 = \"Some inconsistencies with the Workflows version may exist\"\n\n if isinstance(original_method, np.ufunc):\n # what the function does\n info = doc[1]\n\n # parameters (sometimes listed on separate lines, someimtes not)\n parameters = [a for a in doc if \"Parameters\\n\" in a][0].split(\"\\n\")\n if parameters[4][0] == \"x\":\n parameters = \"\\n\".join(parameters[:6])\n else:\n parameters = \"\\n\".join(parameters[:4])\n\n # return value\n returns = [a for a in doc if \"Returns\\n\" in a][0]\n\n # final docstring\n doc = \"\\n\\n\".join([info, l1, l2, parameters, returns])\n else:\n # does the first line contain the function signature? 
(not always the case)\n if doc[0][-1] == \")\":\n doc = [doc[1]] + [\"\\n\\n\" + \" {}\\n\\n {}\\n\\n\".format(l1, l2)] + doc[2:]\n else:\n doc = [doc[0]] + [\"\\n\\n\" + \" {}\\n\\n {}\\n\\n\".format(l1, l2)] + doc[1:]\n doc = \"\\n\\n\".join(doc)\n\n method.__doc__ = doc\n return method\n\n return wrapper", "title": "" }, { "docid": "2a338d9c9a17e911ac564dc8b826d946", "score": "0.5087606", "text": "def _wrap_method_output(f, method):\n\n @wraps(f)\n def wrapped(self, X, *args, **kwargs):\n data_to_wrap = f(self, X, *args, **kwargs)\n if isinstance(data_to_wrap, tuple):\n # only wrap the first output for cross decomposition\n return_tuple = (\n _wrap_data_with_container(method, data_to_wrap[0], X, self),\n *data_to_wrap[1:],\n )\n # Support for namedtuples `_make` is a documented API for namedtuples:\n # https://docs.python.org/3/library/collections.html#collections.somenamedtuple._make\n if hasattr(type(data_to_wrap), \"_make\"):\n return type(data_to_wrap)._make(return_tuple)\n return return_tuple\n\n return _wrap_data_with_container(method, data_to_wrap, X, self)\n\n return wrapped", "title": "" }, { "docid": "79862d4486efc6af9d6daa28f6caece1", "score": "0.5079165", "text": "def monorail_api_method(\n request_message, response_message, **kwargs):\n time_fn = kwargs.pop('time_fn', time.time)\n method_name = kwargs.get('name', '')\n method_path = kwargs.get('path', '')\n http_method = kwargs.get('http_method', '')\n def new_decorator(func):\n @endpoints.method(request_message, response_message, **kwargs)\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n method_identifier = (ENDPOINTS_API_NAME + '.' +\n (method_name or func.__name__)\n + '/' + (method_path or func.__name__))\n start_time = time_fn()\n approximate_http_status = 200\n request = args[0]\n ret = None\n c_id = None\n c_email = None\n mar = None\n try:\n if settings.read_only and http_method.lower() != 'get':\n raise permissions.PermissionException(\n 'This request is not allowed in read-only mode')\n requester = endpoints.get_current_user()\n logging.info('requester is %r', requester)\n logging.info('args is %r', args)\n logging.info('kwargs is %r', kwargs)\n auth_client_ids, auth_emails = (\n client_config_svc.GetClientConfigSvc().GetClientIDEmails())\n if settings.local_mode:\n auth_client_ids.append(endpoints.API_EXPLORER_CLIENT_ID)\n if self._services is None:\n self._set_services(service_manager.set_up_services())\n cnxn = sql.MonorailConnection()\n c_id, c_email = api_base_checks(\n request, requester, self._services, cnxn,\n auth_client_ids, auth_emails)\n mar = self.mar_factory(request, cnxn)\n self.ratelimiter.CheckStart(c_id, c_email, start_time)\n self.increment_request_limit(request, c_id, c_email)\n ret = func(self, mar, *args, **kwargs)\n except exceptions.NoSuchUserException as e:\n approximate_http_status = 404\n raise endpoints.NotFoundException(\n 'The user does not exist: %s' % str(e))\n except (exceptions.NoSuchProjectException,\n exceptions.NoSuchIssueException,\n exceptions.NoSuchComponentException) as e:\n approximate_http_status = 404\n raise endpoints.NotFoundException(str(e))\n except (permissions.BannedUserException,\n permissions.PermissionException) as e:\n approximate_http_status = 403\n logging.info('Whitelist ID %r email %r', auth_client_ids, auth_emails)\n raise endpoints.ForbiddenException(str(e))\n except endpoints.BadRequestException:\n approximate_http_status = 400\n raise\n except endpoints.UnauthorizedException:\n approximate_http_status = 401\n # Client will refresh token 
and retry.\n raise\n except oauth.InvalidOAuthTokenError:\n approximate_http_status = 401\n # Client will refresh token and retry.\n raise endpoints.UnauthorizedException(\n 'Auth error: InvalidOAuthTokenError')\n except (exceptions.GroupExistsException,\n exceptions.InvalidComponentNameException,\n ratelimiter.ApiRateLimitExceeded) as e:\n approximate_http_status = 400\n raise endpoints.BadRequestException(str(e))\n except Exception as e:\n approximate_http_status = 500\n logging.exception('Unexpected error in monorail API')\n raise\n finally:\n if mar:\n mar.CleanUp()\n now = time_fn()\n elapsed_ms = int((now - start_time) * 1000)\n if c_id and c_email:\n self.ratelimiter.CheckEnd(c_id, c_email, now, start_time)\n\n fields = {\n # Endpoints APIs don't return the full set of http status values.\n 'status': approximate_http_status,\n # Use the api name, not the request path, to prevent an\n # explosion in possible field values.\n 'name': method_identifier,\n 'is_robot': False,\n }\n\n http_metrics.server_durations.add(\n elapsed_ms, fields=fields)\n http_metrics.server_response_status.increment(\n fields=fields)\n http_metrics.server_request_bytes.add(\n len(protojson.encode_message(request)), fields=fields)\n response_size = 0\n if ret:\n response_size = len(protojson.encode_message(ret))\n http_metrics.server_response_bytes.add(\n response_size, fields=fields)\n\n return ret\n\n return wrapper\n return new_decorator", "title": "" }, { "docid": "470a92e9c6970fc25561fefce40bfde0", "score": "0.5073894", "text": "def wrapper(self, *args):\n key = method.__qualname__\n output = method(self, *args)\n self._redis.rpush('{}:inputs'.format(key), str(args))\n self._redis.rpush('{}:outputs'.format(key), output)\n return output", "title": "" }, { "docid": "454e8c3c33a02f1583d2ef5798790741", "score": "0.50671935", "text": "def unary_unary(self,\n method,\n request_serializer=None,\n response_deserializer=None):\n return UnaryUnaryMultiCallable(self._channel, _common.encode(method),\n request_serializer,\n response_deserializer)", "title": "" }, { "docid": "59c488d648b5c0605b6ce6516b06c877", "score": "0.50637835", "text": "def remote(self, method, params=()):\n\n response = self.transport.request(self.host, \n '/RPC2',\n dumps(params, method))\n return response", "title": "" }, { "docid": "fa60c78abe03b57a63378438275722e0", "score": "0.50616443", "text": "def _call_method(self, method, req, resp_class):\n payload = req.SerializeToString()\n headers = {\n 'Content-Type': 'application/x-protobuf',\n 'Content-Length': str(len(payload))\n }\n response, content = self._http.request(\n self._url + method, method='POST', body=payload, headers=headers)\n if response.status != 200:\n raise RPCError(method, response, content)\n resp = resp_class()\n resp.ParseFromString(content)\n return resp", "title": "" }, { "docid": "37d8a9848a4943a8c0bf431c2b96812d", "score": "0.50580186", "text": "def setup_method(self, method):\n pass", "title": "" }, { "docid": "37d8a9848a4943a8c0bf431c2b96812d", "score": "0.50580186", "text": "def setup_method(self, method):\n pass", "title": "" }, { "docid": "1614847cd2a597d00ff94571dcae1095", "score": "0.50568324", "text": "def setMethod(self, method):\n\t\tself.method = method", "title": "" }, { "docid": "fe2e81cd4800fb18389bb78b30f0f59a", "score": "0.5042651", "text": "def rpc_call(self, method: str, params: Optional[list] = None) -> Any:\r\n if params is None:\r\n params = []\r\n data = json.dumps({ # json string used in HTTP requests\r\n 'jsonrpc': '2.0',\r\n 'method': 
method,\r\n 'params': params,\r\n 'id': self.id\r\n })\r\n url = \"http://{}:{}\".format(self.ip.address, self.rpc_port)\r\n with SEMAPHORE:\r\n with requests.Session() as r:\r\n # sleep(0.01) ###\r\n response = r.post(url=url, data=data, headers=self._headers)\r\n while response.headers['Content-Type'] != 'application/json':\r\n print(self.ip.address, self.rpc_port)\r\n print(response.status_code, response.headers)\r\n print(response.content)\r\n sleep(0.05)\r\n response = r.post(url=url, data=data, headers=self._headers)\r\n content = response.json()\r\n # sleep(0.02)\r\n print(content)\r\n result = content.get('result')\r\n err = content.get('error')\r\n if err:\r\n raise RuntimeError(self.ip.address, self.rpc_port, err.get('message'))\r\n\r\n print('%s @%s : %s %s' % (method, self.ip.address, self.rpc_port, result))\r\n return result", "title": "" }, { "docid": "acbe13be2bdcf219299ce974a7899986", "score": "0.50418234", "text": "def map_method(self, method_name, *args, **kwds):\r\n return self.map(self._call_extension_method,\r\n method_name, *args, **kwds)", "title": "" }, { "docid": "ac030c13346fc8209a12e0a800438656", "score": "0.5034493", "text": "async def _register_method_handler(self, method_handler):\n generic_handler = grpc.method_handlers_generic_handler(\n \"test\",\n dict(Test=method_handler),\n )\n self._server.add_generic_rpc_handlers((generic_handler,))\n await self._server.start()", "title": "" }, { "docid": "fa28bd6eb0e069ab29c3ad0dce163454", "score": "0.50263786", "text": "def method_descriptor(self, method: ProtoServiceMethod,\n method_id: int) -> None:", "title": "" }, { "docid": "78567858b96a4b7111fd917362e407ac", "score": "0.5025847", "text": "def _dispatch(self, method, params):\n func = None\n try:\n # check to see if a matching function has been registered\n func = self.server.funcs[method]\n except KeyError:\n if self.server.instance is not None:\n # check for a _dispatch method\n if hasattr(self.server.instance, '_dispatch'):\n return self.server.instance._dispatch(method, params)\n else:\n # call instance method directly\n try:\n func = resolve_dotted_attribute(\n self.server.instance,\n method,\n self.server.allow_dotted_names\n )\n except AttributeError:\n pass\n\n request = Request(\n client_address=self.client_address,\n headers=self.headers,\n )\n if func is not None:\n return func(request, *params)\n else:\n raise Exception('method \"%s\" is not supported' % method)", "title": "" }, { "docid": "4a6581156df116b917408fdd70070031", "score": "0.50154376", "text": "def inner(func):\r\n\r\n service = func.__qualname__.split(\".\")[0]\r\n _Router().add_route(\r\n service=service,\r\n grpc_method=func.__name__,\r\n url_path=url,\r\n http_method=method\r\n )\r\n if pre_request is not None and len(pre_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pre_request, url)\r\n if pos_request is not None and len(pos_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pos_request, url)\r\n return func", "title": "" }, { "docid": "b4b270a17cf7b748320b24c52143220a", "score": "0.49997243", "text": "def talk_to_me(callback_method: Callable) -> Callable:\n\n @wraps(callback_method)\n async def wrapped(*args):\n if len(args) < 1 or args[1].effective_user.id not in (QKZKID, MESKOID):\n return\n return await callback_method(*args)\n\n return wrapped", "title": "" }, { "docid": "cd50d221ddf2beb8e32d34a84a9f3020", "score": "0.49948245", "text": "def add_method(self, method, name=None, request_arg=True, store_arg=True):\n # Was this a decorated 
servicemethod?\n if hasattr(method, '__servicemethod__'):\n options = method.__servicemethod__\n else:\n options = {'name': name or method.__name__, 'store': self.store,\n 'request_arg': request_arg, 'store_arg': store_arg}\n\n method.__servicemethod__ = options\n self.methods[ options['name'] ] = method", "title": "" }, { "docid": "1eabe50e64991afe88326c2b36b5798e", "score": "0.4993762", "text": "def get_custom_operation_service(self, method: \"wrappers.Method\") -> \"wrappers.Service\":\n if not method.output.is_extended_operation:\n raise ValueError(\n f\"Method is not an extended operation LRO: {method.name}\")\n\n op_serv_name = self.naming.proto_package + \".\" + \\\n method.options.Extensions[ex_ops_pb2.operation_service]\n op_serv = self.services.get(op_serv_name)\n if not op_serv:\n raise ValueError(\n f\"No such service: {op_serv_name}\"\n )\n\n if not op_serv.operation_polling_method:\n raise ValueError(\n f\"Service is not an extended operation operation service: {op_serv.name}\")\n\n return op_serv", "title": "" }, { "docid": "733508d14d5dad14a544c177d7aaa876", "score": "0.49916923", "text": "def servicemethod(*args, **kwargs):\n # Default options\n options = {'name': None, 'store': None, 'request_arg': True, 'store_arg': True}\n\n # Figure out if we were called with arguments\n # If we were called with args, ie:\n # @servicemethod(name='Foo')\n # Then the only argument here will be the pre-decorated function/method object.\n method = ( (len(args) == 1) and callable(args[0]) ) and args[0] or None\n\n if method is None:\n # We were called with args, (or @servicemethod() )\n # so figure out what they were ...\n\n # The method name should be either the first non-kwarg\n # or the kwarg 'name'\n # Example: @servicemethod('my_method', ...) or @servicemethod(name='my_method')\n options.update({\n 'name': bool(args) and args[0] or kwargs.pop('name', None),\n 'store': (len(args) >= 2) and args[1] or kwargs.pop('store', None),\n 'request_arg': kwargs.pop('request_arg', True),\n 'store_arg': kwargs.pop('store_arg', True),\n })\n else:\n options['name'] = method.__name__\n method.__servicemethod__ = options\n\n def method_with_args_wrapper(method):\n \"\"\" Wrapper for a method decorated with decorator arguments\n \"\"\"\n if options['name'] is None:\n options['name'] = method.__name__\n method.__servicemethod__ = options\n\n if options['store'] is not None:\n options['store'].service.add_method(method)\n\n return method\n\n return method or method_with_args_wrapper", "title": "" }, { "docid": "5e1bf0f4ff26e25203b79cdd8f11d12f", "score": "0.49911812", "text": "def _call_method(self, call, method):\n raise Exception(\"_call_method must be implemented by subclasses.\")", "title": "" }, { "docid": "04395a4fce48e50c6763053331c48618", "score": "0.49885386", "text": "def call(self, port, method, *args, **kwargs):\n method = self.provides[port][method]\n return method(*args, **kwargs)", "title": "" }, { "docid": "3132fed2558b4fd658b462e9930e8ff7", "score": "0.49833393", "text": "def set_method(self, method):\n self.method = method", "title": "" }, { "docid": "1d40c6db1bb39453a100e296123bc866", "score": "0.49785188", "text": "def proxy_method(self, rest_path, sign, kwargs):", "title": "" }, { "docid": "50154145ee89375affb12c7d2a5f5c9e", "score": "0.49728248", "text": "def system_methodSignature(self, method):\r\n # See if we can find the method in our funcs dict\r\n # TODO: Handle this better: We really should return something more\r\n # formal than an AttributeError\r\n func = 
self.funcs[method]\r\n\r\n try:\r\n sig = func._xmlrpc_signature\r\n except:\r\n sig = {\r\n 'returns': 'string',\r\n 'args': ['string' for arg in getargspec(func)[0]],\r\n }\r\n\r\n return [sig['returns']] + sig['args']", "title": "" }, { "docid": "4d4f085ad6e8e6c913bd069af72ef1f4", "score": "0.49585733", "text": "def _platformix_call(self, context, fake_reply, method, *args, **kwargs):\r\n if hasattr(self.host, method):\r\n if not callable(getattr(self.host, method)):\r\n self._reply(context, proto_failure(\"Attribute {} of {} is a property\".format(\r\n property, self.host.name)), fake_reply)\r\n return\r\n try:\r\n result = getattr(self.host, method)(*args, **kwargs)\r\n except Exception as e:\r\n eprint(\"Platformix protocol: failed to call method {} of {} with args {}, kwargs {} \"\r\n \"due to exception {}\".format(method, self.host.name, args, kwargs, e))\r\n exprint()\r\n self._reply(context, proto_failure(\r\n \"Failed to call method {} of {} with args {}, kwargs {} \"\r\n \"due to exception {}\".format(method, self.host.name, args, kwargs, e)), fake_reply)\r\n return\r\n self._reply(context, proto_success(result), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Method {} not found on {}\".format(property, self.host.name)),\r\n fake_reply)", "title": "" }, { "docid": "ea11771f6566c5c5b16e1f17fb7609ce", "score": "0.4957861", "text": "def rpc_call(self, request, method=None, params=None, **kwargs):\r\n args = []\r\n kwargs = dict()\r\n if isinstance(params, dict):\r\n kwargs.update(params)\r\n else:\r\n args = list(as_tuple(params))\r\n\r\n method_key = \"{0}.{1}\".format(self.scheme_name, method)\r\n if method_key not in self.methods:\r\n raise AssertionError(\"Unknown method: {0}\".format(method))\r\n method = self.methods[method_key]\r\n\r\n if hasattr(method, 'request'):\r\n args.insert(0, request)\r\n\r\n return method(*args, **kwargs)", "title": "" }, { "docid": "c6f35dcecd26d9b48de33acee2430c01", "score": "0.49496195", "text": "def wrapper(fn):\n if name is None:\n name_ = fn.__name__\n else:\n name_ = name\n original_method = getattr(cls,name_,default)\n new_method = fn(original_method)\n setattr(cls,name_,new_method)\n return fn", "title": "" } ]
8bcfc6a93aee455d8653b8831eda99de
Run an arbitrary query by Variable Elimination. What is the analytic cost of this? You have to do K noise queries in a graph with K endog nodes + K exog nodes in normal CFI. In twin network inference, you have to do 1 query in a graph with 2K endog nodes + K exog nodes.
[ { "docid": "8164bfb7ca118009b8158e8e045cb136", "score": "0.5620961", "text": "def query(self, var, observed, counterfactual=False, twin=False):\n if not isinstance(var, list):\n var = [var]\n if twin:\n # time_start = time.time()\n infer = VariableElimination(self.efficient_twin_model)\n result, time_elapsed = infer.query(var, evidence=observed, stopwatch=True)\n self.twin_inference_time = time_elapsed\n elif counterfactual:\n # time_start = time.time()\n infer = VariableElimination(self.counterfactual_model)\n result, time_elapsed = infer.query(var, evidence=observed, stopwatch=True)\n self.standard_inference_time = self.joint_inference_time + time_elapsed\n else:\n infer = VariableElimination(self.model)\n result, time_elapsed = infer.query(var, evidence=observed, stopwatch=True)\n return result, time_elapsed", "title": "" } ]
[ { "docid": "9d9bd34ad7a79c09351a5f63d8b1a69e", "score": "0.64059097", "text": "def run(self, query, observed, elim_order):\n\n # CHANGE from original code:\n # Here is where my code begins\n\n # Step 1: indentifying and reducing observables/evidence\n self.overwrite_log(\"STEP 1 : Factor Identification\\n\\n\")\n\n factor_identifier = FactorIdentifier(self.network.nodes, self.network.probabilities, observed)\n factors = factor_identifier.factor_identification() # dataframes with observed values removed\n\n # Step 2 Call elim_order() and run it to get the specified elimination order\n self.append_log(\"STEP 2 : Elimination Order\\n\\n\")\n\n ordered_list = elim_order() # elimination order in alphabetical order\n self.append_log(f\"Nodes after sorting: {ordered_list}\\n\\n\")\n\n # Step 3: Variable elimination\n self.append_log(\"STEP 3: Variable Elimination\\n\\n\")\n VarElim = VariableElimination(factors, ordered_list)\n #VarElim.variable_elimination()\n\n #test sumout:\n for key, factor in factors.items():\n VarElim.sum_out(factor, key)", "title": "" }, { "docid": "9d226bbb6066c1ad04516794ea27ef6a", "score": "0.5933294", "text": "def run(self, query, observed, elim_order):\n out = sys.stdout\n logs = open(\"notes.txt\", \"w\")\n sys.stdout = logs\n print(\"++++++++++++++++++++++ The nodes we have ++++++++++++++++++++++\")\n print(\"++++++++++++++++++++++\" + network.nodes.__str__() + \"++++++++++++++++++++++\")\n print(\"++++++++++++++++++++++ The query variable is: \"+query+\" ++++++++++++++++++++++\")\n\n order = [x for x in elim_order if x != query]\n print(\"++++++++++++++++++++++ Our elimination order is: \" + str(elim_order)+\"\\n\")\n facs = []\n print(\"++++++++++++++++++++++ Remove the observed variable(s) which is/are: \"+str(observed))\n for node in observed:\n self.observe(self.network, node, observed[node])\n self.removeFromNet(self.network, node)\n if node in order:\n order.remove(node)\n xs = [x for x in self.network.nodes if node != x]\n facs = facs + (xs)\n\n facs = set(facs)\n formulas = [x for x in facs]\n elim_order = order\n if len(formulas) == 0:\n formulas =[x for x in self.network.nodes]\n \n logs.write(\"++++++++++++++++++++++We are going to be working with++++++++++++++++++++++\\n \")\n\n self.properPrint(self.network.probabilities)\n while (elim_order):\n node = elim_order.pop(0)\n logs.write(\"++++++++++++++++++++++ We are eliminating\" + node + \" ++++++++++++++++++++++\\n\")\n factor, formulas = self.multiply(node, formulas)\n logs.write(\"++++++++++++++++++++++ Now we are left with ++++++++++++++++++++++\\n\")\n\n logs.write(\"++++++++++++++++++++++\" + str(formulas) + \" ++++++++++++++++++++++\\n\")\n logs.write(\"++++++++++++++++++++++ Now We are working with ++++++++++++++++++++++\\n \")\n self.properWrite(formulas)\n\n if factor != None:\n logs.write(\"++++++++++++++++++++++ Marginalize the table ++++++++++++++++++++++\\n\")\n self.network.probabilities[factor] = marginalize(node, self.network.probabilities[factor])\n formulas = formulas + [factor]\n logs.write(\"++++++++++++++++++++++Now We are working with++++++++++++++++++++++\\n \")\n self.properWrite(formulas)\n\n for fac in formulas:\n if self.network.probabilities[fac].shape[0] == 1:\n formulas.remove(fac)\n logs.write(\"++++++++++++++++++++++Before the end++++++++++++++++++++++\\n \")\n self.properWrite(formulas)\n result = formulas[0]\n for r in range(1, len(formulas)):\n self.network.probabilities[result] = product(self.network.probabilities[result], 
self.network.probabilities[formulas[r]])\n\n sum = 0\n for prob in network.probabilities[result]['prob']:\n sum += prob\n network.probabilities[result]['prob'] = network.probabilities[result]['prob']/sum\n logs.write(\"++++++++++++++++++++++ The result is: ++++++++++++++++++++++\\n \")\n print(network.probabilities[result])\n sys.stdout = out\n logs.close()\n\n return result", "title": "" }, { "docid": "00c9d0fb5033d30f171d9b3459ebfa5c", "score": "0.5456156", "text": "def rejection_sampling(input_values, Graph, Hash_Nodes, N):\n\n evidence_values = []\n ecount = 1\n while ecount < (len(input_values)-1):\n evidence_values.append([input_values[ecount],input_values[ecount+1]])\n ecount += 2\n\n query = input_values[0]\n query_variable = None\n for i in xrange(0, len(Graph)):\n if Graph[i].name == query:\n query_variable = Graph[i].random_variable\n\n\n atomic_event_list = [None]*N\n\n # all 1000 events get randomly assigned\n # prior sampling\n for i in xrange(0, N):\n event = AtomicEvent(Graph)\n prior_sampling(event)\n atomic_event_list[i] = event\n\n accept_list = []\n\n # rejects the samples that don't follow the evidence\n for i in xrange(0, N):\n event = atomic_event_list[i]\n if accepted_event(event, evidence_values):\n accept_list.append(event)\n\n # counts the samples for each outcome in the query variable's domain\n samples_count_per_outcome = np.zeros(query_variable.domain.size, dtype=np.int32)\n for i in xrange(0, len(accept_list)):\n update_count(samples_count_per_outcome, accept_list[i], query_variable)\n\n print \"Query Variable: \" + str(query_variable.name)\n print \"Evidence: \" + str(evidence_values)\n print query_variable.domain.domain_list\n posterior_distribution = samples_count_per_outcome/float(np.sum(samples_count_per_outcome))\n print posterior_distribution\n return posterior_distribution", "title": "" }, { "docid": "f7dfeb3d01e063b6e18c075383b7a2f8", "score": "0.5359048", "text": "def query(self,var):\n return VE(Graphical_model(self.dbn.vars1,self.current_factors)).query(var,self.current_obs)", "title": "" }, { "docid": "4a8362f28c5fb0887764b850a994453f", "score": "0.52503556", "text": "def eliminateVar(self,Z,E,factorList):\n\n useFactors = []#the index of the factor that contains the variable Z\n scope = []\n\n #print 'Z: ', Z\n\n \n #get a list containining the index in self.factorLlist of factors\n #that contain the variable Z to be eliminated\n # get the scope of variables from the factors that contain variable Z\n for i in range (len(factorList)):\n \n if Z in factorList[i].getVar().tolist():\n useFactors.append(i)#the ith factor is being currently involved in elimination\n scope=list(set.union(set(scope), factorList[i].getVar().tolist() ))\n\n \n # update edge map\n \"\"\" These represent the induced edges for the VE graph.\n once the variable Z is eliminated, its edges are removed from the graph\n but in the process of elimination, we create a new factor. This\n introduces fill edges (see pg. 
307 Koller and Friedman)\n Z is one based, but the indices in E are zero based, hence Z-1\n also the variable names in scope are 1 based, so we subtract 1 when\n updating the induced VE graph \"\"\"\n\n for i in range ( len(scope)):\n for j in range ( len(scope)):\n if i != j:\n E[ scope[i]-1, scope[j]-1 ]=1\n E[ scope[j]-1, scope[i]-1 ]=1\n E[Z-1,:]=0\n E[:,Z-1]=0\n\n #G=nx.from_numpy_matrix(E)\n #print 'induced graph edges:\\n', (G.edges())\n #nx.draw_shell(G)\n #plt.show()\n\n \n #these are the indices of factorList which are not involved in VE\n unusedFactors= list( set.difference ( set(range(len(factorList))), set(useFactors) ) )\n \n newF=None\n #check first if there are any unused factors left!\n if len(unusedFactors) > 0:\n newF=len(unusedFactors)*[None]\n newmap=np.zeros(max(unusedFactors)+1,dtype=int).tolist()\n \n #newF is a new factor list, we populate it first\n #with the unused factors\n #newmap is maps the new location of ith unusedFactor\n for i in range( len(unusedFactors)):\n newF[i]=factorList[ unusedFactors[i] ]\n newmap[ unusedFactors[i] ]= i\n \n #print 'newmap ', newmap,\"\\n\"\n #print 'length of newmap: ', len(newmap), \"\\n\"\n\n newFactor = Factor( [], [], [], 'newFactor')\n\n #we multiple in all the factors that contain the variable Z\n for i in range( len (useFactors)):\n newFactor = FactorProduct(newFactor,factorList[ useFactors[i] ])\n \n\n #then we marginalize Z out and obtain a new factor\n #then append it the end of newF, the new factor list\n newFactor = FactorMarginalization( newFactor,[Z] )\n #print 'newFactor: ',newFactor\n #newF(length(nonUseFactors)+1) = newFactor;\n if newFactor != None:\n newF.append ( newFactor )\n\n \n\n if newF != None:\n factorList=newF\n #return E\n\n ########################################################################\n \"\"\" the remaining code builds the edges of the clique tree \"\"\"\n\n \"\"\" add new node with the factors that contain the variable Z\n adding a new node represents new clique.\n The scope of every factor generated during the variable elimination process is a clique pg. 309 Koller & Friedman \"\"\"\n\n self.nodeList.append ( scope )\n \n #newC is the total number of nodes in the clique tree\n newC=len( self.nodeList )\n #print 'newC: ', newC\n\n #factorInds are individual factors with one variable ... 
I think\n self.factorInds.append ( len(unusedFactors) + 1 )\n \n\n #print 'range( newC -1) ', range( newC-1 )\n #print 'factorInds: ', self.factorInds\n #print 'useFactors: ', useFactors\n #pdb.set_trace()\n \"\"\" we update the edges of the clique tree \"\"\"\n for i in range( newC -1 ):\n \n #if self.factorInds [ i ] -1 in useFactors:\n #there was the off by onoe erorr - the values in factorInds\n #were one-based, need to subtract 1\n if self.factorInds [ i ] -1 in useFactors:\n \n self.edges[ i, newC-1 ] = 1\n self.edges [ newC-1, i ] = 1\n self.factorInds[ i ] = 0\n else:\n if self.factorInds [i] != 0:\n #print 'i: ', i\n #print 'factorInds: ', self.factorInds\n #print 'newmap: ', newmap\n #print 'newmap [ self.factorInds[i] -1: ', newmap [ self.factorInds[i] -1 ]\n #print 'self.factorInds[ i ] = newmap [ self.factorInds[i] - 1 ] + 1 '\n if len(unusedFactors) > 0:\n #self.factorInds[ i ] = newmap [ self.factorInds[i] -1 ] +1\n self.factorInds[ i ] = newmap [ self.factorInds[i] -1 ] +1\n #self.factorInds[ i ] = newmap [ self.factorInds[i] ]\n \n #print 'factorInds right before returning: ', self.factorInds\n return E, factorList", "title": "" }, { "docid": "5cc15fa22cc265f4921a34f238dbdb17", "score": "0.5059412", "text": "def search(self):\n \n #check depth\n if self.node.depth<=1:\n self.terminate('D')\n return\n#engine collect_thetas_taus\n#print(\"master:search() collect_thetas_taus\")\n #thetas,taus=self.eng.getParam()\n self.dview.execute('thetas,taus=eng.getParam()')\n #thetas=self.dview['thetas']\n #taus=self.dview['taus']\n\n \n #mearge_thetas_taus\n #all_thetas=np.concatenate(thetas,axis=1)\n #all_taus=np.concatenate(taus,axis=1)\n all_thetas=self.dview.gather('thetas')\n all_taus=self.dview.gather('taus')\n##engine compute ensemble entropy\n#print(\"master:search() compute entropy\")\n #QH,Q=self.eng.getQH(all_thetas,all_taus)\n self.dview['all_thetas']=all_thetas\n self.dview['all_taus']=all_taus\n self.dview.execute('QH,Q=eng.getQH(all_thetas,all_taus)')\n QH=np.sum(np.array(self.dview['QH']),axis=0)\n Q=np.array(self.dview['Q'])#+np.finfo(np.float32).tiny\n \n #check bag size Q\n##print(\"master:search() check bag size\")\n if np.max(Q)<self.minbagsize:\n self.terminate('Q')\n return\n##compute the entropy gain\n#print(\"master:search() compute entropy gain\")\n gain=self.node.H-QH/(np.sum(Q))\n #find best gain index\n bgi = np.argmax(gain)\n #check gain\n if gain[bgi]<np.finfo(np.float32).tiny:\n self.terminate('G')\n return\n##engine split\n#print(\"master:search() engine split\")\n best_theta=all_thetas[bgi,:]\n best_tau=all_taus[bgi]\n self.dview['best_theta']=best_theta\n self.dview['best_tau']=best_tau\n #print(\"best theta: {},tau: {}\".format(best_theta,best_tau))\n#print(\"master:search() eng.split()\")\n self.dview.execute('HL,QL,HR,QR = eng.split(best_theta,best_tau)')\n HLs=np.array(self.dview['HL'])\n QLs=np.array(self.dview['QL'])\n HRs=np.array(self.dview['HR'])\n QRs=np.array(self.dview['QR'])\n \n HL=np.sum(HLs*QLs)/np.sum(QLs)\n QL=np.sum(QLs)\n \n HR=np.sum(HRs*QRs)/np.sum(QRs)\n QR=np.sum(QRs)\n## append_nodes \n#print(\"master:search() append_nodes\")\n self.node.L=mnode(self.node.depth-1,HL,QL,'L',self.node)\n self.node.R=mnode(self.node.depth-1,HR,QR,'R',self.node)\n self.queue.append(self.node.L)\n self.queue.append(self.node.R)\n \n self.node.theta=all_thetas[bgi,:]\n self.node.tau=all_taus[bgi]\n self.node.char=self.node.char+'-'", "title": "" }, { "docid": "973e531377c972bdbfd76f77a62b53e6", "score": "0.5055201", "text": "def optimizer(x):\n \n 
return 1 * (portfolioVariance(x))", "title": "" }, { "docid": "0159f88cbe8165d43f5ed2fee9d58b85", "score": "0.5035717", "text": "def eval(self, dropout=False):\n def apply_dropout(m):\n \"\"\"https://discuss.pytorch.org/t/dropout-at-test-time-in-densenet/6738/6.\"\"\"\n if type(m) == torch.nn.Dropout:\n m.train()\n self.model.eval()\n if dropout:\n self.model.apply(apply_dropout)", "title": "" }, { "docid": "9669c5300c9310b39d48ad21efd90f70", "score": "0.50262076", "text": "def optimizer():\n raise ValueError(\"litepresence needs funding\")", "title": "" }, { "docid": "e265750074c7bddc730d3cac7cb16cba", "score": "0.5015158", "text": "def vary_rem_noise(n_reps=10):\n p = {}\n p['n_input_graphs'] = 4\n p['duplicates'] = 5\n p['density_multiplier'] = 1\n p['p_keep_edge'] = 0.05\n p['g'] = 0.5\n p['f'] = 2\n p['gap_cost'] = p['f']\n p['n_entities'] = 50\n p['n_input_graph_nodes'] = 50\n p['max_iters'] = 500\n\n varied_param = 'p_keep_edge'\n p[varied_param] = np.arange(0, 1.01, 0.1)\n\n experiment_template(n_reps, p, varied_param, cv=False, title='rem_noise')", "title": "" }, { "docid": "7555f96f9ee40c545b9003cc2ee6f370", "score": "0.49621788", "text": "def performRejectionSampling(self, queryVar, givenVars, numSamples):\n # TODO\n\n q1 = 0\n q2 = 0\n\n for x in range(1, numSamples):\n y = self.pSample()\n\n for z in givenVars:\n\n if y[z.getName()] == givenVars[z]:\n\n if y[queryVar.getName()]:\n q1 = q1 + 1\n\n else:\n q2 = q2 + 1\n\n return self.Normalize([q1, q2])", "title": "" }, { "docid": "86f234555e83a730beac67c8f633333d", "score": "0.49131468", "text": "def run_experient(self,args):\n\n\n\n if args.test_ddqn:\n agent.load('ddqn_per_weights.h5')\n agent.update_target_model()\n agent.epsilon=1e-10\n self.flag = True\n\n print(\"--------------------\")\n print(\" \")\n print(\"testing started....\")\n print(\" \")\n print(\"--------------------\")\n for i in range(args.episodes):\n agent.run_episode(i,args)\n\n\n return\n\n\n\n # epsilon values to be used during training\n epsilon_vals = np.linspace(self.epsilon,0.01,args.episodes)\n\n print(\"Initialzing Replay Memory with {} samples\".format(self.TRAIN_START))\n print(\" \")\n print(\"--------------------\")\n while self.count < self.TRAIN_START:\n self.run_episode(0,args,init=True)\n\n\n print(\"--------------------\")\n print(\" \")\n print(\"training started....\")\n print(\" \")\n print(\"--------------------\")\n self.count = 0\n for i in range(args.episodes):\n\n\n agent.epsilon = epsilon_vals[i]\n self.run_episode(i,args)\n\n\n # I do not want to evaluate the model at episode 0\n if i % model_check == 0 and i != 0:\n\n agent.flag = True\n agent.epsilon = 1e-10\n print(\" \")\n print(\"--------------\")\n print(\" \")\n agent.run_episode(i,args)\n print(\" \")\n print(\"--------------\")\n print(\" \")\n agent.flag = False", "title": "" }, { "docid": "206dd5717336ef37237133e105ed4044", "score": "0.4880883", "text": "def episodic_semi_gradient_sarsa_on_secret_env5() -> DeepQNetwork:\n tf.config.set_visible_devices([], 'GPU')\n env = Env5()\n pre_warm = 500\n epsilon = 0.5\n gamma = 0.9999\n max_episodes_count = 200000\n print_every_n_episodes = 500\n\n total_score = 0\n\n state_description_length = env.state_description_length()\n max_actions_count = env.max_actions_count()\n\n print(state_description_length, max_actions_count)\n q = tf.keras.Sequential(\n [\n tf.keras.layers.Dense(128, input_shape=(state_description_length, ), activation=\"relu\"),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(128, 
activation=\"relu\"),\n tf.keras.layers.Dense(3, activation=\"softmax\"),\n ]\n )\n\n q.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=tf.keras.losses.mse)\n q.summary()\n\n if os.path.isfile(\"semi_gradient_sarsa_se.h5\"):\n q.load_weights(\"semi_gradient_sarsa_se.h5\")\n print(\"Restore model\")\n # test_nn(env, state_description_length, max_actions_count, q)\n\n for episode_id in tqdm.tqdm(range(max_episodes_count)):\n env.reset()\n\n # if episode_id == 1000:\n # epsilon = 0.05\n if epsilon > 0.02:\n epsilon = epsilon * 0.9997\n\n i = 0\n while not env.is_game_over() and i < (100 if (episode_id < pre_warm) else 200):\n i += 1\n s = env.state_description().reshape(-1, state_description_length)\n available_actions = env.available_actions_ids()\n\n if episode_id < pre_warm or np.random.uniform(0.0, 1.0) < epsilon:\n chosen_action = np.random.choice(available_actions)\n else:\n chosen_action = predict(q, s, available_actions)\n\n previous_score = env.score()\n env.act_with_action_id(chosen_action)\n r = env.score() - previous_score\n s_p = env.state_description().reshape(-1, state_description_length)\n\n if env.is_game_over():\n target = np.zeros(max_actions_count)\n target[chosen_action] = r\n q.train_on_batch(s, target.reshape(-1, max_actions_count))\n\n if episode_id % print_every_n_episodes == 0:\n print(\"\\nAvg. score:\", int(total_score / 1000), \"/// Eps:\", epsilon)\n total_score = 0\n break\n\n next_available_actions = env.available_actions_ids()\n\n if episode_id < pre_warm or np.random.uniform(0.0, 1.0) < epsilon:\n next_chosen_action = np.random.choice(next_available_actions)\n else:\n next_chosen_action = None\n next_chosen_action_q_value = None\n q_values_ = np.squeeze(q.predict(s_p))\n for a in next_available_actions:\n p = q_values_[a]\n if next_chosen_action is None or next_chosen_action_q_value < p:\n next_chosen_action = a\n next_chosen_action_q_value = p\n\n next_chosen_action_q_value = np.squeeze(q.predict(s_p))[next_chosen_action]\n target = np.zeros(max_actions_count)\n target[next_chosen_action] = r + gamma * next_chosen_action_q_value\n q.train_on_batch(s, target.reshape(-1, max_actions_count))\n\n if episode_id % 1000 == 0:\n q.save_weights(\"semi_gradient_sarsa_se.h5\")\n\n q.save_weights(\"semi_gradient_sarsa_se.h5\")\n pass", "title": "" }, { "docid": "965a38ad9577eccb85b71f003a6a966a", "score": "0.4879376", "text": "def run_query(querylist, supports, result_name, rosetta, output_path='.', prune=True):\n kgraph = KnowledgeGraph( querylist, rosetta )\n kgraph.execute()\n kgraph.print_types()\n if prune:\n kgraph.prune()\n kgraph.enhance()\n kgraph.support(supports)\n kgraph.export(result_name)", "title": "" }, { "docid": "247c21f0e46c1837643143282487aca0", "score": "0.48556134", "text": "def inference(features,hidden_nodes,activation_type,keep_prob,isTrain):\n def f_train(h,k_p):\n\t #add dropout to training optimization process\n\t return tf.nn.dropout(h,k_p)\n def f_nontrain(h):\n\t #doesn't add dropout\n\t return h\n pre_hidden_units=0 #keep previous layer nodes number\n for i,hidden_units in enumerate(hidden_nodes):\n layer_index=i+1\n if layer_index==1:\n with tf.name_scope(\"hidden\"+str(layer_index)):\n weights=tf.Variable(tf.truncated_normal([FEATURE_SIZE, hidden_units], dtype=tf.float32)/20,name=\"weights\") # divided by 20 to generate smaller ouptut at initial, otherwise exp(output) will be inf\n #biases1=tf.Variable(tf.zeros([hidden_units]),name=\"biases\")\n 
#hidden1=tf.nn.relu(tf.matmul(features,weights1)+biases1)\n\t\thidden=activation(tf.matmul(features,weights),activation_type)\n #hidden=tf.nn.relu(tf.matmul(features,weights))\n\t\thidden_dropout=tf.cond(isTrain,lambda: f_train(hidden,keep_prob),lambda: f_nontrain(hidden))\n pre_hidden_units=hidden_units\n else:\n with tf.name_scope(\"hidden\"+str(layer_index)):\n weights=tf.Variable(tf.truncated_normal([pre_hidden_units, hidden_units], dtype=tf.float32)/20,name=\"weights\") # divided by 20 to generate smaller ouptut at initial, otherwise exp(output) will be inf\n #biases1=tf.Variable(tf.zeros([hidden_units]),name=\"biases\")\n #hidden1=tf.nn.relu(tf.matmul(features,weights1)+biases1)\n\t\thidden=activation(tf.matmul(hidden_dropout,weights),activation_type)\n\t\t#if add_dropout:\n\t #\t\thidden_dropout=tf.nn.dropout(hidden,keep_prob)\n\t#\telse:\n\t#\t\thidden_dropout=hidden\n\t\t#hidden=tf.nn.relu(tf.matmul(hidden,weights))\n\t\thidden_dropout=tf.cond(isTrain,lambda: f_train(hidden,keep_prob),lambda: f_nontrain(hidden))\n pre_hidden_units=hidden_units\n\n\n with tf.name_scope('output'):\n weights2=tf.Variable(tf.truncated_normal([hidden_units,NUM_CLASSES],dtype=tf.float32)/20,name=\"weights\")\n #biases2=tf.Variable(tf.zeros([NUM_CLASSES]),name='biases')\n #output=tf.matmul(hidden1,weights2)+biases2\n output=tf.matmul(hidden_dropout,weights2)\n\n return output", "title": "" }, { "docid": "ca971560790ea54e27d2a7b246feb909", "score": "0.4853778", "text": "def compute_volumes(self, queries=None, evidence=None, cache=False):\n if queries is None:\n queries = []\n if evidence is None:\n evidence = []\n \n queries2 = []\n n_copies = []\n for q in queries:\n qvars = list(q.get_free_variables())\n if len(qvars) == 2: # bivariate query\n qvars.sort(key=lambda v : v.symbol_name())\n x, y = qvars[0].symbol_name(), qvars[1].symbol_name()\n if (x,y) in self.relaxations:\n xcopy = self.relaxations[(x,y)]['copy_name']\n q = q.substitute({qvars[0] : self.primal.nodes()[xcopy]['var']})\n queries2.append(q)\n n_copies.append(1)\n elif len(qvars) == 1: # univariate query\n x = qvars[0].symbol_name()\n if x in self.relaxations:\n copies = self.relaxations[x]['copies']\n qs = [q] + [q.substitute(\n {qvars[0]: self.primal.nodes()[xcopy]['var']}\n ) for xcopy in copies]\n queries2.extend(qs)\n n_copies.append(len(copies) + 1)\n else:\n queries2.append(q)\n n_copies.append(1)\n\n evidence2 = []\n for e in evidence:\n evars = list(e.get_free_variables())\n if len(evars) == 2:\n evars.sort(key=lambda v : v.symbol_name())\n x, y = evars[0].symbol_name(), evars[1].symbol_name()\n if (x,y) in self.relaxations:\n xcopy = self.relaxations[(x,y)]['copy_name']\n e = e.substitute({evars[0] : self.primal.nodes()[xcopy]['var']})\n\n evidence2.append(e)\n \n Z, vols = super().compute_volumes(queries2, evidence2, cache=cache)\n mean_vols = []\n id = 0\n for i, v in enumerate(vols):\n mean_vols.append(sum(vols[id: id + n_copies[i]]) / n_copies[i])\n id = id + n_copies[i]\n print(f\"len of queries {len(queries)} and len of mean vols {len(mean_vols)}\")\n return Z, mean_vols", "title": "" }, { "docid": "fde48071d7dc144c0ac2f48d1f434a2d", "score": "0.48503217", "text": "def denoise(self):\n\n # Step 1 : Basic Estimate\n for i, j in product(range(self.N), repeat=2):\n group_x_R_th = self.grouping_from_noisy(i, j)\n tf_3d = self.transformation_3d(group_x_R_th)\n\n thresholded, N_xR_har = self.hard_threshold(tf_3d)\n self.w_th[i, j] = self.weight_th(thresholded, N_xR_har)\n self.th_itf_3d[i, j, :, :] = 
self.itransformation_3d(thresholded)\n\n self.compute_y_basic()\n\n # Step 2 : Final Estimate\n for i, j in product(range(self.N), repeat=2):\n group_xR_noisy = self.grouping_from_noisy(i, j)\n group_xR_basic = self.grouping_from_basic_estimate(i, j)\n\n tf_3d_noisy = self.transformation_3d(group_xR_noisy)\n tf_3d_basic = self.transformation_3d(group_xR_basic)\n\n self.compute_wiener_energy(i, j, group_xR_basic)\n self.w_wie[i, j] = self.weight_wie(i, j)\n\n wienered = self.wiener_filter(tf_3d_noisy, i, j)\n self.wie_itf_3d[i, j, :, :] = self.itransformation_3d(wienered)\n\n self.compute_y_final()\n\n return self.img_final_estimate", "title": "" }, { "docid": "301b33615b8413def631031b5462633f", "score": "0.48389974", "text": "def compute_var(self,X,xTest): \n \n xTest=np.asarray(xTest)\n xTest=np.atleast_2d(xTest)\n if self.kernel_name=='SE':\n #Euc_dist=euclidean_distances(xTest,xTest)\n #KK_xTest_xTest=np.exp(-np.square(Euc_dist)/self.lengthscale)+np.eye(xTest.shape[0])*self.noise_delta\n ur = unique_rows(X)\n X=X[ur]\n if xTest.shape[0]<=800:\n Euc_dist_test_train=euclidean_distances(xTest,X)\n #Euc_dist_test_train=dist(xTest, X, matmul='gemm', method='ext', precision='float32')\n KK_xTest_xTrain=np.exp(-np.square(Euc_dist_test_train)/self.lengthscale)\n else:\n KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))\n\n Euc_dist_train_train=euclidean_distances(X,X)\n self.KK_bucb_train_train=np.exp(-np.square(Euc_dist_train_train)/self.lengthscale)+np.eye(X.shape[0])*self.noise_delta \n else:\n #KK=pdist(xTest,lambda a,b: self.kernel_dist(a,b,self.lengthscale))\n #KK=squareform(KK)\n #KK_xTest_xTest=KK+np.eye(xTest.shape[0])*(1+self.noise_delta)\n ur = unique_rows(X)\n X=X[ur]\n KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))\n self.KK_bucb_train_train=cdist(X,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))+np.eye(X.shape[0])*self.noise_delta\n try:\n temp=np.linalg.solve(self.KK_bucb_train_train,KK_xTest_xTrain.T)\n except:\n temp=np.linalg.lstsq(self.KK_bucb_train_train,KK_xTest_xTrain.T, rcond=-1)\n temp=temp[0]\n \n #var=KK_xTest_xTest-np.dot(temp.T,KK_xTest_xTrain.T)\n var=np.eye(xTest.shape[0])-np.dot(temp.T,KK_xTest_xTrain.T)\n var=np.diag(var)\n var.flags['WRITEABLE']=True\n var[var<1e-100]=0\n return var", "title": "" }, { "docid": "45249fff51ef5a8b3e21e0ab81e8b114", "score": "0.48331782", "text": "def local_opt_kernel(d_next_gen, d_is_elite):\n i = cuda.grid(1)\n if i < d_next_gen.shape[0]:\n if d_is_elite[i] == False:\n improved = True\n count = 0\n while count < d_next_gen.shape[0] and improved:\n improved = local_opt_one_trial(d_next_gen, i)\n count += 1", "title": "" }, { "docid": "162c3e403ca9784eafcd0bd787dd96d3", "score": "0.4826581", "text": "def query(self, var, evidence={}):\n q = self._enumeration_ask(var, evidence).reshape(-1, 1)\n return Variable(var, self.bayesian_network.variables[var].no_states, q)", "title": "" }, { "docid": "9407549c8f8ff19a0d19e72818e3a59e", "score": "0.48209265", "text": "def modelN(G,x=0,params=(50,80,105,71,1,0.01),tf=6,Nt=400,display=False):\n \n a,theta0,theta1,g,k,tau=params\n tarray = np.linspace(0,tf,Nt+1)\n \n #Do the hard work here\n N = len(G.nodes())\n A = nx.adjacency_matrix(G).todense() #adjacency matrix\n B = np.multiply(A, sum(A)).transpose() #ij entry is qi*Aij\n F = tau*np.nan_to_num(np.divide(B, sum(B))) #ij entry is as needed for F. 
nan_to_num fixed any division by zero\n bigmat = sparse.block_diag((F, F, F)).toarray()\n \n #To compute RHS of equations\n def RHS(y,t):\n \"\"\"Compute RHS of model at time t\n input: y should be a 3N x 1 array containing with\n y[:N],y[N:2*N],y[2*N:3*N] corresponding to\n S on nodes 0 to N-1, I on nodes 0 to N-1, and\n V on nodes 0 to N-1, respectively.\n output: dy: also a 3N x 1 array corresponding to dy/dt\n\n ---Discussion---\n \n Once the right-hand side of the equations are written in vector form,\n they can be greatly simplified.\n For instance, the Fji's in the summation terms all become tau, so here\n these terms are added in one go at the end.\n Similarly 'bigmat' is a (pre-calculated) block-diagonal matrix of three\n copies of F, which allows us to quickly add the first term from each\n summation in one go.\n All that remains are the non-summation terms, which can be quickly\n formed with some vector additions and subtractions.\n \n dSi/dt operation count estimation:\n Extract S from y: N operations\n Preallocate dy: N\n Find theta and theta*S*V: these do not contribute to finding dSi/dt\n a*I: N\n (g+k): 1\n (g+k)*S: N\n a*I - (g+k)*S: N\n tau*y: N\n bigmat[:N,:].dot(S): 3N*N = 3N^2\n Add results in 'return' line: 2N\n \n Total: 3N^2 + 8N + 1\n\n We could also include some non-summation terms in bigmat, such as the\n (g+k)Si term in dSi/dt. This would remove 2N+1 operations from the above,\n but would require many more (O(N^2)) operations to form the resulting\n bigmat in each call to the function (even if using scipy.sparse).\n In the approach below, as much as possible is put in to the matrix before\n the function call, so this takes fewer operations.\n \"\"\"\n \n S, I, V = y[:N], y[N:2*N], y[2*N:] #vectors S, I, V\n theta = theta0 + theta1*(1-np.sin(2*np.pi*t))\n dy = np.zeros(3*N) #output ready to be filled in\n tSV = theta*S*V #this term appears twice, calculate it here once\n \n dy[:N] = a*I - (g+k)*S\n dy[N:2*N] = tSV - (k+a)*I\n dy[2*N:] = k*(1-V) - tSV\n \n return dy - tau*y + bigmat.dot(y)\n \n #Set initial conditions and solve\n y0 = 3*N*[0]\n y0[x], y0[x+N], y0[x+2*N] = 0.1, 0.05, 0.05\n sol = odeint(RHS, y0, tarray)\n \n #Find mean and variance of S across all nodes, for all times\n Smean = [sol[i][:N].mean() for i in range(Nt+1)]\n Svar = [sol[i][:N].var() for i in range(Nt+1)]\n \n #Plots\n if display:\n for (stat, name) in [(Smean, 'Mean value'), (Svar, 'Variance')]:\n plt.figure(figsize=(8,8))\n plt.plot(tarray, stat)\n plt.title(name + ' of S across all nodes against time \\n Christophe Jefferies \\n Plot by modelN')\n plt.xlabel('Time')\n plt.ylabel(name + ' of S across all nodes')\n plt.show()\n\n return Smean, Svar", "title": "" }, { "docid": "921008042693513beb57f1d21ca422e3", "score": "0.47543892", "text": "def computeRmse(model, data, n):", "title": "" }, { "docid": "0bcf3cbcb6e1887a52938312d9cad607", "score": "0.4747991", "text": "def dqn(\n env,\n brain_name,\n state_size=37,\n action_size=4,\n name=\"\",\n break_early=False,\n solved_threshold=13.0,\n n_episodes=1800, \n max_t=1000, \n eps_start=1.0, \n eps_end=0.01, \n eps_decay=0.995,\n batch_size=64,\n update_every=4,\n fc1_units=64,\n fc2_units=64,\n lr=5e-4,\n gamma=0.99,\n tau=1e-3\n ):\n \n params = {key: val for key, val in locals().items() if key != \"env\"}\n # print the set of parameters used in this call\n print(json.dumps(params, indent=2, default=str), end=\"\\r\")\n \n # initialize agent\n agent = Agent(\n state_size=state_size, \n action_size=action_size, \n seed=0, \n 
batch_size=batch_size,\n gamma=gamma,\n tau=tau,\n lr=lr,\n update_every=update_every,\n fc1_units=fc1_units, \n fc2_units=fc2_units\n )\n\n\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name]\n state = env_info.vector_observations[0]\n score = 0\n for t in range(max_t):\n action = agent.act(state, eps)\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations[0]\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n mean_score = np.mean(scores_window)\n print(f'\\rEpisode {i_episode}\\tAverage Score: {round(mean_score, 2)}', end=\"\")\n if mean_score >= solved_threshold:\n print(f'\\nEnvironment solved in {i_episode} episodes!\\tAverage Score: {round(mean_score, 2)}')\n torch.save(agent.qnetwork_local.state_dict(), f'checkpoint{name}.pth')\n if break_early == True:\n break\n\n\n return scores", "title": "" }, { "docid": "3875d3b991d05cfcd860e04f2ba5962a", "score": "0.4731487", "text": "def dyn_entropy_based_dyn(deltavars, epsilon=10**-10):\n outlets = deltavars['outlets']\n A = deltavars['A_w_trans'].copy()\n\n # Fluxes at each node F and subnetworks subN\n F = deltavars['F_w_trans'].copy()\n SubN = deltavars['SubN_w_trans'].copy()\n F = F / np.sum(F)\n # Fluxes at links\n L_F = np.matmul(A, np.diag(F))\n\n DMI = np.empty((SubN.shape[1], 2))\n DCE = np.empty((SubN.shape[1], 2))\n for i in range(SubN.shape[1]):\n\n # Nodes that belong to subnetwork i\n nodes_in = np.where(SubN[:, i] > epsilon)[0]\n # Nodes that don't belong to subnetwork i\n nodes_out = np.where(SubN[:, i] < epsilon)[0]\n outlet_SubN = list(set(outlets).intersection(set(nodes_in)))[0]\n # Fluxes within subnetwork i - remove nodes_out\n subN_F = L_F.copy()\n subN_F[:, nodes_out] = 0\n subN_F[nodes_out, :] = 0\n\n # Compute fluxes leaving (Fn_out) and entering (Fn_in) each node in\n # the subnetwork, and total flux in the subnetwork (FS)\n Fn_out = np.sum(subN_F, axis=0)\n Fn_in = np.sum(subN_F, axis=1)\n FS = np.sum(subN_F)\n\n # Normalize all fluxes by FS\n subN_F = subN_F / FS\n Fn_out = Fn_out / FS\n Fn_in = Fn_in / FS\n\n # Compute TMI and TCE\n DMI_sum = 0\n DCE_sum = 0\n for ni in nodes_in:\n downN = np.where(subN_F[:, ni] > 0)[0]\n if len(downN) != 0:\n for d in downN:\n DMI_sum = DMI_sum + subN_F[d, ni] * \\\n np.log2(subN_F[d, ni] / (Fn_in[d] * Fn_out[ni]))\n DCE_sum = DCE_sum - subN_F[d, ni] * \\\n np.log2(subN_F[d, ni] * subN_F[d, ni] / Fn_in[d] / Fn_out[ni])\n DMI[i, 0] = outlet_SubN\n DMI[i, 1] = DMI_sum\n DCE[i, 0] = outlet_SubN\n DCE[i, 1] = DCE_sum\n\n return DMI, DCE", "title": "" }, { "docid": "173cb60b5984af661c3dac4f55cdbe96", "score": "0.47150058", "text": "def _variational_inference_function(inputs, nb_of_samples):\n samples = [variational_base_function([inputs, 1]) for _ in range(nb_of_samples)]\n return numpy.concatenate(samples)", "title": "" }, { "docid": "42a55715c527cd431aedb748dd9d35b0", "score": "0.47088823", "text": "def input_prune(cls, model: NNCFNetwork, node: NNCFNode, graph: NNCFGraph):", "title": "" }, { "docid": "76d9544fdae2e1de44329eb4cae69d0a", "score": "0.4697006", "text": "def 
decon_study(study, test_dataset = False, TM = False,TrainedNoiseModel = None):\n cpm = h5py.File(file_name, 'r')\n ext = study # for ease of readability? not sure if it makes things more or\n# # less confusing\n# # TODO : perhaps split into multiple functions?\n if test_dataset:\n studies = np.array(cpm.get('study')).astype(str)[0:150]\n else:\n studies = np.array(cpm.get('study')).astype(str)\n cpm.close()\n\n a = set(studies) # set of all studies\n s = set([study]) # set with single element - study of interest\n m = a - s # set of all studies except study of interest\n\n # converts sets into lists of indices (artifact of old process)\n query_list = []\n reference_list = []\n for i in range(len(studies)):\n if studies[i] in m:\n reference_list.append(i)\n else:\n query_list.append(i)\n # handle single experiment-studies by duplicating query, as DeconRNASeq\n # requires at least two queries to run\n single_exp = len(query_list) == 1\n if single_exp:\n query_list *= 2\n\n # write tsvs, build dictionary\n info = write_tsvs(reference_list, query_list, ext)\n info[\"labeled_type\"] = [\n [co.get_term_name(celltype) for celltype in\n exp_to_celltypes(exp_acc[i])] for i in query_list] # TEST\n info[\"study_id\"] = study\n query_file = decon_temp_shell + \"query_\" + ext + \".tsv\"\n reference_file = decon_temp_shell + \"reference_\" + ext + \".tsv\"\n \n # return None\n decon_fileNormal = decon_temp_shell + \"results_Normal_\" + ext + \".tsv\"\n decon_fileWeight = decon_temp_shell + \"results_Weight_\" + ext + \".tsv\"\n \n# # Deconvolution\n result = deconrnaseq(query_file, reference_file, use_scale = False)\n result[0].to_csv(decon_fileNormal, sep=\"\\t\")\n \n if TM == True:\n result = deconrnaseqweight(query_file, reference_file, use_scale = False, Trainmodel = True, ProvideTM = TrainedNoiseModel)\n result[0].to_csv(decon_fileWeight, sep=\"\\t\")\n\n # delete duplicate query from results file\n# if single_exp:\n# trim_single_exp(ext)\n\n # generate nonzero tsv for each study. 
if successfully completed, delete\n # query and reference tsvs\n try:\n study_nonzero(info)\n os.remove(query_file)\n os.remove(reference_file)\n except IndexError:\n print(str(IndexError)) # TODO: get a better error message.\n # why does this print '<class IndexError>'?\n\n # I'm keeping this return statement for debugging purposes\n # TODO : delete eventually\n return info", "title": "" }, { "docid": "633e67186e246c6b1578140896950433", "score": "0.46932667", "text": "def vary_add_noise(n_reps=10):\n p = {}\n p['n_input_graphs'] = 4\n p['duplicates'] = 5\n p['density_multiplier'] = 1\n p['p_keep_edge'] = 0.8\n p['g'] = 0.5\n p['f'] = 2\n p['gap_cost'] = p['f']\n p['n_entities'] = 50\n p['n_input_graph_nodes'] = 50\n p['max_iters'] = 500\n\n varied_param = 'density_multiplier'\n p[varied_param] = [1, 1.1, 1.2, 1.4, 1.6, 1.8, 2, 2.5, 3]\n\n experiment_template(n_reps, p, varied_param, cv=False, title='add_noise')", "title": "" }, { "docid": "ee0cf1048008f44719e6e978f53e0c15", "score": "0.4687958", "text": "def select_neurons(self, domain):\n \n if self.filter_type == \"gabor\":\n h_alpha,h_l,h_x0,h_y0,h_ab,self.KAPPA = self.read_vrf()\n else:\n h_alpha,h_x0,h_y0,self.sigma_center,self.sigma_surround = self.read_vrf()\n \n allind = np.arange(self.num_neurons)\n \n Sx = domain[1]-domain[0]\n Sy = domain[3]-domain[2]\n \n shiftx = domain[0] + Sx / 2\n shifty = domain[2] + Sy / 2 \n \n self.choose = allind[(h_x0 > domain[0]-0.01) & (h_x0 < domain[1] +0.01) & (h_y0 > domain[2]-0.01) & (h_y0 < domain[3]+0.01)]\n self.alpha = h_alpha[self.choose]\n \n self.x0 = h_x0[self.choose] - shiftx\n \n self.y0 = h_y0[self.choose] - shifty\n \n if self.filter_type == \"gabor\":\n self.l = h_l[self.choose]\n self.ab = h_ab[self.choose]\n \n self.decode_neurons = self.choose.size", "title": "" }, { "docid": "119efd402e5a9130a6aa5e62a900a45c", "score": "0.46868175", "text": "def query_test(query_json_file, limit, weighting_keys, k_depth, discount, write_graph=False):\n munger = get_query_munger(query_json_file, limit=limit)\n eval_and_sum(munger, discount=discount, k_depth=k_depth, weighting_keys=weighting_keys, write_graph=write_graph)", "title": "" }, { "docid": "ccdede4a99d05eb4f4d88347db93f12d", "score": "0.4682417", "text": "def _fetch_eliminated_nodes_value(self):\n\n def _for_reshape():\n \"\"\"Do reshape nodes.\"\"\"\n nonlocal self\n output_tensors = []\n if not self.dynamic_reshape_node:\n return\n for node in self.dynamic_reshape_node:\n shape_ref = self._nodes_dict[node].input_name_list[1]\n output_tensors.append(shape_ref)\n feed_dict = build_feed_dict(self.model, self.input_nodes)\n fetch_dict = fetch_output_from_onnx_model(self.model, feed_dict=feed_dict, output_nodes=output_tensors)\n for opt_tensor_name, value in fetch_dict.items():\n self.tensors_dict[opt_tensor_name] = OnnxTensor(value, opt_tensor_name)\n\n def _for_resize():\n \"\"\"Do resize nodes.\"\"\"\n nonlocal self\n output_tensors = []\n if not self.dynamic_resize_node:\n return\n for node in self.dynamic_resize_node:\n shape_ref = self._nodes_dict[node].input_name_list[3]\n output_tensors.append(shape_ref)\n feed_dict = build_feed_dict(self.model, self.input_nodes)\n fetch_dict = fetch_output_from_onnx_model(self.model, feed_dict=feed_dict, output_nodes=output_tensors)\n for opt_tensor_name, value in fetch_dict.items():\n self.tensors_dict[opt_tensor_name] = OnnxTensor(value, opt_tensor_name)\n\n _for_reshape()\n _for_resize()", "title": "" }, { "docid": "f712f9cb56a5cc64e52a287d5ddace0f", "score": "0.4672682", "text": "def 
dynamics(\n adata: AnnData,\n filter_gene_mode: Literal[\"final\", \"basic\", \"no\"] = \"final\",\n use_smoothed: bool = True,\n assumption_mRNA: Literal[\"ss\", \"kinetic\", \"auto\"] = \"auto\",\n assumption_protein: Literal[\"ss\"] = \"ss\",\n model: Literal[\"auto\", \"deterministic\", \"stochastic\"] = \"auto\",\n est_method: Literal[\"ols\", \"rlm\", \"ransac\", \"gmm\", \"negbin\", \"auto\", \"twostep\", \"direct\"] = \"auto\",\n NTR_vel: bool = False,\n group: Optional[str] = None,\n protein_names: Optional[List[str]] = None,\n concat_data: bool = False,\n log_unnormalized: bool = True,\n one_shot_method: Literal[\"combined\", \"sci-fate\", \"sci_fate\"] = \"combined\",\n fraction_for_deg: bool = False,\n re_smooth: bool = False,\n sanity_check: bool = False,\n del_2nd_moments: Optional[bool] = None,\n cores: int = 1,\n tkey: str = None,\n **est_kwargs,\n) -> AnnData:\n\n del_2nd_moments = DynamoAdataConfig.use_default_var_if_none(\n del_2nd_moments, DynamoAdataConfig.DYNAMICS_DEL_2ND_MOMENTS_KEY\n )\n if \"pp\" not in adata.uns_keys():\n raise ValueError(f\"\\nPlease run `dyn.pp.receipe_monocle(adata)` before running this function!\")\n if tkey is None:\n tkey = adata.uns[\"pp\"][\"tkey\"]\n (experiment_type, has_splicing, has_labeling, splicing_labeling, has_protein,) = (\n adata.uns[\"pp\"][\"experiment_type\"],\n adata.uns[\"pp\"][\"has_splicing\"],\n adata.uns[\"pp\"][\"has_labeling\"],\n adata.uns[\"pp\"][\"splicing_labeling\"],\n adata.uns[\"pp\"][\"has_protein\"],\n )\n\n X_data, X_fit_data = None, None\n filter_list, filter_gene_mode_list = (\n [\n \"use_for_pca\",\n \"pass_basic_filter\",\n \"no\",\n ],\n [\"final\", \"basic\", \"no\"],\n )\n filter_checker = [i in adata.var.columns for i in filter_list[:2]]\n filter_checker.append(True)\n filter_id = filter_gene_mode_list.index(filter_gene_mode)\n which_filter = np.where(filter_checker[filter_id:])[0][0] + filter_id\n\n filter_gene_mode = filter_gene_mode_list[which_filter]\n\n valid_bools = get_valid_bools(adata, filter_gene_mode)\n gene_num = sum(valid_bools)\n if gene_num == 0:\n raise Exception(f\"no genes pass filter. Try resetting `filter_gene_mode = 'no'` to use all genes.\")\n\n if model.lower() == \"auto\":\n model = \"stochastic\"\n model_was_auto = True\n else:\n model_was_auto = False\n\n if tkey is not None:\n if adata.obs[tkey].max() > 60:\n main_warning(\n \"Looks like you are using minutes as the time unit. For the purpose of numeric stability, \"\n \"we recommend using hour as the time unit.\"\n )\n\n if model.lower() == \"stochastic\" or use_smoothed or re_smooth:\n M_layers = [i for i in adata.layers.keys() if i.startswith(\"M_\")]\n\n if len(M_layers) < 2 or re_smooth:\n main_info(\"removing existing M layers:%s...\" % (str(list(M_layers))), indent_level=2)\n for i in M_layers:\n del adata.layers[i]\n main_info(\"making adata smooth...\", indent_level=2)\n\n if group is not None and group in adata.obs.columns:\n moments(adata, genes=valid_bools, group=group)\n else:\n moments(adata, genes=valid_bools, group=tkey)\n elif tkey is not None:\n main_warning(\n f\"You used tkey {tkey} (or group {group}), but you have calculated local smoothing (1st moment) \"\n f\"for your data before. Please ensure you used the desired tkey or group when the smoothing was \"\n f\"performed. 
Try setting re_smooth = True if not sure.\"\n )\n\n valid_adata = adata[:, valid_bools].copy()\n if group is not None and group in adata.obs.columns:\n _group = adata.obs[group].unique()\n if any(adata.obs[group].value_counts() < 50):\n main_warning(\n f\"Note that some groups have less than 50 cells, this may lead to the velocities for some \"\n f\"cells are all NaN values and cause issues for all downstream analysis. Please try to \"\n f\"coarse-grain cell groupings. Cell number for each group are {adata.obs[group].value_counts()}\"\n )\n\n else:\n _group = [\"_all_cells\"]\n\n for cur_grp_i, cur_grp in enumerate(_group):\n if cur_grp == \"_all_cells\":\n kin_param_pre = \"\"\n cur_cells_bools = np.ones(valid_adata.shape[0], dtype=bool)\n subset_adata = valid_adata[cur_cells_bools]\n else:\n kin_param_pre = str(group) + \"_\" + str(cur_grp) + \"_\"\n cur_cells_bools = (valid_adata.obs[group] == cur_grp).values\n subset_adata = valid_adata[cur_cells_bools]\n\n if model.lower() == \"stochastic\" or use_smoothed:\n moments(subset_adata)\n (\n U,\n Ul,\n S,\n Sl,\n P,\n US,\n U2,\n S2,\n t,\n normalized,\n ind_for_proteins,\n assump_mRNA,\n ) = get_data_for_kin_params_estimation(\n subset_adata,\n has_splicing,\n has_labeling,\n model,\n use_smoothed,\n tkey,\n protein_names,\n log_unnormalized,\n NTR_vel,\n )\n\n valid_bools_ = valid_bools.copy()\n if sanity_check and experiment_type.lower() in [\"kin\", \"deg\"]:\n indices_valid_bools = np.where(valid_bools)[0]\n t, L = (\n t.flatten(),\n (0 if Ul is None else Ul) + (0 if Sl is None else Sl),\n )\n t_uniq = np.unique(t)\n\n valid_gene_checker = np.zeros(gene_num, dtype=bool)\n for L_iter, cur_L in tqdm(\n enumerate(L),\n desc=f\"sanity check of {experiment_type} experiment data:\",\n ):\n cur_L = cur_L.A.flatten() if issparse(cur_L) else cur_L.flatten()\n y = strat_mom(cur_L, t, np.nanmean)\n slope, _ = fit_linreg(t_uniq, y, intercept=True, r2=False)\n valid_gene_checker[L_iter] = (\n True\n if (slope > 0 and experiment_type == \"kin\") or (slope < 0 and experiment_type == \"deg\")\n else False\n )\n valid_bools_[indices_valid_bools[~valid_gene_checker]] = False\n main_warning(f\"filtering {gene_num - valid_gene_checker.sum()} genes after sanity check.\")\n\n if len(valid_bools_) < 5:\n raise Exception(\n f\"After sanity check, you have less than 5 valid genes. 
Something is wrong about your \"\n f\"metabolic labeling experiment!\"\n )\n\n U, Ul, S, Sl = (\n (None if U is None else U[valid_gene_checker, :]),\n (None if Ul is None else Ul[valid_gene_checker, :]),\n (None if S is None else S[valid_gene_checker, :]),\n (None if Sl is None else Sl[valid_gene_checker, :]),\n )\n subset_adata = subset_adata[:, valid_gene_checker]\n adata.var[kin_param_pre + \"sanity_check\"] = valid_bools_\n\n if assumption_mRNA.lower() == \"auto\":\n assumption_mRNA = assump_mRNA\n if experiment_type.lower() == \"conventional\":\n assumption_mRNA = \"ss\"\n elif experiment_type.lower() in [\"mix_pulse_chase\", \"deg\", \"kin\"]:\n assumption_mRNA = \"kinetic\"\n\n if model.lower() == \"stochastic\" and experiment_type.lower() not in [\n \"conventional\",\n \"kinetics\",\n \"degradation\",\n \"kin\",\n \"deg\",\n \"one-shot\",\n ]:\n \"\"\"\n # temporially convert to deterministic model as moment model for mix_std_stm\n and other types of labeling experiment is ongoing.\"\"\"\n\n model = \"deterministic\"\n\n if model_was_auto and experiment_type.lower() in [\n \"kinetic\",\n \"kin\",\n \"degradation\",\n \"deg\",\n ]:\n model = \"deterministic\"\n\n if assumption_mRNA.lower() == \"ss\" or (experiment_type.lower() in [\"one-shot\", \"mix_std_stm\"]):\n if est_method.lower() == \"auto\":\n est_method = \"gmm\" if model.lower() == \"stochastic\" else \"ols\"\n\n if experiment_type.lower() == \"one-shot\":\n beta = subset_adata.var.beta if \"beta\" in subset_adata.var.keys() else None\n gamma = subset_adata.var.gamma if \"gamma\" in subset_adata.var.keys() else None\n ss_estimation_kwargs = {\"beta\": beta, \"gamma\": gamma}\n else:\n ss_estimation_kwargs = {}\n\n est = ss_estimation(\n U=U.copy() if U is not None else None,\n Ul=Ul.copy() if Ul is not None else None,\n S=S.copy() if S is not None else None,\n Sl=Sl.copy() if Sl is not None else None,\n P=P.copy() if P is not None else None,\n US=US.copy() if US is not None else None,\n S2=S2.copy() if S2 is not None else None,\n conn=subset_adata.obsp[\"moments_con\"],\n t=t,\n ind_for_proteins=ind_for_proteins,\n model=model,\n est_method=est_method,\n experiment_type=experiment_type,\n assumption_mRNA=assumption_mRNA,\n assumption_protein=assumption_protein,\n concat_data=concat_data,\n cores=cores,\n **ss_estimation_kwargs,\n ) # U: (unlabeled) unspliced; S: (unlabeled) spliced; U / Ul: old and labeled; U, Ul, S, Sl: uu/ul/su/sl\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n if experiment_type.lower() in [\"one-shot\", \"one_shot\"]:\n est.fit(one_shot_method=one_shot_method, **est_kwargs)\n else:\n # experiment_type can be `kin` also and by default use\n # conventional method to estimate k but correct for time\n est.fit(**est_kwargs)\n\n alpha, beta, gamma, eta, delta = est.parameters.values()\n\n U, S = get_U_S_for_velocity_estimation(\n subset_adata,\n use_smoothed,\n has_splicing,\n has_labeling,\n log_unnormalized,\n NTR_vel,\n )\n vel = Velocity(estimation=est)\n\n if experiment_type.lower() in [\n \"one_shot\",\n \"one-shot\",\n \"kin\",\n \"mix_std_stm\",\n ]:\n U_, S_ = get_U_S_for_velocity_estimation(\n subset_adata,\n use_smoothed,\n has_splicing,\n has_labeling,\n log_unnormalized,\n not NTR_vel,\n )\n\n # also get vel_N and vel_T\n if NTR_vel:\n if has_splicing:\n if experiment_type == \"kin\":\n Kc = np.clip(gamma[:, None], 0, 1 - 1e-3) # S - U slope\n gamma_ = -(np.log(1 - Kc) / t[None, :]) # actual gamma\n\n vel_U = U.multiply(csr_matrix(gamma_ / Kc)) - 
csr_matrix(beta).multiply(U_) # vel.vel_s(U_)\n vel_S = vel.vel_s(U_, S_)\n\n vel_N = (U - csr_matrix(Kc).multiply(U)).multiply(csr_matrix(gamma_ / Kc)) # vel.vel_u(U)\n # scale back to true velocity via multiplying \"gamma_ / Kc\".\n vel_T = (U - csr_matrix(Kc).multiply(S)).multiply(csr_matrix(gamma_ / Kc))\n elif experiment_type == \"mix_std_stm\":\n # steady state RNA: u0, stimulation RNA: u_new;\n # cell-wise transcription rate under simulation: alpha1\n u0, u_new, alpha1 = solve_alpha_2p_mat(\n t0=np.max(t) - t,\n t1=t,\n alpha0=alpha[0],\n beta=beta,\n u1=U,\n )\n vel_U = alpha1 - csr_matrix(beta[:, None]).multiply(U_)\n vel_S = vel.vel_s(U_, S_)\n\n vel_N = alpha1 - csr_matrix(gamma[:, None]).multiply(u_new)\n vel_T = alpha1 - csr_matrix(beta[:, None]).multiply(S)\n else:\n vel_U = vel.vel_u(U_)\n vel_S = vel.vel_s(U_, S_)\n vel_N = vel.vel_u(U)\n vel_T = vel.vel_s(U, S - U) # need to consider splicing\n else:\n if experiment_type == \"kin\":\n vel_U = np.nan\n vel_S = np.nan\n\n Kc = np.clip(gamma[:, None], 0, 1 - 1e-3) # S - U slope\n gamma_ = -(np.log(1 - Kc) / t[None, :]) # actual gamma\n vel_N = (U - csr_matrix(Kc).multiply(U)).multiply(csr_matrix(gamma_ / Kc)) # vel.vel_u(U)\n # scale back to true velocity via multiplying \"gamma_ / Kc\".\n vel_T = (U - csr_matrix(Kc).multiply(S)).multiply(csr_matrix(gamma_ / Kc))\n elif experiment_type == \"mix_std_stm\":\n vel_U = np.nan\n vel_S = np.nan\n\n # steady state RNA: u0, stimulation RNA: u_new;\n # cell-wise transcription rate under simulation: alpha1\n u0, u_new, alpha1 = solve_alpha_2p_mat(\n t0=np.max(t) - t,\n t1=t,\n alpha0=alpha[0],\n beta=gamma,\n u1=U,\n )\n\n vel_N = alpha1 - csr_matrix(gamma[:, None]).multiply(u_new)\n vel_T = alpha1 - csr_matrix(gamma[:, None]).multiply(S)\n else:\n vel_U = np.nan\n vel_S = np.nan\n vel_N = vel.vel_u(U)\n vel_T = vel.vel_u(S) # don't consider splicing\n else:\n if has_splicing:\n if experiment_type == \"kin\":\n Kc = np.clip(gamma[:, None], 0, 1 - 1e-3) # S - U slope\n gamma_ = -(np.log(1 - Kc) / t[None, :]) # actual gamma\n\n vel_U = U_.multiply(csr_matrix(gamma_ / Kc) - csr_matrix(beta).multiply(U)) # vel.vel_u(U)\n vel_S = vel.vel_s(U, S)\n\n vel_N = (U_ - csr_matrix(Kc).multiply(U_)).multiply(\n csr_matrix(gamma_ / Kc)\n ) # vel.vel_u(U_)\n # scale back to true velocity via multiplying \"gamma_ / Kc\".\n vel_T = (U_ - csr_matrix(Kc).multiply(S_)).multiply(csr_matrix(gamma_ / Kc))\n elif experiment_type == \"mix_std_stm\":\n # steady state RNA: u0, stimulation RNA: u_new;\n # cell-wise transcription rate under simulation: alpha1\n u0, u_new, alpha1 = solve_alpha_2p_mat(\n t0=np.max(t) - t,\n t1=t,\n alpha0=alpha[0],\n beta=beta,\n u1=U_,\n )\n\n vel_U = alpha1 - csr_matrix(beta[:, None]).multiply(U)\n vel_S = vel.vel_s(U, S)\n\n vel_N = alpha1 - csr_matrix(gamma[:, None]).multiply(u_new)\n vel_T = alpha1 - csr_matrix(beta[:, None]).multiply(S_)\n\n else:\n vel_U = vel.vel_u(U)\n vel_S = vel.vel_s(U, S)\n vel_N = vel.vel_u(U_)\n vel_T = vel.vel_s(U_, S_ - U_) # need to consider splicing\n else:\n if experiment_type == \"kin\":\n vel_U = np.nan\n vel_S = np.nan\n\n Kc = np.clip(gamma[:, None], 0, 1 - 1e-3) # S - U slope\n gamma_ = -(np.log(1 - Kc) / t[None, :]) # actual gamma\n vel_N = (U_ - csr_matrix(Kc).multiply(U_)).multiply(\n csr_matrix(gamma_ / Kc)\n ) # vel.vel_u(U_)\n # scale back to true velocity via multiplying \"gamma_ / Kc\".\n vel_T = (U_ - csr_matrix(Kc).multiply(S_)).multiply(csr_matrix(gamma_ / Kc))\n elif experiment_type == \"mix_std_stm\":\n vel_U = np.nan\n vel_S = 
np.nan\n\n # steady state RNA: u0, stimulation RNA: u_new;\n # cell-wise transcription rate under simulation: alpha1\n u0, u_new, alpha1 = solve_alpha_2p_mat(\n t0=np.max(t) - t,\n t1=t,\n alpha0=alpha[0],\n beta=gamma,\n u1=U_,\n )\n\n vel_N = alpha1 - csr_matrix(gamma[:, None]).multiply(u_new)\n vel_T = alpha1 - csr_matrix(gamma[:, None]).multiply(S_)\n else:\n vel_U = np.nan\n vel_S = np.nan\n vel_N = vel.vel_u(U_)\n vel_T = vel.vel_u(S_) # don't consider splicing\n else:\n vel_U = vel.vel_u(U)\n vel_S = vel.vel_s(U, S)\n vel_N, vel_T = np.nan, np.nan\n\n vel_P = vel.vel_p(S, P)\n\n adata = set_velocity(\n adata,\n vel_U,\n vel_S,\n vel_N,\n vel_T,\n vel_P,\n _group,\n cur_grp,\n cur_cells_bools,\n valid_bools_,\n ind_for_proteins,\n )\n\n adata = set_param_ss(\n adata,\n est,\n alpha,\n beta,\n gamma,\n eta,\n delta,\n experiment_type,\n _group,\n cur_grp,\n kin_param_pre,\n valid_bools_,\n ind_for_proteins,\n )\n\n elif assumption_mRNA.lower() == \"kinetic\":\n return_ntr = True if fraction_for_deg and experiment_type.lower() == \"deg\" else False\n\n if model_was_auto and experiment_type.lower() == \"kin\":\n model = \"mixture\"\n if est_method == \"auto\":\n est_method = \"direct\"\n data_type = \"smoothed\" if use_smoothed else \"sfs\"\n\n (params, half_life, cost, logLL, param_ranges, cur_X_data, cur_X_fit_data,) = kinetic_model(\n subset_adata,\n tkey,\n model,\n est_method,\n experiment_type,\n has_splicing,\n splicing_labeling,\n has_switch=True,\n param_rngs={},\n data_type=data_type,\n return_ntr=return_ntr,\n **est_kwargs,\n )\n\n if type(params) == dict:\n alpha = params.pop(\"alpha\")\n params = pd.DataFrame(params)\n else:\n alpha = params.loc[:, \"alpha\"].values if \"alpha\" in params.columns else None\n\n len_t, len_g = len(np.unique(t)), len(_group)\n if cur_grp == _group[0]:\n if len_g != 1:\n # X_data, X_fit_data = np.zeros((len_g, adata.n_vars, len_t)), np.zeros((len_g, adata.n_vars,len_t))\n X_data, X_fit_data = [None] * len_g, [None] * len_g\n\n if len(_group) == 1:\n X_data, X_fit_data = cur_X_data, cur_X_fit_data\n else:\n # X_data[cur_grp_i, :, :], X_fit_data[cur_grp_i, :, :] = cur_X_data, cur_X_fit_data\n X_data[cur_grp_i], X_fit_data[cur_grp_i] = (\n cur_X_data,\n cur_X_fit_data,\n )\n\n a, b, alpha_a, alpha_i, beta, gamma = (\n params.loc[:, \"a\"].values if \"a\" in params.columns else None,\n params.loc[:, \"b\"].values if \"b\" in params.columns else None,\n params.loc[:, \"alpha_a\"].values if \"alpha_a\" in params.columns else None,\n params.loc[:, \"alpha_i\"].values if \"alpha_i\" in params.columns else None,\n params.loc[:, \"beta\"].values if \"beta\" in params.columns else None,\n params.loc[:, \"gamma\"].values if \"gamma\" in params.columns else None,\n )\n if alpha is None:\n alpha = fbar(a, b, alpha_a, 0) if alpha_i is None else fbar(a, b, alpha_a, alpha_i)\n all_kinetic_params = [\n \"a\",\n \"b\",\n \"alpha_a\",\n \"alpha_i\",\n \"alpha\",\n \"beta\",\n \"gamma\",\n ]\n\n extra_params = params.loc[:, params.columns.difference(all_kinetic_params)]\n # if alpha = None, set alpha to be U; N - gamma R\n params = {\"alpha\": alpha, \"beta\": beta, \"gamma\": gamma, \"t\": t}\n vel = Velocity(**params)\n # Fix below:\n U, S = get_U_S_for_velocity_estimation(\n subset_adata,\n use_smoothed,\n has_splicing,\n has_labeling,\n log_unnormalized,\n NTR_vel,\n )\n\n U_, S_ = get_U_S_for_velocity_estimation(\n subset_adata,\n use_smoothed,\n has_splicing,\n has_labeling,\n log_unnormalized,\n not NTR_vel,\n )\n\n # also get vel_N and vel_T\n if NTR_vel:\n 
if has_splicing:\n if experiment_type == \"kin\":\n vel_U = vel.vel_u(U_)\n vel_S = vel.vel_s(U_, S_)\n vel.parameters[\"beta\"] = gamma\n vel_N = vel.vel_u(U)\n vel_T = vel.vel_u(S) # no need to consider splicing\n elif experiment_type == \"deg\":\n if splicing_labeling:\n vel_U = np.nan\n vel_S = vel.vel_s(U_, S_)\n vel_N = np.nan\n vel_T = np.nan\n else:\n vel_U = np.nan\n vel_S = vel.vel_s(U_, S_)\n vel_N = np.nan\n vel_T = np.nan\n elif experiment_type in [\"mix_kin_deg\", \"mix_pulse_chase\"]:\n vel_U = vel.vel_u(U_, repeat=True)\n vel_S = vel.vel_s(U_, S_)\n vel.parameters[\"beta\"] = gamma\n vel_N = vel.vel_u(U, repeat=True)\n vel_T = vel.vel_u(S, repeat=True) # no need to consider splicing\n else:\n if experiment_type == \"kin\":\n vel_U = np.nan\n vel_S = np.nan\n\n # calculate cell-wise alpha, if est_method is twostep, this can be skipped\n alpha_ = one_shot_alpha_matrix(U, gamma, t)\n\n vel.parameters[\"alpha\"] = alpha_\n\n vel_N = vel.vel_u(U)\n vel_T = vel.vel_u(S) # don't consider splicing\n elif experiment_type == \"deg\":\n vel_U = np.nan\n vel_S = np.nan\n vel_N = np.nan\n vel_T = np.nan\n elif experiment_type in [\"mix_kin_deg\", \"mix_pulse_chase\"]:\n vel_U = np.nan\n vel_S = np.nan\n vel_N = vel.vel_u(U, repeat=True)\n vel_T = vel.vel_u(S) # don't consider splicing\n else:\n if has_splicing:\n if experiment_type == \"kin\":\n vel_U = vel.vel_u(U)\n vel_S = vel.vel_s(U, S)\n vel.parameters[\"beta\"] = gamma\n vel_N = vel.vel_u(U_)\n vel_T = vel.vel_u(S_) # no need to consider splicing\n elif experiment_type == \"deg\":\n if splicing_labeling:\n vel_U = np.nan\n vel_S = vel.vel_s(U, S)\n vel_N = np.nan\n vel_T = np.nan\n else:\n vel_U = np.nan\n vel_S = vel.vel_s(U, S)\n vel_N = np.nan\n vel_T = np.nan\n elif experiment_type in [\"mix_kin_deg\", \"mix_pulse_chase\"]:\n vel_U = vel.vel_u(U, repeat=True)\n vel_S = vel.vel_s(U, S)\n vel.parameters[\"beta\"] = gamma\n vel_N = vel.vel_u(U_, repeat=True)\n vel_T = vel.vel_u(S_, repeat=True) # no need to consider splicing\n else:\n if experiment_type == \"kin\":\n vel_U = np.nan\n vel_S = np.nan\n\n # calculate cell-wise alpha, if est_method is twostep, this can be skipped\n alpha_ = one_shot_alpha_matrix(U_, gamma, t)\n\n vel.parameters[\"alpha\"] = alpha_\n\n vel_N = vel.vel_u(U_)\n vel_T = vel.vel_u(S_) # need to consider splicing\n elif experiment_type == \"deg\":\n vel_U = np.nan\n vel_S = np.nan\n vel_N = np.nan\n vel_T = np.nan\n elif experiment_type in [\"mix_kin_deg\", \"mix_pulse_chase\"]:\n vel_U = np.nan\n vel_S = np.nan\n vel_N = vel.vel_u(U_, repeat=True)\n vel_T = vel.vel_u(S_, repeat=True) # don't consider splicing\n\n vel_P = vel.vel_p(S, P)\n\n adata = set_velocity(\n adata,\n vel_U,\n vel_S,\n vel_N,\n vel_T,\n vel_P,\n _group,\n cur_grp,\n cur_cells_bools,\n valid_bools_,\n ind_for_proteins,\n )\n\n adata = set_param_kinetic(\n adata,\n alpha,\n a,\n b,\n alpha_a,\n alpha_i,\n beta,\n gamma,\n cost,\n logLL,\n kin_param_pre,\n extra_params,\n _group,\n cur_grp,\n cur_cells_bools,\n valid_bools_,\n )\n # add protein related parameters in the moment model below:\n elif model.lower() == \"model_selection\":\n main_warning(\"Not implemented yet.\")\n\n if group is not None and group in adata.obs[group]:\n uns_key = group + \"_dynamics\"\n else:\n uns_key = \"dynamics\"\n\n if sanity_check and experiment_type in [\"kin\", \"deg\"]:\n sanity_check_cols = adata.var.columns.str.endswith(\"sanity_check\")\n adata.var[\"use_for_dynamics\"] = adata.var.loc[:, sanity_check_cols].sum(1).astype(bool)\n else:\n 
adata.var[\"use_for_dynamics\"] = False\n adata.var.loc[valid_bools, \"use_for_dynamics\"] = True\n\n adata.uns[uns_key] = {\n \"filter_gene_mode\": filter_gene_mode,\n \"t\": t,\n \"group\": group,\n \"X_data\": X_data,\n \"X_fit_data\": X_fit_data,\n \"asspt_mRNA\": assumption_mRNA,\n \"experiment_type\": experiment_type,\n \"normalized\": normalized,\n \"model\": model,\n \"est_method\": est_method,\n \"has_splicing\": has_splicing,\n \"has_labeling\": has_labeling,\n \"splicing_labeling\": splicing_labeling,\n \"has_protein\": has_protein,\n \"use_smoothed\": use_smoothed,\n \"NTR_vel\": NTR_vel,\n \"log_unnormalized\": log_unnormalized,\n \"fraction_for_deg\": fraction_for_deg,\n }\n\n if del_2nd_moments:\n remove_2nd_moments(adata)\n\n return adata", "title": "" }, { "docid": "7d84c3d22a6aee6a06d41c6d512e2c28", "score": "0.4666266", "text": "def query(self, query, k):", "title": "" }, { "docid": "9a1efcb41f39dc72041ff894dcb67320", "score": "0.46553674", "text": "def _default_script_query(\n query_vector: List[float],\n space_type: str = \"l2\",\n pre_filter: Dict = MATCH_ALL_QUERY,\n) -> Dict:\n return {\n \"query\": {\n \"script_score\": {\n \"query\": pre_filter,\n \"script\": {\n \"source\": \"knn_score\",\n \"lang\": \"knn\",\n \"params\": {\n \"field\": \"vector_field\",\n \"query_value\": query_vector,\n \"space_type\": space_type,\n },\n },\n }\n }\n }", "title": "" }, { "docid": "b84c0aa97e2a8f2301f7c4f012ee3a64", "score": "0.46458516", "text": "def ss_denoise(fn, noised, sigma0):\n sigma02 = sigma0 ** 2\n with func.RequiresGradContext(*noised, requires_grad=True):\n fn_val = fn(*noised)\n grads = autograd.grad(fn_val.sum(), noised)\n denoised = [x.detach() - sigma02 * grad for x, grad in zip(noised, grads)]\n return denoised, fn_val", "title": "" }, { "docid": "b288f9b1b9f594bac99fbf82a258176b", "score": "0.46450624", "text": "def predict_unseen(test_query, top_N, training_images, embedding_space, is_gpu):\n if os.path.isfile('checkpoints/neighbor_model.joblib'):\n neighbor_model = load('models/neighbor_model.joblib')\n else:\n neighbor_model = train_neighbor_model(embedding_space)\n \n query_img = tranform_test_img(Image.open(test_query).convert('RGB')).reshape(1, 3, 224, 224)\n \n net = load_net(is_gpu)\n \n query_embed = gen_embedding(net, query_img, is_gpu)\n \n predictions = neighbor_model.kneighbors(query_embed)\n \n plot_results(test_query, top_N, predictions, training_images)", "title": "" }, { "docid": "2ac7b8f5c780569bbf547fbb18e49c04", "score": "0.46435878", "text": "def inference(x, hidden1_units, hidden2_units, hidden3_units, keep_prob):\n # Hidden 1\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.random_normal([N_FEATURES, hidden1_units], stddev=tf.sqrt(2/N_FEATURES)), name = 'weights')\n logits = tf.matmul(x, weights, name = 'logits')\n mean, variance = tf.nn.moments(logits, [0], name = 'moments')\n betas = tf.Variable(tf.zeros([hidden1_units]), name = 'betas')\n normalized_logits = tf.nn.batch_normalization(logits, mean, variance, betas, None, 1e-5, name = 'batch_norm')\n relu = tf.nn.relu(normalized_logits, name = 'relu')\n dropout = tf.nn.dropout(relu, keep_prob, name = 'dropout')\n hidden1 = dropout\n # Hidden 2\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.random_normal([hidden1_units, hidden2_units], stddev=tf.sqrt(2/hidden1_units)), name = 'weights')\n logits = tf.matmul(hidden1, weights, name = 'logits')\n mean, variance = tf.nn.moments(logits, [0], name = 'moments')\n betas = tf.Variable(tf.zeros([hidden2_units]), name = 
'betas')\n normalized_logits = tf.nn.batch_normalization(logits, mean, variance, betas, None, 1e-5, name = 'batch_norm')\n relu = tf.nn.relu(normalized_logits, name = 'relu')\n dropout = tf.nn.dropout(relu, keep_prob, name = 'dropout')\n hidden2 = dropout\n # Hidden 3\n with tf.name_scope('hidden3'):\n weights = tf.Variable(tf.random_normal([hidden2_units, hidden3_units], stddev=tf.sqrt(2/hidden2_units)), name = 'weights')\n logits = tf.matmul(hidden2, weights, name = 'logits')\n mean, variance = tf.nn.moments(logits, [0], name = 'moments')\n betas = tf.Variable(tf.zeros([hidden3_units]), name = 'betas')\n normalized_logits = tf.nn.batch_normalization(logits, mean, variance, betas, None, 1e-5, name = 'batch_norm')\n relu = tf.nn.relu(normalized_logits, name = 'relu')\n dropout = tf.nn.dropout(relu, keep_prob, name = 'dropout')\n hidden3 = dropout\n with tf.name_scope('probabilities'):\n weights = tf.Variable(tf.random_normal([hidden3_units, 2], stddev=tf.sqrt(1/hidden3_units)), name = 'weights')\n biases = tf.Variable(tf.zeros([2]), name = 'bias')\n logits = tf.matmul(hidden3, weights) + biases\n probabilities = tf.nn.softmax(logits, name = 'probabilities')\n return probabilities", "title": "" }, { "docid": "2fcea64e71139a2d2b35b0e0647d36eb", "score": "0.46407858", "text": "def denoise(self, adata, batch_size = 64):\n \n input_ds = simpleloader(adata.layers[\"normalized input\"][:, adata.var['Variance Type'] == 'HVG'], batch_size)\n \n output = np.zeros((adata.shape[0], self.dims[0]), dtype = 'float32')\n start = 0\n \n for x in input_ds:\n end = start + x.shape[0]\n output[start:end] = self(x).numpy()\n start = end\n \n return output", "title": "" }, { "docid": "9e2adf80ad5483e215dd56f69adcca6a", "score": "0.46320724", "text": "def attention(query):\n\t\t\t\tds = []# Results of attention reads will be stored here.\n\t\t\t\taw = []# Attention weights will be stored here\n\t\t\t\ttiled_query = tf.tile(tf.reshape(query, [-1, 1, 1, state_size]),[1,attn_length,1, 1])\n\t\t\t\tprint(hidden.get_shape())\n\t\t\t\tprint(tiled_query.get_shape())\n\t\t\t\tconcat_input = tf.concat(axis=3, values=[hidden, tiled_query])\n\t\t\t\t#concat_input = tf.concat(3, [hidden, hidden])\n\t\t\t\tfor a in xrange(num_heads):\n\t\t\t\t\twith variable_scope.variable_scope(\"Attention_%d\" % a):\n\t\t\t\t\t\ts = None\n\t\t\t\t\t\tif self.hparams.att_strategy == 'multi':\n\t\t\t\t\t\t\tprint('Attention: multiply')\n\t\t\t\t\t\t\ty = linear(query, attention_vec_size, True)\n\t\t\t\t\t\t\ty = tf.reshape(y, [-1, 1, 1, attention_vec_size])\n\t\t\t\t\t\t\t#s = math_ops.reduce_sum(\n\t\t\t\t\t\t\t#\tu[a] * math_ops.tanh(y * hidden_features[a]), [2, 3])\n\t\t\t\t\t\t\ts = math_ops.reduce_sum(\n\t\t\t\t\t\t\t\thidden * math_ops.tanh(y), [2, 3])\n\t\t\t\t\t\t\t\t#hidden_features[a] * math_ops.tanh(y), [2, 3])\n\n\t\t\t\t\t\telif self.hparams.att_strategy == 'multi_add':\n\t\t\t\t\t\t\tprint('Attention: multiply_add')\n\t\t\t\t\t\t\ty = linear(query, attention_vec_size, True, scope='y')\n\t\t\t\t\t\t\ty2 = linear(query, attention_vec_size, True , scope='y2')\n\t\t\t\t\t\t\ty = tf.reshape(y, [-1, 1, 1, attention_vec_size])\n\t\t\t\t\t\t\ty2 = tf.reshape(y2, [-1, 1, 1, attention_vec_size])\n\t\t\t\t\t\t\t#s = math_ops.reduce_sum(\n\t\t\t\t\t\t\t#\tu[a] * math_ops.tanh(y * hidden_features[a]), [2, 3])\n\t\t\t\t\t\t\ts = math_ops.reduce_sum(\n\t\t\t\t\t\t\t\thidden * math_ops.tanh(y2), [2, 3])\n\t\t\t\t\t\t\ts = s + math_ops.reduce_sum(\n\t\t\t\t\t\t\t\tv[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])\n\n\t\t\t\t\t\telif 
self.hparams.att_strategy == 'NTN':\n\t\t\t\t\t\t\tprint('Attention: NTN')\n\t\t\t\t\t\t\ty = linear(query, attn_size, False)\n\t\t\t\t\t\t\ty = tf.tile(tf.reshape(y, [-1, 1, 1, attn_size]),[1,attn_length,1,1])\n\t\t\t\t\t\t\ts = math_ops.reduce_sum(hidden * y, [2,3]) #bilnear\n\t\t\t\t\t\t\ts = s + math_ops.reduce_sum(nn_ops.conv2d(concat_input, linear_w[a], [1, 1, 1, 1], \"SAME\"), [2,3]) #linear\n\t\t\t\t\t\t\ts = s + linear_b[a] #bias\n\t\t\t\t\t\t\t#print(s.get_shape())\n\t\t\t\t\t\t\t#s = tf.tanh(s) #non linear\n\n\t\t\t\t\t\telif self.hparams.att_strategy == 'elu':\n\t\t\t\t\t\t\tprint('Attention: elu')\n\n\t\t\t\t\t\t\tcur_input = concat_input\n\t\t\t\t\t\t\t#for i in xrange(len(abstract_layers)):\n\t\t\t\t\t\t\t#\tcur_input = tf.contrib.layers.fully_connected(cur_input, abstract_layers[i], activation_fn=tf.nn.elu)\n\t\t\t\t\t\t\tfor i in xrange(len(abstract_layers)):\n\t\t\t\t\t\t\t\tcur_input = nn_ops.conv2d(cur_input, abstract_w[a][i], [1, 1, 1, 1], \"SAME\")\n\t\t\t\t\t\t\t\tcur_input = cur_input + abstract_b[a][i]\n\t\t\t\t\t\t\t\tcur_input = tf.nn.elu(cur_input)\n\t\t\t\t\t\t\ts = math_ops.reduce_sum(cur_input,[2,3])\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint('Attention: add')\n\t\t\t\t\t\t\ty = linear(query, attention_vec_size, True)\n\t\t\t\t\t\t\ty = tf.reshape(y, [-1, 1, 1, attention_vec_size])\n\t\t\t\t\t\t\ts = math_ops.reduce_sum(\n\t\t\t\t\t\t\t\tv[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])\n\n\t\t\t\t\t\tatt = s * head_weights[a]#nn_ops.softmax(s)\n\t\t\t\t\t\taw.append(att)\n\t\t\t\t\t\t# Now calculate the attention-weighted vector d.\n\t\t\t\t\t\td = math_ops.reduce_sum(\n\t\t\t\t\t\t\ttf.reshape(att, [-1, attn_length, 1, 1]) * hidden,\n\t\t\t\t\t\t\t\t[1, 2])\n\t\t\t\t\t\tds.append(tf.reshape(d, [-1, attn_size]))\n\t\t\t\treturn aw, ds", "title": "" }, { "docid": "f410b9c0df08f777897137feb15878c1", "score": "0.4627584", "text": "def optimize(node, environment):\r\n optimizer = Optimizer(environment)\r\n return optimizer.visit(node)", "title": "" }, { "docid": "d8c744bfc5115e7f7b640755c4458940", "score": "0.4616026", "text": "def quantum_neural_net(var, x=None):\n # Encode input x into quantum state\n qml.Displacement(x[0], 0., wires=0)\n qml.Displacement(x[1], 0., wires=1)\n\n # \"layer\" subcircuits\n for v in var:\n layer(v)\n\n return qml.expval.X(0)", "title": "" }, { "docid": "2388813f42aeb0dd90edf4a05cea6cb2", "score": "0.45847458", "text": "def vars_without_dep_on(self, dep_graph, exclude):\n nodes = set(dep_graph.nodes())\n # remove 'exlude' vars and anything that depends on them\n for var in exclude:\n nodes.discard(var)\n nodes -= networkx.descendants(dep_graph, var)\n return self.linearize_vars(nodes)", "title": "" }, { "docid": "454ebe05e8211eb536ad8dd6673f717d", "score": "0.45847222", "text": "def mitigate_with_zne(\n circuit: Union[QNode, QuantumTape],\n scale_factors: Sequence[float],\n folding: callable,\n extrapolate: callable,\n folding_kwargs: Optional[Dict[str, Any]] = None,\n extrapolate_kwargs: Optional[Dict[str, Any]] = None,\n reps_per_factor=1,\n) -> float:\n folding_kwargs = folding_kwargs or {}\n extrapolate_kwargs = extrapolate_kwargs or {}\n\n if isinstance(folding, qml.batch_transform):\n folding = fold_global_tape\n\n tape = circuit.expand(stop_at=lambda op: not isinstance(op, QuantumScript))\n script_removed = QuantumScript(tape.operations[tape.num_preps :])\n\n tapes = [\n [folding(script_removed, s, **folding_kwargs) for _ in range(reps_per_factor)]\n for s in scale_factors\n ]\n\n tapes = [tape_ for tapes_ in tapes for 
tape_ in tapes_] # flattens nested list\n prep_ops = tape.operations[: tape.num_preps]\n out_tapes = [QuantumScript(prep_ops + tape_.operations, tape.measurements) for tape_ in tapes]\n\n def processing_fn(results):\n \"\"\"Maps from input tape executions to an error-mitigated estimate\"\"\"\n for i, tape in enumerate(out_tapes):\n # stack the results if there are multiple measurements\n # this will not create ragged arrays since only expval measurements are allowed\n if len(tape.observables) > 1:\n results[i] = qml.math.stack(results[i])\n\n # Averaging over reps_per_factor repetitions\n results_flattened = []\n for i in range(0, len(results), reps_per_factor):\n # The stacking ensures the right interface is used\n # averaging over axis=0 is critical because the qnode may have multiple outputs\n results_flattened.append(mean(qml.math.stack(results[i : i + reps_per_factor]), axis=0))\n\n extrapolated = extrapolate(scale_factors, results_flattened, **extrapolate_kwargs)\n\n extrapolated = extrapolated[0] if shape(extrapolated) == (1,) else extrapolated\n\n # unstack the results in the case of multiple measurements\n return extrapolated if shape(extrapolated) == () else tuple(qml.math.unstack(extrapolated))\n\n return out_tapes, processing_fn", "title": "" }, { "docid": "4bc7521dbed531dba177d3fd8c467990", "score": "0.45843378", "text": "def evaluate_normal(args):\n np.random.seed(123)\n dataset, model, classifier = get_dataset_and_model(args)\n\n print(tabulate([classifier.evaluate(dataset['X_query'],\n dataset['y_query'],\n query=args.query,\n index=dataset['index'],\n )],\n headers=classifier.metric_names))", "title": "" }, { "docid": "cc02d6e77672fd2d2b550023c2049105", "score": "0.45812246", "text": "def objective(**params):\n scores = dqn(env=env, brain_name=brain_name, n_episodes=N_EPISODES, break_early=False, **params)\n return -np.mean(scores[-100:])", "title": "" }, { "docid": "73d4a86faedaa99181e4337d75ae4c3d", "score": "0.45791852", "text": "def compute_evoked_query(epochs, query, label):\n\n # Compute evokeds based on ERP or TFR epochs\n if isinstance(epochs, EpochsTFR):\n evoked = epochs[query].average()\n else: # `EpochsTFR.average()` has no `picks` argument\n evoked = epochs[query].average(picks=['eeg', 'misc'])\n evoked.comment = label\n\n return evoked", "title": "" }, { "docid": "92c53411479bd993fe82d82213b4b29d", "score": "0.45720392", "text": "def filter_genes_dispersion(result, log=False, show=None, save=None): \n highly_variable_genes(result, log=False, show=None, save=None, highly_variable_genes=False)", "title": "" }, { "docid": "a59c171721227122072fb47b5a54ba14", "score": "0.45689207", "text": "def find_result(query, dataset, result, groundtruth, maxK, task, epsilon):\r\n\r\n tStart = time.time()\r\n top_k = find_top_k(query, dataset, maxK, task, epsilon) #find top-k similar trajectories in dataset\r\n tEnd = time.time()\r\n \r\n approx_result = [tid for (tid,dist) in top_k] #get top-k tids\r\n \r\n \r\n if len(groundtruth) == 0:\r\n groundtruth = approx_result # use approx_result as groundtruth if groundtruth is empty\r\n \r\n \r\n k_list = [k for k in range(5,maxK+1,5)] # built a list of ascending k by step 5\r\n \r\n result['task_time'].append(tEnd - tStart)\r\n \r\n for k in k_list: # calculate the effectiveness from different k\r\n \r\n k_groundtruth = groundtruth[:k]\r\n k_approx_result = approx_result[:k]\r\n \r\n nDCG = calculate_nDCG(k_approx_result, k_groundtruth)\r\n hit_rate = len( set(k_groundtruth)&set(k_approx_result) )/ float(k)\r\n MAP = 
AP(k_groundtruth, k_approx_result)\r\n \r\n result['nDCG'][k].append(nDCG)\r\n \r\n result['hit_rate'][k].append(hit_rate)\r\n result['MAP'][k].append(MAP)\r\n \r\n return groundtruth", "title": "" }, { "docid": "192b641acfdc982305193cf2cd883f0c", "score": "0.45618156", "text": "def SLRGP_FIXED(Z,U, model,k): \n \n\n q = U.shape[0] # No. Unevaluated pts\n \n # # All points -> Evaluated and Unevaluated points\n X = np.vstack((Z,U))\n \n #print(\"calculating L...\")\n L = calculate_laplacian(Z,U,model)\n\n #L = calculate_laplacian_FIXED(Z,Y,model)\n #print(\"optimising Lambda...\")\n Lambda = optimize_SLRGP_parameter_FIXED(Z,U,X,k,model,Lambda_vals = [1e-05,1e-04,1e-03,1e-02,1e-01,1,1e1,1e2])\n \n\n ave_reductions = []\n K_zz_inv = linalg.inv( k.K(Z) + Lambda*k.K(Z,X)@[email protected](X,Z) + k.rbf.variance*np.eye(k.K(Z).shape[0]) ) \n for i in range(q):\n reduction = 0\n \n # Term 1\n xi = U[i][None,:]\n z_xi = k.K(np.vstack((Z,xi)))\n K_z_xi_inv = linalg.inv( k.K(z_xi) + Lambda*k.K(np.vstack((Z,xi)),X)@[email protected](X,np.vstack((Z,xi))) + k.rbf.variance*np.eye(k.K(np.vstack((Z,xi))).shape[0]) )\n \n for j in range(q - 1 ):\n if j != i:\n \n xj = U[j][None,:] \n T1 = k.K(xj) - k.K(xj,Z) @ K_zz_inv @ k.K(Z,xj)\n \n T2 = k.K(xj) - k.K(xj,z_xi) @ K_z_xi_inv @ k.K(z_xi,xj)\n \n \n reduction += T1 - T2\n \n ave_reductions.append( reduction/(q - 1 ) )\n \n indx = np.argmax(ave_reductions)\n new_point = U[indx]\n \n return [new_point], [indx]", "title": "" }, { "docid": "d909ce14ee93b74aa562db1226bba5f9", "score": "0.4557914", "text": "def objective_function(variable):\n global optimal_action\n global d\n player_veridical = variable[0]\n predator_veridical = variable[1]\n prey_veridical = variable[2]\n actual_action = variable[3]\n optimal_action = ddqn_veridical_action(player_veridical, predator_veridical, prey_veridical)\n d = difference([optimal_action, actual_action]) / 4\n return 1 - d", "title": "" }, { "docid": "db31398eba8ab74fd62154845b63f7ea", "score": "0.45435286", "text": "def build_variational_inference_function(variational_inference_model):\n input_to_model = variational_inference_model.input\n output_from_model = variational_inference_model.output[0]\n variational_base_function = K.function(\n inputs=[input_to_model, K.learning_phase()],\n outputs=[output_from_model])\n\n def _variational_inference_function(inputs, nb_of_samples):\n \"\"\"\n A function for variational inference.\n :param inputs: Inputs for variational inference.\n :param nb_of_samples: Number of variational samples.\n :return: A sample table with shape (number_of_samples, ..).\n \"\"\"\n samples = [variational_base_function([inputs, 1]) for _ in range(nb_of_samples)]\n return numpy.concatenate(samples)\n\n return _variational_inference_function", "title": "" }, { "docid": "b3b2c4e5fea7bc38c91fb472826b9ff8", "score": "0.45387015", "text": "def test_sanity_filtering_from_dataset():\n\n nt = 37\n w = 1 / 6\n d, t, _ = velocity_dataset(nt, w)\n\n f = filtering.LagrangeFilter(\n \"sanity_test\",\n d,\n {\"U\": \"u\", \"V\": \"v\"},\n {\"lon\": \"x\", \"lat\": \"y\", \"time\": \"time\"},\n sample_variables=[\"U\"],\n mesh=\"flat\",\n window_size=18 * 3600,\n highpass_frequency=(w / 2) / 3600,\n advection_dt=30 * 60,\n )\n\n # filter from the middle of the series\n filtered = f.filter_step(f.advection_step(t[nt // 2]))[\"var_U\"]\n # we expect a lot of parcels to hit the edge and die\n # but some should stay alive\n filtered = filtered[~np.isnan(filtered)]\n assert filtered.size > 0\n value = filtered.item(0)\n assert 
value == pytest.approx(0.0, abs=1e-3)", "title": "" }, { "docid": "8c6cdb1cb9bf2eeda64cbe0ddfbd7f89", "score": "0.45323157", "text": "def create_V_train_op(self):\n self.V_td_target = tf.placeholder(tf.float32, [None], 'V_td_target')\n self.loss_V = tf.reduce_mean(tf.square(self.V_td_target - tf.squeeze(self.V)))\n self.V_opt = tf.train.AdamOptimizer(self.lr_V)\n self.V_op = self.V_opt.minimize(self.loss_V)", "title": "" }, { "docid": "e324cac382a115b3b3bf8ac440ee2eaa", "score": "0.45244583", "text": "def generate_graph_queries_perf(gremlin_query_location_cold, gremlin_query_location_hot = None):\n if gremlin_query_location_hot is None:\n gremlin_query_location_hot = gremlin_query_location_cold\n logger.info(\"*\"*80)\n logger.info(\"Creating gremlin query files from gremlin_cold.groovy file present at %s \\\n for Gremlin cold cache for Orient, Tinker, Neo4j and Sparksee\" % (gremlin_query_location_cold))\n gremlin_queries = open(gremlin_query_location_cold, \"r\").read()\n sparksee_filehandler = open(\"/scripts/sparksee/SparkseeQueryColdPerf.groovy\", \"w\")\n sparksee_filehandler.write(\"\"\"import com.tinkerpop.blueprints.impls.sparksee.*\n\nx = new SparkseeGraph(args[0])\nprintln \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\nprintln \"Dataset is \" + args[1]\nprintln \"==============Running The Queries==========\"\n\"\"\")\n sparksee_filehandler.write(gremlin_queries)\n sparksee_filehandler.write(\"\\nx.shutdown()\");\n sparksee_filehandler.close()\n\n tinker_filehandler = open(\"/scripts/tinker/TinkerQueryColdPerf.groovy\", \"w\")\n tinker_filehandler.write(\"\"\"x = new TinkerGraph(args[0])\nprintln \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\nprintln \"Dataset is \" + args[1]\nprintln \"==============Running The Queries==========\"\n\"\"\")\n tinker_filehandler.write(gremlin_queries)\n tinker_filehandler.write(\"\\nx.shutdown()\");\n tinker_filehandler.close()\n\n\n neo4j_filehandler = open(\"/scripts/neo4j/Neo4jQueryColdPerf.groovy\", \"w\")\n neo4j_filehandler.write(\"\"\"x = new Neo4jGraph(args[0])\nprintln \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\nprintln \"==============Starting to Run The Queries==========\"\n\"\"\");\n neo4j_filehandler.write(gremlin_queries)\n neo4j_filehandler.write(\"\\nx.shutdown()\")\n neo4j_filehandler.close() \n\n orient_filehandler = open(\"/scripts/orient/OrientQueryColdPerf.groovy\", \"w\")\n orient_filehandler.write(\"\"\"println \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx = new OrientGraph(\"memory:\"+args[0])\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\nprintln \"==============Starting to Run The Queries==========\"\n\"\"\")\n 
orient_filehandler.write(gremlin_queries)\n orient_filehandler.write(\"\\nx.shutdown()\")\n orient_filehandler.close()\n\n\n\n gremlin_queries = open(gremlin_query_location_hot, \"r\").read()\n sparksee_filehandler = open(\"/scripts/sparksee/SparkseeQueryHotPerf.groovy\", \"w\")\n sparksee_filehandler.write(\"\"\"import com.tinkerpop.blueprints.impls.sparksee.*\n\nx = new SparkseeGraph(args[0])\nprintln \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\nprintln \"Dataset is \" + args[1]\nprintln \"==============Running The Queries==========\"\n\"\"\")\n sparksee_filehandler.write(gremlin_queries)\n sparksee_filehandler.write(\"\\nx.shutdown()\");\n sparksee_filehandler.close()\n\n\n tinker_filehandler = open(\"/scripts/tinker/TinkerQueryHotPerf.groovy\", \"w\")\n tinker_filehandler.write(\"\"\"\nx = new TinkerGraph(args[0])\nprintln \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\nprintln \"Dataset is \" + args[1]\nprintln \"==============Running The Queries==========\"\n\"\"\")\n tinker_filehandler.write(gremlin_queries)\n tinker_filehandler.write(\"\\nx.shutdown()\");\n tinker_filehandler.close()\n\n neo4j_filehandler = open(\"/scripts/neo4j/Neo4jQueryHotPerf.groovy\", \"w\")\n neo4j_filehandler.write(\"\"\"x = new Neo4jGraph(args[0])\nprintln \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\n\nprintln \"==============Starting to Run The Queries==========\"\n\"\"\");\n neo4j_filehandler.write(gremlin_queries)\n neo4j_filehandler.write(\"\\nx.shutdown()\")\n neo4j_filehandler.close() \n\n orient_filehandler = open(\"/scripts/orient/OrientQueryHotPerf.groovy\", \"w\")\n orient_filehandler.write(\"\"\"println \"===============Loading the Graph Model============\"\nloadModel = System.currentTimeMillis()\nx = new OrientGraph(\"memory:\"+args[0])\nx.loadGraphML(args[1])\nprintln \"Time taken to load the graph Model:\" + (System.currentTimeMillis() - loadModel)\nprintln \"===============Graph Model Loaded============\"\nx.V.count();\n\nprintln \"==============Starting to Run The Queries==========\"\n\"\"\")\n orient_filehandler.write(gremlin_queries)\n orient_filehandler.write(\"\\nx.shutdown()\")\n orient_filehandler.close()\n\n logger.info(\"*\"*80)", "title": "" }, { "docid": "d556cc751084fed4f46a6738731b8423", "score": "0.4523523", "text": "def test_of_illustration_of_computation_dense_graph():\n edge_sequence = [(1, 2), (1, 4), (3, 4), (2, 3), (5, 6), (5, 7), (6, 7), (4, 5), (1, 3), (7, 4)]\n dense_graph_fast(7, edge_sequence)", "title": "" }, { "docid": "9b66ce2a4afd8fcafcfd5c4627f59c24", "score": "0.4522307", "text": "def optimize(self, enc):\n\n # a dummy model (everything is deselected)\n model = [-v for v in range(enc.nv)]\n all_vars = set()\n\n # MaxSAT formula to work with\n formula = WCNF()\n\n # hard clauses\n for cl in enc.clauses:\n formula.append(cl)\n\n for j in range(1, self.nof_terms 
+ 1):\n for r in range(1, self.nof_feats + 1):\n formula.append([-self.dvar1(j, r)], 1)\n formula.append([-self.dvar0(j, r)], 1)\n all_vars.add(self.dvar1(j, r))\n all_vars.add(self.dvar0(j, r))\n\n if self.options.approx:\n hitman = LBX(formula, use_cld=self.options.use_cld,\n solver_name=self.options.solver)\n\n hses = []\n for i, hs in enumerate(hitman.enumerate()):\n hitman.block(hs)\n hses.append(hs)\n\n if i + 1 == self.options.approx:\n break\n\n hs = list(map(lambda v: -formula.soft[v - 1][0], min(hses, key=lambda x: len(x))))\n hitman.delete()\n else:\n hitman = RC2(formula, solver=self.options.solver, adapt=True,\n exhaust=True, incr=False, minz=False, trim=self.options.trim)\n\n hs = list(filter(lambda v: v > 0 and v in all_vars, hitman.compute()))\n hitman.delete()\n\n # filling the model with the right values\n for e in hs:\n model[e - 1] = e\n\n return model", "title": "" }, { "docid": "fec53b6e64d28a3383d55391d091a4e6", "score": "0.4515511", "text": "def computeEVOI(self, query):\n (qType, qContent) = query\n # if the query gives up, then epu is 0\n if qContent is None: return 0\n\n priorValue = self.computeCurrentSafelyOptPiValue()\n\n if qType == 'F':\n feat = qContent\n epu = self.consProbs[feat] * self.findConstrainedOptPi(activeCons=set(self.unknownCons) - {feat})['obj']\\\n + (1 - self.consProbs[feat]) * priorValue\n elif qType == 'R':\n rIndices = qContent\n\n mdpIfTrueReward = copy.deepcopy(self.mdp)\n mdpIfTrueReward.updatePsi(computePosteriorBelief(mdpIfTrueReward.psi, consistentRewards=rIndices))\n posteriorValueIfTrue = self.findConstrainedOptPi(activeCons=self.unknownCons, mdp=mdpIfTrueReward)['obj']\n\n mdpIfFalseReward = copy.deepcopy(self.mdp)\n mdpIfFalseReward.updatePsi(computePosteriorBelief(mdpIfFalseReward.psi, inconsistentRewards=rIndices))\n posteriorValueIfFalse = self.findConstrainedOptPi(activeCons=self.unknownCons, mdp=mdpIfFalseReward)['obj']\n\n epu = sum(self.mdp.psi[_] for _ in rIndices) * posteriorValueIfTrue +\\\n + (1 - sum(self.mdp.psi[_] for _ in rIndices)) * posteriorValueIfFalse\n else:\n raise Exception('unknown query ' + query)\n\n evoi = epu - priorValue\n assert evoi >= -1e-4, 'evoi value %f' % evoi\n return evoi", "title": "" }, { "docid": "b94c82ed8d37d2f932e7ca1c71519850", "score": "0.45098978", "text": "def _solve_normal_equation(self):\n # Get THE instance of the sampling class already stored in learner object \n # sample = self.get('sampling') \n # Check if the sampling is already done or not\n if not self.learning_done:\n logger.error('_solve_normal_equation: The sampling is not done yet. 
Call sampler.sampling.build_sampling_sets() first')\n sys.exit(1)\n\n x = self.learning_x # (m, n)\n if self.exclude_eta_column: x = x[:, :-1]\n x = utils.prepend_with_column_1(x) # (m, n+1)\n y = self.learning_y # (m, K)\n\n a = np.dot(x.T, x) # (n+1, n+1)\n b = np.linalg.inv(a) # (n+1, n+1)\n c = np.dot(x.T, y)\n d = np.dot(b, c)\n\n theta = d[:] # (n+1, K)\n self.set('normal_equation_theta', theta)\n\n # observed frequency from list of modes\n modes = self.modes\n\n # Now, solve for the \"best-value\" features using theta from above\n freqs = np.array([ mode.freq for mode in modes ]).T # (K, 1)\n\n e = np.linalg.inv(np.dot(theta, theta.T)) # (n+1, n+1)\n f = np.dot(theta, freqs) # (n+1, K) x (K, 1) == (n+1, 1)\n g = np.dot(e, f) # (n+1, n+1) x (n+1, 1) == (n+1, 1)\n\n self.set('normal_equation_features', g)\n\n h = np.dot(g.T, theta) - freqs\n J = old_div(np.dot(h.T, h), (2 * len(freqs)))\n\n self.set('normal_equation_cost', J)", "title": "" }, { "docid": "fde001224f8bbd8c1412808092424be9", "score": "0.450649", "text": "def condat_denoise(lam,inputName=\"noised\",outputName=\"denoised\"):\n command=['denoising','--lambda',str(lam),'--input',inputName,'--output',outputName]\n val=check_output(command)", "title": "" }, { "docid": "dda57d44af64ef7080fad737ebd35a26", "score": "0.44985646", "text": "def deltaLam ( lam, t, g, nfr, nobs) : # Nemati 2020\n ft1 = lam**2 # frequent term #1\n ft2 = 6 + 3 * lam + ft1 # frequent term #2\n ft3 = 2 * g**2 * ft2 # frequent term #3 ; ft3 = 2 * g**2 * ( 6 + 3 * lam + ft1 )\n \n # Epsilon PC = Epsilon Photon Counting = Thresholding Efficiency\n epsThr3 = np.exp( - t / g ) * ( t**2 * ft1 + 2 * g * t * lam * ( 3 + lam ) + ft3 ) / ft3 \n\n # Epsilon Coincidence Loss = Coincidence Loss (Efficiency)\n epsCL = ( 1 - np.exp ( - lam ) ) / lam\n \n func = lam * nfr * epsThr3 * epsCL - nobs\n \n # dfdlam\n dfdlam_1tN = np.exp ( - t / g - lam) * nfr # First term numerator\n dfdlam_1tD = 2 * g**2 * ft2**2 # 1t denominator ; { dfdlam_1tD = 2 * g**2 * ( 6 + 3 * lam * ft1 )**2 } \n dfdlam_2ts1 = dfdlam_1tD # 2t, 1 summand ; { dfdlam_2ts1 = 2 * g**2 * ( 6 + 3 * lam * ft1 )**2 }\n #dfdlam_2ts2 = t**2 * lam * ( -12 + 3 * lam + 3 * ft1 + lam**3 + 3 * np.exp ( lam ) * (4 + lam) ) # 2t, 2s\n dfdlam_2ts2 = t**2 * lam * ( -12 + 3 * lam + 3 * ft1 + ft1*lam + 3 * np.exp ( lam ) * (4 + lam) ) # 2t, 2s\n #dfdlam_2ts3 = 2 * g * t * ( -18 + 6 * lam + 15 * ft1 + 6 * lam**3 + lam**4 + 6 * np.exp ( lam ) * ( 3 + 2 * lam ) ) # 2t, 3s\n dfdlam_2ts3 = 2 * g * t * ( -18 + 6 * lam + 15 * ft1 + 6 * ft1*lam + ft1**2 + 6 * np.exp ( lam ) * ( 3 + 2 * lam ) ) # 2t, 3s\n dfdlam = dfdlam_1tN * dfdlam_1tD * ( dfdlam_2ts1 + dfdlam_2ts2 + dfdlam_2ts3 ) \n \n dlam = func / dfdlam\n \n# print(\"dlam\",dlam)\n return dlam", "title": "" }, { "docid": "116c0a3ad270762761513f2647da85fa", "score": "0.44901925", "text": "def SLRGP_z_variances(Z,U, model,k): \n \n\n q = U.shape[0] # No. 
Unevaluated pts\n \n # # All points -> Evaluated and Unevaluated points\n X = np.vstack((Z,U))\n \n #print(\"calculating L...\")\n L = calculate_laplacian(X,Z,U,model)\n #L = calculate_alternative_laplacian(Z,U,model)\n #print(\"optimising Lambda...\")\n #Lambda = optimize_SLRGP_parameter(Z,U,X,k,model,Lambda_vals = [1e-05,1e-04,1e-03,1e-02,1e-01,1,1e1,1e2])\n \n Lambda = optimize_SLRGP_parameter_FIXED(Z,U,X,k,model,Lambda_vals = [1e-05,1e-04,1e-03,1e-02,1e-01,1,1e1,1e2])\n \n Lambda_1 = Lambda*k.variance*linalg.inv( k.K(Z) + np.eye(k.K(Z).shape[0])*1e-04 )\n \n #Lambda_1 = Lambda\n #Lambda_2 = Lambda\n \n #print(\"Shape of Lambda 1:\", Lambda_1.shape)\n \n ave_reductions = []\n #K_zz_inv = linalg.inv( k.K(Z) + Lambda_1*k.K(Z,X)@[email protected](X,Z) + k.rbf.variance*np.eye(k.K(Z).shape[0]) ) \n \n K_zz_inv = linalg.inv( k.K(Z) + [email protected](Z,X)@[email protected](X,Z) + k.variance*np.eye(k.K(Z).shape[0]) ) \n\n current_vars = []\n potential_vars = []\n \n for i in range(q):\n reduction = 0\n c_var = 0\n p_var = 0\n \n # Term 1\n\n \n xi = U[i][None,:]\n z_xi = k.K(np.vstack((Z,xi)))\n \n Lambda_2 = Lambda*k.variance*linalg.inv( k.K(z_xi) + np.eye(k.K(z_xi).shape[0])*1e-04 )\n \n #print(\"Shape of Lambda 2:\", Lambda_2.shape)\n #K_z_xi_inv = linalg.inv( k.K(z_xi) + Lambda_2*k.K(np.vstack((Z,xi)),X)@[email protected](X,np.vstack((Z,xi))) + k.rbf.variance*np.eye(k.K(np.vstack((Z,xi))).shape[0]) )\n \n K_z_xi_inv = linalg.inv( k.K(z_xi) + [email protected](np.vstack((Z,xi)),X)@[email protected](X,np.vstack((Z,xi))) + k.variance*np.eye(k.K(np.vstack((Z,xi))).shape[0]) )\n \n \n for j in range(q - 1 ):\n if j != i:\n \n xj = U[j][None,:] \n T1 = k.K(xj) - k.K(xj,Z) @ K_zz_inv @ k.K(Z,xj)\n \n T2 = k.K(xj) - k.K(xj,z_xi) @ K_z_xi_inv @ k.K(z_xi,xj)\n \n \n reduction += T1 - T2\n c_var += T1\n p_var += T2\n #print(reduction)\n \n ave_reductions.append( reduction/(q - 1 ) )\n current_vars.append(c_var)\n potential_vars.append(p_var)\n \n #np.array(ave_reductions).squeeze(), \n return np.array(current_vars).squeeze(), np.array(potential_vars).squeeze()", "title": "" }, { "docid": "333be1994812fd18de014e686b4054ce", "score": "0.44870454", "text": "def visit_not_query(self, query):\n return query", "title": "" }, { "docid": "38b2ba065bac8b5f3473cd4152fdf694", "score": "0.44869864", "text": "def model_compute(Q: List[SingleQuery]) -> List[float]:\n\n tokens = tokenize(Q)\n\n with torch.no_grad():\n outputs = model(\n input_ids=tokens[\"input_ids\"], attention_mask=tokens[\"attention_mask\"],\n )\n\n # Outputs are [Logits, Attention Heads, ...]\n logits = outputs[0]\n\n # Keep only the entailment and contradiction logits\n contradiction_id = 0\n entailment_id = 2\n logits = logits[..., [contradiction_id, entailment_id]]\n\n # Detach the logits from the computation graph\n logits = logits.detach().cpu().numpy()\n\n # Softmax over remaining logits for each sequence\n scores = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)\n\n del outputs\n del tokens\n torch.cuda.empty_cache()\n\n # Return the value entailment\n return scores[..., -1]", "title": "" }, { "docid": "28c6da9ec64d44e44390bc30ce5c1cbf", "score": "0.44810277", "text": "def forward(self, query, key, value, mask):\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = 
v.transpose(1, 2) # (batch, head, time2, d_k)\n \n if self.restrict > 0:\n # TODO use stride or padding to make time2 equal to time1\n scale = k.shape[2] // q.shape[2]\n assert q.shape[2] == k.shape[2], \"restricted attention is not implemented for source attention now\"\n unfold = nn.Unfold(kernel_size=(self.restrict, 1), stride=(1, 1), padding=(self.restrict // 2, 0))\n # (batch, self.h * self.d_k * self.restrict, time2)\n k = unfold(k.transpose(2, 3).contiguous().view(n_batch, self.h * self.d_k, -1, 1))\n # (batch, self.h, time2, self.d_k, self.restrict)\n k = k.view(n_batch, self.h, self.d_k, self.restrict, -1).permute(0, 1, 4, 2, 3)\n # (batch, self.h * self.d_k * self.restrict, time2)\n v = unfold(v.transpose(2, 3).contiguous().view(n_batch, self.h * self.d_k, -1, 1))\n # (batch, self.h, time2, self.restrict, self.d_k)\n v = v.view(n_batch, self.h, self.d_k, self.restrict, -1).transpose(2, 4)\n # (batch, head, time1, 1, d_k) x (batch, head, time1, d_k, self.restrict) -> (batch, head, time1, 1, self.restrict)\n scores = q.unsqueeze(-2).matmul(k) / math.sqrt(self.d_k)\n if mask is not None:\n mask = mask.unsqueeze(-1).unsqueeze(-1)\n self.attn_ = torch.softmax(scores, dim = -1) # (batch, head, time1, time2)\n self.attn_ = self.attn_.masked_fill(mask == 0, 0)\n else:\n # (batch, head, time1, d_k) x (batch, head, d_k, time2) -> (batch, head, time1, time2)\n scores = q.matmul(k.transpose(-2, -1)) / math.sqrt(self.d_k)\n if mask is not None:\n mask = mask.unsqueeze(1)\n scores = scores.masked_fill(mask == 0, MIN_VALUE)\n self.attn_ = torch.softmax(scores, dim = -1) # (batch, head, time1, time2)\n p_attn = self.dropout(self.attn_)\n x = torch.matmul(p_attn, v) # (batch, head, time1, d_k)\n x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) # (batch, time1, d_model)\n return self.linear_out(x) # (batch, time1, d_model)", "title": "" }, { "docid": "8d665fcacfb7beff5a1efbc1f7dccc8a", "score": "0.44797567", "text": "def FILTER():", "title": "" }, { "docid": "6ff1b6f2d0a935055d996278a94c70d8", "score": "0.44786873", "text": "def create_test_program2():\n # Create the variables\n x = Var()\n variables = {\"x\":x}\n\n # Create the program graph\n pgm = nx.DiGraph()\n\n # Programme exemple du sujet\n pgm.add_node(1, attr_dict={\"operation\":node.If()})\n\n pgm.add_node(2, attr_dict={\"operation\":node.Always()})\n pgm.add_edge(1, 2, attr_dict={\"expr\": boolean.InfOrEqual(x, Var(value=0)), \"instr\": instructions.Skip()})\n\n pgm.add_node(3, attr_dict={\"operation\":node.Always()})\n pgm.add_edge(1, 3, attr_dict={\"expr\": boolean.InfOrEqual(x, Var(value=0), no=True), \"instr\": instructions.Skip()})\n\n pgm.add_node(4, attr_dict={\"operation\":node.If()})\n pgm.add_edge(2, 4, attr_dict={\"expr\": boolean.Always(), \"instr\": instructions.Assign(x, arithmetic.Min(Var(value=0), x))})\n pgm.add_edge(3, 4, attr_dict={\"expr\": boolean.Always(), \"instr\": instructions.Assign(x, arithmetic.Min(Var(value=1), x))})\n\n pgm.add_node(5, attr_dict={\"operation\":node.Always()})\n pgm.add_edge(4, 5, attr_dict={\"expr\": boolean.InfOrEqual(x, Var(value=1)), \"instr\": instructions.Skip()})\n\n pgm.add_node(6, attr_dict={\"operation\":node.Always()})\n pgm.add_edge(4, 6, attr_dict={\"expr\": boolean.InfOrEqual(x, Var(value=1), no=True), \"instr\": instructions.Skip()})\n pgm.add_edge(6, 4, attr_dict={\"expr\": boolean.Always(), \"instr\": instructions.Assign(x,arithmetic.Min(x,Var(value = 1)))})\n\n pgm.add_node(\"_\", attr_dict={\"operation\":node.End()})\n pgm.add_edge(5, \"_\", 
attr_dict={\"expr\": boolean.Always(), \"instr\": instructions.Assign(x, Var(value=1))})\n\n # Get the program from the program graph\n pgm = Program(pgm, variables)\n return pgm", "title": "" }, { "docid": "f46d568e484d52b3f89b208c92ac0f72", "score": "0.44765088", "text": "def get_synth_data(\n key: np.ndarray,\n num: int,\n equations: Text,\n num_xstar: int = 100,\n external_equations: Equations = None,\n disconnect_instrument: bool = False\n) -> DataSynth:\n if external_equations is not None:\n eqs = external_equations\n elif equations == \"np\":\n return get_newey_powell(key, num, num_xstar)\n else:\n eqs = structural_equations[equations]\n\n key, subkey = random.split(key)\n ex, ey = eqs[\"noise\"](subkey, num)\n key, subkey = random.split(key)\n confounder = eqs[\"confounder\"](subkey, num)\n key, subkey = random.split(key)\n z = eqs[\"f_z\"](subkey, num)\n x = eqs[\"f_x\"](z, confounder, ex)\n y = eqs[\"f_y\"](x, confounder, ey)\n\n values = whiten({'x': x, 'y': y, 'z': z, 'confounder': confounder,\n 'ex': ex, 'ey': ey})\n\n # Evaluate E[ Y | do(x^*)] empirically\n xmin, xmax = np.min(x), np.max(x)\n xstar = np.linspace(xmin, xmax, num_xstar)\n ystar = []\n for _ in range(500):\n key, subkey = random.split(key)\n tmpey = eqs[\"noise\"](subkey, num_xstar)[1]\n key, subkey = random.split(key)\n tmpconf = eqs[\"confounder\"](subkey, num_xstar)\n tmp_ystar = whiten_with_mu_std(\n eqs[\"f_y\"](xstar, tmpconf, tmpey), values[\"y_mu\"], values[\"y_std\"])\n ystar.append(tmp_ystar)\n ystar = np.array(ystar)\n xstar = whiten_with_mu_std(xstar, values[\"x_mu\"], values[\"x_std\"])\n if disconnect_instrument:\n key, subkey = random.split(key)\n values['z'] = random.normal(subkey, shape=z.shape)\n return values, xstar, ystar", "title": "" }, { "docid": "ed04295d9299c392de53a6ae575aab0e", "score": "0.44755417", "text": "def test_mevo_eval():\n weight = torch.nn.Linear(3, 4).cuda().weight\n input = torch.rand(1, 5, 3).cuda()\n k = MEVO(weight)\n k.eval()\n out = k(input, None)\n assert out.shape == (1, 5, 4)", "title": "" }, { "docid": "a28e92656e7a4784eafd32a49003a620", "score": "0.4472804", "text": "def simplification_epsilon(dataset, epsilon, algo, dataset_name):\r\n dataset[dataset_name] = []\r\n error_list = []\r\n sum_error_list = []\r\n v_error_list = []\r\n \r\n len_raw = 0.0\r\n len_sim = 0.0\r\n \r\n tSimp = 0\r\n \r\n for tid, tra in dataset['raw']:\r\n tStart = time.time()\r\n s_idx = algo(tra, epsilon)\r\n tSimp += time.time() - tStart\r\n \r\n s_tra = [tra[i] for i in s_idx] #simplified trajectory\r\n \r\n dataset[dataset_name].append((tid, s_tra))\r\n error_list.append(CED(tra, s_idx))\r\n # sum_error_list.append(scale_RMSE(tra, s_idx))\r\n v_error_list.append(V_ERROR(tra, s_idx))\r\n # v_error_list.append(length_ratio(tra, s_idx))\r\n \r\n len_raw += float(len(tra))\r\n len_sim += float(len(s_idx))\r\n\r\n return {\r\n \"compression_time\": tSimp,\r\n \"error\": sum(error_list) / len(error_list),\r\n \"storage\": len_sim / len_raw,\r\n # \"sum_error\": sum(sum_error_list) / len(sum_error_list),\r\n \"v_error\": (sum(v_error_list) / len(v_error_list)) * 111 / 15 * 3600\r\n # \"v_error\": (sum(v_error_list) / len(v_error_list))\r\n }", "title": "" }, { "docid": "bf87c307b7f418fdd8f700206f5fbb06", "score": "0.4471615", "text": "def prune(self):\n #TODO, that is maybe a bit too much. 
You can't have disease-gene-disease for instance !\n # But degree is not enough because you can have A and B both go to C but C doesn't go anywhere.\n # Probably need to interact with the query to decide whether this node is prunable or not.\n self.logger.debug('Pruning Graph')\n removed = True\n #make the set of types that we don't want to prune. These are the end points (both text and id versions).\n ttypes = self.userquery.get_terminal_types()\n keep_types = set()\n keep_types.update(ttypes[0])\n keep_types.update(ttypes[1])\n keep_types.add( node_types.DISEASE_NAME )\n keep_types.add( node_types.DRUG_NAME )\n n_pruned = 0\n while removed:\n removed = False\n to_remove = []\n for node in self.graph.nodes():\n if node.node_type in keep_types:\n continue\n #Graph is directed. graph.neighbors only returns successors \n neighbors = self.graph.successors(node) + self.graph.predecessors(node)\n neighbor_types = set( [ neighbor.node_type for neighbor in neighbors ] )\n if len(neighbor_types) < 2 :\n to_remove.append(node)\n for node in to_remove:\n removed=True\n n_pruned += 1\n self.graph.remove_node(node)\n self.logger.debug('Pruned {} nodes.'.format(n_pruned) )", "title": "" }, { "docid": "97eb16456a3602efe00194fe97e6c0e1", "score": "0.44709823", "text": "def evaluate(args, model, tokenizer, prefix=\"\"):\n\n # Make output dir if necessary\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n \n\n # Get dev data\n dev_data = load_and_cache_dataset(args, tokenizer, 'dev')\n dev_queries = dev_data[\"queries\"]\n dev_query_token_ids = dev_data[\"query_token_ids\"]\n candidate_token_ids = dev_data[\"candidate_token_ids\"]\n candidates = list(dev_data[\"candidate2id\"].keys())\n candidate_ids = list(dev_data[\"candidate2id\"].values())\n dev_pos_candidate_ids = dev_data[\"gold_hypernym_candidate_ids\"]\n \n \n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Nb queries: {}\".format(len(dev_queries)))\n \n # Accumulate average precision scores\n ap_scores = []\n\n # Loop over queries\n total_eval_loss = 0.0\n nb_queries = len(dev_queries)\n for i in range(nb_queries):\n # Create a dataset for this query and all the candidates\n query_token_ids = dev_query_token_ids[i]\n candidate_labels = [0] * len(candidate_ids)\n for candidate_id in dev_pos_candidate_ids[i]:\n candidate_labels[candidate_id] = 1\n eval_dataset = make_dataset(tokenizer,\n [query_token_ids],\n candidate_token_ids,\n [candidate_ids],\n candidate_labels=[candidate_labels],\n max_length=args.max_seq_length,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True)\n # Evaluate model on dataset\n logger.info(\" *** Running evaluation on query {} ('{}') ***\".format(i, dev_queries[i]))\n y_probs, y_true, eval_loss = get_model_predictions(args, model, eval_dataset)\n total_eval_loss += eval_loss\n y_score = y_probs[:,1]\n ap = average_precision_score(y_true=y_true, y_score=y_score)\n ap_scores.append(ap)\n\n # Compute mean average precision\n MAP = np.mean(ap_scores)\n loss = total_eval_loss/nb_queries\n \n logger.info(\"***** Results *****\")\n logger.info(\" MAP: {}\".format(MAP))\n logger.info(\" loss: {}\".format(loss))\n return {\"MAP\":MAP, \"loss\":loss}", "title": "" }, { "docid": "47caf0f5fafcd8cb8e972125cece566c", "score": "0.4470012", "text": "def eval_individual(genome, substrate, vd_environment):\n #substrate.PrintInfo()\n # Create ANN from provided CPPN genome and substrate\n net = NEAT.NeuralNetwork()\n 
genome.BuildHyperNEATPhenotype(net, substrate)\n\n fitness, dist = vd_environment.evaluate_net(net)\n return fitness, dist", "title": "" }, { "docid": "b248e8a3f809364fdb3b342944d9fd96", "score": "0.4462496", "text": "def parse(training_path, query):\n engine = SnipsNLUEngine.from_path(training_path)\n\n if query:\n print_parsing_result(engine, query)\n return\n\n while True:\n query = input(\"Enter a query (type 'q' to quit): \").strip()\n if query == \"q\":\n break\n print_parsing_result(engine, query)", "title": "" }, { "docid": "bf43d46c200aee374aa378cefce7cc5e", "score": "0.44566566", "text": "def nnedi3(self) -> _Plugin_nnedi3_Unbound:", "title": "" }, { "docid": "7fda52437ae6a85c4134d1741fa0a5f0", "score": "0.44519904", "text": "def test_unsupervised_density():\n # !TODO: Implement a suitable scenario.\n pass", "title": "" }, { "docid": "f91ad9507f2604a538fb0ab1c80c91f1", "score": "0.44502592", "text": "def sde_evo_mnist(tspan, init_cond, time, class_mag_mat, feature_array,\n octo_hits, mP, exP, seed_val):\n\n def piecewise_lin_pseudo_sig(x, span, slope):\n \"\"\"\n Piecewise linear 'sigmoid' used for speed when squashing neural inputs in difference eqns.\n \"\"\"\n y = x*slope\n y = _np.maximum(y, -span/2) # replace values below -span/2\n y = _np.minimum(y, span/2) # replace values above span/2\n return y\n\n def wiener(w_sig, mean_spont_, old_, tau_, inputs_):\n \"\"\"\n Calculate wiener noise.\n \"\"\"\n d_ = dt*(-old_*tau_ + inputs_)\n # Wiener noise:\n dW_ = _np.sqrt(dt)*w_sig*mean_spont_*_np.random.normal(0,1,(d_.shape))\n # combine them:\n return old_ + d_ + dW_\n\n # if argin seed_val is nonzero, fix the rand seed for reproducible results\n if seed_val:\n _np.random.seed(seed_val) # Reset random state\n\n spin = '/-\\|' # create spinner for progress bar\n\n # numbers of objects\n (nC,_) = class_mag_mat.shape\n nP = mP.nG\n nL = mP.nG\n nR = mP.nG\n\n ## noise in individual neuron FRs\n # These are vectors, one vector for each type:\n wPsig = mP.noisePvec.squeeze()\n wPIsig = mP.noisePIvec.squeeze() # no PIs for mnist\n wLsig = mP.noiseLvec.squeeze()\n wRsig = mP.noiseRvec.squeeze()\n wKsig = mP.noiseKvec.squeeze()\n wEsig = mP.noiseEvec.squeeze()\n\n # steady-state RN FR, base + noise:\n RspontRatios = mP.Rspont/mP.Rspont.mean() # used to scale stim inputs\n\n ## param for sigmoid that squashes inputs to neurons:\n # the slope at x = 0 = mP.slope_param*span/4\n pSlope = mP.slope_param*mP.cP/4\n piSlope = mP.slope_param*mP.cPI/4 # no PIs for mnist\n lSlope = mP.slope_param*mP.cL/4\n rSlope = mP.slope_param*mP.cR/4\n kSlope = mP.slope_param*mP.cK/4\n\n#-------------------------------------------------------------------------------\n\n dt = round(time[1] - time[0], 2) # this is determined by start, stop and step in calling function\n N = int( (tspan[1] - tspan[0]) / dt ) # number of steps in noise evolution\n T = _np.linspace(tspan[0], tspan[1]-dt, N) # the time vector\n\n#-------------------------------------------------------------------------------\n\n P = _np.zeros((nP, N))\n PI = _np.zeros((mP.nPI, N)) # no PIs for mnist\n L = _np.zeros((nL, N))\n R = _np.zeros((nR, N))\n K = _np.zeros((mP.nK, N))\n E = _np.zeros((mP.nE, N))\n\n # initialize the FR matrices with initial conditions\n P[:,0] = init_cond[ : nP ] # col vector\n PI[:,0] = init_cond[ nP : nP + mP.nPI ] # no PIs for mnist\n L[:,0] = init_cond[ nP + mP.nPI : nP + mP.nPI + nL ]\n R[:,0] = init_cond[ nP + mP.nPI + nL : nP + mP.nPI + nL + nR ]\n K[:,0] = init_cond[ nP + mP.nPI + nL + nR : nP + mP.nPI + nL + nR + 
mP.nK ]\n E[:,0] = init_cond[ -mP.nE : ]\n # P2Kheb = mP.P2K # '-heb' suffix is used to show that it will vary with time\n # PI2Kheb = mP.PI2K # no PIs for mnist\n # K2Eheb = mP.K2E\n\n P2Kmask = mP.P2K > 0\n PI2Kmask = mP.PI2K > 0 # no PIs for mnist\n K2Emask = mP.K2E > 0\n newP2K = mP.P2K.copy() # initialize\n newPI2K = mP.PI2K.copy() # no PIs for mnist\n newK2E = mP.K2E.copy()\n\n # initialize the counters for the various classes\n class_counter = _np.zeros(nC)\n\n # make a list of Ts for which heb is active\n hebRegion = _np.zeros(T.shape)\n for i in range(len(exP.hebStarts)):\n inds = _np.bitwise_and(T >= exP.hebStarts[i], T <= (exP.hebStarts[i] + exP.hebDurations[i]))\n hebRegion[inds] = 1\n\n ## DEBUG STEP:\n # import matplotlib.pyplot as _plt\n # fig, ax = _plt.subplots()\n # ax.plot(T, hebRegion)\n # ax.set(title='hebRegion vs T')\n # ax.grid() # fig.savefig(\"test.png\")\n # _plt.show()\n\n#-------------------------------------------------------------------------------\n\n meanCalc1Done = False # flag to prevent redundant calcs of mean spont FRs\n meanCalc2Done = False\n meanCalc3Done = False\n\n mean_spont_P = _np.zeros(nP)\n mean_spont_PI = _np.zeros(mP.nPI) # no PIs for mnist\n mean_spont_L = _np.zeros(nL)\n mean_spont_R = _np.zeros(nR)\n mean_spont_K = _np.zeros(mP.nK)\n # mean_spont_E = _np.zeros(mP.nE)\n # ssMeanSpontP = _np.zeros(nP)\n # ssStdSpontP = _np.ones(nP)\n\n # placeholder until we have an estimate based on spontaneous PN firing rates\n maxSpontP2KtimesPval = 10\n\n ## Main evolution loop:\n # iterate through time steps to get the full evolution:\n for i in range(N-1): # i = index of the time point\n prog = int(15*(i/N))\n remain = 15-prog-1\n mult = 50 # multiplier (spinner speed control)\n print(f\"{spin[int((i%(len(spin)*mult))/mult)]} SDE evolution:[{prog*'*'}{remain*' '}]\", end='\\r')\n\n # step = _np.round(time[1] - time[0], 4)\n\n # if sufficiently early, or we want the entire evo\n if T[i]<(exP.stopSpontMean3 + 5) or mP.saveAllNeuralTimecourses:\n oldP = P[:,i]\n oldPI = PI[:,i] # no PIs for mnist\n oldL = L[:,i]\n oldR = R[:,i]\n oldK = K[:,i]\n else: # version to save memory:\n oldP = P.reshape(P.shape[0], -1)[:,-1]\n oldPI = PI.reshape(PI.shape[0], -1)[:,-1]\n oldL = L.reshape(L.shape[0], -1)[:,-1]\n oldR = R.reshape(R.shape[0], -1)[:,-1]\n oldK = K.reshape(K.shape[0], -1)[:,-1]\n oldE = E[:,i]\n oldT = T[i]\n\n oldP2K = newP2K.copy() # these are inherited from the previous iteration\n oldPI2K = newPI2K.copy() # no PIs for mnist\n oldK2E = newK2E.copy()\n\n#-------------------------------------------------------------------------------\n\n # set flags to say:\n # 1. whether we are past the window where mean_spont_FR is calculated,\n # so noise should be weighted according to a first estimate of\n # mean_spont_FR (mean_spont_1)\n # 2. whether we are past the window where mean_spont_FR is recalculated\n # to mean_spont_2 and\n # 3. 
whether we are past the window where final stdSpontFR can be calculated\n adjustNoiseFlag1 = oldT > exP.stopPreNoiseSpontMean1\n adjustNoiseFlag2 = oldT > exP.stopSpontMean2\n adjustNoiseFlag3 = oldT > exP.stopSpontMean3\n\n if adjustNoiseFlag1 and not(meanCalc1Done):\n # ie we have not yet calc'ed the noise weight vectors\n inds = _np.nonzero(_np.logical_and(T > exP.startPreNoiseSpontMean1,\n T < exP.stopPreNoiseSpontMean1))[0]\n mean_spont_P = P[:,inds].mean(axis=1)\n mean_spont_PI = PI[:,inds].mean(axis=1)\n mean_spont_L = L[:,inds].mean(axis=1)\n mean_spont_R = R[:,inds].mean(axis=1)\n mean_spont_K = K[:,inds].mean(axis=1)\n # mean_spont_E = E[:,inds].mean(axis=1)\n meanCalc1Done = 1 # so we don't calc this again\n\n if adjustNoiseFlag2 and not(meanCalc2Done):\n # ie we want to calc new noise weight vectors. This stage is surplus\n inds = _np.nonzero(_np.logical_and(T > exP.startSpontMean2,\n T < exP.stopSpontMean2))[0]\n mean_spont_P = P[:,inds].mean(axis=1)\n mean_spont_PI = PI[:,inds].mean(axis=1)\n mean_spont_L = L[:,inds].mean(axis=1)\n mean_spont_R = R[:,inds].mean(axis=1)\n mean_spont_K = K[:,inds].mean(axis=1)\n # mean_spont_E = E[:,inds].mean(axis=1)\n # stdSpontP = P[:,inds].std(axis=1) # for checking progress\n meanCalc2Done = 1 # so we don't calc this again\n\n if adjustNoiseFlag3 and not(meanCalc3Done):\n # we want to calc stdSpontP for use with LH channel and maybe for use in heb\n # maybe we should also use this for noise calcs (eg dWP).\n # But the difference is slight.\n inds = _np.nonzero(_np.logical_and(T > exP.startSpontMean3,\n T < exP.stopSpontMean3))[0]\n ssMeanSpontP = P[:,inds].mean(axis=1) # 'ss' means steady state\n ssStdSpontP = P[:,inds].std(axis=1)\n ssMeanSpontPI = PI[:,inds].mean(axis=1) # no PIs for mnist\n ssStdSpontPI = PI[:,inds].std(axis=1) # no PIs for mnist\n meanCalc3Done = 1 # so we don't calc this again\n\n # set a minimum damping on KCs based on spontaneous PN activity,\n # sufficient to silence the MB silent absent odor:\n temp = _np.sort(mP.P2K.dot(ssMeanSpontP)) # 'ascending' by default\n ignoreTopN = 1 # ie ignore this many of the highest vals\n temp = temp[:-ignoreTopN] # ignore the top few outlier K inputs\n maxSpontP2KtimesPval = temp.max() # The minimum global damping on the MB\n meanCalc3Done = 1\n\n # create class_counter - the counters for the various classes\n if i: # if i is not zero\n class_counter += _np.logical_and(class_mag_mat[:,i-1]==0, class_mag_mat[:,i]>0)\n\n # get values of feature inputs at time index i, as a col vector.\n # This allows for simultaneous inputs by different classes, but current\n # experiments apply only one class at a time.\n thisInput = _np.zeros(mP.nF)\n thisStimClassInd = []\n for j in range(nC):\n if class_mag_mat[j,i]: # if class_mag_mat[j,i] is not zero\n # thisInput += class_mag_mat[j,i]*feature_array[:,int(class_counter[j]),j]\n imNum = int(class_counter[j] - 1) # indexing: need the '-1' so we don't run out of images\n thisInput += class_mag_mat[j,i]*feature_array[:,imNum,j]\n thisStimClassInd.append(j)\n\n#-------------------------------------------------------------------------------\n\n # get value at t for octopamine:\n thisOctoHit = octo_hits[i]\n # octo_hits is a vector with an octopamine magnitude for each time point\n\n#-------------------------------------------------------------------------------\n\n # dP:\n Pinputs = (1 - thisOctoHit*mP.octo2P*mP.octoNegDiscount).squeeze()\n Pinputs = _np.maximum(Pinputs, 0) # pos. 
rectify\n Pinputs *= -mP.L2P.dot(oldL)\n Pinputs += (mP.R2P.squeeze()*oldR)*(1 + thisOctoHit*mP.octo2P).squeeze()\n # ie octo increases responsivity to positive inputs and to spont firing, and\n # decreases (to a lesser degree) responsivity to neg inputs.\n Pinputs = piecewise_lin_pseudo_sig(Pinputs, mP.cP, pSlope)\n\n # Wiener noise\n newP = wiener(wPsig, mean_spont_P, oldP, mP.tau_P, Pinputs)\n\n#-------------------------------------------------------------------------------\n\n # dPI: # no PIs for mnist\n PIinputs = (1 - thisOctoHit*mP.octo2PI*mP.octoNegDiscount).squeeze()\n PIinputs = _np.maximum(PIinputs, 0) # pos. rectify\n PIinputs *= -mP.L2PI.dot(oldL)\n PIinputs += mP.R2PI.dot(oldR)*(1 + thisOctoHit*mP.octo2PI).squeeze()\n # ie octo increases responsivity to positive inputs and to spont firing, and\n # decreases (to a lesser degree) responsivity to neg inputs.\n PIinputs = piecewise_lin_pseudo_sig(PIinputs, mP.cPI, piSlope)\n\n # Wiener noise\n newPI = wiener(wPIsig, mean_spont_PI, oldPI, mP.tau_PI, PIinputs)\n\n#-------------------------------------------------------------------------------\n\n # dL:\n Linputs = (1 - thisOctoHit*mP.octo2L*mP.octoNegDiscount).squeeze()\n Linputs = _np.maximum(Linputs, 0) # pos. rectify\n Linputs *= -mP.L2L.dot(oldL)\n Linputs += (mP.R2L.squeeze()*oldR)*(1 + thisOctoHit*mP.octo2L).squeeze()\n Linputs = piecewise_lin_pseudo_sig(Linputs, mP.cL, lSlope)\n\n # Wiener noise\n newL = wiener(wLsig, mean_spont_L, oldL, mP.tau_L, Linputs)\n\n#-------------------------------------------------------------------------------\n\n # dR:\n # inputs: S = stim, L = lateral neurons, mP.Rspont = spontaneous FR\n # NOTE: octo does not affect mP.Rspont. It affects R's response to input odors.\n Rinputs = (1 - thisOctoHit*mP.octo2R*mP.octoNegDiscount).squeeze()\n Rinputs = _np.maximum(Rinputs, 0) # pos. 
rectify Rinputs\n Rinputs *= -mP.L2R.dot(oldL)\n neur_act = mP.F2R.dot(thisInput)*RspontRatios.squeeze()\n neur_act *= (1 + thisOctoHit*mP.octo2R).squeeze()\n Rinputs += neur_act + mP.Rspont.squeeze()\n Rinputs = piecewise_lin_pseudo_sig(Rinputs, mP.cR, rSlope)\n\n # Wiener noise\n newR = wiener(wRsig, mean_spont_R, oldR, mP.tau_R, Rinputs)\n\n#-------------------------------------------------------------------------------\n\n # Enforce sparsity on the KCs:\n # Global damping on KCs is controlled by mP.sparsityTarget\n # (during octopamine, by octSparsityTarget).\n # Assume that inputs to KCs form a gaussian, and use a threshold\n # calculated via std devs to enforce the correct sparsity.\n\n # Delays from AL -> MB and AL -> LH -> MB (~30 mSec) are ignored.\n\n # the # st devs to give the correct sparsity\n numNoOctoStds = _np.sqrt(2)*erfinv(1 - 2*mP.sparsityTarget)\n numOctoStds = _np.sqrt(2)*erfinv(1 - 2*mP.octoSparsityTarget)\n # select for either octo or no-octo\n numStds = (1-thisOctoHit)*numNoOctoStds + thisOctoHit*numOctoStds\n # set a minimum damping based on spontaneous PN activity, so that\n # the MB is silent absent odor\n minDamperVal = 1.2*maxSpontP2KtimesPval\n thisKinput = oldP2K.dot(oldP) - oldPI2K.dot(oldPI) # (no PIs for mnist, only Ps)\n\n damper = thisKinput.mean() + numStds*thisKinput.std()\n damper = max(damper, minDamperVal)\n\n dampening = (damper*mP.kGlobalDampVec).squeeze() + oldPI2K.dot(oldPI)\n pos_octo = _np.maximum(1 - mP.octo2K*thisOctoHit, 0).squeeze()\n\n Kinputs = oldP2K.dot(oldP)*(1 + thisOctoHit*mP.octo2K).squeeze() # but note that mP.octo2K == 0\n Kinputs -= dampening*pos_octo # but no PIs for mnist\n Kinputs = piecewise_lin_pseudo_sig(Kinputs, mP.cK, kSlope)\n\n # Wiener noise\n newK = wiener(wKsig, mean_spont_K, oldK, mP.tau_K, Kinputs)\n\n#-------------------------------------------------------------------------------\n\n # Readout neurons E (EN = 'extrinsic neurons'):\n # These are readouts, so there is no sigmoid.\n # mP.octo2E == 0, since we are not stimulating ENs with octo.\n # dWE == 0 since we assume no noise in ENs.\n Einputs = oldK2E.dot(oldK)\n # oldK2E.dot(oldK)*(1 + thisOctoHit*mP.octo2E) # mP.octo2E == 0\n dE = dt*( -oldE*mP.tau_E + Einputs )\n\n # Wiener noise\n dWE = 0 # noise = 0 => dWE == 0\n # combine them\n newE = oldE + dE + dWE # always non-neg\n\n#-------------------------------------------------------------------------------\n\n ## HEBBIAN UPDATES:\n\n # Apply Hebbian learning to mP.P2K, mP.K2E:\n # For ease, use 'newK' and 'oldP', 'newE' and 'oldK', ie 1 timestep of delay.\n # We restrict hebbian growth in mP.K2E to connections into the EN of the\n # training stimulus.\n\n # Hebbian updates are active for about half the duration of each stimulus\n if hebRegion[i]:\n # the PN contribution to hebbian is based on raw FR\n #tempP = oldP.copy()\n #tempPI = oldPI.copy() # no PIs for mnist\n nonNegNewK = _np.maximum(newK, 0) # since newK has not yet been made non-neg\n\n ## dP2K:\n dp2k = (1/mP.heb_tau_PK) * nonNegNewK.reshape(-1, 1).dot(oldP.reshape(-1, 1).T)\n dp2k *= P2Kmask # if original synapse does not exist, it will never grow\n\n # decay some P2K connections if wished: (not used for mnist experiments)\n if mP.die_back_tau_PK > 0:\n oldP2K *= -(1/mP.die_back_tau_PK)*dt\n\n newP2K = _np.maximum(oldP2K + dp2k, 0)\n newP2K = _np.minimum(newP2K, mP.hebMaxPK)\n\n#-------------------------------------------------------------------------------\n\n ## dPI2K: # no PIs for mnist\n dpi2k = (1/mP.heb_tau_PIK) * nonNegNewK.reshape(-1, 
1).dot(oldPI.reshape(-1, 1).T)\n dpi2k *= PI2Kmask # if original synapse does not exist, it will never grow\n\n # kill small increases:\n temp = oldPI2K.copy() # this detour prevents dividing by zero\n temp[temp == 0] = 1\n keepMask = dpi2k/temp\n keepMask = keepMask.reshape(dpi2k.shape)\n dpi2k *= keepMask\n if mP.die_back_tau_PIK > 0:\n oldPI2K -= oldPI2K*(1/die_back_tau_PIK)*dt\n newPI2K = _np.maximum(oldPI2K + dpi2k, 0)\n newPI2K = _np.minimum(newPI2K, mP.hebMaxPIK)\n\n#-------------------------------------------------------------------------------\n\n ## dK2E:\n #tempK = oldK\n # oldK is already nonNeg\n dk2e = (1/mP.heb_tau_KE) * newE.reshape(-1, 1).dot(oldK.reshape(-1, 1).T)\n dk2e *= K2Emask\n\n # restrict changes to just the i'th row of mP.K2E, where i = ind of training stim\n restrictK2Emask = _np.zeros(mP.K2E.shape)\n restrictK2Emask[thisStimClassInd,:] = 1\n dk2e *= restrictK2Emask\n\n#-------------------------------------------------------------------------------\n\n # inactive connections for this EN die back:\n if mP.die_back_tau_KE:\n # restrict dieBacks to only the trained EN\n targetMask = _np.zeros(dk2e.shape)\n targetMask[ dk2e == 0 ] = 1\n targetMask *= restrictK2Emask\n dieBack = (oldK2E + 2)*(1/mP.die_back_tau_KE)*dt\n # the '+1' allows weights to die to absolute 0\n oldK2E -= targetMask*dieBack\n\n newK2E = oldK2E + dk2e\n newK2E = _np.maximum(newK2E, 0)\n newK2E = _np.minimum(newK2E, mP.hebMaxKE)\n\n else: # case: no heb or no octo\n newP2K = oldP2K.copy()\n newPI2K = oldPI2K.copy() # no PIs for mnist\n newK2E = oldK2E.copy()\n\n#-------------------------------------------------------------------------------\n\n # update the evolution matrices, disallowing negative FRs.\n if T[i]<(exP.stopSpontMean3 + 5) or mP.saveAllNeuralTimecourses:\n # case: do not save AL and MB neural timecourses after the noise\n # calibration is done, to save on memory\n P[:,i+1] = _np.maximum(newP, 0)\n PI[:,i+1] = _np.maximum(newPI, 0) # no PIs for mnist\n L[:,i+1] = _np.maximum(newL, 0)\n R[:,i+1] = _np.maximum(newR, 0)\n K[:,i+1] = _np.maximum(newK, 0)\n else:\n P = _np.maximum(newP, 0)\n PI = _np.maximum(newPI, 0) # no PIs for mnist\n L = _np.maximum(newL, 0)\n R = _np.maximum(newR, 0)\n K = _np.maximum(newK, 0)\n\n E[:,i+1] = newE # always save full EN timecourses\n\n print('\\r')\n # Time-step simulation is now over.\n\n this_run = dict() # pre-allocate\n # combine so that each row of fn output Y is a col of [P; PI; L; R; K]\n if mP.saveAllNeuralTimecourses:\n Y = _np.vstack((P, PI, L, R, K, E))\n this_run['Y'] = Y.T\n else:\n this_run['Y'] = []\n\n this_run['T'] = T.T # store T as a col\n this_run['E'] = E.T # length(T) x mP.nE matrix\n this_run['P2Kfinal'] = oldP2K\n this_run['K2Efinal'] = oldK2E\n\n return this_run", "title": "" }, { "docid": "36b1c8ef4af3ee3cb140604b851d21fd", "score": "0.44500285", "text": "def apply_prune(tensors,prune_Rate,sess):\n # Store nonzero index for each weights.\n dict_nzidxs = {}\n\n #for each untarget layers\n for untarget in model.config.untarget_layer:\n wl = \"w_\" + untarget\n tensor = tensors[wl]\n weight = tensor.eval()\n nzidxs1 = abs(weight) != 0\n dict_nzidxs[wl] = nzidxs1\n # For each target layers,\n for target in model.config.target_layer:\n wl = \"w_\" + target\n #print(wl + \" threshold:\\t\" + str(model.config.th[wl]))\n # Get target layer's weights\n tensor = tensors[wl]\n weight = tensor.eval()\n\n # Apply pruning\n weight, nzidxs = model.papl.prune_dense(weight, name=wl,prune_rate=prune_Rate)\n\n # Store pruned weights as 
tensorflow objects\n dict_nzidxs[wl] = nzidxs\n #tensor.assign(weight)\n #tensor.eval()\n sess.run(tensor.assign(weight))\n\n return dict_nzidxs,sess", "title": "" }, { "docid": "0b09877d4bf33c42c13d3db2016ff7c3", "score": "0.44486514", "text": "def add_vars(model):\n # This is equation (3) in the paper\n model.x = Var(model.V, domain=Binary)\n model.y = Var(model.E, domain=NonNegativeReals)", "title": "" }, { "docid": "0fafb2bc5061591676d136a29ed560d0", "score": "0.44450712", "text": "def e_step(self, ids=None):\n self.model.eval()\n with torch.no_grad():\n if ids is None:\n lambdas = self.model(self.X)\n self.gamma = self.compute_gamma(lambdas)\n else:\n lambdas = self.model(self.X[ids].to(self.device))\n self.gamma = self.compute_gamma(\n lambdas, x=self.X[ids], size=(self.n_clusters, len(ids))\n )", "title": "" }, { "docid": "6d3b1c32a47b5741fe7920dc297f0b4c", "score": "0.44446814", "text": "def model_computation(j):\n return 3 * j['age'] - 2* j['income']", "title": "" }, { "docid": "3c31cb4e5acf2862a9b4034321c819f4", "score": "0.44436225", "text": "def _variational_inference_saliency_function(inputs, nb_of_samples):\n samples = [variational_base_function([inputs, 1]) for _ in range(nb_of_samples)]\n return numpy.concatenate(samples)", "title": "" }, { "docid": "3337a688ed4132920898f6fa57d4be70", "score": "0.44429132", "text": "def _VariationalDropout(self, cell, input_size):\n return tf.nn.rnn_cell.DropoutWrapper(\n cell,\n input_keep_prob=self._variational_input_keep_prob,\n output_keep_prob=self._variational_output_keep_prob,\n variational_recurrent=True,\n input_size=input_size)", "title": "" }, { "docid": "c2f94513dbda61afd1193904c6446fb4", "score": "0.44409904", "text": "def _cal_dis(self, query_fea: torch.tensor, gallery_fea: torch.tensor) -> torch.tensor:\n query_fea = query_fea.transpose(1, 0)\n inner_dot = gallery_fea.mm(query_fea)\n dis = (gallery_fea ** 2).sum(dim=1, keepdim=True) + (query_fea ** 2).sum(dim=0, keepdim=True)\n dis = dis - 2 * inner_dot\n dis = dis.transpose(1, 0)\n return dis", "title": "" }, { "docid": "f04e52b5413c8efb03ef58f578ec2dec", "score": "0.44397736", "text": "def experiment_runner(args, x_0_dict):\n gc.enable()\n gc.collect()\n \n exp_res_dict = dict()\n \n if args.verbose:\n print(\"Started DetGNM!\")\n exp_res_dict['DetGNM'] = dict()\n for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:\n if args.verbose:\n print('Oracle:', name)\n exp_res_dict['DetGNM'][name] = dict()\n for n in args.n_dims:\n if args.verbose:\n print(' n:', n)\n exp_res_dict['DetGNM'][name][n] = dict()\n for i in range(args.n_starts):\n if args.verbose:\n print(' start #:', i + 1)\n start = time.time()\n _, f_vals, nabla_f_2_norm_vals, _, _ = DetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None)\n start = time.time() - start\n exp_res_dict['DetGNM'][name][n][i] = {'f_vals': f_vals, 'nabla_f_2_norm_vals': nabla_f_2_norm_vals, 'avg_time_s': start / len(f_vals), 'time_s': start}\n del _, f_vals, nabla_f_2_norm_vals, start\n gc.collect()\n \n if args.verbose:\n print(\"Started ArmijoAccDetGNM!\")\n exp_res_dict['ArmijoAccDetGNM'] = dict()\n for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:\n if args.verbose:\n print('Oracle:', name)\n exp_res_dict['ArmijoAccDetGNM'][name] = dict()\n for n in args.n_dims:\n if args.verbose:\n print(' n:', n)\n exp_res_dict['ArmijoAccDetGNM'][name][n] = dict()\n for pair_num, (c1, c2) in enumerate(zip(args.c1_list, 
args.c2_list)):\n if args.verbose:\n print(' c1 = {:.4f}, c2 = {:.4f}:'.format(c1, c2))\n exp_res_dict['ArmijoAccDetGNM'][name][n][pair_num] = dict()\n for i in range(args.n_starts):\n if args.verbose:\n print(' start #:', i + 1)\n start = time.time()\n _, f_vals, nabla_f_2_norm_vals, _, _, local_steps_list, spec_steps_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, \"Armijo\", c1=c1, c2=c2)\n start = time.time() - start\n exp_res_dict['ArmijoAccDetGNM'][name][n][pair_num][i] = {'f_vals': f_vals,\n 'nabla_f_2_norm_vals': nabla_f_2_norm_vals,\n 'local_steps_list': local_steps_list,\n 'spec_steps_list': spec_steps_list,\n 'avg_time_s': start / len(f_vals),\n 'time_s': start}\n del _, f_vals, nabla_f_2_norm_vals, local_steps_list, spec_steps_list, start\n gc.collect()\n \n if args.verbose:\n print(\"Started ExtrapolationAccDetGNM!\")\n exp_res_dict['ExtrapolationAccDetGNM'] = dict()\n for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:\n if args.verbose:\n print('Oracle:', name)\n exp_res_dict['ExtrapolationAccDetGNM'][name] = dict()\n for n in args.n_dims:\n if args.verbose:\n print(' n:', n)\n exp_res_dict['ExtrapolationAccDetGNM'][name][n] = dict()\n for i in range(args.n_starts):\n if args.verbose:\n print(' start #:', i + 1)\n start = time.time()\n _, f_vals, nabla_f_2_norm_vals, _, _, n_iter_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, \"Extrapolation\")\n start = time.time() - start\n exp_res_dict['ExtrapolationAccDetGNM'][name][n][i] = {'f_vals': f_vals,\n 'nabla_f_2_norm_vals': nabla_f_2_norm_vals,\n 'n_iter_list': n_iter_list,\n 'avg_time_s': start / len(f_vals),\n 'time_s': start}\n del _, f_vals, nabla_f_2_norm_vals, n_iter_list, start\n gc.collect()\n \n if args.verbose:\n print(\"Started InterpolationAccDetGNM!\")\n exp_res_dict['InterpolationAccDetGNM'] = dict()\n for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:\n if args.verbose:\n print('Oracle:', name)\n exp_res_dict['InterpolationAccDetGNM'][name] = dict()\n for n in args.n_dims:\n if args.verbose:\n print(' n:', n)\n exp_res_dict['InterpolationAccDetGNM'][name][n] = dict()\n for n_points in args.n_points_list:\n if args.verbose:\n print(' n_points:', n_points)\n exp_res_dict['InterpolationAccDetGNM'][name][n][n_points] = dict()\n for i in range(args.n_starts):\n if args.verbose:\n print(' start #:', i + 1)\n start = time.time()\n _, f_vals, nabla_f_2_norm_vals, _, _ = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, \"Interpolation\")\n start = time.time() - start\n exp_res_dict['InterpolationAccDetGNM'][name][n][n_points][i] = {'f_vals': f_vals,\n 'nabla_f_2_norm_vals': nabla_f_2_norm_vals,\n 'avg_time_s': start / len(f_vals),\n 'time_s': start}\n del _, f_vals, nabla_f_2_norm_vals, start\n gc.collect()\n \n if args.verbose:\n print(\"Started SamplingAccDetGNM!\")\n exp_res_dict['SamplingAccDetGNM'] = dict()\n for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:\n if args.verbose:\n print('Oracle:', name)\n exp_res_dict['SamplingAccDetGNM'][name] = dict()\n for n in args.n_dims:\n if args.verbose:\n print(' n:', n)\n exp_res_dict['SamplingAccDetGNM'][name][n] = dict()\n for n_points in args.n_points_list:\n if args.verbose:\n print(' n_points:', n_points)\n exp_res_dict['SamplingAccDetGNM'][name][n][n_points] = dict()\n for i in 
range(args.n_starts):\n if args.verbose:\n print(' start #:', i + 1)\n start = time.time()\n _, f_vals, nabla_f_2_norm_vals, _, _ = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, \"Sampling\")\n start = time.time() - start\n exp_res_dict['SamplingAccDetGNM'][name][n][n_points][i] = {'f_vals': f_vals,\n 'nabla_f_2_norm_vals': nabla_f_2_norm_vals,\n 'avg_time_s': start / len(f_vals),\n 'time_s': start}\n del _, f_vals, nabla_f_2_norm_vals, start\n gc.collect()\n \n if args.verbose:\n print(\"Started GoldenRatioAccDetGNM!\")\n exp_res_dict['GoldenRatioAccDetGNM'] = dict()\n for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:\n if args.verbose:\n print('Oracle:', name)\n exp_res_dict['GoldenRatioAccDetGNM'][name] = dict()\n for n in args.n_dims:\n if args.verbose:\n print(' n:', n)\n exp_res_dict['GoldenRatioAccDetGNM'][name][n] = dict()\n for i in range(args.n_starts):\n if args.verbose:\n print(' start #:', i + 1)\n start = time.time()\n _, f_vals, nabla_f_2_norm_vals, _, _, n_iter_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, \"GoldenRatio\")\n start = time.time() - start\n exp_res_dict['GoldenRatioAccDetGNM'][name][n][i] = {'f_vals': f_vals,\n 'nabla_f_2_norm_vals': nabla_f_2_norm_vals,\n 'n_iter_list': n_iter_list,\n 'avg_time_s': start / len(f_vals),\n 'time_s': start}\n del _, f_vals, nabla_f_2_norm_vals, n_iter_list, start\n gc.collect()\n \n return exp_res_dict", "title": "" }, { "docid": "f85a80fda1ec7e884a2d59371e13bcce", "score": "0.4439502", "text": "def main_denoising(wav_files, output_dir, verbose, use_gpu, gpu_id, truncate_minutes, mode, model_select='1000h',stage_select=3):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n # Load global MVN statistics.\n global_mean_var_matf = os.path.join(HERE, 'model', 'global_{}_mvn_stats.mat'.format(model_select) )\n global_mean_var = sio.loadmat(global_mean_var_matf)\n global_mean = global_mean_var['global_mean']\n global_var = global_mean_var['global_var']\n\n # Perform speech enhancement.\n for src_wav_file in wav_files:\n # Perform basic checks of input WAV.\n if not os.path.exists(src_wav_file):\n utils.error('File \"%s\" does not exist. Skipping.' % src_wav_file)\n continue\n if not utils.is_wav(src_wav_file):\n utils.error('File \"%s\" is not WAV. Skipping.' % src_wav_file)\n continue\n if utils.get_sr(src_wav_file) != SR:\n utils.error('Sample rate of file \"%s\" is not %d Hz. Skipping.' %\n (src_wav_file, SR))\n continue\n if utils.get_num_channels(src_wav_file) != NUM_CHANNELS:\n utils.error('File \"%s\" is not monochannel. Skipping.' % src_wav_file)\n continue\n if utils.get_bitdepth(src_wav_file) != BITDEPTH:\n utils.error('Bitdepth of file \"%s\" is not %d. Skipping.' %\n (src_wav_file, BITDEPTH))\n continue\n\n # Denoise.\n try:\n bn = os.path.basename(src_wav_file)\n dest_wav_file = os.path.join(output_dir, bn)\n denoise_wav(src_wav_file, dest_wav_file, global_mean, global_var, use_gpu, gpu_id, truncate_minutes, mode, model_select, stage_select )\n print('Finished processing file \"%s\".' % src_wav_file)\n except Exception as e:\n msg = 'Problem encountered while processing file \"%s\". Skipping.' 
% src_wav_file\n if verbose:\n msg = '%s Full error output:\\n%s' % (msg, e)\n utils.error(msg)\n continue", "title": "" }, { "docid": "d4b9001f489e57df2392d6a5312778f5", "score": "0.44390833", "text": "def objective(params, hparams):\n # print(\"params\", params)\n # print(\"hparams\", hparams)\n # return np.random.uniform(0.0, 1.0)\n\n # high-level params\n numsteps = hparams[\"numsteps\"]\n cm = hparams[\"measure\"]\n # core params\n # n = Genet(M = params[\"M\"])\n # print(\"len(params)\", len(params), params)\n\n rows = len(params)\n if rows > 0:\n cols = len(params[0])\n else:\n status = STATUS_FAIL\n \n M = np.array(params[0:(rows*cols)]).reshape((rows,cols))\n \n # a dict containg network config, timeseries, loss\n rundata = dict()\n \n # # create network\n # # n = Genet(2, 2)\n # n = Genet(M = M)\n \n # # state trajectory\n # Xs = np.zeros((numsteps, n.state_dim))\n # # loop over timesteps\n # for i in range(numsteps):\n # # x = np.dot(M, x)\n # n.step()\n # # Xs[i] = n.x.reshape((n.state_dim,))\n # # print(\"n.networks[\\\"fast\\\"][\\\"x\\\"].shape\", n.networks[\"fast\"][\"x\"].shape)\n # Xs[i,:n.state_dim] = n.networks[\"fast\"][\"x\"].reshape((n.state_dim,))\n\n conf = {\n \"numsteps\": numsteps,\n \"generator\": args.generator,\n \"params\": M\n }\n \n pi = 0\n for i in range(args.numindeval):\n Xs = evaluate_individual(conf)\n \n Xs_meas = Xs[:,[1,2]]\n \n # pi = cm.compute_pi(Xs)\n # pi = cm.compute_ais(Xs)\n # pi = cm.compute_pi_local(Xs)\n pi += cm.compute(Xs_meas)\n pi /= float(args.numindeval)\n pi = max(0, pi) + 1e-9\n # print(\"pi = %f nats\" % pi)\n # loss = -np.log(pi)\n loss = -pi\n\n status = STATUS_OK\n # return structure: params, timeseries, scalar loss\n rundata = {\n \"loss\": loss, # compute_complexity(Xs)\n \"status\": status, # compute_complexity(Xs)\n \"M\": M, # n.networks[\"fast\"][\"M\"],\n \"timeseries\": Xs.copy(),\n # \"loss\": np.var(Xs),\n }\n if hparams[\"continuous\"]:\n return rundata[\"loss\"]\n else:\n return rundata", "title": "" }, { "docid": "3be3a6625b5958bee223e5ca41328d69", "score": "0.4437971", "text": "def evaluate_noisy(self, x):\n raise NotImplementedError('evaluate_noisy not available')", "title": "" }, { "docid": "83e1de737862f728e0108a0f278edffc", "score": "0.44351983", "text": "def run_epoch(sess, cost_op, ops, reset, num_unrolls, var1, var2):\n# print(\"len.cost_op\",len(cost_op))\n# print('test',sess.run(test))\n# pdb.set_trace()\n# print(sess.run(grad))\n start = timer()\n sess.run(reset)\n# pdb.set_trace()\n \n# ops=[*zip(update, step)]\n# print(len(ops))\n for j in xrange(num_unrolls):\n #step = sess.run(ops)\n cost=[]\n# print(sess.run(grad))\n# constants = []\n# print('test',sess.run(constant[0]))\n# print(sess.run(cost_op))\n #exit(0)\n step = sess.run(ops)\n #print (\"step\", step)\n for i in range(len(cost_op)):\n# sub_constant = [sess.run(item) for item in constant[i]]\n sub_cost = (sess.run([cost_op[i]]) + step)[0] \n cost.append(sub_cost)\n# constants.append(sub_constant)\n #print ('l1, l2:', t1, t2, max([t2]))\n print('cost', cost)\n# print (sess.run(pair_dis))\n# print(sub_cost)\n# print(ops[1])\n# print(sess.run([cost_op[0]] + list(opss[0]))[0] )\n# cost = [sess.run([cost_op[i]] + list(ops[i]))[0] for i in range(len(cost_op))]\n return timer() - start, cost, var1, var2", "title": "" }, { "docid": "e3a23524cb1738a54d9fc8f1255f6734", "score": "0.4435192", "text": "def optimize(self):\n if self.replay_buffer.size() < self.batch_size:\n return\n\n observations, actions, returns = 
self.replay_buffer.sample(self.batch_size)\n self.optimizer.zero_grad()\n q_values = self.nec_net(observations.to(self.device))[range(self.batch_size), actions] # pick q_values for chosen actions\n loss = self.loss_fn(q_values, returns.to(self.device))\n loss.backward()\n self.optimizer.step()", "title": "" }, { "docid": "3e3634710e6dcc651922288851718bc0", "score": "0.44330838", "text": "def test_5():\r\n m = 200\r\n f = est_dir.quad_f_noise\r\n minimizer = np.ones((m,))\r\n centre_point = np.random.uniform(0, 10, (m, ))\r\n matrix = est_dir.quad_func_params(1, 1, m)\r\n func_args = (minimizer, matrix, 0, 5)\r\n np.random.seed(90)\r\n no_vars = 10\r\n region = 1\r\n set_all_positions = np.arange(m)\r\n (design, y,\r\n positions,\r\n func_evals) = est_dir.compute_frac_fact(m, centre_point, no_vars,\r\n f, func_args, region,\r\n set_all_positions)\r\n assert(positions.shape[0] == no_vars)\r\n assert(y.shape == (16, ))\r\n assert(func_evals == 16)\r\n assert(np.all(design == np.array([[-1, -1, -1, -1, -1,\r\n -1, -1, -1, +1, +1],\r\n [+1, -1, -1, -1, +1,\r\n -1, +1, +1, -1, -1],\r\n [-1, +1, -1, -1, +1,\r\n +1, -1, +1, -1, -1],\r\n [+1, +1, -1, -1, -1,\r\n +1, +1, -1, +1, +1],\r\n [-1, -1, +1, -1, +1,\r\n +1, +1, -1, -1, +1],\r\n [+1, -1, +1, -1, -1,\r\n +1, -1, +1, +1, -1],\r\n [-1, +1, +1, -1, -1,\r\n -1, +1, +1, +1, -1],\r\n [+1, +1, +1, -1, +1,\r\n -1, -1, -1, -1, +1],\r\n [-1, -1, -1, +1, -1,\r\n +1, +1, +1, -1, +1],\r\n [+1, -1, -1, +1, +1,\r\n +1, -1, -1, +1, -1],\r\n [-1, +1, -1, +1, +1,\r\n -1, +1, -1, +1, -1],\r\n [+1, +1, -1, +1, -1,\r\n -1, -1, +1, -1, +1],\r\n [-1, -1, +1, +1, +1,\r\n -1, -1, +1, +1, +1],\r\n [+1, -1, +1, +1, -1,\r\n -1, +1, -1, -1, -1],\r\n [-1, +1, +1, +1, -1,\r\n +1, -1, -1, -1, -1],\r\n [+1, +1, +1, +1, +1,\r\n +1, +1, +1, +1, +1]])))\r\n assert(np.all(y > 0))", "title": "" }, { "docid": "9f1d112df88628a12f3f3cec30e6f0fa", "score": "0.44303003", "text": "def optimize(self) -> None:\n if self.is_GPY:\n self.model_gpy.randomize()\n self.model_gpy.optimize_restarts(1, robust=True)\n nmll = self.model_gpy.log_likelihood()\n self.nmll_vec.append(nmll)\n length = self.model_gpy.kern['.*lengthscale'].values()\n length = length.reshape(-1,self.data_dim)\n weights = self.model_gpy.kern['.*variance'].values()\n self.sm_params.weights = torch.from_numpy(weights).float().unsqueeze(-1)\n self.sm_params.var = torch.from_numpy(1/4/math.pi**2/length**2).float()\n else:\n if self.ai:\n num_data = self.X_train_torch.shape[0]\n data_dim = self.X_train_torch.shape[1]\n X_data =self.X_train_torch.unsqueeze(0) # 1 X N X D\n F = self.Y_train_torch.unsqueeze(0).squeeze(-1) # 1 X N\n node_mask = torch.ones(1, num_data) # 1 X N\n diagonal_mask = torch.zeros(1, num_data) # 1 X N\n dim_mask = torch.ones(1, data_dim) # 1 X D\n kernel_mask = torch.ones(1, num_data, num_data) # 1 X N X N\n N = torch.ones(1) * num_data # 1\n #Timer starts\n time_start = time.time()\n if self.is_no_mu:\n var, weights, nmll = self.ai_model(X_data,X_data,F,F,node_mask,dim_mask,kernel_mask,diagonal_mask,N,device=self.device)\n else:\n mu, var, weights, nmll = self.ai_model(X_data,X_data,F,F,node_mask,dim_mask,kernel_mask,diagonal_mask,N,device=self.device)\n #Timer ends\n time_end = time.time()\n self.nmll_vec.append(nmll.item())\n self.time_count = self.time_count + time_end - time_start\n if not self.is_no_mu:\n self.sm_params.mu = mu.detach().squeeze(0) # M X D\n self.sm_params.var = var.detach().squeeze(0) # M X D\n self.sm_params.weights = weights.detach().squeeze(0)\n else:\n if self.settings.opt_method 
== 'Adam':\n optimizer = torch.optim.Adam([\n {'params': self.gp_sm_model.parameters()}, # Includes GaussianLikelihood parameters\n ], lr=self.settings.lr) \n elif self.settings.opt_method == 'LBFGS':\n optimizer = torch.optim.LBFGS([\n {'params': self.gp_sm_model.parameters()}, # Includes GaussianLikelihood parameters\n ], lr=self.settings.lr, max_iter=10, tolerance_grad=2.0e-4)\n else:\n raise ValueError(\"No opt method of given name!\")\n loss = self.gp_sm_model(self.X_train_torch, self.Y_train_torch, self.settings.epsilon, self.settings.device)\n self.gp_sm_model.train()\n\n #Timer starts\n time_start = time.time()\n for j in range(self.settings.training_iter):\n def closure():\n # Zero gradients from previous iteration\n optimizer.zero_grad()\n # Output from model\n loss = self.gp_sm_model(self.X_train_torch, self.Y_train_torch, self.settings.epsilon, self.settings.device)\n # Calc loss and backprop gradients\n loss.backward()\n if self.settings.is_print:\n grad_norm = 0\n for p in self.gp_sm_model.parameters():\n if p.requires_grad:\n param_norm = p.grad.data.norm()\n grad_norm += param_norm.item() ** 2\n grad_norm = grad_norm ** (1./2)\n print('Iter %d/%d - Loss: %.3f - grad_norm: %.3e' % (j + 1, self.settings.training_iter, loss.item(), grad_norm))\n return loss\n loss = optimizer.step(closure)\n \n nmll = loss.detach().cpu().numpy()\n self.nmll_vec.append(nmll.item())\n var_sm = torch.clamp(self.gp_sm_model.var, min=-30.0, max=30.0)\n var_sm = torch.exp(var_sm)\n weights_sm = torch.clamp(self.gp_sm_model.weights, min=-30.0, max=30.0)\n weights_sm = torch.softmax(weights_sm, dim=-2)\n #Timer ends\n time_end = time.time()\n self.time_count = self.time_count + time_end - time_start\n\n if not self.is_no_mu:\n mu_sm = torch.clamp(self.gp_sm_model.mu, min=-30.0, max=30.0)\n mu_sm = torch.exp(mu_sm)\n self.sm_params.mu = mu_sm.detach()\n self.sm_params.var = var_sm.detach()\n self.sm_params.weights = weights_sm.detach()", "title": "" }, { "docid": "ff446d87ce484bda65d9eea09fb4dd10", "score": "0.4430296", "text": "def output_prune(cls, model: NNCFNetwork, node: NNCFNode, graph: NNCFGraph):", "title": "" }, { "docid": "133d2a05ecdda5f3dd2827e3c0651515", "score": "0.44252816", "text": "async def query(request: Request, *, exclude_sets=False) -> Message:\n message = request.message.dict()\n qgraph = message['query_graph']\n results = message['results']\n\n qnodes = qgraph['nodes']\n qedges = qgraph['edges']\n\n # knode_map = {knode['id']: knode for knode in knodes}\n qnode_map = {qnode['id']: qnode for qnode in qnodes}\n qedge_map = {qedge['id']: qedge for qedge in qedges}\n\n driver = Neo4jDatabase(\n url=NEO4J_URL,\n credentials={\n 'username': NEO4J_USER,\n 'password': NEO4J_PASSWORD,\n },\n )\n redges_by_id = dict()\n count_plans = defaultdict(lambda: defaultdict(list))\n for kdx, result in enumerate(results):\n rgraph = get_rgraph(result, message)\n redges_by_id.update({\n (kdx, redge['id']): redge\n for redge in rgraph['edges']\n })\n\n for redge in rgraph['edges']:\n if (not exclude_sets) or qnode_map[redge['qg_target_id']].get('set', False):\n count_plans[redge['kg_source_id']][(redge['eb']['qg_id'], redge['qg_target_id'])].append(\n (kdx, redge['id'])\n )\n if (not exclude_sets) or qnode_map[redge['qg_source_id']].get('set', False):\n count_plans[redge['kg_target_id']][(redge['eb']['qg_id'], redge['qg_source_id'])].append(\n (kdx, redge['id'])\n )\n\n count_to_redge = {}\n for ldx, batch in enumerate(batches(list(count_plans.keys()), 1000)):\n batch_bits = []\n for idx, ksource_id 
in enumerate(batch):\n sets = []\n plan = count_plans[ksource_id]\n anchor_node_reference = NodeReference({\n 'id': f'n{idx:04d}',\n 'curie': ksource_id,\n 'type': 'named_thing'\n })\n anchor_node_reference = str(anchor_node_reference)\n base = f\"MATCH ({anchor_node_reference}) \"\n for jdx, (qlink, redge_ids) in enumerate(plan.items()):\n cypher_counts = []\n qedge_id, qtarget_id = qlink\n count_id = f\"c{idx:03d}{chr(97 + jdx)}\"\n qedge = qedge_map[qedge_id]\n edge_reference = EdgeReference(qedge, anonymous=True)\n anon_node_reference = NodeReference({\n **qnode_map[qtarget_id],\n 'id': count_id,\n })\n if qedge['source_id'] == qtarget_id:\n source_reference = anon_node_reference\n target_reference = anchor_node_reference\n elif qedge['target_id'] == qtarget_id:\n source_reference = anchor_node_reference\n target_reference = anon_node_reference\n cypher_counts.append(f\"{anon_node_reference.name}: count(DISTINCT {anon_node_reference.name})\")\n count_to_redge[count_id] = redge_ids\n sets.append(f'MATCH ({source_reference}){edge_reference}({target_reference})' + ' RETURN {' + ', '.join(cypher_counts) + '} as output')\n batch_bits.append(' UNION ALL '.join(sets))\n cypher = ' UNION ALL '.join(batch_bits)\n response = driver.run(cypher)\n\n degrees = {\n key: value\n for result in response\n for key, value in result['output'].items()\n }\n\n for key in degrees:\n for redge_id in count_to_redge[key]:\n eb = redges_by_id[redge_id]['eb']\n eb['weight'] = eb.get('weight', 1.0) / degrees[key]\n\n message['results'] = results\n return Message(**message)", "title": "" }, { "docid": "d0156faf7d057a4a147fdec6e4d5b622", "score": "0.44235557", "text": "def E_out(batch, data):\n xs, target = zip(*data)\n values = compute_batch(batch, xs)\n g_bar = np.mean(values, axis=0)\n g_var = np.var(values, axis=0)\n return g_var.mean() + np.mean((g_bar - np.array(target))**2)", "title": "" }, { "docid": "c0901ecc8e94282e85e41206a03e720f", "score": "0.44233975", "text": "def test_var_exp(self):\n for test_setup in self.test_setups:\n l = test_setup.likelihood\n y = test_setup.Y\n with l.tf_mode():\n r1 = tf.Session().run(l.logp(self.F, y), feed_dict={self.x: l.get_free_state(), self.F:self.F_data})\n r2 = tf.Session().run(l.variational_expectations(self.F, self.F * 0,test_setup.Y), feed_dict={self.x: l.get_free_state(), self.F:self.F_data}) \n self.failUnless(np.allclose(r1, r2, test_setup.tolerance, test_setup.tolerance))", "title": "" }, { "docid": "9f80baee07c059ff4342eb509a2f4d62", "score": "0.44231093", "text": "def standard_counterfactual_query(self, node_of_interest, observed, intervention, n_samples_for_approx=None):\n # infer latents and generate model, also initializes self.standard_inference_time\n self._generate_counterfactual_model(observed, intervention, n_samples=n_samples_for_approx)\n # then run the query\n ## for stability, pass in as evidence a deterministic value for the intervention node\n int_noise_node_values = {\"U{}\".format(k): intervention[k] for k in intervention}\n q, time_elapsed = self.query(node_of_interest, observed=int_noise_node_values, counterfactual=True)\n self.standard_inference_time = self.joint_inference_time + time_elapsed\n return q", "title": "" }, { "docid": "b264aadfaf358381f72738074335b2a1", "score": "0.4422142", "text": "def test_2():\r\n n = 16\r\n m = 10\r\n no_vars = m\r\n positions = np.sort(np.random.choice(np.arange(m), no_vars,\r\n replace=False))\r\n assert(np.unique(positions).shape[0] == no_vars)\r\n f = est_dir.quad_f_noise\r\n design = np.array([[-1, 
-1, -1, -1, -1, -1, -1, -1, +1, +1],\r\n [+1, -1, -1, -1, +1, -1, +1, +1, -1, -1],\r\n [-1, +1, -1, -1, +1, +1, -1, +1, -1, -1],\r\n [+1, +1, -1, -1, -1, +1, +1, -1, +1, +1],\r\n [-1, -1, +1, -1, +1, +1, +1, -1, -1, +1],\r\n [+1, -1, +1, -1, -1, +1, -1, +1, +1, -1],\r\n [-1, +1, +1, -1, -1, -1, +1, +1, +1, -1],\r\n [+1, +1, +1, -1, +1, -1, -1, -1, -1, +1],\r\n [-1, -1, -1, +1, -1, +1, +1, +1, -1, +1],\r\n [+1, -1, -1, +1, +1, +1, -1, -1, +1, -1],\r\n [-1, +1, -1, +1, +1, -1, +1, -1, +1, -1],\r\n [+1, +1, -1, +1, -1, -1, -1, +1, -1, +1],\r\n [-1, -1, +1, +1, +1, -1, -1, +1, +1, +1],\r\n [+1, -1, +1, +1, -1, -1, +1, -1, -1, -1],\r\n [-1, +1, +1, +1, -1, +1, -1, -1, -1, -1],\r\n [+1, +1, +1, +1, +1, +1, +1, +1, +1, +1]])\r\n centre_point = np.random.uniform(0, 10, (m, ))\r\n matrix = est_dir.quad_func_params(1, 1, m)\r\n minimizer = np.ones((m,))\r\n func_args = (minimizer, matrix, 0, 5)\r\n region = 1\r\n y, func_evals = est_dir.compute_y(centre_point, design, positions, n, m,\r\n f, func_args, region)\r\n assert(y.shape == (n, ))\r\n assert(func_evals == n)\r\n assert(np.all(y > 0))", "title": "" }, { "docid": "316b280b6de57ad61adb124d4b38816d", "score": "0.44161758", "text": "def _get_dnn(x):\n\n dense1 = tf.layers.dense(\n x, 1024, activation = tf.tanh,\n kernel_initializer = tf.random_normal_initializer()\n )\n batch1 = tf.layers.batch_normalization(dense1)\n dropout1 = tf.layers.dropout(batch1, rate = 0.1)\n\n dense2 = tf.layers.dense(\n dropout1, 512, activation = tf.nn.relu,\n kernel_initializer = tf.random_normal_initializer()\n )\n batch2 = tf.layers.batch_normalization(dense2)\n dropout2 = tf.layers.dropout(batch2, rate = 0.3)\n\n\n dense3 = tf.layers.dense(\n dropout2, 256, activation = tf.nn.relu,\n kernel_initializer = tf.random_normal_initializer()\n )\n batch3 = tf.layers.batch_normalization(dense3)\n dropout3 = tf.layers.dropout(batch3, rate = 0.2)\n\n dense4 = tf.layers.dense(\n dropout3, 128, activation = tf.nn.relu,\n kernel_initializer = tf.random_normal_initializer()\n )\n batch4 = tf.layers.batch_normalization(dense4)\n dropout4 = tf.layers.dropout(batch4, rate = 0.1)\n\n y_pred = tf.layers.dense(dropout4, 1)\n\n return y_pred", "title": "" }, { "docid": "709814407abfea370e7eec24917932cf", "score": "0.4412415", "text": "def filter_variables(node, copier, **kw):\n if isinstance(node, tf.Operation) and node.type == 'VariableV2':\n return node\n elif ( isinstance(node, tf.Operation)\n and node.type == 'Identity'\n and len(node.inputs) == 1\n and node.inputs[0].op.type == 'VariableV2'):\n return node\n else:\n return None", "title": "" }, { "docid": "20e8d6bc3e70d723fb1c2ae8d969637f", "score": "0.44106925", "text": "def deactivated_test_extreme_index(self):\n\n nf = 40\n nt = 150\n n = nf * nt\n dt = 0.26214\n BW = 1. / dt / 2.\n time_stream = sp.zeros((nf, nt))\n time_stream = al.make_vect(time_stream, axis_names=(\"freq\", \"time\"))\n time = dt * (sp.arange(nt) + 50)\n N = dirty_map.Noise(time_stream, time)\n # Thermal.\n thermal = sp.zeros(nf, dtype=float) + 0.0002 * BW * 2.\n thermal[22] = dirty_map.T_infinity**2\n N.add_thermal(thermal)\n # Time mean and slope.\n N.deweight_time_mean()\n N.deweight_time_slope()\n # Extreem index over_f bit.\n mode = -sp.ones(nf, dtype=float) / sp.sqrt(nf - 1)\n mode[22] = 0\n # Parameters measured from one of the data sets. 
Known to screw things\n # up.\n #N.add_over_f_freq_mode(8.128e-7, -4.586, 1.0, 1.422e-7, mode, True)\n N.add_over_f_freq_mode(0.001729, -0.777, 1.0, 1e-8, mode, True)\n #N.orthogonalize_modes()\n N.finalize()\n # Check if the fast inverse works.\n N_mat = N.get_mat()\n N_mat.shape = (n, n)\n N_inv = N.get_inverse()\n N_inv.shape = (n, n)\n #eye = sp.dot(N_mat, N_inv)\n #plt.figure()\n #plt.plot(eye.flat[::n + 1])\n #plt.figure()\n #plt.imshow(sp.reshape(eye.flat[::n + 1], (nf, nt)))\n #plt.colorbar()\n #eye.shape = (nf, nt, nf, nt)\n #plt.figure()\n #plt.imshow(eye[1,:,1,:])\n #plt.colorbar()\n #plt.figure()\n #plt.imshow(eye[:,1,:,1])\n #plt.colorbar()\n #plt.show()", "title": "" } ]
7c4d9379deed687e5ce4e2288b5a1b41
Simple JSON Formatter wrapper for consistent formatting.
[ { "docid": "6997219fa49faa74f7a7f38d7dcb8b21", "score": "0.0", "text": "def proto_to_json(proto: message.Message) -> str:\n return json_format.MessageToJson(\n message=proto, sort_keys=True, preserving_proto_field_name=True)", "title": "" } ]
[ { "docid": "0cca7b880ba3f7c97ca0b4032b21db93", "score": "0.7336949", "text": "def make_formatter(format_name):\n\n if \"json\" in format_name:\n from json import dumps\n import datetime\n\n def jsonhandler(obj):\n obj.isoformat() if isinstance(\n obj, (datetime.datetime, datetime.date)\n ) else obj\n\n if format_name == \"prettyjson\":\n\n def jsondumps(data):\n return dumps(\n data, default=jsonhandler, indent=2, separators=(\",\", \": \")\n )\n\n else:\n\n def jsondumps(data):\n return dumps(data, default=jsonhandler)\n\n def jsonify(data):\n if isinstance(data, dict):\n print(jsondumps(data))\n elif isinstance(data, list):\n print(jsondumps([device._asdict() for device in data]))\n else:\n print(dumps({\"result\": data}))\n\n return jsonify\n else:\n\n def printer(data):\n if isinstance(data, dict):\n print(data)\n else:\n for row in data:\n print(row)\n\n return printer", "title": "" }, { "docid": "a7e0b5b4d3f9a100e71440be91cd53ca", "score": "0.70558834", "text": "def JsonFormatter(issue, format='long'):\n\n if format == 'short':\n issue = issue.get('id', '')\n\n return json.dumps(issue, indent=4, default=str, sort_keys=True)", "title": "" }, { "docid": "1a0ee7545513bcd9fbe271ace12270af", "score": "0.6937898", "text": "def __init__(self, fmt: str, *args, **kwargs):\n jsonlogger.JsonFormatter.__init__(self, fmt=fmt, *args, **kwargs)", "title": "" }, { "docid": "d8ea80edff39451eeb5e5268092721d6", "score": "0.67870814", "text": "def formatter(args):\n if args.repr:\n return repr\n elif args.json:\n from json import dumps\n return dumps\n elif args.pprint:\n from pprint import pformat\n return pformat\n elif args.format:\n return lambda value: format(value, args.format)\n else:\n return str", "title": "" }, { "docid": "c5a599c2dd2b2fb329c99ed2f37997b9", "score": "0.65589726", "text": "def format(self, jsondata: dict) -> str:\n return json.dumps(jsondata, indent=\" \")", "title": "" }, { "docid": "4280d1009d250a64814c4bc2b47f076b", "score": "0.6505996", "text": "def json_wrapper():\n sub = regex.compile(r\"\\[\\s+((?P<n>-?\\d+),?\\s+)+\\]\").sub\n unwrap_int_list = lambda m: f\"[{', '.join(m.captures('n'))}]\"\n return lambda data: sub(unwrap_int_list, json.dumps(data, indent=2)) + \"\\n\"", "title": "" }, { "docid": "e51fc885c6bf7c8c437f831e4bb8cf52", "score": "0.63327867", "text": "def json_pretty(obj):\n return json.dumps(obj, indent = 4, separators = (',', ':'), sort_keys = True)", "title": "" }, { "docid": "e4db8ad72863eb0f97552c737d1269d9", "score": "0.62956357", "text": "def wrapper(*args, **kwargs):\n out = f(*args, **kwargs)\n printable = repr(out)\n lexer = lexers.PythonLexer()\n try:\n serialized = json.loads(out)\n except ValueError:\n pass\n else:\n if isinstance(serialized, collections.abc.Container):\n printable = json.dumps(serialized, indent=2)\n lexer = lexers.JsonLexer()\n printable = highlight(\n printable,\n lexer,\n formatters.Terminal256Formatter(style=MonokaiStyle))\n return printable", "title": "" }, { "docid": "34ad8fe1cdfc4c75c47b8415168e2c3a", "score": "0.6284511", "text": "def format(self, fmt, cmd, json):\n if isinstance(json, bytes):\n json = j.loads(json.decode('utf-8'))\n if fmt not in enums.Format:\n raise TypeError('format not supported')\n if fmt == enums.Format.JSON:\n return json\n if fmt == enums.Format.MARKDOWN:\n func = '%s_markdown' % cmd\n elif fmt == enums.Format.PLAINTEXT:\n func = '%s_plaintext' % cmd\n else:\n raise TypeError('format not supported')\n\n if not hasattr(self.__cmd, func):\n raise TypeError('format not supported')\n if json is 
None: # Running or pending\n return json\n if isinstance(json, dict) and 'error' in json: # Handle error message formating\n if fmt == enums.Format.JSON:\n return json\n if fmt == enums.Format.MARKDOWN:\n return '**' + json['error'] + '**'\n elif fmt == enums.Format.PLAINTEXT:\n return json['error']\n\n return self.__cmd.__getattribute__(func)(json)", "title": "" }, { "docid": "203295684aa2cb99aa8f2fcfeae482c0", "score": "0.6234302", "text": "def __format__(self, format_spec):\n if format_spec:\n # At this point, format_spec is a string that looks something like\n # \"indent=4,sort_keys=True\". What we want is to build a function call\n # from that which looks like:\n #\n # json_encoder_factory(indent=4,sort_keys=True)\n #\n # which we can then eval() to create our encoder instance.\n make_encoder = \"json_encoder_factory(\" + format_spec + \")\"\n encoder = eval(\n make_encoder, {\"json_encoder_factory\": self.json_encoder_factory}\n )\n else:\n encoder = self.json_encoder\n return encoder.encode(self.value)", "title": "" }, { "docid": "b928bc222b9494e038316d4c4f6a1eb1", "score": "0.6228454", "text": "def json_format(filename, indent=DEFAULT_INDENT_SIZE, **kwargs):\r\n console = kwargs.get(\"console\", logging.getLogger(\"console\"))\r\n encoding = kwargs.get(\"encoding\", None)\r\n dry_run = kwargs.get(\"dry_run\", False)\r\n if indent is None:\r\n sort_keys = False\r\n else:\r\n sort_keys = True\r\n\r\n message = \"%s ...\" % filename\r\n# if not (os.path.exists(filename) and os.path.isfile(filename)):\r\n# console.error(\"%s ERROR: file not found.\", message)\r\n# return 0\r\n\r\n contents = open(filename, \"r\").read()\r\n data = json.loads(contents, encoding=encoding)\r\n contents2 = json.dumps(data, indent=indent, sort_keys=sort_keys)\r\n contents2 = contents2.strip()\r\n contents2 = \"%s\\n\" % contents2\r\n if contents == contents2:\r\n console.info(\"%s SKIP (already pretty)\", message)\r\n return 2 #< SKIPPED.\r\n elif not dry_run:\r\n outfile = open(filename, \"w\")\r\n outfile.write(contents2)\r\n outfile.close()\r\n console.warn(\"%s OK\", message)\r\n return 1 #< OK\r", "title": "" }, { "docid": "eafcd84f94125563fa535a0e3703a5ba", "score": "0.62283", "text": "def jsonify(self, *args, **kwargs):\n indent = None\n separators = (',', ':')\n\n if self.app.config['JSONIFY_PRETTYPRINT_REGULAR'] or self.app.debug:\n indent = 2\n separators = (', ', ': ')\n\n if args and kwargs:\n raise TypeError(\n 'jsonify() behavior undefined when passed both args and kwargs'\n )\n elif len(args) == 1: # single args are passed directly to dumps()\n data = args[0]\n else:\n data = args or kwargs\n\n return self.app.response_class(\n (json.dumps(\n data, indent=indent,\n separators=separators, cls=MongoJsonEncoder),\n '\\n'),\n mimetype=self.app.config['JSONIFY_MIMETYPE']\n )", "title": "" }, { "docid": "63b0dbd500d8c1a2f9efb8e883a9c730", "score": "0.62282085", "text": "def json_safe_format(json_input=None, **kwargs):\n ############################ Custom Code Goes Below This Line #################################\n import json\n import phantom.rules as phantom\n \n outputs = {}\n \n safe_json = json.dumps(json.loads(json_input, strict=False))\n \n outputs['json_output'] = safe_json\n \n # Return a JSON-serializable object\n assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable\n return outputs", "title": "" }, { "docid": "a4a60fef617f1a8946f60248544b0220", "score": "0.6220235", "text": "def pformat_json(data):\n if data is None:\n return ''\n try:\n 
json_data = json.loads(data) if isinstance(data, (str, bytes)) else data\n return json.dumps(json_data, indent=2, sort_keys=True)\n except (TypeError, ValueError):\n return data", "title": "" }, { "docid": "4b25fd6a9d5413ed0ec2077853c807fb", "score": "0.619142", "text": "def pretty(js):\n return json.dumps(js, indent=2, ensure_ascii=False)", "title": "" }, { "docid": "4cf1ff6486a6023d4a90e826c7731260", "score": "0.6156818", "text": "def render_json(value):\n return json_dumps(value, indent=4, sort_keys=True)", "title": "" }, { "docid": "dbccd0c53340b4ed2fa68f47e69d7c37", "score": "0.6151331", "text": "def format(self, fmt, cmd, json):\n if fmt not in enums.Format:\n raise TypeError('format not supported')\n if fmt == enums.Format.JSON:\n return json\n if fmt == enums.Format.MARKDOWN:\n func = '%s_markdown' % cmd\n elif fmt == enums.Format.PLAINTEXT:\n func = '%s_plaintext' % cmd\n else:\n raise TypeError('format not supported')\n\n if not hasattr(self.__intf, func):\n raise TypeError('format not supported')\n return self.__intf.__getattribute__(func)(json)", "title": "" }, { "docid": "3594767ae351b24de198b363638ca723", "score": "0.61373544", "text": "def json(self, fp=None, indent=None, separators=(', ', ': ')):\n \n if fp is None:\n return json.dumps( self, indent=indent, separators=separators)\n else:\n json.dump(self, fp, indent=indent, separators=separators)", "title": "" }, { "docid": "94e5ad9ca340e8318c405d93a40292d4", "score": "0.61362046", "text": "def format_json(self, input_str=None):\n io = StringIO()\n json.dump(input_str, io)\n return io.getvalue()", "title": "" }, { "docid": "9829d3f36bd9e55d84e34daf14124ce6", "score": "0.6114374", "text": "def pretty_print(js):\n try:\n return json.dumps(js, indent=4, separators=(\",\", \":\"), sort_keys=True)\n except Exception as e:\n return \"%s\" % js", "title": "" }, { "docid": "fb83822225ed829b55cef35ef9fc84f0", "score": "0.61068195", "text": "def json(value, pretty=True):\n if _Duration is None:\n _late_import()\n return _json_encoder(value, pretty=pretty)", "title": "" }, { "docid": "1f869aa11aa82b079550cfa42854c3d4", "score": "0.60853505", "text": "def jsonify(self) -> str:\n pass", "title": "" }, { "docid": "6fac1e0b0a5f685be4adfa99261c58e0", "score": "0.6068353", "text": "def pretty_json(dict):\n #return json.dumps(dict, indent=4, cls=DecimalEncoder)\n #return json.dumps(my_dictionary, indent=4, sort_keys=True, default=str)\n\n return json.dumps(dict, indent=2, default=json_serial, sort_keys=True, )", "title": "" }, { "docid": "6197a1ec499cafe45806e1f386c17fe8", "score": "0.6063559", "text": "def format_json(args, skeleton, submissions, subreddit):\n\n format_status = Status(\n \"Finished formatting data for JSON export.\",\n \"Formatting data for JSON export.\",\n \"white\"\n )\n\n format_status.start()\n skeleton[\"data\"] = submissions\n \n if args.rules:\n FormatJSON._add_subreddit_rules(skeleton, subreddit)\n \n format_status.succeed()", "title": "" }, { "docid": "9c11673f8da05e1b64a81994ad5e26c4", "score": "0.60200065", "text": "def render_json(diff):\n return json.dumps(diff, indent=2)", "title": "" }, { "docid": "e96d86aaf4e698c67f4bb392f21998e8", "score": "0.5987762", "text": "def defaultFormatter(cls,descriptions,values):\n return (\"application/json\",yaml.dump(values))", "title": "" }, { "docid": "ed0a2c6b1272ed1ecd20d56d58ec9b83", "score": "0.59724605", "text": "def json_pretty(obj):\n return dumps(obj, sort_keys=True, indent=4, separators=(',', ': '),\n cls=SetEncoder)", "title": "" }, { "docid": 
"11bf282842b5403074c7e66e0be2e22d", "score": "0.5933132", "text": "def pretty_json(self):\n return json.dumps(self.__dict__, indent=4)", "title": "" }, { "docid": "7859d7382481f20579a58ebf1587270c", "score": "0.59245706", "text": "def formatter( self ):\n return self._formatter", "title": "" }, { "docid": "5ae4951e1bd0c640617b589f0f19692c", "score": "0.592159", "text": "def _format_output(output):\n\n if isinstance(output, six.string_types):\n return output\n\n try:\n return json.dumps(output)\n except (TypeError, ValueError):\n return str(output)", "title": "" }, { "docid": "b41bca0ccc0a302423cbb178534fa559", "score": "0.5911413", "text": "def json(a):\n kwargs = {}\n if settings.DEBUG:\n kwargs['indent'] = 4\n kwargs['separators'] = (',', ': ')\n json_str = json_dumps(a, cls=FusionboxJSONEncoder, **kwargs)\n\n # Escape all the XML/HTML special characters.\n escapes = ['<', '>', '&']\n for c in escapes:\n json_str = json_str.replace(c, r'\\u%04x' % ord(c))\n\n # now it's safe to use mark_safe\n return mark_safe(json_str)", "title": "" }, { "docid": "c712b0630506f95b9966fd13c0624dbf", "score": "0.58950686", "text": "def json(self, pretty=False):\n\n kwargs = {}\n if pretty:\n kwargs.update({\n 'indent': 4,\n 'sort_keys': True\n })\n return json.dumps(self.dict(), **kwargs)", "title": "" }, { "docid": "a1b4de69c523057291c228c6a6bdfebf", "score": "0.58947676", "text": "def to_json(self,\n indent: Optional[int] = None,\n separators: Optional[Tuple[str, str]] = None) -> str:\n return json.dumps(self.to_dict(), indent=indent, separators=separators)", "title": "" }, { "docid": "0d605a14934c25ccbe8af978556297fd", "score": "0.58806247", "text": "def simplejsonise(self):\n jsoniser = JSONiser()\n jsoniser.dejson(self.jsondictionary())\n result = json.dumps(jsoniser.parameters)\n return result", "title": "" }, { "docid": "226c645df786ff8c4f06ae52751e60e1", "score": "0.5879202", "text": "def readableJson(data):\n return json.dumps(data, indent=4)", "title": "" }, { "docid": "3fe06d25057e3b790d008317a0d8dacb", "score": "0.5877712", "text": "def formatter(format):\n def format_func(self):\n return format % tuple(map(self, lambda x: x.tostring()))\n return format_func", "title": "" }, { "docid": "59a5a525b40599a9f60265f17409762c", "score": "0.58620363", "text": "def return_json(f):\n\n @functools.wraps(f)\n def wrapper(*args, **kw):\n return jsonify(f(*args, **kw))\n\n return wrapper", "title": "" }, { "docid": "420c1dce00312e162f3e33c6df256ea9", "score": "0.58570546", "text": "def json_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj", "title": "" }, { "docid": "464e9e32aba19b17cfac010d6fa719b2", "score": "0.5801854", "text": "def format_issue(self, issue):\n formatted_issue = \"%s\\n\" % json.dumps(issue)\n return formatted_issue", "title": "" }, { "docid": "7f6071bab86a113679751906fd72065f", "score": "0.57816416", "text": "def render_json(self):\n self.print(json.dumps(self.data, indent=2))", "title": "" }, { "docid": "9aeb3347f86e6c3f9021abbeefadc536", "score": "0.5778413", "text": "def formatter(self, value):\r\n return value", "title": "" }, { "docid": "4bc2251d6091c15963429219c533f30d", "score": "0.57765234", "text": "def serialize_format(self):\n return self.get(\"serialize_format\", default=\"json\")", "title": "" }, { "docid": "d68d361ab9cf402fdb6f78246fdfef4d", "score": "0.5773917", "text": "def print_pretty_json(data):\r\n print(json.dumps(data, sort_keys=False, indent=4, separators=(',', ': ')))", "title": "" }, { "docid": "b1c63b618c2a6fd4984e57ca2ffb230f", 
"score": "0.5767431", "text": "def __json__(self):\n #TODO implement the __json__ function\n pass", "title": "" }, { "docid": "2bcdc531dd3223a4ee4ac13b8cb146b3", "score": "0.57638687", "text": "def pretty(json_string):\n return json.dumps(json.loads(json_string), indent=4)", "title": "" }, { "docid": "e5002fa51b2f82dd417789d408fbc4cc", "score": "0.5750417", "text": "def format():", "title": "" }, { "docid": "0eb461ef0a0ab7cf7f363f1e12d38f38", "score": "0.5748963", "text": "def pretty_printable(iterable_data, colorize=False):\n result = pretty_json(list(iterable_data))\n if colorize and not enabled_colors:\n raise JFError(\"Can't import pygments module\")\n\n if colorize:\n return highlight(result, JsonLexer(), TerminalFormatter())\n\n return result", "title": "" }, { "docid": "d25e7885b8d38ca7ef0e47307146c58d", "score": "0.5748667", "text": "def encoderstatjson():\n\treturn encoderstatus(textOnly = False)", "title": "" }, { "docid": "7755a60f04ce85fea5477ee88cf468e7", "score": "0.5726318", "text": "def render(self, data, *args, **kwargs):\n data = json.loads(json.dumps(data, cls=self.encoder_class))\n return super(JSONRenderer, self).render(data, *args, **kwargs)", "title": "" }, { "docid": "90105d61e8ff1abd39a4b754933a9b3e", "score": "0.57236016", "text": "def float_format(formatstr='.15g'):\n formatter = json.encoder.FLOAT_REPR\n json.encoder.FLOAT_REPR = lambda o: format(o, formatstr)\n yield\n json.encoder.FLOAT_REPR = formatter", "title": "" }, { "docid": "ce27b7e98d7f927ed583991527868ec3", "score": "0.5718316", "text": "def render_json_string(s):\n return json.dumps(s)", "title": "" }, { "docid": "f38c27c3946fd1f119afb4c99e9a24aa", "score": "0.5710317", "text": "def pretty(data):\n\n doc = json.loads(data)\n print(json.dumps(doc, sort_keys=True, indent=2, separators=(',', ': ')))", "title": "" }, { "docid": "967c0edbabe6446efa0e62f56ab7102c", "score": "0.5695587", "text": "def __str__(self):\n return json.dumps(self.render(), indent=2, sort_keys=True)", "title": "" }, { "docid": "e692a33fdb2e54f4c09ee01dec175cfd", "score": "0.5694383", "text": "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "title": "" }, { "docid": "e692a33fdb2e54f4c09ee01dec175cfd", "score": "0.5694383", "text": "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "title": "" }, { "docid": "2dd54703871a6e9d094b863302cebccd", "score": "0.56872606", "text": "def render_to_json(**jsonargs):\n def outer(f):\n @wraps(f)\n def inner_json(request, *args, **kwargs):\n result = f(request, *args, **kwargs)\n r = HttpResponse(mimetype='application/json')\n if result:\n indent = jsonargs.pop('indent', 4)\n r.write(json.dumps(result, indent=indent, **jsonargs))\n else:\n r.write(\"{}\")\n return r\n return inner_json\n return outer", "title": "" }, { "docid": "9a77202bea0919157f1673b22ba90366", "score": "0.5683191", "text": "def __format__(self, fmt):\n return str(self).__format__(fmt)", "title": "" }, { "docid": "82559d62bba2576b14f66c5f5bcf840d", "score": "0.5678727", "text": "def test_result_is_json(self) -> None:\n assert isinstance(self.formatter, SarifFormatter)\n output = self.formatter.format_result(self.matches)\n json.loads(output)\n # https://github.com/ansible/ansible-navigator/issues/1490\n assert \"\\n\" not in output", "title": "" }, { "docid": "2f93477a7975f94cc91a4dc78a9012c7", "score": "0.5672927", "text": "def _json_serializer(obj):\r\n if isinstance(obj, (datetime.datetime, datetime.date)):\r\n return 
obj.isoformat()\r\n elif isinstance(obj, decimal.Decimal):\r\n return float(obj)\r\n else:\r\n raise TypeError(f\"Type {type(obj)} is not JSON serializable\")", "title": "" }, { "docid": "f2b316f4dd103e00e2be51875650b4fc", "score": "0.5665832", "text": "def __format__(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "1bc23d1122de17e1e29c7bbb2d84d675", "score": "0.5664735", "text": "def __dump_pretty_json(file, data, flags):\n with open(file, flags, encoding=\"utf-8\") as file_fd:\n json.dump(data, file_fd, indent=4)\n file_fd.write(\"\\n\") # Add newline cause Py JSON does not\n file_fd.flush()", "title": "" }, { "docid": "2aed3ae6c05dc4df50e20250428d885a", "score": "0.56592745", "text": "def render_json(serializer_class):\n\n def wrapper(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n result = func(*args, **kwargs)\n serializer = serializer_class(result)\n rendered = JSONRenderer().render(serializer.data)\n return json.loads(rendered.decode())\n\n return wrapped\n\n return wrapper", "title": "" }, { "docid": "ff78ee1897a7ed305549ef81b4f2a5e2", "score": "0.5659156", "text": "def json_str(self) -> str:\n return json.dumps(self.json_dict(), sort_keys=True, indent=4)", "title": "" }, { "docid": "ff78ee1897a7ed305549ef81b4f2a5e2", "score": "0.5659156", "text": "def json_str(self) -> str:\n return json.dumps(self.json_dict(), sort_keys=True, indent=4)", "title": "" }, { "docid": "ff78ee1897a7ed305549ef81b4f2a5e2", "score": "0.5659156", "text": "def json_str(self) -> str:\n return json.dumps(self.json_dict(), sort_keys=True, indent=4)", "title": "" }, { "docid": "1f464fa216ced925a61c327031ae7742", "score": "0.565482", "text": "def format(self, record):\n extras = {}\n if isinstance(record.msg, dict):\n extras = record.msg\n record.message = None\n else:\n record.message = record.getMessage()\n # only format time if needed\n if \"asctime\" in self._required_fields:\n record.asctime = self.formatTime(record, self.datefmt)\n\n try:\n log_record = OrderedDict()\n except NameError:\n log_record = {}\n\n for field in self._required_fields:\n log_record[field] = record.__dict__.get(field)\n log_record.update(extras)\n self.merge_record_extra(record, log_record, reserved=self._skip_fields)\n\n return json.dumps(log_record, default=self.json_default, cls=self.json_encoder)", "title": "" }, { "docid": "ba2a27bfb7b0a0fe8f3b6df99757d2ae", "score": "0.5649993", "text": "def prettify_json(data):\n if isinstance(data, str):\n data = json.loads(data)\n html = '<pre>' + json.dumps(data, sort_keys=True, indent=4) + '</pre>'\n return mark_safe(html)", "title": "" }, { "docid": "08581f67ee15723ea6c62fc8ef1174e0", "score": "0.5648715", "text": "def json_reformat_action(json_doc, processor, key, format_str):\n json_doc[key] = format_str % json_doc", "title": "" }, { "docid": "5adb2fdebcdbe1e62146b5f258e1b9f1", "score": "0.56393117", "text": "def pretty_print_json(json_raw_as_str: str) -> str:\n return json.dumps(json.loads(json_raw_as_str), sort_keys=True, indent=JSON_INDENT)", "title": "" }, { "docid": "2e971f2c7ad3ece44c40f810bef0e105", "score": "0.56392974", "text": "def f_json(data, caller, filename=None):\n\n if filename:\n with open(filename, 'w') as fp:\n json.dump(data, fp)\n else:\n print(json.dumps(data, indent=4))", "title": "" }, { "docid": "3fc170116e60fff4935715a5b5347159", "score": "0.5639179", "text": "def __format__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "e1525f5149d225cde223b128689f9516", "score": "0.5637405", "text": "def 
json_default(obj):\n return str(obj)", "title": "" }, { "docid": "62da184628d511f0f5be75d654c73491", "score": "0.5633978", "text": "def to_str(self) -> str:\n return json.dumps(self.to_dict(), default=json_format_converter)", "title": "" }, { "docid": "a6bc1b33b522cd489fd0b3d8f8ad49fd", "score": "0.56338143", "text": "def json(self):\n return self.fake_json", "title": "" }, { "docid": "de35894b51a2df48e9a7fe571ef65fcb", "score": "0.56303805", "text": "def format(self, value, group_name=None):\r\n formatter = None\r\n if isinstance(self.formatter, dict):\r\n formatter = self.formatter.get(group_name)\r\n if formatter is None and not group_name is None:\r\n formatter = self.formatter.get(None)\r\n else:\r\n formatter = self.formatter\r\n if isinstance(formatter, types.FunctionType):\r\n return formatter(value)\r\n elif not formatter is None:\r\n return formatter.format(value)\r\n return value", "title": "" }, { "docid": "a40a46561c0cf008bcf9dea47c900b8d", "score": "0.5624232", "text": "def data_prettified(js):\n\n # Convert the data to sorted, indented JSON\n response = json.dumps(js, sort_keys=True, indent=2)\n\n # Truncate the data. Alter as needed\n response = response[:5000]\n\n # Get the Pygments formatter\n formatter = HtmlFormatter(style='colorful')\n\n # Highlight the data\n response = highlight(response, JsonLexer(), formatter)\n\n # Get the stylesheet\n style = \"<style>\" + formatter.get_style_defs() + \"</style><br>\"\n\n # Safe the output\n return mark_safe(style + response)", "title": "" }, { "docid": "4e252f50ea36307cfd5bba8a40f0130f", "score": "0.56012225", "text": "def _format(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "2705d5078809e4909ec16e98728edb19", "score": "0.56011546", "text": "def jsonify(func):\n\n def json_wrapper(self, *args, **kwargs):\n result = func(self, *args, **kwargs)\n response = self.request.RESPONSE\n if 'json' in (response.getHeader('Content-Type') or ''):\n # already converted to json, e.g. 
on error.\n return result\n\n response.setHeader('Content-Type', 'application/json; charset=utf-8')\n return json.dumps(result, indent=4, encoding='utf-8') + '\\n'\n\n json_wrapper.__doc__ = func.__doc__\n json_wrapper.__name__ = func.__name__\n json_wrapper.action_info = getattr(func, 'action_info', None)\n return json_wrapper", "title": "" }, { "docid": "2e34c5d645cb9891c2e25408a1a096ad", "score": "0.5567962", "text": "def pretty_print_json(json_obj: str):\n import pprint\n\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(json_obj)", "title": "" }, { "docid": "7d8abacdc57e18ce93a2f06d786b628d", "score": "0.55655044", "text": "def dict_formatter(func):\n\n @wraps(func)\n def convert(*args, **kwargs):\n result = func(*args, **kwargs)\n return _do_format(result)\n return convert", "title": "" }, { "docid": "30707fc438c9e879ada201232f998f06", "score": "0.5563601", "text": "def json(self):", "title": "" }, { "docid": "3328f99d321df1fab66ee164382575c4", "score": "0.5553628", "text": "def __str__(self):\n\n\t\treturn json.dumps(self.to_dict(), indent=4, separators=(\",\", \": \"), sort_keys=True)", "title": "" }, { "docid": "3328f99d321df1fab66ee164382575c4", "score": "0.5553628", "text": "def __str__(self):\n\n\t\treturn json.dumps(self.to_dict(), indent=4, separators=(\",\", \": \"), sort_keys=True)", "title": "" }, { "docid": "3328f99d321df1fab66ee164382575c4", "score": "0.5553628", "text": "def __str__(self):\n\n\t\treturn json.dumps(self.to_dict(), indent=4, separators=(\",\", \": \"), sort_keys=True)", "title": "" }, { "docid": "3328f99d321df1fab66ee164382575c4", "score": "0.5553628", "text": "def __str__(self):\n\n\t\treturn json.dumps(self.to_dict(), indent=4, separators=(\",\", \": \"), sort_keys=True)", "title": "" }, { "docid": "3328f99d321df1fab66ee164382575c4", "score": "0.5553628", "text": "def __str__(self):\n\n\t\treturn json.dumps(self.to_dict(), indent=4, separators=(\",\", \": \"), sort_keys=True)", "title": "" }, { "docid": "31f5e8159c3dda46dd4e39f3e6b1786a", "score": "0.5544659", "text": "def __format__(self, format_spec):\n return str(self)", "title": "" }, { "docid": "1594d1640a56883d3fee53a61bb0f0a2", "score": "0.5535325", "text": "def format(self, record):\n data = OrderedDict({\n 'logger': record.name,\n 'level': self.syslog_handler.mapPriority(record.levelname),\n 'time': datetime.utcnow().isoformat(),\n })\n\n message = record.getMessage()\n # Only include the 'msg' key if it has content and is not already a JSON blob.\n if message and not message.startswith(\"{\") and not message.endswith(\"}\"):\n data[\"msg\"] = message\n\n data.update({\n 'hostname': self.hostname,\n })\n\n if global_ctx.user:\n data['user_id'] = global_ctx.user.pk\n if global_ctx.remote_addr:\n data['remote_addr'] = global_ctx.remote_addr\n\n # Include any other custom attributes set on the record.\n data.update({\n attr_name: record.__dict__[attr_name]\n for attr_name in record.__dict__\n if attr_name not in BUILTIN_ATTRS\n })\n\n if global_ctx.trace_id:\n data['trace_id'] = global_ctx.trace_id\n if global_ctx.amzn_trace:\n data['amzn_trace'] = global_ctx.amzn_trace\n\n # If there is an error, format it for nice output.\n if record.exc_info is not None:\n data[\"error\"] = repr(record.exc_info[1])\n data[\"traceback\"] = safer_format_traceback(*record.exc_info)\n\n return json.dumps(data, cls=SafeJSONEncoder)", "title": "" }, { "docid": "e0db2f48999a3c14f8c69667d04b7fc0", "score": "0.55342555", "text": "def format_result(result, options):\r\n format = options.get('format', '').lower()\r\n if 
format == 'json':\r\n return simplejson.dumps(result)\r\n else: # js\r\n json = simplejson.dumps(result)\r\n callback = options.get(\"callback\")\r\n if callback:\r\n return \"%s(%s);\" % (callback, json)\r\n else:\r\n return \"var _OLBookInfo = %s;\" % json", "title": "" }, { "docid": "18355de1e0e4ef03028eaaf97d545218", "score": "0.5519279", "text": "def to_json_string(self) -> Any:\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "title": "" }, { "docid": "18355de1e0e4ef03028eaaf97d545218", "score": "0.5519279", "text": "def to_json_string(self) -> Any:\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "title": "" }, { "docid": "7c8a26a57f4729c521dc3c98d85e20da", "score": "0.5517987", "text": "def json_encode(self, value):\r\n return simplejson.dumps(value, separators=(\",\", \":\"))", "title": "" }, { "docid": "5c8d40150cb53fa233afac65c7fcb6c6", "score": "0.55176973", "text": "def json_serializer(input_key=None, input_value=None, **kwargs):\n ############################ Custom Code Goes Below This Line #################################\n import json\n import phantom.rules as phantom\n \n outputs = {}\n outputs['json'] = json.dumps({input_key: input_value})\n \n # Return a JSON-serializable object\n assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable\n return outputs", "title": "" }, { "docid": "a0d9c35e2d06d25bf98ba663b8438558", "score": "0.5511105", "text": "def to_format(self):\n\n json_string = json.dumps(self.students_in_room, indent=2)\n\n logger.info('Data was serialized into JSON format.')\n\n return json_string", "title": "" }, { "docid": "7406ec770b0712808a6539a02526a09c", "score": "0.5508703", "text": "def json_encode(input):\n\treturn json.dumps(input, indent='\\t', sort_keys=True)", "title": "" }, { "docid": "5f206e307155f47ea5012092f7e3b14a", "score": "0.550786", "text": "def prettify(output):\n args = get_args()\n if args.format.lower() == \"xml\":\n parsed = xml.dom.minidom.parseString(output)\n pretty = parsed.toprettyxml()\n\n else:\n parsed = json.loads(output)\n pretty = json.dumps(\n parsed, indent=4, sort_keys=True)\n\n return pretty", "title": "" }, { "docid": "7ba8c98fb2d31be6c66e7f34f577505e", "score": "0.55066365", "text": "def handle_json(value: Union[dict, list]) -> str:\n return json.dumps(value)", "title": "" }, { "docid": "26a289991d48050e4806a2dc89d8789f", "score": "0.55064124", "text": "def __repr__(self):\n return json.dumps(self.as_dict(), indent=2)", "title": "" }, { "docid": "f9c5252e669ba191fd2305051f0f8474", "score": "0.5502083", "text": "def output_format(override=None):\r\n def _output_format(func):\r\n\r\n @wraps(func)\r\n def _format_wrapper(self, *args, **kwargs):\r\n response = func(self, *args, **kwargs)\r\n if self.output_format is 'pandas':\r\n if override is None:\r\n df = pd.DataFrame(response)\r\n return df\r\n else:\r\n import warnings\r\n warnings.warn(\"Pandas output not supported for this \"\r\n \"endpoint. 
Defaulting to JSON.\")\r\n else:\r\n if self.key is 'share':\r\n return response[self.symbols[0]]\r\n return response\r\n return _format_wrapper\r\n return _output_format", "title": "" }, { "docid": "2f756ac4ab257ff5f478d19fce236fef", "score": "0.5483103", "text": "def pretty_str(self, indent_size=2, trim_size=12,\n root=True):\n\n def small_str(value, size):\n\n if not isinstance(value, str): \n value_str = json.dumps(value)\n else:\n value_str = str(value)\n\n if len(value_str) > (size + len('...')):\n value_str = value_str[:size] + '...'\n \n if isinstance(value, str):\n value_str = '\"' + value_str + '\"'\n return value_str\n \n def indent_str(value_str, size):\n indent = \" \" * size\n return indent + value_str.replace(\"\\n\", \"\\n\" + indent)\n\n if self.is_added_value():\n return \"+++ %s was added\" % small_str(self.json_b, trim_size)\n\n elif self.is_removed_value():\n return \"--- %s was removed\" % small_str(self.json_a, trim_size)\n\n elif self.type_diff:\n return \"*** %s and %s have different types (%s vs %s)\" % \\\n (small_str(self.json_a, trim_size), small_str(self.json_b, trim_size),\n self.type_diff[0].__name__, self.type_diff[1].__name__)\n\n elif self.numeric_type_diff:\n return \"### %s and %s have different numeric types (%s vs %s)\" % \\\n (small_str(self.json_a, trim_size), small_str(self.json_b, trim_size),\n self.numeric_type_diff[0].__name__, self.numeric_type_diff[1].__name__)\n\n elif self.is_numeric_value_diff():\n # TODO: Report close values differently?\n return \"::: %s and %s do not match\" % (self.json_a, self.json_b)\n\n elif self.value_diff:\n return \"::: %s and %s do not match\" % \\\n (small_str(self.json_a, trim_size), small_str(self.json_b, trim_size))\n\n elif self.dict_diff:\n keys = list(self.dict_diff.keys())\n keys.sort()\n pretty = \"\"\n for key in keys:\n \n if key != keys[0]:\n pretty = pretty + \"\\n\"\n\n next_diff = self.dict_diff[key]\n if next_diff.is_similar_value():\n continue\n \n if isinstance(key, int):\n key = \"[%s]\" % key\n elif not root:\n key = \".%s\" % key\n\n pretty = pretty + \"%s:\\n%s\" % (key, \n indent_str(next_diff.pretty_str(indent_size=indent_size,\n trim_size=trim_size,\n root=False),\n indent_size))\n\n return pretty\n\n else:\n return \"(empty)\"", "title": "" }, { "docid": "6ba24415eb80a78d4a9b6ed97de86d6a", "score": "0.54733634", "text": "def format_event(self, event):\n formatted_event = \"%s\\n\" % json.dumps(event)\n return formatted_event", "title": "" } ]
84a2546916e464b4e9efb9ec4517028f
Generate a static plot of a population
[ { "docid": "8d12c199b22d65fbbed1d75d237cf117", "score": "0.683494", "text": "def plot_population(self,\n population_name: str,\n x: str,\n y: str,\n xlim: tuple = None,\n ylim: tuple = None,\n transforms: dict or None = None,\n sample: float or None = None,\n figsize: tuple = (5, 5),\n ctrl_id: str or None = None):\n fig, ax = plt.subplots(figsize=figsize)\n data = self.gating.get_population_df(population_name,\n transform=False,\n ctrl_id=ctrl_id).copy()\n assert data is not None, 'Invalid population'\n\n if transforms is None:\n print('No transforms provided, defaulting to logicle')\n transforms = dict(x='logicle', y='logicle')\n data = transform_axes(data=data, axes_vars={'x': x, 'y': y}, transforms=transforms)\n if sample is not None:\n data = data.sample(frac=sample)\n\n xlim, ylim = plot_axis_lims(x=x, y=y, xlim=xlim, ylim=ylim)\n if data.shape[0] < 1000:\n ax.scatter(x=data[x], y=data[y], s=3)\n ax = self._plot_asthetics(ax, x, y, xlim, ylim, title=population_name)\n else:\n self._2dhist(ax, data, x, y)\n ax = self._plot_asthetics(ax, x, y, xlim, ylim, title=population_name)\n fig.show()", "title": "" } ]
[ { "docid": "31c58aced541f7fab74b9e4cce69790d", "score": "0.65963537", "text": "def plot_population():\n import numpy as np\n import matplotlib.pyplot as plt\n import astropy.units as u\n from astroquery.nasa_exoplanet_archive import NasaExoplanetArchive\n\n\n #First establish the rules that a planet must satisfy in order to be printed / highlighted.\n teq_min = 1000 * u.K\n teq_max = 1300 * u.K\n rad_min = 2 * u.R_earth\n rad_max = 4 * u.R_earth\n gaia_mag_limit = 13\n P_min = 0.0 * u.day\n P_max = 1.0 * u.day\n\n #Read the archive and compute the equilibrium temperature.\n #TO DO: SOME PLANETS FALL OUT BECAUSE THEY DONT HAVE A STELLAR EFFECTIVE TEMPERATURE AND/OR STELLAR RADIUS.\n #HOWEVER THESE CAN BE APPROXIMATED FROM THE SPECTRAL TYPE. FOR EACH MISSING VALUE, I NEED TO LOOK UP WHAT A STAR WITH\n #THAT SPECTRAL TYPE TYPICALLY HAS FOR VALUES OF R_S AND T_EFF, AND REPLACE THOSE.\n table = NasaExoplanetArchive.get_confirmed_planets_table(all_columns=True)#This is an astropy table.\n transiting = table[(table['pl_tranflag'].data).astype(bool)]#Select only the transiting ones, and put them in a new table.\n rp = transiting['pl_radj']#Short-hand for planet radii.\n equilibrium_temperature = (transiting['st_teff'] * np.sqrt(transiting['st_rad'] / 2 / transiting['pl_orbsmax'])).decompose()#Compute T_eq.\n g = transiting['gaia_gmag'].quantity#Short-hand for the Gaia magnitude.\n P = transiting['pl_orbper']\n transiting['teq'] = equilibrium_temperature\n\n #Create boolean arrays for selecting the rows in the table, based on the above rules.\n temp_constraints = (equilibrium_temperature < teq_max) & (equilibrium_temperature > teq_min)\n rad_constraints = (rp < rad_max) & (rp > rad_min)\n gmag_constaints = (g < gaia_mag_limit)\n P_constraints = (P > P_min) & (P < P_max)\n targets = transiting[temp_constraints & rad_constraints & gmag_constaints]#These are the highlighted planets.\n targets = transiting[P_constraints]\n targets.sort('gaia_gmag')\n targets['r_earth'] = targets['pl_radj'].to(u.R_earth)\n targets[['pl_name', 'gaia_gmag', 'teq', 'r_earth', 'pl_orbper','st_rad','st_teff','st_spstr']].pprint(max_lines=1000)\n\n\n\n #Now we move on to plotting the population\n\n #These are rules for the planets that will be plotted as gray background points.\n has_rp = (rp > 0.0)#There needs to be a radius\n has_rs = (transiting['st_rad'] > 0)#...a stellar radius\n has_teff = (transiting['st_teff'] > 0)#... a stellar T_eff\n # is_spt = (transiting['st_spstr'].astype(str) == 'K2 V')#Test for being a particular spectral type. 
Will be needed to fill in systems with missing effective temperatures.\n systems_to_plot = transiting[has_rp & has_rs & has_teff]#Only transiting planets here.\n\n fig,ax = plt.subplots()\n sc = plt.scatter(systems_to_plot['teq'],systems_to_plot['pl_radj'].to(u.R_earth),c='gray',s=20,alpha=0.5)\n sct=plt.scatter(targets['teq'],targets['pl_radj'].to(u.R_earth),c='orange',s=20,alpha=0.5)\n ax.set_ylabel('Radius ($R_E$)')\n ax.set_xlabel('Equilibrium temperature (K)')\n ax.set_title('Temperature versus Radius')\n ax.set_xlim(ax.get_xlim()[::-1])\n\n\n #And this is all annotation, taken from :\n annot = ax.annotate(\"\", xy=(0,0), xytext=(20,20),textcoords=\"offset points\",bbox=dict(boxstyle=\"round\", fc=\"w\"),arrowprops=dict(arrowstyle=\"->\"))\n annot.set_visible(False)\n names = systems_to_plot['pl_name']\n gmags = systems_to_plot['gaia_gmag']\n Ps = systems_to_plot['pl_orbper']\n def update_annot_new(ind):\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n text=''\n n_in = len(ind[\"ind\"])\n prefix=''#This becomes a newline if the forloop is run through more than once.\n for n in ind[\"ind\"]:\n text+=prefix+names[n]+'\\n'\n text+=' G = %s \\n'%np.round(gmags[n],2)\n text+=' P = %s'%np.round(Ps[n],2)\n prefix='\\n'\n # text = \"{}, {}\".format(\" \".join(list(map(str,ind[\"ind\"]))),\" \".join([names[n] for n in ind[\"ind\"]]))\n annot.set_text(text)\n annot.get_bbox_patch().set_alpha(0.4)\n def newhover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc.contains(event)\n if cont:\n update_annot_new(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n fig.canvas.mpl_connect(\"motion_notify_event\", newhover)\n plt.show()", "title": "" }, { "docid": "a74b66c958f2392dadaa3f2b3b6f77be", "score": "0.65892756", "text": "def draw_population_fitness(tournament, \n selecting=[], \n filtering = [], \n cmap = 'Greys_r', \n x_size = 40, \n y_size = 20\n ):\n ls = tournament.summed_fitness_history#fitness_history_sum_list(tournament, selecting=selecting, filtering = filtering)\n\n fig, ax = plt.subplots(figsize =(x_size, y_size))\n cmap = plt.get_cmap(cmap)\n\n plt.plot(ls,c='black',linewidth=1)\n plt.xlabel(\"Round Number\", fontsize = 24)\n plt.ylabel(\"Fitness Level\", fontsize = 24)\n plt.tick_params(axis='both',labelsize=14)", "title": "" }, { "docid": "d34244bbaa0ba46103a38c70094afee9", "score": "0.6508685", "text": "def generation_plot(self, generation: int):\n\n # define how the spacing of the chart\n plt.figure(figsize=(20, 10))\n\n # get the current population values about fitness and round them all to the same amount of digits in order\n # to make the displayed numbers consistent\n best_title = round(self.best_fitness, 6)\n worst_title = round(self.worst_fitness, 6)\n avg_title = round(self.population_fitness_avg[-1], 6)\n\n # add a main title on the graph\n plt.suptitle(f\"Generation: {generation}, best fit: {best_title}, worst fit: {worst_title}, \"\n f\"avg. fit: {avg_title}\", fontsize=16)\n\n # create fitness scatter plot. 
Each member of the population will be a single point in the graph displayed with\n # the hex color code color of it's id.\n plt.subplot(1, 3, 1)\n for member in self.population:\n plt.scatter(member.avg_precision, member.avg_recall, color=member.id)\n\n # define x and y descriptions and axis limits for the scatter plot\n plt.xlabel(\"precision\")\n plt.ylabel(\"recall\")\n plt.ylim(0, 1)\n plt.xlim(0, 1)\n\n # bar plot to display fitness divergence\n plt.subplot(1, 3, 2)\n plt.bar([\"Best Fitness\", \"Worst Fitness\"], [self.best_fitness, self.worst_fitness])\n plt.ylabel(\"fitness\")\n plt.ylim(0, 2)\n\n # define a line chart in order to display how the average fitness of the population has changed compared\n # to all previous generations\n plt.subplot(1, 3, 3)\n plt.plot([x for x in range(1, generation + 1)], self.population_fitness_avg)\n plt.xlabel(\"generation\")\n plt.ylabel(\"avg. population fitness\")\n plt.ylim(0, 2)\n\n # in case the folder \"run\" is not available in the current working directory yet, create it\n if not os.path.isdir(\"run\"):\n os.mkdir(os.path.join(os.getcwd(), \"run\"))\n\n # save the image as png file in the run folder with the current generation id\n plt.savefig(os.path.join(os.getcwd(), f\"run/generation{generation}.png\"))", "title": "" }, { "docid": "db85752a6b82e097e48667ef3e93e919", "score": "0.64135295", "text": "def gen_plots():\n\n # Open and load the stuff from the results file.\n fh = open(\"results.pkl\", \"r\")\n params = pickle.load(fh)\n population = pickle.load(fh)\n fh.close()\n\n # x (generations) axis ticks.\n x = [i * params[\"report_freq\"] \\\n for i in range(0, params[\"generations\"] / \\\n params[\"report_freq\"] + 1)]\n\n # Calculate number of susceptible, infected, recovered invididuals, \n # and mean disease-induced death rate per generation.\n y1 = [] # number of susceptible individuals\n y2 = [] # number of infected individuals\n y3 = [] # number of recovered individuals\n y4 = [] # mean disease-induced death rate\n for i in range(0, len(x)): \n freq_s = 0\n freq_i = 0\n freq_r = 0\n freq_v = 0\n c_sum = 0.0\n c_mean = 0.0\n for p in population:\n if p.get_state_list()[i] == Player.INFECTED:\n freq_i += 1\n c_sum += p.get_c_list()[i]\n elif p.get_state_list()[i] == Player.RECOVERED:\n freq_r += 1\n elif p.get_state_list()[i] == Player.VACCINATED:\n freq_s += 1\n else:\n freq_v += 1\n if freq_i != 0:\n c_mean = c_sum / freq_i\n y1.append(freq_s / (1.0 * params[\"population\"]))\n y2.append(freq_i / (1.0 * params[\"population\"]))\n y3.append(freq_r / (1.0 * params[\"population\"]))\n y4.append(c_mean)\n\n # Calculate mean virulence over the last 10% of the generations.\n last = int(params[\"generations\"] * 0.1)\n i = (params[\"generations\"] / params[\"report_freq\"]) - \\\n (last / params[\"report_freq\"])\n l = y4[i:]\n final_c_mean = sum(l) / len(l) \n\n # Plot 1 (number of susceptible, infected, and recovered individuals \n # against generation).\n pylab.figure(1, figsize = (7, 4.5), dpi = 500)\n pylab.xlabel(r\"$t$\")\n pylab.ylabel(r\"$s, x, r$\")\n pylab.plot(x, y1, \"#000000\", alpha = 0.6, linewidth = 2.0)\n pylab.plot(x, y2, \"#FF0000\", alpha = 0.6, linewidth = 2.0)\n pylab.plot(x, y3, \"#0000FF\", alpha = 0.6, linewidth = 2.0)\n pylab.legend((r\"$s$\", r\"$x$\", r\"$r$\"), 'best', shadow = False)\n pylab.ylim(0, 1.0)\n ax = pylab.gca()\n ax.xaxis.major.formatter.set_powerlimits((0,0))\n pylab.savefig(\"plot1.pdf\", format = \"pdf\")\n pylab.close(1)\n\n # Plot 2 (Mean disease-induced death rate versus generation).\n 
pylab.figure(2, figsize = (7, 4.5), dpi = 500)\n pylab.xlabel(r\"$t$\")\n pylab.ylabel(r\"$\\bar{c}$\")\n pylab.plot(x, y4, \"#000000\", alpha = 0.6, linewidth = 2.0)\n pylab.figtext(0.82, 0.85, r\"$\\bar{c}_\\infty = %4.3f$\" %(final_c_mean), \n ha = 'center', va = 'center', bbox = dict(facecolor = 'white', edgecolor = 'black'))\n pylab.ylim(0, 1.0)\n ax = pylab.gca()\n ax.xaxis.major.formatter.set_powerlimits((0,0))\n pylab.savefig(\"plot2.pdf\", format = \"pdf\")\n pylab.close(2)", "title": "" }, { "docid": "aff1943f845d36b8633f372e7c79e2c7", "score": "0.636803", "text": "def create_plots():\n # graphics imports are needed only for the plot function\n # this way the file can be imported only for function \n # calculation with only basic python\n import panel as pn\n import holoviews as hv\n from bokeh.resources import INLINE\n hv.extension('bokeh')\n pn.extension(safe_embed=True)\n age_delta = 10\n ages = [i for i in range(0, 100, age_delta)]\n times = [i for i in range(60)]\n plot_dict = {}\n x_label = 'Time since infection in days'\n y_label = 'Mortality probability'\n plot_list = []\n for age in ages:\n probabilities = [mortality_prob(age, time) for time in times]\n data = {x_label: times, y_label: probabilities} \n marker_plot = hv.Scatter(data, kdims=[x_label], vdims=[y_label]).opts(title = 'Mortality for ages ' + str(age) + '-' + str(age + age_delta), height=200, width=600,\n marker= 'o', size = 7, color='green', tools=['hover'])\n curve_plot = hv.Curve(data, kdims=[x_label], vdims=[y_label]).opts(color='red')\n single_plot = marker_plot * curve_plot\n plot_list.append(single_plot)\n plot_dict[age] = single_plot\n plot_list_obj = hv.Layout(plot_list).cols(1)\n panel_object = pn.pane.HoloViews(plot_list_obj)\n panel_object.save('COVID19_Mortality_Castiglione_Grid', embed=True, resources=INLINE) \n hmap = hv.HoloMap(plot_dict, kdims=['age']).opts(height=600, width=800, \n title = 'Daily Probability of COVID-19 Mortality by Age and Time Since Infection')\n panel_object = pn.pane.HoloViews(hmap)\n panel_object.save('COVID19_Mortality_Castiglione', embed=True, resources=INLINE)", "title": "" }, { "docid": "8f0b36764807bf456df21fc50fbb60f2", "score": "0.636563", "text": "def population_density(self):\n print(\"Population Density\")\n all = []\n for r in self.map:\n row = []\n for box in r:\n row.append(len(box))\n all.append(row)\n return all\n # ax = sns.heatmap(np.array(all), annot=True, fmt=\"d\", cmap=\"YlGnBu\", linewidths=.5, xticklabels=False,\n # yticklabels=False)\n # plt.savefig(\"output.png\")", "title": "" }, { "docid": "b15382ff7e5413c58f939ddcb600f9cc", "score": "0.6347675", "text": "def plot(self):", "title": "" }, { "docid": "b15382ff7e5413c58f939ddcb600f9cc", "score": "0.6347675", "text": "def plot(self):", "title": "" }, { "docid": "2328523cd87af1a9eddde4733f516407", "score": "0.6266583", "text": "def genplot():\n data = genfromtxt('data.csv', delimiter=',')\n # generate data from data.csv\n\n plt.xlabel('Year')\n plt.ylabel('Trait value per trait per capita')\n plt.title('Generational trait progression')\n # set labels and title\n\n plt.plot(data)\n plt.savefig('data.png')\n plt.show()\n # plot data, save to data.png and show plot", "title": "" }, { "docid": "88558e25a0e2a54bbf0b421cec437fcd", "score": "0.6233897", "text": "def _create_plot(self):", "title": "" }, { "docid": "72fc57c00da16215860ac4b50ec562f6", "score": "0.6178959", "text": "def gen_figure(states: List[str]) -> go:\n fig = go.Figure()\n for state in states:\n fig.add_trace(\n go.Scatter(\n 
x=norm_df.index,\n y=norm_df.loc[:, state],\n mode=\"lines+markers\",\n name=state,\n )\n )\n fig.update_layout(\n title=\"COVID-19 Testing Conducted per Capita\",\n xaxis_title=\"Date\",\n yaxis_title=\"Number of Tests per Million People\",\n )\n return fig", "title": "" }, { "docid": "a06c924af42e79b84480cc4ef9d8fcda", "score": "0.61565924", "text": "def display(self):\n atoms = {a.split('_')[1].title(): a for a in self.atoms}\n quants = ['Populations', 'LTE Populations', 'Departure coefficients']\n #nlevel = getattr(self.rhobj, self.atoms[0]).nlevel\n nx, ny, nz = self.rhobj.atmos.temperature.shape\n if nx == 1:\n x_slider = fixed(0)\n else:\n x_slider = (0, nx - 1)\n if ny == 1:\n y_slider = fixed(0)\n else:\n y_slider = (0, ny - 1)\n\n def _pop_plot(atom):\n \"\"\"Starts population plot\"\"\"\n pop = getattr(self.rhobj, atom).populations\n height = self.rhobj.atmos.height_scale[0, 0] / 1e6 # in Mm\n _, ax = plt.subplots()\n pop_plot, = ax.plot(height, pop[0, 0, 0])\n ax.set_xlabel(\"Height (Mm)\")\n ax.set_ylabel(\"Populations\")\n ax.set_title(\"Level 1\")\n return ax, pop_plot\n\n ax, p_plot = _pop_plot(self.atoms[0])\n\n @interact(atom=atoms, quantity=quants, y_log=False,\n x=x_slider, y=y_slider)\n def _pop_update(atom, quantity, y_log=False, x=0, y=0):\n nlevel = getattr(self.rhobj, atom).nlevel\n\n # Atomic level singled out because nlevel depends on the atom\n @interact(level=(1, nlevel))\n def _pop_update_level(level=1):\n n = getattr(self.rhobj, atom).populations[level - 1, x, y]\n nstar = getattr(\n self.rhobj, atom).populations_LTE[level - 1, x, y]\n if quantity == 'Departure coefficients':\n tmp = n / nstar\n ax.set_ylabel(quantity + ' (n / n*)')\n elif quantity == 'Populations':\n tmp = n\n ax.set_ylabel(quantity + ' (m$^{-3}$)')\n elif quantity == 'LTE Populations':\n tmp = nstar\n ax.set_ylabel(quantity + ' (m$^{-3}$)')\n p_plot.set_ydata(tmp)\n ax.relim()\n ax.autoscale_view(True, True, True)\n ax.set_title(\"Level %i, x=%i, y=%i\" % (level, x, y))\n if y_log:\n ax.set_yscale(\"log\")\n else:\n ax.set_yscale(\"linear\")", "title": "" }, { "docid": "a44adc2b8fc3f03111bfbbc7c3c73c44", "score": "0.61397374", "text": "def visualize(self):", "title": "" }, { "docid": "e60d7b00c2b9d8eb668f5a32bb8c7651", "score": "0.6118826", "text": "def plot(self):\n pass # pragma: no cover", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.6109614", "text": "def plot(self):\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.6109614", "text": "def plot(self):\n pass", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.6109614", "text": "def plot(self):\n pass", "title": "" }, { "docid": "453801ebcf57cc4d58f04fdf8a502a83", "score": "0.60901767", "text": "def main(datfile):\n X = loadData(datfile, pre_process=True, plot=False)\n proj_b = proj_pursuit(X, n_iter=100, learning_rate=0.01, tol=1e-8)\n \n fig = plt.figure()\n fig.clf()\n \n plt.plot(X[:,0], X[:,1], 'rx', mfc='white', mec='r', ms=7, mew=1) # plot samples\n \n # plot projection pursuit direction\n X_mean = X.mean(axis=0) \n plt.plot( [proj_b[0] * i + X_mean[0] for i in np.linspace(-7,7)], \n [proj_b[1] * i + X_mean[1] for i in np.linspace(-7,7)], 'b', lw=4)\n \n figname = 'proj_pursuit' + datfile.split('.')[0] + '.png'\n plt.axis('equal')\n plt.axis('off')\n plt.savefig('fig1/' + figname)\n plt.show()", "title": "" }, { "docid": "8a9bfbbcc164eed4042471a2d9f0e285", "score": "0.6056497", "text": "def prepare_plots(self):\n\n sns.set_style('darkgrid')\n fig 
= plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_xlabel('Initial population size (N)')\n ax.set_ylabel('Variation of adult counts over 10 years')\n\n a = np.arange(1, len(self.initial_pops)+1, 1)\n b, c = [], []\n\n for pop in self.initial_pops:\n result = self.result_dict[pop]\n\n b.append(result['mean_stdev'])\n c.append(result['ci'])\n\n ax.errorbar(a,b,yerr=c)\n ax.scatter(a,b,s=40)\n ax.plot(a,b)\n plt.xticks(a,self.initial_pops)\n\n plt.show()\n plt.savefig('results/results.png', bbox_inches='tight')", "title": "" }, { "docid": "dd20cc79883295352d1c2461a27f7bb3", "score": "0.6033707", "text": "def test_plot():\n my_prop = htm.ArrheniusProperty(\n 1 * htm.ureg.dimensionless, 0.2 * htm.ureg.eV * htm.ureg.particle**-1\n )\n\n htm.plotting.plot(my_prop, alpha=0.2)\n plt.clf()", "title": "" }, { "docid": "953473a1075a8f780a370a34b7bfa15d", "score": "0.6011481", "text": "def show(self, individuals=False):\n self.plot(individuals=individuals)\n plt.colorbar()\n plt.show()", "title": "" }, { "docid": "b6e36fb8b9cb24464199d6a2bec363fc", "score": "0.5996052", "text": "def init_figure():\n return plot_continent_data(data, keyword=\"New\"), plot_top_k_countries(10, \"TotalCases\"),\\\n plot_boxplots(data), plot_pie_chart(5, \"ActiveCases\"), geo_plot(data, \"TotalCases\")", "title": "" }, { "docid": "fe3a293f80541f4366536ae65c606adc", "score": "0.593686", "text": "def show_values(self, title='Grid World', fig_path=None, fig_name=None, save_fig=False):\n \n cmap = mcolors.LinearSegmentedColormap.from_list('cmap', ['red', 'black', 'limegreen'])\n rc('axes', linewidth=4)\n\n fig, ax = plt.subplots(facecolor='black', edgecolor='white', linewidth=4) \n\n grid = ax.pcolor(self.values, edgecolors='white', linewidths=4, cmap=cmap, \n vmin=self.values.min(), vmax=self.values.max())\n\n warnings.simplefilter('ignore', MatplotlibDeprecationWarning)\n ax = grid.get_axes()\n\n\n orient_dict = {0:0, 1:np.pi/2., 2:np.pi, 3:3*np.pi/2., \n 4:7*np.pi/4., 5:np.pi/4., 6:3*np.pi/4., 7:5*np.pi/4.}\n\n dist = 0.42\n arrow_loc = {0:(0, dist), 1:(-dist, 0), 2:(0, -dist),\n 3:(dist, 0), 4:(dist, dist), 5:(-dist, dist),\n 6:(-dist, -dist), 7:(dist, -dist)}\n\n count = 0\n\n for p, value, choice in izip(grid.get_paths(), grid.get_array(), self.policy):\n x, y = p.vertices[:-2, :].mean(0)\n\n ax.text(x, y, \"%.2f\" % value, ha=\"center\", va=\"center\", color='white', \n fontweight='bold', fontsize='24')\n\n if count in self.terminal_states:\n pass\n else: \n orient = orient_dict[choice]\n direct = arrow_loc[choice]\n \n ax.add_patch(patches.RegularPolygon((x + direct[0], y + direct[1]), \n 3, .05, color='white', orientation=orient))\n \n count += 1\n\n for spine in ax.spines.values():\n spine.set_edgecolor('white')\n \n x_axis_size = self.values.shape[1]\n y_axis_size = self.values.shape[0]\n\n xlabels = [str(val) for val in range(0, x_axis_size)]\n ylabels = [str(val) for val in range(y_axis_size-1, -1, -1)]\n\n ax.set_xticks(np.arange(0.5, len(xlabels)))\n ax.set_yticks(np.arange(0.5, len(ylabels)))\n\n ax.set_xticklabels(xlabels) \n ax.set_yticklabels(ylabels) \n\n ax.tick_params(axis='x', colors='white')\n ax.tick_params(axis='y', colors='white')\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(24)\n tick.label1.set_fontweight('bold')\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(24)\n tick.label1.set_fontweight('bold')\n \n plt.title(title, color='white', fontsize='24', fontweight='bold')\n \n fig.set_size_inches((self.values.shape[1]*4, self.values.shape[0]*4))\n\n if 
save_fig:\n if fig_path is None:\n fig_path = os.getcwd() + '/../figs'\n\n if fig_name is None:\n title = title.translate(None, string.punctuation)\n fig_name = '_'.join(title.split()) + '.png'\n\n plt.savefig(os.path.join(fig_path, fig_name), facecolor='black')\n\n plt.show()", "title": "" }, { "docid": "0d57b278e7e1c7ffa6df334353f43a6a", "score": "0.58884776", "text": "def create_plot(df):\n states = alt.topo_feature(data.us_10m.url, 'states')\n\n states = alt.Chart(states, title='Number of covid-19 cases by state on 06/04/2021').mark_geoshape(\n stroke='black',\n ).encode(\n color='cases:Q',\n tooltip=['state:N', 'cases:Q'],\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(df, 'fips', ['state', 'cases'])\n ).properties(\n width=700,\n height=500\n ).project(\n type='albersUsa'\n ).configure_title(\n fontSize=24\n ).configure_legend(\n gradientLength=400,\n titleFontSize=18,\n labelFontSize=15\n )\n\n if inpt == 0:\n states.show()\n elif inpt == 1:\n states.save('images/plot6.html')\n states.save(\"images/plot6.png\")\n print('Image saved in: ./images/plot6.png')\n else:\n raise Exception('incorrect command!')", "title": "" }, { "docid": "dcd72e9dd55699643b33454ba623cebf", "score": "0.5886619", "text": "def plot(self, *args, **kwargs):\n # Create dummy universe but preserve used_ids\n u = openmc.Universe(cells=[self], universe_id=openmc.Universe.next_id + 1)\n openmc.Universe.used_ids.remove(u.id)\n return u.plot(*args, **kwargs)", "title": "" }, { "docid": "62b45d807570614e04215c94656f4bcf", "score": "0.5883026", "text": "def main2(datfile): \n X = loadData('data/'+datfile, pre_process=True, plot=False)\n \n G = [lambda s: s**4, lambda s: np.log(np.cosh(s)), lambda s: -np.exp(-(s**2)/2.)]\n g = [lambda s: 4 * (s**3), lambda s: np.tanh(s), lambda s: s*np.exp(-(s**2)/2)]\n g_der = [lambda s: 12 * (s**2), lambda s: 1 - (np.tanh(s))**2, lambda s: (1-s**2)*np.exp(-(s**2)/2)]\n \n func_name = ('s^4', 'log_cosh', 'expo')\n choice = 2\n proj_b = proj_pursuit_2(X, G[choice], g[choice], g_der[choice], n_iter=1000, tol=1e-8)\n \n fig = plt.figure()\n fig.clf()\n plt.plot(X[:,0], X[:,1], 'rx', mfc='white', mec='r', ms=7, mew=1) # plot samples\n \n # plot projection pursuit direction\n X_mean = X.mean(axis=0) \n plt.plot( [proj_b[0] * i + X_mean[0] for i in np.linspace(-5,5)], \n [proj_b[1] * i + X_mean[1] for i in np.linspace(-5,5)], 'b', lw=4)\n \n figname = 'general_proj_pursuit_with_%s' % func_name[choice] + datfile.split('.')[0] + '.png'\n plt.axis('equal')\n plt.axis('off')\n plt.savefig('fig2/' + figname)\n plt.show()", "title": "" }, { "docid": "920fc3eb5cf85c815c8d7793cb056599", "score": "0.5880443", "text": "def plot(n=100, pop=100, ar=[20], numcuts=100, md=0, n_pl=1, top20=0):\n hitrates = []\n ideal = []\n maxavgs = []\n cuts = gen_cuts(numcuts)\n if md == 1:\n ar = [choose_planet() for star in range(pop)]\n # ar ^^^ will be a list of lists of a/R*s (len(ar) == pop)\n for x in range(n):\n generate_pop(n=pop, ar=ar, top20=top20, md=md, n_pl=n_pl)\n\n # Read data from text document and then evaluate\n data = []\n sini_list = []\n siniu_list = []\n with open('gen_pop.txt') as file:\n for line in file:\n data.append(line.strip('\\n'))\n transits = ast.literal_eval(data[-1])\n data = data[1:-1]\n for i in range(len(data)):\n elem = data[i].split(' ')\n sini = float(elem[0])\n siniu = float(elem[1].strip())\n sini_list += [sini]\n siniu_list += [siniu]\n\n evals = eval_cut2(cuts, sini_list, siniu_list, transits, top20=top20)\n hitrates += [evals[0]]\n avg_hr = [sum(e)/(x+1) for e in 
zip(*hitrates)]\n index = avg_hr.index(max(avg_hr))\n ideal += [cuts[index]]\n avg_ideal = np.mean(ideal)\n\n maxtr = evals[2]\n maxavgs += [maxtr[0]]\n avgid = np.mean(maxavgs) # average num of transits at ideal cut\n high = 100 * max(avg_hr)\n # Average of transits spotted : stars observed at ideal sini cut-off\n obs = avgid/max(avg_hr) # number of stars observed at ideal sini cut\n frac = 'Highest average HR = {:3.1f}% = {:3.1f}/{:3.1f}'.format(high,\n avgid,\n obs)\n\n plt.figure()\n plt.plot(cuts, avg_hr)\n if md == 1:\n s_type = 'M-Dwarfs'\n title = 'Hit Rate v. sin(i) Cut-Off for a population of {0} {1}\\n\\\nAverage of {2} trials'.format(pop, s_type, x+1)\n else:\n s_type = 'Ultracool Dwarfs'\n title = 'Hit Rate v. sin(i) Cut-Off for a population of {0} {1}\\n\\\n{2} planet(s) per star, a/R* = {3}, Average of {4} trials'.format(pop, s_type,\n n_pl, ar,\n x+1)\n plt.title(title)\n plt.xlabel('sini cut-off')\n plt.xticks(np.arange(0, 1.1, 0.1))\n plt.ylabel('Hit Rate\\n [# transits seen : # stars observed]')\n if max(avg_hr) < 0.40:\n plt.ylim(0, 0.40)\n else:\n plt.ylim(0, (max(avg_hr)+0.1))\n label = 'ideal sini cut-off: {:1.5f} \\n'.format(avg_ideal)\n label += frac\n ax = plt.gca()\n plt.text(.5, .9,\n label,\n horizontalalignment='center',\n transform=ax.transAxes)\n\n cwd = os.getcwd()\n if md == 0:\n path = cwd + '\\\\sini_curves'\n if md == 1:\n path = cwd + '\\\\mdwarf_curves'\n name = 'avg' + str(x+1) + '.png'\n plt.savefig(os.path.join(path, name))\n plt.close()\n status = str(x+1) + ' plots complete.'\n print(status)\n\n return", "title": "" }, { "docid": "5c5d655fb0abd951fb6d7be44d7f9ec9", "score": "0.5866291", "text": "def plot(setsOfCodes):\n plt.figure(figsize=(16,8))\n plt.scatter(range(len(setsOfCodes)), [x.fitness for x in setsOfCodes])", "title": "" }, { "docid": "eaf591f68de64e330b6ab40091f5519e", "score": "0.584986", "text": "def plot_relaxation_results():", "title": "" }, { "docid": "28915c7bff3b07a10dbccc8fbe85e467", "score": "0.584632", "text": "def demographics(exp_df):\n pyplot.figure(figsize=(12, 12))\n\n # Bin and plot ages\n ax = pyplot.subplot(2, 2, 1)\n ax.set_title(\"Ages\", family=font_family, size=title_size)\n ages = exp_df[\"age\"]\n age_bins = [0, 0, 0, 0, 0]\n age_bins[0] = len(ages[ages <= 20])\n age_bins[1] = len(ages[(20 < ages) & (ages < 25)])\n age_bins[2] = len(ages[(25 <= ages) & (ages <= 30)])\n age_bins[3] = len(ages[(30 < ages) & (ages <= 35)])\n age_bins[4] = len(ages[35 < ages])\n\n ax.pie(age_bins, labels=[\"18-20\", \"20-24\", \"25-30\", \"31-35\", \"> 35\"],\n autopct=\"%1.1f%%\", shadow=False, colors=colors)\n\n shade_axis(ax, size=text_size)\n\n # Bin and plot Python experience\n ax = pyplot.subplot(2, 2, 2)\n ax.set_title(\"Years of\\nPython Experience\", family=font_family, size=title_size)\n py = exp_df[\"py_years\"]\n py_bins = [0, 0, 0, 0, 0]\n py_bins[0] = len(py[py < .5])\n py_bins[1] = len(py[(.5 <= py) & (py <= 1)])\n py_bins[2] = len(py[(1 < py) & (py <= 2)])\n py_bins[3] = len(py[(2 < py) & (py <= 5)])\n py_bins[4] = len(py[5 < py])\n\n ax.pie(py_bins, labels=[\"< 1/2\", \"1/2-1\", \"1-2\", \"2-5\", \"> 5\"],\n autopct=\"%1.1f%%\", shadow=False, colors=colors)\n\n shade_axis(ax, size=text_size)\n\n # Bin and plot programming experience\n ax = pyplot.subplot(2, 2, 3)\n ax.set_title(\"Years of\\nProgramming Experience\", family=font_family, size=title_size)\n prog = exp_df[\"prog_years\"]\n prog_bins = [0, 0, 0, 0, 0]\n prog_bins[0] = len(prog[prog < 2])\n prog_bins[1] = len(prog[(2 <= prog) & (prog <= 3)])\n 
prog_bins[2] = len(prog[(3 < prog) & (prog <= 5)])\n prog_bins[3] = len(prog[(5 < prog) & (prog <= 10)])\n prog_bins[4] = len(prog[10 < prog])\n\n ax.pie(prog_bins, labels=[\"< 2\", \"2-3\", \"3-5\", \"5-10\", \"> 10\"],\n autopct=\"%1.1f%%\", shadow=False, colors=colors)\n\n shade_axis(ax, size=text_size)\n\n # Bin and plot education\n ax = pyplot.subplot(2, 2, 4)\n ax.set_title(\"Highest Degree\\nReceived\", family=font_family, size=title_size)\n degrees = exp_df[\"degree\"].value_counts()\n\n ax.pie(degrees.values, labels=[x.capitalize() for x in degrees.keys()],\n autopct=\"%1.1f%%\", shadow=False, colors=colors)\n\n shade_axis(ax, size=text_size)\n\n pyplot.tight_layout()\n pyplot.savefig(\"plots/demographics.png\")", "title": "" }, { "docid": "d91c06fc76db8be195409820de41f061", "score": "0.58265996", "text": "def example02():\n\n latlim = [-60, 60]\n latstp = 2.\n iri2016Obj = IRI2016Profile(alt=600, hour=17., latlim=latlim, latstp=latstp, \\\n lon=-76.77, option=2, verbose=False, year=2004)\n\n latbins = arange(latlim[0], latlim[1] + latstp, latstp)\n\n nlat = len(latbins)\n index = range(nlat)\n\n fig = figure(figsize=(8,12))\n\n pn = fig.add_subplot(211) \n NmF2 = iri2016Obj.b[0, index]\n NmF1 = IRI2016()._RmNeg(iri2016Obj.b[2, index])\n NmE = iri2016Obj.b[4, index] \n pn.plot(latbins, NmF2, label='N$_m$F$_2$')\n pn.plot(latbins, NmF1, label='N$_m$F$_1$')\n pn.plot(latbins, NmE, label='N$_m$E') \n pn.set_title(iri2016Obj.title1)\n pn.set_xlim(latbins[[0, -1]])\n pn.set_xlabel('Geog. Lat. ($^\\circ$)')\n pn.set_ylabel('(m$^{-3}$)') \n pn.set_yscale('log')\n legend(loc='best')\n\n pn = fig.add_subplot(212)\n hmF2 = iri2016Obj.b[1, index]\n hmF1 = IRI2016()._RmNeg(iri2016Obj.b[3, index])\n hmE = iri2016Obj.b[5, index] \n pn.plot(latbins, hmF2, label='h$_m$F$_2$')\n pn.plot(latbins, hmF1, label='h$_m$F$_1$')\n pn.plot(latbins, hmE, label='h$_m$E') \n pn.set_xlim(latbins[[0, -1]])\n pn.set_title(iri2016Obj.title2)\n pn.set_xlabel('Geog. Lat. ($^\\circ$)')\n pn.set_ylabel('(km)')\n legend(loc='best')", "title": "" }, { "docid": "350bea304004401e7f6db7c6413e795d", "score": "0.5824427", "text": "def test_set_plot_limits():\n BioSim(island_map='W', ini_pop=[], seed=1, ymax_animals=20,\n cmax_animals={'Herbivore': 10, 'Carnivore': 20})", "title": "" }, { "docid": "ea75eea3e68577fb41f3c913124517ab", "score": "0.5821803", "text": "def render(self, path: str) -> None:\n print(\"Rendering to \" + path)\n self.load_config()\n self.population_util = PopulationUtil(k = self.k, r = self.r, min_age = self.min_age, max_age = self.max_age, size = self.size,\n mortality_rate = self.mortality_rate, infection_range = self.infection_range, recovery_time = self.recovery_time,\n total_healthcare_capacity = self.total_healthcare_capacity, social_distance_per = self.social_distance_per,\n mask_effectiveness = self.mask_effectiveness, speed=self.speed, social_distancing_at = self.enforce_social_distance_at,\n mask_wearing_at = self.enforce_mask_wearing_at)\n Visualization(self.population_util, render_mode=True, render_path=path)", "title": "" }, { "docid": "a08cce3b8a0ebee63d2238a5076fa706", "score": "0.58207154", "text": "def _build_geom_plot(self, data: pd.DataFrame, x: str, y: str or None, geoms: dict, ax: matplotlib.pyplot.axes,\n xlim: tuple or None, ylim: tuple or None, title: str) -> matplotlib.pyplot.axes or None:\n\n if any([x['shape'] == 'sml' for _, x in geoms.items()]):\n print(f'Error: {title} is a supervised machine learning gate. 
This type of gating does not produce'\n f'2D geometries but instead classifies cells based using high dimensional feature space. To observe'\n f'a population classified by this method in 2D, use the `plot_sml` method')\n return None\n\n if data.shape[0] < 1000:\n ax.scatter(x=data[x], y=data[y], s=3)\n ax = self._plot_asthetics(ax, x, y, xlim, ylim, title)\n else:\n ax = self._2dhist(ax, data, x, y)\n ax = self._plot_asthetics(ax, x, y, xlim, ylim, title)\n\n # Draw geom\n for (child_name, geom), cc in zip(geoms.items(), self.colours):\n colour = '#EB1313'\n if geom is None or geom == dict():\n print(f'Population {child_name} has no associated gate, skipping...')\n continue\n if geom['shape'] == 'threshold':\n ax.axvline(geom['threshold'], c=colour, lw=2.5)\n if geom['shape'] == '2d_threshold':\n ax.axvline(geom['threshold_x'], c=colour, lw=2.5)\n ax.axhline(geom['threshold_y'], c=colour, lw=2.5)\n if geom['shape'] == 'ellipse':\n ellipse = patches.Ellipse(xy=geom['centroid'], width=geom['width'], height=geom['height'],\n angle=geom['angle'], fill=False, edgecolor=colour, lw=2.5)\n ax.add_patch(ellipse)\n if geom['shape'] == 'rect':\n rect = patches.Rectangle(xy=(geom['x_min'], geom['y_min']),\n width=((geom['x_max']) - (geom['x_min'])),\n height=(geom['y_max'] - geom['y_min']),\n fill=False, edgecolor=colour, lw=2.5)\n ax.add_patch(rect)\n if geom['shape'] == 'poly':\n x = geom['cords']['x']\n y = geom['cords']['y']\n ax.plot(x, y, '-k', c=colour, label=child_name, lw=2.5)", "title": "" }, { "docid": "4133f6cc9fb42a2579c36bc13bb984c4", "score": "0.5815976", "text": "def gripper_visualization(self):\n raise NotImplementedError", "title": "" }, { "docid": "a64a27742df654032e5bb3658568fbb6", "score": "0.5813933", "text": "def demo4(rng=np.random):\n\n prior = Prior()\n model = Model()\n\n vs = model.sim(prior.gen(30), rng=rng)\n ts = h.dt * np.arange(vs.shape[1])\n\n fig, axs = plt.subplots(6, 5)\n\n for v, ax in zip(vs, axs.flatten()):\n ax.plot(ts, v)\n\n plt.show()", "title": "" }, { "docid": "b1897569484adaef4bc8fd31e970b966", "score": "0.5803575", "text": "def draw_popularity_distribution(self):\n if self.df_pop is None:\n raise ValueError(\"Should first get df_pop\")\n\n fig, axes = plt.subplots(2, 2)\n\n axes[0, 0].set_yscale('log')\n self.df_pop[\"popularity\"].hist(bins=200, ax=axes[0, 0])\n axes[0, 0].set_title(\"Station Popularity Distribution\")\n axes[0, 0].set_xlabel(\"Popularity\")\n axes[0, 0].set_ylabel(\"Number of stations\")\n\n axes[0, 1].set_yscale('log')\n self.df_pop[\"bike_out\"].hist(bins=200, ax=axes[0, 1])\n axes[0, 1].set_title(\"Bike Out Distribution\")\n axes[0, 1].set_xlabel(\"Number of bikes out\")\n axes[0, 1].set_ylabel(\"Number of stations\")\n\n axes[1, 0].set_yscale('log')\n self.df_pop[\"bike_in\"].hist(bins=200, ax=axes[1, 0])\n axes[1, 0].set_title(\"Bike In Distribution\")\n axes[1, 0].set_xlabel(\"Number of bikes in\")\n axes[1, 0].set_ylabel(\"Number of stations\")\n\n axes[1, 1].set_yscale('log')\n self.df_pop[\"abs_pop\"].hist(bins=200, ax=axes[1, 1])\n axes[1, 1].set_title(\"Absolute Popularity Distribution\")\n axes[1, 1].set_xlabel(\"Number of bikes in and out\")\n axes[1, 1].set_ylabel(\"Number of stations\")", "title": "" }, { "docid": "b91d92916970815f310e968314ba9847", "score": "0.5802284", "text": "def test_render_xy_plot():\r\n gdpinfo = {\r\n \"gdpfile\": \"isp_gdp.csv\",\r\n \"separator\": \",\",\r\n \"quote\": '\"',\r\n \"min_year\": 1960,\r\n \"max_year\": 2015,\r\n \"country_name\": \"Country Name\",\r\n \"country_code\": \"Country 
Code\"\r\n }\r\n\r\n render_xy_plot(gdpinfo, [], \"isp_gdp_xy_none.svg\")\r\n render_xy_plot(gdpinfo, [\"China\"], \"isp_gdp_xy_china.svg\")\r\n render_xy_plot(gdpinfo, [\"United Kingdom\", \"United States\"],\r\n \"isp_gdp_xy_uk+usa.svg\")", "title": "" }, { "docid": "287fb4e4bd77d153b277e05146723d9c", "score": "0.5799129", "text": "def plot(self):\n raise NotImplementedError", "title": "" }, { "docid": "acae74600c083c309586387f85c95f2b", "score": "0.57970387", "text": "def starting_population(self):\r\n # get station name and count the number of each station by join city_station table\r\n sql = f\"SELECT COUNT(START_STATION_ID) as Number , STATION_NAME as Name from \" \\\r\n f\"T_USER_RENTAL_LOG JOIN T_DM_CITY_STATION ON T_DM_CITY_STATION.STATION_ID = \" \\\r\n f\"T_USER_RENTAL_LOG.START_STATION_ID WHERE START_TIME BETWEEN '{self.start} 'AND '{self.end}' \" \\\r\n f\"GROUP BY START_STATION_ID ORDER BY Number DESC LIMIT 10\"\r\n # print(sql)\r\n tmp = self.connect(sql)\r\n plt.bar(tmp['Name'], tmp['Number'])\r\n plt.xticks(fontsize=5)\r\n plt.grid(True)\r\n\r\n # get the lon and lat and number to show on the map\r\n sql = f\"SELECT LON,LAT, COUNT(START_STATION_ID) as Number from T_USER_RENTAL_LOG JOIN T_DM_CITY_STATION \" \\\r\n f\"ON T_DM_CITY_STATION.STATION_ID = T_USER_RENTAL_LOG.START_STATION_ID \" \\\r\n f\"WHERE START_TIME BETWEEN '{self.start}' AND '{self.end}' GROUP BY START_STATION_ID\"\r\n\r\n tmp = self.connect(sql)\r\n\r\n df = geopandas.read_file('CA_2011_EoR_Glasgow_City.shp')\r\n df = df.to_crs(epsg=4326)\r\n\r\n df.plot(edgecolors='black')\r\n # Map (long, lat) to (x, y) for plotting\r\n plt.scatter(tmp['LAT'], tmp['LON'], (50 * tmp['Number'] / np.mean(tmp['Number'])), c='red', alpha=0.5,\r\n zorder=10)\r\n # m.scatter(tmp['LAT'], tmp['LON'], tmp['Number'] ** 2, c='red', alpha=0.5, zorder=10)\r\n plt.show()", "title": "" }, { "docid": "8a132b86a89342e9faed406a280b1c8e", "score": "0.5787113", "text": "def make_plot_1b(selected_region = 'Select a Region Please'):\n\n # Update Data source based on user selection:\n a = selected_region\n plot_b_data = final_df.query('sub_region in @a').query('suicides_per_100k_pop>0.1').query('year < 2015 and year > 1986').groupby(['year','sub_region'],as_index = False).agg({\"suicides_per_100k_pop\":\"mean\",\"country\":\"nunique\"})\n\n # Create a plot 1b\n source = plot_b_data.round(1)\n\n nearest = alt.selection(type='single', nearest=True, on='mouseover',\n fields=['year'],empty='none')\n line= alt.Chart(source).mark_line(point=False).encode(\n x = alt.X('year:O',axis=alt.Axis(title='Year',labelAngle=-45)),\n y = alt.Y('suicides_per_100k_pop',axis=alt.Axis(title='Suicides per 100 k pop'),scale=alt.Scale(zero=True)),\n color = alt.Color('sub_region',legend=alt.Legend(title = 'Legend'))\n ).properties(\n width=500,\n height=200,\n title='Suicide Rate per Region'\n )\n selectors = alt.Chart(source).mark_point().encode(\n x='year:O',\n opacity=alt.value(0),\n ).add_selection(\n nearest\n )\n points = line.mark_point().encode(\n opacity=alt.condition(nearest, alt.value(1), alt.value(0))\n )\n text = line.mark_text(align='left', dx=5, dy=-5).encode(\n text=alt.condition(nearest, 'suicides_per_100k_pop', alt.value(' '))\n )\n rules = alt.Chart(source).mark_rule(color='gray').encode(\n x='year:O',\n ).transform_filter(\n nearest\n )\n line2= alt.Chart(general_data).mark_line(stroke=\"black\",point=False,strokeDash=[10],interpolate ='monotone',size =2,color=\"#FFAA00\").encode(\n x = alt.X('year:O',axis=alt.Axis(title='Year',labelAngle=-45)),\n y 
= alt.Y('suicides_per_100k_pop',axis=alt.Axis(title='Suicides per 100 k pop'),scale=alt.Scale(zero=True)),\n color = alt.Color('Label',legend=alt.Legend())\n )\n chart_1b = alt.layer(\n line,line2, selectors, points, rules, text\n ).properties(\n width=600, height=300\n ).configure_legend(\n strokeColor='gray',\n fillColor='#EEEEEE',\n padding=10,\n cornerRadius=10\n ) \n\n return chart_1b", "title": "" }, { "docid": "e7f319fc3f2c5714b678142a406a50ad", "score": "0.5782289", "text": "def plot_data():\n\t# initialization\n\tdf_sanitation_raw = pd.read_csv('sanitation_facilities.csv', encoding='windows-1252')\n\tdf_sanitation = df_sanitation_raw.dropna()\n\tincome_levels = ['Low income', 'Lower middle income', 'Upper middle income', 'High income']\n\tcolors = ['red', 'lightcoral', 'gray', 'silver']\n\tdfs = []\n\n\tdf_world = df_sanitation[df_sanitation.Country == 'World']\n\tdf_world = df_world.reindex(index=df_world.index[::-1])\n\n\t# get the data for different income levels and order them in ascending order by year\n\tfor income_level in income_levels:\n\t\tdf_income_level = df_sanitation[df_sanitation.Country == income_level]\n\t\tdf_sorted = df_income_level.reindex(index=df_income_level.index[::-1])\n\t\tdfs.append(df_sorted)\n\n\t# plot the life expectancy for the different income levels\n\tfig, ax = plt.subplots(figsize=(15,10))\n\tdf_world.plot(x='Year', y='Sanitation', color='black', subplots=True, figsize=(15,10), ax=ax, linestyle = '--', label='World')\n\tfor i, df in enumerate(dfs):\n\t\tdf.plot(x='Year', y='Sanitation', color=colors[i], subplots=True, figsize=(15,10), ax=ax, label=income_levels[i])\n\t\n\t# axis and title settings for the plot\n\tfor spine in plt.gca().spines.values():\n\t\tspine.set_visible(False)\n\tax.grid(b=True, axis = 'y', linestyle = \"-\", linewidth=0.5, color=\"#DCDCDC\")\n\tttl = ax.title\n\tttl.set_position([.5, 1.08])\n\n\t# plot settings\n\tplt.ylim(-2, 100)\n\tplt.title('Improved sanitation facilities (% of population with access), 1990 to 2015')\n\tplt.xlabel(\"\")\n\tplt.ylabel(\"% of population with access to improved sanitation facilities\", labelpad = 20)\n\tplt.tick_params(top = False, right = False, bottom = False, left = False)\n\tplt.legend(loc = 'lower right', frameon = False)\n\t\n\t# pdb.set_trace()\n\t# handles, labels = ax.get_legend_handles_labels()\n\t# labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n\t# ax.legend(handles, labels)\n\n\tplt.savefig('sanitation_facilities.png')\n\tplt.show()", "title": "" }, { "docid": "963bc7424776062b3b7061a09eb884b6", "score": "0.5778911", "text": "def medisiner_og_befolkning(self, prevalens, sykdom = None, gender = 'Mann', region = 'Hele landet', age_start = 15, age_end= 49, period_start = 2004, period_end = 2018, save_fig = False, label = False):\n\n if type(prevalens) == type([]):\n prevalens = self.probability(np.array(prevalens)/100)\n else:\n prevalens /= 100\n #Source SSB https://www.ssb.no/statbank/table/07459/\n\n age_indexes = self.age_parameters(age_start, age_end) #The correct indexes in self.age_indexes based on the age arguments\n data = self.drug_array(age_indexes, region, gender)\n\n if region == 'Hele landet': #The data from SSB do not have the variable Hele landet, this it's necessarry to sum up everything.\n if 'Hele landet' in self.p_places:\n p_data = self.population_array(age_indexes, region, gender) #If the data is from reseptregisteret do nothing.\n else: #If the data is from SSB and not having the variable Hele landet\n p_data = 
self.population_array(age_indexes, self.p_places[0], gender)\n for k in range(1,len(self.p_places)):\n p_data += self.population_array(age_indexes, self.p_places[k], gender)\n else:\n p_data = self.population_array(age_indexes, region, gender)\n\n plotting_data = np.zeros((len(self.drugs) + 1, len(self.year_keys)))\n plotting_data[:-1] = data\n plotting_data[-1] = p_data*prevalens\n drugs_name = self.drugs[:]\n try:\n drugs_name.append(sykdom + ' Prevalens: %.2f%%' %(prevalens*100))\n except:\n drugs_name.append('Prevalens: %.2f%%' %(prevalens*100))\n\n if label == False:\n self.part1_plotting(plotting_data, period_start, period_end, drugs_name, age_indexes, gender, region, label = 'Antall personer', save_fig = save_fig)\n else:\n self.part1_plotting(plotting_data, period_start, period_end, drugs_name, age_indexes, gender, region, label = label, save_fig = save_fig)", "title": "" }, { "docid": "ccbe7f826bc2173406f025d800f25b9b", "score": "0.57764107", "text": "def profile_data():\n conn = lite.connect('world_ed.db')\n dframe = pd.read_sql_query('select country, men, women from ed_life', conn)\n fig, (ax1, ax2) = plt.subplots(2, sharey=True)\n fig.subplots_adjust(hspace=0.6)\n ax1.hist(dframe['men'])\n ax1.set_title('Men: School Life Expectancy')\n ax1.set_xlabel('Years of Education')\n ax1.set_ylabel('No. of Countries')\n ax2.hist(dframe['women'])\n ax2.set_title('Women: School Life Expectancy')\n ax2.set_xlabel('Years of Education')\n ax2.set_ylabel('No. of Countries')\n fig.savefig('school_hist.png')\n print (\"See 'school_hist.png' for histograms of school life \"\n \"expectancy by sex\")\n print \"The 2 distributions appear roughly normal: use means\"\n print (\"Avg. School Life Expectancy for Men:\\t{:.4}\"\n .format(dframe['men'].mean()))\n print (\"Avg. 
School Life Expectancy for Women:\\t{:.4}\"\n .format(dframe['women'].mean()))", "title": "" }, { "docid": "7c2b92dadc20131b8d1e14ec1e48b88d", "score": "0.577068", "text": "def create_plot(df):\n # Number of confirmed cases vs age\n sns.set(font_scale=1.1)\n sns.set_style(\"whitegrid\")\n ax = sns.displot(df, x=\"Age group\", height=6, aspect=10/6, color=\"cornflowerblue\", edgecolor=\"darkblue\")\n ax.set(xlabel=\"Age group\", ylabel=\"Number of cases\")\n plt.title(\"Number of confirmed cases vs age group\", y=0.85, x=0.65, fontsize=17)\n return plt.savefig(\"../images/fig7\"), plt.close()", "title": "" }, { "docid": "f844ff1c33544ce9c633ffd862524e13", "score": "0.57635075", "text": "def create_plots(filename, seed=123):\n np.random.seed(seed)\n\n universe_size = 100\n num_samples = 100\n estimation_repetitions = 2000\n\n universe, probabilities, original_values = setup_universe(universe_size)\n\n # Manipulate values here.\n values = original_values\n\n exact = np.dot(probabilities, values)\n print('Exact value: {}'.format(exact))\n\n estimation_methods = [\n ('HGE',\n functools.partial(hindsight_gumbel_estimation, normalize=False),\n '#4285F4'), # Google blue.\n ('HGE, norm.',\n functools.partial(hindsight_gumbel_estimation, normalize=True),\n '#0F9D58'), # Google green.\n\n ('Repeated HGE (x10)',\n functools.partial(repeated_hindsight_gumbel_estimation,\n repetitions=10,\n normalize=False),\n '#F4B400'), # Google yellow.\n ('Repeated HGE (x10), norm.',\n functools.partial(repeated_hindsight_gumbel_estimation,\n repetitions=10,\n normalize=True),\n '#DB4437'), # Google red.\n\n # ('PPSWOR Priority Sampling',\n # functools.partial(ppswor_priority_sampling, normalize=False),\n # 'red'),\n # ('PPSWOR Priority Sampling, Normalized',\n # functools.partial(ppswor_priority_sampling, normalize=True),\n # 'darkorange'),\n\n ('Monte Carlo sampling', monte_carlo_sampling, '#9E9E9E') # Google gray.\n ]\n\n estimations_k = list(range(1, num_samples + 1))\n\n all_estimators_data = collections.defaultdict(list)\n\n for _ in range(estimation_repetitions):\n\n for name, method, _ in estimation_methods:\n estimations = method(universe, probabilities, values, num_samples)\n all_estimators_data[name].append(estimations)\n\n matplotlib.rcParams.update({'font.size': 12})\n\n plt.figure(facecolor='w', edgecolor='k', figsize=[6.4, 4.8])\n for name, _, color in estimation_methods:\n data = all_estimators_data[name]\n\n # Cut off first point to reduce noise in the plot.\n cut_data = [x[1:] for x in data]\n cut_estimations_k = estimations_k[1:]\n\n plt.plot(cut_estimations_k, np.percentile(cut_data, 95, axis=0),\n color=color, linestyle=':', alpha=0.5)\n plt.plot(cut_estimations_k, np.percentile(cut_data, 5, axis=0),\n color=color, linestyle=':', alpha=0.5)\n\n plt.plot(cut_estimations_k, np.percentile(cut_data, 25, axis=0),\n color=color, linestyle='-', label=name)\n plt.plot(cut_estimations_k, np.percentile(cut_data, 75, axis=0),\n color=color, linestyle='-')\n\n plt.title('HGE Variations on Synthetic Data')\n plt.axhline(y=exact, color='k', linestyle='--', label='Exact value')\n plt.ylim(exact - 1, exact + 1)\n plt.ylabel('Estimate')\n plt.xlim(0, num_samples)\n plt.xlabel('Number of Samples')\n plt.legend(loc='upper right', fontsize=10)\n\n print('Saving plot to {}'.format(filename))\n plt.savefig(filename)", "title": "" }, { "docid": "09c46cacaa49d7e442e1acb335cab3e2", "score": "0.57518584", "text": "def visualize(self):\n plt.figure(figsize=(8,3))\n\n nc = self.geometry.nlos()\n for i in range(0, nc):\n sen 
= self.sensitivities[i,:]\n if np.sum(sen) == 0: continue\n\n plt.plot(self.r, sen/np.amax(sen), linewidth=2)\n\n plt.xlim([0, np.amax(self.r)])\n plt.ylim([0, 1.25])\n plt.xlabel('Minor radius (m)')\n plt.ylabel('Normalized intensity')\n plt.gca().get_yaxis().set_ticks([])\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "9ed4f4bff47ad9762503f09ac12eaaac", "score": "0.57470006", "text": "def make_plot(self, sequence):\n df = pd.DataFrame(self.sequence)\n print(df)\n sns.lmplot(data = df, x=\"Iteration\", y=\"Win Percentage\", hue=\"Switched\").savefig('visualization.png', dpi=600, format='png')", "title": "" }, { "docid": "490feefca9e2b3df8b7ba15dfe2b1a36", "score": "0.57438606", "text": "def visualize(self):\n pass", "title": "" }, { "docid": "11b23dd4cd54055479fdadeee2aa608a", "score": "0.5714769", "text": "def plot_data(X):\n plt.figure()\n num_plot = 5\n f, ax = plt.subplots(num_plot, num_plot)\n for i in range(num_plot):\n for j in range(num_plot):\n idx = np.random.randint(0, X.shape[0])\n ax[i, j].imshow(X[idx])\n ax[i, j].get_xaxis().set_visible(False)\n ax[i, j].get_yaxis().set_visible(False)\n f.subplots_adjust(hspace=0.1) # No horizontal space between subplots\n f.subplots_adjust(wspace=0)", "title": "" }, { "docid": "fa075bd6f66eb21b300a0324d36876e8", "score": "0.57129514", "text": "def visualize(self, brain_name, log_dir):\n probabilities = self.observations / (self.opportuniities + \n tools.epsilon)\n max_prob = np.max(probabilities)\n fig = plt.figure(num=777777)\n plt.clf()\n ax = fig.add_subplot(111, projection='3d')\n indices = np.where(self.probabilities > .4 * max_prob)\n ax.scatter(indices[0], indices[1], zs=indices[2], \n zdir=u'z', s=5, c=tools.dark_copper, depthshade=True) \n ax.set_xlabel('Past elements')\n ax.set_ylabel('Goals')\n ax.set_zlabel('Future elements')\n # Adjust azim to look at the plot from different directions.\n ax.azim = 250\n plt.title('Cerebellum transitions'.format(brain_name))\n fig.show()\n fig.canvas.draw()\n\n # Save a copy of the plot.\n filename = 'cerebellum_{0}.png'.format(brain_name)\n pathname = os.path.join(log_dir, filename)\n plt.savefig(pathname, format='png')\n\n fig = plt.figure(num=777778)\n plt.clf()\n ax = plt.subplot(2,3,1)\n plt.gray()\n plt.imshow(self.opportunities, interpolation=\"nearest\")\n ax.set_xlabel(' '.join(['max', str(np.max(self.opportunities))]))\n plt.title('Cerebellum opportunities {0}'.format(brain_name))\n\n ax = plt.subplot(2,3,2)\n plt.gray()\n plt.imshow(np.sum(self.observations,axis=2), interpolation=\"nearest\")\n ax.set_xlabel(' '.join(['max', str(np.max(self.observations))]))\n plt.title('observations')\n\n #ax = plt.subplot(2,3,3)\n #ax = plt.subplot(2,3,4)\n\n ax = plt.subplot(2,3,5)\n plt.gray()\n\n ax = plt.subplot(2,3,6)\n plt.gray()\n plt.imshow(self.curiosities, interpolation=\"nearest\")\n ax.set_xlabel(' '.join(['max', str(np.max(self.curiosities))]))\n plt.title('curiosities')\n\n fig.show()\n fig.canvas.draw()\n\n # Save a copy of the plot.\n filename = 'cerebellum_{0}.png'.format(brain_name)\n pathname = os.path.join(log_dir, filename)\n plt.savefig(pathname, format='png')", "title": "" }, { "docid": "bfce1685de7c35697d6fa874feec08f6", "score": "0.5709288", "text": "def plotBoxes(data, startPos=0):\r\n subSize = 0.8\r\n xOff = 0.165\r\n yOff = 0.135\r\n W = 1 - xOff - 0.1 # 0.11\r\n H = 1 - yOff - 0.1 # 0.105\r\n\r\n #SETTING UP AXES\r\n fig = plt.figure()\r\n nPop = data.shape[1]\r\n if nPop != 4:\r\n raise Exception(\"Not implemented for other than 4 pops\")\r\n\r\n 
majorAx = plt.gca()\r\n majorAx.set_xlim(0, nPop)\r\n majorAx.set_ylim(0, nPop)\r\n #majorAx = AA.Axes(fig, [0, 0, nPop + 1, nPop + 1])\r\n majorAx.set_ylabel(\"Source Population\")\r\n majorAx.set_yticks([.5, 1.5, 2.45, 3.4])\r\n majorAx.set_yticklabels(['4', '3', '2', '1'])\r\n majorAx.set_xlabel(\"Target Population\")\r\n majorAx.set_xticks([.6, 1.55, 2.5, 3.45])\r\n majorAx.set_xticklabels(['1', '2', '3', '4'])\r\n fig.add_axes(majorAx)\r\n axes = []\r\n divVal = float(nPop)\r\n for target in range(nPop)[::-1]:\r\n for source in range(nPop):\r\n ax = plt.axes(\r\n [xOff + W * (source / divVal),\r\n yOff + H * (target / divVal),\r\n W * subSize / divVal, H * subSize / divVal])\r\n fig.add_axes(ax)\r\n axes.append(ax)\r\n\r\n #axes = [fig.add_subplot(nPop, nPop, i + 1) for i in range(nPop ** 2)]\r\n #data = get_Dd(Y)\r\n\r\n heatMapVals = []\r\n plotI = 0\r\n for sourcePop in range(nPop):\r\n heatRow = []\r\n for targetPop in range(nPop):\r\n\r\n if sourcePop != targetPop:\r\n\r\n tmpD = data[sourcePop][targetPop][startPos:]\r\n tmpCleanD = tmpD[np.isfinite(tmpD)]\r\n if tmpCleanD.size == 0:\r\n r = plt.Rectangle((0, 0), 1, 1)\r\n r.set_facecolor('white')\r\n axes[plotI].add_patch(r)\r\n axes[plotI].set_axis_off()\r\n tmpMean = np.nan\r\n else:\r\n axes[plotI].boxplot(tmpCleanD)\r\n axes[plotI].set_ylim(0, 1.0)\r\n axes[plotI].set_xticklabels([])\r\n tl = plt.getp(axes[plotI], 'ymajorticklabels')\r\n plt.setp(tl, fontsize='x-small')\r\n \"\"\"\r\n axes[plotI].set_xticklabels([\"{0} -> {1}\".format(\r\n sourcePop + 1, targetPop + 1)])\r\n \"\"\"\r\n tmpMean = tmpCleanD.mean()\r\n\r\n else:\r\n tmpMean = np.nan\r\n if plotI > 0:\r\n r = plt.Rectangle((0, 0), 1, 1)\r\n r.set_facecolor('grey')\r\n axes[plotI].add_patch(r)\r\n axes[plotI].set_axis_off()\r\n\r\n print \"{0} -> {1}: {2}\".format(\r\n sourcePop + 1, targetPop + 1, tmpMean)\r\n heatRow.append(tmpMean)\r\n plotI += 1\r\n\r\n heatMapVals.append(heatRow)\r\n\r\n heatVals = np.array(heatMapVals)\r\n imax = axes[0].imshow(heatVals, interpolation='nearest', cmap=plt.cm.RdBu,\r\n vmin=0, vmax=1)\r\n il = plt.getp(imax.axes, 'ymajorticklabels')\r\n plt.setp(il, fontsize='x-small')\r\n il = plt.getp(imax.axes, 'xmajorticklabels')\r\n plt.setp(il, fontsize='x-small')\r\n divider = make_axes_locatable(axes[0])\r\n cax = divider.append_axes(\"right\", \"5%\", pad=\"3%\")\r\n cb = plt.colorbar(imax, cax=cax)\r\n cb.set_ticks([0, 0.5, 1])\r\n cb.set_ticklabels(['0', '.5', '1'])\r\n cl = plt.getp(cb.ax, 'ymajorticklabels')\r\n plt.setp(cl, fontsize='x-small')\r\n\r\n axes[0].set_axis_on()\r\n axes[0].set_yticks(np.arange(nPop))\r\n axes[0].set_xticks(np.arange(nPop))\r\n axes[0].set_xticklabels(np.arange(nPop) + 1)\r\n axes[0].set_yticklabels(np.arange(nPop) + 1)\r\n #axes[0].set_ylabel(\"Source\")\r\n #axes[0].set_xlabel(\"Target\")\r\n\r\n #fig.tight_layout()\r\n\r\n return fig", "title": "" }, { "docid": "f3509461ae6ddb94f349a9d20c478c75", "score": "0.5704352", "text": "def __plotProjectedData(self):\n\t\t#set plot window size\n\t\t#figsize(11,5) defines the window size of 11x5 inches at 100 dpi\n\t\tplt.figure(num=None, figsize=(11,5), dpi=100)\n\t\t#plot the FD projection against dates\n\t\tplt.plot(self.dateRange, self.FDProjection, \n\t\t\tlabel=\"FD Appreciation\", marker='o')\n\t\t#plot the SB projections against dates\n\t\tplt.plot(self.dateRange, self.SBProjection, \n\t\t\tlabel=\"SB Appreciation\", marker='o')\n\t\t#label X-Axis\n\t\tplt.xlabel('Dates')\n\t\t#rotate X-Axis labels by 90ยฐ\n\t\tplt.xticks(rotation=90)\n\t\t#label 
Y-Axis\n\t\tplt.ylabel('Exchange Rate')\n\t\t#set title of the plot\n\t\tplt.title('Favourable Exchange Rate Projection')\n\t\t#set to show the legend of the plot\n\t\tplt.legend()\n\t\t#show the plot\n\t\tplt.show()", "title": "" }, { "docid": "75de1f8193eb7fdcb11c311faf64ff1e", "score": "0.56996244", "text": "def specie_elite_fitness(pop: Population, func, window: int = 5, show: bool = True):\n # Fetch name based on used function\n name = f'elites{\"_EMA\" if func == EMA else \"_SMA\" if func == SMA else \"\"}_gen_{pop.generation}'\n \n ax = plt.figure(figsize=(20, 10)).gca()\n max_gen = 0\n max_fitness = 0\n for specie_id, specie in pop.species_hist.items():\n # Fetch specie-data\n history = sorted(specie.items(), key=lambda x: x[0])\n if len(history) < window: continue\n generations, elite_fitness = zip(*history)\n assert len(elite_fitness) == len(generations)\n max_fitness = max(max_fitness, max(elite_fitness))\n \n # Update max_gen\n if generations[-1] > max_gen: max_gen = generations[-1]\n \n # Plot the specie\n plt.plot(generations, func(elite_fitness, window), label=f'specie {specie_id}')\n \n # Additional plot attributes\n if max_gen == 0: return\n if func == SMA:\n plt.title(f\"Specie fitness in population: {pop}\\nSimple Moving Average (window={window})\")\n elif func == EMA:\n plt.title(f\"Specie fitness in population: {pop}\\nExponential Moving Average (window={window})\")\n else:\n plt.title(f\"Specie fitness in population: {pop}\")\n plt.xlabel(\"generation\")\n plt.ylabel(\"fitness of specie's elites\")\n box = ax.get_positions()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n ax.xaxis.set_major_locator(MaxNLocator(integer=True)) # Forces to use only integers\n if max_fitness <= 1:\n plt.yticks([i / 10 for i in range(11)]) # Fitness expressed in range of 0..1 (hops of 0.1)\n plt.grid(axis='y')\n plt.tight_layout()\n \n # Save the result\n plt.savefig(f\"population{'_backup' if pop.use_backup else ''}/\"\n f\"storage/\"\n f\"{pop.folder_name}/\"\n f\"{pop}/\"\n f\"images/\"\n f\"species/\"\n f\"{name}\")\n if show:\n plt.show()\n plt.close()", "title": "" }, { "docid": "5527f15cd62fc93e0bc3156a87ae0224", "score": "0.567085", "text": "def visualize__initial_co2_data():\n clf()\n kwargs = {'show_image': False}\n image = plot_image(plot_CO2, kwargs)\n \n return render_template(\"co2_data.html\", image=image.decode('utf-8'),\n tmin=1751, tmax=2012, ymin=0, ymax=10000)", "title": "" }, { "docid": "a369bd46219cb291344fc61d120a3a48", "score": "0.56588435", "text": "def demo5(rng=np.random):\n\n prior = Prior()\n model = Model()\n stats = Stats()\n\n vs = model.sim(prior.gen(12), rng=rng)\n ts = h.dt * np.arange(vs.shape[1])\n xs = stats.calc(vs)\n\n fig, axs = plt.subplots(6, 2)\n\n for v, x, ax in zip(vs, xs, axs):\n ax[0].plot(ts, v)\n ax[1].bar(np.arange(x.size) + 1, x)\n\n plt.show()", "title": "" }, { "docid": "71847e973b7d729974f464a21887eae1", "score": "0.5648221", "text": "def plot(self):\n getattr(self, '_plot_results_' + self.xp.name)()", "title": "" }, { "docid": "d06a387392b07912638a78be36383939", "score": "0.5645079", "text": "def plot_samples(self, probs):\n raise NotImplemented('QuantumAnnealer does not currently support plot_samples.')", "title": "" }, { "docid": "496b8df9f6c6359ce30ca8410a384410", "score": "0.56279325", "text": "def create_diam_vs_cum_region_oplots(body, body_dict):\n \n fig=pylab.figure(figsize=(14.5,10.5))\n\n for region in body_dict.keys():\n\n crater_bins, 
crater_counts = bin_and_transform_crater_sizes(body, (body_dict[region])[2])\n \n\n color = return_color(body, region)\n\n pylab.scatter(crater_bins/1000., crater_counts, marker='d', \\\n label=region, c=color, edgecolors='none') #size=100\n \n pylab.xlabel('LOG Crater diameter D [km]', fontsize=22, weight='bold')\n pylab.ylabel('LOG Cumalitive craters/region with diameter > D', fontsize=22, weight='bold') \n pylab.tick_params(axis='both', which='major', labelsize=20)\n pylab.title(body + ' TOTAL (non-normalized) crater counts', fontsize=22)\n #pylab.xlim([0,5])\n #pylab.ylim([-.1,6])\n pylab.legend(scatterpoints=1)\n pylab.savefig(path+'outputs/'+body+'_plots/diam_vs_cum_region/all_regions.png', \\\n bbox_inches='tight') \n pylab.close()", "title": "" }, { "docid": "1a6bf7d147b63318cd663504ed475d29", "score": "0.5625477", "text": "def plot(self):\n if self.hasfood:\n color = 'red'\n else:\n color = 'green'\n plt.scatter(self.loc[1], self.loc[0], color=color)", "title": "" }, { "docid": "460a5c92a166b8f1d66b09a3595311f7", "score": "0.56246483", "text": "def vis_growth_hist(GDPgrowth_percountry, growth_percol, growth_stats):\n statFrance = growth_stats[0]\n statGB = growth_stats[1]\n statAll = growth_stats[2]\n\n plt.figure()\n x = np.linspace(statAll['avg'] - 4*statAll['stddev'], statAll['avg'] + 4*statAll['stddev'], 100)\n plt.plot(x, stats.norm.pdf(x, statFrance['avg'], statFrance['stddev']), label='Fitted normal distribution France', color='blue')\n plt.plot(x, stats.norm.pdf(x, statGB['avg'], statGB['stddev']), label='Fitted normal distribution Great_Britain', color='red')\n plt.hist(growth_percol[0], 100, density=True, color='blue', alpha=0.5, label='Growth rates French ex-colonies')\n plt.hist(growth_percol[1], 100, density=True, color='red', alpha=0.5, label='Growth rates British ex-colonies')\n plt.axvline(x=statGB['avg'], color='red')\n plt.axvline(x=statFrance['avg'], color='blue')\n plt.title(f'Distribution of anual growth rates, {startyear} - {endyear}.')\n plt.ylabel('Frequency')\n plt.xlabel('Annual growth rate of GDP')\n plt.legend(loc='upper left')\n plt.savefig('gdp_growth_figs/gdp_growth_hist.png')", "title": "" }, { "docid": "f1e5e9603cc4fab48f0c4c90d825bf25", "score": "0.5612095", "text": "def show(a):\n fig, ax = plt.subplots() # Create a figure containing a single axis\n x = np.linspace(0, len(a) / SR, len(a))\n ax.plot(x, a) # Plot some data on the axes.\n plt.show()", "title": "" }, { "docid": "474f550694fbfce4ee22d41175c92d20", "score": "0.56110454", "text": "def plotGeneralPopInfo(X, Y, Z, step=10,\r\n shift_square_threshold=0.0001):\r\n\r\n delta = (Y[step:] - Y[:-step]) ** 2\r\n fig = plt.figure()\r\n\r\n #Alleles in pop\r\n ax = fig.add_subplot(3, 1, 1)\r\n ax.set_title(\"Number of alleles in population\")\r\n ax.plot(X, (Y > 0).sum(axis=-1).sum(axis=-1))\r\n\r\n #Moving alleles\r\n ax = fig.add_subplot(3, 1, 2)\r\n ax.set_title(\"Alleles with shifted freq after {0} generations\".format(\r\n step))\r\n ax.plot(X[:-step], (delta > shift_square_threshold).sum(axis=-1).sum(\r\n axis=-1))\r\n\r\n #Moving alleles\r\n ax = fig.add_subplot(3, 1, 3)\r\n ax.set_title(\"Population sizes\")\r\n ax.plot(X, Z)\r\n\r\n fig.tight_layout()\r\n return fig", "title": "" }, { "docid": "6ec72681aef4900a4ca39d6ba6c259e0", "score": "0.5607436", "text": "def generate_plot(df, x_variable, y_variables, plot_title):\r\n #Plot results\r\n df.plot(x=x_variable, y=y_variables, title=plot_title)\r\n plt.show()", "title": "" }, { "docid": "8a1fec8c3ee7c5f3630474bdb0f95280", "score": 
"0.56066245", "text": "def plot_precip(slice_function, region_name=\"the Sahel\", ylim=(-.2, .4)):\r\n wet_sahel = slice_function(miroc_data.precip, slice(0, len(miroc_data.precip[0])))\r\n dry_sahel = slice_function(gfdl_data.precip, slice(0, len(gfdl_data.precip[0])))\r\n wsa = miroc_data.get_average_value_timeseries_of_region(wet_sahel)\r\n dsa = gfdl_data.get_average_value_timeseries_of_region(dry_sahel)\r\n wsa = miroc_data.absolute_timeseries_to_anomaly(wsa)\r\n dsa = gfdl_data.absolute_timeseries_to_anomaly(dsa)\r\n wsa_s = miroc_data.get_average_value_timeseries_of_region_s(wet_sahel)\r\n dsa_s = gfdl_data.get_average_value_timeseries_of_region_s(dry_sahel)\r\n wsa_s = miroc_data.absolute_timeseries_to_anomaly(wsa_s)\r\n dsa_s = gfdl_data.absolute_timeseries_to_anomaly(dsa_s)\r\n\r\n fig, ax1 = plt.subplots(figsize=(12, 5))\r\n years_w = range(miroc_data.start_year, miroc_data.start_year+len(miroc_data.precip[0]))\r\n years_d = range(gfdl_data.start_year, gfdl_data.start_year+len(gfdl_data.precip[0]))\r\n years_w_s = range(miroc_data.start_year+2, miroc_data.start_year+len(miroc_data.precip[0])-2)\r\n years_d_s = range(gfdl_data.start_year+2, gfdl_data.start_year+len(gfdl_data.precip[0])-2)\r\n # years_s = range(2008, 2099)\r\n ax1.set_ylim(ylim)\r\n ax1.set_ylabel(\"Anomaly (m yโปยน)\")\r\n ax1.set_xlabel(\"Year\")\r\n \r\n ax1.plot(years_w, wsa, \":\", label=\"MIROC-ESM-CHEM (raw)\", \r\n color=\"lightseagreen\", linewidth=1)\r\n ax1.plot(years_d, dsa, \":\", label=\"GFDL CM3 (raw)\", \r\n color=\"brown\", linewidth=1)\r\n ax1.plot(years_w_s, wsa_s, label=\"MIROC-ESM-CHEM (smoothed)\", color=\"lightseagreen\")\r\n ax1.plot(years_d_s, dsa_s, label=\"GFDL CM3 (smoothed)\", color=\"brown\")\r\n ax1.axhline(y=0, color=\"black\", linewidth=1)\r\n ax1.axvline(x=2020, color=\"red\", linewidth=1)\r\n\r\n title_text=\"Projected Precipitation Anomaly in \"+region_name\r\n ax1.set_title(title_text)\r\n fig.legend(loc=1, bbox_to_anchor=(1,1), bbox_transform=ax1.transAxes)\r\n plt.tight_layout()\r\n ## save as pdf:\r\n f = plt.gcf() # f = figure(n) if you know the figure number\r\n plt.savefig(\"Output/Figures/\"+title_text.replace(\" \",\"-\")+\".pdf\",format='pdf');", "title": "" }, { "docid": "eaa7b8f555263effe34d1dfdf2c6216a", "score": "0.55962896", "text": "def CreateSamplePlot(X_train, y_train, outFil):\n imgs = []\n for i in range(10):\n imgs.append(X_train[y_train == i][0].reshape(28, 28))\n return tools.PlotGrid(2, 5, imgs, outFil)", "title": "" }, { "docid": "ef32d1848f647c670d1a97cd268b55b6", "score": "0.5594782", "text": "def pop_chart(cur, conn):\n \n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n\n label = []\n population = []\n\n # Grabbing Populations and States from the Database\n cursor = cur.execute(\"SELECT state, population FROM Population\")\n for row in cursor:\n if row[0].split(\":\")[1] == \"2020\":\n label.append(row[0].split(\":\")[0])\n num = row[1]\n num = int(num.replace(',', ''))\n population.append(num)\n\n clearLabels = label[:8]\n for x in label[8:]:\n clearLabels.append(\"\")\n\n fig1, ax1 = plt.subplots()\n ax1.pie(population, labels=clearLabels, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title(\"Total 2020 US Population by State\")\n\n plt.show()", "title": "" }, { "docid": "4dd9a463549f17fd138738249271e36c", "score": "0.55942255", "text": "def make_plot(self, n_samples=100, theta=torch.tensor([0])):\n x, y = self.conditioned_sample(n_samples=n_samples, 
theta=theta)\n fig = plt.figure()\n axe = ax = fig.gca()\n axe.set_xlim(-2, 2)\n axe.set_ylim(-2, 2)\n sp, = axe.plot(x[:, 0], x[:, 1], color='k', marker='o', ls='')\n\n plt.show()\n return fig, axe, sp", "title": "" }, { "docid": "613e02c52e6143b14d4bd32674b234da", "score": "0.55923015", "text": "def visualize_data(data, response_name, predictor_name):\n ax = data.plot(kind='scatter', x=1, y=0,\n title=('Relationship of %s vs. %s' % (response_name, predictor_name)),\n #xlim=(axis_low, axis_high), ylim=(axis_low, axis_high)\n )\n ax.set_xlabel(predictor_name)\n ax.set_ylabel(response_name)\n #ax.set_aspect(1)", "title": "" }, { "docid": "f52c269b11d866f5d13d84e1cb30b5a3", "score": "0.5590248", "text": "def plot(self):\n\n plot_minutiae(self.image_enhanced, list(self.profile.keys()), size=8)", "title": "" }, { "docid": "e3232e7f4e75596004ce77a0c47d7c89", "score": "0.55844665", "text": "def plot_species(species_sizes):\n num_generations = len(species_sizes)\n curves = np.array(species_sizes).T\n\n fig, ax = plt.subplots()\n ax.stackplot(range(num_generations), *curves)\n\n plt.title(\"Speciation\")\n plt.ylabel(\"Size per Species\")\n plt.xlabel(\"Generations\")\n\n plt.show()\n plt.close()", "title": "" }, { "docid": "0bae28a2f3dd765e050b07c74e5ed889", "score": "0.55841804", "text": "def plot_prototypes(self):\n figs = {}\n for h in np.unique(self.design.header):\n fig = pl.figure()\n splines = self.design.get(h)\n if 'rate' in h:\n pl.plot(np.sum(self.design.get(h)[:self.design.trial_length] * self.beta[self.design.getIndex(h)],1))\n pl.title(h)\n elif len(splines.shape) == 1 or (splines.shape[0] == 1):\n pl.plot(np.sum(self.design.get(h) * self.beta[self.design.getIndex(h)],1),'o')\n pl.title(h)\n elif len(splines.shape) == 2:\n pl.plot(np.sum(self.design.get(h) * self.beta[self.design.getIndex(h)],1))\n pl.title(h)\n elif len(splines.shape) == 3:\n slices = np.zeros(splines.shape)\n for (i, ind) in zip(range(splines.shape[0]),self.design.getIndex(h)):\n slices[i,:,:] = splines[i,:,:] * self.beta[ind]\n pl.imshow(slices.sum(axis=0),cmap='jet')\n figs[h + '_sum'] = fig\n fig = pl.figure()\n for i in range(len(slices)):\n pl.subplot(np.ceil(np.sqrt(slices.shape[0])),np.ceil(np.sqrt(slices.shape[0])),i+1)\n pl.imshow(slices[i],vmin=np.percentile(slices,1),vmax=np.percentile(slices,99),cmap='jet')\n pl.suptitle(h)\n figs[h] = fig\n else:\n pl.plot(np.sum(self.design.get(h) * self.beta[self.design.getIndex(h)],1))\n pl.title(h)\n figs[h] = fig\n return figs", "title": "" }, { "docid": "8b4b0dd385c564dd5b9473403cfacf35", "score": "0.55787134", "text": "def visualize_data(X, T):\n\n plt.title(\"Blood Pressure vs. Age\")\n plt.scatter(X[:,0], T[:])\n plt.xlabel('Age (years)')\n plt.ylabel('Systolic Blood Pressure')\n plt.show()\n \n plt.title(\"Blood Pressure vs. Weight\")\n plt.scatter(X[:,1], T[:])\n plt.xlabel('Weight (pounds)')\n plt.ylabel('Systolic Blood Pressure')\n plt.show()", "title": "" }, { "docid": "b77942bc5ca98a1bc250284a16e8aded", "score": "0.55761385", "text": "def static_figure():\n \n # i. docstring\n \n # ii. make the plot\n ax = mean['n_persons'].plot(legend = True)\n \n # iii. set thousands separator on y-axis\n ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))\n \n # iv. set x-axis to the number of weeks according to duration\n ax.set_xticks([52, 78, 104, 130, 156])\n \n # v. set labels\n ax.set_ylabel('number of persons')\n ax.set_xlabel('duration in weeks')\n \n # vi. 
set title\n ax.set_title('Figure 1')", "title": "" }, { "docid": "a333cb0e4da03c469ca74547263ed5da", "score": "0.5556511", "text": "def summary_plot():\n\n # Mass flux versus distance, lower limit, compact particles, average radius per mass bin\n plt.figure()\n [plt.semilogy(data.AU,data.flux_lower[:,bin],ls='dashed',marker='+',label=\"%3.2f\" % data.diam_compact_um[bin]+' um') for bin in range(data.useful_bins)]\n plt.legend(loc=0)\n plt.grid(1)\n plt.title('Mass flux versus heliocentric distance from Fulle (2010)\\nLower limit, average radius, compact particles')\n plt.xlabel('Heliocentric distance (AU)')\n plt.ylabel('Mass flux (kg/s)')\n\n # Mass flux versus mean particle diameter, compact particles\n\n plt.figure()\n [plt.loglog(data.diam_compact,data.flux_lower[bin,:],ls='dashed',marker='+',label=\"%3.2f\" % data.AU[bin]+' AU') for bin in range(data.num_au)]\n plt.legend(loc=0)\n plt.grid(1)\n plt.title('Mass flux versus mean diameter from Fulle (2010)\\nLower limit, compact particles')\n plt.xlabel('Mean diameter (m)')\n plt.ylabel('Mass flux (kg/s)')\n\n # Number flux per mass bin as a function of average diameter per mass bin (one curve for each distance)\n\n plt.figure()\n [plt.loglog(data.diam_compact,data.flux_upper[bin,:]/data.mass_compact_avg,ls='dashed',marker='+',label=\"%3.2f\" % data.AU[bin]+' AU') for bin in range(data.num_au)]\n plt.legend(loc=0)\n plt.grid(1)\n plt.ylabel('Dust flux (#/s)')\n plt.xlabel('Mean diameter (m)')\n plt.title('Number flux versus mean diameter from Fulle (2010)\\nUpper limit, compact particles')\n\n plt.show()", "title": "" }, { "docid": "82d3a2f1797279a60e2b7182ad2f35ac", "score": "0.55554485", "text": "def plot_populations(self, category_to_loop='agegroups', scenario=0, fraction=False, requirements=('',),\n exclusions=('we all love futsal',)):\n\n # prelims\n fig, ax, max_dim, n_rows, n_cols = initialise_figures_axes(1, room_for_legend=True)\n start_time = self.inputs.model_constants['plot_start_time']\n start_time_index = self.find_start_time_index(start_time - 1., scenario, purpose='manual')\n times = self.outputs['manual']['epi'][0]['times'][start_time_index:]\n\n # get data\n cumulative_data = [0.] * len(times)\n current_data = self.sum_compartments_by_category(category_to_loop, scenario, start_time_index,\n requirements=requirements, exclusions=exclusions)\n\n # plot patches and proxy by category\n for l, label in enumerate(current_data):\n previous_data, cumulative_data = increment_list_for_patch(current_data[label], cumulative_data)\n colour = self.colour_theme[l + 1]\n\n if len(previous_data) == len(cumulative_data) and len(times) == len(previous_data):\n ax.fill_between(times, previous_data, cumulative_data, facecolor=colour, edgecolor=colour, alpha=.8)\n ax.plot([-1e2], [0.], color=colour, label=t_k.find_title_from_dictionary(label), linewidth=5.) 
# proxy\n\n # finish off\n self.tidy_x_axis(ax, start_time, 2035., max_dim)\n self.tidy_y_axis(ax, '', max_dim, max_value=max(cumulative_data))\n ax.legend(bbox_to_anchor=(1.3, 1))\n filename = '_' + ('fraction' if fraction else 'population') + '_' + category_to_loop\n title = ('Fraction' if fraction else 'Size') + ' of population by ' \\\n + t_k.find_title_from_dictionary(category_to_loop, capital_first_letter=False)\n for requirement in requirements:\n if requirement != '':\n filename += '_only' + requirement\n title += ',' + requirement + ' only'\n for exclusion in exclusions:\n if exclusion != 'we all love futsal':\n filename += '_exclude' + exclusion\n title += ', except ' + exclusion\n self.finish_off_figure(fig, 1, filename, title)", "title": "" }, { "docid": "3bd90d847607b0dd6e9ecc994260c9d9", "score": "0.55543035", "text": "def test_plot_gp_dist(self):\n import matplotlib.pyplot as plt\n\n X = 100\n S = 500\n fig, ax = plt.subplots()\n pm.gp.util.plot_gp_dist(\n ax, x=np.linspace(0, 50, X), samples=np.random.normal(np.arange(X), size=(S, X))\n )\n plt.close()\n pass", "title": "" }, { "docid": "bd33570ceaf1d1ee0bddbe1969871234", "score": "0.5554129", "text": "def plot_sample(x):\n plt.imshow(x[:, :, 0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:, :, 1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:, :, 2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "title": "" }, { "docid": "b1a8623b58d0721af145c80a66c427e7", "score": "0.55540353", "text": "def make_population(self, np):\r\n self.NP = np\r\n for i in range(self.NP):\r\n x = Individual(self.d, self.lB, self.uB)\r\n x.randomize()\r\n self.population.append(x.parameters)\r\n self.generations = [self.population] # save all generations for animation\r", "title": "" }, { "docid": "7912d5a884e16d6daeec9cab789694b1", "score": "0.5546489", "text": "def showPlot1():\n x_axis = []\n y_axis = []\n for x in range(1,11):\n y = runSimulation(x,1.0,20,20,.8,10,StandardRobot)\n x_axis.append(x)\n y_axis.append(y)\n pylab.plot(x_axis,y_axis,'bo')\n pylab.xlabel('Roombas')\n pylab.ylabel('Time/Steps')\n pylab.title('Time to clean 80% of a 20x20 room with each of 1-10 roombas')\n pylab.show()", "title": "" }, { "docid": "5069adf0405990b76899405d1628a42c", "score": "0.5545737", "text": "def showPlot3():\n # TODO: Your code goes here\n roomShape = [(20,20),(25,16),(40,10),(50,8),(80,5),(100,4)]\n min_coverage = 0.75\n num_sims = 50\n timesteps = []\n for i in roomShape:\n result = runSimulation(2,1.0,i[0],i[1],min_coverage,num_sims,Robot, False)\n timesteps.append(averageLength(result))\n xAxis = []\n for i in roomShape:\n xAxis.append(float(i[0])/i[1])\n \n pylab.plot(xAxis,timesteps)\n pylab.xlabel('ratio of width and height')\n pylab.ylabel('Timesteps')\n pylab.title('Time to clean 75% of rooms with different room shape')\n pylab.show()", "title": "" }, { "docid": "241be14cb1fa2c33142394dfc4ad48ed", "score": "0.5542577", "text": "def make_predictions_visual(predictions):\n df = pd.DataFrame(predictions) \n \n df.sort_values(by=\"probability\",\n ascending = False,\n inplace = True)\n \n labels = df['label'].values\n sizes = df['probability'].values\n \n p = figure(x_range = labels, plot_height = 350,\n title = \"Confidence over dog breeds\",\n tools = \"\", toolbar_location = None)\n p.vbar(x=labels, top=sizes, width=0.9, color=Reds5)\n p.xgrid.grid_line_color = None\n p.y_range.start = 0\n p.y_range.end = 1\n \n p.xaxis.axis_label = 'Potential Breeds'\n p.yaxis.axis_label = 
'Probability'\n \n script, div = components(p)\n return script,div", "title": "" }, { "docid": "a6811233a9ba733162dc2ec853ecb9a6", "score": "0.5539957", "text": "def make_plot(huc_12):\n import matplotlib\n\n matplotlib.use(\"agg\")\n import matplotlib.pyplot as plt\n\n (fig, ax) = plt.subplots(2, 1, figsize=(7, 9))\n IDEPDB = get_dbconn(\"idep\")\n cursor = IDEPDB.cursor()\n\n cursor.execute(\n \"\"\"\n select extract(year from valid) as yr, sum(loss), hs_id from results \n where huc_12 = %s and scenario = 0 GROUP by hs_id, yr\n \"\"\",\n (huc_12,),\n )\n data = {}\n for row in cursor:\n year = row[0]\n if not data.has_key(year):\n data[year] = []\n data[year].append(row[1] * 4.463)\n\n colors = {}\n colorpool = list(\n plt.get_cmap(\"jet\")(np.linspace(0, 1.0, len(data.keys())))\n )\n\n # Convert into numpy arrays\n for yr in data.keys():\n data[yr] = np.array(data[yr])\n\n x = []\n y = []\n for i in range(1, len(data[yr])):\n x.append(i)\n y.append(np.average(data[yr][:i]))\n\n stddev = np.std(data[yr])\n mu = y[-1]\n cnt = np.sum(np.where(data[yr] > (mu + 2.0 * stddev), 1, 0))\n cnt += np.sum(np.where(data[yr] < (mu - 2.0 * stddev), 1, 0))\n c = colorpool.pop()\n line = ax[0].plot(\n x,\n y,\n color=c,\n label=\"%i $\\mu$=%.1f $\\mathbb{R}$=%.1f\\n$\\sigma$=%.1f <$2\\sigma$=%.1f%%\"\n % (\n yr,\n mu,\n max(data[yr]) - min(data[yr]),\n stddev,\n (len(data[yr]) - cnt) / float(len(data[yr])) * 100.0,\n ),\n )\n colors[yr] = line[0].get_color()\n\n ax[0].set_title(\"Soil Detachment Convergence for HUC12: %s\" % (huc_12))\n ax[0].set_ylabel(\"Average Soil Detachment [tons/acre]\")\n ax[0].set_xlabel(\"Increasing Hillslope Sample Size\")\n ax[0].grid(True)\n\n # Shrink current axis's height by 10% on the bottom\n box = ax[0].get_position()\n ax[0].set_position(\n [box.x0, box.y0 + box.height * 0.35, box.width, box.height * 0.65]\n )\n\n ax[0].legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.25),\n fancybox=True,\n shadow=True,\n ncol=3,\n scatterpoints=1,\n fontsize=10,\n )\n\n for yr in data.keys():\n sorted_data = np.sort(data[yr])\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data))\n ax[1].plot(sorted_data, yvals * 100.0, color=colors[yr], lw=2)\n\n ax[1].grid(True)\n ax[1].set_yticks([0, 10, 25, 50, 75, 90, 100])\n ax[1].set_ylabel(\"Sample CDF [%]\")\n ax[1].set_xlabel(\"Average Soil Detachment [tons/acre]\")\n\n box = ax[1].get_position()\n ax[1].set_position([box.x0, box.y0, box.width, box.height * 0.75])\n\n # Sent output\n ram = cStringIO.StringIO()\n plt.savefig(ram, format=\"png\")\n ram.seek(0)\n r = ram.read()\n\n sys.stdout.write(\"Content-type: image/png\\n\\n\")\n sys.stdout.write(r)", "title": "" }, { "docid": "f8328ec3bafca0c4faa44209ff1a5101", "score": "0.5535671", "text": "def returnMontage(np_array, title):\n fig, (ax1) = plt.subplots(1, 1, figsize=(20, 20))\n ax1.set(title=title)\n ax1.imshow(montage(np_array), cmap='gray')", "title": "" }, { "docid": "3975678eb2dac8bf5775b072e7f7e2cd", "score": "0.5535302", "text": "def generate_population_data():\n return pd.DataFrame([{\n 'year': year,\n 'people': randint(1, 10),\n 'sex': sex,\n 'age': age\n } for age in range(0, 99) for year in range(1890, 2000)\n for sex in (0, 1)])", "title": "" }, { "docid": "f0865654fb26659642272261304b38dd", "score": "0.5533059", "text": "def main(overpass, fname, **kwargs):\r\n params = defaults\r\n params.update(**kwargs)\r\n \r\n bg_kwargs = { 'background_factor':params.f_background,\r\n 'ymax_positive':params.y_max_positive,\r\n 
'ymax_negative':params.y_max_negative,\r\n 'ymin_negative':params.y_min_negative,\r\n 'ymin_positive':params.y_min_positive,\r\n 'offset':params.offset,\r\n 'sign':params.direction\r\n }\r\n \r\n filt_args = { 'chi_squared_max':params.chi_squared_max,\r\n 'snr_strong_co2_min':params.snr_strong_co2_min,\r\n 'albedo_min':params.albedo_min,\r\n 'albedo_max':params.albedo_max,\r\n 'outcome_flags':params.outcome_flags,\r\n 'surface_pressure_min':params.surface_pressure_min,\r\n 'surface_pressure_max':params.surface_pressure_max\r\n }\r\n \r\n plume_args = dict( plume_factor = params.f_plume,\r\n xmax = params.x_max\r\n )\r\n\r\n print \"Creating subplots\"\r\n # Initialize gridspecs for the different panels\r\n obs_gs = gridspec.GridSpec(*params._shape)\r\n obs_gs.update(**params._obs_grid)\r\n \r\n mod_gs = gridspec.GridSpec(*params._shape)\r\n mod_gs.update(**params._mod_grid)\r\n \r\n cbar_gs = gridspec.GridSpec(*params._shape)\r\n cbar_gs.update(**params._cbar_grid)\r\n \r\n vert_gs = gridspec.GridSpec(*params._shape)\r\n if params.show_latitude:\r\n params._vert_grid.update(right=params._lat_rspace)\r\n vert_gs.update(**params._vert_grid)\r\n \r\n fig = plt.figure(figsize=params._size)\r\n \r\n h, w = params._shape\r\n half_w = params._obs_width\r\n \r\n # add subplots using gridspecs\r\n vert_ax = fig.add_subplot(vert_gs[:params._vert_height,\r\n :params._vert_width])\r\n \r\n obs_ax = fig.add_subplot(obs_gs[:params._obs_height,\r\n params._vert_width:params._vert_width + half_w])\r\n \r\n mod_ax = fig.add_subplot(mod_gs[:int(h//2),\r\n params._vert_width + half_w:])\r\n modfull_ax = fig.add_subplot(mod_gs[int(h//2):,\r\n params._vert_width + half_w:])\r\n \r\n cbar_ax = fig.add_subplot(cbar_gs[params._cbar_height,\r\n params._vert_width:params._vert_width + half_w])\r\n \r\n # set tick locations if the step sizes are specified\r\n if params.x_step:\r\n dx = params.x_step\r\n xticks = np.arange(int(dx*math.ceil(params.xlim[0]/dx)),\r\n params.xlim[1]+1., dx)\r\n obs_ax.set_xticks(xticks)\r\n mod_ax.set_xticks(xticks)\r\n modfull_ax.set_xticks(xticks)\r\n \r\n if params.y_step:\r\n dy = params.y_step\r\n yticks = np.arange(int(dy*math.ceil(params.ylim[0]/dy)),\r\n params.ylim[1], dy)\r\n yticks_full = np.arange(int(dy*math.ceil(params.ylim[0]/dy)),\r\n params.ylim[1]+1., dy)\r\n obs_ax.set_yticks(yticks_full)\r\n mod_ax.set_yticks(yticks_full)\r\n modfull_ax.set_yticks(yticks)\r\n \r\n # add labels, titles, set fonts\r\n label_args = dict( fontsize = params._labelfont,\r\n fontname = params.font )\r\n obs_ax.set_xlabel(params._xlabel, **label_args)\r\n obs_ax.set_ylabel(params._ylabel, **label_args)\r\n modfull_ax.set_xlabel(params._xlabel, **label_args)\r\n \r\n mod_ax.tick_params('x', bottom='off', labelbottom='off')\r\n \r\n title_args = dict( fontsize = params._titlefont,\r\n fontname = params.font )\r\n obs_ax.set_title(params._obslabel, **title_args)\r\n mod_ax.set_title(params._modlabel, **title_args)\r\n \r\n vlabel_args = dict(labelpad=params._vleftlabelpad)\r\n vlabel_args.update(**label_args)\r\n vert_ax.set_xlabel(params._vxlabel, **label_args)\r\n vert_ax.set_ylabel(params._vylabel, **vlabel_args)\r\n \r\n\r\n \r\n # now start the real work, axes are set as much as we can right now\r\n \r\n a = params.stability if params.stability else overpass.a\r\n \r\n try:\r\n wind = getattr(overpass, params.wind_source)\r\n except AttributeError:\r\n raise AttributeError('Overpass has no wind source \"%s\"' % params.wind_source)\r\n except Exception:\r\n raise\r\n wind = 
wind.rotate(params.wind_adjustment)\r\n \r\n windspeed = Formats.windspeedformat % wind.speed\r\n wind_direction = Formats.winddirectionformat % wind.bearing\r\n fig.suptitle(params._title % (overpass.info, windspeed, wind_direction), **title_args)\r\n \r\n F = overpass.get_emissions(temporal_factors=params.temporal_factors)\r\n F.convert(Units._model_units)\r\n u = wind.speed\r\n \r\n free_secondary_emissions = [src.get_emissions(overpass, temporal_factors=params.temporal_factors)\r\n for src in params.secondary_sources]\r\n \r\n fixed_secondary_emissions = [src.get_emissions(overpass, temporal_factors=params.temporal_factors)\r\n for src in params.fixed_secondary_sources]\r\n all_emissions = [F] + free_secondary_emissions + fixed_secondary_emissions\r\n \r\n co2_var = \"smoothed_{}_xco2\" if params.smooth else \"{}_xco2\"\r\n co2_var = co2_var.format(params.bias_correction)\r\n \r\n print 'Running Model with parameters'\r\n model_results = ModelFit.Model(\r\n overpass,\r\n f_plume = params.f_plume,\r\n f_background = params.f_background,\r\n offset = params.offset,\r\n y_max_positive = params.y_max_positive,\r\n y_max_negative = params.y_max_negative,\r\n y_min_negative = params.y_min_negative,\r\n y_min_positive = params.y_min_positive,\r\n direction = params.direction,\r\n wind_adjustment = params.wind_adjustment,\r\n wind_sources = params.wind_source,\r\n smooth = params.smooth,\r\n surface_stability = a,\r\n stability = params.stability,\r\n temporal_factors = params.temporal_factors,\r\n bias_correction = params.bias_correction,\r\n LocalBackground = PlumeModel.InBackground,\r\n LocalInPlume = PlumeModel.InPlume,\r\n co2_source = 'xco2',\r\n custom_wind = None,\r\n snr_strong_co2_min = params.snr_strong_co2_min,\r\n chi_squared_max = params.chi_squared_max,\r\n albedo_min = params.albedo_min,\r\n albedo_max = params.albedo_max,\r\n outcome_flags = params.outcome_flags,\r\n surface_pressure_max = params.surface_pressure_max,\r\n surface_pressure_min = params.surface_pressure_min, \r\n background_average = params.background_average,\r\n secondary_sources = params.secondary_sources,\r\n fixed_secondary_sources = params.fixed_secondary_sources,\r\n x_max = params.x_max,\r\n scatter_plot = params.scatter_plot,\r\n force_winds = params.force_winds,\r\n units = params.units,\r\n sza_adjustments = params.sza_adjustments,\r\n weighted = params.weighted,\r\n uncertainty = params.uncertainty\r\n)\r\n _, _, n_plume, cor = model_results[0]\r\n \r\n if params.secondary_sources:\r\n coordinate = Geometry.CoordGeom(wind)\r\n posns_from_main = [(0,0)]+[coordinate.coord_to_wind_basis(overpass.lat, overpass.lon, second.lat, second.lon) for second in params.secondary_sources]\r\n x = [pair[0] for pair in posns_from_main]\r\n y = [pair[1] for pair in posns_from_main]\r\n\r\n if params.plot_offset:\r\n x_center, y_center = np.average(np.array([x,y]), axis=1, weights=([F]+free_secondary_emissions))\r\n x_from_center = [x0 - x_center for x0 in x]\r\n y_from_center = [y0 - y_center for y0 in y]\r\n \r\n # indices of top and bottom sources\r\n neg_offset = y_from_center.index(max(y_from_center))\r\n pos_offset = y_from_center.index(min(y_from_center))\r\n \r\n ## -1*y because the y in wind-basis is left-handed but we want the\r\n ## plots to be the regular right-handed system\r\n # offset of bottom source on plot\r\n pos_start = (x_from_center[pos_offset],-1*y_from_center[pos_offset])\r\n # offset of top source on plot\r\n neg_start = (x_from_center[neg_offset],-1*y_from_center[neg_offset])\r\n \r\n 
else:\r\n x_center, y_center = 0, 0\r\n \r\n pos_start = (0,0)\r\n neg_start = (0,0)\r\n\r\n else:\r\n x_center, y_center = (0,0)\r\n \r\n pos_start = (0,0)\r\n neg_start = (0,0)\r\n \r\n c_offset = (x_center, y_center)\r\n \r\n x_min = params.xlim[0]*1000.\r\n x_max = params.xlim[1]*1000.\r\n y_min = params.ylim[0]*1000.\r\n y_max = params.ylim[1]*1000.\r\n \r\n print 'Opening full file for making plots'\r\n data = File.full(overpass)\r\n \r\n def distance(vertices, tlat, tlon, wind, shift=(0,0)):\r\n \"\"\"Takes in an array shape (4, 2) of\r\n [ [lon1 lat1],\r\n [lon2 lat2],...]\r\n and returns an equivalenty shaped array of\r\n [ [x1 y1],\r\n [x2 y2], ...]\r\n As measured by in the direction of the wind\"\"\"\r\n distance_array = []\r\n for i in range(4):\r\n lon, lat = vertices[i]\r\n x, y = Geometry.CoordGeom(wind).coord_to_wind_basis(tlat, tlon, lat, lon)\r\n distance_array.append((x-shift[0],-(y-shift[1])))\r\n return np.array(Geometry.convex_hull(distance_array))\r\n \r\n distances = np.array([distance(data.retrieval_vertex_coordinates[j], overpass.lat, overpass.lon, wind, shift=c_offset) for j in range(len(data.retrieval_vertex_coordinates))])\r\n \r\n background = File.File()\r\n background_k = []\r\n \r\n plume = File.File()\r\n else_data = File.File()\r\n failed_quality_data = File.File()\r\n \r\n x_offset, y_offset = data.get_offset(overpass, wind)\r\n \r\n if params.secondary_sources:\r\n secondary_offsets = data.get_secondary_offset(overpass, wind, secondary_sources=params.secondary_sources)\r\n \r\n print 'Classifying points'\r\n \r\n verts = []\r\n observed_xco2 = []\r\n model_xco2 = []\r\n \r\n verts_qf = []\r\n observed_xco2_qf = []\r\n model_xco2_qf = []\r\n all_sources = params.secondary_sources + params.fixed_secondary_sources\r\n for i in range(len(data)):\r\n coordinate = Geometry.CoordGeom(wind)\r\n rlat = data.retrieval_latitude[i]\r\n rlon = data.retrieval_longitude[i]\r\n x,y = coordinate.coord_to_wind_basis(overpass.lat,\r\n overpass.lon ,rlat, rlon)\r\n dist = Geometry.CoordGeom.cartesian_distance((x,y),\r\n (x_offset, y_offset))\r\n \r\n in_background = PlumeModel.InBackground(x,y,dist,u,1.0,a, **bg_kwargs)\r\n in_plume = PlumeModel.InPlume(x,y,u,F,a,**plume_args)\r\n for j,second in enumerate(params.secondary_sources):\r\n xs,ys = coordinate.coord_to_wind_basis(second.lat,second.lon,\r\n rlat, rlon)\r\n dx, dy = secondary_offsets[j]\r\n secondary_dist = Geometry.CoordGeom.cartesian_distance((xs,ys),\r\n (dx,dy))\r\n in_secondary_background = PlumeModel.InBackground(xs,ys,secondary_dist,u,1.,a,**bg_kwargs)\r\n in_secondary_plume = PlumeModel.InPlume(xs,ys,u,1.,a,**plume_args)\r\n \r\n in_background = in_background and in_secondary_background\r\n in_plume = in_plume or in_secondary_plume\r\n \r\n if in_plume:\r\n if data.quality(i, **filt_args):\r\n plume.append(data, i)\r\n else:\r\n failed_quality_data.append(data, i)\r\n \r\n elif in_background:\r\n if data.quality(i, **filt_args):\r\n background.append(data, i)\r\n background_k.append(data.k[i])\r\n else:\r\n failed_quality_data.append(data, i)\r\n \r\n else:\r\n if data.quality(i, **filt_args):\r\n else_data.append(data, i)\r\n else:\r\n failed_quality_data.append(data, i)\r\n \r\n \r\n x_shifted = x - c_offset[0]\r\n y_shifted = y + c_offset[1]\r\n \r\n if y_min<=y_shifted<=y_max and x_min<=x_shifted<=x_max:\r\n lat_row = int((y_max-y_shifted)//1000)\r\n lon_col=int((x_shifted-x_min)//1000)\r\n \r\n if params.sza_adjustments:\r\n sza = Geometry.SZA(data,wind)\r\n enhancement = sza.V(x,y,u,F,a,i)\r\n 
else:\r\n enhancement = PlumeModel.V(x,y,u,F,a)\r\n\r\n for ind, child in enumerate(all_sources):\r\n F_second = all_emissions[ind+1]\r\n \r\n x_p, y_p = coordinate.coord_to_wind_basis(overpass.lat, overpass.lon, child.lat, child.lon)\r\n xs, ys = (x-x_p, y-y_p)\r\n \r\n if params.sza_adjustments:\r\n second_enhancement = sza.V(xs,ys,u,F_second,a,i)\r\n else:\r\n second_enhancement = PlumeModel.V(xs,ys,u,F_second,a)\r\n \r\n enhancement+=second_enhancement\r\n if data.quality(i, **filt_args):\r\n model_xco2.append(enhancement)\r\n verts.append(distances[i])\r\n observed_xco2.append(data[co2_var][i])\r\n else:\r\n model_xco2_qf.append(enhancement)\r\n verts_qf.append(distances[i])\r\n observed_xco2_qf.append(data[co2_var][i])\r\n \r\n if params.background_average:\r\n background_mean = background_average\r\n else:\r\n background_mean = np.mean(background[co2_var])\r\n background_mean_k = np.mean(background_k)\r\n background_n = len(background)\r\n \r\n model_xco2 = np.array(model_xco2)\r\n model_xco2/=(background_mean*background_mean_k)\r\n model_xco2+=1.\r\n \r\n model_xco2_qf = np.array(model_xco2_qf)\r\n model_xco2_qf/=(background_mean*background_mean_k)\r\n model_xco2_qf+=1.\r\n \r\n verts = np.array(verts)\r\n verts_qf = np.array(verts_qf)\r\n observed_xco2 = np.array(observed_xco2)/float(background_mean)\r\n observed_xco2_qf = np.array(observed_xco2_qf)/float(background_mean)\r\n \r\n print 'Making grid plot'\r\n observed_coll = PolyCollection(verts/1000., array = observed_xco2,\r\n edgecolors='none', cmap=params._cmap)\r\n\r\n observed_coll_qf = PolyCollection(verts_qf/1000., array = observed_xco2_qf,\r\n edgecolors = 'none', cmap = params._cmap,\r\n alpha = params.opacity)\r\n \r\n model_coll = PolyCollection(verts/1000., array = model_xco2,\r\n edgecolors= 'none', cmap = params._cmap)\r\n \r\n model_coll_qf = PolyCollection(verts/1000., array = model_xco2_qf,\r\n edgecolors='none', cmap = params._cmap,\r\n alpha = params.opacity)\r\n \r\n collections = [observed_coll, observed_coll_qf,\r\n model_coll, model_coll_qf]\r\n \r\n for coll in collections:\r\n coll.set_clim(*params.clim)\r\n \r\n obs_ax.add_collection(observed_coll)\r\n obs_ax.add_collection(observed_coll_qf)\r\n \r\n mod_ax.add_collection(model_coll)\r\n mod_ax.add_collection(model_coll_qf)\r\n \r\n cbarticks = np.linspace(params.clim[0], params.clim[1], 5)\r\n cbar = fig.colorbar(observed_coll, cax=cbar_ax, orientation='horizontal',\r\n use_gridspec=True, ticks=cbarticks)\r\n \r\n cbar.set_label(params._cbarlabel, **label_args)\r\n cbar.ax.set_xticklabels([params._ctick_fmt.format(x) for x in cbarticks],\r\n fontname=params.font)\r\n \r\n bg_mean = Formats.ppmformat % background_mean\r\n scor = Formats.ppmformat % cor\r\n \r\n # plume = File.File()\r\n # else_data = File.File()\r\n # failed_quality_data = File.File()\r\n\r\n # make vertplot\r\n print 'Making vertical plot'\r\n ticks_plus = np.append(np.arange(0,params.vplot_max,50),\r\n [params.vplot_max])\r\n ticks_minus = sorted(-1*np.append(np.arange(50,params.vplot_min,50),\r\n [params.vplot_min]))\r\n \r\n dist_ticks = np.append(ticks_minus,ticks_plus)\r\n \r\n all_source_latitudes = [overpass.lat] + [second.lat for second in params.secondary_sources]\r\n lat_length = 0.001*Geometry.CoordGeom(wind).distance(overpass.lat,overpass.lon,overpass.lat+1,overpass.lon)\r\n weighted_avg_lat = np.average(all_source_latitudes,\r\n weights=([F]+free_secondary_emissions))\r\n \r\n lat_max = weighted_avg_lat + params.vplot_max/lat_length\r\n lat_min = weighted_avg_lat - 
params.vplot_min/lat_length\r\n \r\n lat_offset = overpass.lat - weighted_avg_lat\r\n \r\n vert_ax.scatter(plume[co2_var], plume.retrieval_latitude,\r\n color=params._plume_colour, s=params._msize)\r\n vert_ax.scatter(else_data[co2_var], else_data.retrieval_latitude,\r\n color=params._else_colour, s=params._msize)\r\n vert_ax.scatter(background[co2_var], background.retrieval_latitude,\r\n color=params._bg_colour, s=params._msize)\r\n vert_ax.scatter(failed_quality_data[co2_var],\r\n failed_quality_data.retrieval_latitude,\r\n color=params.failed_quality_colour, s=params._msize)\r\n vert_ax.plot([background_mean, background_mean], [-params.vplot_min, params.vplot_max],\r\n color = params._bg_mean_colour)\r\n vert_ax.plot(background_mean, weighted_avg_lat,\r\n markersize=params._vplot_src_msize,\r\n marker = params._vplot_src_marker,\r\n color=params._vplot_src_colour)\r\n \r\n vert_ax.set_ylim(lat_min, lat_max)\r\n vert_ax.set_xlim(params.xco2_min, params.xco2_max)\r\n vert_ax.set_title(params._vplot_title % background_n,\r\n fontsize=params._labelfont,\r\n fontname=params.font)\r\n \r\n vert_ax.set_xticks(np.linspace(params.xco2_min, params.xco2_max, 5))\r\n \r\n \r\n dax = vert_ax.twinx()\r\n \r\n vert_ax.tick_params(labelsize=params._ticklabelfont)\r\n dax.tick_params(labelsize=params._ticklabelfont)\r\n \r\n if params.show_latitude:\r\n dax.set_ylabel(params._vlatlabel, fontsize=params._labelfont,\r\n fontname = params.font, labelpad = params._vrightlabelpad)\r\n \r\n dax.set_yticks(dist_ticks)\r\n dax.set_ylim(-params.vplot_min, params.vplot_max)\r\n dax.tick_params('y', left=True, labelleft=True,\r\n right=False, labelright=False)\r\n \r\n if params.show_latitude:\r\n vert_ax.tick_params('y', left=False, right=True,\r\n labelleft=False, labelright=True)\r\n else:\r\n vert_ax.tick_params('y', left=False, right=False,\r\n labelleft=False, labelright=False)\r\n vert_ax.set_yticks([])\r\n \r\n if params.background_x_step:\r\n xco2_ticks = np.arange(params.xco2_min, params.xco2_max+0.1,\r\n params.background_x_step)\r\n vert_ax.set_xticks(xco2_ticks)\r\n Formats.set_tickfont(vert_ax)\r\n Formats.set_tickfont(dax)\r\n \r\n # add model to plot\r\n print 'Calculating model values for grid plot'\r\n position_from_main = [(0,0)]\r\n coordinate = Geometry.CoordGeom(wind)\r\n all_sources = params.secondary_sources + params.fixed_secondary_sources\r\n for second in all_sources:\r\n posn = coordinate.coord_to_wind_basis(overpass.lat, overpass.lon, second.lat, second.lon)\r\n position_from_main.append(posn)\r\n \r\n position_from_center = [(x-c_offset[0], y-c_offset[1]) for (x,y) in position_from_main]\r\n \r\n x_ax_absolute = np.arange(x_min, x_max+params._dx, params._dx)\r\n y_ax_absolute = np.arange(y_min, y_max+params._dy, params._dy)\r\n y_ax_absolute-=0.5*params._dy\r\n model_array = np.ones((len(y_ax_absolute), len(x_ax_absolute)))\r\n \r\n x_ax_plot = (x_ax_absolute-0.5*params._dx)/1000.\r\n y_ax_plot = (y_ax_absolute-0.5*params._dx)/1000.\r\n \r\n normalize = background_mean*background_mean_k\r\n for ind in range(len(position_from_main)):\r\n x_rel, y_rel = position_from_center[ind]\r\n x_ax = x_ax_absolute - x_rel\r\n y_ax = y_ax_absolute + y_rel\r\n F = all_emissions[ind]\r\n rows, cols = model_array.shape\r\n for row in range(rows):\r\n for col in range(cols):\r\n x,y = x_ax[col], y_ax[row]\r\n enhancements = PlumeModel.V(x, y, u, F, a)/normalize\r\n model_array[row, col] += enhancements\r\n \r\n mask_value = 1+(params.clim[1]-1)*(params._decay_threshold)\r\n model_array = 
np.ma.masked_less_equal(model_array, mask_value)\r\n \r\n modfull_ax.pcolormesh(x_ax_plot, y_ax_plot, model_array, cmap=params._cmap, vmin=params.clim[0], vmax=params.clim[1])\r\n modfull_ax.grid(True)\r\n \r\n obs_ax.text(params._paneltext_x, params._paneltext_y,\r\n 'a = {}\\nBackground = {} ppm'.format(a, bg_mean),\r\n fontsize=params._textfont, fontname=params.font,\r\n transform = obs_ax.transAxes)\r\n \r\n obs_ax.text(params._paneltext_xspace, 1-params._paneltext_y,\r\n 'Number of points in plume: %s\\nR = %s'%(n_plume, scor),\r\n fontsize = params._textfont, verticalalignment = 'top',\r\n fontname = params.font, transform = obs_ax.transAxes)\r\n \r\n bounds = boundaries.Boundaries(obs_ax, mod_ax, modfull_ax)\r\n bounds.plot(a, pos_start, neg_start, params.plume_thresholds,\r\n params.background_thresholds, xmax=1000*params.xlim[1],\r\n ymax=params.ylim[1], offset=params.offset) \r\n \r\n grid_axes = (obs_ax, mod_ax, modfull_ax)\r\n axes = (vert_ax, obs_ax, mod_ax, modfull_ax)\r\n for gax in grid_axes:\r\n gax.set_xlim(*params.xlim)\r\n gax.set_ylim(*params.ylim)\r\n gax.grid(True)\r\n \r\n for ax in grid_axes:\r\n Formats.set_tickfont(ax)\r\n \r\n fig.savefig(fname, dpi=params._DPI, bbox_inches='tight')\r\n print 'Saved figure as', fname", "title": "" }, { "docid": "16053c21a69757fc5159c632e4c3b3c3", "score": "0.55319387", "text": "def plot_data(x, y):\n plt.plot(x, y, 'bx')\n plt.axis([-5, 24, -5, 25])\n plt.ylabel('Profit in $10,000s')\n plt.xlabel('Population of City in 10,000s')\n plt.savefig('plot_data.png')", "title": "" }, { "docid": "ff916f7b451a4655fd935f703295183a", "score": "0.5531067", "text": "def test_plotter(self):\n self.material.plot('days', 'adens',\n names=['Xe135', 'U235'])", "title": "" }, { "docid": "c37e9ad1d57d4b710d9bad0446b54285", "score": "0.55304414", "text": "def plot(self):\n self.walls.plot()\n self.doors.plot()\n self.windows.plot()", "title": "" }, { "docid": "6b20ad6303ba8fd7cb26258ffd331d08", "score": "0.5530157", "text": "def charting():\n return render_template(\"Money_Choropleth.html\")", "title": "" }, { "docid": "949d9dfa038ef6625c29d75672806071", "score": "0.5526219", "text": "def visualize(self):\r\n # Make something pretty like http://www.harrisgeospatial.com/docs/EM1_SurfacePlot.html\r\n # https://matplotlib.org/basemap/users/examples.html\r\n\r\n ax = plt.gca()\r\n\r\n # cmap_fire = colors.get_cmap('hot')\r\n # cmap_fire.set_under('k', alpha=0)\r\n # \r\n # plt.pcolor(self.list_x[:, 0], self.list_x[:, 1], \r\n # np.diag(self.covar_env), vmin=0.01, vmax=1.5, \r\n # cmap=cmap_fire, zorder=1)\r\n # plt.colorbar()\r\n\r\n if (self.b_terrain):\r\n plt.pcolor(self.x_plt, self.y_plt, self.map_terrain,\r\n cmap='Greens_r')\r\n\r\n ax.scatter(self.list_q[:, 0], self.list_q[:, 1], color='black', marker='o')\r\n ax.set_xlim([0, self.env_size[0]])\r\n ax.set_ylim([0, self.env_size[1]])", "title": "" }, { "docid": "94288a96d7f8848fba6777605c4f6aa5", "score": "0.552575", "text": "def plot(self, **kwargs):\n return uwoesc_plot(self.WOE_df, self.targ_var, **kwargs)", "title": "" }, { "docid": "639f8bfcb3a6ca9c7f24fa94c3623825", "score": "0.5523608", "text": "def plot(self):\n super().plot(color=self.color)\n self.doors.plot()\n self.windows.plot()", "title": "" }, { "docid": "0c859d880409f2d4268d9e78cce937a4", "score": "0.55194575", "text": "def create_plot(Pi, a_prior, b_prior):\n\n # generate prior probability distribution\n n = 1000\n a = a_prior\n b = b_prior\n\n x = np.linspace(0, 1, n)\n dist = beta(a, b)\n p = dist.pdf(x)\n\n s1 = 
ColumnDataSource(data=dict(x=x, p=p))\n s2 = ColumnDataSource(data=dict(params=[Pi, a_prior, b_prior, a, b]))\n\n # arrays for the area under the curve patch\n xs = np.hstack((x, [1, 0]))\n ys = np.hstack((p, [0, 0]))\n s3 = ColumnDataSource(data=dict(x=xs, y=ys))\n\n # plot probability distribution\n plot = Figure(title='Posterior Distribution')\n plot.xaxis.axis_label = 'Probability of Heads (-)'\n plot.yaxis.axis_label = 'Probability Density (-)'\n plot.line('x', 'p', source=s1, line_width=4)\n plot.patch('x', 'y', source=s3, alpha=0.25, line_width=0)\n\n # calculate mode of prior\n if a == 1 and b == 1:\n mode_str = \"any value\"\n else:\n mode_str = str(round((a - 1.0) / (a + b - 2.0), 7))\n\n # add current stats of simulation\n text = \"\"\"<b>True Probability:</b> {:g}<br>\n <b>Number of Heads:</b> {:d}<br>\n <b>Number of Tails:</b> {:d}<br>\n <b>Mode:</b> {:s}<br>\n <b>Variance: </b> {:g}\n \"\"\".format(Pi, a - a_prior, b - b_prior, mode_str, 1.0 / 12)\n div = Div(text=text)\n\n # create button widget and JS callback\n with open(os.path.join('flask_app', 'static', 'callback.js'), 'r') as fp:\n code = fp.read()\n\n callback = CustomJS(args=dict(s1=s1, s2=s2, s3=s3, div=div), code=code)\n button = Button(label='Flip Coin', callback=callback)\n\n # combine button and plot into one object and return components\n widgets = row(button, div)\n layout = column(widgets, plot)\n\n return components(layout)", "title": "" }, { "docid": "0fa57844a4283bcb0ee833a40fc458ad", "score": "0.5518055", "text": "def test_plot_group():\n\n htm.plotting.plot(htm.diffusivities)\n plt.clf()", "title": "" }, { "docid": "7a787b8f45aeede8f47e48b79f76c2e2", "score": "0.55127513", "text": "def plot(self):\n gs = gridspec.GridSpec(1, 3)\n\n plt.figure()\n ax = plt.subplot(gs[0, 0]) # row 0, col 0\n plt.scatter(self.y,self.y_hat)\n\n\n ax = plt.subplot(gs[0, 1]) # row 0, col 1\n residual = []\n for i in range(len(self.y_hat)):\n residual.append(abs(self.y_hat[i]-self.y[i]))\n residual = pd.DataFrame(residual)\n residual.plot.kde()\n plt.xlabel(\"residuals\")\n plt.ylabel(\"Probability density\")\n title =\"Mean : \" +str(float(residual.mean()))+ \" Variance : \" +str(float(residual.var()))\n plt.title(title)\n plt.show()\n # plt.plot([0,1])\n ax = plt.subplot(gs[0,2]) # row 1, span all columns\n # plt.plot([0,1])\n objects = []\n if self.fit_intercept==True:\n objects.append(1)\n for i in self.X:\n objects.append(i)\n y_pos = np.arange(len(objects))\n performance = []\n for i in self.theta:\n performance.append(i)\n \n plt.bar(y_pos, performance, align='center', alpha=0.5)\n plt.xticks(y_pos, objects)\n plt.ylabel('Coefficients')\n plt.title('Features and Coefficients')\n plt.show()", "title": "" }, { "docid": "27354aa725ffd77d7ef48d71bd7906ce", "score": "0.55079454", "text": "def test_plot_slice():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"lr\", metric=[\"f1\", \"recall\"], n_trials=3)\n atom.plot_slice(display=False)", "title": "" }, { "docid": "0714511e534b31d8743babbd5b323353", "score": "0.5507358", "text": "def plots(self, show=['populations', 'fields', 'mixing angle'], \n loc='upper right', coherences=False, kwargs=None):\n if self.rho is None:\n print('simulation hasn\\'t been run yet')\n print('running simulation...')\n self.runsim()\n \n # could probably use kwargs here to allow passing in axes\n def plot_ax(ax, title): \n ax.set_title(title)\n ax.set_xlim((self.t[0], self.t[-1]))\n return ax\n \n def pop_plot(ax, title):\n ax = plot_ax(ax, title)\n for n,p in 
enumerate(self.populations):\n ax.plot(self.t, np.real(p), label=rf'$\\rho[{n},{n}]$')\n if coherences:\n for n,c in zip(self.idx_c, self.coherences):\n # TODO: calculate m,n from self.idx_c\n ax.plot(self.t, np.real(c))#, label=f'rho_{m,n}')\n return ax\n \n def field_plot(ax, title):\n ax = plot_ax(ax, title)\n for i,f in enumerate(self.fields): # these are lambda functions\n if type(f) == sp.Mul: \n # TODO: redefine f as lambdified f\n assert len(f.free_symbols) == 1, \"multivariable fields not supported\"\n arg = list(f.free_symbols)[0]\n f = lambdify(arg, f)\n ax.plot(self.t, [np.real(f(t)) for t in self.t], \n label=rf'$\\Omega${i+1}')\n return ax \n \n def mixing_plot(ax, title):\n ax = plot(ax, title)\n assert len(self.fields) == 2\n f1, f2 = self.fields \n ax.plot(self.t, [np.atan(f2(t)/f1(t)) for t in self.t]) #TODO: check this\n return ax\n \n # plotdict = {plottype: propdict, ...}\n # want more plot types? add another dict entry and plot function\n plotdict = OrderedDict({'populations': \n {'show': False, \n 'title':'Density matrix elements',\n 'plot_func': pop_plot},\n 'fields': \n {'show': False, \n 'title':'Applied fields',\n 'plot_func': field_plot},\n 'mixing angle': \n {'show': False, \n 'title': 'State mixing angle',\n 'plot_func': mixing_plot}\n })\n\n for key in show:\n if key in plotdict:\n plotdict[key]['show'] = True\n \n fig, axes = plt.subplots(1, sum([plotdict[key]['show'] for key in \n plotdict]), **kwargs)\n if type(axes) != np.ndarray: # only one subplot\n axes = [axes]\n\n ax_idx = 0\n for propdict in plotdict.values():\n if propdict['show']:\n axes[ax_idx] = propdict['plot_func'](axes[ax_idx], \n propdict['title'])\n axes[ax_idx].legend(loc=loc)\n ax_idx += 1\n\n return fig, axes", "title": "" }, { "docid": "b89127d96b642f0613e3f3e222b7f479", "score": "0.5506431", "text": "def plot(self):\n return self._plot(self.runs)", "title": "" }, { "docid": "8a95114beba9ed2b0b6d7677071cdd21", "score": "0.55000037", "text": "def visualizer(self):\n plt.subplot(121) #superimposed image\n plt.imshow(self.sum_data, vmin=0, vmax=1500, cmap=self.irissjicolor)\n plt.scatter(self.x_area, self.y_area, s=0.0005)\n plt.plot(self.x_area[self.h], self.y_area[self.h],'g')\n\n plt.subplot(122) #time ditance image\n plt.imshow(self.cut[:,self.h-self.eps:self.h+self.eps,:].mean(1).T, vmin=0, vmax=30, cmap=self.irissjicolor,aspect=0.4)\n for i in range(self.all_pos.shape[0]):\n plt.plot(self.all_pos[i,0], self.funcs[i])", "title": "" } ]
b5800e48092206b95caebfbb2b34ccd2
Initialize a generic file reader with batching for list of files
[ { "docid": "4c72d0005257cfd74efa0b155be92ac2", "score": "0.6491739", "text": "def __init__(self, records_list, image_options={}):\n print(\"Initializing Batch Dataset Reader...\")\n print(image_options)\n self.files = records_list\n self.image_options = image_options\n self._read_images(image_options.get(\"is_TIFF\", False))", "title": "" } ]
[ { "docid": "5a66de5c5579ae4e5898e279c0a111d4", "score": "0.6745472", "text": "def reader_for_list(filelist):\n\n for filename in open(filelist):\n filename = filename.rstrip()\n logging.info(str(filename))\n \n for x in reader(filename):\n yield x", "title": "" }, { "docid": "c4e6ff954a013960fa38977b7aaaafd7", "score": "0.6554994", "text": "def _native_reader(self, input_path, **kwargs):", "title": "" }, { "docid": "9255f0f8566d94d8d9728e662dd86fb3", "score": "0.64377636", "text": "def file_reader(self,\n filename,\n mode=\"train\",\n batch_size=32,\n max_seq_len=126):\n\n def wrapper():\n fread = io.open(filename, \"r\", encoding=\"utf-8\")\n headline = next(fread)\n headline = headline.strip().split('\\t')\n assert len(headline) == 2 and headline[0] == \"text_a\" and headline[\n 1] == \"label\"\n buf = []\n for line in fread:\n words, labels = line.strip(\"\\n\").split(\"\\t\")\n if len(words) < 1:\n continue\n word_ids = self.word_to_ids(words.split(\"\\002\"))\n label_ids = self.label_to_ids(labels.split(\"\\002\"))\n assert len(word_ids) == len(label_ids)\n word_ids = word_ids[0:max_seq_len]\n words_len = np.int64(len(word_ids))\n word_ids += [0 for _ in range(max_seq_len - words_len)]\n label_ids = label_ids[0:max_seq_len]\n label_ids += [0 for _ in range(max_seq_len - words_len)]\n assert len(word_ids) == len(label_ids)\n yield word_ids, label_ids, words_len\n fread.close()\n\n return wrapper", "title": "" }, { "docid": "72023ef80ad503f7ad94a56e4fb493a1", "score": "0.63728136", "text": "def __init__(self, records_list, image_options={}):\n print(\"Initializing Batch Dataset Reader...\")\n print(image_options)\n self.files = records_list\n self.image_options = image_options\n self._read_images()", "title": "" }, { "docid": "dc2c9f4dd5cc2c7892dc84f7c5b74ac0", "score": "0.63126254", "text": "def __iter__(self):\n for i, file_obj in enumerate(self._file_objs):\n if not i % self._batch_size:\n # On a batch boundary, load the next set.\n _LoadFiles(self._file_objs[i:i + DEFAULT_BATCH_SIZE])\n yield file_obj", "title": "" }, { "docid": "a14b7dd8e4eaa2e66471fd290e404f09", "score": "0.6295635", "text": "def get_readers(self, fname, **kwargs):\n # XXX: skip binary files that are not supported\n # http://www.garykessler.net/library/file_sigs.html\n _log.info(\"processing\\t%s\", fname)\n\n if isinstance(fname, list) or isinstance(fname, set) or isinstance(fname, tuple):\n return itertools.chain(*map(self.get_readers, fname))\n elif is_url(fname):\n return self.get_readers_from_url(fname)\n elif is_url_file(fname):\n return self.get_readers_from_url_file(fname, **kwargs)\n elif is_excel_file(fname):\n return self.get_readers_from_excel_file(fname, **kwargs)\n elif is_old_excel_file(fname):\n return self.get_readers_from_old_excel_file(fname, **kwargs)\n elif os.path.isdir(fname):\n dataiters = []\n args = {'kwargs' : kwargs, 'dataiters' : dataiters}\n os.path.walk(fname, self.get_readers_walk_cb, args)\n return dataiters\n elif zipfile.is_zipfile(fname):\n return self.get_readers_from_zip_file(fname, **kwargs)\n elif is_html_file(fname):\n return self.get_readers_from_html_file(fname, **kwargs)\n return self.get_readers_from_text_file(fname, **kwargs)", "title": "" }, { "docid": "3e297744b29c437ea920cb92caad629e", "score": "0.62550306", "text": "def from_file_list(cls, file_list):\n for file in file_list:\n pass", "title": "" }, { "docid": "55651828ccbc9af81daf4fd96b70ae48", "score": "0.6253674", "text": "def __init__(self, reader, batch_size=1,\n transform_fn=None,\n 
shuffling_queue_capacity=0):\n super(BatchedDataLoader, self).__init__()\n self.reader = reader\n self.batch_size = batch_size\n self.transform_fn = transform_fn or torch.as_tensor\n\n # _batch_acc accumulates samples for a single batch.\n self._batch_acc = []\n self.shuffling_queue_capacity = shuffling_queue_capacity\n self._in_iter = None", "title": "" }, { "docid": "769ae991eb19948cce1ebcd86e118ac5", "score": "0.618804", "text": "def _read_file(self, dataset):\n if dataset.mode.lower() == 'pil-image1':\n if self.flow:\n # map flow\n flow = {f.stem: f for f in self.flow}\n self.file_objects = [ImageFile(fp).attach_flow(flow[fp.stem]) for fp in\n self.file_names]\n else:\n self.file_objects = [ImageFile(fp) for fp in self.file_names]\n elif dataset.mode.upper() in _ALLOWED_RAW_FORMAT:\n self.file_objects = [\n RawFile(fp, dataset.mode, (dataset.width, dataset.height))\n for fp in self.file_names]\n return self", "title": "" }, { "docid": "e56a57f486860652424bdd40cd82ead9", "score": "0.61723363", "text": "def __init__(self, reader, batch_size=1, collate_fn=decimal_friendly_collate,\n shuffling_queue_capacity=0):\n super(DataLoader, self).__init__()\n self.reader = reader\n self.batch_size = batch_size\n self.collate_fn = collate_fn\n\n # _batch_acc accumulates samples for a single batch.\n self._batch_acc = []\n self.shuffling_queue_capacity = shuffling_queue_capacity\n self._in_iter = None", "title": "" }, { "docid": "aa07feb9cb258e12a1a1576edb9460b9", "score": "0.61390257", "text": "def __init__(self, files='multibinit.files'):\n self.files = os.path.basename(files)\n if os.path.exists(files):\n self.basedir = os.path.dirname(os.path.abspath(files))\n self.read_files()\n if os.path.isfile(self.basedir + os.sep + self.files_list['in']):\n self.read_input()", "title": "" }, { "docid": "094b76851f809590bf11dc0abf58e753", "score": "0.60855985", "text": "def get_readers_walk_cb(self, args, dirname, fnames):\n kwargs = args['kwargs']\n dataiters = args['dataiters']\n for fname in fnames:\n fullname = os.path.join(dirname, fname)\n if not os.path.isdir(fullname):\n for dataiter in self.get_readers(fullname, **kwargs):\n dataiter.fname = fname\n dataiter.file_index = len(dataiters)\n dataiters.append(dataiter)", "title": "" }, { "docid": "964ae9ec6ac1ca3f04029456b159ebb9", "score": "0.6043917", "text": "def _batch_inputs(files, batch_size, num_epochs, num_preprocess_threads):\r\n if not num_epochs: num_epochs = None\r\n\r\n with tf.name_scope('input'):\r\n filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs)\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_queue)\r\n single_example = _decode_single_example(serialized_example)\r\n #example_list =[ decode_single_example(serialized_example) for _ in range(10)]\r\n #batch_examples = tf.train.shuffle_batch(\r\n batch_examples = tf.train.batch(\r\n list(single_example),\r\n #example_list,\r\n batch_size=batch_size,\r\n num_threads=num_preprocess_threads,\r\n capacity=1000 + 3 * batch_size,\r\n allow_smaller_final_batch=True\r\n # Ensures a minimum amount of shuffling of examples.\r\n #min_after_dequeue=min_after_dequeue\r\n )\r\n return batch_examples", "title": "" }, { "docid": "8def306764f4ecfd9a8bd06f38aca173", "score": "0.60096717", "text": "def read_files(self, files):\n for f in files:\n self.read_file(f)", "title": "" }, { "docid": "923dde14166194e87844d0412137eceb", "score": "0.5980476", "text": "def from_file_list(prefix: str,\n default_tensor_name: str = \"arr_0\") -> Callable:\n 
check_argument_types()\n\n def load(files: List[str]) -> Iterable[np.ndarray]:\n for list_file in files:\n with open(list_file, encoding=\"utf-8\") as f_list:\n for line in f_list:\n path = os.path.join(prefix, line.rstrip())\n with np.load(path) as npz:\n yield npz[default_tensor_name]\n\n return load", "title": "" }, { "docid": "140a875f54f1c82607538550a0d85932", "score": "0.59626395", "text": "def prepare_reader(self, filename_queue, batch_size=1024):\n opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)\n if self.decode_zlib:\n reader = tf.TFRecordReader(options=opts)\n else:\n reader = tf.TFRecordReader()\n _, serialized_examples = reader.read_up_to(filename_queue, batch_size)\n\n tf.add_to_collection(\"serialized_examples\", serialized_examples)\n return self.prepare_serialized_examples(serialized_examples)", "title": "" }, { "docid": "3422ef4f3c61cb99a12d2c8956f19e00", "score": "0.5951589", "text": "def _read(cls, filepath_or_buffer, **kwargs):\n if not (isinstance(filepath_or_buffer, str) and \"*\" in filepath_or_buffer):\n warnings.warn(\"Defaulting to Modin core implementation\")\n return cls.single_worker_read(\n filepath_or_buffer,\n single_worker_read=True,\n **kwargs,\n )\n filepath_or_buffer = sorted(glob.glob(filepath_or_buffer))\n\n if len(filepath_or_buffer) == 0:\n raise ValueError(\n f\"There are no files matching the pattern: {filepath_or_buffer}\"\n )\n\n partition_ids = []\n lengths_ids = []\n widths_ids = []\n\n if len(filepath_or_buffer) != NPartitions.get():\n # do we need to do a repartitioning?\n warnings.warn(\"can be inefficient partitioning\")\n\n for file_name in filepath_or_buffer:\n partition_id = cls.deploy(\n cls.parse,\n 3,\n dict(\n fname=file_name,\n **kwargs,\n ),\n )\n partition_ids.append(partition_id[:-2])\n lengths_ids.append(partition_id[-2])\n widths_ids.append(partition_id[-1])\n\n lengths = cls.materialize(lengths_ids)\n widths = cls.materialize(widths_ids)\n\n # while num_splits is 1, need only one value\n partition_ids = cls.build_partition(partition_ids, lengths, [widths[0]])\n\n new_index = cls.frame_cls._partition_mgr_cls.get_indices(\n 0, partition_ids, lambda df: df.axes[0]\n )\n new_columns = cls.frame_cls._partition_mgr_cls.get_indices(\n 1, partition_ids, lambda df: df.axes[1]\n )\n\n return cls.query_compiler_cls(\n cls.frame_cls(partition_ids, new_index, new_columns)\n )", "title": "" }, { "docid": "12c7cb14acea6d581e6613eb84b903b2", "score": "0.59282154", "text": "def generate_data(file_list, labels, batch_size):\n\ti = 0\n\tfile_list = os.listdir(directory)\n\twhile True:\n\t\timage_batch = []\n\t\tfor b in range(batch_size):\n\t\t\tif i == len(file_list):\n\t\t\t\ti = 0\n\n\t\t\t\tcombined = list(zip(file_list, labels))\n\t\t\t\trandom.shuffle(file_list)\n\t\t\tsample = file_list[i]\n\t\t\ti += 1\n\t\t\timage = cv2.resize(cv2.imread(sample[0]), INPUT_SHAPE)\n\t\t\timage_batch.append((image.astype(float) - 128) / 128)\n\n\t\tyield np.array(image_batch)", "title": "" }, { "docid": "2e746881a32b27ccb538eaa05ce85baf", "score": "0.5927274", "text": "def _read_batches(self, train_fns, test_fn):\n with open(test_fn, 'rb') as f:\n test_batch = pickle.load(f, encoding='bytes')\n\n # Note, that we ignore the two keys: \"batch_label\" and \"filenames\".\n test_labels = np.array(test_batch['labels'.encode()])\n test_samples = test_batch['data'.encode()]\n \n # Read training batches.\n for i, fn in enumerate(train_fns):\n with open(fn, 'rb') as f:\n curr_batch = pickle.load(f, encoding='bytes')\n\n curr_labels = 
np.array(curr_batch['labels'.encode()])\n curr_samples = curr_batch['data'.encode()]\n\n if i == 0:\n train_labels = curr_labels\n train_samples = curr_samples\n else:\n train_labels = np.concatenate((train_labels, curr_labels))\n train_samples = np.concatenate((train_samples, curr_samples),\n axis=0)\n\n train_inds = np.arange(train_labels.size)\n test_inds = np.arange(train_labels.size, \n train_labels.size + test_labels.size)\n\n labels = np.concatenate([train_labels, test_labels])\n labels = np.reshape(labels, (-1, 1))\n\n images = np.concatenate([train_samples, test_samples], axis=0)\n\n # Note, images are currently encoded in a way, that there shape\n # corresponds to (3, 32, 32). For consistency reasons, we would like to\n # change that to (32, 32, 3).\n images = np.reshape(images, (-1, 3, 32, 32))\n images = np.rollaxis(images, 1, 4)\n images = np.reshape(images, (-1, 32 * 32 * 3))\n # Scale images into a range between 0 and 1.\n images = images / 255\n\n self._data['in_data'] = images\n self._data['train_inds'] = train_inds\n self._data['test_inds'] = test_inds\n\n if self._data['is_one_hot']:\n labels = self._to_one_hot(labels)\n\n self._data['out_data'] = labels", "title": "" }, { "docid": "49a0bc79b62b99a3b19ed2368ac469fb", "score": "0.59251684", "text": "def prepare_reader(self, unused_filename_queue):\n raise NotImplementedError()", "title": "" }, { "docid": "6cfde292013730d79000e7717df91ee2", "score": "0.5916643", "text": "def create_reader(path, is_training, input_dim, num_label_classes): \n return MinibatchSource(CTFDeserializer(path, StreamDefs(\n labels = StreamDef(field='labels', shape=num_label_classes),\n features = StreamDef(field='features', shape=input_dim)\n )), randomize = is_training, max_sweeps = INFINITELY_REPEAT if is_training else 1)", "title": "" }, { "docid": "322a9d66a3d2684d798100801e0ecac0", "score": "0.5894253", "text": "def read_list(self):\n folder = os.path.join(args.txt_root, 'recognition_{}_txt'.format(args.split), '*.txt')\n txt_lst = glob.glob(folder)\n\n if args.is_shuffle:\n random.shuffle(txt_lst)\n\n for txt in txt_lst:\n print('parsing txt file : {}'.format(txt))\n with open(txt, 'r') as f:\n for line in f.readlines():\n try:\n # do something, extract info from a txt file and pack to item.\n item = master_label_operation(line)\n except Exception as e:\n print(\"Parsing txt file met error for %s, detail: %s\" % (txt, e))\n continue\n yield item", "title": "" }, { "docid": "00205120d1ef1b9f0f6c7f8c0152fd4b", "score": "0.5875306", "text": "def read_lines(files):\n for file in files:\n yield np.load(file, allow_pickle=True)", "title": "" }, { "docid": "b607062f35cfa37f95d6dfa9e4af0c38", "score": "0.58415675", "text": "def file_reader_helper(rspecifier: str, filetype: str, train=True,\n return_shape: bool = False, return_dict: bool = False,\n transform: Transformation = None):\n if filetype == \"mat\":\n return KaldiReader(rspecifier, return_shape=return_shape, return_dict=return_dict,\n transform=transform, train=train, num_workers=0)\n elif filetype == \"hdf5\":\n return HDF5Reader(rspecifier, return_shape=return_shape, return_dict=return_dict,\n transform=transform, train=train, num_workers=0)\n elif filetype == \"sound\":\n return SoundReader(rspecifier, return_shape=return_shape, return_dict=return_dict,\n transform=transform, train=train, num_workers=0)\n else:\n raise NotImplementedError(f\"filetype={filetype}\")", "title": "" }, { "docid": "2ca80040c990cf9b4f2ee7f7485822dd", "score": "0.58358604", "text": "def open_file(filename, 
batch_size, **kw_args):\n return TableReader(filename, batch_size, **kw_args)", "title": "" }, { "docid": "3b2552b131f7d8a8b73a66e3630df0bf", "score": "0.58069384", "text": "def __init__(self, size, file_path, n_files):\n self.file_path = file_path\n self.n_files = n_files\n self.totalSize = size\n self.eachSize = size // n_files\n self.bufferList = []\n for i in range(n_files):\n self.bufferList.append(slBuffer_oneFile(self.eachSize, i))\n self.sample_round = -1 # this is the round-robin index for sample from the list of slBuffer_oneFile\n self.sample_list = np.zeros(self.n_files, dtype=np.bool)", "title": "" }, { "docid": "8ea849c20c3e228eccba697c5a677215", "score": "0.5805716", "text": "def initialise_reader(self, data_param, task_param):\n if not self.names:\n tf.logging.fatal('Please specify data names, this should '\n 'be a subset of SUPPORTED_INPUT provided '\n 'in application file')\n raise ValueError\n self._names = [name for name in self.names\n if vars(task_param).get(name, None)]\n\n self._input_sources = {name: vars(task_param).get(name)\n for name in self.names}\n data_to_load = {}\n for name in self._names:\n for source in self._input_sources[name]:\n try:\n data_to_load[source] = data_param[source]\n except KeyError:\n tf.logging.fatal(\n 'reader name [%s] requires [%s], however it is not '\n 'specified as a section in the config, '\n 'current input section names: %s',\n name, source, list(data_param))\n raise ValueError\n\n self._file_list = util_csv.load_and_merge_csv_files(data_to_load)\n self.output_list = _filename_to_image_list(\n self._file_list, self._input_sources, data_param)\n for name in self.names:\n tf.logging.info(\n 'image reader: loading [%s] from %s (%d)',\n name, self.input_sources[name], len(self.output_list))", "title": "" }, { "docid": "7eb99284948d037508a06e73f352c285", "score": "0.5796099", "text": "def read_batch(self):\n batch = []\n while True:\n for value in self.inputDataset.open('r'):\n batch.append(value)\n if len(batch) == self.inputBatchSize:\n yield self.process_training_batch(batch=batch)\n batch = []", "title": "" }, { "docid": "0ac8883232b6731b63310e0ff371556c", "score": "0.5792506", "text": "def generate_batch_from_files(self):\n\t import nibabel as nib\n\n\t np.random.seed(self.seed) # Set a random seed\n\n\t idx = 0\n\t idy = 0\n\n\t while True:\n\n\t \"\"\"\n\t Pack N_IMAGES files at a time to queue\n\t \"\"\"\n\t NUM_QUEUED_IMAGES = 1 + self.batch_size // self.num_slices_per_scan # Get enough for full batch + 1\n\t \n\t for idz in range(NUM_QUEUED_IMAGES):\n\n\t label_filename = self.filenames[idx][1]\n\t img_filename = self.filenames[idx][0]\n\t img = np.array(nib.load(img_filename).dataobj)\n\t img = img[:,:,:] \n\t img = self.preprocess_img(img)\n\n\t label = np.array(nib.load(label_filename).dataobj)\n\t \n\n\t if idz == 0:\n\t img_stack = img\n\t label_stack = label\n\n\t else:\n\n\t img_stack = np.concatenate((img_stack,img), axis=self.slice_dim)\n\t label_stack = np.concatenate((label_stack,label), axis=self.slice_dim)\n\t \n\t idx += 1 \n\t if idx >= len(self.filenames):\n\t idx = 0\n\t np.random.shuffle(self.filenames) # Shuffle the filenames for the next iteration\n\t \n\t img = img_stack\n\t label = label_stack\n\n\t num_slices = img.shape[self.slice_dim]\n\t \n\t if self.batch_size > num_slices:\n\t raise Exception(\"Batch size {} is greater than\"\n\t \" the number of slices in the image {}.\"\n\t \" Data loader cannot be used.\".format(self.batch_size, num_slices))\n\n\t \"\"\"\n\t We can also randomize the slices so 
that no 2 runs will return the same slice order\n\t for a given file. This also helps get slices at the end that would be skipped\n\t if the number of slices is not the same as the batch order.\n\t \"\"\"\n\t if self.augment:\n\t slice_idx = np.random.choice(range(num_slices), num_slices)\n\t img = img[:,:,slice_idx] # Randomize the slices\n\t label = label[:,:,slice_idx]\n\n\t name = self.filenames[idx]\n\t \n\t if (idy + self.batch_size) < num_slices: # We have enough slices for batch\n\t img_batch, label_batch = img[:,:,idy:idy+self.batch_size], label[:,:,idy:idy+self.batch_size] \n\n\t else: # We need to pad the batch with slices\n\n\t img_batch, label_batch = img[:,:,-self.batch_size:], label[:,:,-self.batch_size:] # Get remaining slices\n\n\t if self.augment:\n\t img_batch, label_batch = self.augment_data(img_batch, label_batch)\n\t \n\t if len(np.shape(img_batch)) == 3:\n\t img_batch = np.expand_dims(img_batch, axis=-1)\n\t if len(np.shape(label_batch)) == 3:\n\t label_batch = np.expand_dims(label_batch, axis=-1)\n\t \n\t yield np.transpose(img_batch, [2,0,1,3]).astype(np.float32), np.transpose(label_batch, [2,0,1,3]).astype(np.float32)\n\n\n\t idy += self.batch_size\n\t if idy >= num_slices: # We finished this file, move to the next\n\t idy = 0\n\t idx += 1\n\n\t if idx >= len(self.filenames):\n\t idx = 0\n\t np.random.shuffle(self.filenames) # Shuffle the filenames for the next iteration", "title": "" }, { "docid": "0944566892838a7db6791ca3205e41da", "score": "0.5787248", "text": "def __init__(self, file_pattern, dataset_type=DataSet):\n self.filelist = glob(file_pattern)\n self.filelist.sort()\n self.dataset = dataset_type(self.filelist)\n self.high = len(self.filelist)\n self.preload_range[1] = len(self.filelist) - 1\n src = self.dataset[0]\n if max(src.shape) > 200:\n step = 2\n else:\n step = 1\n self.xslice, self.yslice, self.zslice = (0, src.shape[0], step), \\\n (0, src.shape[1], step), (0, src.shape[2], step)\n self.dataset.slices = (slice(*self.xslice),\n slice(*self.yslice),\n slice(*self.zslice))\n self.sync_trait('time', self.dataset)", "title": "" }, { "docid": "48627fc9f6cdec06fe496b242890b084", "score": "0.57825255", "text": "def mutli_file_generator(self, files):\n for f in files:\n for data in self.record_generator(f):\n yield data", "title": "" }, { "docid": "865a359d311e4162266ac2e55a11eaf6", "score": "0.57821125", "text": "def _input_fn():\n filenames = tf.data.Dataset.list_files(file_pattern)\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(\n _parse_example,\n num_parallel_calls=multiprocessing.cpu_count())\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=10)\n return dataset", "title": "" }, { "docid": "ea2fb9fac8841269da91aedb0c1e216b", "score": "0.5769808", "text": "def _read_data(file_read_func, file_pattern, shuffle, num_readers,\n filenames_shuffle_buffer_size, num_epochs, read_block_length,\n shuffle_buffer_size):\n # Shard, shuffle, and read files.\n dataset = tf.data.Dataset.list_files(\n file_pattern=file_pattern, shuffle=shuffle)\n if shuffle:\n dataset = dataset.shuffle(filenames_shuffle_buffer_size)\n elif num_readers > 1:\n logging.warning('`shuffle` is false, but the input data stream is '\n 'still slightly shuffled since `num_readers` > 1.')\n dataset = dataset.repeat(num_epochs or None)\n\n records_dataset = dataset.interleave(\n map_func=file_read_func,\n cycle_length=num_readers,\n block_length=read_block_length,\n 
num_parallel_calls=tf.data.experimental.AUTOTUNE,\n deterministic=shuffle)\n\n if shuffle:\n records_dataset = records_dataset.shuffle(shuffle_buffer_size)\n return records_dataset", "title": "" }, { "docid": "a56b4e287ab27bde290fad1a6b28b7b0", "score": "0.5767945", "text": "def __init__(self, dir_path, maxlen, vocab, tokenize_text, to_lower):\n \n # Read dataset arguments\n self.dir_path = dir_path\n self.maxlen = maxlen\n self.vocab = vocab\n self.tokenize_text = tokenize_text\n self.to_lower = to_lower\n self.file_list_collection = []\n self.overallMaxlen = 0\n\n # Leaving 2 free threads for other purposes\n self.num_cpu = multiprocessing.cpu_count() - 2\n if self.num_cpu <= 0:\n self.num_cpu = 1\n\n file_list_full = []\n # Reading data in the specified folder\n dir_path_curr = glob.glob(dir_path)\n # Traverse every file in the directory\n for file_path in dir_path_curr:\n file_list_full.append(file_path) # Keep track of the filename\n \n batch_size = len(file_list_full) // (self.num_cpu)\n if (len(file_list_full) % self.num_cpu > 0): batch_size += 1\n \n self.file_list_collection = [file_list_full[i:i+batch_size] for i in range(0, len(file_list_full), batch_size)]", "title": "" }, { "docid": "afc32b2d1bc77ebe9160be6afe2a8a77", "score": "0.5761625", "text": "def split_input(cls, mapper_spec):\n params = _get_params(mapper_spec)\n file_paths = params[cls.FILE_PATHS_PARAM]\n\n if isinstance(file_paths, basestring):\n # This is a mechanism to allow multiple file paths (which do not contain\n # commas) in a single string. It may go away.\n file_paths = file_paths.split(\",\")\n\n file_sizes = {}\n\n for file_path in file_paths:\n fp = files.BufferedFile(file_path)\n fp.seek(0, 2)\n file_sizes[file_path] = fp.tell()\n\n shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)\n shards_per_file = shard_count // len(file_paths)\n\n if shards_per_file == 0:\n shards_per_file = 1\n\n chunks = []\n\n for file_path, file_size in file_sizes.items():\n file_chunk_size = file_size // shards_per_file\n for i in xrange(shards_per_file - 1):\n chunks.append(GoogleStorageLineInputReader.from_json(\n {cls.FILE_PATH_PARAM: file_path,\n cls.INITIAL_POSITION_PARAM: file_chunk_size * i,\n cls.END_POSITION_PARAM: file_chunk_size * (i + 1)}))\n chunks.append(GoogleStorageLineInputReader.from_json(\n {cls.FILE_PATH_PARAM: file_path,\n cls.INITIAL_POSITION_PARAM: file_chunk_size * (shards_per_file - 1),\n cls.END_POSITION_PARAM: file_size}))\n\n return chunks", "title": "" }, { "docid": "89d249c8eea9c0674eb41f201004f0d3", "score": "0.5759515", "text": "def files(file_list):\n for filename in file_list:\n f = open(filename)\n for line in f:\n yield line", "title": "" }, { "docid": "574d3c2e24181d1c367e7b925083104f", "score": "0.57542187", "text": "def get_input_batches(file_list, batch_size):\n filename_queue = tf.train.string_input_producer(file_list, shuffle=True, num_epochs=None)\n reader = tf.TextLineReader()\n _, line = reader.read(filename_queue)\n\n min_after_dequeue = 1000\n capacity = min_after_dequeue + 3 * batch_size\n return tf.train.shuffle_batch(\n [line],\n batch_size=batch_size,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue)", "title": "" }, { "docid": "890ef672362f1c5faadfdefad809cff2", "score": "0.57462305", "text": "def file_reader(path, format_name='all', filter_name='all', block_size=4096):\n with new_archive_read(format_name, filter_name) as archive_p:\n try:\n block_size = stat(path).st_blksize\n except (OSError, AttributeError): # pragma: no cover\n pass\n 
ffi.read_open_filename_w(archive_p, path, block_size)\n yield ArchiveRead(archive_p)", "title": "" }, { "docid": "b9de91fa8dfb5734132fb76ebec4e071", "score": "0.5745391", "text": "def __init__(self, filename, batch_size, **kw_args):\n self.streamer = mtb.Streamer(filename, **kw_args)\n self.vars = []\n self.batch_size = batch_size\n self.queues = []", "title": "" }, { "docid": "789a9231387ff3e8fc0285f584ec4605", "score": "0.5742987", "text": "def _input_fn():\r\n input_files = sorted(list(tf.gfile.Glob(input_dir)))\r\n logging.info(\"Reading files from %s\", input_dir)\r\n include_target_column = (mode != tf.contrib.learn.ModeKeys.INFER)\r\n\r\n reader_fn = tf.TFRecordReader(\r\n options=tf.python_io.TFRecordOptions(\r\n compression_type=TFRecordCompressionType.GZIP))\r\n\r\n features = tf.contrib.learn.io.read_batch_features(\r\n file_pattern=input_dir,\r\n batch_size=batch_size,\r\n queue_capacity=3 * batch_size,\r\n randomize_input=mode == tf.contrib.learn.ModeKeys.TRAIN,\r\n feature_queue_capacity=5,\r\n reader=reader_fn,\r\n features=self.featdef())\r\n target = None\r\n if include_target_column:\r\n target = features.pop('label')\r\n return features, target", "title": "" }, { "docid": "5758681154b1933cfb5a1b235ad6c7d5", "score": "0.5719632", "text": "def __init__(self, sample_directory: str, label_directory: str, batchsize: int, sample_suffix: str = '',\n label_suffix: str = '', verbose: bool = True):\n \n #\n # Search for and store the filenames\n #\n self.log(\"Collecting sample data...\")\n samples = load_files_in_dir(sample_directory, sample_suffix)\n self.log(\"Found {} sample files\".format(len(samples)))\n \n self.log(\"Collecting label data...\")\n labels = load_files_in_dir(label_directory, label_suffix)\n self.log(\"Found {} label files\".format(len(labels)))\n \n #\n # Calculate number of minibatches\n #\n n_mbs = np.int(np.ceil(len(labels) / batchsize))\n \n #\n # Determine shape of samples and labels\n #\n # TODO: Since this is a template, the shape determination has to modified to fit the specific task\n X_shape = (batchsize, 5)\n y_shape = (batchsize, 1)\n \n #\n # Set attributes of reader (these are required for the class to work properly)\n #\n self.verbose = verbose\n self.processes = list()\n self.samples = samples\n self.n_samples = len(samples)\n self.labels = labels\n self.n_labels = len(labels)\n self.batchsize = batchsize\n self.n_mbs = n_mbs\n self.X_shape = X_shape\n self.y_shape = y_shape", "title": "" }, { "docid": "430b44dfba3d1ea025d4d1fd66388440", "score": "0.57170373", "text": "def readFromFile(self):\n self.__data = []\n f = open(self.__filename, 'r')\n for i in range(625):\n line = f.readline()\n tok = line.split(',')\n clas = tok[0]\n lw = int(tok[1])\n ld = int(tok[2])\n rw = int(tok[3])\n rd = int(tok[4])\n self.__data.append([clas, lw, ld, rw, rd])", "title": "" }, { "docid": "38f5c1011d872094b31bfc1762b5efe6", "score": "0.57148725", "text": "def prepare_reader(self,\n filename_queue,\n max_quantized_value=2,\n min_quantized_value=-2):\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n return self.prepare_serialized_examples(serialized_example,\n max_quantized_value, min_quantized_value)", "title": "" }, { "docid": "4c9638f03bd1c3b48cff659d993e10dc", "score": "0.57091033", "text": "def load_pickle_files(input_files: List[str]) -> Iterator[Any]:\n for input_file in input_files:\n yield load_pickle_file(input_file)", "title": "" }, { "docid": "4ea77bc915d88711097a51b1d3ad221e", "score": "0.57086354", "text": 
"def example_process_in_batches_generator():\n\n files_lst = DataLoading.get_files_list('../../../data/*.json.bz2')\n generator = Batches.process_in_batches_generator(files_lst, read_func=DataLoading.read_compressed_bz2_json_file,\n func_to_apply=counter)\n results = list(generator)\n return results", "title": "" }, { "docid": "eb81c074316665369d2b21029611fef6", "score": "0.5699581", "text": "def open_for_read_iter(fnames: Iterable[str], gcp_prj: Optional[str] = None) -> Generator[TextIO, None, None]:\n for fname in fnames:\n with open_for_read(fname, gcp_prj) as f:\n yield f", "title": "" }, { "docid": "c9326f5b9b558dfe0789ef38546d17dd", "score": "0.5686911", "text": "def read_files(fns, patt=None, delim=None, comment=COMMENT_CHAR,\n kind='delimited', reverse=False, head=None, header_patt=None,\n filters=None, by_col_no=False, columns=(), linenos=None,\n add_filename=None, raw=False, width=None, clean_output=False,\n full_filenames=False, headerless=False, anypatt=False,\n add_colnos=False, add_linenos=False, noheader=False,\n merge_headers=False, null_value=DEFAULT_NULL_VALUE):\n if merge_headers:\n yield do_merge_headers(\n read_files(\n fns,\n patt=patt,\n delim=delim,\n comment=comment,\n kind=kind,\n reverse=reverse,\n head=head,\n header_patt=header_patt,\n filters=filters,\n by_col_no=by_col_no,\n columns=columns,\n linenos=linenos,\n add_filename=add_filename,\n raw=raw,\n width=width,\n clean_output=clean_output,\n full_filenames=full_filenames,\n headerless=headerless,\n anypatt=anypatt,\n add_colnos=add_colnos,\n add_linenos=add_linenos,\n noheader=noheader),\n null_value=null_value)\n return\n\n table = None\n prev_header = None\n shortnames = None\n for filename in fns:\n filetable = _read_input(filename, patt=patt, delim=delim,\n comment=comment, kind=kind,\n reverse=reverse, head=head,\n header_patt=header_patt, linenos=linenos,\n filters=filters, by_col_no=by_col_no,\n columns=columns, raw=raw, width=width,\n clean_output=clean_output, anypatt=anypatt,\n add_colnos=add_colnos, add_linenos=add_linenos)\n\n try:\n # Save the header from before we add filenames\n header = next(filetable)\n except StopIteration:\n continue\n\n # If there are multiple files, add a column of filenames\n # to the table.\n if (add_filename is None and len(fns) > 1) or add_filename:\n fn_to_add = filename\n if not full_filenames:\n if shortnames is None:\n shortnames = dict(zip(fns, shorten_filenames(fns)))\n fn_to_add = shortnames[filename]\n if headerless:\n header = [fn_to_add] + header\n else:\n header = ['FILE'] + header\n def _newfiletable(filetable, filename):\n '''\n Add the filename as the first column to each row\n '''\n for line in filetable:\n yield [filename] + line\n filetable = _newfiletable(filetable, fn_to_add)\n\n if table is None:\n if noheader:\n table = filetable\n else:\n table = itertools.chain([header], filetable)\n elif prev_header == header:\n # the header matches the previous file, so just stick\n # the data onto the existing table.\n table = itertools.chain(table, filetable)\n else:\n yield(table)\n if noheader:\n table = filetable\n else:\n table = itertools.chain([header], filetable)\n prev_header = header\n\n if table is not None:\n yield(table)", "title": "" }, { "docid": "412dabf8d13c0852fe971447217b663b", "score": "0.56868976", "text": "def example_process_in_batches():\n files_lst = DataLoading.get_files_list('../../../data/*.json.bz2')\n results = Batches.process_in_batches(files_lst, read_func=DataLoading.read_compressed_bz2_json_file,\n func_to_apply=counter, 
verbose=False)\n return results", "title": "" }, { "docid": "d00133951f0439b435f9d5ba514e236b", "score": "0.56820256", "text": "def threaded_reader(items_to_read, reader, max_threads=4):\r\n thread_pool = []\r\n\r\n def thread_process():\r\n \"\"\"\r\n The process inside the threads.\r\n\r\n 1) Get any files off the file queue\r\n 2) Read the file in chunks\r\n 3) Put a chunk onto a reply queue\r\n \"\"\"\r\n try:\r\n source = source_queue.pop(0)\r\n except IndexError:\r\n source = None\r\n while source:\r\n source_reader = reader.read_from_source(source)\r\n for chunk in dictset.page_dictset(source_reader, 256):\r\n reply_queue.put(chunk) # this will wait until there's a slot\r\n try:\r\n source = source_queue.pop(0)\r\n except IndexError:\r\n source = None\r\n\r\n\r\n source_queue = items_to_read.copy()\r\n\r\n # scale the number of threads, if we have more than the number of files\r\n # we're reading, will have threads that never complete\r\n t = min(len(source_queue), max_threads, 8)\r\n reply_queue = queue.Queue(t * 8)\r\n\r\n # start the threads\r\n for _ in range(t):\r\n thread = threading.Thread(target=thread_process)\r\n thread.daemon = True\r\n thread.start()\r\n thread_pool.append(thread)\r\n time.sleep(0.01) # offset the start of the threads\r\n\r\n # when the threads are all complete and all the records have been read from\r\n # the reply queue, we're done\r\n while any([t.is_alive() for t in thread_pool]) or not(reply_queue.empty()):\r\n try:\r\n # don't wait forever\r\n records = reply_queue.get(timeout=10) \r\n yield from records\r\n except queue.Empty:\r\n pass # most likely reason get being here is a race condition", "title": "" }, { "docid": "0119a3bf15e06358f2b9bfcb8f9fe1fb", "score": "0.56811017", "text": "def ReadFiles():\n\n with open(sys.argv[1]) as rubric:\n # with open(\"Samples/SampleRubric.csv\") as rubric:\n readin = rubric.readlines()\n global rubricData\n for line in readin:\n rubricData.append(line.strip(\"\\n\").split(\"|\"))\n\n with open(sys.argv[2]) as groupList:\n # with open(\"Samples/SampleInput.csv\") as groupList:\n readin = groupList.readlines()\n global groupInputData\n for line in readin:\n groupInputData.append(line.strip(\"\\n\").split(\",\"))", "title": "" }, { "docid": "f6c9a54bf322a9c7ccefaaf444a030eb", "score": "0.567539", "text": "def generate_batch_from_files(self):\n\t import nibabel as nib\n\n\t np.random.seed(self.seed) # Set a random seed\n\n\t idx = 0\n\t idy = 0\n\n\t while True:\n\n\t \"\"\"\n\t Pack N_IMAGES files at a time to queue\n\t \"\"\"\n\t NUM_QUEUED_IMAGES = 1 + self.batch_size // self.num_slices_per_scan # Get enough for full batch + 1\n\t \n\t for idz in range(NUM_QUEUED_IMAGES):\n\n\t # label_filename = self.filenames[idx][1]\n\t img_filename = self.filenames\n\t img = np.array(nib.load(img_filename).dataobj)\n\t if img.shape[1] == 256 and img.shape[2] == 256:\n\t \timg = np.moveaxis(img,0,2)\n\t # img = np.rollaxis(img,2,0)#img.tranpose(1,2,0)\n\t elif img.shape[0] == 256 and img.shape[2] == 256:\n\t \timg = np.moveaxis(img,1,2)\n\t # img = np.rollaxis(img,2,1)#img.tranpose(0,2,1)\n\t img = img[:,:,:] \n\t img = self.normalize(img)\n\n\t # label = np.array(nib.load(label_filename).dataobj)\n\t \n\n\t if idz == 0:\n\t img_stack = img\n\t # label_stack = label\n\n\t else:\n\n\t img_stack = np.concatenate((img_stack,img), axis=self.slice_dim)\n\t # label_stack = np.concatenate((label_stack,label), axis=self.slice_dim)\n\t \n\t idx += 1 \n\t if idx >= len(self.filenames):\n\t idx = 0\n\t np.random.shuffle(self.filenames) 
# Shuffle the filenames for the next iteration\n\t \n\t img = img_stack\n\t # label = label_stack\n\n\t num_slices = img.shape[self.slice_dim]\n\t \n\t if self.batch_size > num_slices:\n\t raise Exception(\"Batch size {} is greater than\"\n\t \" the number of slices in the image {}.\"\n\t \" Data loader cannot be used.\".format(self.batch_size, num_slices))\n\n\t \"\"\"\n\t We can also randomize the slices so that no 2 runs will return the same slice order\n\t for a given file. This also helps get slices at the end that would be skipped\n\t if the number of slices is not the same as the batch order.\n\t \"\"\"\n\t if self.augment:\n\t slice_idx = np.random.choice(range(num_slices), num_slices)\n\t img = img[:,:,slice_idx] # Randomize the slices\n\t # label = label[:,:,slice_idx]\n\n\t name = self.filenames[idx]\n\t \n\t if (idy + self.batch_size) < num_slices: # We have enough slices for batch\n\t img_batch = img[:,:,idy:idy+self.batch_size] \n\n\t else: # We need to pad the batch with slices\n\n\t img_batch = img[:,:,-self.batch_size:] # Get remaining slices\n\n\t \n\t if len(np.shape(img_batch)) == 3:\n\t img_batch = np.expand_dims(img_batch, axis=-1)\n\t # if len(np.shape(label_batch)) == 3:\n\t # label_batch = np.expand_dims(label_batch, axis=-1)\n\t \n\t yield np.transpose(img_batch, [2,0,1,3]).astype(np.float32)\n\n\t idy += self.batch_size\n\t if idy >= num_slices: # We finished this file, move to the next\n\t idy = 0\n\t idx += 1\n\n\t if idx >= len(self.filenames):\n\t idx = 0\n\t np.random.shuffle(self.filenames) # Shuffle the filenames for the next iteration", "title": "" }, { "docid": "43e58e70477e2944bca1a5739ca010fc", "score": "0.567085", "text": "def batch_iterator(iterator, batch_size) :\n entry = True #Make sure we loop once\n while entry :\n batch = []\n while len(batch) < batch_size :\n try :\n entry = iterator.next()\n except StopIteration :\n entry = None\n if entry is None :\n #End of file\n break\n batch.append(entry)\n if batch :\n yield batch", "title": "" }, { "docid": "73cb8ebe58f7c17a9862361041a3b6da", "score": "0.5665361", "text": "def __init__(self, reader, batch_size=1,\n transform_fn=None,\n num_epochs=1,\n seed=0,\n rows_capacity=1024,\n shuffle=False):\n super(InMemBatchedDataLoader, self).__init__()\n self._batch_size = batch_size\n self._num_epochs = num_epochs\n self._seed = seed\n self._shuffle = shuffle\n self._in_iter = False\n # keys is a dict_keys storing column names and buffer is a list storing corresponding rows/tensors.\n self._keys, self._buffer = _load_rows_into_mem(reader, transform_fn or torch.as_tensor, rows_capacity)", "title": "" }, { "docid": "748490289efa64a8dee2cf77bfc85d18", "score": "0.5656988", "text": "def main():\n files = glob.glob('raw/*.csv')\t\t\t\t\t\t\t#look for a list of csv file for given path\n for file in files:\t\t\t\t\t\t\n read_csv(file)\t\t\t\t\t\t\t\t\t\t\t#read each csv file one by one", "title": "" }, { "docid": "ea2fc765eb77943de048b744fa883691", "score": "0.56418943", "text": "def make_batch(self, \n batch_size=None, \n filenames=None,\n initializable=True,\n repeat=None,\n return_iterator=True):\n #with tf.device('/cpu:0'):\n batch_size = batch_size or FLAGS.batch_size\n filenames = filenames or self.get_filenames()\n logging.info(self.subset, 'num files', len(filenames))\n assert filenames, self.subset\n min_queue_examples = 20000\n if repeat is None:\n if tf.executing_eagerly():\n repeat = False \n else:\n if self.subset == 'train' or melt.num_gpus() > 1:\n repeat = True\n else:\n repeat = False\n\n if 
self.subset == 'train':\n shuffle_files=True \n fix_sequence = False\n else:\n shuffle_files = False\n fix_sequence = True\n\n balance_pos_neg=False\n if self.pos_filter_fn and self.neg_filter_fn:\n balance_pos_neg = True\n\n # for bow using cpu 69 insts/s using gpu 54 inst/s\n with tf.device('/cpu:0'):\n return melt.dataset_decode.inputs(\n filenames, \n decode_fn=self.parser,\n batch_size=batch_size,\n num_threads=FLAGS.num_threads,\n shuffle_files=shuffle_files,\n fix_sequence=fix_sequence,\n buffer_size=min_queue_examples + 3 * batch_size if not FLAGS.buffer_size else FLAGS.buffer_size,\n initializable=initializable,\n repeat=repeat,\n bucket_boundaries=FLAGS.buckets,\n bucket_batch_sizes=FLAGS.batch_sizes,\n length_index=FLAGS.length_index,\n length_key=FLAGS.length_key,\n seed=FLAGS.random_seed,\n return_iterator=return_iterator,\n filter_fn=self.filter_fn if self.subset == 'train' else None,\n balance_pos_neg=balance_pos_neg,\n pos_filter_fn=self.pos_filter_fn if self.subset == 'train' else None,\n neg_filter_fn=self.neg_filter_fn if self.subset == 'train' else None,\n count_fn=self.count_fn if self.subset == 'train' else None,\n name=self.subset)", "title": "" }, { "docid": "52f2602ec2a753fffa650799f1d17d64", "score": "0.5641356", "text": "def read_batches(dir, batch_size):\n filepaths = get_filepaths(dir)\n filename_queue = tf.train.string_input_producer(filepaths, num_epochs=EPOCHS)\n image, label = read_single_example(filename_queue)\n image_batch, label_batch = tf.train.shuffle_batch(\n [image, label], batch_size=batch_size, num_threads=4, capacity=1000 + 3 * batch_size, min_after_dequeue=1000)\n return image_batch, label_batch", "title": "" }, { "docid": "d2e3fcc3d00af9929a8f5534c6318dbe", "score": "0.5640628", "text": "def get_file_list(self):", "title": "" }, { "docid": "d67230ec34aa5a87cdd7f7bd0676ea0d", "score": "0.56261855", "text": "def create_reader_raw(path, is_training, input_dim, num_label_classes):\n return MinibatchSource(CTFDeserializer(path, StreamDefs(\n labels = StreamDef(field='rawlabels', shape=num_label_classes),\n features = StreamDef(field='rawfeatures', shape=input_dim)\n )), randomize = is_training, max_sweeps = INFINITELY_REPEAT if is_training else 1)", "title": "" }, { "docid": "19b7382cadb4df65819b73f346550ec5", "score": "0.5624713", "text": "def read_files_batched(filenames,\n file_batch_size=8192, \n file_batch_shuffle=False, \n max_batches=math.inf,\n return_mode='array', \n n_jobs=-1,\n max_batches_in_queue=1000,\n max_queue_wait_seconds=0.5,\n pd_kwargs={}):\n def listify_generator(func,*args,**kwargs):\n listified_generator = list(func(*args,**kwargs))\n return(listified_generator)\n \n if n_jobs == -1:\n n_jobs = cpu_count()-1\n n_jobs = min((n_jobs,len(filenames)))\n\n # Parallel\n if n_jobs > 1:\n\n # Batch queue, appended in callback\n batch_queue = deque(maxlen=max_batches_in_queue)\n def callback(batch):\n while True:\n if len(batch_queue) < max_batches_in_queue:\n batch_queue.append(batch)\n break\n else:\n time.sleep(0.1)\n \n # Create processes\n p = Pool(n_jobs)\n for filename in filenames:\n p.apply_async(listify_generator,\n (read_file_batched,filename),\n dict(file_batch_size=file_batch_size,\n file_batch_shuffle=file_batch_shuffle, \n max_batches=max_batches,\n return_mode=return_mode,\n pd_kwargs=pd_kwargs),\n callback=callback)\n \n # Yield from queue \n keep_trying = True\n last_non_empty_batch = None\n while keep_trying:\n if len(batch_queue) > 0:\n for batch in batch_queue.popleft():\n yield batch\n last_non_empty_batch = 
time.clock()\n \n if len(batch_queue) == 0:\n if last_non_empty_batch is not None:\n if time.clock()-last_non_empty_batch >= max_queue_wait_seconds:\n keep_trying = False \n p.close()\n p.join()\n \n # Single process\n else:\n for filename in filenames:\n for batch in read_file_batched(filename,\n file_batch_size=file_batch_size,\n file_batch_shuffle=file_batch_shuffle, \n max_batches=max_batches,\n return_mode=return_mode,\n pd_kwargs=pd_kwargs):\n yield batch", "title": "" }, { "docid": "6edabc1462bd40a254c5c9d4f9a7472b", "score": "0.56150126", "text": "def __iter__(self):\n for fname in self.filelist:\n for line in open(fname, 'r'):\n yield self.tokenize(line)", "title": "" }, { "docid": "7b728affb1dc2663bd2d7449625b57ae", "score": "0.561217", "text": "def make_batch(filenames, batch_size):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(filenames).repeat()\n\n # Parse records.\n dataset = dataset.map(single_example_parser, num_parallel_calls=1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size, drop_remainder=True)\n iterator = dataset.make_one_shot_iterator()\n\n image_batch, label_batch = iterator.get_next()\n return image_batch, label_batch", "title": "" }, { "docid": "63ba7c53f72c4b6c1be09b92835de5a1", "score": "0.5609657", "text": "def __init__(\r\n self,\r\n *, # force all paramters to be keyworded\r\n select: list = ['*'],\r\n from_path: str = None,\r\n where: Callable = None,\r\n inner_reader: BaseReader = GoogleCloudStorageReader, # type:ignore\r\n data_format: str = \"json\",\r\n **kwargs):\r\n # rather than deprecation warning, we'll give the user a reminder to\r\n # fix their spelling\r\n if kwargs.get('extension') is not None:\r\n get_logger().warning('Reader parameter \"extention\" should be \"extension\"')\r\n\r\n if not isinstance(select, list):\r\n raise TypeError(\"Reader 'select' parameter must be a list\")\r\n if where is not None and not hasattr(where, '__call__'):\r\n raise TypeError(\"Reader 'where' parameter must be Callable or None\")\r\n\r\n # load the line converter\r\n self.parser = PARSERS.get(data_format.lower())\r\n if self.parser is None:\r\n raise TypeError(F\"Data format unsupported: {data_format}.\")\r\n\r\n # instantiate the injected reader class\r\n self.reader_class = inner_reader(from_path=from_path, **kwargs) # type:ignore\r\n\r\n self.select = select.copy()\r\n self.where: Optional[Callable] = where\r\n\r\n # initialize the reader\r\n self._inner_line_reader = None\r\n\r\n args_passed_in_function = [\r\n F\"select={select}\",\r\n F\"from_path='{from_path}'\",\r\n F\"where={where.__name__ if not where is None else 'Select All'}\",\r\n F\"inner_reader={inner_reader.__name__}\", # type:ignore\r\n F\"data_format='{data_format}'\"]\r\n kwargs_passed_in_function = [f\"{k}={v!r}\" for k, v in kwargs.items()]\r\n formatted_arguments = \", \".join(args_passed_in_function + kwargs_passed_in_function)\r\n\r\n # threaded reader\r\n self.thread_count = int(kwargs.get('thread_count', 0))\r\n\r\n get_logger().debug(f\"Reader({formatted_arguments})\")\r\n\r\n \"\"\" FEATURES IN DEVELOPMENT \"\"\"\r\n\r\n # number of days to walk backwards to find records\r\n self.step_back_days = int(kwargs.get('step_back_days', 0))\r\n if self.step_back_days > 0:\r\n get_logger().warning(\"STEP BACK DAYS IS IN DEVELOPMENT\")\r\n\r\n # multiprocessed reader\r\n self.fork_processes = bool(kwargs.get('fork_processes', False))\r\n if self.thread_count > 0 and self.fork_processes:\r\n raise InvalidCombinationError('Forking and Threading can not be used at the same 
time')\r\n if self.fork_processes:\r\n get_logger().warning(\"MULTI-PROCESS READER IS EXPERIMENTAL, IT IS LIKELY TO NOT RETURN ALL DATA\")", "title": "" }, { "docid": "8ad06a393106b59e021a2b8e20e49cf7", "score": "0.5609187", "text": "def build_reader(data_dir, batch_size):\n train_samples, valid_samples = choose_samples(data_dir)\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(train_samples), buf_size=102400),\n batch_size=batch_size)\n\n # testing data is not shuffled\n test_reader = paddle.batch(\n reader.data_reader(\n valid_samples, is_train=False),\n batch_size=batch_size)\n return train_reader, test_reader, len(train_samples)", "title": "" }, { "docid": "a96df8085f085542d53eff2146902496", "score": "0.5609057", "text": "def open(*args, **kwargs):\n # Dispatch using the first argument which is assumed to be a file buffer,\n # filename, or filename glob.\n reader = _dispatch(args[0])\n return reader(*args, **kwargs)", "title": "" }, { "docid": "f0f9b5daf5f1dbc2fd3fecd56fe97b49", "score": "0.5606039", "text": "def yield_images(basepath, filelist, gray=False):\n for file in filelist:\n yield load_image(basepath, file, gray)", "title": "" }, { "docid": "d9ebe8817eccc1d8a26a45f6babc637e", "score": "0.55867296", "text": "def batch_reader(img_names, index, read_dir, labels_df, batch_size=64):\n img_tensor = []\n ground_truth = []\n indexes = img_names[index:index+batch_size]\n for counter, index in enumerate(indexes):\n feature_name = index + \".npy\"\n feature_file_name = os.path.join( read_dir, feature_name )\n img_tensor.append(np.load(feature_file_name))\n ground_truth.append(labels_df[counter])\n\n return np.array(img_tensor), np.array(ground_truth)", "title": "" }, { "docid": "415df80266c17db3d7f95798b015b39a", "score": "0.5582074", "text": "def __run__(self, reads):\n for row in self.__reader__:\n reads(row)", "title": "" }, { "docid": "ffb1bf0fa1034f4e30e4941ae681aae5", "score": "0.5581805", "text": "def read_list(self):\n folder = os.path.join(args.txt_folder, '*.txt')\n txt_lst = glob.glob(folder)\n\n if args.is_shuffle:\n random.shuffle(txt_lst)\n\n for txt in txt_lst:\n try:\n # do something, extract info from a txt file and pack to item.\n item = structure_label_operation(txt)\n except Exception as e:\n print(\"Parsing txt file met error for %s, detail: %s\" % (txt, e))\n continue\n yield item", "title": "" }, { "docid": "66edde55640830e69363c56c8672dca6", "score": "0.5574546", "text": "def batch_import_findings(Findings=None):\n pass", "title": "" }, { "docid": "b3292700ef6d2731f85612cbe5ba3feb", "score": "0.55721354", "text": "def __init__(\n self, file_paths, single_file=False, start=0, chunk_size=None\n ):\n super().__init__()\n self.file_paths = file_paths\n self.single_file = single_file\n self.start = start\n self.chunk_size = chunk_size\n\n if self.single_file and not chunk_size:\n raise DDSException(\"Missing chunk_size argument...\")", "title": "" }, { "docid": "dd9c9de195c0904e33f5eef115db42ef", "score": "0.5571913", "text": "def load_batch_streamlines(\n self, streamline_ids_per_subj: List[Tuple[int, list]]):\n if self.context is None:\n raise ValueError(\"Context must be set prior to using the batch \"\n \"loader.\")\n\n # The batch's streamline ids will change throughout processing because\n # of data augmentation, so we need to do it subject by subject to\n # keep track of the streamline ids. 
These final ids will correspond to\n # the loaded, processed streamlines, not to the ids in the hdf5 file.\n final_s_ids_per_subj = defaultdict(slice)\n batch_streamlines = []\n for subj, s_ids in streamline_ids_per_subj:\n logger.debug(\n \" Data loader: Processing data preparation for \"\n \"subj {} (preparing {} streamlines)\".format(subj, len(s_ids)))\n\n # No cache for the sft data. Accessing it directly.\n # Note: If this is used through the dataloader, multiprocessing\n # is used. Each process will open a handle.\n subj_data = \\\n self.context_subset.subjs_data_list.get_subj_with_handle(subj)\n subj_sft_data = subj_data.sft_data_list[self.streamline_group_idx]\n\n # Get streamlines as sft\n logger.debug(\" Loading sampled streamlines...\")\n sft = subj_sft_data.as_sft(s_ids)\n sft = self._data_augmentation_sft(sft)\n\n # Remember the indices of this subject's (augmented) streamlines\n ids_start = len(batch_streamlines)\n ids_end = ids_start + len(sft)\n final_s_ids_per_subj[subj] = slice(ids_start, ids_end)\n\n # Add all (augmented) streamlines to the batch\n # What we want is the streamline coordinates, to eventually get\n # the underlying input(s). Sending to vox and to corner to\n # be able to use our trilinear interpolation\n sft.to_vox()\n sft.to_corner()\n batch_streamlines.extend(sft.streamlines)\n batch_streamlines = [torch.as_tensor(s) for s in batch_streamlines]\n\n return batch_streamlines, final_s_ids_per_subj", "title": "" }, { "docid": "5593fde89c0bb7cbaf46861d5ebd8cec", "score": "0.5567133", "text": "def feature_loader(paths, batch_size=64):\n while True:\n files = []\n for path in paths:\n files.extend(glob.glob(os.path.join(path, '*.npz')))\n np.random.shuffle(files)\n for npz in files:\n # Load pack into memory\n archive = np.load(npz)\n features = archive['features']\n categories = archive['categories']\n del archive\n _shuffle_in_unison(features, categories)\n # Split into mini batches\n num_batches = len(categories) // batch_size\n features = np.array_split(features, num_batches)\n categories = np.array_split(categories, num_batches)\n while categories:\n batch_features = features.pop()\n batch_categories = categories.pop()\n yield batch_features, batch_categories", "title": "" }, { "docid": "a0278651079a8e5b58e3c632cbd7dd40", "score": "0.5565957", "text": "def read_batch_features(file_pattern,\n batch_size,\n features,\n reader,\n reader_args=None,\n randomize_input=True,\n num_epochs=None,\n capacity=10000):\n if isinstance(file_pattern, str):\n filenames = _get_file_names(file_pattern, randomize_input)\n else:\n filenames = file_pattern\n if reader_args:\n dataset = reader(filenames, *reader_args)\n else:\n dataset = reader(filenames)\n dataset = dataset.repeat(num_epochs)\n if randomize_input:\n dataset = dataset.shuffle(capacity)\n dataset = dataset.map(\n lambda x: _parse_example(nest.flatten(x), features)\n )\n dataset = dataset.batch(batch_size)\n return dataset", "title": "" }, { "docid": "65b221366acec2cc4ef46a91323860f2", "score": "0.5560069", "text": "async def _load_data(\n api_factory: lusid.utilities.ApiClientFactory,\n single_requests: list,\n file_type: str,\n **kwargs,\n):\n\n # Dynamically call the correct async function to use based on the file type\n return await getattr(BatchLoader, f\"load_{file_type}_batch\")(\n api_factory,\n single_requests,\n # Any specific arguments e.g. 
'code' for transactions, 'effective_at' for holdings is passed in via **kwargs\n **kwargs,\n )", "title": "" }, { "docid": "ab60fb9ea67887714f65eb0f0583b111", "score": "0.5559951", "text": "def _split_and_load(batch, ctx_list):\n new_batch = []\n for _, data in enumerate(batch):\n if isinstance(data, (list, tuple)):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n else:\n new_data = [data.as_in_context(ctx_list[0])]\n new_batch.append(new_data)\n return new_batch", "title": "" }, { "docid": "b6145d9c22559cb8dc8c409905fe08f3", "score": "0.55595285", "text": "def load_data(input_files: List[str],\n shard_size: Optional[int] = None) -> Iterator[Any]:\n if len(input_files) == 0:\n raise ValueError(\"The length of `filenames` must be more than 1.\")\n\n file_type = _get_file_type(input_files[0])\n if file_type == \"sdf\":\n if shard_size is not None:\n logger.info(\"Ignoring shard_size for sdf input.\")\n for value in load_sdf_files(input_files):\n yield value\n elif file_type == \"csv\":\n for value in load_csv_files(input_files, shard_size):\n yield value\n elif file_type == \"pickle\":\n if shard_size is not None:\n logger.info(\"Ignoring shard_size for pickle input.\")\n for value in load_pickle_files(input_files):\n yield value", "title": "" }, { "docid": "3d984523b1fdba57bc7727dc0e042560", "score": "0.5555195", "text": "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n # Repeat infinitely.\n #dataset = tf.contrib.data.TFRecordDataset(filenames).repeat()\n dataset = tf.data.TFRecordDataset(filenames).repeat()\n\n # Parse records.\n #dataset = dataset.map(\n # self.parser, num_threads=8, output_buffer_size=2 * batch_size)\n dataset = dataset.map(self.parser,\n num_parallel_calls=16)\n\n # Potentially shuffle records.\n if self.subset == 'train':\n min_queue_examples = int(\n ShadownetDataSet.num_examples_per_epoch(self.subset) * 0.001)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "title": "" }, { "docid": "f010bcebd0ea0ad1cd45569a5dbd7cb0", "score": "0.5549479", "text": "def audio_reader(prefix: str = \"\",\n audio_format: str = \"wav\") -> Callable:\n\n if audio_format == \"wav\":\n load_file = _load_wav\n elif audio_format == \"sph\":\n load_file = _load_sph\n else:\n raise ValueError(\n \"Unsupported audio format: {}\".format(audio_format))\n\n def load(list_files: List[str]) -> Iterable[Audio]:\n for list_file in list_files:\n with open(list_file) as f_list:\n for audio_file in f_list:\n path = os.path.join(prefix, audio_file.rstrip())\n yield load_file(path)\n\n return load", "title": "" }, { "docid": "4d868974f5e4fb5080be21d53fa30a7f", "score": "0.554558", "text": "def __init__(self, fname, max_fsize=4, texthead=None, *args, **kwargs):\n self.fnamebase = os.path.splitext(fname)[0]\n self.fileid = 0\n self.max_fsize = max_fsize*(1024**3)\n self.cur_fsize = 0\n self.arrshape = None\n self.databuf = []\n self.texthead = texthead\n self.textbuf = []", "title": "" }, { "docid": "ce484896caf0835c2dfc1167eebf4d65", "score": "0.554356", "text": "def _process_dataset(filenames, synsets, labels, output_directory, prefix, num_shards):\n NUM_THREADS = 32\n chunksize = int(math.ceil(len(filenames) / num_shards))\n \n files = []\n\n for shard in 
range(0, num_shards, NUM_THREADS):\n threads = []\n for i in range(NUM_THREADS):\n chunk_files = filenames[(shard + i) * chunksize : (shard + i + 1) * chunksize]\n chunk_synsets = synsets[(shard + i) * chunksize : (shard + i + 1) * chunksize]\n output_file = os.path.join(\n output_directory, '%s-%.5d-of-%.5d' % (prefix, shard + i, num_shards))\n\n t = Thread(target=_process_image_files_batch, args=(output_file, chunk_files, chunk_synsets, labels, \"tmp_%d\" % i))\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\n\n\n tf.logging.info('Finished writing file: %s' % output_file)\n files.append(output_file)\n return files", "title": "" }, { "docid": "f208fefbe4e03f1c97f4422bd5968b05", "score": "0.5534478", "text": "def input_fn():\n\t\t# For training, we want a lot of parallel reading and shuffling.\n\t\t# For eval, we want no shuffling and parallel reading doesn't matter.\n\t\tif is_training:\n\t\t\td = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n\t\t\td = d.repeat()\n\t\t\td = d.shuffle(buffer_size=len(input_files))\n\n\t\t\t# `cycle_length` is the number of parallel files that get read.\n\t\t\tcycle_length = min(num_cpu_threads, len(input_files))\n\n\t\t\t# `sloppy` mode means that the interleaving is not exact. This adds\n\t\t\t# even more randomness to the training pipeline.\n\t\t\td = d.apply(\n\t\t\t\t\ttf.contrib.data.parallel_interleave(\n\t\t\t\t\t\t\ttf.data.TFRecordDataset,\n\t\t\t\t\t\t\tsloppy=is_training,\n\t\t\t\t\t\t\tcycle_length=cycle_length))\n\t\t\td = d.shuffle(buffer_size=100)\n\t\telse:\n\t\t\td = tf.data.TFRecordDataset(input_files)\n\t\t\t# Since we evaluate for a fixed number of steps we don't want to encounter\n\t\t\t# out-of-range exceptions.\n\t\t\td = d.repeat(1)\n\n\t\t# We must `drop_remainder` on training because the TPU requires fixed\n\t\t# size dimensions. For eval, we assume we are evaluating on the CPU or GPU\n\t\t# and we *don't* want to drop the remainder, otherwise we wont cover\n\t\t# every sample.\n\t\td = d.apply(\n\t\t\t\ttf.contrib.data.map_and_batch(\n\t\t\t\t\t\tlambda record: _parse_fn(record, name_to_features),\n\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\tnum_parallel_batches=num_cpu_threads,\n\t\t\t\t\t\tdrop_remainder=True))\n\t\treturn d", "title": "" }, { "docid": "30a278270157ce433da64e84cd672225", "score": "0.55283684", "text": "def handleFiles(args, pg):\n \n if containsData():\n pg.fileList.append(sys.stdin)\n for filename in args:\n try:\n f = open(filename, 'r')\n except IOError:\n print \"No file %s found!\" % filename\n sys.exit(1)\n pg.fileList.append(f)\n return pg", "title": "" }, { "docid": "f5507d4c6197851337adfcb9f1c57899", "score": "0.5525234", "text": "def _load_mini_batch(source, lst, classes, classes_dict, data_root):\n\n x, y = [], []\n for i in range(len(lst)):\n thisx, thisy = _load_data(source, lst[i], classes, data_root)\n x.extend(thisx)\n y.extend(thisy)\n y = [classes_dict[i] for i in y]\n return x, y", "title": "" }, { "docid": "e41c8a68d9d0f89b2b7fc11f88cd5cd9", "score": "0.55238044", "text": "def get_input_data_tensors(\n reader, \n data_pattern1,\n data_pattern2,\n data_pattern3,\n batch_size, \n num_readers=1):\n with tf.name_scope(\"input\"):\n files1 = gfile.Glob(data_pattern1)\n files2 = gfile.Glob(data_pattern2)\n files3 = gfile.Glob(data_pattern3)\n files = files1 + files2 + files3\n \n if not files:\n raise IOError(\"Unable to find input files. 
data_pattern='\" +\n data_pattern1 + \"'\")\n logging.info(\"number of input files: \" + str(len(files)))\n filename_queue = tf.train.string_input_producer(\n files, num_epochs=1, shuffle=False)\n examples_and_labels = [reader.prepare_reader(filename_queue)\n for _ in range(num_readers)]\n\n video_id_batch, video_batch, unused_labels, num_frames_batch = (\n tf.train.batch_join(examples_and_labels,\n batch_size=batch_size,\n allow_smaller_final_batch = True,\n enqueue_many=True))\n return video_id_batch, video_batch, num_frames_batch", "title": "" }, { "docid": "1effa6a89ad69a7d16d8422f471ae57b", "score": "0.55219704", "text": "def read_instance_files(self, train=True):\n if train:\n output_file = self.dir + \"train_\"\n else:\n output_file = self.dir + \"test_\"\n\n rf1 = open(output_file + \"token.txt\", 'r', encoding='utf-8')\n rf2 = open(output_file + \"label.txt\", 'r', encoding='utf-8')\n rf3 = open(output_file + \"entity_type.txt\", 'r', encoding='utf-8')\n rf4 = open(output_file + \"dep.txt\", 'r', encoding='utf-8')\n\n inputs = []\n labels = []\n entity_labels = []\n deps = []\n\n while True:\n line = rf1.readline()\n if line == \"\":\n break\n inputs.append(line)\n line = rf2.readline()\n labels.append(line)\n line = rf3.readline()\n entity_labels.append(line)\n line = rf4.readline()\n deps.append(line)\n\n rf1.close()\n rf2.close()\n rf3.close()\n rf4.close()\n\n return inputs, labels, entity_labels, deps", "title": "" }, { "docid": "632629a53ab7f101369deb26b6bd13fe", "score": "0.55110884", "text": "def _read_batch_from_file(self):\n count = 0\n if not self._strio:\n self._strio = io.StringIO()\n else:\n self._strio.seek(0)\n self._strio.truncate(0)\n self._strio.write(self._header)\n\n while True:\n line = self._tmp_file.readline().decode('utf-8')\n if line:\n self._strio.write(line)\n count += 1\n if count == self._batch_size or not line:\n break\n\n self._strio.seek(0)\n self._reader = csv.DictReader(self._strio, delimiter=self._delimiter)\n return count", "title": "" }, { "docid": "8603c5f12232b64d51d6ded299e72133", "score": "0.55086476", "text": "def batch_iterator(iterator, batch_size):\n entry = True #Make sure we loop once\n while entry:\n batch = []\n while len(batch) < batch_size:\n try:\n entry = iterator.next()\n except StopIteration:\n entry = None\n if entry is None:\n #End of file\n break\n batch.append(entry)\n if batch:\n yield batch", "title": "" }, { "docid": "3117a3bb564d4efa59e6b6f7b0a0ef7a", "score": "0.550455", "text": "def parallel_safe_read_all_files(cls, args, file_number):\n all_read_arguments, pickle_directory = args\n read_arguments = all_read_arguments[file_number]\n log.debug(f\"Reading file {file_number}\")\n scan = cls.return_scan_from_read_arguments(*read_arguments)\n\n if pickle_directory is not None:\n pickle_file = multiprocessing.pickle_object(\n scan, os.path.join(pickle_directory,\n f'{file_number}-{id(scan)}.p'))\n del scan\n return pickle_file\n\n return scan", "title": "" }, { "docid": "c99a3be2fa4400a2a5d1699385dc1937", "score": "0.5497633", "text": "def read_dataset_file_list(file_list, maxlen, vocab, tokenize_text, to_lower, thread_id = 0, char_level=False):\n\n data_x, data_y, filename_y = [], [], []\n num_hit, unk_hit, total = 0., 0., 0.\n maxlen_x = -1\n total_len = 0\n num_files = 0\n\n # Traverse every file in the directory\n for file_path in file_list:\n ###################################################\n ## BEGIN READ FREE-TEXT\n #\n indices = []\n with codecs.open(file_path, mode='r', encoding='ISO-8859-1') as input_file:\n 
for line in input_file:\n splitBrLine = line.replace(\"<br />\", \"\\n\").replace(\"<br/>\", \"\\n\").replace(\"<br>\", \"\\n\").split(\"\\n\")\n for subline in splitBrLine:\n content = subline\n \n if to_lower:\n content = content.lower()\n if tokenize_text:\n content = text_cleaner.tokenize(content)\n else:\n content = content.split()\n if maxlen > 0 and len(content) > maxlen:\n continue\n \n for word in content:\n if word in vocab:\n indices.append(vocab[word])\n if (word == '<num>'): num_hit += 1\n else:\n indices.append(vocab['<unk>'])\n unk_hit += 1\n total += 1\n # if this line is not a blank\n if ('<newline>' in vocab):\n indices.append(vocab['<newline>'])\n\n data_x.append(indices)\n if (\"pos\" in file_path):\n data_y.append(float(1))\n elif (\"neg\" in file_path):\n data_y.append(float(0))\n else:\n data_y.append(float(-1))\n filename_y.append(input_file.name) # Keep track of the filename\n\n if maxlen_x < len(indices):\n maxlen_x = len(indices)\n total_len += len(indices)\n num_files += 1\n\n return data_x, data_y, filename_y, maxlen_x, num_hit, unk_hit, total, total_len, num_files", "title": "" }, { "docid": "e8178787b2927b8c9059687770ebadea", "score": "0.5488389", "text": "def from_file(cls, path, **kwargs):\n for table in file_reader(path, **kwargs):\n yield table", "title": "" }, { "docid": "1ddfb625a9aa4c4a19788eb8db8cad03", "score": "0.5487248", "text": "def read_and_parse_data(files):\n ds = tf.data.Dataset.from_tensor_slices(files)\n ds = ds.repeat()\n ds = ds.map(parse_fn)\n\n return ds", "title": "" }, { "docid": "297e7a05bb3b2c49958afc52b6ebe628", "score": "0.5483418", "text": "def readFile(fpath, format='plain'): # tested\n \n if format != 'plain' and format != 'list':\n raise ValueError(\"Invalid Mode for readFile! Need to be either 'plain' or 'list'\")\n\n with open(fpath, 'r', encoding='utf-8') as f:\n while True:\n line = f.readline()\n if not line:\n break\n \n if format == 'plain':\n yield line.strip()\n elif format == 'list':\n yield line.strip().split()", "title": "" }, { "docid": "5aa8b54b78851d2bad5755befd340884", "score": "0.5482833", "text": "def train_input_fn(params):\n file_pattern = os.path.join(getattr(params, \"data_dir\", \"\"), \"*train*\")\n return _read_and_batch_from_files(\n file_pattern, params.batch_size, params.max_length,\n params.num_parallel_calls, shuffle=True, repeat=params.repeat_dataset)", "title": "" }, { "docid": "a4585b94d2dae4e300551edee858f2a5", "score": "0.5479021", "text": "def raw(self, fileids: Optional[Any] = ...):\n ...", "title": "" }, { "docid": "b7c5169f564d5cdacc38c7234fc44efe", "score": "0.54786503", "text": "def __getitem__(self, batch_num):\n\n # 2 Cases - Data is all in one file, or data is split over two files\n # 1st file is found by using the formula f1_idx = int(batch_num*self.batch_size/self.file_len)\n f1_idx = int(batch_num * self.batch_size / self.file_len)\n lower = (batch_num * self.batch_size) % self.file_len\n upper = ((batch_num + 1) * self.batch_size) % self.file_len\n\n # Load samples from f1 - This will always be done.\n if self.last_file_index != f1_idx:\n self.g = np.load(os.path.join(self.files_location, self.game_files[self.indices[f1_idx]]))\n self.e = np.load(os.path.join(self.files_location, self.equilibria_files[self.indices[f1_idx]]))\n self.last_file_index = f1_idx\n\n # If lower > upper, then two files needed\n if lower > upper:\n remainder = self.batch_size - upper\n f2_idx = f1_idx + 1\n\n # If f2_idx >= self.num_training_files, then the end has been reached, and this is a special 
case\n # In this case, copy to the variables as much as possible and let the rest unchanged\n if f2_idx >= self.num_training_files:\n self.x[0: self.file_len - lower] = np.copy(self.g[lower: self.file_len])\n self.y[0: self.file_len - lower] = np.copy(self.e[lower: self.file_len])\n\n # If not, then things are normal\n else:\n # Assign stuff from file 1\n self.x[0:remainder] = self.g[lower: self.file_len]\n self.y[0:remainder] = self.e[lower: self.file_len]\n\n # Load f2\n self.g = np.load(os.path.join(self.files_location, self.game_files[self.indices[f2_idx]]))\n self.e = np.load(os.path.join(self.files_location, self.equilibria_files[self.indices[f2_idx]]))\n self.last_file_index = f2_idx\n\n # Assign the rest of the values to x and y\n self.x[remainder:self.batch_size] = self.g[0: upper]\n self.y[remainder:self.batch_size] = self.e[0: upper]\n\n # Only one file needed\n else:\n self.x = self.g[lower: upper]\n self.y = self.e[lower: upper]\n\n # Process samples and return\n return self.__process_data(self.x, self.y)", "title": "" }, { "docid": "aae5ca449cb4ab814c6b5dbf88509e7b", "score": "0.5477643", "text": "def file_line_generator(self) -> Iterable[str]:\n ...", "title": "" }, { "docid": "981feb4296e49618898d0606546baa6c", "score": "0.5470824", "text": "def input_pipeline(tfrecord_files, config, name='input_pipeline', shuffle=True, mode='training'):\n\n with tf.name_scope(name):\n # Read the data from TFRecord files, decode and create a list of data samples by using multiple threads.\n if mode is \"training\":\n # Create a queue of TFRecord input files.\n filename_queue = tf.train.string_input_producer(tfrecord_files, num_epochs=config['num_epochs'], shuffle=shuffle)\n sample_list = [read_and_decode_sequence(filename_queue, config) for _ in range(config['num_read_threads'])] # Length 4\n batch_sample = tf.train.batch_join(sample_list, # Returna a list of dictionnaries of tensors\n batch_size=config['batch_size'],\n capacity=config['queue_capacity'],\n enqueue_many=False,\n dynamic_pad=True,\n allow_smaller_final_batch=False,\n name=\"batch_join_and_pad\")\n \n return batch_sample\n\n else:\n filename_queue = tf.train.string_input_producer(tfrecord_files, num_epochs=1, shuffle=False)\n sample_list = [read_and_decode_sequence_test_data(filename_queue, config)]\n batch_sample = tf.train.batch_join(sample_list,\n batch_size=config['batch_size'],\n capacity=config['queue_capacity'],\n enqueue_many=False,\n dynamic_pad=True,\n allow_smaller_final_batch=False,\n name=\"batch_join_and_pad\")\n return batch_sample", "title": "" }, { "docid": "dc52f9d1f5b6ea98d5d0f4626b3b132a", "score": "0.54701746", "text": "def __init__(self, file_sets=[0, 1, 2]):\n self._find_image_files(file_sets)\n self._balance_image_files()\n self._time_series_drop()\n self._train_test_split()", "title": "" }, { "docid": "9bcf1de8acb790a62e6f10ebf56d2682", "score": "0.5467318", "text": "def test_import_ciprs_records_multi_files(fake_pdf, fake_pdf2, user, mock_ciprs_reader):\n record = {\"Defendant\": {\"Name\": \"Jon Doe\"}}\n mock_ciprs_reader.return_value = [record]\n batch = import_ciprs_records([fake_pdf, fake_pdf2], user)\n assert Batch.objects.count() == 1\n assert batch.label == record[\"Defendant\"][\"Name\"]\n assert batch.records.count() == 2", "title": "" }, { "docid": "b7d31c0037906820982e9ee9b95771b5", "score": "0.5443233", "text": "def from_files(self, *paths: str):\n for file_name in only_files_in(paths):\n file_path = os.path.abspath(file_name)\n with open(file_path, 'r') as stream:\n content = 
stream.read()\n self.add(content, file_path)", "title": "" } ]
e25d912f5d2e53d37a000386bda81e4f
read data/nodes.json file from this project
[ { "docid": "9ea1887b72b94e53d863e551452aab0c", "score": "0.80150056", "text": "def readNodesJsonFile():\n fileName = \"data/nodes.json\"\n with open(fileName, \"r\") as f:\n jsonString = f.read()\n return jsonString", "title": "" } ]
[ { "docid": "59d00d70bbdea5d642c76118be182d5d", "score": "0.6795475", "text": "def read(filename):\n\twith open(filename) as data_file: \n\t data = json.load(data_file)\n\t results = []\n\tfor obj in data[\"nodes\"]:\n\t\ttemp = obj.split(\":\")\n\t\tif len(temp) == 2:\n\t\t\tresults.append(Link(temp[0],int(temp[1])))\n\t\telse:\n\t\t\tpass\n\t\t\t# temp = obj.split(\"]\")\n\t\t\t# results.append(Link(temp[0][1:],temp[1],True))\n\treturn results", "title": "" }, { "docid": "b1f28955c55733d1d5a50f037fd079ac", "score": "0.6786821", "text": "def parse_data(self, filename):\n with open(filename, 'r') as f:\n self.js_graph = json.load(f)\n return json_graph.node_link_graph(self.js_graph)", "title": "" }, { "docid": "4b5f43b412f5eaec15533a1f20e8f9e3", "score": "0.6720886", "text": "def build_node_info(self, node):\n directory = os.path.dirname(os.path.abspath(__file__))\n directory = directory+'/nodes/'\n data = False\n setup_file= 'info_nodes.json'\n try:\n with open(directory + setup_file) as f:\n data = json.load(f)\n f.close()\n except Exception as e:\n print(\"Failure in read node config file - {} - {} - {}\".format(directory, setup_file, e))\n\n if data:\n for dt in data:\n if dt == node:\n #find the files and serve them\n tabs = data[dt]\n for tb, tab in enumerate(tabs):\n file = tab[\"file\"]\n if not type(file) is list:\n try:\n with open(directory + file) as f:\n lines = f.readlines()\n lines = ('').join([markdown2.markdown(x.rstrip()) for x in lines])\n data[dt][tb][\"content\"] = lines\n f.close()\n except Exception as e:\n pass\n return json.dumps(data)", "title": "" }, { "docid": "2a1979bcfe46e1ef9a81d0d067a9f5bf", "score": "0.6508454", "text": "def get_detail(self):\n directory = os.path.dirname(os.path.abspath(__file__))\n directory = directory+'/nodes/'\n the_file = 'table_nodes.json'\n try:\n with open(directory + the_file) as data_file:\n data = json.load(data_file)\n data = self.build_detail(data)\n return self.http_response_from_struct(data)\n except Exception as e:\n data = {}\n print(\"Failure in read node detail file - {} - {} - {}\".format(directory, the_file, e))\n return self.http_response_from_struct(data)", "title": "" }, { "docid": "3d01d382d531ce93a2097c0c93440280", "score": "0.6404349", "text": "def read_json(self, path):\n pass", "title": "" }, { "docid": "d8edb5e74f8f5a5d496ef736fef0e18a", "score": "0.6255272", "text": "def _read_topo_from_file (self, path):\n raise NotImplementedError", "title": "" }, { "docid": "a6f8ea03f3c72a1660f8ed7b7f2c153d", "score": "0.6245198", "text": "def read(self):\n # Clear current list of motes:\n self.clear()\n \n try:\n network_file = open(self.filepath) \n network_data = json.load(network_file)[\"network\"]\n network_file.close()\n \n # Initialize endpoints \n \n for endp_data in network_data:\n for endp in self.endpoints:\n if endp.id == endp_data[\"id\"]:\n endp.name = endp_data[\"name\"]\n endp.location = endp_data[\"location\"] \n except IOError:\n pass", "title": "" }, { "docid": "93ad0b4f0fc5d769c9b3866088b2e706", "score": "0.6181664", "text": "def load_from_json(self, file_name: str) -> bool:\r\n g = DiGraph()\r\n try:\r\n with open(file_name, \"r\") as f:\r\n details = json.load(f)\r\n nodes = details.get(\"Nodes\")\r\n edges_out = details.get(\"Edges\")\r\n for dic in nodes:\r\n key = dic.get(\"id\")\r\n pos = dic.get(\"location\")\r\n g.add_node(key)\r\n g.get_node(key).set_pos(pos)\r\n for dic in edges_out:\r\n g.add_edge(dic.get(\"src\"), dic.get(\"dest\"), dic.get(\"w\"))\r\n self.graph = g\r\n return True\r\n 
except Exception as e:\r\n print(e)\r\n return False", "title": "" }, { "docid": "37f47075ad98e950ccc29d7bff7f9f31", "score": "0.6168879", "text": "def _load_data(self):\n data_file = constant.RESSOURCE_FOLDER / \"data.json\"\n try:\n with open(str(data_file), \"r\") as json_file:\n data = json.load(json_file)\n\n return data\n\n except (FileNotFoundError, FileExistsError) as error:\n print(error)\n print(\"Please check ressource folder before playing Game\")\n return\n\n except KeyError as error:\n print(\n f\"error while attempting to read {error}\"\n + \"from data.jon file\"\n )\n return", "title": "" }, { "docid": "8eec0197b879b06b9fe14fb414936ba5", "score": "0.6163754", "text": "def read_json_file():\n\twith open(json_dump_location) as json_data:\n\t\treturn json.load(json_data)", "title": "" }, { "docid": "d7b29ffd773a8431ad40d78cf9da1834", "score": "0.6119412", "text": "def load_data(self, file_name):\n print \"Loading data from\", file_name\n f = uproot.open(file_name)\n tree = f[self.tree_name]\n return tree.arrays(tree.keys())", "title": "" }, { "docid": "506d34ca555eb730388de05022a423bf", "score": "0.60937434", "text": "def get_pipeline_data(self):\n if not os.path.isfile(self.datafilepath):\n print \"file not found\"\n data = DEFAULT_SECTIONS\n icFU.dict_to_folders(data, self.path)\n icFU.write_json_file(data, self.datafilepath)\n\n else:\n data = open(self.datafilepath)\n data = json.load(data)\n\n return data", "title": "" }, { "docid": "91ae5fbf9c51bb26c7353f74b34d0f71", "score": "0.60484135", "text": "def _crawl_json():\n with open(_crawl_file_path()) as f:\n crawl_json = f.read()\n return crawl_json", "title": "" }, { "docid": "57aa2010da142be03898852675d117d4", "score": "0.60457176", "text": "def main():\n json_config_filepath = \"/home/rush/Projects/Temp/postFedora/config.json\"\n json_config_file=JsonData()\n json_config_file.open_json_infile(json_config_filepath)\n json_config_file.print_json_dict()\n json_config_file.print_json_dict_keys()\n json_config_file.print_json_dict_values()", "title": "" }, { "docid": "ef53658a8875a04169a40969e730c087", "score": "0.6028954", "text": "def demo_reader_read_data(path, config):", "title": "" }, { "docid": "38ebdbf75e77bca4ea4b4e82415c9c37", "score": "0.60225487", "text": "def read_data(filename):\n with open(filename, 'r') as f:\n data = f.read()\n #print(data)\n #data = json.loads(json_data) #for loading .json file\n return data", "title": "" }, { "docid": "b0ced372b91fc360c98083885f7d361e", "score": "0.60103273", "text": "def get_json_data():\n with open(\"active-game-files/players.json\", \"r\") as json_file:\n json_data = json.load(json_file)\n return json_data", "title": "" }, { "docid": "45f12d87ad78ff1fd7460da7c3f32dc6", "score": "0.59947985", "text": "def loadFromFile(self, filename):\n Log.addLogMessage(Log.INFO, 'Opened grap ' + filename)\n self.filename = filename\n with open(filename, 'r') as f:\n jsonstr = f.read()\n\n jsondata = json.loads(jsonstr)\n\n # make sure the name becomes camelcase without spaces: required by CLaSH\n namestr = jsondata['name'].strip()\n namestr = string.capwords(namestr)\n namestr = namestr.replace(' ', '')\n self.name = namestr\n\n # Load the predefined clash types when available\n if 'clashtypes' in jsondata.keys():\n self.clashtypes = jsondata['clashtypes']\n\n # Load all nodes and their attributes\n for jsnode in jsondata['nodes']:\n nodeName = jsnode['name']\n nodeFunction = jsnode['function']\n nodeClashCode = ''\n if 'clashcode' in jsnode.keys():\n nodeClashCode = 
jsnode['clashcode']\n nodeColor = self.DEFAULT_NODE_COLOR\n if 'color' in jsnode.keys():\n nodeColor = jsnode['color']\n nodePosition = jsnode['pos'][0], jsnode['pos'][1]\n self.add_node(nodeName, nodeFunction, nodePosition, clashcode=nodeClashCode, color=nodeColor)\n\n # Load all edges and their attributes\n for jsedge in jsondata['edges']:\n edgeSource = jsedge['src']\n edgeDestination = jsedge['dst']\n edgeResNumber = jsedge['resnr']\n edgeArgNumber = jsedge['argnr']\n edgePRates = CSDFGraph._flattenRateList(jsedge.get('prates', [1]))\n edgeCRates = CSDFGraph._flattenRateList(jsedge.get('crates', [1]))\n edgeTokens = jsedge.get('tkns', [])\n edgeColor = self.DEFAULT_EDGE_COLOR\n if 'color' in jsedge.keys():\n edgeColor = jsedge['color']\n self.add_edge(\n edgeSource, edgeDestination, edgeResNumber, edgeArgNumber,\n edgePRates, edgeCRates, edgeTokens, color=edgeColor)\n\n # Now that the graph is construcuted, validate it: \n self.validateGraph()", "title": "" }, { "docid": "cf285715363282ba40aee0f26b7ae4c7", "score": "0.5992424", "text": "def read_json_input(filename):\r\n with open(filename, 'r') as f:\r\n json_data = json.load(f)\r\n print(json_data)\r\n \r\n return json_data", "title": "" }, { "docid": "2ac60c15352fa3b71183ef7d190fb13d", "score": "0.59823054", "text": "def read_geojson():\n filepath = os.path.join(ROOT_DIR, \"static/all_hotels.geojson\")\n if file_exists(filepath):\n with open(filepath) as f:\n return json.load(f)", "title": "" }, { "docid": "56ffdc88f3edf6b15de06acba0730b80", "score": "0.5979592", "text": "def read_data():\n\tinitialise.prepare_data(folder)", "title": "" }, { "docid": "c62f3834ae5ebdb586454dce71415a11", "score": "0.59742063", "text": "def load_json(self):\n if self.json_path:\n if os.path.exists(self.json_path):\n with open(self.json_path, encoding='utf-8') as f:\n data = json.load(f)\n self.load_data(data)\n print(\"{} {}(s) loaded from json file \\\"{}\\\"\".format(\n len(self.data), self.child_class.__name__, self.json_path))\n else:\n print(\"Path \\\"{}\\\" does not exist\".format(self.json_path))\n else:\n # print(\"No json path specified\")\n pass", "title": "" }, { "docid": "5d4d7ad4967beafe63b73c5813eea846", "score": "0.59723353", "text": "def readingJsonFile():\n mybmi = Bmi()\n mybmi.readJsonFile('data.json')\n data = mybmi.returnData('data.json')\n return mybmi", "title": "" }, { "docid": "81fe250cb48572bc2b8659817f6ab5c0", "score": "0.5967392", "text": "def get_data():\n if path.exists():\n data = path.read_text()\n return json.loads(data)\n return None", "title": "" }, { "docid": "7c5d43bb6bc7424929bf092b55af402f", "score": "0.5958566", "text": "def readData(path):\n f = open(path, 'r')\n data = json.load(f)\n f.close()\n return data", "title": "" }, { "docid": "f0c24d440888acbf1c52389c8c0c4358", "score": "0.59343207", "text": "def load_graph(file: LoadSource) -> nx.Graph:\n\n with ensure_open(file) as f:\n data = json.load(f)\n\n return nx.readwrite.json_graph.node_link_graph(data)", "title": "" }, { "docid": "83adbf06dff2025c9633a8b11fbac756", "score": "0.5924482", "text": "def loadFromFile():\n with open('output.json', 'r') as f:\n return json.load(f)", "title": "" }, { "docid": "8185a7410beb213a5d2fb155039abf98", "score": "0.59226006", "text": "def __read_json_testfile_api__(self):\n with open(os.path.join(self.config_path, \"api-links.json\")) as config_file:\n test_file = json.load(config_file)\n config_file.close()\n return test_file", "title": "" }, { "docid": "6581f65d687990d0c572ea4dab43d2cb", "score": "0.5914957", "text": 
"def load_data():\n file = open(\"json/course.json\")\n courses = json.load(file)\n # print(courses)\n return courses", "title": "" }, { "docid": "c006d9a45516a5a3b2c60f5c1218e137", "score": "0.5896386", "text": "def get_data(self):\n return json.loads(self.data_file.read())", "title": "" }, { "docid": "83abd987fe2cce3b9daa8e3e80f6b3b9", "score": "0.5894382", "text": "def load_from_json(self, file_name: str) -> bool:\r\n try:\r\n with open(file_name, 'r') as f:\r\n s = json.load(f)\r\n g = DiGraph()\r\n for node in s[\"Nodes\"]:\r\n if \"pos\" in node:\r\n pos = tuple(map(float, str(node[\"pos\"]).split(\",\")))\r\n g.add_node(node['id'], pos)\r\n else:\r\n g.add_node(node['id'])\r\n\r\n for edge in s[\"Edges\"]:\r\n g.add_edge(edge[\"src\"], edge[\"dest\"], edge[\"w\"])\r\n self.graph = g\r\n return True\r\n except Exception as e:\r\n print(e)\r\n return False", "title": "" }, { "docid": "78f481ed1b7689817922fe6f6e9dea1e", "score": "0.58942014", "text": "def load_node_results(filename):\n\n with pyfits.open(filename) as image:\n data = image[1].data\n return data", "title": "" }, { "docid": "15ef99b5385ab3f9cba4776c4dfd2b59", "score": "0.5891677", "text": "def read_json(dir_rnx: str, logger: logging.Logger):\n # cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n # read the JSON file created by processing pyconvbin.py\n json_name = glob.glob(os.path.join(dir_rnx, '*.json'))[0]\n\n with open(json_name) as f:\n amc.dRTK = json.load(f)\n\n pass", "title": "" }, { "docid": "6df6211569d6c948f3701807a20b0c10", "score": "0.5888347", "text": "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "title": "" }, { "docid": "2333381fe3c391d850894f2c1ab2bb02", "score": "0.5886745", "text": "def get_local_nodes_info(cls, keyname):\n try:\n with open(cls.get_locations_json_location(keyname), 'r') as file_handle:\n file_contents = json.loads(file_handle.read())\n if isinstance(file_contents, list):\n cls.upgrade_json_file(keyname)\n file_handle.seek(0)\n file_contents = json.loads(file_handle.read())\n return file_contents.get('node_info', [])\n except IOError:\n raise BadConfigurationException(\"Couldn't read from locations file, \"\n \"AppScale may not be running with \"\n \"keyname {0}\".format(keyname))", "title": "" }, { "docid": "585835d98a76d1b8c5b2f2ff695a5c9e", "score": "0.58811134", "text": "def get_info_from_file(self):\n with open(self.file_name) as outfile:\n temp = json.load(outfile)\n return temp", "title": "" }, { "docid": "c36c60672dc7627ce0485710fab811e8", "score": "0.5868842", "text": "def read_structure(path: str = RESOURCES_FILE):\n Structure.structure = read_json_file(path)", "title": "" }, { "docid": "575ab5df6e253e1a4d570f90c5c7acfe", "score": "0.58567274", "text": "def test_read_json_content(self):\n\t\tin_file = input_file(test_data['data_dir'] + test_data['input_json'])\n\t\tjson = in_file.get_json()\n\t\tprint(json)\n\t\tself.assertEqual(json[0]['lorem'], \"ipsum\")", "title": "" }, { "docid": "561f959b097360e13e2ad433c77f2e7c", "score": "0.58545417", "text": "def load_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n return projects", "title": "" }, { "docid": "8220b2103414b6a9c34fa6e576166b38", "score": "0.58450466", "text": "def test_valid_read_json_file(self):\n\n json_data = {'features': 'GPS Distance'}\n json_file_path = 'model_data.json'\n\n ROOT_DIR = 
Path(__file__).parent.parent.parent\n full_path = os.path.join(*[str(ROOT_DIR), 'test_gui', 'test_shared', json_file_path])\n\n self.assertEqual(read_json_file(full_path), json_data)", "title": "" }, { "docid": "80c9a9f5c23003303fa11211b3d4ab8d", "score": "0.5839888", "text": "def load_data():\n with open('./json/course.json', 'r') as openfile:\n # Reading from json file\n courses = json.load(openfile)\n return courses", "title": "" }, { "docid": "9cda63dea81f774685de263802fadd0a", "score": "0.5837844", "text": "def load_trees(self, projects_dir):\n\n for file_name in os.listdir(projects_dir):\n if file_name.endswith(\".b3\"):\n\n print \"Loading \" + file_name\n\n with open(projects_dir + file_name) as data_file:\n data = json.load(data_file)['data']\n\n # Extract the custom node types from the tree.\n if 'custom_nodes' in data:\n node_types.add_types_from_json(data['custom_nodes'])\n\n if 'trees' in data:\n for tree_data in data['trees']:\n tree = behaviour_tree.BehaviourTree()\n tree.load_from_json(tree_data, os.path.splitext(file_name)[0])\n self._trees[tree.title()] = tree\n\n # Notify other objects of the new tree.\n self.tree_added.emit(tree.title())", "title": "" }, { "docid": "4dfbe544ae4e0c3da19b2b54db6bb3c5", "score": "0.58308756", "text": "def readin(folder=\"\"):\n nodes = np.loadtxt(folder + 'nodes.txt')\n mats = np.loadtxt(folder + 'mater.txt')\n elements = np.loadtxt(folder + 'eles.txt')\n loads = np.loadtxt(folder + 'loads.txt')\n\n return nodes, mats, elements, loads", "title": "" }, { "docid": "475f46c174b73c85c607e24baf6e3431", "score": "0.5827131", "text": "def read(self):\n with open(self.path, encoding=self.encoding) as inputfile:\n return json.load(inputfile)", "title": "" }, { "docid": "9e9b6e747ff37328958491672e99044e", "score": "0.5826625", "text": "def test_node_json(self):\n n = Sensor()\n\n out_file = json.dumps(n.to_json())\n n_load = Node.from_json(json.loads(out_file))\n\n self.assertEqual(n, n_load)\n self.assertEqual(n.bias, n_load.bias)\n self.assertEqual(n.activation, n_load.activation)", "title": "" }, { "docid": "e1fbe22731b0b8ea4bb03c0b5165ccc1", "score": "0.5815087", "text": "def _get_json_file_data():\n loaded_data = []\n with open(FLAGS.in_file, mode='r') as file_json:\n loaded_data = json.load(file_json)\n return loaded_data", "title": "" }, { "docid": "475a3a046c20a83f912b35851f0ca453", "score": "0.5811229", "text": "def read_json_data(filename):\n path = os.path.join(os.path.dirname(__file__), \"data\", filename)\n with open(path, \"r\") as file:\n data = json.load(file)\n return data", "title": "" }, { "docid": "35248bd4d88b9d106082c42a757fb901", "score": "0.57987106", "text": "def ReadJsonFile(f):\r\n with open(f, \"r\") as json_file:\r\n data = json.load(json_file)\r\n return data", "title": "" }, { "docid": "4a1e400acff3c3c73921da572003a8b1", "score": "0.57967955", "text": "def __read_json_file(path_file):\n try:\n fp = open(path_file, 'r')\n data = fp.read()\n except IOError, e:\n content = urllib2.urlopen('https://raw.githubusercontent.com/horacioibrahim/py-polymer/master/config.json')\n data = content.read()\n\n return json.loads(data)", "title": "" }, { "docid": "ec923f00d176b474149a117db0988f15", "score": "0.578274", "text": "def load(filename):\n snet=SimpleNetwork()\n with open(filename, 'r') as file:\n (snet.nodes, snet.links)=json.load(file)\n file.close()\n return(snet)", "title": "" }, { "docid": "fd778096d7d1e360a0e8a0cf05d0fa57", "score": "0.57813704", "text": "def load(path):\n with open(path,\"r\") as fh:\n 
data=json.load(fh)\n return data", "title": "" }, { "docid": "fa7ffada6b26ee715cdd07cb1c36dac7", "score": "0.5776341", "text": "def __read_json_testfile__(self):\n with open(os.path.join(self.config_path, \"config.json\")) as config_file:\n test_file = json.load(config_file)\n config_file.close()\n return test_file", "title": "" }, { "docid": "ab8ab0e3024dc4589c05982a62958329", "score": "0.5773814", "text": "def read_from_file(self):\n if self.__filename_read is not None:\n try:\n with open(self.__filename_read, \"r\") as json_file:\n self.metadata = json.load(json_file)\n except IOError as e:\n print(\"Error occurred while opening file: \", e)\n else:\n print(\"Read file is None\")", "title": "" }, { "docid": "f385592f8671beea6a8f50c5ca131fcb", "score": "0.5759278", "text": "def read(self, name, location):\n with open(os.path.join(location, name), \"r\") as f:\n line = f.read().strip()\n\n data = json.loads(line)\n return models.Graph.decode(data)", "title": "" }, { "docid": "f0a6f7d14a66a02aaaaf6335a4776e49", "score": "0.5758218", "text": "def load(self):\n\n with open(self.path, r\"r\") as that:\n return json.load(that)", "title": "" }, { "docid": "2234f917c45da17898514eb38aa7c718", "score": "0.5756406", "text": "def read_grid(grid_file):\n with open(grid_file, \"r\") as f:\n\n grid_data = json.load(f)[\"features\"]\n f.close()\n\n return grid_data", "title": "" }, { "docid": "5b1e9a4dab50ccba8b7b361119bba554", "score": "0.5756345", "text": "def load(self, filename):\n with open(filename, 'r') as f:\n json_data = json.loads(f.read())\n self.features = json_data['features']", "title": "" }, { "docid": "a5f5007eedee22acdc1e48d7e0732ae6", "score": "0.5728357", "text": "def load_json_file(json_file):\n # json_file๏ผšjson/F1367.json'\n with open(json_file, 'r') as load_f:\n json_content = json.load(load_f)\n print(json_content)\n\n return json_content", "title": "" }, { "docid": "5e9dd55a026e9de2f744a00712913571", "score": "0.57192177", "text": "def read_json(self, filename):\n try:\n with open(filename, 'r') as f:\n data = json.loads(json.load(f))\n\n # Set classes\n if 'classes' in data:\n self.classes = data['classes']\n\n except IOError:\n log.error(\"Couldn't read from %s\" % filename)", "title": "" }, { "docid": "0a5a25aff88f47b2f3c2ba530b99a342", "score": "0.56978494", "text": "def get_json_data(self, file_name):\n with open(file_name).read() as f:\n return json.loads(f)", "title": "" }, { "docid": "ed232f861423360e8fcaa59a3b27b272", "score": "0.5696865", "text": "def load(filepath):\n\n return json.load(open(filepath))", "title": "" }, { "docid": "72453792b4abd7d6709f3b0b05f28529", "score": "0.56918645", "text": "def get():\n\n # Open file just keep the best n-trees.\n with open(MODEL_FILE_PATH, 'r') as f:\n model_json = json.loads(f.read())\n\n return model_json", "title": "" }, { "docid": "baf1aa1a63a4aa9123b6ef326bc51c09", "score": "0.5690652", "text": "def readFromJson():\n file = open(STAT_FILE_NAME, 'r')\n data = json.load(file)\n file.close()\n return data", "title": "" }, { "docid": "197c238f769ab4116a9b8baa1e003a4e", "score": "0.5678415", "text": "def importData(fname,verbose=False):\n\n #Error check\n if os.path.exists(fname)==False:\n print(\"Can not find file \" + fname)\n return\n\n if fname.lower().endswith('json')==False:\n print(\"Data should be a JSON file\")\n return\n\n #Build tree\n with open(fname) as f:\n obj = json.load(f) \n\n\n flattenedTree = tree_flatten(obj['msg'][0])\n colNames = 'id|parent|atlas_id|acronym|name|color'\n return (flattenedTree,colNames)", 
"title": "" }, { "docid": "e2e16cb41436f092b090255cfdfc19b7", "score": "0.56771916", "text": "def read_mat_data(file_path=None, default_path=\"project://scene\"):\n\n if not file_path:\n return\n\n\n if not os.path.exists(file_path):\n return\n\n # Open the json file and read the data\n with open(file_path, 'r') as fp:\n data = json.load(fp)\n\n\n if not data:\n return\n\n selected = None\n try:\n selected = ix.selection[0]\n print(selected)\n except:\n print(\"Nothing selected\")\n\n if not selected:\n return\n\n# shading_groups = selected.attrs.shading_groups\n# print(shading_groups)\n\n\n for material, values in data.items():\n connections = values[\"connections\"]\n nodes = values[\"nodes\"]\n objects = values[\"objects\"]\n nodeTree = NodeTree(material, nodes, connections, objects, selected)\n nodeTrees.append(nodeTree)\n\n nodeTree.createNodes()\n\n #nodeTree.linkNodes(connections)\n\n #standard_mat = ix.cmds.CreateObject(\"pSphere1\" + '_mat', \"MaterialPhysicalStandard\", \"Global\", default_path)", "title": "" }, { "docid": "d27d18651084b9128af3d1bc839b8276", "score": "0.56704545", "text": "def read_toml(filename: str) -> Graph:\n pass", "title": "" }, { "docid": "1c879d4cf42ab601cbb45ebb351a0074", "score": "0.56663686", "text": "def network_json_files():\n net_fol = TEST_DATADIR / \"networks\"\n data = {\n \"good\": list(net_fol.glob(\"good/*.json\")),\n \"bad\": list(net_fol.glob(\"bad/*json\")),\n }\n return data", "title": "" }, { "docid": "1c879d4cf42ab601cbb45ebb351a0074", "score": "0.56663686", "text": "def network_json_files():\n net_fol = TEST_DATADIR / \"networks\"\n data = {\n \"good\": list(net_fol.glob(\"good/*.json\")),\n \"bad\": list(net_fol.glob(\"bad/*json\")),\n }\n return data", "title": "" }, { "docid": "b54a1c64b2510a8b0b83952f65f0be25", "score": "0.5649095", "text": "def _load_json(self, filename):\n full_path = os.path.realpath(os.sep.join([os.path.dirname(__file__),\n 'files', filename]))\n fp = open(full_path, 'rb')\n json = fp.read()\n fp.close()\n return json", "title": "" }, { "docid": "e13528d9e507aadac35ea5cd1d3cee40", "score": "0.56471616", "text": "def __init__(self, json_file: str='data/train-v1.1-TA.json'):\r\n ### YOUR CODE HERE\r\n with open(json_file) as fp:\r\n self.data = json.load(fp)[\"data\"]\r\n print(\"Extracting data sample from raw SQuAD json file... 
>>\")\r\n self.dataset = get_train_data(data_dir=\"\", filename=json_file)\r\n ### END YOUR CODE\r", "title": "" }, { "docid": "697d6fa8953ae099c2562717ae8523ae", "score": "0.56470877", "text": "def readfrom_nodecay(f, json_names, json_ids):\n all_lines = f.readlines()\n\n table_country_name = remove_brackets(all_lines[0][30:-1]).lower()\n country_id = remove_brackets(all_lines[0][25:30])\n tot_tons = int(all_lines[0][0:8])\n\n at_ocean = float(all_lines[1][0:8])\n at_ocean_perc = float(remove_brackets(all_lines[1][10:14]))\n\n at_beach = float(all_lines[2][0:8])\n at_beach_perc = float(all_lines[2][10:14])\n\n try:\n country_name = json_names[json_ids.index(country_id)].lower()\n\n data_from = []\n for c_line in all_lines[4:]:\n tons = float(c_line[0:8])\n perc = float(c_line[10:14])\n name = remove_brackets(c_line[21:-1])\n tobj = {\n 'name': name,\n 'tons': tons,\n 'perc': perc\n }\n data_from.append(tobj)\n\n obj_from = {\n 'name': country_name,\n 'tot_tons': tot_tons,\n 'ocean_tons': at_ocean,\n 'ocean_perc': at_ocean_perc,\n 'beach_tons': at_beach,\n 'beach_perc': at_beach_perc,\n 'from': data_from\n }\n return obj_from, country_name\n except Exception as e:\n print(F\"Not found: {table_country_name}\")\n return -1, table_country_name", "title": "" }, { "docid": "78044356a4ca4f9276a8e91c5b98beb8", "score": "0.5644243", "text": "def load_json(self, data: str):\n\t\tG = adjacency_graph(json.loads(data))", "title": "" }, { "docid": "a8ce108b8c5c659e7beb4032edac6e0f", "score": "0.56267977", "text": "def read_json(filepath: str) -> object:\n\n return json.loads(read_file(filepath))", "title": "" }, { "docid": "edd070463181cc91fd41ea3d0567e984", "score": "0.5623868", "text": "def read_data(self):\n self.data = pd.read_json(self.data_fname)", "title": "" }, { "docid": "5bc87ed72b8272ac9cde774cd4348778", "score": "0.5622817", "text": "def load_data(in_file):\n with open(in_file) as in_handle:\n return json.load(in_handle)", "title": "" }, { "docid": "eec7a5258bbf0243280c19417708b178", "score": "0.5621299", "text": "def open_json_file(file):\n with open(file) as data_file:\n data = json.load(data_file)\n return (data['jobs'])", "title": "" }, { "docid": "d03c0de04559a49680346324dacb2e0b", "score": "0.5619169", "text": "def parse_json_file(file):\n with open(file, 'r') as json_file:\n \tdata = json.load(json_file)\n return data", "title": "" }, { "docid": "2cb955fe610b6358fbf98e757df75f39", "score": "0.5619144", "text": "def open_graph(self):\n\n with open(\"Graph.json\") as json_file:\n self.Graph = json.load(json_file)\n\n return self.Graph", "title": "" }, { "docid": "d4c6ea6f2e57f9d83a8e20db663625c6", "score": "0.5615406", "text": "def json_reader(self):\n fn = self.fn\n if os.path.isfile(fn) and os.path.getsize(fn) > 0:\n with open(fn, 'rt') as ff:\n return json.load(ff)", "title": "" }, { "docid": "f3cfa88ecc22275b37b1849d2974bc93", "score": "0.5612933", "text": "def read_json(file):\n with open(file, 'r') as f:\n fl = json.load(f)\n return fl", "title": "" }, { "docid": "1ba5d254e679d4ae62d7e8ee66129cdc", "score": "0.5610952", "text": "def do_load(self, ):\n try:\n file = input(\"Path: \")\n with open(os.path.abspath(file), 'r+') as json_file:\n self.info = json.load(json_file)\n print(f\"{Fore.GREEN}Loaded Successfully.\")\n except Exception as e:\n print(f\"{Fore.RED}{e}\")", "title": "" }, { "docid": "0b4a0c56ec073dc46d32c606b77a5959", "score": "0.56030554", "text": "def read_json(self) -> None:\r\n\r\n self._books = json.load(open('data.json', 'r'))", "title": "" }, { "docid": 
"9f6405eb5952e28d99d019d364952f09", "score": "0.5600502", "text": "def get_layer_data():\n with open('metadata/vgg_layer_info.json') as f:\n data = json.load(f)\n return data", "title": "" }, { "docid": "fe5c516744c3ae58581b1074d82033e1", "score": "0.55968016", "text": "def load_data():", "title": "" }, { "docid": "2ee1093419e2f9615c99ee955d0db0a4", "score": "0.5595259", "text": "def read_json_graph(path: PathLike) -> Graph:\n path: PathLike = Path(path)\n with open(path) as graph_file:\n return node_link_graph(load(graph_file))", "title": "" }, { "docid": "c0c15dcfc625623fd86780dc8dc4e0f4", "score": "0.5592956", "text": "def test_read_from_file(self):\n json_data = TestUGJSONReader.get_test_json()\n\n # write a temp file for testing.\n filename = \"test.json\"\n with open(filename, \"w\") as json_file:\n json_file.write(json_data)\n\n ugjr = UGJsonReader()\n uags = ugjr.read_from_file(filename=filename)\n self.verify_uags(uags)\n\n # clean up the temp file\n os.remove(filename)", "title": "" }, { "docid": "fefc5910a4dcb2c804571f010e30a342", "score": "0.55927074", "text": "def readMetadata(self):\n with open(self.metadataFile, 'r') as fh:\n try:\n metadata = json.load(fh)\n self.timeStep = int(metadata['timeStep'])\n if metadata.get('aggregationMethod'):\n self.aggregationMethod = metadata['aggregationMethod']\n return metadata\n except (KeyError, IOError, ValueError) as e:\n raise CorruptNode(self, \"Unable to parse node metadata: %s\" % e.args)", "title": "" }, { "docid": "294e445958ef4729b4c16b67c46df595", "score": "0.5592438", "text": "def json_to_nx(filename: str) -> nx.Graph:\n return nx.balanced_tree(2, 3)", "title": "" }, { "docid": "c3184d1c5c3164de4abcca3414b1a03d", "score": "0.55902267", "text": "def get_json_data(self, filename):\n\n return(json.dumps(json.load(open(filename, \"r\"))))", "title": "" }, { "docid": "a8470c2669e888fab135dbbf9a5b4414", "score": "0.5588613", "text": "def open_json_files(topic, dates, phase):\n folder = '/future_new/' if dates and phase == 'test' else '/shuffle/'\n with open('../data' + folder + topic + f'/{phase}.json', 'r') as infile:\n data = json.load(infile)\n return data", "title": "" }, { "docid": "88191f9cc9df2e2cfac24c5b8c588319", "score": "0.5585208", "text": "def load(filename):\r\n f = open(filename, 'r')\r\n data = json.load(f)\r\n f.close()\r\n return data", "title": "" }, { "docid": "2b1ec2c5e05f3d5f48563665cf8c8a35", "score": "0.5584823", "text": "def read_data():\n\n Config.trajectories_dict = read_trajectories()\n\n # Read the fairway sections\n Config.fairway_section_list = read_network()\n\n # Read all the bridges in the network\n Config.bridges = read_bridges()\n\n # Read all the locks in the network\n Config.locks = read_locks()\n\n Config.vessels_dict = read_passages()", "title": "" }, { "docid": "9edf6aec6000e2f17691b45baa6f2c22", "score": "0.55756897", "text": "def read_jsonlike(jsonlike_file):\n with open(jsonlike_file, 'r') as f:\n f.readline() # skips the /usr/bin/env line\n try:\n data = json.load(f)\n # data = json_util.loads(data)\n except ValueError:\n return 1\n data['mdsum'] = os.path.splitext(os.path.basename(jsonlike_file))[0].split('_')[-1]\n print(data['mdsum'])\n return data", "title": "" }, { "docid": "eaf5219788086c7cf74a9c20e37537bd", "score": "0.5575326", "text": "def config_parse(filepath):\n with open(filepath) as json_file:\n data = json.load(json_file)\n return data", "title": "" }, { "docid": "f90480b7a96fb53f7302bbdbe040dd99", "score": "0.55720747", "text": "def openFile(self):\n try:\n f = 
open(self.filePath)\n data = dict(json.load(f))\n print(\"Opened file successfully\")\n except:\n traceback.print_exception(*sys.exc_info())\n return data", "title": "" }, { "docid": "4383c38472c1de953a3efdcb4b4b5af1", "score": "0.55713403", "text": "def run_read_data_train():\n data = read_data(\"src/tests/databaselinemodel/train_happy_frame.json\")\n return data", "title": "" }, { "docid": "428401e6bd41f4e27ab57c227263c190", "score": "0.5561218", "text": "def get_data(name):\n cfgpath = os.path.join(ROOTDIR, \"config\", name)\n open(cfgpath, \"a\").close() # create cfg file if it doesnt exist\n\n with open(cfgpath, \"r\") as f:\n data = json.load(f)\n\n return data", "title": "" }, { "docid": "57c1ba3f677d692ca8fc0c92269ae874", "score": "0.5558609", "text": "def extract_route_data(self, file):\n with open(file) as data:\n data = json.load(data)[\"elements\"]\n ways = [x[\"nodes\"] for x in data if x[\"type\"] == \"way\"]\n raw_nodes = (n for n in data if n[\"type\"] == \"node\")\n nodes = {n[\"id\"]: Node(n[\"id\"], n[\"lat\"], n[\"lon\"]) for n in raw_nodes}\n\n return ways, nodes", "title": "" }, { "docid": "4efa5cf106458e838e0ae972399d09b8", "score": "0.5554145", "text": "def openJsonTrajectory(file):\r\n with open(file) as f:\r\n content = f.read()\r\n return json.loads(content)", "title": "" }, { "docid": "e18e42ac63554b66b5227f714426f50d", "score": "0.554526", "text": "def read_guildsFile():\n try:\n with open('guilds.json') as guilds_file:\n guilds = json.load(guilds_file)\n return guilds\n except Exception as e:\n print(e)", "title": "" } ]
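A minimal standalone sketch of the json.load pattern that the reader snippets above repeat; the function name load_json and its path argument are illustrative placeholders, not taken from any of the passages.

import json

def load_json(path):
    # Open the file and parse its JSON contents into native Python objects.
    with open(path, "r") as fp:
        return json.load(fp)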
be307e889961de8b22b58dbcb9b623f4
Return the peak value (and/or the index of the peak) in a given 2D array
[ { "docid": "1179a2aecb80a7c30e687babc324e634", "score": "0.0", "text": "def _return_max(x, y, exp_dnu=None,):\n xx, yy = np.copy(x), np.copy(y)\n if list(yy) != []:\n if exp_dnu is not None:\n lst = list(np.absolute(xx-exp_dnu))\n idx = lst.index(min(lst))\n else:\n lst = list(yy)\n idx = lst.index(max(lst))\n else:\n return None, np.nan, np.nan\n return idx, xx[idx], yy[idx]", "title": "" } ]
[ { "docid": "1eb06e174c15dd61c4e2c4aa238fdf6e", "score": "0.783653", "text": "def findPeak(image) :\n maxValue = -100000.0\n maxIndex = [0,0]\n if(len(image.shape) == 2) :\n maxAxis = image.argmax(0)\n for pos in maxAxis :\n if(pos != 0) :\n for i in range(0,image.shape[1]) :\n if(image[pos][i] > maxValue) :\n maxValue = image[pos][i]\n maxIndex = [pos,i]\n print maxValue,maxIndex\n else :\n maxAxis = image.argmax(1)\n for pos in maxAxis[0] :\n if(pos != 0) :\n for i in range(0,image.shape[1]) :\n if(image[0][pos][i] > maxValue) :\n maxValue = image[0][pos][i]\n maxIndex = [pos,i]\n return maxIndex", "title": "" }, { "docid": "f11eb8e355821ea0d3ec49b2d4639a46", "score": "0.74140084", "text": "def peak(x, y, arr):\n\n x = x.flatten()\n y = y.flatten()\n a = arr[x, y].flatten()\n\n apeak = np.where(a == np.nanmax(a))\n\n return x[apeak], y[apeak]", "title": "" }, { "docid": "1a1bc02372d7f2a137cb5798b4ff9b26", "score": "0.72952276", "text": "def __dc_find_max_subarray(array, ibegin, iend):\n if ibegin < iend:\n imid = (ibegin + iend) // 2\n msaLeft = __dc_find_max_subarray(array, ibegin, imid)\n msaRight = __dc_find_max_subarray(array, imid + 1, iend)\n msaCombo = __dc_find_msa_linear(array, ibegin, iend)\n\n # Compare all 3 subarrays and determine the highest.\n # The highest should be the one that is returned.\n if msaCombo[2] >= msaLeft[2] and msaCombo[2] >= msaRight[2]:\n return msaCombo\n elif msaLeft[2] >= msaRight[2] and msaLeft[2] >= msaCombo[2]:\n return msaLeft\n else:\n return msaRight\n\n # Return original array\n return (ibegin, iend, array[ibegin])", "title": "" }, { "docid": "d0a87a060b34eb94ff19c0f4b424c6c3", "score": "0.7294032", "text": "def find_local_maxima(arr):\n \n # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710\n \n # neighborhood is simply a 3x3x3 array of True\n neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)\n local_max = ( filters.maximum_filter(arr, footprint=neighborhood) == arr )\n \n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion\n background = ( arr == 0 )\n eroded_background = morphology.binary_erosion(background,\n structure=neighborhood,\n border_value=1)\n \n # we obtain the final mask, containing only peaks, \n # by removing the background from the local_min mask\n detected_max = local_max ^ eroded_background # ^ = XOR\n \n return np.where(detected_max)", "title": "" }, { "docid": "d7c31940f0fc2a7d61a21b2c8d9fb18e", "score": "0.7271372", "text": "def get_max_index(a):\n # TODO: Your code here\n return np.argmax(a)", "title": "" }, { "docid": "a265aa4e04d7a84972e1826de95f79c8", "score": "0.7203673", "text": "def peak_gm(xdata,ydata):\n tdata=gm(xdata,ydata)\n return max(zip(xdata,ydata,tdata),key=lambda x: x[2])", "title": "" }, { "docid": "7afdf102cd764c2ffbcae7906d05e806", "score": "0.7155186", "text": "def __dc2_find_max_subarray(array, ibegin, iend):\n if ibegin == iend:\n return (ibegin, iend, array[ibegin])\n else:\n imid = (ibegin + iend) // 2\n (llx, lrx, lsum) = __dc2_find_max_subarray(array, ibegin, imid)\n (rlx, rrx, rsum) = __dc2_find_max_subarray(array, imid + 1, iend)\n (clx, crx, csum) = __dc2_find_max_crossing(array, ibegin, imid, iend)\n\n if (lsum >= rsum) and (lsum >= csum):\n return (llx, lrx, lsum)\n elif (rsum >= lsum) and (rsum >= csum):\n return (rlx, rrx, rsum)\n else:\n return (clx, crx, csum)", "title": "" }, { "docid": "f5395c3ef147c1f36755f51f544e3093", "score": "0.7134809", "text": "def peak_xy(self):\n\n return 
np.where(self.image == self.image.max())", "title": "" }, { "docid": "8777810c9840670b1bb12bf7c68de891", "score": "0.711172", "text": "def npmax(array: np.ndarray) -> Tuple[int]:\n arrayindex = array.argmax(1)\n arrayvalue = array.max(1)\n i = arrayvalue.argmax()\n j = arrayindex[i]\n return i, j", "title": "" }, { "docid": "7e119dad23ca44dec75d19145617d7e6", "score": "0.70545703", "text": "def maximum(self):\n # Find position of main peak\n self.max = np.where(self.derivative[self.threshold:] < 0)[0][0] + self.threshold", "title": "" }, { "docid": "9358248d96342eb276235536da5f1459", "score": "0.7044993", "text": "def max(self):\n # Find index and value of largest element\n idx = self.data.argmax()\n val = self.data.max()\n\n # Find frame that contains the brightest data point using `unravel`,\n # which maps the flat index `idx_px` onto the high-dimensional\n # indices (x,y,z).\n # What we want is index `z` (i.e., the frame index), given by the last\n # dimension in the return argument.\n idx_frame = np.unravel_index(idx, self.data.shape)[-1]\n\n # Convert index to time\n t = idx_frame * self.tsample\n\n return t, val", "title": "" }, { "docid": "c811f9fd9471886f160001d6e5d55074", "score": "0.70361423", "text": "def two_dim_peak(arr, row=0):\r\n #Base case [We consider single rows to have a peak]\r\n if len(arr) == 1:\r\n row_val, row_pos = one_dim_peak(arr[0])\r\n return (row_val, (row, row_pos))\r\n \r\n #Initialize midpoint and check for row being valid.\r\n mid = math.floor((len(arr) - 1)/2)\r\n up = mid > 0\r\n \r\n #Obtain the index j of the peak of mid row.\r\n row_val, row_pos = one_dim_peak(arr[mid])\r\n \r\n #Recursive case\r\n if arr[mid - 1][row_pos] > arr[mid][row_pos] and up:\r\n #Rows above mid contain peak\r\n return two_dim_peak(arr[0:mid], row)\r\n elif arr[mid + 1][row_pos] > arr[mid][row_pos]:\r\n #Rows below mid contain peak\r\n return two_dim_peak(arr[mid+1:len(arr)], row + mid + 1)\r\n else:\r\n #We've obtained a peak, so we return value and coordinates.\r\n return (arr[mid][row_pos], (mid, row_pos))", "title": "" }, { "docid": "15fa7908a98f6e95b9eb7ab730c9cf1d", "score": "0.70282257", "text": "def find_max2D(\n field): # 2D field\n #---------------------------------------------------------------------#\n # Finding the maximum subroutine #\n #---------------------------------------------------------------------#\n index = np.unravel_index(np.argmax(field, axis=None), field.shape)\n Loc1 = index[0]\n Loc2 = index[1]\n Val = np.amax(field)\n if Val != field[Loc1, Loc2]:\n print(\"**** Error in finding the maximum value\")\n sys.exit(1)\n\n return Val, Loc1, Loc2", "title": "" }, { "docid": "264ea0d4a870b6d4b22eafc9939ecf39", "score": "0.6984381", "text": "def argmax(a):\n return numpy.unravel_index(a.argmax(), a.shape)", "title": "" }, { "docid": "78c7c1d0f6097da9713b2061e4006609", "score": "0.6972969", "text": "def argmax(a):\n return np.unravel_index(a.argmax(), a.shape)", "title": "" }, { "docid": "75aea589c0a907ca2109497032955400", "score": "0.69618994", "text": "def maximum(arr1d):\n import numpy as np\n return np.max(arr1d)", "title": "" }, { "docid": "ff1726ab5921cd1d0ef34023868e5a0d", "score": "0.69530594", "text": "def greyMax(arr):\n return np.max(arr, axis = 2)", "title": "" }, { "docid": "ba260ba6ac35813cec3030c7d29e093d", "score": "0.69066364", "text": "def getMax(array):\n maximum = 0\n for v in array:\n maximum = max(v, maximum)\n return maximum", "title": "" }, { "docid": "e3ccbb72fcc5e67bc427ceb7d8d5d790", "score": "0.68962216", "text": "def 
get_largest_peak(self):\n return self.good_peaks_sorted[self.smallest_good_peak_idx]", "title": "" }, { "docid": "5f7a493caef2f78be86d76d64b694980", "score": "0.68961227", "text": "def max_from_zero_1d(array):\n val = max(np.max(array), np.min(array), key=abs)\n \n return val", "title": "" }, { "docid": "12a6a8b5fd6df900c5faafd49ce74d0d", "score": "0.68920493", "text": "def max_subset(arr):\n\tprev = current = 0\n\n\tfor i in arr:\n\t\tprev, current = current, max(prev + i, current)\n\treturn current", "title": "" }, { "docid": "e5b538123b659222287c923a4054e991", "score": "0.6890295", "text": "def peak_to_peak(self, a, axis=0):\n try:\n ans = np.max(a, axis) - np.min(a, axis)\n return ans\n except Exception as e:\n print(\"An exception occurred. Here is the message:\\n\", e)", "title": "" }, { "docid": "537f83519f5a7748a4a91f35e06380c8", "score": "0.688734", "text": "def cell_max(values):\n maxima = maximum_filter(values, size=2, mode='constant')\n indices = (slice(1, None),) * np.ndim(values)\n return maxima[indices]", "title": "" }, { "docid": "0a72af49b9f1cb6b23d92290de15a481", "score": "0.6882809", "text": "def max(self, a, axis=0):\n try:\n ans = np.max(a, axis)\n return ans\n except Exception as e:\n print(\"An exception occurred. Here is the message:\\n\", e)", "title": "" }, { "docid": "c697aa81ef39690bc252feee25d7bfd7", "score": "0.68698657", "text": "def get_bb_max(self):\n times = self.get_bb_max_times()\n points = [self.fp(t) for t in times]\n return np.max(points, axis=0)", "title": "" }, { "docid": "26c458eafb1b85faf39ed0c807dba0f6", "score": "0.6855567", "text": "def get_max_peak(self):\n for i in range(len(self.divided_voltage_array)):\n dump = []\n new_time_array = self.divided_time_array[i]\n new_voltage_array = self.divided_voltage_array[i]\n max_peaks = []\n\n tmp_max, tmp_min = -np.Inf, np.Inf # tmp var to hold max, min\n\n for index, (pos, curr_val) in enumerate(zip(new_time_array,\n new_voltage_array)):\n if curr_val > tmp_max: # if current value is > tmp\n max_pos = pos\n tmp_max = curr_val # tmp = current\n\n if curr_val < tmp_min:\n tmp_min = curr_val\n\n # Look for local max\n if curr_val < tmp_max:\n if tmp_max != np.Inf:\n if new_voltage_array[index:index +\n self.MIN_DIST].max() < tmp_max:\n # Found a valid peak\n dump.append(True)\n max_peaks.append([max_pos, tmp_max])\n # Setting flags to show that a peak was found\n tmp_min = np.Inf\n tmp_max = np.Inf\n if index + self.MIN_DIST >= len(new_voltage_array):\n # window exceeds signal length\n break\n continue\n # Now, look for local min - using this search\n # to eliminate smaller peaks that aren't local peaks\n # Prevents collecting the same max peak multiple times\n if curr_val > tmp_min:\n if tmp_min != -np.Inf:\n # Found a min point\n if new_voltage_array[index:index +\n self.MIN_DIST].min() > tmp_min:\n dump.append(False)\n # Setting flags to show that min point was found\n tmp_min = -np.Inf\n tmp_max = -np.Inf # Trigger max peak finding\n if index + self.MIN_DIST >= len(new_voltage_array):\n # window exceeds signal length\n break\n\n self.total_peaks.append(max_peaks)\n # Remove the false hit on the first value\n try:\n if dump[0]:\n max_peaks.pop(0)\n del dump\n except IndexError:\n # no peaks were found\n print(\"No peaks were found\")", "title": "" }, { "docid": "b98f6a26e17a137952926f7c1d18e71e", "score": "0.6831962", "text": "def maximum(array):\n if not array:\n return 0\n elif array[0] > maximum(array[1:]):\n return array[0]\n return maximum(array[1:])", "title": "" }, { "docid": 
"4bd722182fc203181e628e185afdb1d2", "score": "0.68240774", "text": "def get_max(array):\n np_array = np.asarray(array)\n if np_array.any():\n return np_array.max()\n return None", "title": "" }, { "docid": "cadf388f32da30118fdf216ec269390b", "score": "0.68213564", "text": "def get_max_index(array_of_arrays):\n index_of_max_elements = []\n for array in array_of_arrays:\n max_index = np.where(array == array.max())\n # print(type(max_index)) # max_index is a tuple of form ==> (array(index with max val), )\n index_of_max_elements.append(max_index[0].tolist()[0])\n return index_of_max_elements", "title": "" }, { "docid": "edf6fd9ee8f7645c7fef3394a479b998", "score": "0.681217", "text": "def findpeak_window(arr,w):\n if(len(arr) < w+1):\n raise IndexError(f\"Array length is less than {w+1}\")\n\n # let's check if edge elements are peak\n if arr[0]>max(arr[1 : (w+1)]) :\n return 0\n if arr[-1]>max(arr[-2:-(w+2):-1]):\n return len(arr)-1\n \n for i in range(w,len(arr)-w):\n if arr[i] > max( max(arr[ i-w : i ]), max(arr[i+1:i+w+1])):\n return i\n\n return('No Peak found!!')", "title": "" }, { "docid": "602ec92cb2ad31b22d2db0ac52f2b5a6", "score": "0.6791584", "text": "def get_max(array):\n max_value = array[0]\n for item in array:\n if item > max_value:\n max_value = item\n return max_value", "title": "" }, { "docid": "03a91daca5a3835a556464d29a2c00df", "score": "0.67722774", "text": "def maxsubarray_linear(array):\n return __dc_find_msa_linear(array, 0, len(array) - 1)", "title": "" }, { "docid": "ee4d591368958ab08d051e284b5e031e", "score": "0.67716855", "text": "def maximum(x, y):", "title": "" }, { "docid": "b7ddd2e7c915c920ceff249274f53b84", "score": "0.6762787", "text": "def index_max(data)-> int:\n mi = 0\n m = data[mi]\n for i in range(len(data)):\n if data[i] > m:\n m = data[i]\n mi = i\n return mi", "title": "" }, { "docid": "04028559236135971b34af54847f3a7a", "score": "0.6759029", "text": "def FindMaxima(array):\n localmax = []\n for i in range(1,len(array)-1):\n PreviousValue = array[i-1]\n CurrentValue = array[i]\n NextValue = array[i+1]\n\n if CurrentValue > PreviousValue and CurrentValue > NextValue:\n localmax.append(i)\n \n return localmax", "title": "" }, { "docid": "9ecd323658101c767b994b5c97b76d1e", "score": "0.6749714", "text": "def peakIndexInMountainArray(self, A: List[int]) -> int:\n cnt = len(A)\n for i in range(1, cnt - 1):\n if A[i - 1] < A[i] > A[i + 1]:\n return i", "title": "" }, { "docid": "711fb3d32c226ed41489f5f7d82c84a9", "score": "0.67462325", "text": "def get_bb_max(self):\n return np.max(self.points, axis=0)", "title": "" }, { "docid": "711fb3d32c226ed41489f5f7d82c84a9", "score": "0.67462325", "text": "def get_bb_max(self):\n return np.max(self.points, axis=0)", "title": "" }, { "docid": "711fb3d32c226ed41489f5f7d82c84a9", "score": "0.67462325", "text": "def get_bb_max(self):\n return np.max(self.points, axis=0)", "title": "" }, { "docid": "2106dc3ee7771eca4c71c5423b5d26a2", "score": "0.67284995", "text": "def max_dense(array2d, results):\n max_val = results[0]\n i_row_max = results[1]\n i_col_max = results[2]\n for i_row in range(array2d.shape[0]): \n for i_col in range(array2d.shape[1]): \n if array2d[i_row, i_col] > max_val:\n max_val = array2d[i_row, i_col]\n i_row_max = i_row\n i_col_max = i_col\n results[0] = max_val\n results[1] = i_row_max\n results[2] = i_col_max", "title": "" }, { "docid": "2106dc3ee7771eca4c71c5423b5d26a2", "score": "0.67284995", "text": "def max_dense(array2d, results):\n max_val = results[0]\n i_row_max = results[1]\n i_col_max = 
results[2]\n for i_row in range(array2d.shape[0]): \n for i_col in range(array2d.shape[1]): \n if array2d[i_row, i_col] > max_val:\n max_val = array2d[i_row, i_col]\n i_row_max = i_row\n i_col_max = i_col\n results[0] = max_val\n results[1] = i_row_max\n results[2] = i_col_max", "title": "" }, { "docid": "bf81e37410ede823a5257a27d9230da0", "score": "0.67134917", "text": "def indice_maior_element_numpy(vet_numpy):\n\n result = numpy.where(vet_numpy == numpy.amax(vet_numpy))\n # print('List of Indices of maximum element :', result[1])\n # print(str(result[1]).replace(\"[\", \"\").replace(\"]\",\"\"))\n return str(result[1]).replace(\"[\", \"\").replace(\"]\", \"\")", "title": "" }, { "docid": "46a40be3af15672d7d16b02d53c3dbbf", "score": "0.67112094", "text": "def get_max(A):\n max_el = A[0][0]\n max_row_ix, max_col_ix = 0, 0\n for row in range(len(A)):\n for col in range(len(A[row])):\n if A[row][col] > max_el:\n max_el = A[row][col]\n max_row_ix = row\n max_col_ix = col\n return max_row_ix, max_col_ix", "title": "" }, { "docid": "9b59e8e2c48d04294268d5af0044a423", "score": "0.67071784", "text": "def peak_detect(x, y):\n peaks = np.array([i for i in range(1, len(x)-1) if y[i-1] < y[i] and\n y[i+1] < y[i]])\n l = y[peaks] == max(y[peaks])\n mx, my = x[peaks][l][0], y[peaks][l][0]\n return mx, my", "title": "" }, { "docid": "726a157d44debdc8964e9c11451ba76f", "score": "0.67017907", "text": "def get_max(self):\n return np.amax(self.data[:self.length])", "title": "" }, { "docid": "3a75cee90778c89f77a45bd6adc4df44", "score": "0.6697448", "text": "def find_max(cls, img) -> (int,int):\n iarr, jarr = np.argmax(img, axis=0), np.argmax(img, axis=1)\n i = 0 if len(iarr[iarr > 0]) == 0 else int(np.bincount(iarr[iarr > 0]).argmax())\n j = 0 if len(jarr[jarr > 0]) == 0 else int(np.bincount(jarr[jarr > 0]).argmax())\n return i, j", "title": "" }, { "docid": "e66800cdc4deb1b4f13f0f09174d8373", "score": "0.6696874", "text": "def get_largest_bb(bb_arr):\n return bb_arr[np.argmax([bb_hw(bb[0])[2]*bb_hw(bb[0])[3] for bb in bb_arr ])]", "title": "" }, { "docid": "42738d53a96a5598b870ce6aab1ae8d4", "score": "0.66918874", "text": "def one_dim_peak(arr, pointer = 0):\r\n #Base case [We consider singletons to have a peak]\r\n if len(arr) == 1:\r\n return (arr[0], pointer)\r\n \r\n #Initialize our midpoint. 
I decided to use a left-bias in my binary search algorithm.\r\n mid = math.floor((len(arr) - 1)/2)\r\n left = mid > 0 #Prevent out-of-range error for edge cases\r\n \r\n #Recursive case\r\n if arr[mid - 1] > arr[mid] and left: \r\n #Left side of our midpoint has a peak\r\n return one_dim_peak(arr[0:mid], pointer)\r\n elif arr[mid + 1] > arr[mid]:\r\n #Right side of midpoint has a peak\r\n return one_dim_peak(arr[mid+1:len(arr)], pointer + mid + 1)\r\n else:\r\n #We've obtained a peak, so return its value and index.\r\n return (arr[mid], mid)", "title": "" }, { "docid": "1135be87bad7a1651f8d8fc78f4a8b40", "score": "0.6682772", "text": "def _find_max(waveforms):\n max_w = np.zeros(len(waveforms))\n for i in range(len(waveforms)):\n max_temp = -np.inf\n for j in range(len(waveforms[i])):\n if waveforms[i][j] > max_temp:\n max_temp = waveforms[i][j]\n max_w[i] = max_temp\n return max_w", "title": "" }, { "docid": "4bb7f69775c9b401552d7d63a2b50ce9", "score": "0.666739", "text": "def maxPeakPos(self):\n return self.peak.pairs[self.npairs-1].readPos", "title": "" }, { "docid": "93a55069a509c4ad589701530255a0ff", "score": "0.6665196", "text": "def maxSubArray(A):\n\tmax_temp, max_global = A[0], A[0]\n\tstart, end, n = 0,1,0\n\n\tfor i in range(1, len(A)):\n\t\tn = A[i]\n\t\tmax_temp = max(n, max_temp + n)\n\n\t\tif max_temp == n:\n\t\t\tstart = i\n\t\tmax_global = max( max_global, max_temp)\n\t\n\t\tif max_global == max_temp:\n\t\t\tend = i\t\n\n\tif end < start:\n\t\tstart = A.index(max_global)\n\t\tend = start\n\tprint(\"Subarray max: \"+ str(max_global)+\", <start,end>=< \"+str(start)+\", \"+str(end) +\">\")", "title": "" }, { "docid": "a4a9e3e5745743e79f59298cd78b6085", "score": "0.6660729", "text": "def peak_prominent(image):\n peaks = peak_local_max(image, num_peaks=1, indices=True)\n return peaks[0]", "title": "" }, { "docid": "87b515b593d88e0e58a4794915fea68d", "score": "0.66581696", "text": "def get_GridMax(grid):\n\n index = np.argmax(grid)\n M = np.amax(grid)\n index = np.unravel_index(index, grid.shape)\n\n return M, index", "title": "" }, { "docid": "392ccfd3e0622879ba2563e2c99f85f4", "score": "0.6647936", "text": "def ax_max(arr, axis):\n idx = arr.argmax(axis=axis)\n idx.data[np.all(arr.data == False, axis=axis)] = -1\n return(idx)", "title": "" }, { "docid": "4081d30ee747de11804b93cbda0e51de", "score": "0.6645241", "text": "def max(self):\n max_list = [] \n for i in self.__data[1::]:\n max_pos = np.argmax(i)\n max_list.append(max_pos)\n pl.figure(1)\n pl.plot(self.__trange, max_list)\n pl.xlabel(\"t\")\n pl.ylabel(\"max_U position\")\n pl.show()\n return max_list", "title": "" }, { "docid": "fb29b8357f97baf03303c7c16ce6c8eb", "score": "0.6644931", "text": "def find_maxima(y,number_of_peaks):\n blurred2 = gaussian_filter(y, sigma=2)\n peaks, _ = find_peaks(blurred2)\n prominences = peak_prominences(blurred2, peaks)[0]\n prominences_sorted = np.argsort(prominences)\n peaks = peaks[prominences_sorted[-number_of_peaks:]]\n \n peak_indices = np.argsort(peaks)\n return peaks[peak_indices]", "title": "" }, { "docid": "26b373b70eb3fe3290e8e29f32c4a73c", "score": "0.6637224", "text": "def find_max(self):\n return max(self.x, self.y, self.z, self.e)", "title": "" }, { "docid": "10d79db83d6a396274921c43154e7afe", "score": "0.6636901", "text": "def _ay_max_(self):\n ay = self.GetYaxis()\n return ay.GetXmax()", "title": "" }, { "docid": "d1b39cf9d1475a0fac6cae6e5cc03a7a", "score": "0.6633588", "text": "def get_rect_xmax(data):\r\n return max(data[0][0], data[1][0], data[2][0], data[3][0])", 
"title": "" }, { "docid": "273c935fcd2fdf5e19c8dbc7d0e80e2d", "score": "0.6630699", "text": "def get_max_score(self):\r\n self.scores = np.squeeze(self.scores)\r\n max_indice = np.argmax(self.scores)\r\n return self.scores[max_indice],max_indice", "title": "" }, { "docid": "af9cc69d82bd1505ec7d30837cf18448", "score": "0.6627828", "text": "def amax(self, arr: array_like) -> float:\n return np.amax(arr)", "title": "" }, { "docid": "041462ab26c7b94598a8f67b2194fb13", "score": "0.66238374", "text": "def find_max_are_new_approach(array):\n lenarr = len(array)\n prices = [(x * (i + 1), i) for i, x in enumerate(array)]\n sorted_prices = sorted(prices, key=lambda x: -x[0])\n\n max_area = None\n left, right = None, None\n prev_pos = None\n for i in range(lenarr):\n cur_pos = sorted_prices[i][1]\n if prev_pos is None:\n prev_pos = cur_pos\n else:\n if max_area is None:\n left = prev_pos if prev_pos <= cur_pos else cur_pos\n right = prev_pos if prev_pos > cur_pos else cur_pos\n\n if cur_pos > left:\n compare = (left, cur_pos)\n else:\n compare = (right, cur_pos)\n\n val_a, val_b = array[compare[0]], array[compare[1]]\n area = min(val_a, val_b) * abs(compare[0] - compare[1])\n if max_area is None or area >= max_area:\n max_area = area\n left, right = sorted(compare)\n\n return max_area", "title": "" }, { "docid": "70bf116eb99223ddc3349eeb63d18b2a", "score": "0.6620645", "text": "def find_max_area(array):\n # Heights dict -> O(n)\n hdict = {key: i for i, key in enumerate(array)}\n\n # Find min -> O(n), find max -> O(n)\n hmax, hmin = max(hdict), min(hdict)\n\n # Remove \"drops\" in hdict. It must be always rising in values -> O(k)\n curmax = 0\n for x in range(hmax, hmin - 1, -1):\n h_val = hdict.get(x)\n if h_val is None:\n continue\n\n if h_val > curmax:\n curmax = h_val\n else:\n hdict[x] = curmax\n\n max_h = 0\n max_area = 0\n\n # Find all meaningful areas in container -> O(n) * O(k)\n for pos, h in enumerate(array):\n if h < max_h:\n continue\n\n for x in range(max_h + 1, h + 1):\n max_d = hdict.get(x)\n if max_d is None:\n continue\n\n area = (max_d - pos) * x\n if area > max_area:\n max_area = area\n\n if h > max_h:\n max_h = h\n\n return max_area", "title": "" }, { "docid": "34fe99f068d71897ecddcbe32b2290e8", "score": "0.66038305", "text": "def max_(array: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, array)", "title": "" }, { "docid": "e767353d24753643ce7b157e15ab7b32", "score": "0.6596192", "text": "def get_rect_xmax(data):\n return max(data[0][0], data[1][0], data[2][0], data[3][0])", "title": "" }, { "docid": "bea1b6ee35fbe8fb24e220b54f2a1bd6", "score": "0.65918076", "text": "def argmax(x):\n return x.index(max(x))", "title": "" }, { "docid": "ccb6b025e819a3fe00b86872a9533066", "score": "0.65742695", "text": "def find_local_max(NB06012014):", "title": "" }, { "docid": "45939f86233fa3553c08ba0e0f6e5590", "score": "0.6563722", "text": "def maximum(self):\n return np.max(self.data)", "title": "" }, { "docid": "59c45b7f98b99272e9cd2a406aa701c8", "score": "0.6562234", "text": "def extrema(arr, mode='max'):\n \n if mode not in ['max', 'min']:\n print(\"Mode must be either `max` or `min`. 
Using `max`.\")\n mode = 'max'\n y, x = np.unravel_index(getattr(np, 'arg%s' % mode)(arr), arr.shape)\n return x, y", "title": "" }, { "docid": "6b2acdd1561efb4b8a05f06a32f15278", "score": "0.6561486", "text": "def _sliceid_of_highest_peak_within_tolerance(maxvals, dist, tolerance):\n sub_sliceidxs=np.where(dist<tolerance)[0] # zero-based\n if len(sub_sliceidxs)==0:\n print(\"ERROR: No peak with suitable location found. Check expected peak locations and peak tolerance.\")\n return None\n \n sub_maxvals=maxvals[sub_sliceidxs]\n max_id=sub_sliceidxs[np.argmax(sub_maxvals)]+1 # convert to one-based\n\n return max_id", "title": "" }, { "docid": "dbc3bc9670a5e8be9f9dbca499ca0844", "score": "0.65557045", "text": "def get_max(data_slice: np.ndarray) -> (int, float):\n idx = np.argmax(data_slice, axis=0)\n return (idx, data_slice[idx])", "title": "" }, { "docid": "b972a5e44668edf5488be8d975d4a8ca", "score": "0.6551117", "text": "def argmax(array):\n max_indices = np.arange(len(array))[array == np.max(array)]\n idx = int(hashlib.sha256(np.asarray(array).tobytes()).hexdigest(),16) % len(max_indices)\n return max_indices[idx]", "title": "" }, { "docid": "8b4a3e991976225091b09882f158174a", "score": "0.65454113", "text": "def imregionalmax(image, ksize=3):\n filterkernel = np.ones((ksize, ksize)) # 8-connectivity\n reg_max_loc = peak_local_max(image,\n footprint=filterkernel, indices=False,\n exclude_border=0)\n return reg_max_loc.astype(np.uint8)", "title": "" }, { "docid": "e29799ce6019196998315153fcf78675", "score": "0.6520396", "text": "def compute_max_projection(data):\n\n return data.max(axis=0)", "title": "" }, { "docid": "fc60d4fc1a178f6198fea387cad6ef82", "score": "0.6519037", "text": "def peak_dispersion(self):\n\n return self.dispersion[np.argmax(self.fluxden)]", "title": "" }, { "docid": "5ec0bafde2a6f348157705f46ba7b729", "score": "0.6512428", "text": "def maxSubArray(arr):\r\n max_so_far = arr[0]\r\n curr_max = max_so_far\r\n for i in range(1, len(arr)):\r\n curr_max += arr[i]\r\n if curr_max < arr[i]:\r\n curr_max = arr[i]\r\n if curr_max > max_so_far:\r\n max_so_far = curr_max\r\n return max_so_far", "title": "" }, { "docid": "496b1aeedcebcc7d07cf0e8c6bf86088", "score": "0.6505054", "text": "def abs_max(arr):\n \n return (arr.T / np.abs(arr.max(axis=-1))).T", "title": "" }, { "docid": "dd8c79136252efa13c623653d54db44c", "score": "0.65048295", "text": "def findClusterMax(red, clusters):\n peaks = []\n for clus in clusters:\n adu_vals = []\n for pt in clus:\n adu_vals.append(red[pt[0], pt[1]])\n max_adu_idx = np.where(adu_vals == max(adu_vals))[0][0]\n peaks.append(clus[max_adu_idx])\n \n return np.array(peaks)", "title": "" }, { "docid": "43ebd8f13a38a5e46637e0255c2f451a", "score": "0.65046644", "text": "def arg_maxima(x):\n max_value = np.max(x)\n return np.argwhere(x == max_value).reshape(-1)", "title": "" }, { "docid": "eb0cc433d594398694d9cb20e2883e80", "score": "0.64983165", "text": "def findMaxMS1Intensity(spectrum, peak):\n maxPeak = None\n maxIntensity = 0\n for refPeak, intensity in zip(spectrum, spectrum.values()):\n heavyLightIdentifyer = refPeak[0]\n if heavyLightIdentifyer=='h':\n continue\n if intensity>maxIntensity:\n maxPeak = peak\n maxIntensity = intensity\n return maxPeak, maxIntensity", "title": "" }, { "docid": "b964c1e986efc40cf3c88d9af753e003", "score": "0.6497563", "text": "def get_max_station(self):\n self.data = self.df.iloc[:, 1:].to_numpy(dtype=float)\n self.data[abs(self.data-float(self.mask_val)) < 1e-5] = numpy.nan\n data_sum = numpy.sum(self.data, 
axis=0)\n max_idx = numpy.argmax(data_sum)\n print(\"Station with the largest time-average value: \" + str(max_idx))\n\n return max_idx", "title": "" }, { "docid": "a2a589029c6208708cb44a793f751c5f", "score": "0.6489238", "text": "def find_maxmum_subarray(data,low,high):\n\tif low == high:\n\t\treturn (low,high,data[low])\n\telse:\n\t\t(left_low,left_high,left_sum) = find_maxmum_subarray(data,low,(low + high) / 2)\n\t\t(right_low,right_high,right_sum) = find_maxmum_subarray(data,int(ceil((low + high) / 2.0)),high)\n\t\t(cross_low,cross_high,cross_sum) = find_max_cross_subarray(data,low,(low + high) / 2,high)\n\n\t\tif left_sum >= right_sum and left_sum >= cross_sum:\n\t\t\treturn (left_low,left_high,left_sum)\n\t\telif right_sum >= left_sum and right_sum >= cross_sum:\n\t\t\treturn (right_low,right_high,right_sum)\n\t\telse:\n\t\t\treturn (cross_low,cross_high,cross_sum)", "title": "" }, { "docid": "b2b1447a150893e49fdde25b7175cd52", "score": "0.6479836", "text": "def max_val(self):\n return np.max([self._max_intensity_bound, self._zero_intensity_bound])", "title": "" }, { "docid": "01d00ca6230715781c7dab0dde5135f8", "score": "0.64770097", "text": "def max_subarray(\n arr: Sequence[float], low: int, high: int\n) -> tuple[int | None, int | None, float]:\n if not arr:\n return None, None, 0\n if low == high:\n return low, high, arr[low]\n\n mid = (low + high) // 2\n left_low, left_high, left_sum = max_subarray(arr, low, mid)\n right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)\n cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)\n if left_sum >= right_sum and left_sum >= cross_sum:\n return left_low, left_high, left_sum\n elif right_sum >= left_sum and right_sum >= cross_sum:\n return right_low, right_high, right_sum\n return cross_left, cross_right, cross_sum", "title": "" }, { "docid": "ef7ffcd7b7aa4ad6dba3554a3d7cb05b", "score": "0.6465262", "text": "def peak(self) -> Tuple[MeasureInput, MeasureResult]:\n assert self._data\n return list(self._data.values())[0][0][2]", "title": "" }, { "docid": "bf222d13c91ce80ebab7dbe4038b5c5f", "score": "0.645679", "text": "def _abs_max(A: np.array, index):\n the_i, the_j = index, index\n\n for i in range(index, len(A)):\n for j in range(index, len(A)):\n if abs(A[i, j]) > abs(A[the_i, the_j]):\n the_i = i\n the_j = j\n\n return the_i, the_j", "title": "" }, { "docid": "203368b5d6797303db3a25fc76c4b34b", "score": "0.64549", "text": "def multi_peak_max(\n PCM: FloatArray, n: int = 2\n) -> Tuple[FloatArray, FloatArray, FloatArray]:\n row, col = np.unravel_index(np.argsort(PCM.ravel()), PCM.shape)\n vals = PCM[row[-n:][::-1], col[-n:][::-1]]\n return row[-n:][::-1], col[-n:][::-1], vals", "title": "" }, { "docid": "e76f9dbfcf8b4ffc5c741d9fe269a534", "score": "0.64541423", "text": "def argmax(self):\n\t\treturn tuple(centres[index] for centres, index in zip(self.centres(), numpy.unravel_index(self.array.argmax(), self.array.shape)))", "title": "" }, { "docid": "a9a201a757866189de81e62a53892630", "score": "0.6452196", "text": "def recursive_maximum_subarray(array: List, low: int, high: int) -> Tuple:\n\n if len(array) == 0:\n return low, high, 0\n elif high == low:\n return low, high, array[low]\n else:\n mid = (low + high) // 2\n\n left_low, left_high, left_sum = recursive_maximum_subarray(array, low, mid)\n right_low, right_high, right_sum = recursive_maximum_subarray(array, mid + 1, high)\n cross_low, cross_high, cross_sum = find_max_crossing_subarray(array, low, mid, high)\n\n if left_sum >= right_sum and left_sum >= 
cross_sum:\n return left_low, left_high, left_sum\n elif right_sum >= left_sum and right_sum >= cross_sum:\n return right_low, right_high, right_sum\n else:\n return cross_low, cross_high, cross_sum", "title": "" }, { "docid": "8cf356551e41223cd128a53c2eb2c86e", "score": "0.64467824", "text": "def solution_mine(array_):\n peaks_array, peaks_index = find_peaks(array_)\n if len(peaks_index) < 2:\n # len(peaks_index) == 0 ==>> No peaks\n # len(peaks_index) == 1 ==>> One peak\n return len(peaks_index)\n\n peak_distance = find_peak_distance(peaks_index)\n sorted_peak_distance = sorted(peak_distance)\n # Max distance between two peaks\n max_mid_distance = sorted_peak_distance[-1]\n\n # The left most peak's distance to the start point\n left_distance = peaks_index[0] + 1\n # The right most peak's distance to the end point\n right_distance = len(array_) - peaks_index[-1]\n\n max_distance = max(left_distance, right_distance, max_mid_distance//2)\n if max_distance == 0:\n return 0\n\n max_possible_group_count = len(array_)//max_distance\n while len(array_)%max_possible_group_count != 0:\n # make sure max_possible_group_count can be divided by the length of array_\n max_possible_group_count -= 1\n\n if max_possible_group_count == 1:\n return 1\n\n # We know len(array_)//max_possible_group_count == int(len(array_)/max_possible_group_count)\n # i.e. max_possible_group_count is divisible by len(array_)\n element_count_in_each_group = len(array_)//max_possible_group_count\n\n true_or_false = verify(\n array_, peaks_array,\n max_possible_group_count,\n element_count_in_each_group\n )\n # if true_or_false is True, then each group contains at least one peak\n while not true_or_false:\n max_possible_group_count -= 1\n if max_possible_group_count == 1:\n return max_possible_group_count\n\n while len(array_)%max_possible_group_count != 0:\n # Again, make sure that max_possible_group_count is divisible by len(array_)\n max_possible_group_count -= 1\n\n if max_possible_group_count == 1:\n return max_possible_group_count\n\n element_count_in_each_group = len(array_)//max_possible_group_count\n true_or_false = verify(\n array_,\n peaks_array,\n max_possible_group_count,element_count_in_each_group\n )\n\n return max_possible_group_count", "title": "" }, { "docid": "7c4d561180ee189d7460def2eaaa6181", "score": "0.64447075", "text": "def get_max_tile(grid):\r\n value = np.amax(grid)\r\n row, col = np.where(grid == value)\r\n max_coor = list(zip(row, col))\r\n return value, max_coor", "title": "" }, { "docid": "28e341ee259288d16d9b177c1edc2930", "score": "0.6444431", "text": "def peak_detect(y, delta, x = None):\n\n maxtab = []\n mintab = []\n\n if x is None:\n x = np.arange(len(y))\n\n y = np.asarray(y)\n mn, mx = np.Inf, -np.Inf\n mnpos, mxpos = np.NaN, np.NaN\n lookformax = True\n\n for i in np.arange(len(y)):\n this = y[i]\n if this > mx:\n mx = this\n mxpos = x[i]\n if this < mn:\n mn = this\n mnpos = x[i]\n\n if lookformax:\n if this < mx-delta:\n maxtab.append((mxpos, mx))\n mn = this\n mnpos = x[i]\n lookformax = False\n else:\n if this > mn+delta:\n mintab.append((mnpos, mn))\n mx = this\n mxpos = x[i]\n lookformax = True\n return np.array(maxtab) #, np.array(mintab). 
For now, only retun the PEAKS, not troughs", "title": "" }, { "docid": "4fa405f6b0d956ce98f1b9dedf54897c", "score": "0.6426033", "text": "def max_peak(self):\n return self.max_height - self.mean_height", "title": "" }, { "docid": "7ad525f692eafa1922f364e950b4dab2", "score": "0.64227605", "text": "def argmax(arr):\n if isinstance(arr, dict):\n return max(arr.items(), key=operator.itemgetter(1))[0]\n else:\n return np.argmax(arr)", "title": "" }, { "docid": "b0dc9fd019b8649727ea7b3e2ef5fa43", "score": "0.6416774", "text": "def find_xy(result):\n ij = np.unravel_index(np.argmax(result), result.shape)\n return ij[::-1]", "title": "" }, { "docid": "b1806d6e32d2b8eecc43b9ff3a55de8d", "score": "0.64161193", "text": "def solution(array):\n\n # we keep track of what is the biggest number, to make sure not to return 0 when there is only negative numbers\n max_item = -1000000\n\n max_ending = max_slice = 0\n for a in array:\n max_item = max(max_item, a)\n max_ending = max(0, max_ending + a)\n max_slice = max(max_slice, max_ending)\n\n # if we only have negative numbers, we return the maximum value\n if max_item < 0:\n return max_item\n\n return max_slice", "title": "" }, { "docid": "72dcfbe2505f89622c2e9a824b0c2253", "score": "0.64119065", "text": "def __max__(self):\r\n return max(self._array)", "title": "" }, { "docid": "0f21455edc79b13dc2511d36c5eae768", "score": "0.6410278", "text": "def max_(self, column):\n index = self.__columns[column][1]\n return max(map(ROW[index], self.__data_area.values()))", "title": "" }, { "docid": "c0dac83af8b4243c1b2020fcca8505ef", "score": "0.6408961", "text": "def upper_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:\n return lra(data=highest, shape=reference_data.shape)", "title": "" }, { "docid": "8263d263ef676fc3e4ccf7c9add9f1c2", "score": "0.64085245", "text": "def errmax(values):\n return np.max(array(abs(values)))", "title": "" }, { "docid": "5687350bd03d72240407abba8c7ed8f8", "score": "0.64040446", "text": "def detect_peak_simple(array, lthres):\n\n ind = np.where(array > lthres)[0].tolist()\n jumps = [ind.index(x) for x, y in zip(ind, ind[1:]) if y - x != 1]\n runs = np.split(ind, [i+1 for i in jumps])\n if runs[0].shape[0] > 0:\n peakindices = [[elem[0], elem[-1]] for elem in runs]\n else:\n peakindices = []\n return peakindices", "title": "" } ]
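A minimal sketch of the operation the query above describes, assuming a plain NumPy ndarray as input: take the flat argmax and unravel it back into a (row, col) index. The helper name peak_2d is illustrative and not taken from any of the passages.

import numpy as np

def peak_2d(arr):
    # Flat index of the largest element, converted back to 2D coordinates.
    flat_idx = np.argmax(arr)
    row, col = np.unravel_index(flat_idx, arr.shape)
    return arr[row, col], (row, col)

# e.g. peak_2d(np.array([[1, 7], [3, 5]])) -> (7, (0, 1))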
be816247720df41620388f43b0eda2db
Recursively creates the decision tree. This method assumes that the dataTuples are sorted in descending order of feature entropy.
[ { "docid": "91b41d7e4a24be12f78539f37179ebd5", "score": "0.67282295", "text": "def createTree(dataTuples):\n\t\n\tdataTuples = dataTuples[:]\n\n\tif len(dataTuples) == 0:\n\t\t# Return a value of 0 which will \n\t\t# later be replaced with the kind of tree that will be found at this leaf\n\t\t#print \"LEAF CREATED\"\n\t\treturn CoverageNode(0)\n\t\t\n\ttree = CoverageNode(dataTuples[0][1])\n\tbins = dataTuples[0][2]\n\t\n\tshavedTuples = dataTuples[1:len(dataTuples)]\n\t\n\tfor bin in bins:\n\t\ttree.addChild(bin, createTree(shavedTuples))\n\t\n\treturn tree", "title": "" } ]
[ { "docid": "2f3829909b554b823e81942a3ca0fd1f", "score": "0.718289", "text": "def _gen_decision_tree(self, data, cur_depth):\n label = data[:, -1]\n print(type(23432))\n print(len(self.f_info))\n # ! Case 1: Leaf nodes -> generate predictions\n # Case 1.1: pure node -> return label as prediction\n if len(np.unique(label)) == 1:\n return int(label[0])\n\n # Case 1.2: Max depth reached -> vote\n if cur_depth == self.max_depth:\n return int(sum(label == 1) > sum(label == 0))\n\n # Case 1.3: Pre-pruning\n if self.pre_pruning >= 1 and data.shape[0] < self.pre_pruning:\n return int(sum(label == 1) > sum(label == 0))\n\n # ! Case 2: Root node or internal nodes -> generate child decision trees\n # Step 1: calculate entropy and find feature to split\n if self.method == 'information_gain':\n info_gain_dict = {feat_id: info_gain(data, feat_id, self.f_info[feat_id].type)[0]\n for feat_id in self.cand_feat_list[cur_depth]}\n elif self.method == 'gain_ratio':\n info_gain_dict = {feat_id: info_gain_ratio(data, feat_id, self.f_info[feat_id].type)\n for feat_id in self.cand_feat_list[cur_depth]}\n feat_to_split = get_max_ind_in_dict(info_gain_dict)\n feat_type_to_split = self.f_info[feat_to_split].type\n # Step 2: split data and remove feature (nominal attributes only)\n if feat_type_to_split == 'NOMINAL':\n data_splits = split_data_nominal(data, feat_to_split)\n self._remove_feat(feat_to_split, cur_depth)\n elif feat_type_to_split == 'CONTINUOUS':\n _, threshold = find_threshold_continuous(data, feat_to_split)\n data_splits = split_data_by_threshold(data, feat_to_split, threshold)\n # Corner cases: splits not valid (can not split), return voting results\n for split in data_splits:\n if data_splits[split].shape[0] == 0:\n return int(sum(label == 1) > sum(label == 0))\n # Step 3: generate tree\n # if np.unique(data_splits.values()) == 1:\n # return float(int(label[0]))\n return {'feat_id': feat_to_split, 'children':\n {condition: self._gen_decision_tree(data_splits[condition], cur_depth + 1)\n for condition in data_splits}}", "title": "" }, { "docid": "3996f66851bfa62b6b7a817750d5a21c", "score": "0.7002513", "text": "def build_tree(data, impurity, chi_value=1):\r\n root = DecisionNode(None, None, chi_value)\r\n current_node = None\r\n\r\n # the queue holds tuples of unset nodes and data subsets\r\n queue = [(root, data)]\r\n\r\n while len(queue) != 0:\r\n # remove node from queue\r\n current_node = queue.pop()\r\n node = current_node[0]\r\n data_subset = current_node[1]\r\n\r\n # if the node is not pure\r\n if not current_node[0].pure:\r\n\r\n # if the node has single row dataset\r\n\r\n if np.shape(data_subset)[0] == 1 or impurity(data_subset) == 0:\r\n node.set_leaf(data_subset[0, -1], np.shape(data_subset)[0])\r\n\r\n else:\r\n\r\n # find best attr and threshold, then set it as the node values\r\n best_attr, threshold = find_best_attribute(data_subset, impurity)\r\n node.set_node_values(best_attr, threshold)\r\n\r\n # create new children nodes\r\n left_node = DecisionNode(None, None, chi_value)\r\n right_node = DecisionNode(None, None, chi_value)\r\n\r\n # check if the split is perfect\r\n left_set, right_set = split(data_subset, data_subset[:, best_attr] < threshold)\r\n is_pure = compute_set_impurity(data_subset, left_set, right_set, impurity) == 0\r\n\r\n # check for chi square values. 
if not good enough, create leaf\r\n chi_square_val = calc_chi_square(data_subset, left_set, right_set)\r\n if chi_value < 1 and chi_square_val < chi_table[node.chi]:\r\n node.set_leaf(data_subset[0, -1], np.shape(data_subset)[0])\r\n\r\n # if the split is perfect, create two leaves\r\n elif is_pure:\r\n left_node.set_leaf(left_set[0, -1], np.shape(left_set)[0])\r\n right_node.set_leaf(right_set[0, -1], np.shape(right_set)[0])\r\n\r\n # else, add the two nodes into the queue\r\n else:\r\n queue.append((left_node, left_set))\r\n queue.append((right_node, right_set))\r\n\r\n # finally, add the new nodes as children\r\n node.add_child(left_node)\r\n node.add_child(right_node)\r\n\r\n return root", "title": "" }, { "docid": "13eb6de6386cb6e833dc669f990f9ab9", "score": "0.6761607", "text": "def build_tree(rows):\n\n # Try partitioing the dataset on each of the unique attribute,\n # calculate the information gain,\n # and return the question that produces the highest gain.\n gain, question = find_best_split(rows, header)\n\n # Base case: no further info gain\n # Since we can ask no further questions,\n # we'll return a leaf.\n if gain == 0:\n return Leaf(rows)\n\n # If we reach here, we have found a useful feature / value\n # to partition on.\n true_rows, false_rows = partition(rows, question)\n\n # Recursively build the true branch.\n true_branch = build_tree(true_rows)\n\n # Recursively build the false branch.\n false_branch = build_tree(false_rows)\n\n # Return a Question node.\n # This records the best feature / value to ask at this point,\n # as well as the branches to follow\n # dependingo on the answer.\n return DecisionNode(question, true_branch, false_branch)", "title": "" }, { "docid": "0066c3fb457865441d4f4b0b3d50d1b1", "score": "0.6735579", "text": "def build_tree(self):\r\n # List of all rows/columns in X\r\n row_list = list(range(self.rows))\r\n x_col_list = list(range(self.xcols))\r\n \r\n # Randomly columns in X\r\n x_col_rand = np.random.choice(x_col_list, self.n_features, False) # Sample without replacement\r\n \r\n \r\n # Make sure that the corresponding y's chosen for the rows have 0's and 1's\r\n \r\n uniquey = 0\r\n while uniquey != 2:\r\n x_row_rand = np.random.choice(row_list, self.sample_size) # Sample with replacement\r\n y_train = self.y_train.iloc[x_row_rand, :]\r\n uniquey = len(y_train.iloc[:,0].unique())\r\n \r\n # Fit decision tree with sampled data\r\n x_train = self.x_train.iloc[x_row_rand, x_col_rand]\r\n tree = ct.DecisionTree(x_train, y_train, self.criterion)\r\n return tree", "title": "" }, { "docid": "8c7dc9f15afd22fd6874918df957bb0f", "score": "0.653226", "text": "def build_tree(dataset, score_function=entropy, max_depth=0, min_size=1):\n # rows in the dataset, either whole dataset or part of dataset during recursion\n # score_function = impurity measurement criteria. 
default=entropy\n # the input is a function\n if len(dataset) == 0: return decisionNode() # len(dataset) is the number of units in a set\n current_score = score_function(dataset) # the current impurity, as calculated by the score function\n\n # set up some variables to track the best criteria\n best_gain = 0.0\n best_criteria = None\n best_sets = None\n\n column_count = len(dataset[0]) - 1\n # count the first row to get the num of attributes/columns\n # -1 takes out the last column which is the ylabel\n\n # find best gain by going through all columns and comparing impurity\n for col in range(0, column_count):\n # generate the list of all possible different values in\n # the considered column\n global column_values # for debugging purposes\n column_values = {}\n\n for row in dataset:\n column_values[row[col]] = 1\n # fill the dictionary with column values from each row\n # '1' is arbitrary, we just need the keys\n\n # now try dividing the rows up for each value in this column\n # loops through each value and calculates information gain\n # keep best gain, criteria and sets\n for value in column_values.keys():\n # the var value here is the keys in the dict\n (set1, set2) = divide_set(dataset, col, value)\n # make split and put them in set1 and set2\n\n # information gain\n p = float(len(set1))/len(dataset)\n # p is the size of a child set relative to its parent\n # why calculate p? because it is used as the weight multiplier\n # for information gain (below)\n\n # calculate how much information we gain from splitting the\n # parent node into this particular set of child nodes\n gain = current_score - (p * score_function(set1)) - ((1 - p) * score_function(set2))\n # cf. formula information gain (what is cf?)\n # current score is the entropy of the node before splitting\n '''\n formula for IG:\n IG(btwn parent and children) = entropy_parent - (entropy_child1 * proportion_child1) -\n (entropy_child2 * proportion_child2)\n \n information gained by splitting the dataset by this* feature is calculated\n by taking the entropy(messiness) of the parent node and subtracting the entropy\n of both children weighted by the number of rows they represent (so if the messiness\n of both children add up to a higher entropy, it would be considered information loss,\n and would not be used).\n \n *this being the current column in the iteration\n '''\n\n # if set is not empty, and the gain is improving over the previous\n # measure of impurity, make this new gain the best gain\n if gain > best_gain and len(set1) > 0 and len(set2) > 0:\n # set must not be empty\n best_gain = gain\n best_criteria = (col, value) # remember, value is column_values.keys()\n best_sets = (set1, set2)\n\n # make branch according to the split that makes gives the best gain\n if best_gain > 0:\n # make sub branches\n # by calling the same definition (recursion)\n trueBranch = build_tree(best_sets[0])\n falseBranch = build_tree(best_sets[1])\n return decisionNode(col=best_criteria[0],\n value=best_criteria[1],\n tb=trueBranch,\n fb=falseBranch)\n else:\n return decisionNode(results=unique_counts(dataset))\n # if branch is no longer 'learning'(splits don't achieve better purity),\n # return the decision node with results as properties.\n # this is the leaf. 
current implementation splits until each node is 100% pure", "title": "" }, { "docid": "b7df289377937544d77d270d39cbc709", "score": "0.64953923", "text": "def build_tree(data, min_samples, recursions=0):\n\n # check whether we can create a leaf\n if (check_purity(data)) or recursions > 500:\n return np.mean(data[:, -1])\n if len(data) <= min_samples:\n write_output(outputfile, f'data {data} \\nhas less than {min_samples} samples, prune subtree')\n return np.mean(data[:, -1])\n\n # else create an internal node\n else:\n recursions += 1\n\n # split the data on the best possible split\n potential_splits = get_potential_splits(data)\n split_column, split_value = get_best_split(data, potential_splits)\n data_below, data_above = split_data(data, split_column, split_value)\n\n # determine the type of internal node\n col_name = col_names[split_column]\n feature_type = feature_types[split_column]\n if feature_type == \"numeric\":\n # create a <= condition on a numeric data type\n condition = f\"{col_name} <= {split_value}\"\n elif feature_type == \"categorical\":\n # create an == condition on a categorical data type\n condition = f\"{col_name} == {split_value}\"\n else:\n # something went wrong\n raise RuntimeError(\"Unrecognized feature type\")\n\n # recursively build subtrees\n sub_tree = {condition: []}\n condition_true = build_tree(data_below, min_samples, recursions)\n condition_false = build_tree(data_above, min_samples, recursions)\n\n # append to the subtree options (true is first, false is second)\n sub_tree[condition].append(condition_true)\n sub_tree[condition].append(condition_false)\n\n return sub_tree", "title": "" }, { "docid": "8ffd320facc562151563fdc7af4133b4", "score": "0.64858073", "text": "def build_tree(data, feature_names, min_samples_leaf=5, max_depth=4, current_depth=0, random_subset=False):\n raise NotImplementedError('Problem 4 Incomplete')", "title": "" }, { "docid": "2ef77e58ef0dc7718043f007381e9933", "score": "0.62830585", "text": "def create_tree(self) -> DecisionTree:\n idxs = np.random.permutation(len(self._train.y))[\n :self.forest.sample_sz]\n f_idxs = np.random.permutation(self._train.x.columns)[\n :self._n_features]\n\n train = DataPair(\n self._train.x.iloc[idxs], self._train.y.iloc[idxs].squeeze())\n\n tree_params = TreeArgs(self._n_features,\n np.array(range(self.forest.sample_sz)),\n f_idxs, train,\n self.forest.max_depth, self.forest.min_leaf)\n return DecisionTree(tree_params)", "title": "" }, { "docid": "ebec1ea5534174e95c4d865bbb8dae57", "score": "0.6261098", "text": "def run_decision_tree(data):\n \n # Filter out data to get labels 1 and 2 only\n data_1_2 = data[data[:,0]<=2]\n\n # Extract and binarize labels \n binary_labels_1_2 = np.where(data_1_2[:,0] == 1,0, 1)\n \n scaler_1_2 = preprocessing.StandardScaler()\n data_1_2 = scaler_1_2.fit_transform(data_1_2)\n data_1_2[:,0] = 1\n perceptron_1_2 = Perceptron(data_1_2, binary_labels_1_2, epochs=EPOCHS_1_2)\n weights_1_2 = perceptron_1_2.train()\n\n # delete data as we dont need it anymore\n del data_1_2\n del binary_labels_1_2\n\n # Filter out data to get labels 3 and 4 only\n data_3_4 = data[data[:,0] > 2]\n\n # Extract and binarize labels \n binary_labels_3_4 = np.where(data_3_4[:,0] == 3, 0, 1)\n \n scaler_3_4 = preprocessing.StandardScaler()\n data_3_4 = scaler_3_4.fit_transform(data_3_4)\n data_3_4[:,0] = 1\n perceptron_3_4 = Perceptron(data_3_4, binary_labels_3_4, epochs=EPOCHS_3_4)\n weights_3_4 = perceptron_3_4.train()\n del data_3_4\n del binary_labels_3_4\n\n # Train AB class split set\n Y = 
data[:,0]\n binary_classes = np.where(Y <= 2, 0, 1)\n scaler = preprocessing.StandardScaler()\n data = scaler.fit_transform(data)\n data[:,0] = 1\n perceptron = Perceptron(data, binary_classes, epochs=EPOCHS_A_B)\n weights = perceptron.train()\n\n # Decision tree: run classifcation on full data set\n predicted_labels = np.empty(data.shape[0])\n for i, x in enumerate(data):\n # Classify between A and B\n firstNode = Perceptron.classify(x, weights)\n \n # If class is A...\n if firstNode == 0:\n secondNode = Perceptron.classify(x, weights_1_2)\n predicted_labels[i] = 2 if secondNode == 1 else 1\n else:\n secondNode = Perceptron.classify(x, weights_3_4)\n predicted_labels[i] = 4 if secondNode == 1 else 3\n accuracy = Perceptron.get_accuracy(Y, predicted_labels)\n return accuracy", "title": "" }, { "docid": "11b52cef10c64a71b34734a63f4d48eb", "score": "0.62493026", "text": "def build( self, rows ):\n\n # Determine the best attribute and split value that gives most info gain\n gain, question = self.find_best_split( rows )\n\n # This is the base case, no further info gain to be made. Stop Recursion\n if gain == 0:\n return Node(0, rows)\n\n # Partition dataset based on best question\n true_rows, false_rows = self.partition( rows, question )\n\n # Build the true branch via recursion\n true_branch = self.build( true_rows )\n\n # Build the false branch via recursion\n false_branch = self.build( false_rows )\n\n # Return the Decision node, with references to question and branchs\n return Node(1, None, question, true_branch, false_branch)", "title": "" }, { "docid": "bc177f940562f52c29d1f8810fdb43bc", "score": "0.61954343", "text": "def decision_tree(self):\n sc = StandardScaler() # assigning standard scaler for independent variables\n # fitting and scaling independent variable\n x_train = sc.fit_transform(self.x_train_)\n x_test = sc.transform(self.x_test_)\n # choosing related classifier\n classifier_choice = DecisionTreeClassifier(criterion = self.criterion_, \\\n random_state = self.random_state_)\n classifier_choice.fit(x_train, self.y_train_) # training related classifier\n y_pred = classifier_choice.predict(x_test) # predicting results for given test set\n # outputing classifier, prediction, accuracy score of classifier\n return classifier_choice, y_pred, accuracy_score(self.y_test_,y_pred)", "title": "" }, { "docid": "b7e7e8fd2e15edd90140d59d12762987", "score": "0.6150059", "text": "def train_tree(self, data):\n logging.info('Training tree {}'.format(data[0] + 1))\n tree = DecisionTreeClassifier(max_depth=self.max_depth,\n min_leaf_examples=self.min_leaf_examples,\n max_split_features=self.max_split_features)\n features , targets = data[1], data[2]\n tree.fit(features, targets)\n return tree", "title": "" }, { "docid": "c093f988c6da09db28be6449a278f3cc", "score": "0.60802907", "text": "def create_tree(dataset, feature_Names):\n ## ่ทณๅ‡บ้€’ๅฝ’ๅ‡ฝๆ•ฐ็š„ไธคไธชๆกไปถ\n labels = list(dataset[:, -1])\n if labels.count(labels[0]) == len(labels):\n return labels[0]\n if len(dataset[0]) == 1:\n return Majority_vote(labels)\n ## ้ฆ–ๅ…ˆๆ‰พๅˆฐๅฝ“ๅ‰datasetไธญๆœ€ๅฅฝ็š„็‰นๅพ๏ผŒไฝœไธบๅˆคๆ–ญ่Š‚็‚น\n best_feature_idx = best_feature(dataset)\n best_feature_name = feature_Names[best_feature_idx]\n\n tree = {best_feature_name:{}}\n\n sub_feature_Names = feature_Names[:]\n del(sub_feature_Names[best_feature_idx]) # ๆถˆ่€—็‰นๅพ\n # del(feature_Names[best_feature_idx]) # ่ฟ™ไธชๆ“ไฝœไธๅฏไปฅ๏ผ๏ผ๏ผ\n\n ## ็„ถๅŽ้’ˆๅฏนๆœ€ๅฅฝ็š„็‰นๅพ่ฟ›่กŒๆ•ฐๆฎ้›†ๅˆ†็ฑป๏ผŒ้€’ๅฝ’่ฐƒ็”จ\n uni_val = set(dataset[:, 
best_feature_idx])\n for feature_value in uni_val:\n tree[best_feature_name][feature_value] = create_tree(split_dataset(dataset\\\n , best_feature_idx, feature_value), sub_feature_Names)\n\n return tree", "title": "" }, { "docid": "ebfdcd711c76aa3e3d12aaaab3827685", "score": "0.6077259", "text": "def build_tree_py(data, ldr_avail):\n \n travtree = {} \n #logger.debug('peakTree parent {}'.format(ncD.variables['parent'][it,ir,:]))\n parent = np.ma.masked_less(data[:,0], -990)\n avail_nodes = np.argwhere(parent > -10).ravel()\n #print(data[:,0].mask, type(data[:,0]), parent, avail_nodes)\n for k in avail_nodes.tolist():\n node = {'parent_id': np.asscalar(data[k,0]), \n 'thres': np.asscalar(data[k,5]), \n 'width': np.asscalar(data[k,3]), \n 'z': np.asscalar(data[k,1]), \n 'bounds': (np.asscalar(data[k,7]), np.asscalar(data[k,8])),\n #'coords': [0], \n 'skew': np.asscalar(data[k,4]),\n 'prominence': np.asscalar(data[k,6]),\n 'v': np.asscalar(data[k,2])}\n node['id'] = k\n node['bounds'] = list(map(int, node['bounds']))\n node['width'] = node['width'] if np.isfinite(node['width']) else -99\n node['skew'] = node['skew'] if np.isfinite(node['skew']) else -99\n node['thres'] = node['thres'] if np.isfinite(node['thres']) else -99\n node['prominence'] = node['prominence'] if np.isfinite(node['prominence']) else -99\n if ldr_avail:\n node['ldr'] = np.asscalar(data[k,9]) \n node['ldr'] = node['ldr'] if np.isfinite(node['ldr']) else -99\n node['ldrmax'] = np.asscalar(data[k,10])\n node['ldrmax'] = node['ldrmax'] if np.isfinite(node['ldrmax']) else -99\n else:\n node['ldr'], node['ldrmax'] = -99, -99\n if node['parent_id'] != -999:\n if k == 0:\n node['coords'] = [0]\n else:\n coords = travtree[node['parent_id']]['coords']\n if k%2 == 0:\n node['coords'] = coords + [1]\n else:\n node['coords'] = coords + [0]\n \n # remove the parent id for compatibility to peakTreeVis\n if node['parent_id'] == -1:\n del node['parent_id']\n # format for transport\n #v = {ky: format_for_json(val) for ky, val in v.items()}\n travtree[k] = node\n return travtree", "title": "" }, { "docid": "2c31b2fd1848e09d4d60856f1f25f20b", "score": "0.60465515", "text": "def learn_tree(self, examples):\n self.cur_depth += 1\n \n best_split = self.nextsplit(examples)\n \n if best_split[\"infogain\"]< 0.9 and self.cur_depth <= self.max_depth :\n lsub = self.nextsplit(best_split[\"leftChild\"])\n lthresh = lsub[\"infogain\"]#infogain of left sub tree \n rsub = self.nextsplit(best_split[\"rightChild\"])\n\n rthresh = rsub[\"infogain\"]#infogain of right child\n print(rthresh)\n if lthresh > rthresh:\n return DecisionNode(lsub['attribute'],lsub['threshold'],self.learn_tree(lsub['leftChild']),self.learn_tree(lsub['rightChild']),lsub['missChild'])\n else:\n return DecisionNode(rsub['attribute'],rsub['threshold'],self.learn_tree(rsub['leftChild']),self.learn_tree(rsub['rightChild']),rsub['missChild'])\n else:\n lsub = self.nextsplit(best_split[\"leftChild\"])\n lthresh = lsub[\"infogain\"]#infogain of left sub tree \n rsub = self.nextsplit(best_split[\"rightChild\"])\n rthresh = rsub[\"infogain\"]#infogain of right child\n print(rthresh)\n if len(best_split['leftChild']) < self.min_leaf_count or len(best_split['rightChild']) < self.min_leaf_count:\n return LeafNode(self.labelfinder(examples),len(examples),len(examples))\n elif lthresh > rthresh:\n return LeafNode(self.labelfinder(lsub['leftChild']),len(lsub['leftChild']),len(best_split))\n else:\n return LeafNode(self.labelfinder(rsub['rightChild']),len(rsub['rightChild'],len(best_split)))", 
"title": "" }, { "docid": "67423ee0508b8f529c875fa9e763c94b", "score": "0.60405076", "text": "def train_tree(self, data):\n logging.info('Training tree {}'.format(data[0] + 1))\n tree = DecisionTreeRegressor(max_depth=self.max_depth,\n min_leaf_examples=self.min_leaf_examples,\n max_split_features=self.max_split_features)\n features , targets = data[1], data[2]\n tree.fit(features, targets)\n return tree", "title": "" }, { "docid": "b608d19f88f44ab02aed6066250ae23f", "score": "0.59403497", "text": "def __init__(self, dataset, train_set, skip_feature_indices = {}, auto_build = True):\n\n self.dataset = dataset\n self.train_set = train_set\n\n # A list of indices that should be skipped when calculating expected\n # entropies. Normally, this is for the case when a feature is already\n # used as a node in previous levels of the tree.\n self.skip_feature_indices = skip_feature_indices\n\n self.feature_index = -1\n self.numeric_feature_split_index = -1\n # Children is a dict whose keys are the values of the feature that\n # corresponds to the feature_index and value is the root of sub decision\n # tree. \n self.children = {}\n \n self.class_distribution = self.__get_class_distribution()\n self.is_pruned = False\n self.can_prune = True\n\n if auto_build:\n self.__build_tree()", "title": "" }, { "docid": "e567ddb85af3f0d717e144517dc508ed", "score": "0.5935356", "text": "def build_tree(self, X, Y, depth):\n \n nexamples, nfeatures=X.shape\n # YOUR CODE HERE\n Split=0\n InfoGain=-float('Inf')\n RightChildInd=0\n LeftChildInd=0\n FeatureIndex=-1\n Label,Impurity=self.FindImpurity(Y)\n Learner=self.getWeakLearner()\n if depth==0 or len(X)<=self.exthreshold or Impurity>=self.purity:\n return Node(Impurity,Label,0,0,Learner)\n \n Split,InfoGain,LeftChildInd,RightChildInd=Learner.train(X,Y)\n Temp_X,Temp_Y=X[LeftChildInd],X[RightChildInd]\n \n if len(Temp_X)==0 or len(Temp_Y)==0:\n return Node(Impurity,Label,0,0,Learner)\n \n \n node=Node(Impurity,Label,0,InfoGain,Learner)\n RightNode=self.build_tree(X[RightChildInd],Y[RightChildInd],depth-1)\n LeftNode=self.build_tree(X[LeftChildInd],Y[LeftChildInd],depth-1)\n node.set_childs(LeftNode,RightNode)\n return node", "title": "" }, { "docid": "5f10d307342e82f70dd2dbba4f602a69", "score": "0.5932281", "text": "def buildTreeRecursive(self, xs, ys, attributes):\n\n # return leaf node when all the xs are in the same class\n if len(set(ys)) == 1:\n return LeafNode(ys[0])\n\n # Logit Regression\n LogitRegressor = LogitReg(xs.shape[1])\n LogitRegressor.load(xs, ys)\n LogitRegressor.learn(steps=50000)\n preds = np.array([LogitRegressor.predict(x) for x in xs])\n thisNode = MultiVarNode(attributes, LogitRegressor.w, -LogitRegressor.b)\n\n # for branch that weighted sum of the attribute values is less than threshold\n xs_less = xs[preds==0]\n ys_less = ys[preds==0]\n thisNode.insertChild(0, self.buildTreeRecursive(xs_less, ys_less, attributes))\n # for branch that weighted sum of the attribute values is greater than threshold\n xs_greater = xs[preds==1]\n ys_greater = ys[preds==1]\n thisNode.insertChild(1, self.buildTreeRecursive(xs_greater, ys_greater, attributes))\n\n return thisNode", "title": "" }, { "docid": "7376d118ae0b8ad7b89144e36e4be397", "score": "0.59217554", "text": "def decisionTreeModel(self):\n self.model = DecisionTreeClassifier(criterion='entropy', min_samples_leaf = 1, max_depth = 3)\n self.model = self.model.fit(self.Xtrain.drop(self.textCol,axis=1),self.Ytrain)\n self.data['Ypredict'] = 
self.model.predict_proba(self.data.drop(self.textCol,axis=1))[:,1]", "title": "" }, { "docid": "2966a744e0ddbdeac10c8b45a319beeb", "score": "0.58598435", "text": "def run(data, k, tree_dict):\n k = k\n\n result = compute_all(data)\n\n # terminate when all element is in the same class\n if result == 'Yes' or result == 'No' or result is None:\n tree_dict['Result'] = result\n print(result)\n print('---------------------------------------')\n else:\n # else continue to find the maximum info and split\n max_info_gain_attr = max(result, key=result.get)\n\n possible_attributes = attributes_values[max_info_gain_attr]\n\n # Split\n for value in possible_attributes:\n print('The ' + max_info_gain_attr + ' is the max at level ' + str(k))\n print('Inspecting Condition ' + str(value))\n sub_data = split(data, max_info_gain_attr, value)\n\n if sub_data.size == 0:\n yes_count = data['Attrition'].tolist().count('Yes')\n no_count = data['Attrition'].tolist().count('No')\n if yes_count > no_count:\n print('Yes')\n if max_info_gain_attr not in tree_dict.keys():\n tree_dict = {max_info_gain_attr: {}}\n tree_dict[max_info_gain_attr][value] = {'Result': 'Yes'}\n else:\n tree_dict[max_info_gain_attr][value] = {'Result': 'Yes'}\n print('---------------------------------------')\n else:\n print('No')\n if max_info_gain_attr not in tree_dict.keys():\n # tree_dict = {max_info_gain_attr: {}}\n tree_dict.update({max_info_gain_attr: {}})\n tree_dict[max_info_gain_attr][value] = {'Result': 'No'}\n else:\n tree_dict[max_info_gain_attr][value] = {'Result': 'No'}\n print('---------------------------------------')\n else:\n\n if max_info_gain_attr not in tree_dict.keys():\n branch = {}\n tree_dict.update({max_info_gain_attr: {value: branch}})\n run(sub_data, k + 1, branch)\n else:\n branch = {}\n tree_dict[max_info_gain_attr][value] = branch\n run(sub_data, k + 1, branch)\n\n return tree_dict", "title": "" }, { "docid": "64492d0e98412c630313fdfaa5c4c2d3", "score": "0.5849002", "text": "def build_tree(self, features, targets, depth):\n if len(features) == 0:\n return DecisionNode()\n if depth == 0:\n return DecisionNode(result=max(self.unique_counts(targets)))\n\n current_score = self.entropy(targets)\n best_gain = 0.0\n best_criteria = None\n best_sets = None\n\n considered_features = self.choose_random_features(features[0])\n for col in considered_features:\n column_values = set([row[col] for row in features])\n for value in column_values:\n feats1, targs1, feats2, targs2 = \\\n self.divide_set(features, targets, col, value)\n p = float(len(feats1)) / len(features)\n gain = current_score - p * self.entropy(targs1) - \\\n (1 - p) * self.entropy(targs2)\n if gain > best_gain and len(feats1) > 0 and len(feats2) > 0:\n best_gain = gain\n best_criteria = (col, value)\n best_sets = ((feats1, targs1), (feats2, targs2))\n\n if best_gain > 0:\n left_branch = self.build_tree(best_sets[0][0], best_sets[0][1], depth - 1)\n right_branch = self.build_tree(best_sets[1][0], best_sets[1][1], depth - 1)\n return DecisionNode(col=best_criteria[0], value=best_criteria[1],\n tb=left_branch, fb=right_branch)\n else:\n return DecisionNode(result=max(self.unique_counts(targets)))", "title": "" }, { "docid": "6d2bf616e28ef374732688b92a48e958", "score": "0.5813367", "text": "def BuildingDT(input_X,input_Y,feature_X_values,feature_Y_values,Attribute,mc_label):\n # If there is no output label\n if input_Y.shape[0] == 0:\n return DT(LeafValue=mc_label)\n \n children = [] #children nodes from the current selected node \n mcv = 
most_common_value(input_Y,feature_Y_values) # most common value of given input Y\n \n # If all labels of input_Y is the same or Attribute is empty\n all_values_same_boolean = check_all_values_same(input_Y,feature_Y_values) \n if all_values_same_boolean or Attribute == []:\n return DT(LeafValue=mcv)\n else:\n best_IG = 0\n best_set_X = []\n best_set_Y = []\n best_Attribute = 0\n current_entrophy = entrophy(input_Y,feature_Y_values)\n for index in Attribute:\n # index is the column index --> decides which attribute to use\n divided_X, divided_Y = divide_data(input_X,input_Y,index,feature_X_values)\n \n for j in range(len(divided_Y)): #iterate through each possible feature value\n # For tic-tac-toe --> divided_Y contains subsets for \"x\", \"o\" and \"b\"\n current_IG = information_gain(divided_Y[j],feature_Y_values,current_entrophy)\n if current_IG >= best_IG:\n best_IG = current_IG\n best_Attribute = index\n best_feature_ind = j\n \n\n # Now best attribute is selected --> divide data using that attribute\n # best_Attribute --> gives the column number of best attribute\n # best_feature_ind --> gives which feature value has the most IG, i.e. \"x\", \"o\" or \"b\" in tic-tac\n best_set_X, best_set_Y = divide_data(input_X,input_Y,best_Attribute,feature_X_values)\n \n # Remove the current attribute from the attribute list to\n # create remaning attributes --> no need to use same attribute over and\n # over again! --> If it is needed, it is gonna show up in the recursion anyway\n remaning_Attributes = []\n for index in Attribute:\n if index != best_Attribute:\n remaning_Attributes.append(index)\n\n \n for i in range(len(best_set_X)):\n children.append( BuildingDT(best_set_X[i],best_set_Y[i],feature_X_values,feature_Y_values, \n remaning_Attributes,mcv) )\n\n return DT(FeatureNum=best_Attribute,Children=children)", "title": "" }, { "docid": "34ae2ed9c9318942d96620e37890759b", "score": "0.5808513", "text": "def build_tree(self, features, targets, depth):\n if len(features) == 0:\n return DecisionNode()\n if depth == 0:\n return DecisionNode(result=self.mean_output(targets))\n\n lowest_variance = None\n best_criteria = None\n best_sets = None\n\n considered_features = self.choose_random_features(features[0])\n for column in considered_features:\n column_values = [feature[column] for feature in features]\n for feature_value in column_values:\n feats1, targs1, feats2, targs2 = \\\n self.divide_set(features, targets, column, feature_value)\n var1 = self.variance(targs1)\n var2 = self.variance(targs2)\n if var1 is None or var2 is None:\n continue\n variance = var1 + var2\n if lowest_variance is None or variance < lowest_variance:\n lowest_variance = variance\n best_criteria = (column, feature_value)\n best_sets = ((feats1, targs1),(feats2, targs2))\n\n # Check variance value also\n if lowest_variance is not None and \\\n len(best_sets[0][0]) >= self.min_leaf_examples and \\\n len(best_sets[1][0]) >= self.min_leaf_examples:\n left_branch = self.build_tree(best_sets[0][0], best_sets[0][1], depth - 1)\n right_branch = self.build_tree(best_sets[1][0], best_sets[1][1], depth - 1)\n return DecisionNode(col=best_criteria[0], value=best_criteria[1],\n tb=left_branch, fb=right_branch)\n else:\n return DecisionNode(result=self.mean_output(targets))", "title": "" }, { "docid": "46505372ef044068de49cddb7d5c529d", "score": "0.57974464", "text": "def tree_build(data_df, n):\n \n # get all the attributes \n attribute_list = data_df.loc[:, data_df.columns != 'target'].columns\n \n #print('attribute_list', 
attribute_list)\n\n # get the split points for the numeric attributes\n a_dict = {}\n \n for col in attribute_list:\n if data_df[col].dtypes != 'object':\n data_df[col] = data_df[col].astype(float)\n a_dict[col] = split_point(data_df, col, n)\n \n #print(a_dict)\n # get the attribute that has the most information gain\n attribute_s = None\n attribute_threshold = None\n attribute_mse = 100000000000000000\n \n \n for col in attribute_list:\n\n if data_df[col].dtypes == 'object':\n total_mse = 0\n for d in data_df[col].unique():\n sub_df = data_df[data_df[col] == d]['target']\n len_df = len(sub_df)\n total_mse += (len(sub_df) / len(data_df)) * mse(sub_df)\n \n if total_mse < attribute_mse:\n attribute_mse = total_mse\n attribute_s = col\n attribute_threshold = d\n \n \n #print('best information/attribute/threshold', attribute_gain, attribute_s, attribute_threshold)\n #print('total mse', attribute_mse)\n\n for a in a_dict.keys():\n #print('attribute selected:', a)\n thresholds = a_dict[a]\n \n # iterate each threshold\n for t in thresholds:\n #print('thresholds: ', t)\n #print('attribute for this threshold is', a)\n left_t = data_df[data_df[a] <= t]['target']\n #print('left_df', left_t)\n left_n = len(data_df[data_df[a] <= t])\n #print('left_df', left_n)\n mse_l = mse(left_t)\n #print('mse left', mse_l)\n\n right_t = data_df[data_df[a] > t]['target']\n right_n = len(data_df[data_df[a] > t])\n #print('right_df', right_n)\n mse_r = mse(right_t)\n #print('mse right', mse_r)\n \n # information gain\n total_mse = ((left_n / len(data_df)) * mse_l + ((right_n) / len(data_df)) * mse_r)\n \n #print('total mse', total_mse)\n \n if total_mse < attribute_mse:\n attribute_mse = total_mse\n attribute_s = a\n attribute_threshold = t\n #print('best information/attribute/threshold', gain, a, t) \n \n\n return attribute_mse, attribute_s, attribute_threshold", "title": "" }, { "docid": "9ba7f309fc1a46f129302a6b0e3ffcfe", "score": "0.5780185", "text": "def buildTree(self, X, y, depth):\n # check if we need to stop splitting\n\n # find best feature and attribute\n\n if (y.size == 0):\n y = [0]\n if (np.unique(y).size == 1):\n val = stats.mode(y, axis=None)[0][0]\n node = MyDecisionTree(self.max_depth - depth)\n node.tree = {\n 'feature': -1,\n 'value': val,\n 'IG': 0,\n 'feature_num': -1,\n 'isLeaf': True,\n 'is_categorical': False,\n 'leftTree': None,\n 'rightTree': None,\n 'depth': depth\n }\n return node\n if (depth == self.max_depth):\n val = stats.mode(y, axis=None)[0][0]\n node = MyDecisionTree(self.max_depth - depth)\n node.tree = {\n 'feature': -1,\n 'value': val,\n 'IG': 0,\n 'feature_num': -1,\n 'isLeaf': True,\n 'is_categorical': False,\n 'leftTree': None,\n 'rightTree': None,\n 'depth': depth\n }\n return node\n feature, index, IG, SplitVal = find_best_feature(X, y)\n X_left, X_right, y_left, y_right = partition_classes(X, y, index, SplitVal)\n leftTree = self.buildTree(X_left, y_left, depth + 1) # Be careful what should be the depth here\n rightTree = self.buildTree(X_right, y_right, depth + 1)\n node = MyDecisionTree(self.max_depth - depth) # Be careful what should be the depth here\n node.tree = {\n 'feature': feature,\n 'value': SplitVal,\n 'IG': IG,\n 'feature_num': index,\n 'isLeaf': False,\n 'is_categorical': False,\n 'leftTree': leftTree,\n 'rightTree': rightTree,\n 'depth': depth\n }\n return node", "title": "" }, { "docid": "9ac3ef5bc20e8ee5f44c04d42aefdb72", "score": "0.57531476", "text": "def create_tree(data, leaf_type=get_reg_leaf, error_type=get_reg_error, ops=(1, 4)):\n feature, value = 
choose_axis(data, leaf_type, error_type, ops)\n if feature is None:\n return value\n tree = {'split_axis': feature, 'split_value': value}\n left, right = binary_split(data, feature, value)\n tree['right'] = create_tree(right, leaf_type, error_type, ops)\n tree['left'] = create_tree(left, leaf_type, error_type, ops)\n return tree", "title": "" }, { "docid": "ea42571484fcd15b8cff555ce32be670", "score": "0.5727934", "text": "def main():\n\n dt_file = \"examples2.txt\"\n out_file = \"dtree.txt\"\n\n header_info, training_data = read_decision_tree(dt_file)\n log(header_info)\n\n n = len(header_info) - 1 # not include label column\n node = decision_tree_learning(training_data, training_data, header_info, list(range(n)))\n\n # write_tree(node, header_info, out_file)\n\n # For debugging\n # best_attr, best_gain = get_best_attr(training_data, header_info)\n # partitions = partition_by_a(training_data, best_attr, header_info)\n # node = decision_tree_learning(training_data, training_data, header_info, [2, 0, 1], fix_expand_order=True)\n print_tree(node, header_info)", "title": "" }, { "docid": "b0edc46e652b39f5a9772d5108e45fa0", "score": "0.57155764", "text": "def build_huffman_tree(data):\n huffman_tree = HuffmanTree()\n\n # Convert the input data into a list of tuples, sorted by frequency\n print(f\"Input data: {data}\")\n\n input_str_char_counter = Counter(data)\n\n input_str_char_freq_tuple_list = list()\n\n for key, value in input_str_char_counter.items():\n input_str_char_freq_tuple_list.append((key, value))\n\n # Sort tuple by char frequency\n input_str_char_freq_tuple_list.sort(key=lambda r: r[1])\n\n while len(input_str_char_freq_tuple_list) > 1:\n # Retrieve the first 2 tuples with lowest frequencies\n char_freq_tuple_left = input_str_char_freq_tuple_list.pop(0)\n # print(f\"char_freq_tuple_left: {char_freq_tuple_left}\")\n\n char_freq_tuple_right = input_str_char_freq_tuple_list.pop(0)\n # print(f\"char_freq_tuple_right: {char_freq_tuple_right}\")\n\n # Build a subtree from the list\n sub_tree, freq_sum = build_sub_tree_from_tuples(char_freq_tuple_left, char_freq_tuple_right)\n\n # Add merged tree into tuple list\n input_str_char_freq_tuple_list.append((sub_tree, freq_sum))\n\n # Sort the list again\n input_str_char_freq_tuple_list.sort(key=lambda r: r[1])\n # print(input_str_char_freq_tuple_list)\n\n if len(input_str_char_freq_tuple_list) > 0:\n huffman_tree = input_str_char_freq_tuple_list[0][0]\n\n return huffman_tree", "title": "" }, { "docid": "95c38038c7026b7e67995bb85d1c9f78", "score": "0.57076234", "text": "def classify():\n\n tree = TILDE()\n \n '''\n print (\"\"\"shows an example of classification\n\n this is data about men,women and dogs\n h(man) means man is happy\n o(man,dog) means man owns dog\n r(man,woman,term) means man is in relationship with woman for long term or short term\n\n \"\"\")\n\n print (\"\\nlearning classification tree for man's happiness\")\n\n #inputs to classification: data,examples,target and background\n train_data = ['o(m1,d1)','r(m1,w1,st)','o(m2,d2)','r(m2,w2,st)','o(m3,d3)','r(m3,w3,st)','o(m4,d4)','r(m4,w4,lt)','r(m5,w5,st)','r(m6,w6,lt)','r(m7,w7,lt)']\n train_pos = ['h(m1)','h(m2)','h(m4)','h(m6)']\n train_neg = ['h(m3)','h(m5)','h(m7)']\n target = 'h'\n bk = ['h(+man)','o(+man,-dog)','r(+man,-woman,#term)']\n\n #learns tree, can see tree clauses by printing tree.clauses\n tree.learn(train_data,bk,target,pos=train_pos,neg=train_neg)\n print (\"\\nlearned ordered tree clauses are:\\n\")\n print (tree.clauses)\n\n #inputs to testing\n test_data = 
train_data #cheating but you can add your own data\n test_example = train_pos[0] #just picking one example to show how it works\n infered_value = tree.infer(test_data,test_example)\n '''\n\n train_data = ['edge_center1(wh,or,wh)','edge_center1(wh,bl,y)']\n train_pos = ['center1(wh,or)']\n train_neg = ['center1(wh,bl)']\n target = 'center1'\n bk = ['center1(+piece,+piece)',\n 'edge_center1(+piece,+piece,-piece)',\n 'edge_center1(+piece,+piece,+piece)']\n\n tree.learn(train_data,bk,target,pos=train_pos,neg=train_neg)\n print (\"\\nlearned ordered tree clauses are:\\n\")\n print (tree.clauses)", "title": "" }, { "docid": "c4324f1dd5534ea1e2a05b2ea25d8c13", "score": "0.5679744", "text": "def train_decision_tree(X, y, max_depth=None):\n clf = DecisionTreeClassifier(criterion='entropy',max_depth=max_depth,random_state=11)\n learnedtree_dec = clf.fit(X, y)\n return(learnedtree_dec)", "title": "" }, { "docid": "e1df62dfa1a1b7d91a20456cddae4f41", "score": "0.5660271", "text": "def build(self, tree, X, y, sample_weight=None, X_idx_sorted=None):\n\n if tree.max_depth <= 10:\n init_capacity = (2 ** (tree.max_depth + 1)) - 1\n else:\n init_capacity = 2047\n\n tree.resize(init_capacity)\n\n splitter = self.splitter\n max_depth = self.max_depth\n sample_weight_ptr = None\n\n # Recursive partition (without actual recursion)\n splitter.init(X, y, sample_weight_ptr, X_idx_sorted)\n\n n_node_samples = splitter.n_samples\n weighted_n_node_samples = None\n\n first = 1\n max_depth_seen = -1\n split_record = SplitRecord()\n stack = list()\n\n stack.append(StackRecord(0, n_node_samples, 0, TREE_UNDEFINED, 0,\n INFINITY, 0, splitter.split_context))\n\n while len(stack) > 0:\n stack_record = stack.pop()\n\n start = stack_record.start\n end = stack_record.end\n depth = stack_record.depth\n parent = stack_record.parent\n is_left = stack_record.is_left\n impurity = stack_record.impurity\n n_constant_features = stack_record.n_constant_features\n split_context = stack_record.split_context\n\n # logger.debug(\"feature ranges:\\n%s\" % str(split_context))\n\n n_node_samples = 0\n splitter.node_reset(split_context)\n\n if first:\n first = 0\n\n is_leaf = (depth >= max_depth)\n\n if not is_leaf:\n splitter.node_split(impurity, split_record, n_constant_features)\n\n node_id = tree.add_node(parent, is_left, is_leaf, split_record.feature,\n split_record.threshold, split_context.r,\n impurity, n_node_samples,\n weighted_n_node_samples)\n # logger.debug(\"Node: %s\" % str(tree.nodes[node_id]))\n\n if not is_leaf:\n # Push right child on stack\n stack.append(StackRecord(split_record.pos, end, depth + 1, node_id, 0,\n split_record.impurity_right, n_constant_features, split_record.right_context))\n\n # Push left child on stack\n stack.append(StackRecord(start, split_record.pos, depth + 1, node_id, 1,\n split_record.impurity_left, n_constant_features, split_record.left_context))\n\n if False and parent >= 0:\n logger.debug(\"Parent Node: %s\" % str(tree.nodes[parent]))\n\n if depth > max_depth_seen:\n max_depth_seen = depth\n\n # tree.resize_c(tree.node_count)\n tree.max_depth = max_depth_seen\n\n tree.reset_n_node_samples()\n tree.add_samples(X)", "title": "" }, { "docid": "d8cb7f8a5138c7e3e3017b852603f127", "score": "0.5640155", "text": "def run_decision_tree(X_train, X_test, y_train, y_test, complexities):\n from sklearn.tree import DecisionTreeClassifier\n\n dtc_list, dtc_pred, dtc_accs, dtc_pred_times, dtc_fit_times =\\\n [[[] for _ in range(c.NUM_SLICES)] for _ in range(5)]\n for i in range(c.NUM_SLICES):\n for n in 
range(len(complexities)):\n dtc_list[i].append(DecisionTreeClassifier(max_depth=complexities[n]))\n t0 = time()\n dtc_list[i][n].fit(X_train[i], y_train[i])\n dtc_fit_times[i].append(round(time() - t0, 3))\n t0 = time()\n dtc_pred[i].append(dtc_list[i][n].predict(X_test[i]))\n dtc_pred_times[i].append(round(time() - t0, 3))\n dtc_accs[i].append(accuracy_score(y_test[i], dtc_pred[i][n]))\n return dtc_list, dtc_accs, dtc_pred, dtc_pred_times, dtc_fit_times", "title": "" }, { "docid": "7f3fd744ad27e877b45e986a8afc3624", "score": "0.5630176", "text": "def churn_decision_tree(X_train, X_test, y_train, y_test):\n dt = DecisionTreeClassifier(random_state=SEED)\n grid = {\"criterion\": ['gini', 'entropy'], \"max_depth\": [8, 9, 10, 11, 12, 13]}\n\n dt_cv = GridSearchCV(dt, grid, cv=5)\n dt_cv.fit(X_train, y_train)\n dt_cv.fit(X_train, y_train)\n\n print(\"tuned hpyerparameters :(best parameters) \", dt_cv.best_params_)\n print(\"accuracy :\", dt_cv.best_score_)\n # RESULTS ARE ENTROPY AND 10\n dt_final = DecisionTreeClassifier(random_state=13,\n max_depth=dt_cv.best_params_['max_depth'],\n criterion=dt_cv.best_params_['criterion'])\n\n dt_final.fit(X_train, y_train)\n dt_pred = dt_final.predict(X_test)\n decision_tree_info = {\n 'best_params': dt_final.best_params_,\n 'cv_accuracy': dt_final.best_score_,\n 'model': dt_final,\n 'accuracy': str(accuracy_score(y_test, dt_pred)),\n 'precision': str(precision_score(y_test, dt_pred)),\n 'recall': str(recall_score(y_test, dt_pred)),\n 'f1': str(f1_score(y_test, dt_pred)),\n 'predicted_values': dt_pred,\n 'confusion_matrix': create_confusion_matrix(y_test, dt_pred),\n 'tree_plot': tree.plot_tree(dt_final, filled=True)\n }\n return decision_tree_info", "title": "" }, { "docid": "7a5204a5549fa57207929de602aafd5d", "score": "0.56273186", "text": "def decision_tree(tumours):\r\n \r\n # get the attributes from all the tumours\r\n atts = get_attributes(tumours)\r\n \r\n # find how many in the list of tumours are Malignant, and how\r\n # many are Benign\r\n ben, mal = split_attributes(tumours, 2, True)\r\n \r\n # if the list of attributes is empty, there is nothing left to\r\n # distinguish the tumours, therefore return the most likely answer,\r\n # based on the given tumours\r\n if atts == []:\r\n return mal > ben\r\n \r\n # otherwise, split the tumours into two lists, by checking if they\r\n # have the first attribute in the list of all attributes\r\n att = atts[0]\r\n with_att, without_att = split_attributes(tumours, att)\r\n \r\n # if every tumour is Benign, it must be that this branch is always False\r\n if mal == []:\r\n return False\r\n \r\n # if every tumour is Malignant, it must be that this branch is always True\r\n elif ben == []:\r\n return True\r\n \r\n # otherwise, remove the attribute used to split the list from the tumours\r\n else:\r\n for i in range(len(with_att)):\r\n if att in with_att[i][1]:\r\n with_att[i][1].remove(att)\r\n \r\n # crate a Leaf, with the left value of the Leaf being the branch to \r\n # follow if the tumour does have that initial attribute, and the \r\n # right value being the branch to follow if the tumour does not\r\n # have the attribute\r\n return Node(att, decision_tree(with_att), decision_tree(without_att))", "title": "" }, { "docid": "01381a70be0d84efa0496f6f8b13e56b", "score": "0.56048423", "text": "def construct_greedy_id_tree(data, possible_classifiers, target_classifier, id_tree_node=None):\n if not id_tree_node:\n id_tree_node = IdentificationTreeNode(target_classifier)\n\n length = len( 
split_on_classifier(data, target_classifier).keys() )\n\n if length == 1:\n classified = target_classifier.classify(data[0])\n id_tree_node.set_node_classification(classified)\n return id_tree_node\n\n\n try:\n best_test = find_best_classifier(data, possible_classifiers, target_classifier)\n\n grouping = split_on_classifier(data, best_test)\n id_tree_node.set_classifier_and_expand(best_test, grouping)\n\n except NoGoodClassifiersError:\n return id_tree_node\n\n all_branches = id_tree_node.get_branches().items()\n\n for (feature,cn) in all_branches:\n small_data = grouping[feature]\n construct_greedy_id_tree(small_data, possible_classifiers, target_classifier, cn)\n\n return id_tree_node", "title": "" }, { "docid": "425bd7b6c4c169cd1e5624c09bc63ec4", "score": "0.55948067", "text": "def __init__(self, input_data, number_trees):\r\n if number_trees % 2 == 0 or number_trees <= 0:\r\n raise ValueError(\"Number of trees must be an odd positive integer\")\r\n self.trees = []\r\n for i in range(0, number_trees):\r\n bootstrapped_data = input_data.sample(max(input_data.count()), replace=True)\r\n self.trees.append(dt.DecisionTree(bootstrapped_data, random_subset=True))", "title": "" }, { "docid": "c4c89a5eddc4675f82f0835a46c4c85f", "score": "0.5590083", "text": "def transform_into_tree(data):\n lev = [(x, meta, x) for (x, meta) in data]\n while True:\n nlev = [(x, meta, (z.getparent() if z is not None else None)) for (x, meta, z) in lev]\n \n \n lev = nlev\n if len([z for (_,_,z) in lev if z is not None]) == 0:\n break", "title": "" }, { "docid": "f1187ed8c1b18aa032b592d5bbaaed76", "score": "0.5577422", "text": "def train(self, data, labels, depth):\n self.labels = labels\n self.depth = depth\n \n x = [None]*len(data)\n y = [None]*len(data)\n\n for row in range(len(data)):\n y[row] = data[row][0]\n t = []\n for col in range(1,len(data[row])):\n t += [data[row][col]]\n x[row] = t\n\n\n self.tree = DecisionTreeClassifier(criterion=\"entropy\", max_depth=depth, random_state=0)\n self.tree = self.tree.fit(x,y)", "title": "" }, { "docid": "464ef2546ca8f2f65437b76c4eb36cf9", "score": "0.5573756", "text": "def decision_function(self, X):\n leaves, nodeinds = self.tree_.apply(X, getleaves=True, getnodeinds=True)\n depths = np.array(np.transpose(nodeinds.sum(axis=1)))\n scores = self.tree_.n_node_samples[leaves] * (2. ** depths)\n return scores", "title": "" }, { "docid": "5fe952bc3d9eb1379c9966a94855d685", "score": "0.5572635", "text": "def iterative_build_tree(self, rows, header, maxNodes):\n\t\tcurrentNodes=1 #the first node is the root.\n\t\t\n\t\tgain,question=find_best_split(rows,header)\n\t\t\n\t\t# Since we can ask no further questions,\n\t\t# we'll return a leaf.\n\t\tif gain== 0\t:\n\t\t\treturn Leaf(rows)\t\n\t\telse :\t\t\t\t# now starts the real problem\n\t\t\troot=Node(question, gain, rows, None,None)\t#The root is a node now\n\t\t\t\n\t\t\ttrue_rows, false_rows= partition(root.rows, root.question)\n\t\t\t\n\t\t\tgainT,questionT = find_best_split(true_rows,header)\t#finds the best gini for both the false and the true\n\t\t\tgainF,questionF = find_best_split(false_rows,header)\n\t\t\t\n\t\t\ttrue_branch=None\n\t\t\tfalse_branch=None\n\t\t\tnodes_to_split= list()\n\t\t\t\n\t\t\troot= DecisionNode(question,None, None)\n\t\t\t\n\t\t\tif(gainT==0):\t\t# Check if the gain is 0... 
in that case that's a leaf\n\t\t\t\ttrue_branch=Leaf(true_rows)\n\t\t\telse:\n\t\t\t\ttrue_branch=Node(questionT,gainT,true_rows, root, True)\n\t\t\t\tnodes_to_split.append(true_branch)\n\t\t\t\t\n\t\t\troot.true_branch=true_branch\n\t\t\tif(gainF==0):\t\t# Check if the gain is 0... in that case that's a leaf\n\t\t\t\tfalse_branch=Leaf(false_rows)\n\t\t\telse:\n\t\t\t\tfalse_branch=Node(questionF,gainF,false_rows,root, False)\n\t\t\t\tnodes_to_split.append(false_branch)\n\t\t\t\t\n\t\t\troot.false_branch=false_branch\n\t\t\t\n\t\t\tcurrentNodes+=2\n\n\t\t\t#the number of nodes are not the max means that i can still partitionate if there are nodes that allows that\n\t\t\t#if the nodes to split are==0 means that there are not nodes to partitionate\n\t\t\twhile(currentNodes<maxNodes and (not len(nodes_to_split)==0)): \n\t\t\t\t# find the best gain from all the nodes. should be sorted?\n\t\t\t\tmax=0\n\t\t\t\tbestNodeIndex=0\n\t\t\t\tfor i in range(0,len(nodes_to_split)):\n\t\t\t\t\t\n\t\t\t\t\tif(nodes_to_split[i].gain>max):\n\t\t\t\t\t\tmax=nodes_to_split[i].gain\n\t\t\t\t\t\tbestNodeIndex=i\n\t\t\t\t\n\t\t\t\t#Now that we have the node with the best gain, we should partition as we did with the root\n\t\t\t\ti=bestNodeIndex\n\n\t\t\t\ttrue_rows, false_rows= partition(nodes_to_split[i].rows, nodes_to_split[i].question)\n\t\t\t\tgainT,questionT=find_best_split(true_rows,header)\t#finds the best gini for both the false and the true\n\t\t\t\tgainF,questionF=find_best_split(false_rows,header)\n\t\t\t\t\t\t\t\n\t\t\t\tif(nodes_to_split[i].isATrueChild):\t#the node has to stay on the true_branch\n\t\t\t\t\t\n\t\t\t\t\tnodes_to_split[i].father.true_branch=DecisionNode(nodes_to_split[i].question,None,None)\n\t\t\t\t\tif(gainT==0):\n\t\t\t\t\t\tnodes_to_split[i].father.true_branch.true_branch=Leaf(true_rows)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttrue_branch=Node(questionT,gainT,true_rows, nodes_to_split[i].father.true_branch, True)\n\t\t\t\t\t\tnodes_to_split[i].father.true_branch.true_branch=true_branch\n\t\t\t\t\t\tnodes_to_split.append(true_branch)\n\t\t\t\t\t\n\t\t\t\t\tif(gainF==0):\n\t\t\t\t\t\tnodes_to_split[i].father.true_branch.false_branch=Leaf(false_rows)\n\t\t\t\t\telse:\n\t\t\t\t\t\tfalse_branch=Node(questionF,gainF,false_rows, nodes_to_split[i].father.true_branch, False)\n\t\t\t\t\t\tnodes_to_split[i].father.true_branch.false_branch=false_branch\n\t\t\t\t\t\tnodes_to_split.append(false_branch)\n\t\t\t\t\t\t\t\n\t\t\t\telse:\t#the node has to stay on the false_branch of the father\n\t\t\t\t\tnodes_to_split[i].father.false_branch=DecisionNode(nodes_to_split[i].question,None,None)\n\t\t\t\t\tif(gainT==0):\n\t\t\t\t\t\tnodes_to_split[i].father.false_branch.true_branch=Leaf(true_rows)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttrue_branch=Node(questionT,gainT,true_rows, nodes_to_split[i].father.false_branch, True)\n\t\t\t\t\t\tnodes_to_split[i].father.false_branch.true_branch=true_branch\n\t\t\t\t\t\tnodes_to_split.append(true_branch)\n\t\t\t\t\t\n\t\t\t\t\tif(gainF==0):\n\t\t\t\t\t\tnodes_to_split[i].father.false_branch.false_branch=Leaf(false_rows)\n\t\t\t\t\telse:\n\t\t\t\t\t\tfalse_branch=Node(questionF,gainF,false_rows, nodes_to_split[i].father.false_branch, False)\n\t\t\t\t\t\tnodes_to_split[i].father.false_branch.false_branch=false_branch\n\t\t\t\t\t\tnodes_to_split.append(false_branch)\n\t\t\t\t\n\t\t\t\tdel nodes_to_split[i]\t#delete the now decision Node from the list of Nodes to split\n\t\t\t\tcurrentNodes+=2\n\t\t\t\n\t\t\t''' \n\t\t\tNow there are 2 cases:\n\t\t\t1) the max number of nodes is 
reached. \n\t\t\t\tThis mean that if there are other Nodes in the list, those have to become a leaf.\n\t\t\t2) the length of the node list is 0, this means that there are no other question to ask\n\t\t\t\n\t\t\tWe can check those cases with the len of the node list\n\t\t\t'''\n\t\t\tif(len(nodes_to_split)>0):\n\t\t\t\tfor node in nodes_to_split:\n\t\t\t\t\tif(node.isATrueChild==True):\n\t\t\t\t\t\tnode.father.true_branch=Leaf(node.rows)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnode.father.false_branch=Leaf(node.rows)\n\t\t\t\n\t\t\t#print(\"Number of total node (inner included):\"+ str(currentNodes))\n\t\t\tself.nodes=currentNodes\n\t\t\treturn root", "title": "" }, { "docid": "8b2fc750b3abc5f052726f25e373c5b2", "score": "0.55282205", "text": "def get_decision_rules(tree_strings):\n\n working_queue = deque()\n\n # Each queue item is a list [feature_id, previous_feature_ids]\n working_queue.append([tree_strings[0], []])\n i = 1\n decision_rules = set()\n\n while len(working_queue) > 0:\n cur_feature, pre_features = working_queue.popleft()\n\n cur_string = tree_strings[i]\n cur_string_split = cur_string.split()\n\n if len(cur_string_split) == 2:\n # Case 1: there are two values\n for j, s in enumerate(cur_string_split):\n formated_cur_feature = cur_feature\n formated_cur_feature += \"t\" if j == 0 else \"f\"\n cur_features = pre_features + [formated_cur_feature]\n if s == \"-1\" or s == \"-2\":\n # We hit a decision node, add this decision rule chain\n decision_rules.add((tuple(cur_features), \"+\" if s == \"-2\" else \"-\"))\n else:\n working_queue.append([s, cur_features])\n\n elif len(cur_string_split) == 4:\n # Case 2: there are four values: the first two correspond to the cur item\n # and the last two correspond to the next item in the queue\n for j, s in enumerate(cur_string_split[:2]):\n\n formated_cur_feature = cur_feature\n formated_cur_feature += \"t\" if j == 0 else \"f\"\n cur_features = pre_features + [formated_cur_feature]\n\n if s == \"-1\" or s == \"-2\":\n decision_rules.add((tuple(cur_features), \"+\" if s == \"-2\" else \"-\"))\n else:\n working_queue.append([s, cur_features])\n\n # Load the next item in the queue\n cur_feature, pre_features = working_queue.popleft()\n\n for j, s in enumerate(cur_string_split[2:]):\n\n formated_cur_feature = cur_feature\n formated_cur_feature += \"t\" if j == 0 else \"f\"\n cur_features = pre_features + [formated_cur_feature]\n\n if s == \"-1\" or s == \"-2\":\n decision_rules.add((tuple(cur_features), \"+\" if s == \"-2\" else \"-\"))\n else:\n working_queue.append([s, cur_features])\n\n elif len(cur_string_split) == 6:\n # Case 3: there are six values: the first two correspond to the cur item\n # and the last four correspond to the next two items in the queue\n for j, s in enumerate(cur_string_split[:2]):\n\n formated_cur_feature = cur_feature\n formated_cur_feature += \"t\" if j == 0 else \"f\"\n cur_features = pre_features + [formated_cur_feature]\n\n if s == \"-1\" or s == \"-2\":\n decision_rules.add((tuple(cur_features), \"+\" if s == \"-2\" else \"-\"))\n else:\n working_queue.append([s, cur_features])\n\n # Load the next item in the queue\n cur_feature, pre_features = working_queue.popleft()\n\n for j, s in enumerate(cur_string_split[2:4]):\n\n formated_cur_feature = cur_feature\n formated_cur_feature += \"t\" if j == 0 else \"f\"\n cur_features = pre_features + [formated_cur_feature]\n\n if s == \"-1\" or s == \"-2\":\n decision_rules.add((tuple(cur_features), \"+\" if s == \"-2\" else \"-\"))\n else:\n working_queue.append([s, 
cur_features])\n\n # Load the next item in the queue\n cur_feature, pre_features = working_queue.popleft()\n\n for j, s in enumerate(cur_string_split[4:]):\n\n formated_cur_feature = cur_feature\n formated_cur_feature += \"t\" if j == 0 else \"f\"\n cur_features = pre_features + [formated_cur_feature]\n\n if s == \"-1\" or s == \"-2\":\n decision_rules.add((tuple(cur_features), \"+\" if s == \"-2\" else \"-\"))\n else:\n working_queue.append([s, cur_features])\n\n else:\n raise ValueError(\"Error: encounter string size either 2 nor 4 nor 6\")\n\n i += 1\n\n return decision_rules", "title": "" }, { "docid": "08b7afe7b3690c1b83a31c3f3a9415ee", "score": "0.5525031", "text": "def tree(self):\n \n # applying bayesian optimization\n if self.formatter.bayes == True:\n \n bayes = bayes_tree(self.formatter , self.x_train, self.y_train, self.r_train)\n best_params_tree = bayes.bayes()\n (depth, min_samples, cp) = (best_params_tree.get(\"max_depth\", \"\"),\n best_params_tree.get(\"min_samples_leaf\", \"\"),\n best_params_tree.get(\"ccp_alpha\", \"\"))\n dt = DecisionTreeClassifier(min_samples_leaf = min_samples,\n ccp_alpha = cp,\n max_depth = depth).fit(self.x_train,\n self.y_train) \n train_probs = dt.predict_proba(self.x_train) \n test_probs = dt.predict_proba(self.x_test) \n \n if self.x_val is not None:\n val_probs = dt.predict_proba(self.x_val) \n return self.return_results(train_probs, test_probs, val_probs)\n else:\n return self.return_results(train_probs, test_probs)\n \n # calling the default model\n else: \n\n dt = DecisionTreeClassifier(max_depth = 8).fit(self.x_train, self.y_train)\n train_probs = dt.predict_proba(self.x_train) \n test_probs = dt.predict_proba(self.x_test) \n \n if self.x_val is not None:\n val_probs = dt.predict_proba(self.x_val) \n return self.return_results(train_probs, test_probs, val_probs)\n else:\n return self.return_results(train_probs, test_probs)", "title": "" }, { "docid": "e6beb4cd1911c711e06f0124bf08186f", "score": "0.5523623", "text": "def generate_tree(self, rxns=None, obj=None, thermo_database=None, T=1000.0, nprocs=1, min_splitable_entry_num=2,\n min_rxns_to_spawn=20, max_batch_size=800, outlier_fraction=0.02, stratum_num=8,\n max_rxns_to_reopt_node=100):\n if rxns is None:\n rxns = self.get_training_set(thermo_database=thermo_database, remove_degeneracy=True, estimate_thermo=True,\n fix_labels=True, get_reverse=True)\n\n if len(rxns) <= max_batch_size:\n template_rxn_map = self.get_reaction_matches(rxns=rxns, thermo_database=thermo_database, remove_degeneracy=True,\n fix_labels=True, exact_matches_only=True, get_reverse=True)\n self.make_tree_nodes(template_rxn_map=template_rxn_map, obj=obj, T=T, nprocs=nprocs - 1, depth=0,\n min_splitable_entry_num=min_splitable_entry_num, min_rxns_to_spawn=min_rxns_to_spawn)\n else:\n random.seed(1)\n logging.error(\"dividing into batches\")\n batches = self.get_rxn_batches(rxns, T=T, max_batch_size=max_batch_size, outlier_fraction=outlier_fraction,\n stratum_num=stratum_num)\n logging.error([len(x) for x in batches])\n for i, batch in enumerate(batches):\n if i == 0:\n rxns = batch\n else:\n rxns += batch\n logging.error(\"pruning tree\")\n self.prune_tree(rxns, thermo_database=thermo_database, max_rxns_to_reopt_node=max_rxns_to_reopt_node)\n logging.error(\"getting reaction matches\")\n template_rxn_map = self.get_reaction_matches(rxns=rxns, thermo_database=thermo_database, fix_labels=True,\n exact_matches_only=True, get_reverse=True)\n logging.error(\"building tree with {} rxns\".format(len(rxns)))\n 
self.make_tree_nodes(template_rxn_map=template_rxn_map, obj=obj, T=T, nprocs=nprocs - 1, depth=0,\n                                 min_splitable_entry_num=min_splitable_entry_num, min_rxns_to_spawn=min_rxns_to_spawn)", "title": "" }, { "docid": "3897e9dd46437c248e63ccf0fd14d1f6", "score": "0.5523013", "text": "def predict(samples, tree_model, dataMissing=False):\n\n    # in this function we check which probability in outputs was the largest\n    # e.g. if outputs has 2/10 women aged > 30 living in the countryside = internet, 6/10 ... = press, 2/10 television\n    # then it classifies that women aged > 30 living in the countryside read the press\n    def classifyWithoutMissingData(samples, tree_model):\n        if tree_model.outputs != None: # leaf\n            return tree_model.outputs\n        else:\n            v = samples[tree_model.col] # col=best_value_labelled[0], i.e. the label\n            branch = None\n            if isinstance(v, int) or isinstance(v, float):\n                if v >= tree_model.value:\n                    branch = tree_model.branch_with_value\n                else:\n                    branch = tree_model.branch_with_others\n            else:\n                if v == tree_model.value:\n                    branch = tree_model.branch_with_value\n                else:\n                    branch = tree_model.branch_with_others\n        return classifyWithoutMissingData(samples, branch)\n\n\n    def classifyWithMissingData(samples, tree_model):\n        if tree_model.outputs != None: # leaf\n            return tree_model.outputs\n        else:\n            v = samples[tree_model.col]\n            if v == None:\n                tr = classifyWithMissingData(samples, tree_model.branch_with_value)\n                fr = classifyWithMissingData(samples, tree_model.branch_with_others)\n                tcount = sum(tr.values())\n                fcount = sum(fr.values())\n                tw = float(tcount)/(tcount + fcount)\n                fw = float(fcount)/(tcount + fcount)\n                result = collections.defaultdict(int) # Problem description: http://blog.ludovf.net/python-collections-defaultdict/\n                for k, v in tr.items(): result[k] += v*tw\n                for k, v in fr.items(): result[k] += v*fw\n                return dict(result)\n            else:\n                branch = None\n                if isinstance(v, int) or isinstance(v, float):\n                    if v >= tree_model.value: branch = tree_model.branch_with_value\n                    else: branch = tree_model.branch_with_others\n                else:\n                    if v == tree_model.value: branch = tree_model.branch_with_value\n                    else: branch = tree_model.branch_with_others\n                return classifyWithMissingData(samples, branch)\n\n    # function body\n    if dataMissing:\n        return classifyWithMissingData(samples, tree_model)\n    else:\n        return classifyWithoutMissingData(samples, tree_model)", "title": "" }, { "docid": "4593806eecea13d25cbfc05404122478", "score": "0.5508342", "text": "def build_sub_tree_from_tuples(tuple_left, tuple_right):\n\n    sub_tree = HuffmanTree()\n\n    # Check whether the tuple is a char or already merged tree\n    tuple_left_data = tuple_left[0]\n    tuple_left_freq = tuple_left[1]\n\n    if isinstance(tuple_left_data, str):\n        huff_node_left = HuffmanNode(data=tuple_left_data, node_type='character', frequency=tuple_left_freq)\n    else:\n        huff_node_left = tuple_left_data.root\n\n    # Check whether the tuple is a char or already merged tree\n    tuple_right_data = tuple_right[0]\n    tuple_right_freq = tuple_right[1]\n\n    if isinstance(tuple_right_data, str):\n        huff_node_right = HuffmanNode(data=tuple_right_data, node_type='character', frequency=tuple_right_freq)\n    else:\n        huff_node_right = tuple_right_data.root\n\n    # Construct a summary frequency node\n    freq_sum = tuple_left_freq + tuple_right_freq\n\n    summary_freq_node = HuffmanNode(node_type='frequency', frequency=freq_sum)\n    summary_freq_node.left = huff_node_left\n    summary_freq_node.right = huff_node_right\n    sub_tree.root = summary_freq_node\n\n    return sub_tree, 
freq_sum", "title": "" }, { "docid": "c5152d84529a702bbb144e30ecb55d94", "score": "0.55062824", "text": "def calDeciTree(self,data,indent):\r\n best_threshold=np.inf;best_attribute=np.inf;best_gini=np.inf;\r\n for attribute in range(0,len(data[0])-1):\r\n data_temp=[];column=0;\r\n for row in range(0,len(data)):\r\n data_temp.append(data[row][attribute])\r\n for temp_thr in data_temp:\r\n column+=1\r\n wt_gini=self.calGini(data,data_temp,temp_thr)\r\n if wt_gini<best_gini:\r\n best_gini=wt_gini\r\n best_threshold=temp_thr\r\n best_attribute=attribute\r\n best_column=column\r\n split=0\r\n data.sort(key=lambda data: data[best_attribute])\r\n # Stopping Condition\r\n return_class=self.checkStopCri(data)\r\n if return_class!= '':\r\n return return_class\r\n # if condition not satisfied then split data\r\n for j in range(0,len(data)):\r\n if data[j][best_attribute]==best_threshold:\r\n split=j;\r\n left=data[0:split];right=data[split+1:len(data)+1]\r\n output_str=indent+\"if float(data[value][\" + str(best_attribute) +\"]) <= \" + best_threshold +\":\"\r\n classifier.append(output_str)\r\n #recursively call left data\r\n if len(left)!=0:\r\n left_parse=self.calDeciTree(left,indent+\"\\t\")\r\n if left_parse is not None:\r\n output_str=indent+\"\\t target_class.append('\"+left_parse+\"')\"\r\n classifier.append(output_str)\r\n else:\r\n output_str = indent + \"\\t target_class.append('\" + 'Greyhound' + \"')\"\r\n classifier.append(output_str)\r\n classifier.append(indent+\"else: \")\r\n # recursively call right data\r\n if len(right)!=0:\r\n right_parse=self.calDeciTree(right,indent+\"\\t\")\r\n if right_parse is not None:\r\n output_str=indent+\"\\t target_class.append('\"+right_parse+\"')\"\r\n classifier.append(output_str)\r\n else:\r\n output_str = indent + \"\\t target_class.append('\" + 'Whippet' + \"')\"\r\n classifier.append(output_str)", "title": "" }, { "docid": "8666ff4006dd37714942a36c17803d9e", "score": "0.5505181", "text": "def grow_tree(data, algorithm_fun = entropy, columns_map=None):\n size = len(data)\n # data -> rekordy tabeli; jeลผeli sฤ… = 0 to zwracamy puste drzewo\n if size == 0: return DecisionTree()\n # algorithm_fun dla zbioru danych w pierwszej interacji bedzie sie odnosiล‚ do entropii caล‚ego zbioru danych.\n # w kolenych iteracjach bฤ™dzie to entropia poprzedniej decyzji\n current_result = algorithm_fun(data) # obliczanie entropii ukadu\n\n # best gain to najwyzszy wskaznik jakosci\n best_info_gain = 0.0\n best_value_labelled = None\n best_subsets = None\n\n col_num = len(data[0]) - 1 # zliczanie iloล›ci zmiennych opisujฤ…cych. ostatnia kolumna to target dlatego - 1\n for col in range(col_num):\n # iterowanie po zmiennych opisujฤ…cych. zmienna values_of_column bฤ™dzie zawieraฤ‡ listฤ™ wszystkich zmiennych dla danej kolumny\n # czyli dla ostatniej kolumny [zmeczony, srednie, wypoczety, srednie, srednie ... itd ]\n values_of_column = [row[col] for row in data]\n\n for value in values_of_column:\n # wyciฤ…gamy unikalne wartoล›ci (np. 
kobieta/mฤ™ลผczyzna, przedziaล‚y wiekowe itd.)\n # subset1, subset2 to sฤ… podzbiory: w jednym jest dana unikalna wartoล›ฤ‡,\n # w drugim znajduje siฤ™ pozostaล‚a reszta unikalnych wartoล›ci\n (subset1, subset2) = set_splitter(data, col, value)\n\n # p to prawdopodobieล„stwo wystฤ…pienia danej wartoล›ci\n p = float(len(subset1)) / len(data)\n #wyliczenie zysku -> szukamy max\n info_gain = current_result - p*algorithm_fun(subset1) - (1-p)*algorithm_fun(subset2)\n #jeล›li podzbiory nie sฤ… puste (czyli mamy z czego podziaล‚) i info_gain jest wiฤ™kszy od best_info_gain to:\n if info_gain > best_info_gain and len(subset1)>0 and len(subset2) > 0:\n best_info_gain = info_gain\n best_value_labelled = (col, value) #col to nazwa zmiennej decyzyjnej value to jej wartoล›ฤ‡ np. pล‚eฤ‡, kobieta\n best_subsets = (subset1, subset2)\n\n # jeลผeli wystฤ…piล‚ zysk to dzielimy dalej -> powtarzamy proces\n if best_info_gain > 0: # gdyby daฤ‡ tu 0.5 to byล‚by prepruning\n #rekurencja\n branch_with_value = grow_tree(best_subsets[0], columns_map=columns_map) # gaล‚ฤ…ลบ ktora zawiera danฤ… cechฤ™ (bardziej informatywnฤ…)\n branch_with_others =grow_tree(best_subsets[1], columns_map=columns_map) # gaล‚ฤ…ลบ z resztฤ… cech\n # branch_with_value i branch_with_others zapewniajฤ… rekurencjฤ™, czyli \"zadajemy\" kolejne pytania, tak dล‚ugo az zysk informacyjny = 0\n # depth drzewa rozwija siฤ™ wล‚aล›nie na tym etapie. W sytuacji gdy zysk informacyjny = 0, uruchamiany jest else, w ktรณrym obliczane sฤ…\n # wystฤ…pienia danej etykiety w liล›ciu\n # depth -> czyli gล‚ฤ™bokoล›ฤ‡ drzewa to kolejne instancje klasy Decision Tree - wฤ™zล‚y\n return DecisionTree(col=best_value_labelled[0], value=best_value_labelled[1], branch_with_value=branch_with_value, branch_with_others=branch_with_others,columns_map=columns_map, size = size)\n else:\n # zwraca liczebnoล›ฤ‡i labeli w formie sล‚ownika np. 
skaล‚y: 10, dom: 9, ล›cianka: 5\n return DecisionTree(outputs=unique_labels_counter(data), size = size)\n # DecisionTree zawiera wskaลบniki na instancje klasy DecisionTree ( te wskaลบniki to gaล‚ฤ™xie - \"maล‚e drzewa\")", "title": "" }, { "docid": "9d027afcf44896baf91673aab4ac9c32", "score": "0.5496224", "text": "def make_trees(self, nodes, tail_node):\n sorted_nodes = topological_sort_modified(nodes, tail_node)\n trees = []\n node_map = {}\n\n def mk_tr(inp):\n if inp.vreg:\n # If the input value has a vreg, use it\n child_tree = Tree(self.make_op(\"REG\", inp.ty), value=inp.vreg)\n elif inp.node in node_map:\n child_tree = node_map[inp.node]\n elif inp.node.name.op == \"LABEL\":\n child_tree = Tree(str(inp.node.name), value=inp.node.value)\n else: # inp.node.name.startswith('CONST'):\n # If the node is a constant, use that\n if inp.wants_vreg:\n raise ValueError(\"{} does require vreg\".format(inp))\n children = [mk_tr(i) for i in inp.node.data_inputs]\n child_tree = Tree(\n str(inp.node.name), *children, value=inp.node.value\n )\n return child_tree\n\n for node in sorted_nodes:\n assert len(node.data_outputs) <= 1\n\n # Determine data dependencies:\n children = []\n for inp in node.data_inputs:\n child_tree = mk_tr(inp)\n children.append(child_tree)\n\n # Create a tree node:\n tree = Tree(str(node.name), *children, value=node.value)\n self.debug_db.map(node, tree)\n\n # Handle outputs:\n if len(node.data_outputs) == 0:\n # If the tree was volatile, it must be emitted\n if node.volatile:\n trees.append(tree)\n else:\n # If the output has a vreg, put the value in:\n data_output = node.data_outputs[0]\n if data_output.vreg:\n vreg = data_output.vreg\n typ = data_output.ty\n if typ is None:\n print(node)\n tree = Tree(self.make_op(\"MOV\", typ), tree, value=vreg)\n trees.append(tree)\n tree = Tree(self.make_op(\"REG\", typ), value=vreg)\n elif node.volatile:\n trees.append(tree)\n\n # Store for later:\n node_map[node] = tree\n return trees", "title": "" }, { "docid": "11650c39deef25b99fe339e042646305", "score": "0.54898995", "text": "def __init__(self, X, y, sample_weights, class_weights):\n self.name = \"Decision Tree\"\n self.clf = self.train(X, y, sample_weights, class_weights)", "title": "" }, { "docid": "505f4e3103c3a2bc1e0cfb073b6c8d39", "score": "0.5485625", "text": "def build_tree(self, features, targets, depth):\n pass", "title": "" }, { "docid": "7ebc1059312d0d22590ac4ce29e4c053", "score": "0.54835457", "text": "def decision_tree(X, y, trainingparam): # out_file):\n return DecisionTreeClassifier(**trainingparam).fit(X, y)", "title": "" }, { "docid": "86c6284db7b6921d7dbfe056bbd3a920", "score": "0.54720396", "text": "def classify(self, example):\n #\n # fill in the function body here!\n #\n #use root to iterate through the tree\n #when advancing a node down, check if we are going to a decision node or a leaf node \n #if leaf node -> check predicted class as that is our assumed answer\n #if decision node, see if conditon is met -> if yes we get a leaf node, if not then we move to the next decision node \n\n\n return \"hello\", 0.42 # fix this line!", "title": "" }, { "docid": "58a150701a79738af7497d91b4f4e83f", "score": "0.54401445", "text": "def build_tree(self):\n\n def build_node(parent, var):\n\n if var.get('type') == 'TERMINAL':\n parent.set('type', var.get('type'))\n parent.set(key='expr', value=var.get('expr'))\n parent.set(key='expval', value=None)\n\n if var.get('type') == 'CHANCE':\n parent.set('type', var.get('type'))\n parent.set(key='node_number', 
value=self.node_number)\n parent.set(key='expval', value=None)\n self.node_number += 1\n for child in var.get('values'):\n prob, value, next_node = child\n tree_node = new_node(self.treenodes, tag=len(self.treenodes))\n tree_node.set(key='var', value=var.tag)\n tree_node.set(key='value', value=value)\n tree_node.set(key='prob', value=prob)\n if 'next_node' in parent.keys():\n parent.get(key='next_node').append(len(self.treenodes)- 1)\n else:\n parent.set(key='next_node', value=[len(self.treenodes) - 1])\n build_node(parent=tree_node, var=self.variables[next_node])\n\n if var.get('type') == 'DECISION':\n parent.set('type', var.get('type'))\n parent.set('max', var.get('max'))\n parent.set(key='node_number', value=self.node_number)\n parent.set(key='expval', value=None)\n self.node_number += 1\n for child in var.get('values'):\n value, next_node = child\n tree_node = new_node(self.treenodes, tag=len(self.treenodes))\n tree_node.set(key='var', value=var.tag)\n tree_node.set(key='value', value=value)\n tree_node.set(key='expval', value=None)\n if 'next_node' in parent.keys():\n parent.get(key='next_node').append(len(self.treenodes) - 1)\n else:\n parent.set(key='next_node', value=[len(self.treenodes) - 1])\n build_node(parent=tree_node, var=self.variables[next_node])\n\n\n\n self.node_number = 0\n parent = new_node(self.treenodes, tag=0)\n build_node(parent=parent, var=self.variables[0])", "title": "" }, { "docid": "0eb5ddbc1038ee4d27d984c8d24f08e4", "score": "0.5430782", "text": "def test_decision_tree(trainx, trainy, testx, testy):\n dt = tree.DecisionTreeClassifier(criterion=\"entropy\")\n dt = dt.fit(trainx, trainy)\n print 'train accuracy: {}'.format(dt.score(trainx, trainy))\n print 'test accuracy: {}'.format(dt.score(testx, testy))\n return dt", "title": "" }, { "docid": "58474eac0bb5459ca9dae019f4dab89e", "score": "0.5430288", "text": "def analyze_tree(dataset,my_tree):\n raise NotImplementedError('Problem 6 Incomplete')", "title": "" }, { "docid": "a93073f3810e982ac8061879def362f8", "score": "0.54258513", "text": "def create_tree(data, frequency_factor=1.0):\n\n symb2freq = defaultdict(int)\n for ch in data:\n symb2freq[ch] += 1\n\n for symbol in symb2freq:\n freq = symb2freq[symbol]\n symb2freq[symbol] = int(\n freq * frequency_factor + .5 / frequency_factor\n )\n\n huff = encode(symb2freq)\n canonical = canonicalize(huff)\n return canonical", "title": "" }, { "docid": "ee8a8b48f6aa08155d80f9020a892612", "score": "0.54255927", "text": "def decisionTreeClassifier(self):\r\n name = \"DT\"\r\n model = tree.DecisionTreeClassifier()\r\n # create a regressor object\r\n regressor = DecisionTreeRegressor(random_state=0)\r\n\r\n # fit the regressor with X and Y data\r\n regressor.fit(self.X_train, self.y_train)\r\n\r\n model.fit(self.X_train, self.y_train)\r\n\r\n y_predict = model.predict(self.X_test)\r\n score = accuracy_score(self.y_test, y_predict)\r\n\r\n print(\"***** Accuracy Score ******* \\n\")\r\n print(\"Score: \", score)\r\n\r\n ## Cross-validaton using 10-fold cross validation\r\n cv_scores = cross_val_score(model, self.X_train, self.y_train, cv=8)\r\n print(\"Cross-Validation average score: %.2f \\n\" % cv_scores.mean())\r\n\r\n cm = confusion_matrix(self.y_test, y_predict)\r\n print(\"***** Confusion Matrix ******* \\n\")\r\n print(cm)\r\n \r\n # Append value to the results\r\n self.results.append((name, cv_scores.mean()))\r\n \r\n # Save the model\r\n self.saveModel(model, name)\r\n \r\n return", "title": "" }, { "docid": "dc968ac5102b7c3dcafe883389781203", "score": 
"0.5422936", "text": "def buildTreesFromBranchInfo(self):\n # t is a map of parent to children\n self.mTreeKs = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mKs) for x in self.mBranchInfo])\n self.mTreeKa = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mKa) for x in self.mBranchInfo])\n self.mTreeKaks = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mKaks) for x in self.mBranchInfo])\n self.mTreeSds = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mSds) for x in self.mBranchInfo])\n self.mTreeNdn = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mNdn) for x in self.mBranchInfo])\n self.mTreeS = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mS) for x in self.mBranchInfo])\n self.mTreeN = TreeTools.Graph2Tree(\n [(x.mBranch1, x.mBranch2, x.mN) for x in self.mBranchInfo])", "title": "" }, { "docid": "d8d4ad37a5b7c489ba29a91e000372a0", "score": "0.541911", "text": "def BuildingDT_cont(input_X,input_Y,feature_X_values,feature_Y_values,Attribute,mc_label):\n # If there is no output label\n if input_Y.shape[0] == 0:\n return DT_cont(LeafValue=mc_label)\n \n left = [] # contains values less or equal than threshold \n right = [] # higher than threshold\n mcv = most_common_value(input_Y,feature_Y_values) # most common value of given input Y\n \n # If all labels of input_Y is the same or Attribute is empty\n all_values_same_boolean = check_all_values_same(input_Y,feature_Y_values) \n if all_values_same_boolean or Attribute == []:\n return DT_cont(LeafValue=mcv)\n else:\n best_IG = 0\n best_set_X = []\n best_set_Y = []\n best_Attribute = 0\n best_threshold = 0.0\n current_entrophy = entrophy(input_Y,feature_Y_values)\n for index in Attribute:\n # index is the column index --> decides which attribute to use\n all_thresholds = return_all_thresholds(input_X,input_Y,index)\n for threshold in all_thresholds:\n # Get divided data for each threshold for a given attribute = index\n divided_X, divided_Y = divide_data(input_X,input_Y,index,feature_X_values,threshold)\n\n for j in range(len(divided_Y)): #iterate through each possible feature value\n # For wine.data --> divided_Y contains subsets for lower then threshold\n # and higher than the threshold\n current_IG = information_gain(divided_Y[j],feature_Y_values,current_entrophy)\n if current_IG >= best_IG:\n best_IG = current_IG\n best_Attribute = index\n best_feature_ind = j\n best_threshold = threshold\n \n\n # Now best attribute and threshold are selected --> divide data using that attribute and thresh\n # best_Attribute --> gives the column number of best attribute\n # best threshold --> gives the best threshold value which gives max IG\n # best_feature_ind --> gives which feature value has the most IG, i.e. \"x\", \"o\" or \"b\" in tic-tac\n best_set_X, best_set_Y = divide_data(input_X,input_Y,best_Attribute,feature_X_values,best_threshold)\n \n # Remove the current attribute from the attribute list to\n # create remaning attributes --> no need to use same attribute over and\n # over again! 
--> If it is needed, it is gonna show up in the recursion anyway\n remaning_Attributes = []\n for index in Attribute:\n if index != best_Attribute:\n remaning_Attributes.append(index)\n\n # Left contains values less or equal than threshold\n # Right contains values more than threshold\n # For more detail, check divide data function\n #left.append( BuildingDT_cont(best_set_X[0],best_set_Y[0],feature_X_values,feature_Y_values, \n #remaning_Attributes,mcv) )\n #right.append( BuildingDT_cont(best_set_X[1],best_set_Y[1],feature_X_values,feature_Y_values, \n #remaning_Attributes,mcv) )\n return DT_cont(Left=BuildingDT_cont(best_set_X[0],best_set_Y[0],feature_X_values,feature_Y_values,\n remaning_Attributes,mcv),\n Right=BuildingDT_cont(best_set_X[1],best_set_Y[1],feature_X_values,feature_Y_values, \n remaning_Attributes,mcv),\n FeatureNum=best_Attribute,ThresholdNum=best_threshold \n )\n \n #return DT_cont(FeatureNum=best_Attribute,ThresholdNum=best_threshold,Left=left,Right=right)", "title": "" }, { "docid": "152b0ff511a5b4c5b48334aa0bfb5caf", "score": "0.5416121", "text": "def decision_tree_regressor(data, X_label, y_label, random_state, test_size=0.3, max_depth=None):\r\n \r\n # Split data\r\n X = data[X_label]\r\n y = data[y_label]\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)\r\n \r\n print(X_train.shape)\r\n # Create a model and train it\r\n model = DecisionTreeRegressor(random_state=random_state, max_depth=max_depth)\r\n model = model.fit(X_train, y_train)\r\n \r\n # Predict values for test set and asses error\r\n y_pred = model.predict(X_test)\r\n rmse = np.sqrt(mean_squared_error(y_test, y_pred))\r\n \r\n # Return result object\r\n return {'rmse':rmse, 'model':model, 'y_pred':y_pred}", "title": "" }, { "docid": "955574679e258a4c44707a0a7c3d17e7", "score": "0.5414955", "text": "def get_decision_tree_classifier():\n param_grid = {\"decision_tree__max_depth\": range(1, 5),\n \"decision_tree__min_samples_leaf\": range(1, 5),\n \"decision_tree__min_samples_split\": range(2, 5)\n }\n \n return DecisionTreeClassifier(), param_grid", "title": "" }, { "docid": "14757735ecd43f0cda690b6db4c1a8b2", "score": "0.53926677", "text": "def train_decision_tree(X, y, max_depth=None):\n\tclassifier_1 = DecisionTreeClassifier(criterion = 'entropy', max_depth = max_depth)\n\tn_classifier = classifier_1.fit(np.array(X),y)\n\treturn n_classifier", "title": "" }, { "docid": "5bfd53f4f0b61d9c734c432fb35681be", "score": "0.53837067", "text": "def classify(tree, input_data, model_evaluation=evaluate_reg_tree):\n if not is_tree(tree):\n return model_evaluation(tree, input_data)\n if input_data[tree['split_axis']] > tree['split_value']:\n if is_tree(tree['left']):\n return classify(tree['left'], input_data, model_evaluation)\n else:\n return model_evaluation(tree['left'], input_data)\n else:\n if is_tree(tree['right']):\n return classify(tree['right'], input_data, model_evaluation)\n else:\n return model_evaluation(tree['right'], input_data)", "title": "" }, { "docid": "7571c89335e7a532e69a19246da73af3", "score": "0.5371367", "text": "def predict_tree(sample, my_tree):\n raise NotImplementedError('Problem 5 Incomplete')", "title": "" }, { "docid": "649ef1388cce702301f297184d729128", "score": "0.53536475", "text": "def generate_tree(self):\n name_to_id_dict = self.mapping_name_to_id()\n tree = Tree()\n tree.create_node(\"ROOT\", \"ROOT\")\n for node_leaf in self.node_leaf_list[:, 1:]:\n node_leaf = np.insert(node_leaf, 0, 'ROOT')\n for i in range(1, 
len(node_leaf)):\n if not tree.contains(node_leaf[i]):\n tree.create_node(tag=node_leaf[i], identifier=node_leaf[i], parent=node_leaf[i-1], data=name_to_id_dict['class'+str(i)][node_leaf[i]])\n print(\"Construct tree done! Tree structure is:\")\n print(tree.show())\n return tree", "title": "" }, { "docid": "cc0a396722d34bc39c45c88ac14a908c", "score": "0.5350789", "text": "def decision_tree_learning(examples, parent_examples,\n header_info, attr_lst, fix_expand_order=False):\n\n # Invalid\n if len(examples) == 0: # PLURALITY-VALUE(parent examples)\n pr_cls_cnt = class_counts(parent_examples)\n pred = max(pr_cls_cnt, key=lambda k: pr_cls_cnt[k])\n return Leaf_Node(parent_examples, pred)\n\n cls_cnt = class_counts(examples)\n\n # Success\n if len(cls_cnt.keys()) == 1: # only 1 class -> predict\n pred = cls_cnt.keys()[0]\n return Leaf_Node(examples, pred)\n\n if not attr_lst: # No more choice, PLURALITY-VALUE(examples)\n pred = max(cls_cnt, key=lambda k: cls_cnt[k])\n return Leaf_Node(examples, pred)\n\n # pick best attr if expansion order is not given\n if not fix_expand_order:\n best_attr, _ = get_best_attr(examples, header_info, attr_lst)\n else:\n best_attr = attr_lst[0]\n\n attr_lst.remove(best_attr)\n partitions = partition_by_a(examples, best_attr, header_info)\n node = Decision_Node(best_attr)\n\n for i, attr_val in enumerate(header_info[best_attr]['values']):\n _attr_lst = copy.copy(attr_lst)\n child = decision_tree_learning(\n partitions[i],\n examples,\n header_info,\n _attr_lst,\n fix_expand_order)\n node.child_nodes.append(child)\n\n return node", "title": "" }, { "docid": "22fdd435d2c45913ebf3b96c560f0613", "score": "0.5325245", "text": "def construct_new_branches(d):\n\n EPS = 1e-12 # division by zero protection\n\n # eid-features\n d['_BToKEE_l1_unBiased'] = d['Electron_unBiased'][d['BToKEE_l1Idx']]\n d['_BToKEE_l1_pfmvaId'] = d['Electron_pfmvaId'][d['BToKEE_l1Idx']]\n\n d['_BToKEE_l2_unBiased'] = d['Electron_unBiased'][d['BToKEE_l2Idx']]\n d['_BToKEE_l2_pfmvaId'] = d['Electron_pfmvaId'][d['BToKEE_l2Idx']]\n\n # e-r-features\n for name in e_r_features.keys():\n idkey = 'BToKEE_l1Idx' if ('l1' in name) else 'BToKEE_l2Idx'\n d[name] = d[e_r_features[name][0] ][d[idkey]] / (d[e_r_features[name][1] ][d[idkey]] + EPS)\n # k-features\n for name in k_features.keys():\n idkey = 'BToKEE_kIdx'\n d[name] = d[k_features[name][0] ][ d[idkey] ]\n\n # r-features\n for name in r_features.keys():\n d[name] = d[r_features[name][0]] / (d[r_features[name][1]] + EPS)\n # d-features\n for name in d_features.keys():\n d[name] = np.abs( d[d_features[name][0]] - d[d_features[name][1]] )", "title": "" }, { "docid": "41cc1f75d78770b008090b167017059f", "score": "0.5305347", "text": "def branchify_data(data, branchify_function):\n\n # train\n train_branches_texts = []\n train_branches_labels = []\n branchify_function(data['train'], train_branches_texts, train_branches_labels)\n\n # test\n test_branches_texts = []\n test_branches_labels = []\n branchify_function(data['test'], test_branches_texts, test_branches_labels, has_labels=False)\n\n # dev\n dev_branches_texts = []\n dev_branches_labels = []\n branchify_function(data['dev'], dev_branches_texts, dev_branches_labels)\n\n return train_branches_texts, train_branches_labels, test_branches_texts, test_branches_labels, \\\n dev_branches_texts, dev_branches_labels", "title": "" }, { "docid": "5cb555793011c7407063ebf9ec9d0d86", "score": "0.5304395", "text": "def predict(self,testdata):\n \n npoints = len(testdata)\n predictions = np.empty(npoints)\n for 
point_i in range(npoints):\n \n # Print out decisions for a point\n if point_i<10:\n if self.verbose == 'path10':\n print('Display of choices for point %i' %point_i)\n \n ParentNode = self.tree\n Rule = ParentNode.rule\n while Rule is not None:\n splitfeat_i = Rule[0]\n splitval = Rule[1]\n if testdata[point_i,splitfeat_i] <= splitval:\n ChildNode = ParentNode.left\n if point_i<10:\n if self.verbose == 'path10':\n print('Feature #'+str(splitfeat_i+1)+': ',testdata[point_i,splitfeat_i],'<=',splitval)\n \n else:\n if point_i<10:\n if self.verbose == 'path10':\n print('Feature #'+str(splitfeat_i+1)+': ',testdata[point_i,splitfeat_i],'>',splitval)\n ChildNode = ParentNode.right\n ParentNode = ChildNode\n Rule = ParentNode.rule\n predictions[point_i]=ParentNode.leaflabel\n if point_i<10:\n if self.verbose == 'path10':\n print('Point labeled as',ParentNode.leaflabel)\n return predictions.astype(int)", "title": "" }, { "docid": "db3f5199286f85b7b4337b7274b76ed8", "score": "0.53030884", "text": "def generate_huffman_tree(pairs):\n return successive_merge(\n make_leaf_set(pairs))", "title": "" }, { "docid": "2b1ea5b19988c22225486ecba7a86cc0", "score": "0.53012604", "text": "def predict(self, data):\n raise NotImplementedError(\"Concrete subclasses of decision tree must \"\n \"implement their own testing method\")", "title": "" }, { "docid": "496454887ee32ff4a879eae751f75ee5", "score": "0.52939796", "text": "def learn_dt(examples, features, label_dim, max_depth=math.inf): \n feature, split_value = choose_feature_split(examples, features)\n maj_dec = find_majority(examples)\n all_features_number = [x[-1] for x in examples]\n a= np.bincount(all_features_number)\n all_feature_count = np.pad(a,(0,len(global_var.index_label_dict.keys())-len(a)),'constant')\n\n\n all_feature_count = (all_feature_count/sum(np.array(all_feature_count))).tolist()\n entropy_root = entropy(all_feature_count)\n\n\n root = Node(name=\"root\", parent=None, fd=feature, leaf=False, split=split_value, dep=1, majority=maj_dec, entropy=entropy_root, edge=\"\")\n #print(RenderTree(root))\n split_node(root, examples, features, max_depth)\n return root", "title": "" }, { "docid": "7db2e575a494a9e2df070bdfedbc017e", "score": "0.52729166", "text": "def TreeMaker(self):\n\n Root = self.TreeNode(self.dataset, None, None, self.n_classes)\n\n self.RecursiveTreeMaker([Root], self.max_depth, self.min_samples, 1 , self.n_classes)\n\n return Root", "title": "" }, { "docid": "c78816f37c5f5234a2bed3d095c6ba3a", "score": "0.52263314", "text": "def decision_function(self, dataset, parallel=True):\n # handle NaNs\n if np.any(np.isnan(dataset)):\n dataset = np.nan_to_num(dataset, copy=False)\n\n # set up variables\n if self.batch_size > 0 and parallel:\n return self.decision_function_batch(dataset, self.batch_size)\n else:\n pairs_proba = np.empty((len(dataset), len(self.root_nodes)), float) # indexes of data points\n\n # get all clusters for all points in all trees\n for d_idx, d in enumerate(dataset):\n # traverse all trees\n for t_idx, tree in enumerate(self.root_nodes):\n d_mean, d_pct, d_pdf_mean, d_cov_det, d_cov_inv = descend_density_tree(d, tree)\n if d_pct > self.thresh_traverse:\n if self.method == 'normal':\n pairs_proba[d_idx, t_idx] = d_pct * my_normal(d, d_mean, d_cov_det, d_cov_inv)\n if self.standardize:\n pairs_proba[d_idx, t_idx] /= d_pdf_mean # standardize by max. probability\n else:\n pairs_proba[d_idx, t_idx] = euclidean(d_mean, d)\n if self.standardize:\n pairs_proba[d_idx, t_idx] /= d_pdf_mean # standardize by max. 
probability\n else:\n pairs_proba[d_idx, t_idx] = np.nan\n self.scores = np.log(np.nanmean(pairs_proba, axis=-1))\n return self.scores", "title": "" }, { "docid": "a66095cea302e49be466b63fc97740ff", "score": "0.5218965", "text": "def _parallel_build_trees(tree, forest, X, y, groups, sample_weight, tree_idx, n_trees,\n verbose=0, class_weight=None,\n n_samples_bootstrap=None):\n # print(\"Start parallel_build_trees\")\n if verbose > 1:\n print(\"building tree %d of %d\" % (tree_idx + 1, n_trees))\n\n if forest.random == False:\n forest.bootstrap = False\n\n if forest.bootstrap:\n n_samples = X.shape[0]\n if sample_weight is None:\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n else:\n curr_sample_weight = sample_weight.copy()\n\n indices = _generate_sample_indices(tree.random_state, n_samples,\n n_samples_bootstrap)\n\n sample_counts = np.bincount(indices, minlength=n_samples)\n curr_sample_weight *= sample_counts\n\n if class_weight == 'subsample':\n with catch_warnings():\n simplefilter('ignore', DeprecationWarning)\n curr_sample_weight *= compute_sample_weight('auto', y,\n indices=indices)\n elif class_weight == 'balanced_subsample':\n curr_sample_weight *= compute_sample_weight('balanced', y,\n indices=indices)\n # start = time.time()\n tree.fit(X, y, groups, sample_weight=curr_sample_weight, check_input=False)\n # end = time.time()\n # print(\"Temps construction arbre : \" + str(end-start) + \" secondes\")\n else:\n # start = time.time()\n tree.fit(X, y, groups, sample_weight=sample_weight, check_input=False)\n # end = time.time()\n # print(\"Temps construction arbre : \" + str(end - start) + \" secondes\")\n # print(\"End of tree construction nยฐ\"+str(tree_idx+1))\n # print(str(((tree_idx+1)/n_trees)*100)+\"% Done\")\n return tree", "title": "" }, { "docid": "0b855f66b7b075538dc767083c0c768b", "score": "0.52164435", "text": "def build_tree(records, symptoms):\r\n return build_tree_helper(records, symptoms, 0, [])", "title": "" }, { "docid": "43c77e3d06ea6236cc5dbed618511fb2", "score": "0.5212267", "text": "def construct_tree(depth):\n\n vertices = []\n for d in range(depth):\n for i in range(2**d):\n vertices.append('x_{}_{}'.format(d, i))\n \n edges = []\n for d in range(depth-1):\n for i in range(2**(d+1)):\n edges.append(('x_{}_{}'.format(d, i//2), 'x_{}_{}'.format(d+1, i)))\n\n observed = set()\n for i in range(2**(depth-1)):\n observed.add('x_{}_{}'.format(depth-1, i))\n \n return vertices, edges, observed", "title": "" }, { "docid": "4ebfccabe2063d37b173d5b67b0d51a8", "score": "0.5208814", "text": "def _compose_ht_tree(self, text):\n freq_table = self._compute_chr_freq(text)\n ht_queue = HeapPriorityQueue()\n for freq, lett in freq_table:\n ht_tree = LinkedBinaryTree()\n ht_tree._add_root((freq, lett))\n ht_queue.add(freq, ht_tree)\n\n while len(ht_queue) > 1:\n (freq1, subtree1) = ht_queue.remove_min()\n (freq2, subtree2) = ht_queue.remove_min()\n freq = freq1 + freq2\n ht_tree = LinkedBinaryTree()\n ht_tree._add_root((freq, None))\n ht_tree._attach(ht_tree.root(), subtree1, subtree2)\n ht_queue.add(freq, ht_tree)\n _, ht_tree = ht_queue.remove_min()\n return ht_tree", "title": "" }, { "docid": "7ff4625e81de0d0f79cd22c6283e68d6", "score": "0.5201413", "text": "def build_tree_top_down(columns, seq_names, cluster_method, evaluation_method):\n all_columns = columns\n all_seq_names = seq_names\n def is_finished(cluster, columns):\n return len(cluster) <= 2 or all([len(set(column)) == 1 for column in columns])\n def recurse(columns, seq_names):\n matrix = 
columns_to_matrix(columns, one_hot_encode=(cluster_method == 'k-means'))\n cluster_assignments = cluster_matrix(matrix, cluster_method)\n\n cluster0_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 0]\n cluster1_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 1]\n cluster0 = [seq_names[i] for i in cluster0_indices]\n cluster1 = [seq_names[i] for i in cluster1_indices]\n cluster0_columns = [[column[i] for i in cluster0_indices] for column in columns]\n if is_finished(cluster0, cluster0_columns):\n if len(cluster0) == 1:\n clade0 = Clade(name=cluster0[0])\n else:\n clade0 = Clade(clades=map(lambda x: Clade(name=x), cluster0))\n else:\n clade0 = Clade(clades=recurse(cluster0_columns, cluster0))\n\n cluster1_columns = [[column[i] for i in cluster1_indices] for column in columns]\n if is_finished(cluster1, cluster1_columns):\n if len(cluster1) == 1:\n clade1 = Clade(name=cluster1[0])\n else:\n clade1 = Clade(clades=map(lambda x: Clade(name=x), cluster1))\n else:\n clade1 = Clade(clades=recurse(cluster1_columns, cluster1))\n return (clade0, clade1)\n tree = Tree(Clade(clades=recurse(columns, seq_names)))\n return tree", "title": "" }, { "docid": "b6c8e0ba883ecb3f73ea80d1ebf12e0b", "score": "0.5193186", "text": "def _bdtrain(self, X1,X2,chleft,chright,features2,thlist,Y):\r\n count = 0\r\n N1 = X1.shape[0]\r\n N2 = X2.shape[0]\r\n \"\"\"Nsamples: the number of drug and target pairs \"\"\"\r\n Nsamples = N1*N2 \r\n \r\n #apply\r\n leafnode = np.zeros(Nsamples) \r\n leafnode2 = np.zeros_like(leafnode)\r\n #indices of rows and columns\r\n \"\"\" zeros_like: return the same shape of the input array but all elements are zeros\"\"\"\r\n nodeindex_rows = np.zeros_like(leafnode).astype(int) \r\n nodeindex_cols = np.zeros_like(leafnode).astype(int)\r\n \r\n \r\n \r\n for i in range(N1):\r\n for j in range(N2):\r\n node = 0 #root\r\n Xtemp = np.concatenate((X1[i],X2[j]))\r\n while chleft[node][0] != -1:\r\n # print node\r\n if Xtemp[features2[node]] <= thlist[node]:\r\n node = chleft[node][0].astype(int)\r\n else:\r\n node = chright[node][0].astype(int)\r\n \r\n leafnode[count] = node # store where each sample goes\r\n leafnode2[count] = Y[i,j] #store the label of each sample\r\n nodeindex_rows[count] = i\r\n nodeindex_cols[count] = j\r\n count += 1\r\n \"\"\"l is the id list of leaf nodes \"\"\"\r\n l = np.unique(leafnode) \r\n # print len(l)\r\n # print len(np.where(chleft==-1)[0])\r\n \r\n pred = np.zeros(Nsamples) # predict the values for each sample\r\n prednode = np.zeros(len(l)) # compute the values of each node\r\n # indprednode = np.zeros([len(l)]) \r\n prednode_rows = np.zeros([len(l),Y.shape[1]]) \r\n prednode_cols = np.zeros([len(l),Y.shape[0]]) \r\n \r\n \r\n \"\"\" build a dict whose key is the id (cont) of node and value is the index (count) of node \"\"\"\r\n ln_dict = dict()\r\n \"\"\"cont is the content/element in l i.e. 
(leaf) node id ; count is the index of cont in l\"\"\"\r\n for count,cont in enumerate (l):\r\n \"\"\" leafnode2[leafnode==cont] is the sub label matrix corresponding to (leaf) node cont \"\"\"\r\n prednode[count] = np.mean(leafnode2[leafnode==cont]) # the total average, p3\r\n pred[leafnode==cont] = prednode[count] # the prediction using the total average\r\n \"\"\"nodeindex_rows[leafnode==cont] is the row indices that belongs to leaf node cont \"\"\"\r\n prednode_rows[count] = np.mean(Y[nodeindex_rows[leafnode==cont]],0) #p1: the row-wise prediction of count-th leaf node (cont)\r\n prednode_cols[count] = np.mean(Y[:,nodeindex_cols[leafnode==cont]],1) #p2: the row-wise prediction of count-th leaf node (cont) \r\n # indprednode[count] = cont # (redundant) the same value as l\r\n ln_dict[cont] = count \r\n \r\n return leafnode,leafnode2,prednode,pred,l,prednode_rows,prednode_cols, ln_dict", "title": "" }, { "docid": "2b032aad0ea68177c9a936f375112f7d", "score": "0.5190547", "text": "def train_disjoint_trees(self, iteration, tree_algorithm): \n\n gating_values = self.all_gating_values[iteration]\n gate = np.argmax(gating_values, axis=1)\n gating_values_hard = np.zeros([self.n_input, self.n_experts])\n gating_values_hard[np.arange(0, self.n_input), gate] = 1\n DT_experts_disjoint = [None for _ in range(self.n_experts)]\n DT_experts_alternative_algorithm = [None for _ in range(self.n_experts)]\n\n if tree_algorithm == \"sklearn_default\":\n if self.X_contains_categorical:\n X, _ = self._one_hot_encode(self.X_original_pd)\n else:\n X = self.X_original\n for index_expert in range(0, self.n_experts):\n DT_experts_disjoint[index_expert] = tree.DecisionTreeClassifier(max_depth=self.max_depth)\n DT_experts_disjoint[index_expert].fit(X=X, y=self.y, sample_weight=gating_values_hard[:, index_expert])\n\n self.DT_experts_disjoint = DT_experts_disjoint\n\n # Alternative algorithms\n\n elif tree_algorithm == \"sklearn_asymmetric\":\n if self.X_contains_categorical:\n X, _ = self._one_hot_encode(self.X_original_pd)\n else:\n X = self.X_original\n\n max_leaf_nodes = int((2**(self.max_depth + 1)) / 2)\n\n for index_expert in range(0, self.n_experts):\n DT_experts_alternative_algorithm[index_expert] = tree.DecisionTreeClassifier(max_leaf_nodes=max_leaf_nodes)\n DT_experts_alternative_algorithm[index_expert].fit(X=X, y=self.y, sample_weight=gating_values_hard[:, index_expert])\n\n self.DT_experts_alternative_algorithm = DT_experts_alternative_algorithm\n\n elif tree_algorithm == \"optimal_trees\":\n\n from interpretableai import iai # Commercial software\n\n for index_expert in range(self.n_experts):\n mask = gating_values_hard[:, index_expert]\n mask = mask.astype(bool)\n X = self.X_original_pd[mask].copy() \n y = self.y_original[mask].copy() \n\n # Optimal trees cannot handle object dtypes\n X.loc[:, X.dtypes == 'object'] = X.select_dtypes(['object']).apply(lambda x: x.astype('category')) \n\n DT_experts_alternative_algorithm[index_expert] = iai.GridSearch(iai.OptimalTreeClassifier(), max_depth=self.max_depth)\n DT_experts_alternative_algorithm[index_expert].fit(X, y)\n \n self.DT_experts_alternative_algorithm = DT_experts_alternative_algorithm\n\n elif tree_algorithm == \"h2o\":\n\n # Allows multi-class but performs binary prediction\n\n from modt._alternative_DTs import H2o_classifier\n\n server = H2o_classifier(max_depth = -1)\n server.start_server()\n\n for index_expert in range(0, self.n_experts):\n mask = gating_values_hard[:, index_expert]\n mask = mask.astype(bool)\n X = 
self.X_original_pd[mask].copy() \n y = self.y_original[mask].copy() \n\n DT_experts_alternative_algorithm[index_expert] = H2o_classifier(max_depth=self.max_depth)\n DT_experts_alternative_algorithm[index_expert].fit(X=X, y=y, expert_identifier=index_expert)\n DT_experts_alternative_algorithm[index_expert].plot()\n\n self.DT_experts_alternative_algorithm = DT_experts_alternative_algorithm\n\n server.stop_server()\n\n else:\n raise Exception(\"Invalid tree algorithm.\")", "title": "" }, { "docid": "dcca1235a7001f2b159f19a15d155805", "score": "0.51851404", "text": "def generate_KDTree(data):\n\n kdtree = spatial.KDTree(zip(*data[0:-1]))\n\n return kdtree", "title": "" }, { "docid": "c99520e39404d0eaded47e0388701b36", "score": "0.51799256", "text": "def walker(value_arr, level=0, tree=Node(0)): \n #print(\"Inside walker()\")\n \n if ( (value_arr[0]==1 and value_arr[1]==0 and value_arr[2]==0) or (value_arr[0] == 0 and value_arr[1] == 1 and value_arr[2]==0) or (value_arr[0] == 0 and value_arr[1] == 0 and value_arr[2]==1) or\n (value_arr[0]==0 and value_arr[1]==0 and value_arr[2]==0) or level == scriptArgs.level_lim): # base case (leaf nodes have no children)\n #print(\"Base case\")\n tree.left = None \n tree.right = None\n return [(value_arr, level)], tree # store the leaf node value (ie k=0 or k=1) and its corresponding level as tuple-element of list\n \n # Generate a random number from uniform prob distribution:\n R = np.random.uniform(low=0.0, high=1.0) \n \n # Use binomial prob to determine how the number of charging events are split across 1st and 2nd half of step\n substep1_arr = binomial_prob_multiI(value_arr, R) # value holds the list of [k_e, k_e] number of charging events\n #substep2_arr = np.array(value_arr) - np.array(substep1_arr) # Note that because value_arr is a numpy array as is substep1, I can do elementwise subtraction (ie treat the electron and ion counters separately)\n substep2_arr = [99999999999999, 99999999999999, 99999999999999] # intialise list (rather than mixing np array type)\n substep2_arr[0] = value_arr[0] - substep1_arr[0]\n substep2_arr[1] = value_arr[1] - substep1_arr[1]\n substep2_arr[2] = value_arr[2] - substep1_arr[2]\n\n #print(f\"LEVEL = {level} R={R}, substep1 = {substep1_arr}\")\n #print(f\"substep2 = {substep2_arr}\")\n \n # Define the left and child nodes:\n tree.left = Node(nodify(substep1_arr)) # use my hacky nodify function as Node cannot handle non-numbers (e.g. 
arrays)\n tree.right = Node(nodify(substep2_arr))\n \n a_arr, tree_l = walker(substep1_arr, level+1, tree.left) # recursive call to left child\n b_arr, tree_r = walker(substep2_arr, level+1, tree.right) # recursive call to right child\n \n #print(f\"walker level{level} called with value {value_arr} splits into {substep1_arr} and {substep2_arr} as R = {R}\")\n #print(f\" left subtree a_arr = {a_arr}, right subtree b_arr = {b_arr}\")\n \n # Reconfigure the binary tree to update:\n tree.left = tree_l\n tree.right = tree_r\n return a_arr + b_arr, tree #concatenate list of lists (a_arr+b_arr) and also return tree", "title": "" }, { "docid": "0a85efa90e8c693864bd7b6c06d45e7e", "score": "0.5163575", "text": "def initialize_tree(self, env, done):\n root = DecisionNode(None, env.state, 1, done)\n self.build_tree(root, env)\n return root", "title": "" }, { "docid": "4aaa571e21c00cb47c542bb8874c9632", "score": "0.5162614", "text": "def build_stump(self, rows):\n gain, question = self.find_weak_split( rows)\n\n # Partition dataset based on best question\n true_rows, false_rows = self.partition( rows, question )\n # Build the true branch\n true_branch = Node(0, true_rows )\n # Build the false branch\n false_branch = Node(0, false_rows )\n # Return the Decision node, with references to question and branchs\n return Node(1, None, question, true_branch, false_branch)", "title": "" }, { "docid": "b12ac8189a5498580806c4cf81489aba", "score": "0.5159078", "text": "def generate_tree(pred):\n\n tree = {}\n\n for i, p in enumerate(pred):\n if p == -1:\n # root node\n tree[i] = [i]\n continue\n\n idx = p\n path = [idx]\n\n while idx >= 0:\n nextnode = pred[idx]\n idx = nextnode\n if idx >= 0:\n path.append(nextnode)\n tree[i] = path\n\n return tree", "title": "" }, { "docid": "af1c65c4bf05c4b6621c8446fe44b48b", "score": "0.51543826", "text": "def tree_tune_pruning(\n features_indices: list, features_names: pd.DataFrame, df_data: pd.DataFrame,\n df_target: pd.DataFrame, folds_num: int, min_to_select: int) -> pd.DataFrame:\n\n # split the data into folds_num stratified folds\n skf = StratifiedKFold(n_splits=folds_num, random_state=0)\n data_array: np.ndarray = df_data.to_numpy()\n target_array: np.ndarray = df_target.to_numpy()\n # define a decision tree classifier\n clf = DecisionTreeClassifier(random_state=0, criterion='entropy')\n # define an empty list that would facilitate the feature score and ranking per test fold\n ls_fold: list = []\n df_score_rank_fold = pd.DataFrame()\n # loop over the folds to train a decision tree and compute cost-complexity parameters\n # test each parameter on test, use the best score to get that tree feature importance\n # then use algorithm to indicate relevant features and their rank\n for fold_num, (train_index, test_index) in enumerate(skf.split(data_array, target_array)):\n data_array_train, data_array_test = data_array[train_index], data_array[test_index]\n target_array_train, target_array_test = target_array[train_index], target_array[test_index]\n path = clf.cost_complexity_pruning_path(data_array_train, target_array_train)\n cv_accuracy_ls: list = []\n ccp_alphas = path.ccp_alphas\n for ccp_alpha in ccp_alphas:\n clf_alpha = DecisionTreeClassifier(random_state=0, criterion='entropy',\n ccp_alpha=ccp_alpha)\n clf_alpha.fit(data_array_train, target_array_train)\n cv_accuracy_ls.append(clf_alpha.score(data_array_test, target_array_test))\n df_plot: pd.DataFrame = pd.concat([pd.Series(ccp_alphas, name='ccp_alpha'),\n pd.Series(cv_accuracy_ls, name='cv_accuracy')], axis=1)\n # 
df_plot.plot.scatter(x='ccp_alpha', y='cv_accuracy')\n df_plot.sort_values(by=['cv_accuracy', 'ccp_alpha'], ascending=[False, False], inplace=True)\n # use the lowest ccp_alpha in the highest cv_accuracy category\n # if this ccp_alpha is the max at the category it means that the entire tree is pruned\n # in this case take the 2nd lowest ccp_alpha that correspond to the 2nd highest cv_accuracy\n grouped = df_plot.groupby('cv_accuracy')\n if (grouped.max() - grouped.min()).iloc[-1, 0] != 0:\n best_ccp_alpha = grouped.min().iloc[-1, 0]\n else:\n best_ccp_alpha = grouped.min().iloc[-2, 0]\n\n clf = DecisionTreeClassifier(random_state=0, criterion='entropy',\n ccp_alpha=best_ccp_alpha)\n clf_fit = clf.fit(data_array_train, target_array_train)\n feature_importance = clf_fit.feature_importances_\n\n df_score_rank_fold['feature_num'] = features_indices\n df_score_rank_fold['feature'] = features_names\n df_score_rank_fold['score'] = feature_importance\n ls_fold.append(df_score_rank_fold)\n weighing = True\n df_score_rank = select_rank(ls_fold, min_to_select, weighing)\n return df_score_rank", "title": "" }, { "docid": "f33459564b155da6b4a78be7748ed293", "score": "0.5152764", "text": "def segment(self,data,labels):\n \n totals = np.bincount(labels)\n if len(totals)==1:\n totals = np.append(totals,[0])\n # Quick safety check\n if len(labels) != len(data):\n print('ERROR (DecisionTree.segment): There must be the same number of labels as datapoints.')\n \n # Calculate the initial entropy, used to find info gain\n C,D = 0,0 # C = in class left of split; D = not in class left of split\n c,d = totals[1],totals[0] # c = in class right of split; d = not in class right of split\n H_i = self.entropy(C,D,c,d) # the initial entropy, before any splitting\n \n # Initialize objects to store optimal split rules for iterative comparison\n maxinfogain = 0\n splitrule = [] \n \n for feature_i in range(len(data[0])):\n # Order the data for determining ideal splits\n lbldat = np.concatenate(([data[:,feature_i]],[labels]),axis=0)\n fv = np.sort(lbldat.T,axis=0)\n lastfeature = np.array(['',''])\n \n C,D = 0,0 # Reset the counters\n c,d = totals[1],totals[0]\n \n for point_i in range(len(fv)-1):\n \n # Update C,D,c,d to minmize runtime of entrop calc (keep at O(1) time)\n if fv[point_i,1] == 1:\n C += 1\n c -= 1\n elif fv[point_i,1] == 0:\n D += 1\n d -= 1\n else:\n print(\"ERROR (DecisionTree.segment): Classifications can only be 0 or 1.\")\n \n # Skip splitting values that are not separable\n if fv[point_i,0] == fv[point_i+1,0]:\n continue\n else:\n H_f = self.entropy(C,D,c,d)\n infogain = H_i-H_f\n if infogain > maxinfogain:\n maxinfogain = infogain\n splitrule = [feature_i,fv[point_i,0]]\n \n return splitrule", "title": "" }, { "docid": "17e49ae937ff67757f4c496ff437491a", "score": "0.5150664", "text": "def buildParseTree_tree(lista, cols):\n tam = len(lista)\n flag = 0\n expre = '('\n for idx in np.arange(0, tam - 1):\n \n if (list(lista[idx].keys())[0] == 1):\n if (list(lista[idx+1].keys())[0] == 1):\n expre = expre + ' ' + '(' + ' ' + lista[idx][1] + ' ' + '+'\n flag += 1\n elif (list(lista[idx+1].keys())[0] != 1):\n expre = expre + ' ' + lista[idx][1] + ' '\n \n for i in np.arange(0,flag):\n flag -= 1\n expre = expre + ')' + ' '\n expre = expre + '+' \\\n \n elif (list(lista[idx].keys())[0] == 2):\n \n if (list(lista[idx+1].keys())[0] == 2):\n flag += 1\n expre = expre + ' ' + lista[idx][2] + ' ' + '(' \n \n elif (list(lista[idx+1].keys())[0] != 2):\n expre = expre + ' ' + lista[idx][2] + ' ' + '(' + ' ' + '(' + ' 
' + choose_features(cols) + ' ' + '-' + ' ' + choose_features(cols) + ' ' \\\n + ')' + ' ' + ')' + ' '\n for i in np.arange(0,flag):\n flag -= 1\n expre = expre + ')' + ' ' \n expre = expre + '+'\n \n if (list(lista[tam-1].keys())[0] == 1):\n if (list(lista[tam-2].keys())[0] == 2):\n expre = expre + ' ' + lista[tam-1][1] + ' '\n for i in np.arange(0,flag):\n expre = expre + ')' + ' '\n else:\n expre = expre + ' ' + lista[tam-1][1] + ' '\n for i in np.arange(0,flag):\n expre = expre + ')' + ' '\n \n elif (list(lista[tam-1].keys())[0] == 2):\n if (list(lista[tam-2].keys())[0] == 2):\n expre = expre + ' ' + lista[tam-1][2] + ' ' + '(' + ' ' + '(' + ' ' + choose_features(cols) + ' ' + '-' + ' ' + choose_features(cols) + ' ' + ')' + ' ' + ')' + ' '\n for i in np.arange(0,flag):\n expre = expre + ')' + ' ' \n elif (list(lista[tam-2].keys())[0] != 2):\n \n expre = expre + ' ' + lista[tam-1][2] + ' ' + '(' + ' ' + '(' + ' ' + choose_features(cols) + ' ' + '-' + ' ' + choose_features(cols) + ' ' + ')' + ' ' + ')' + ' '\n for i in np.arange(0,flag):\n expre = expre + ')' + ' ' \n expre = expre + ')'\n return expre", "title": "" }, { "docid": "a1d7766457bd3db05eeaf930334da47d", "score": "0.5141105", "text": "def _fit_tree_node(self, X, y, w, node_index, depth, passed_indices):\n if len(passed_indices) <= self.min_samples_split or depth >= self.max_depth:\n self.nodes_data[node_index] = (numpy.average(y[passed_indices], weights=w[passed_indices]), )\n return\n\n multiplier = numpy.random.choice([1, 3, 5, 7, 11, 13, 17, 23])\n n_categories = 2 ** self.n_categories_power\n mask = n_categories - 1\n # modulus = self.random_state.randint(self.min_modulus, self.max_modulus)\n selected_features = self.random_state.choice(self.n_features, size=self._n_used_features, replace=False)\n candidates = OrderedDict()\n costs = []\n for feature_id in selected_features:\n # the last element is 0, new elements will be sent to left subtree\n nominator = numpy.bincount((multiplier * X[passed_indices, feature_id]) & mask,\n weights=w[passed_indices] * y[passed_indices],\n minlength=n_categories)\n weights = numpy.bincount((multiplier * X[passed_indices, feature_id]) & mask,\n weights=w[passed_indices],\n minlength=n_categories) + 2\n nominator += 2 * numpy.random.random(len(nominator))\n means = nominator / weights\n order = numpy.argsort(means)\n means = means[order]\n weights = weights[order]\n na = numpy.newaxis\n cut, cost, _ = self._criterion.compute_best_splits(numpy.arange(n_categories)[:, na], means, weights)\n costs.append(cost)\n directions = numpy.argsort(order) > cut\n candidates[feature_id] = (cut, cost, directions)\n\n # feature that showed best pre-estimated cost\n feature_index = selected_features[numpy.argmin(costs)]\n split, cost, directions = candidates[feature_index]\n passed = numpy.take(directions, (multiplier * X[passed_indices, feature_index]) & mask)\n # computing information for (possible) children\n passed_left_subtree = passed_indices[~passed]\n passed_right_subtree = passed_indices[passed]\n left, right = self._children(node_index)\n if len(passed_left_subtree) == 0 or len(passed_right_subtree) == 0:\n # this will be leaf\n self.nodes_data[node_index] = (numpy.average(y[passed_indices], weights=w[passed_indices]), )\n else:\n # non-leaf, recurrent calls\n self.nodes_data[node_index] = (feature_index, multiplier, mask, directions)\n self._fit_tree_node(X, y, w, left, depth + 1, passed_left_subtree)\n self._fit_tree_node(X, y, w, right, depth + 1, passed_right_subtree)", "title": "" }, { 
"docid": "0aaca34ef435e405ebb80dc6f999b81f", "score": "0.51251644", "text": "def buildTree(self, data, info):\n\n rootNode = Tree()\n rootNode.info = info.copy()\n for i in range(len(data)):\n rootNode.addTransaction(data[i][1:])\n return rootNode", "title": "" }, { "docid": "0b9a59e472b7c7a7911f169819b15195", "score": "0.5122249", "text": "def get_hierarchy_tree(tree_strings):\n\n working_queue = deque()\n\n # Each queue item is a list [feature_id, previous_feature_ids]\n i = 1\n tree_dict = {}\n working_queue.append([tree_strings[0], tree_dict])\n\n while len(working_queue) > 0:\n cur_feature, sub_tree = working_queue.popleft()\n\n cur_string = tree_strings[i]\n cur_string_split = cur_string.split()\n\n if len(cur_string_split) % 2 != 0:\n raise ValueError(\"Error: current string size is not even.\")\n\n # The string can have even number of values: the first two correspond to\n # the current item, and the second two correspond to the next item in\n # the queue, and the next two corresponds to the next item in the queue...\n sub_tree[\"f\"] = [cur_feature]\n sub_tree[\"c\"] = []\n\n for s in cur_string_split[:2]:\n if s == \"-1\" or s == \"-2\":\n # We hit a decision node, add a leaf to this branch\n sub_tree[\"c\"].append({\"f\": [\"+\"] if s == \"-2\" else [\"-\"]})\n else:\n new_sub_tree = {}\n sub_tree[\"c\"].append(new_sub_tree)\n working_queue.append([s, new_sub_tree])\n\n # Index for the next pair\n pair_i = 2\n while pair_i < len(cur_string_split):\n # Load the next item in the queue\n cur_feature, sub_tree = working_queue.popleft()\n sub_tree[\"f\"] = [cur_feature]\n sub_tree[\"c\"] = []\n\n for s in cur_string_split[pair_i : pair_i + 2]:\n if s == \"-1\" or s == \"-2\":\n # We hit a decision node, add a leaf to this branch\n sub_tree[\"c\"].append({\"f\": [\"+\"] if s == \"-2\" else [\"-\"]})\n else:\n new_sub_tree = {}\n sub_tree[\"c\"].append(new_sub_tree)\n working_queue.append([s, new_sub_tree])\n\n pair_i += 2\n\n i += 1\n\n return tree_dict", "title": "" }, { "docid": "c4090fffada9c578ad7ae68006d87136", "score": "0.51166826", "text": "def _buildEvolutionTree(self):\n self.tree = dict()\n self.id_genx = dict()\n self.id_score = dict()\n\n for gen, pop in self.history.items():\n for p in pop:\n self.tree[p.id] = p.parentIds if hasattr(p, \"parentIds\") else ()\n self.id_genx[p.id] = p.gIdx\n self.id_score[p.id] = p.fitness.score", "title": "" }, { "docid": "f7ca491cd2ce66c8682d78efbc35c2be", "score": "0.5110731", "text": "def classify(observation, tree):\n\n # if the current branch is a leaf node, give results and end.\n if tree.results != None:\n return tree.results\n else:\n # for the given observation, pick the relevant column in the first\n # node and test its value. 
there are two ways to handle them,\n # integer/floats (quantitative data) or names/labels (qualitative data)\n # tree.col is an index number stored in the tree when building it\n v = observation[tree.col]\n branch = None\n if isinstance(v, int) or isinstance(v, float):\n # for numbers (int of float), if higher, goes into true branch\n # if false, goes into false branch\n if v >= tree.value:\n branch = tree.tb\n else:\n branch = tree.fb\n else:\n # for non numbers, if same value, it goes down the true branch\n # if false, goes down the false branch\n if v == tree.value:\n branch = tree.tb\n else:\n branch = tree.fb\n return classify(observation, branch)", "title": "" }, { "docid": "7ae96d1084b5c0c62e614e0df63d3370", "score": "0.51010114", "text": "def create_tree(self):\n attrs = list(range(0, len(self.root.dna_data[0]['attrs'])))\n #values = ['A', 'G', 'T', 'C']\n values = ['A', 'G', 'T', 'C', 'D', 'N', 'S', 'R']\n self.root.create_subtree(values, attrs)\n return", "title": "" }, { "docid": "8f3b68f8c5e07c34a41610cf8f3af941", "score": "0.50970596", "text": "def build_example(self):\n try:\n gens_node = self.add_check_unique(None, \"Gens\")\n self.tag_treestore.set( gens_node, 1, True )\n bob_node = self.add_check_unique(gens_node, \"Bob\")\n #self.tag_treestore.set( bob_node, 1, True )\n self.add_check_unique(gens_node, \"Marcel\")\n self.add_check_unique(gens_node, \"Louise\")\n nature_node = self.add_check_unique(None, \"Nature\")\n self.add_check_unique(nature_node, \"Foret\")\n self.add_check_unique(nature_node, \"Lac\")\n montagne_node = self.add_check_unique(nature_node, \"Montagne\")\n self.tag_treestore.set( montagne_node, 1, True )\n except TagData_UnicityWarning as e:\n print \"@@@\",e.__class__.__name__, e\n print self.dump_str()", "title": "" }, { "docid": "6cd750406b0fced61ab9ca51262b295f", "score": "0.5081778", "text": "def where_wrapper(problem, population):\n\n def divide(problem, population, lvl, where_configuration):\n\n\n def check_depth(): return lvl > where_configuration[\"depthMax\"] # depthMax is not updated\n\n def check_elements() : return len(population) < where_configuration[\"minSize\"]\n\n if check_depth() or check_elements(): return Node(lvl, population)\n\n print \"Length of population: \", len(population), \" Level: \", lvl\n population, east, west = fastmap(problem, population)\n mid = int(len(population) / 2)\n wests = population[:mid]\n easts = population[mid+1:]\n\n # print divide(problem, wests, lvl + 1, where_configuration), len(divide(problem, wests, lvl + 1, where_configuration))\n raw_input()\n r_population = []\n r_population += divide(problem, wests, lvl + 1, where_configuration)\n r_population += divide(problem, easts, lvl + 1, where_configuration)\n\n return r_population\n\n\n\n decisions = [pop.decisionValues for pop in population]\n where_configuration = {\n \"minSize\": 10, # min leaf size\n \"depthMin\": 2, # no pruning till this depth\n \"depthMax\": 10, # max tree depth\n \"wriggle\": 0.2, # min difference of 'better'\n \"prune\": True, # pruning enabled?\n \"b4\": '|.. 
', # indent string\n \"verbose\": False, # show trace info?\n }\n leaves = divide(problem, decisions, 0, where_configuration)\n clusters = []\n print \"Length of leaves: \", len(leaves)\n import pdb\n pdb.set_trace()\n for leaf in leaves[-1]:\n cluster = []\n print \"Length of the leaf is: \", len(leaf), leaf\n for member in leaf:\n for pop in population:\n assert(len(member) == len(pop.decisionValues)), \"Something is wrong\"\n # compare list\n result = reduce(lambda x, y: x and y, [True if i == j else False for i, j in zip(member, pop.decisionValues)])\n if result is True:\n cluster.append(pop)\n break\n clusters.append(cluster)\n\n print clusters\n\n return clusters", "title": "" }, { "docid": "d67f17d75c09629726c6b24cbefe48bd", "score": "0.50781715", "text": "def sample_tree(self, root=0):\n\n distribution = self[root]\n expansions = list(distribution.keys())\n probabilities = [distribution[e] for e in expansions]\n idx = np.random.choice(len(expansions), p=probabilities)\n children = expansions[idx]\n if type(children) == tuple:\n return Tree(root, [self.sample_tree(nt) for nt in children])\n else:\n return Tree(root, [children]) # just a single terminal", "title": "" }, { "docid": "766ec42be39d92e99f5e001278409657", "score": "0.5076841", "text": "def build_tree_threshold(feature_paths, max_depth=3,\n num_splits=5, noisy_split=False,\n _parent=None,\n _depth=0):\n\n expand_tree = partial(build_tree_threshold, feature_paths,\n max_depth=max_depth,\n num_splits=num_splits,\n noisy_split=noisy_split)\n\n if _parent is None:\n tree = RITTree2(next(feature_paths))\n expand_tree(_parent=tree, _depth=0)\n return tree\n\n else:\n _depth += 1\n if _depth >= max_depth:\n #print(' max depth, return')\n return\n if noisy_split:\n num_splits += np.random.randint(low=0, high=2)\n for i in range(num_splits):\n _flag = _parent.add_child(next(feature_paths))\n if _flag == -1 :\n continue\n added_node = _parent.children[-1]\n if not added_node.is_empty():\n #print(' Added node: ', added_node._val , ' is not empty, grow tree after it' )\n expand_tree(_parent=added_node, _depth=_depth)\n else:\n pass\n #print(' Added node: ', added_node._val , ' is empty' )", "title": "" } ]
bfe9e75675fa5a41eb65f52393eeb1df
Import a discourse from a CSV file
[ { "docid": "e6147a781d4e22ec9c8ff9f560ad3b7e", "score": "0.7458001", "text": "def import_discourse_csvs(corpus_context, typed_data):\n string_set_template = 'n.{name} = csvLine.{name}'\n float_set_template = 'n.{name} = toFloat(csvLine.{name})'\n int_set_template = 'n.{name} = toInt(csvLine.{name})'\n bool_set_template = '''n.{name} = (CASE WHEN csvLine.{name} = 'False' THEN false ELSE true END)'''\n properties = []\n for h, v in typed_data.items():\n if v == int:\n template = int_set_template\n elif v == bool:\n template = bool_set_template\n elif v == float:\n template = float_set_template\n else:\n template = string_set_template\n properties.append(template.format(name=h))\n properties = ',\\n'.join(properties)\n directory = corpus_context.config.temporary_directory('csv')\n path = os.path.join(directory, 'discourse_import.csv')\n\n # If on the Docker version, the files live in /site/proj\n if os.path.exists('/site/proj') and not path.startswith('/site/proj'):\n feat_path = 'file:///site/proj/{}'.format(make_path_safe(path))\n else:\n feat_path = 'file:///{}'.format(make_path_safe(path))\n \n import_statement = '''CYPHER planner=rule\n LOAD CSV WITH HEADERS FROM \"{path}\" AS csvLine\n MATCH (n:Discourse:{corpus_name}) where n.name = toString(csvLine.name)\n SET {new_properties}'''\n\n statement = import_statement.format(path=feat_path,\n corpus_name=corpus_context.cypher_safe_name,\n new_properties=properties)\n corpus_context.execute_cypher(statement)\n for h, v in typed_data.items():\n corpus_context.execute_cypher('CREATE INDEX ON :Discourse(%s)' % h)\n # os.remove(path) # FIXME Neo4j 2.3 does not release files", "title": "" } ]
[ { "docid": "f2ced21216439f9c43fe27a6ffed1504", "score": "0.63136464", "text": "def load_database():\n # copy data from csv file to database\n with connection:\n with connection.cursor() as cursor:\n with open('cleaned_collisions.csv', 'r') as f:\n next(f) # Skip the header row.\n cursor.copy_from(f, 'collisions', sep=',')\n print(\"data upload complete!\")", "title": "" }, { "docid": "ea5a681e842363fab23a7f11e3d8cdc2", "score": "0.6297698", "text": "def read_course(line):\n line = line.split(\",\")\n c = Course(line[0], line[1], line[2], line[3])\n \n return c", "title": "" }, { "docid": "e6cab8ad381f85a217a1f665f7b01883", "score": "0.62452096", "text": "def load_from_csv(self, path: str = None, training: bool = True) -> None:\n pass", "title": "" }, { "docid": "f4a1f1b90fd6dd119e885a0fc5975448", "score": "0.6095082", "text": "def import_csv_to_db(args):\n\n\t# Reading store-locations.csv\n\tstore_locations = constants.data_source\n\ttry: \n\t\treader = csv.reader(open(store_locations, 'rU'))\n\t\tall_stores = [] # temp csv data storage for db import\n\t\tstart = False # ignores the Title-row in csv file\n\t\tfor row in reader:\n\t\t\tif start:\n\t\t\t\tcur_row = create_tuple(row)\n\t\t\t\tall_stores.append(cur_row)\n\t\t\telse: \n\t\t\t\tstart = True\n\n\texcept Exception as e:\n\t\treturn e\n\n\t# Creating a table named \"store\" and inserting csv data to it\n\tconn = sqlite3.connect(constants.store_db)\n\ttry:\n\t\tcur = conn.cursor()\n\t\tcur.execute(constants.drop_store)\n\t\tcur.execute(constants.create_store)\n\t\tcur.executemany(constants.insert_to_store, all_stores)\n\t\tconn.commit()\n\t\tconn.close()\n\n\texcept Exception as e:\n\t\tconn.rollback()\n\t\treturn e", "title": "" }, { "docid": "663b8e21dc875576e1354d3a49a1b52f", "score": "0.59981763", "text": "def import_data(conn, csv_file):\n with open(csv_file, 'r') as f:\n labels = f.readline().rstrip()\n\n for line in f:\n # get data\n entry = line.rstrip()\n entry_data = entry.split(',')\n\n query = \"INSERT INTO `\" + DB_TABLE + \"`(\" + labels + \") VALUES (\"\n for field in entry_data:\n if len(field):\n query += \"'\" + field + \"',\"\n else:\n query += \"NULL,\"\n # remove last ,\n if query[-1] == ',':\n query = query[:-1]\n query += \");\"\n\n # apply query\n cursor = conn.cursor()\n cursor.execute(query)\n conn.commit()\n\n close_connection(conn)", "title": "" }, { "docid": "86e58b0cb54baf26452eff62506f174e", "score": "0.5997261", "text": "def readfile(self):\n with open(self.filename) as csvfile:\n read_csv = csv.reader(csvfile, delimiter=\",\")\n b = 0\n\n for row in read_csv:\n if row != \"\":\n if b == 0:\n self.course = row[0]\n elif b == 1:\n self.work = row\n elif b == 2:\n self.scales = row\n elif b == 3:\n self.grade = row\n b += 1\n\n print(\"Course: \" + str(self.course))\n print(\"Assignments: \" + str(self.work))\n print(\"Scales: \" + str(self.scales))\n print(\"Grades: \" + str(self.grade))", "title": "" }, { "docid": "1a0fa5f95c9406f937649073f1e5bc09", "score": "0.5991692", "text": "def loadArticles(csvPath, isFake, colNum, rnw = False): \n\n articles = [] \n labels = []\n\n with open(csvPath, 'rU') as f:\n reader = csv.reader(f, delimiter=',')\n next(reader, None) # skip the header \n\n counter = 0\n\n for row in reader:\n\n if counter % 20 == 0:\n print counter\n\n # kill all non-unicode articles (foreign language -- may want better \n # way to deal with this in the future)\n try:\n article = row[colNum]\n article = article.decode('utf-8', 'ignore')\n if rnw:\n chars_to_remove = 
string.punctuation+'1234567890'\n table = {ord(char): None for char in chars_to_remove}\n article = article.translate(table)\n article = removeNonWords(article)\n article = removeStopWordsString(article)\n\n articles.append(article)\n labels.append(isFake)\n\n except UnicodeError:\n print \"non-unicode article\"\n counter += 1\n\n return (articles, labels)", "title": "" }, { "docid": "527cde6f024754578dc148e349d6124a", "score": "0.5976379", "text": "def from_csv(ctx, url, key, secret, **kwargs):\n client = ctx.obj.start_client(url=url, key=key, secret=secret)\n p_grp = ctx.parent.command.name\n apiobj = getattr(client, p_grp)\n with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):\n data = apiobj.run_enforcement_from_csv_path(**kwargs)\n click.secho(f\"{data}\")\n ctx.exit(0)", "title": "" }, { "docid": "c01edee6cf69903532860ade30469571", "score": "0.59720224", "text": "def import_csv(file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()", "title": "" }, { "docid": "229b23012c21977ed011324baa21b880", "score": "0.5929736", "text": "def load_cities_file():\n print(\"load_cities_file. Start..\")\n file_path = settings.BASE_DIR + \"/files/cities.csv\"\n data_reader = csv.reader(open(file_path), delimiter=\",\", quotechar='\"')\n for row in data_reader:\n # read data from line\n code = row[0].strip()\n name = row[1].strip()\n user = row[2].strip()\n # create index card entry\n index_card = IndexCard()\n index_card.city = name\n index_card.name = code + \"-\" + name\n index_card.code = code\n index_card.username = user\n try:\n index_card.save()\n except:\n print(\"Saving index_card: \" + row[1].strip())\n print(\"Unexpected error:\", sys.exc_info())\n print(\"load_cities_file. 
End....\")", "title": "" }, { "docid": "a483523a4e34e1dd07e2369227e4c28d", "score": "0.5929632", "text": "def import_csv_file(self, _):\n open_file_dialog = wx.FileDialog(self, message=\"Import a CSV file\",\n defaultDir=self.path,\n defaultFile=\"\",\n wildcard=\"CSV files (*.csv)|*.csv\",\n style=wx.FD_OPEN |\n wx.FD_FILE_MUST_EXIST)\n if open_file_dialog.ShowModal() == wx.ID_OK:\n open_file_dialog.Destroy()\n dlg = wx.MessageDialog(parent=self, message='To replace ' +\n 'all items currently in your list, ' +\n 'click ok',\n caption='Replace items',\n style=wx.OK | wx.CANCEL)\n if dlg.ShowModal() == wx.ID_OK:\n self.main_list.DeleteAllItems()\n\n with open(open_file_dialog.GetPath(), 'rb') as csvfile:\n cvs_data = csv.reader(csvfile)\n for item in cvs_data:\n data = Unit(\n model=item[0],\n hostname=item[1],\n serial=item[2],\n firmware=item[3],\n device=item[4],\n mac=item[5],\n ip_ad=item[6],\n arrival_time=datetime.datetime.strptime(\n (item[7]), \"%Y-%m-%d %H:%M:%S.%f\"),\n ip_type=item[8],\n gateway=item[9],\n subnet=item[10],\n master=item[11],\n system=item[12])\n\n self.main_list.AddObject(data)\n self.dump_pickle()\n\n else:\n open_file_dialog.Destroy()", "title": "" }, { "docid": "093fdbe17ab259504b2afb5a0b39d67a", "score": "0.59246504", "text": "def readCSV():\n lectura = open('cride/circles/models/circles.csv', 'r')\n lineas = lectura.readlines()\n\n for linea in lineas:\n partes = linea.split(',')\n name = partes[0]\n slug_name = partes[1]\n is_public = True if int(partes[2]) != 0 else False\n # if int(partes[2]) == 0: is_public = False\n verified = True if int(partes[3]) != 0 else False\n # if int(partes[3]) == 0: verified = False\n is_limited = False if int(partes[4]) == 0 else True\n # if int(partes[4]) > 0: is_limited = True\n members_limit = int(partes[4])\n\n Circle.objects.create(\n name=name,\n slug_name=slug_name,\n is_public=is_public,\n verified=verified,\n is_limited=is_limited,\n members_limit=members_limit\n )", "title": "" }, { "docid": "4a0f49912b220326976b52c5fe3a3e6f", "score": "0.59224313", "text": "def load_csv():\n with open(DB_FILE_NAME, \"r\", newline=\"\") as csv_contents:\n return [item[0] for item in csv.reader(csv_contents)]", "title": "" }, { "docid": "0ee3153809961e291c3c880a56eda263", "score": "0.5918021", "text": "def import_airdetectordata_from_csv(user, csv_file):\n csv_data = []\n reader = csv.reader(csv_file.read().splitlines(), delimiter=',', quotechar='\"')\n for row in reader:\n csv_data.append([item.strip() for item in row])\n\n airdetectordata_objects = []\n\n # Check if headers exists. 
Skip the first entry if true.\n header_check = ['value', 'category', 'date', 'time']\n first_row = [i.lower().strip() for i in csv_data[0]]\n if all(i in first_row for i in header_check):\n csv_data = csv_data[1:]\n\n for row in csv_data:\n # Let's do an extra check to make sure the row is not empty.\n if row:\n try:\n category = Category.objects.get(name__iexact=row[1].strip())\n except ObjectDoesNotExist:\n category = Category.objects.get(name__iexact='No Category'.strip())\n\n airdetectordata_objects.append(AirDetectorData(\n user=user,\n value=int(row[0]),\n category=category,\n record_date=datetime.strptime(row[2], DATE_FORMAT),\n record_time=datetime.strptime(row[3], TIME_FORMAT),\n notes=row[4],\n ))\n\n AirDetectorData.objects.bulk_create(airdetectordata_objects)", "title": "" }, { "docid": "04b55cfcef7955559273fda416df3d0c", "score": "0.59081054", "text": "def load_csv():\n config = configparser.ConfigParser()\n config.read('db.cfg')\n\n DB_NAME = config.get(\"CLUSTER\", \"DB_NAME\")\n DB_USER = config.get(\"CLUSTER\", \"DB_USER\")\n DB_PASSWORD = config.get(\"CLUSTER\", \"DB_PASSWORD\")\n DB_PORT = config.get(\"CLUSTER\", \"DB_PORT\")\n HOST = config.get(\"CLUSTER\", \"HOST\")\n\n conn = psycopg2.connect(database=DB_NAME,\n user=DB_USER,\n password=DB_PASSWORD,\n host=HOST,\n port=DB_PORT\n )\n\n conn.autocommit = True\n cur = conn.cursor()\n\n data_cleaning()\n with open('resources/GlobalLandTemperaturesByCity.csv', 'r') as f:\n reader = csv.reader(f)\n next(reader) # Skip the header row.\n for row in reader:\n cur.execute(\n '''\n INSERT INTO city_temperature(\n dt, \n avg_temperature, \n avg_temperature_uncertainty, \n city, \n country, \n latitude,\n longitude) \n VALUES (%s, %s, %s, %s, %s, %s, %s) \n ''',\n row\n )\n print('Done.')\n\n conn.commit()\n conn.close()", "title": "" }, { "docid": "e3082ebe48fa71ac82c526fad230938a", "score": "0.5861731", "text": "def importCSV(self):\r\n fileDialog = QtWidgets.QFileDialog.getOpenFileName(self.view, \"Open File\", None, \"CSV Files (*.csv)\")\r\n fileName = str(fileDialog).partition('\\'')[2].split('\\'')[0]\r\n if not (fileName is \"\"):\r\n if not self.model.listContact:\r\n answer = QtWidgets.QMessageBox.No\r\n else:\r\n answer = QtWidgets.QMessageBox.question(self.view, \"Death or life choice !?\",\r\n \"Would you like to append the contacts to your actual list?\",\r\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\r\n if answer == QtWidgets.QMessageBox.No:\r\n self.clearAll()\r\n with open(fileName, newline='') as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',')\r\n for nbLine, row in enumerate(reader):\r\n tempElement = QtWidgets.QTreeWidgetItem()\r\n if nbLine != 0:\r\n for index, word in enumerate(row):\r\n tempElement.setText(index, word)\r\n self.registerContact(tempElement.clone())\r\n QtWidgets.QMessageBox.information(self.view,\r\n \"Open succeeded\",\r\n \"The file has been successfully opened in the folder : \" + fileName,\r\n QtWidgets.QMessageBox.Close)\r\n else:\r\n QtWidgets.QMessageBox.information(self.view, \"Open failed\", \"The file has not been opened.\")", "title": "" }, { "docid": "37cd89a0cc7083b11a62e57154b1946b", "score": "0.5859343", "text": "def test_import_broken_csv(self):\n self._write_csv_file([\n 'person_record_id,source_date,\\0full_name', # contains null\n 'test.google.com/person1,2013-02-26T09:10:00Z,_test_full_name',\n ])\n doc = self.go('/haiti/api/import')\n form = doc.last('form')\n doc = self.s.submit(form, key='test_key', content=open(self.filename))\n assert 'The CSV 
file is formatted incorrectly' in doc.text\n assert Person.all().count() == 0\n assert Note.all().count() == 0", "title": "" }, { "docid": "a56368beaea6ba4058f2f99e5b28d647", "score": "0.5799825", "text": "def import_csv(self, csvfileobject):\n\t\t# Clear previously stored info\n\t\tself._tracks = []\n\t\tself._selected = None\n\n\t\tfor row in csvfileobject:\n\t\t\tif row[0] == \"T\":\n\t\t\t\ttrack = self.add_track()\n\t\t\t\ttrack.properties = row\n\t\t\telif row[0] == \"P\":\n\t\t\t\tevent = self.add_event(0, 1, '-')\n\t\t\t\tevent.properties = row", "title": "" }, { "docid": "1d1e4e0d98be96155a88c8f8bb9cddde", "score": "0.5794517", "text": "def import_categories_and_topics(csvfile):\n # Check encoding\n try:\n csvfile.read().decode('utf8')\n except UnicodeDecodeError:\n return_value = '', '', _('Import file has wrong character encoding, only UTF-8 is supported!')\n else:\n csvfile.seek(0)\n # Check dialect\n dialect = csv.Sniffer().sniff(csvfile.readline())\n dialect = csv_ext.patchup(dialect)\n csvfile.seek(0)\n # Parse CSV file\n topics = 0\n categories = 0\n for (line_no, line) in enumerate(csv.reader(csvfile, dialect=dialect)):\n if line_no == 0 or len(line) == 0:\n # Do not read the header line\n continue\n # Check and repair format\n if len(line) < 3:\n line.extend(['', ''])\n # Extract data\n title, submitter, category = line[:3]\n if not title:\n continue\n if category:\n category_object, created = Category.objects.get_or_create(name=category)\n if created:\n categories += 1\n else:\n category_object = None\n Topic.objects.create(title=title, submitter=submitter, category=category_object)\n topics += 1\n success = _('%(categories)d categories and %(topics)d topics successfully imported.') % {'categories': categories, 'topics': topics}\n return_value = success, '', ''\n return return_value", "title": "" }, { "docid": "f1fcf84ea1aa7503041acc27a2564989", "score": "0.57882357", "text": "def insertIntoTable(self,f,course_run,uni):\r\n _file = open(f)\r\n _reader = csv.reader(_file)\r\n head = next(_reader)\r\n blank1,blank2,_filename, _extend = f.split('.')\r\n dots,data,uni,course, otherDeets, datatype = _filename.split(\"/\")\r\n cursor = self.__database.cursor()\r\n delete = ''\r\n load = ''\r\n\r\n\r\n\r\n if 'comments' in datatype:\r\n col = ''\r\n setting = ''\r\n\r\n if(len(head) == 8):\r\n col = \t\"(id,author_id,@parent_id,@step,@text,@timestamp,@likes,@moderated) \"\r\n setting = \"step = @step,week_number = SUBSTRING_INDEX(@step,'.',1),step_number = SUBSTRING_INDEX(@step,'.',-1),\"\\\r\n \"text = @text,timestamp = @timestamp,Likes = @Likes, \"''\r\n\r\n elif(len(head) == 10):\r\n col = \"(id,author_id,@parent_id,step,week_number,step_number,text,@timestamp,@moderated,@likes) \"\r\n else:\r\n col = \"(id,author_id,@parent_id,step,week_number,step_number,text,@timestamp,@likes,@first_reported_at,@first_reported_reason,@moderation_state,@moderated) \"\r\n\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Comments CHARACTER SET UTF8 ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' + \" LINES TERMINATED BY '\\n' \" \\\r\n 'IGNORE 1 LINES ' + col +\\\r\n \"Set parent_id = nullif(@parent_id,' '), \"+ setting +\" timestamp = REPLACE(@timestamp, ' UTC', ''), moderated = nullif(REPLACE(@moderated, ' UTC', ''),' '), likes = nullif(@likes,' '), university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n\r\n elif 'enrolments' in datatype:\r\n\r\n\r\n load = 'LOAD 
DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Enrolments ' \\\r\n \"FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n' \" + \\\r\n 'IGNORE 1 LINES ' \\\r\n \"(learner_id,@enrolled_at,@unenrolled_at,role,@fully_participated_at,@purchased_statement_at,gender,country,age_range,highest_education_level,employment_status,employment_area,detected_country) \"\\\r\n \"Set unenrolled_at = nullif(REPLACE(@unenrolled_at, ' UTC', ''),' '),fully_participated_at = nullif(REPLACE(@fully_participated_at, ' UTC', ''),' '),purchased_statement_at = nullif(REPLACE(@purchased_statement_at, ' UTC', ''),' '), enrolled_at=REPLACE(@enrolled_at, ' UTC', ''),university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n\r\n\r\n elif 'assignments' in datatype:\r\n\r\n col = \t\"(id,step,step_number,week_number,author_id,text,@first_viewed_at,@submitted_at,@moderated,review_count) \"\r\n\r\n\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Assignments ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' + \" LINES TERMINATED BY '\\n' \" \\\r\n 'IGNORE 1 LINES ' + col + \\\r\n \"Set first_viewed_at=REPLACE(@first_viewed_at, ' UTC', ''), submitted_at=REPLACE(@submitted_at, ' UTC', ''), moderated = nullif(REPLACE(@moderated, ' UTC', ''),' '), university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n\r\n elif 'reviews' in datatype:\r\n\r\n col = '(id,step,week_number,step_number,reviewer_id,assignment_id,guideline_one_feedback,guideline_two_feedback,guideline_three_feedback,@created_at)'\r\n\r\n\r\n load = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Reviews ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' + \" LINES TERMINATED BY '\\n' \" \\\r\n 'IGNORE 1 LINES ' + col + \\\r\n \"Set university = \" + \"'\" + uni + \"',\" + \"created_at = REPLACE (@created_at,' UTC', ''),\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n\r\n elif 'question' in datatype:\r\n\r\n if(len(head) == 8):\r\n load = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Quiz ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n 'IGNORE 1 LINES ' + \"(learner_id,quiz_question,week_number,step_number,question_number,response,@submitted_at,@correct)\" +\\\r\n \"Set correct = STRCMP(UPPER(@correct),'TRUE') + 1, submitted_at=REPLACE(@submitted_at, ' UTC', ''), university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n else:\r\n load = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Quiz ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n 'IGNORE 1 LINES ' + \"(learner_id,quiz_question,question_type,week_number,step_number,question_number,response,@cloze_response,@submitted_at,@correct)\" +\\\r\n \"Set correct = STRCMP(UPPER(@correct),'TRUE') + 1, cloze_response = @cloze_response, submitted_at=REPLACE(@submitted_at, ' UTC', ''), university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n elif 'activity' in datatype:\r\n\r\n col = ''\r\n setting = ''\r\n if(len(head) == 4):\r\n col = '(learner_id,@step,@first_visited_at,@last_completed_at)'\r\n setting = \"step = @step, week_number = 
SUBSTRING_INDEX(@step,'.',1),step_number = SUBSTRING_INDEX(@step,'.',-1),\" \\\r\n\r\n else:\r\n col = '(learner_id,step,week_number,step_number,@first_visited_at,@last_completed_at)'\r\n\r\n\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Activity ' \\\r\n \"FIELDS TERMINATED BY ',' \" \\\r\n \"IGNORE 1 LINES \" + col +\\\r\n \"Set \" + setting + \"last_completed_at = nullif(REPLACE (@last_completed_at, ' UTC', ''),' '), first_visited_at = nullif(REPLACE(@first_visited_at,' UTC', ''), ' '), university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n elif 'Courses' in datatype:\r\n\r\n col = '(run_id,start_date,no_of_weeks,joiners,leavers,leavers_percent,learners,learners_percent,active_learners,active_learners_percent,returning_learners,returning_learners_percent,social_learners,social_learners_percent,fully_participating_learners,fully_participating_learners_percent,statements_sold,certificates_sold,upgrades_sold,upgrades_sold_percent,learners_with_at_least_50_percent_step_completion,learners_with_at_least_50_percent_step_completion_percent,learners_with_at_least_90_percent_step_completion,learners_with_at_least_90_percent_step_completion_percent,run_retention_index,run_retention_index_percent,course,course_run,run)'\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Courses ' \\\r\n \"FIELDS TERMINATED BY ',' \" \\\r\n \"IGNORE 1 LINES \" + col + \\\r\n \"Set university = \" + \"'\" + uni + \"';\"\r\n\r\n elif 'team-members' in datatype:\r\n col = '(id,first_name,last_name,team_role,user_role)'\r\n\r\n load = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE TeamMembers ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n 'IGNORE 1 LINES ' + col +\";\"\r\n\r\n elif 'video-stats' in datatype:\r\n col = '(title,total_views,total_downloads,total_caption_views,total_transcript_views,viewed_hd,viewed_five_percent,viewed_ten_percent,viewed_twentyfive_percent,viewed_fifty_percent,viewed_seventyfive_percent,viewed_ninetyfive_percent,viewed_onehundred_percent,console_device_percentage,desktop_device_percentage,mobile_device_percentage,tv_device_percentage,tablet_device_percentage,unknown_device_percentage,europe_views_percentage,oceania_views_percentage,asia_views_percentage,north_america_views_percentage,south_america_views_percentage,africa_views_percentage,antarctica_views_percentage)'\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE VideoStats ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n 'IGNORE 1 LINES ' + col +\\\r\n \"Set university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n \r\n elif 'extract-links' in datatype:\r\n col = '(`Week Number`,`Step Number`,`Step Title`,`Step URL`,`Part`,`Field`,`Link Target`,`Link Caption`)'\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE ExtractLinks ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n 'IGNORE 1 LINES ' + col +\\\r\n \"Set university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n elif 'scraped-links' in datatype:\r\n col = '(step_number,step_title,step_type,step_edit_url,step_url)'\r\n load = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE ScrapedLinks ' \\\r\n \"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ 
'\\'\"\\'' \\\r\n 'IGNORE 1 LINES ' + col +\\\r\n \"Set university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n + str(course_run) + \";\"\r\n\r\n cursor.execute(load)\r\n self.__database.commit()\r\n cursor.close()\r\n _file.close()", "title": "" }, { "docid": "fe375dfb1d4c61eaf22fff4af831bc14", "score": "0.578804", "text": "def read_esco_csv(file, only_unigrams, synsets):\n\n if synsets:\n synset_list = list()\n else:\n skills = list()\n\n with open(file, newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\n next(reader, None) # skip header\n\n for row in reader:\n pref_label = row[4]\n alt_labels = row[5]\n\n synset = set()\n\n if only_unigrams:\n if ' ' not in pref_label:\n synset.add(pref_label)\n else:\n synset.add(pref_label)\n if len(alt_labels) > 0:\n label_list = alt_labels.split('\\n')\n for l in label_list:\n if only_unigrams:\n if ' ' not in l:\n synset.add(l)\n else:\n synset.add(l)\n\n if synsets:\n if len(synset) > 1: # process only synset with more than one member\n synset_list.append(synset)\n else:\n skills.extend(synset)\n\n if synsets:\n return synset_list\n else:\n return skills", "title": "" }, { "docid": "6c5ac772cb26d9850dd0f5d5355dc995", "score": "0.5786417", "text": "def add_exercises_from_csv(self, filename):\n if not os.path.isfile(filename):\n error_msg = \"no such file or directory: '{}'\".format('fake.csv')\n raise IOError(error_msg)\n\n try:\n tmp = []\n with open(filename, 'rb') as f:\n reader = csv.reader(f)\n for row in reader:\n ex = Exercise(row[0])\n tmp.append(ex)\n except:\n raise\n\n self._items += tmp", "title": "" }, { "docid": "b690e903df9eeb7326c810f9908129ba", "score": "0.57780784", "text": "def import_data(\n url=\"\"\"https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv\"\"\",\n data_cols=['data'], index_cols=[0, 3]):\n import pandas as pd\n return pd.read_csv(url, sep=',', parse_dates=data_cols, index_col=index_cols)", "title": "" }, { "docid": "e455c173c1f80b526afba4f9ca27056f", "score": "0.57725334", "text": "def import_csv_command(csvdir):\n import_tables_from_csv_files(csvdir)", "title": "" }, { "docid": "9bb68eb8571f6155c44e7b70533b1553", "score": "0.5768139", "text": "def read_csv():\n local_dir = os.path.dirname(os.path.realpath(__file__))\n csv_filename = os.path.join(local_dir, 'clickbait.csv')\n with open(csv_filename) as f:\n for line in f:\n yield line.strip().replace('\"', '').split(',')", "title": "" }, { "docid": "cabe502d4b35c685ba006f8693a65340", "score": "0.5767473", "text": "def load_csv(path, records):\n try:\n with open(path, encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n print(path + \" opened successfully.\")\n cheese_csv_parser = CheeseCSVParser(csv_reader)\n cheeses = cheese_csv_parser.get_cheeses(records)\n return cheeses\n\n except FileNotFoundError:\n print(\"Cannot find file at the path: \" + path)\n traceback.print_exc(file=sys.stdout)\n\n except IOError:\n print(\"Error opening file: \" + path)\n traceback.print_exc(file=sys.stdout)", "title": "" }, { "docid": "ce41847156fc06c7d0228dcdca59f291", "score": "0.5764453", "text": "def read_csv_c():\n with open('./movies-dataset/movies_metadata.csv', mode='r', encoding='utf-8') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n process_genre(row)\n process_country(row)\n process_name(row)\n process_id(row)\n process_title_id(row)\n\n with 
open('./movies-dataset/credits.csv', mode='r', encoding='utf-8') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n process_director(row)\n process_cast(row)", "title": "" }, { "docid": "95fbee7418cba5d82d51af36717a9979", "score": "0.5763773", "text": "def import_customers(ctx):\n load_csv(ctx, 'data/demo/customers.csv', 'res.partner')", "title": "" }, { "docid": "ea71a4f00878c8c9cb4eece0de328c7c", "score": "0.57608026", "text": "def load_csv(self, filename):\n try:\n unprocessed_csv = open(filename, \"r\", encoding='utf-8', errors='ignore')\n self.file_content = list(list(row) for row in csv.reader(unprocessed_csv, delimiter=\",\"))\n unprocessed_csv.close()\n except FileNotFoundError:\n print(\"[ERROR]: Could not find specified file. Did you pass a correct path?\")", "title": "" }, { "docid": "e22551129bcf92537b6c67ce02033540", "score": "0.57521033", "text": "def import_distanca_2015(survey, infile):\n\n with open(infile, 'rb') as csvfile:\n datareader = csv.DictReader(csvfile, delimiter=',')\n counter=[]\n for row in datareader:\n ID = row['nombre_pm']\n if ID in survey.plots.keys():\n position = 1\n censo = 1\n if row['name'] =='Distancias':\n # Adding the distance infomation carretera vecinal\n if row.has_key('carreteraCaminoVecinal') and row['carreteraCaminoVecinal'] not in ['', ' ']:\n carreteraVecinal = class_lib.Distance(ID)\n carreteraVecinal.parcela_pm_censo = censo\n carreteraVecinal.distancia_position = position\n carreteraVecinal.distancia_kilometros_unit_name = 'kilometros'\n carreteraVecinal.distancia_categoria = 1\n carreteraVecinal.distancia_camino_estado = '-'\n if ID not in counter:\n counter.append(ID)\n carreteraVecinal.distancia_kilometros = \\\n tools_lib.import_variable(row, 'carreteraCaminoVecinal', 'float', ID)\n\n # Adding the distance infomation camino vecinal\n if row.has_key('caminoVecinalCaminoAcceso') and row['caminoVecinalCaminoAcceso'] not in ['',' ']:\n caminoVecinal = class_lib.Distance(ID)\n caminoVecinal.parcela_pm_censo = censo\n caminoVecinal.distancia_position = position\n caminoVecinal.distancia_kilometros_unit_name = 'kilometros'\n caminoVecinal.distancia_categoria = 2\n caminoVecinal.distancia_camino_estado = '-'\n if ID not in counter:\n counter.append(ID)\n caminoVecinal.distancia_kilometros =\\\n tools_lib.import_variable(row, 'caminoVecinalCaminoAcceso', 'float', ID)\n\n # Adding the distance infomation camino accesso\n if row.has_key('caminoAccesoPuntoGPS') and row['caminoAccesoPuntoGPS'] not in ['', ' ']:\n caminoAccesso = class_lib.Distance(ID)\n caminoAccesso.parcela_pm_censo = censo\n caminoAccesso.distancia_position = position\n caminoAccesso.distancia_kilometros_unit_name = 'kilometros'\n caminoAccesso.distancia_categoria = 3\n caminoAccesso.distancia_camino_estado = '-'\n if ID not in counter:\n counter.append(ID)\n caminoAccesso.distancia_kilometros = \\\n tools_lib.import_variable(row, 'caminoAccesoPuntoGPS', 'float', ID)\n\n # Adding the distance infomation rumboCaminoCentroParcela\n if row.has_key('rumboCaminoCentroParcela') and row['rumboCaminoCentroParcela'] not in ['',' ']:\n puntoGPSCentroParcella = class_lib.Distance(ID)\n puntoGPSCentroParcella.parcela_pm_censo = censo\n puntoGPSCentroParcella.distancia_position = position\n puntoGPSCentroParcella.distancia_categoria = 4\n puntoGPSCentroParcella.distancia_kilometros_unit_name = 'kilometros'\n puntoGPSCentroParcella.distancia_camino_estado = '-'\n if ID not in counter:\n counter.append(ID)\n puntoGPSCentroParcella.rumbo_punto_gps_centro = \\\n 
tools_lib.import_variable(row, 'rumboCaminoCentroParcela', 'int', ID)\n\n # Adding the distance infomation PuntoGPSCentroParcella\n if row.has_key('puntoGPSCentroParcela') and row['puntoGPSCentroParcela'] not in ['',' ']:\n puntoGPSCentroParcella.distancia_kilometros =\\\n tools_lib.import_variable(row, 'puntoGPSCentroParcela', 'float', ID)\n\n # Adding the distance instances to the survey\n try:\n survey.plots[ID].distances['1'] = carreteraVecinal\n except:\n warn_msg = 'Could not find information on distance \"carreteraVecinal\" on plot: {plotid}.' \\\n .format(plotid=ID)\n logging.warning(warn_msg)\n try:\n survey.plots[ID].distances['2'] = caminoVecinal\n except:\n warn_msg = 'Could not find information on distance \"caminoVecinal\" on plot: {plotid}.' \\\n .format(plotid=ID)\n logging.warning(warn_msg)\n try:\n survey.plots[ID].distances['3'] = caminoAccesso\n except:\n warn_msg = 'Could not find information on distance \"caminoAcceso\" on plot: {plotid}.' \\\n .format(plotid=ID)\n logging.warning(warn_msg)\n try:\n survey.plots[ID].distances['4'] = puntoGPSCentroParcella\n except:\n warn_msg = 'Could not find information on distance \"puntoGPSCentroParcella\" on plot: {plotid}.' \\\n .format(plotid=ID)\n logging.warning(warn_msg)\n\n info_msg = \"Updated the distance table for {nplots} plots from the file: {file}\" \\\n .format(nplots=counter.__len__(), file=os.path.basename(infile))\n print(info_msg)\n logging.info(info_msg)", "title": "" }, { "docid": "0c453c0a1623c5e4411172bebcad6975", "score": "0.5751306", "text": "def loadCategory(catalog):\n categoryfile = cf.data_dir + 'videos/category-id.csv'\n input_file = csv.DictReader(open(categoryfile, encoding='utf-8'))\n for category in input_file:\n model.addCategory(catalog, category)", "title": "" }, { "docid": "d02be274baa9aa211f560440bc5f329b", "score": "0.57416385", "text": "def read_movie(filename):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n result = []\n for item in reader:\n if reader.line_num == 1:\n continue\n result.append(item)\n\n for i in result:\n try:\n movie = Movie(*i)\n except TypeError as e:\n print(e)\n else:\n db.session.add(movie)\n finally:\n db.session.commit()", "title": "" }, { "docid": "76f67b3b8dc53f4b6e95f37b7c7e2ff7", "score": "0.57348394", "text": "def load_csv(self, csv_path, id_column_name, graph_name=None):\n\n if id_column_name is None:\n raise Exception(\"id_column_name must not be None\")\n graph_name = self.graph_name if graph_name is None else graph_name\n self.cog.load_csv(csv_path, id_column_name, graph_name)\n self.all_predicates = self.cog.list_tables()", "title": "" }, { "docid": "67eccf47d282552390bcbbe6605d878d", "score": "0.57340604", "text": "def import_data_from_csv(file_path):\n\n try:\n with open(file_path) as csv_file:\n reader = csv.reader(csv_file, delimiter=',')\n\n # Init dictionaries\n movies, actors, writers, directors, locations = import_data_from_database()\n\n # FIXME : test header !\n header = next(reader)\n if header[0] != 'Title' or header[1] != 'Release Year':\n return \"Bad File..\"\n\n for row in reader:\n\n # Read CSV line\n name = row[0].strip()\n location = row[2]\n fun_facts = row[3]\n\n # Movie already exists create new location\n if name in movies:\n if '' != location:\n new_location = Location(location, fun_facts, movies[name])\n db.session.add(new_location)\n continue\n\n # Read more information from csv line about movie\n release_year = row[1]\n production = row[4]\n distributor = row[5]\n director = row[6]\n writer = 
row[7]\n movie_actors = [row[8], row[9], row[10]]\n\n # Create a new Movie\n movie = Movie(name, release_year, production, distributor)\n\n # Add director\n if '' != director:\n if director not in directors:\n director = Director(director)\n db.session.add(director)\n db.session.flush()\n\n # Save director id in local dictionary\n directors[director.name] = director.id\n\n # add director_id to movie\n movie.add_director(director.id)\n else:\n movie.add_director(directors[director])\n\n # Add writer\n if '' != writer:\n if writer not in writers:\n writer = Writer(writer)\n db.session.add(writer)\n db.session.flush()\n\n # Save director information\n writers[writer.name] = writer.id\n\n # add director_id to movie\n movie.add_writer(writer.id)\n else:\n movie.add_writer(writers[writer])\n\n # Add Actors\n for actor_name in movie_actors:\n if actor_name != '':\n if actor_name not in actors:\n actor = Actor(actor_name)\n db.session.add(actor)\n db.session.flush()\n\n # Save director information\n actors[actor_name] = actor.id\n\n # add actor to movie\n movie.add_actor(actor)\n else:\n movie.add_actor(actor_name)\n\n # Add Movie in DB\n db.session.add(movie)\n db.session.flush()\n\n # Store movie id in local dictionary\n movies[name] = movie.id\n\n # Create new Location, if not empty and does not exist\n if '' != location:\n if (location, movie.id) not in locations:\n new_location = Location(location, fun_facts, movie.id)\n db.session.add(new_location)\n db.session.flush()\n\n locations[(location, movie.id)] = new_location.id\n\n # Commit imported data\n db.session.commit()\n\n except FileNotFoundError:\n print(\"File : `\" + file_path + '` not found')", "title": "" }, { "docid": "5c4f1bc5dabbf7d12edd1ea800d02b03", "score": "0.5714591", "text": "def _import_from_csv(self, auth_token, csv_data, manager, required_fields,\n custom_row_function=None, special_fields=None, interactive=False):\n\n\n\n # the True value makes it leave the \\n character on the ends of lines\n lines = csv_data.text.splitlines(True)\n csv_data = None # Garbage collection?\n reader = Utils.unicode_csv_reader(lines)\n keys = [] # Store primary keys of newly created venues here\n line_num = 1\n exception_details = {} # If we get exceptions, add a new entry to this\n # dictionary for each line.\n for row in reader:\n line_num += 1\n try:\n if custom_row_function is not None:\n row = custom_row_function(row)\n self._form_create_dict(auth_token, row, required_fields,\n special_fields)\n v = manager.create(**row)\n keys.append(v.id)\n if interactive:\n sys.stderr.write('.')\n except ValueError, ve:\n exception_details[line_num] = u'ValueError: %s' % unicode(ve)\n if interactive:\n sys.stderr.write('E')\n print >> sys.stderr, \"\\nline %d: [%s]\" % (line_num,\n exception_details[line_num])\n print >> sys.stderr, '\\n', row, '\\n'\n except exceptions.InvalidDataException, ide:\n exception_details[line_num] = unicode(ide)\n if interactive:\n sys.stderr.write('E')\n print >> sys.stderr, \"\\nline %d: [%s]\" % (line_num,\n exception_details[line_num])\n print >> sys.stderr, '\\n', row, '\\n'\n except facade.models.ModelDataValidationError, ve:\n exception_details[line_num] = u'ValidationError: %s' % unicode(ve)\n if interactive:\n sys.stderr.write('E')\n print >> sys.stderr, \"\\nline %d: [%s]\" % (line_num,\n exception_details[line_num])\n print >> sys.stderr, '\\n', row, '\\n'\n except Exception, e:\n exception_details[line_num] = unicode(e)\n if interactive:\n sys.stderr.write('E')\n print >> sys.stderr, \"\\nline %d: [%s]\" % 
(line_num,\n exception_details[line_num])\n print >> sys.stderr, '\\n', row, '\\n'\n\n # If we had any exceptions, let's tell the user about them now.\n if len(exception_details):\n ide = exceptions.InvalidDataException('%d items could not be imported' % \\\n len(exception_details))\n ide.details.update(exception_details)\n raise ide\n\n return keys", "title": "" }, { "docid": "5a019830dba2ea92358eb7ddf490f2c0", "score": "0.57117856", "text": "def from_csv(self, fin):\n self.data = pd.read_csv(fin)", "title": "" }, { "docid": "8dfa937d84ead38baa01b4a1161f8900", "score": "0.5711724", "text": "def import_feature_csvs(corpus_context, typed_data):\n string_set_template = 'n.{name} = csvLine.{name}'\n float_set_template = 'n.{name} = toFloat(csvLine.{name})'\n int_set_template = 'n.{name} = toInt(csvLine.{name})'\n bool_set_template = '''n.{name} = (CASE WHEN csvLine.{name} = 'False' THEN false ELSE true END)'''\n properties = []\n for h, v in typed_data.items():\n if v == int:\n template = int_set_template\n elif v == bool:\n template = bool_set_template\n elif v == float:\n template = float_set_template\n else:\n template = string_set_template\n properties.append(template.format(name=h))\n properties = ',\\n'.join(properties)\n directory = corpus_context.config.temporary_directory('csv')\n path = os.path.join(directory, 'feature_import.csv')\n\n # If on the Docker version, the files live in /site/proj\n if os.path.exists('/site/proj') and not path.startswith('/site/proj'):\n feat_path = 'file:///site/proj/{}'.format(make_path_safe(path))\n else:\n feat_path = 'file:///{}'.format(make_path_safe(path))\n \n import_statement = '''CYPHER planner=rule\n LOAD CSV WITH HEADERS FROM \"{path}\" AS csvLine\n MATCH (n:{phone_type}_type:{corpus_name}) where n.label = csvLine.label\n SET {new_properties}'''\n\n statement = import_statement.format(path=feat_path,\n corpus_name=corpus_context.cypher_safe_name,\n phone_type=corpus_context.phone_name,\n new_properties=properties)\n corpus_context.execute_cypher(statement)\n for h, v in typed_data.items():\n corpus_context.execute_cypher('CREATE INDEX ON :%s(%s)' % (corpus_context.phone_name, h))\n # os.remove(path) # FIXME Neo4j 2.3 does not release files", "title": "" }, { "docid": "fcb26ee48867c8e82539de04c4045500", "score": "0.57114464", "text": "def load_csv(self, csv_file: CSVFile) -> None:\n if isinstance(csv_file, str):\n csv_file = Path(csv_file)\n if isinstance(csv_file, Path):\n csv_file = open(csv_file)\n reader = csv.reader(csv_file)\n with self.connection as conn:\n conn.executemany(\n 'INSERT INTO landholders VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n (parse_row(row) for row in reader)\n )\n conn.execute(\"INSERT INTO fts_landholders(fts_landholders) VALUES ('rebuild')\")", "title": "" }, { "docid": "918e088a8f43b32dc534054ed794167b", "score": "0.57098633", "text": "def load_sections_file():\n print(\"load_sections_file. Start..\")\n file_path = settings.BASE_DIR + \"/files/sections.csv\"\n for index_card in IndexCard.objects.all():\n data_reader = csv.reader(open(file_path), delimiter=\",\", quotechar='\"')\n for row in data_reader:\n # read data from line\n code = row[0].strip()\n name = row[1].strip()\n # create section entry\n section = Section()\n section.name = name\n section.code = code\n section.index_card = index_card\n try:\n section.save()\n except:\n print(\"Saving sections: \" + row[1].strip())\n print(\"Unexpected error:\", sys.exc_info())\n print(\"load_sections_file. 
End....\")", "title": "" }, { "docid": "e00e0746316703ee03c3be585575eb4c", "score": "0.5708524", "text": "def load_train_companies(companies_csv):\n with open(companies_csv, 'rb') as f: \n rows = [row for row in csv.reader(f, delimiter=',')]\n return [sanitize_text(row[1]) for row in rows], [sanitize_text(row[2]) for row in rows]", "title": "" }, { "docid": "9ab86fd4f7335da870c21889fb62ff82", "score": "0.56965303", "text": "def load_sections_from_csv(contents):\n if not check_sections_csv(contents):\n raise TypeError(\"Missing necessary columns\")\n\n not_added = set()\n for entry in contents:\n section = Section.lookup_by_section_id(entry['section_id'])\n if section is None:\n user = User.lookup_by_sid(entry['instructor_id'])\n if user is None:\n not_added.add(entry['instructor_id'])\n else:\n logger.info(\"CALLING(load_sections_from_csv) creating section \"\n + entry['section_id']\n )\n date_rule = generate_rrule(entry['start_date'], entry['start_time'])\n section = Section(section_id=entry['section_id'],\n section_type=entry['section_type'],\n instructor_id=user.id,\n date_rule=date_rule,\n location=entry['location']\n )\n db.session.add(section)\n if len(not_added) > 0:\n logger.warning(\"CALLING(load_sections_from_csv) missing instructors \" + not_added)\n raise TypeError(\"Instructors do not have an account! \" + not_added)", "title": "" }, { "docid": "74803bdfac47e5e53e60001779b2c485", "score": "0.569518", "text": "def import_csv(self, uri, year, brin6s):\n # TODO: Check encoding='cp1252' with source.\n df = pandas.read_csv(\n uri,\n delimiter=';',\n dtype=_SCHOOLADVIEZEN_CSV_COLUMNS,\n encoding='cp1252'\n )\n mask = df['GEMEENTENUMMER'] == 363 # only Amsterdam is relevant\n\n # Deal with rename of school types between 2013 and 2014:\n for column_name in df.columns:\n if column_name in SCHOOL_TYPE_MAPPING:\n df[SCHOOL_TYPE_MAPPING[column_name]] = df[column_name]\n\n # Cretae model instances and save them to database:\n instances = []\n for i, row in df[mask].iterrows():\n brin6 = row['BRIN_NUMMER'] + '{:02d}'.format(int(row['VESTIGINGSNUMMER']))\n # TODO: check whether this information is needed\n # peildatum_adviezen = datetime.strptime(\n # str(row['PEILDATUM_ADVIEZEN']), '%Y%m%d')\n # peildatum_leerlingen = datetime.strptime(\n # row['PEILDATUM_LEERLINGEN'], '%Y%m%d')\n\n # Make an SchoolAdvies entry for each type of school.\n for school_type in SCHOOL_TYPES:\n obj = SchoolAdvies(\n brin=row['BRIN_NUMMER'],\n vestigingsnummer=row['VESTIGINGSNUMMER'],\n\n advies_id=school_type,\n totaal=row[school_type],\n jaar=year,\n )\n obj.vestiging_id = brin6 if brin6 in brin6s else None\n instances.append(obj)\n\n self.bulk_create(instances)", "title": "" }, { "docid": "28824ff9729560548dc52f4eff3df6e0", "score": "0.56848156", "text": "def load_data(csvfile):\n\n firstnames = []\n lastnames = []\n usernames = []\n roles = []\n passwords = []\n\n # read csv file with user data\n with open(csvfile) as f:\n csv_reader = csv.reader(f, delimiter=',')\n count = 0\n for row in csv_reader:\n if count == 0: # ignores headers\n pass\n else:\n firstnames.append(row[0])\n lastnames.append(row[1])\n usernames.append(row[2])\n roles.append(row[3])\n passwords.append(row[4])\n count += 1\n\n # adds every user in csv file to db\n for i in range(len(firstnames)):\n role = Role.query.filter_by(name=roles[i]).first()\n user = User(firstname=firstnames[i], lastname=lastnames[i], username=usernames[i], password=passwords[i])\n user.roles.append(role)\n db.session.add(user)\n db.session.commit()", "title": "" }, 
{ "docid": "6212f9f68e6bab83f3094c0c166cc098", "score": "0.56795734", "text": "def ingest_csv_file(self, ingest_file_path, tablename):\n warnings.filterwarnings('ignore', category=MySQLdb.Warning)\n query = \"LOAD DATA LOCAL INFILE '\" + ingest_file_path + \"' INTO TABLE \" + tablename + \" \" \\\n \"CHARACTER SET UTF8 FIELDS TERMINATED BY ',' ENCLOSED BY '\\\"' ESCAPED BY '\\' LINES TERMINATED BY '\\\\r\\\\n' IGNORE 1 LINES\"\n\n cursor = self.sql_ecrm_conn.cursor()\n cursor.execute(query)\n warnings.filterwarnings('always', category=MySQLdb.Warning)\n cursor.close()\n self.sql_ecrm_conn.commit()\n print \"Ingested \" + ingest_file_path + \" into \" + tablename\n\n pass", "title": "" }, { "docid": "be2ca4fc71ef9e223c1a4c0be310d79c", "score": "0.56619227", "text": "def importCsv(self, path):\n csv= CsvReader( logStream= self.config.logStream,\n verbose=self.config.verbose,\n debug=self.config.debug)\n\n self.log(\"\\nParsing input file '{0}' ...\\n\".format(path))\n self._currentFile= os.path.basename(path) \n \n try:\n retval= csv( path,\n delimiter=self.config.separator,\n encoding=self.config.encoding,\n dateformat=self.config.date_format,\n timeformat=self.config.time_format,\n mergeTowflights=True )\n \n except Exception as ex:\n self.error(\"{0}\\n\".format(ex))\n \n if self.config.debug:\n print_exc() \n \n self.log(\"-> Imported {0} records\\n\".format( len(retval) ))\n \n return retval", "title": "" }, { "docid": "440dd2851d6ee19eb199ae3a7746a2af", "score": "0.56545365", "text": "def read_data(csv_file, db):\n with open(csv_file, encoding='utf-8') as file_csv:\n \n reader = csv.DictReader(file_csv, delimiter=',')\n list_of_artists = list(reader)\n list_for_db = []\n\n for i, artist in enumerate(list_of_artists, 1):\n\n each_event = {'_id': i,\n 'Artist': artist['ะ˜ัะฟะพะปะฝะธั‚ะตะปัŒ'],\n 'Price': int(artist['ะฆะตะฝะฐ']),\n 'Place': artist['ะœะตัั‚ะพ'],\n 'Date': dt.datetime.strptime('2020 ' + artist['ะ”ะฐั‚ะฐ'], '%Y %d.%m')}\n\n list_for_db.append(each_event)\n\n db.insert_many(list_for_db)\n\n return \"\\nThe data was successfully imported.\"", "title": "" }, { "docid": "a422b1097a02c0ed833039edbb3d7152", "score": "0.5612824", "text": "def import_to_ak(self, csv_file):\n url = self.settings.AK_API_BASE_URL + 'upload/'\n result = requests.post(url,\n files={'upload': StringIO(csv_file.getvalue())},\n data={'page': self.settings.AK_IMPORT_PAGE, 'autocreate_user_fields': 0},\n auth=(self.settings.AK_USER, self.settings.AK_PASSWORD)\n )\n return result", "title": "" }, { "docid": "ae47c1edca8412e4ec0865fe6a488f81", "score": "0.561274", "text": "def importCSV(self):\r\n try:\r\n fileName = mod.openFileLocation(self)\r\n self.table = mod.fileUpload(fileName)\r\n self.csvTable.setSortingEnabled(True)\r\n # self.csvTable.setHorizontalHeaderLabels()\r\n self.csvTable.setModel(PandasModel(self.table))\r\n self.csvTable.resizeColumnsToContents()\r\n self.csvTable.show()\r\n\r\n # clears comboboxes to avoid repeat values when another CSV is imported\r\n self.xBox.clear()\r\n self.yBox.clear()\r\n self.plotBox.clear()\r\n\r\n # populate combobox values\r\n colVal = 0\r\n self.plotBox.addItem(\"None\")\r\n for i in self.table.columns:\r\n colName = str(colVal) + \". 
\" + i\r\n self.xBox.addItem(colName)\r\n self.yBox.addItem(colName)\r\n self.plotBox.addItem(colName)\r\n colVal += 1\r\n\r\n except Exception as e:\r\n # Display error message\r\n mod.errorGUI(str(e))", "title": "" }, { "docid": "f0a2b5f59e5b69a8606f5d0e91de9007", "score": "0.5607616", "text": "def load_label_csv():\n\n grapheme_train = pd.read_csv(Path(PATH_DATA_RAW) / \"train.csv\")\n return grapheme_train", "title": "" }, { "docid": "74a328ceb63f834e6c35013eb9f3bf43", "score": "0.5598666", "text": "def load_csv_sparse(self,filename, delimiter=',', skiprows=None):\n print(\"Note: load_csv_sparse expects a csv in the format of: rowID, colID, Value, ...\")\n u, m, r = np.loadtxt(filename, delimiter=delimiter, skiprows=skiprows, usecols=(0,1,2)).T\n \n print(\"u\",type(u))\n print(\"m\",type(m))\n print(\"r\",r)\n print(\"COO :\", coo_matrix((r, (u-1, m-1)), shape=(u.max(), m.max())))\n self.mat = coo_matrix((r, (u-1, m-1)), shape=(u.max(), m.max())).tocsr()\n print(\"Created matrix of shape: \",self.mat.shape)", "title": "" }, { "docid": "772e0f351e575cc72be7540deecbabf5", "score": "0.5598014", "text": "def load_dataset_and_make_vectorizer(cls, news_csv):\n news_df = pd.read_csv(news_csv)\n train_news_df = news_df[news_df.split=='train']\n return cls(news_df, NewsVectorizer.from_dataframe(train_news_df))", "title": "" }, { "docid": "2c52a1aa748a61aacbe0e88e6f0d5d3f", "score": "0.5583382", "text": "def csv_pull():\n with open('inventory.csv', newline='') as csvfile:\n info = csv.DictReader(csvfile)\n rows = list(info)\n for i in rows:\n try:\n Product.create(product_name=i['product_name'],\n product_price=int(\n round(float(i['product_price'].replace('$', '')) * 100)),\n product_quantity=int(i['product_quantity']),\n date_updated=datetime.datetime.today()\n )\n except IntegrityError:\n inv_item = Product.get(product_name=i['product_name'])\n inv_item.product_price = int(\n round(float(i['product_price'].replace('$', '')) * 100))\n inv_item.product_quantity = int(i['product_quantity'])\n inv_item.date_updated = datetime.datetime.today()\n inv_item.save()", "title": "" }, { "docid": "5bc884e1e4417c49282ef2d7218d643a", "score": "0.557561", "text": "def import_players():\r\n\r\n try:\r\n for line in open(r\"Players.csv\"):\r\n data = line.strip().split(\",\")\r\n PLAYERS[data[0]] = Player(data[1], data[2], data[3], data[4],\r\n data[5])\r\n except IOError:\r\n pass", "title": "" }, { "docid": "979a12604a72291610a1631a873b395d", "score": "0.55688965", "text": "def read_csv(self, file: str, **kwargs):\n self.decisions.read_csv(file, **kwargs)", "title": "" }, { "docid": "74a5e2b3d8711fc71059a6f7bfe8b415", "score": "0.5561837", "text": "def csv_load(self, csv_file, delimiter):\n\n self._path_check(csv_file)\n\n try:\n with open(csv_file, 'r') as data_file:\n reader = csv.reader(data_file, delimiter=delimiter)\n for line in reader:\n self.row_list.append(line)\n\n except csv.Error:\n logger.error(\"The file provided({})\"\\\n \" is not a file with values \"\\\n \"separated by {}.\".format(csv_file, delimiter))\n\n except (IOError, OSError):\n logger.error(\"Error while trying to read the file: \"\\\n \"{}\".format(csv_file))", "title": "" }, { "docid": "baf04ecec194e6a6f55cc13646df62bf", "score": "0.5559499", "text": "def load_data_from_csv(csv_file):\n pass\n\n with open(csv_file) as myCSV:\n reader = csv.reader(myCSV)\n header = next(reader)\n myData = []\n for line in reader:\n if (len(line)>0):\n myData.append([line[0], int(line[1]), int(line[2])])\n #myData.append(line)\n \n return myData", 
"title": "" }, { "docid": "a2c31c11e4e967c0265896e72624cc17", "score": "0.5554057", "text": "def read_student_data():\n with open('Students.csv', 'r', newline='', encoding='UTF-8') as stud_file:\n students = []\n \n reader = csv.reader(stud_file)\n next(reader) # Header\n for row in reader:\n name, gender, course_name, teacher, ects, classroom, grade, image = row\n if grade != '':\n grade = int(grade)\n else:\n grade = None\n\n course = Course(course_name, classroom, teacher, int(ects), grade)\n\n if students and students[-1].name == name:\n students[-1].data_sheet.courses.append(course)\n else:\n data = DataSheet([course])\n student = Student(name, gender, data, image)\n students.append(student)\n \n students.sort(key=lambda x: x.get_avg_grade())\n \n #for s in student:\n # student_avg = {s.name, s.get_avg_grade()}\n \n return students", "title": "" }, { "docid": "0e30e6c2775ba728153224e5b610bbfd", "score": "0.5553209", "text": "def from_csv(\n cls,\n path: str = None,\n separator: str = ';',\n newline: str = '\\n',\n encoding: str = 'cp1252'):\n if path is None:\n path = f'data/import/{getattr(cls, \"__tablename__\")}.csv'\n\n file_data = cls.load_csv(path, separator, newline, encoding)\n for model_data in file_data:\n cls.update_or_create(**model_data)\n db.session.commit()", "title": "" }, { "docid": "af3c14e5193528aa36b2aa48ba447562", "score": "0.55485153", "text": "def load_from_csv(self):\n logger.info('Parsing raw IBTrACS CSV file...')\n self.storms.clear()\n with open(os.path.join(self.datadir, 'ibtracs.csv')) as f:\n # Group lines with the same storm ID. Construct Storm objects\n # on the fly to avoid loading the entire file at once\n stormlines = []\n linenum = 0\n # Skip first two header lines\n for i in range(3):\n line = f.readline()\n linenum += 1\n oldID = None\n while True:\n print(f'Parsing line {linenum}', end='\\r')\n line = line.strip()\n if len(line) == 0:\n continue\n fields = [field.strip() for field in line.split(',')]\n ID = fields[0]\n # If ID is the same as the last line, add to this storm's set\n if ID == oldID:\n stormlines.append(line.strip())\n # If ID is different than before, then this is a new storm\n else:\n if stormlines:\n tc = Storm(stormlines)\n if len(tc.time) > 0:\n self.storms.append(tc)\n stormlines = [line.strip()]\n oldID = ID\n line = f.readline()\n # End of file\n if line == '':\n if stormlines:\n tc = Storm(stormlines)\n if len(tc.time) > 0:\n self.storms.append(tc)\n break\n linenum += 1\n print()\n self.resolve_duplicates()", "title": "" }, { "docid": "aef6179ba21f7e76d956dfb679ced209", "score": "0.5546519", "text": "def getCSV():\n file = askopenfile(parent=root, mode=\"r\", title=\"Choose a file\", filetype=[(\"Csv file\", \"*.csv\")])\n if file is not None:\n headers = [] # empty list for headers\n csv_read = csv.reader(file)\n headers = next(csv_read) # input headers to the empty list\n if len(headers) > 2: # ONLY USED FOR READING LIFE GENERATING CSV\n readLifeCSV(csv_read)\n for row in csv_read: # iterate through each row and word within the csv file\n for words in row:\n separate = words.split(\";\")\n result = search_wiki(separate[0], separate[1]) # perform search", "title": "" }, { "docid": "490ba28f22430630105079ef8745198d", "score": "0.55457973", "text": "def test_csv_to_neo_load():\n input_args1 = {\n 'filename': [\n os.path.join(RESOURCE_DIR, 'cm_nodes.csv'),\n os.path.join(RESOURCE_DIR, 'cm_edges.csv'),\n ],\n 'format': 'csv',\n }\n t1 = Transformer()\n t1.transform(input_args1)\n\n output_args = {\n 'uri': 
DEFAULT_NEO4J_URL,\n 'username': DEFAULT_NEO4J_USERNAME,\n 'password': DEFAULT_NEO4J_PASSWORD,\n 'format': 'neo4j',\n }\n t1.save(output_args)", "title": "" }, { "docid": "7a1921abb0b43cb3fafbca637053db8b", "score": "0.55406576", "text": "def load_from_file_csv(cls):\n class_name = cls.__name__\n my_file = class_name + \".csv\"\n if os.path.isfile(my_file) is False:\n return []\n else:\n my_list = []\n with open(my_file, mode=\"r\", encoding=\"utf-8\") as read_file:\n reader = csv.DictReader(read_file, skipinitialspace=True)\n l_d = [{k: int(v) for k, v in row.items()} for row in reader]\n for dic in l_d:\n my_list.append(cls.create(**dic))\n return my_list", "title": "" }, { "docid": "66ebbf532c786877b0bfd7377e755145", "score": "0.5532867", "text": "def ImportCSV():\n with open(CSV_FILE, 'rb') as csvfile:\n fantasy_list = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in fantasy_list:\n POSITIONS.setdefault(row[0], row[1])\n POINTS.setdefault(row[0], int(row[3]))\n if POSITIONS[row[0]] == 'qb':\n QB_PRICES.setdefault(row[0], (row[0], int(row[2])))\n else:\n PRICES.append((row[0], int(row[2])))", "title": "" }, { "docid": "74846bce6cb57bc09b53764e679c69fa", "score": "0.5529652", "text": "def __init__(self, csv_path):\n self.csv_path = csv_path", "title": "" }, { "docid": "1bd6361ae9e3324cac3a448aadd32391", "score": "0.549749", "text": "def csv_loader(self,load_file_path,import_cols,col_dict,table_name):\n \n load_df = pd.read_csv(load_file_path, usecols = import_cols)\n\n load_df.rename(columns=col_dict, inplace=True)\n \n self.df_to_db(table_name, load_df)", "title": "" }, { "docid": "4e873b2688ae1a46c67b3fb0799aeba3", "score": "0.5489311", "text": "def load_data(session, filepath):\n \n with open(filepath, encoding = 'utf8') as f:\n csvreader = csv.reader(f)\n next(csvreader) # skip header\n for line in csvreader:\n # load session data\n session.execute(session_table_insert, (line[0], line[9], float(line[5]), int(line[8]), int(line[3])))\n # load session playlist data\n session.execute(session_playlist_table_insert, (line[0], line[9], line[1], line[4], int(line[10]), int(line[8]), int(line[3])))\n # load song data\n session.execute(song_table_insert, (line[1], line[4], line[9], int(line[10])))", "title": "" }, { "docid": "8bdac13bb204c44e782faf11cf6a9e69", "score": "0.5479327", "text": "def read_csv_file(perform):\n if perform == 'train':\n file_name = 'data_set_train.csv'\n else:\n file_name = 'data_set_predict.csv'\n with open(file_name, 'r') as read_obj:\n csv_reader = reader(read_obj)\n data = list(csv_reader)\n return data", "title": "" }, { "docid": "4539b1906eced038c706286557b31812", "score": "0.5468457", "text": "def get_titanic(titanic_csv: str=\"titanic.csv\"):\n return read_csv(titanic_csv)", "title": "" }, { "docid": "9e7918cc677d3bb04e5ea6d7fff7cebb", "score": "0.54682386", "text": "def load_coref_csv(self):\n\n self.coref_fic = pd.read_csv(os.path.join(self.coref_output_dirpath, f'{self.fandom_fname}.coref.csv'))", "title": "" }, { "docid": "7db16ff246a6638f2252c446a6018b13", "score": "0.5463037", "text": "def add_csv_file(url):\n csv_file = CSVFile.objects.get_or_create(url=url)[0]\n # Save to generate an ID\n csv_file.save()\n # Load and store the images contained in the CSV file\n csv_file.load_csv()", "title": "" }, { "docid": "e3204e30404a2398a2d6bd28b51153ce", "score": "0.5458506", "text": "def read_csv():\r\n with open('inventory.csv', newline='') as csvfile:\r\n productreader = csv.DictReader(csvfile)\r\n rows = list(productreader)\r\n for row in 
rows:\r\n row['product_price'] = int(row['product_price'].replace('$', '').replace('.', ''))\r\n row['product_quantity'] = int(row['product_quantity'])\r\n row['date_updated'] = (datetime.datetime.strptime(row['date_updated'], '%m/%d/%Y').date())\r\n try:\r\n Product.create(\r\n product_name=row['product_name'],\r\n product_quantity=row['product_quantity'],\r\n product_price=row['product_price'],\r\n date_updated=row['date_updated']\r\n ).save()\r\n except IntegrityError:\r\n updated = Product.get(product_name=row['product_name'])\r\n updated.product_name = row['product_name']\r\n if updated.date_updated < row['date_updated']:\r\n updated.product_quantity = row['product_quantity']\r\n updated.product_price = row['product_price']\r\n updated.date_updated = row['date_updated']\r\n updated.save()\r\n else:\r\n updated.product_quantity = updated.product_quantity\r\n updated.product_price = updated.product_price\r\n updated.date_updated = updated.date_updated\r\n updated.save()", "title": "" }, { "docid": "620b0eaee2957583ff091db1cb1315d3", "score": "0.54562414", "text": "def read_digivol_csv(filename):\n \n dv = pd.read_csv('data/Project-1536729-DwC.csv')\n t = [x.replace('\\r\\n', '\\n') for x in dv['occurrenceRemarks']]\n dv['text'] = t\n dv.drop('occurrenceRemarks', axis=1, inplace=True)\n return dv", "title": "" }, { "docid": "db7b345852828b5e32d7cdaa7411514f", "score": "0.54549754", "text": "def import_csv(cls, *args, **kwargs):\n # add some defaults\n kwargs.setdefault('sep', '\\t')\n kwargs.setdefault('comment', '#')\n # Now import the dataframe and convert to class cls\n df = pandas.read_csv(*args,**kwargs)\n problem_check = cls(df)\n # store path\n problem_check.from_path = args[0]\n # Get metadata\n problem_check._import_metadata(problem_check.from_path)\n \n # Fix complex numbers ...\n # Pandas does not import complex numbers properly from csv. 
This is a reported issue: https://github.com/pydata/pandas/issues/9379\n for eval_col in problem_check._get_eval_columns():\n if problem_check.data[eval_col].dtype==object: \n df[eval_col] = df[eval_col].apply(complex)\n \n return problem_check", "title": "" }, { "docid": "40206661294cc03b18fd10050bd9f39c", "score": "0.5453356", "text": "def load_dataset(file):\n return pd.read_csv(file)", "title": "" }, { "docid": "52b3acc87c5a4e9868609c3e448b9655", "score": "0.54455996", "text": "def import_csv(input_file_name):\n new_file = pandas.read_csv(input_file_name, index_col=0)\n # sorted_by_percentage = new_file.sort_values([\"%\"], ascending=False)\n return new_file", "title": "" }, { "docid": "c97ccac620b91a688853fb7254675d9a", "score": "0.5443258", "text": "def load_from_file_csv(cls):\n filename = cls.__name__ + '.csv'\n list_objs = []\n if not os.path.isfile(filename):\n return list_objs\n with open(filename, mode='r', encoding='utf-8') as file:\n list_dicts = cls.from_json_string(file.read())\n for d in list_dicts:\n list_objs.append(cls.create(**d))\n return list_objs", "title": "" }, { "docid": "bce0fb80b158dc99a25590e311228a1d", "score": "0.5439366", "text": "def load_trainstops():\n return load_pointsfile('train_stops.csv')", "title": "" }, { "docid": "f8ff55a4a1c505a7a29744d901a87657", "score": "0.5435377", "text": "def load_csv(csv_file, header=False):\n with open(csv_file, 'rb') as f: \n reader = csv.reader(f, delimiter=',')\n if header: \n next(reader) # if the csv has a header, skip it\n for row in reader:\n # yield each row in the csv file one at a time\n yield row", "title": "" }, { "docid": "d329c7d8332e959c4da19eb3a741874d", "score": "0.54325956", "text": "def open_and_read_in_csv_file(self):\n\n try:\n with open(self.filename, newline='') as csv_file:\n csv_file_reader = csv.reader(csv_file)\n for item in csv_file_reader:\n self.csv_file_in_list_format.append(item)\n self.write_log_to_text_box(\"csv file successfully read. \\n\\n\", True)\n except UnicodeDecodeError:\n self.write_log_to_text_box(\"This file is not being recognized as a csv file. 
\\n\\n\", True)", "title": "" }, { "docid": "20d5df1fe280a5fd7addec9aaf5c1e43", "score": "0.5428745", "text": "def student_csv(lastname):\n filename = lastname + '.csv'\n filepath = os.path.join('..', 'forecast_entries', filename)\n print(filepath)\n file_df = pd.read_csv(filepath, index_col='Forecast #')\n return file_df", "title": "" }, { "docid": "87b86ece3e321a74b217a5a34acd3301", "score": "0.54264426", "text": "def load_neos(neo_csv_path):\n\n neos = []\n\n with open(neo_csv_path) as f:\n csv_data = csv.DictReader(f)\n\n for item in csv_data:\n try:\n neo = NearEarthObject(\n designation=item[\"pdes\"],\n name=item[\"name\"],\n diameter=(\n item[\"diameter\"]\n if item[\"diameter\"] != \"\" else \"nan\"\n ),\n hazardous=True if item[\"pha\"] == \"Y\" else False\n )\n\n except Exception as e:\n print(e.__str__())\n\n else:\n neos.append(neo)\n\n return neos", "title": "" }, { "docid": "9c1bf9d4f1a17a45cbe8a92886ab967e", "score": "0.54253614", "text": "def importCSV(self):\n import csv\n self.csvFilePath = self.mQgsFileWidget.filePath()\n #QtWidgets.QMessageBox.information(None, \"Fin de calcul\", self.mQgsFileWidget.filePath())\n with open(self.csvFilePath, newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=';')\n rows = list(csv_reader)\n totalrows = len(rows) # Nombre de lignes\n #print('totalrows : {}'.format(totalrows))\n self.tableWidget.setRowCount(totalrows-1)\n for i in range(totalrows):\n if i ==0 :\n continue\n row = rows[i]\n for j in range(len(row)):\n item = QtWidgets.QTableWidgetItem(row[j])\n #print(row[j])\n self.tableWidget.setItem(i-1, j, item)\n self.dataAffectation= rows[1::] # A stocker dans le modรจle de donnรฉes\n #print(self.dataAffectation)", "title": "" }, { "docid": "ef6e6c81b3513fc30f53dd07fc685c9e", "score": "0.5424752", "text": "def csv_to_sites(filepath: str) -> list:\n res = []\n with open(filepath, 'r') as file:\n reader = csv.reader(file)\n if csv.Sniffer().has_header(filepath):\n next(reader) # skip header row if it exists\n for row in reader:\n try:\n location = row[1]\n if row[2] == \"Y\":\n availability = True\n else:\n availability = False\n name = row[3]\n vaccine_type = row[4]\n zip_code = int(float(row[6]))\n last_checked = row[5]\n facility_type = \"\"\n res.append(Site(name, location, zip_code, last_checked, vaccine_type, facility_type, availability))\n except ValueError:\n print(\"Unable to cast value. 
Skipping row...\")\n continue\n\n return res", "title": "" }, { "docid": "6ae17f669ee3b54ee3b2a38a529a607f", "score": "0.5411244", "text": "def read_data():\n\n counter = 0\n with open(filename, \"r\") as infile:\n reader = csv.reader(infile)\n reader.next()\n for row in reader:\n date = row[0].replace(\".\", \"-\")\n date = datetime.strptime(date, \"%d-%m-%Y\").date()\n channel = row[1]\n country = row[2]\n os = row[3]\n impressions = int(row[4])\n clicks = int(row[5])\n installs = int(row[6])\n spend = float(row[7])\n revenue = float(row[8])\n\n try:\n obj, created = CsvRow.objects.get_or_create(date=date, channel=channel,\n country=country, os=os, impressions=impressions,\n clicks=clicks, installs=installs,\n spend=spend, revenue=revenue)\n if created:\n obj.save()\n except Exception as e:\n print e", "title": "" }, { "docid": "67326af7b3b20eb9360442290af579cc", "score": "0.5410195", "text": "def from_csv_to_database(table_name):\n for year, path in datasets.items():\n # load csv files\n with open(path, encoding='cp1251') as dataset:\n datasets[year] = csv.DictReader(dataset, delimiter=';')\n print(f\"Year {year} is loading\")\n database.insert_values(datasets[year], year, size=16384,\n collection_name=table_name)", "title": "" }, { "docid": "d6ba7b2ff51abc0fcd2132e07f5b7c34", "score": "0.54091406", "text": "def load_from_file_csv(cls):\n\n if not os.path.exists(cls.__name__ + '.csv'):\n return []\n if cls.__name__ == 'Rectangle':\n attrs = ('id', 'width', 'height', 'x', 'y')\n elif cls.__name__ == 'Square':\n attrs = ('id', 'size', 'x', 'y')\n with open(cls.__name__ + '.csv', 'rt', newline='') as file:\n reader = csv.reader(file)\n objects = list(reader)\n objects = ((int(i) for i in l) for l in objects)\n return [cls.create(**dict(zip(attrs, l))) for l in objects]", "title": "" }, { "docid": "6c00cb92abec3e866822e9f4784733a8", "score": "0.540781", "text": "def load_cereals(context, csv_path):\n\n csv_path = os.path.join(os.path.dirname(__file__), csv_path)\n with open(csv_path, \"r\") as fd:\n cereals = [row for row in csv.DictReader(fd)]\n\n context.log.info(\"Read {n_lines} cereals\".format(n_lines=len(cereals)))\n return cereals", "title": "" }, { "docid": "2cc4a69a2b941f79b2be6846d8ecfb8d", "score": "0.5407254", "text": "def import_survey(infile,PlotIDName,censo):\n survey = class_lib.Survey(1)\n with open(infile, 'rb') as samplingfile:\n datareader = csv.DictReader(samplingfile, delimiter=',')\n for row in datareader:\n ID = row[PlotIDName]\n if ID.__len__() > 0:\n survey.add_plot(ID, censo_id=censo)\n return survey", "title": "" }, { "docid": "66266dd51db7dfc28e3aaaa4cc32bef7", "score": "0.54071444", "text": "def import_subannotation_csv(corpus_context, type, annotated_type, props):\n path = os.path.join(corpus_context.config.temporary_directory('csv'),\n '{}_subannotations.csv'.format(type))\n\n # If on the Docker version, the files live in /site/proj\n if os.path.exists('/site/proj') and not path.startswith('/site/proj'):\n csv_path = 'file:///site/proj/{}'.format(make_path_safe(path))\n else:\n csv_path = 'file:///{}'.format(make_path_safe(path))\n\n prop_temp = '''{name}: csvLine.{name}'''\n properties = []\n\n corpus_context.execute_cypher('CREATE CONSTRAINT ON (node:%s) ASSERT node.id IS UNIQUE' % type)\n\n for p in props:\n if p in ['id', 'annotated_id', 'begin', 'end']:\n continue\n properties.append(prop_temp.format(name=p))\n if properties:\n properties = ', ' + ', '.join(properties)\n else:\n properties = ''\n statement = '''CYPHER planner=rule USING PERIODIC COMMIT 
500\n LOAD CSV WITH HEADERS FROM \"{path}\" AS csvLine\n MATCH (annotated:{a_type}:{corpus} {{id: csvLine.annotated_id}})\n CREATE (annotated) <-[:annotates]-(annotation:{type}:{corpus}\n {{id: csvLine.id, begin: toFloat(csvLine.begin),\n end: toFloat(csvLine.end){properties}}})\n '''\n statement = statement.format(path=csv_path,\n corpus=corpus_context.cypher_safe_name,\n a_type=annotated_type,\n type=type,\n properties=properties)\n corpus_context.execute_cypher(statement)\n for p in props:\n if p in ['id', 'annotated_id']:\n continue\n corpus_context.execute_cypher('CREATE INDEX ON :%s(%s)' % (type, p))\n # os.remove(path) # FIXME Neo4j 2.3 does not release files", "title": "" }, { "docid": "046deda6021bef60c44731c599bfaa5d", "score": "0.540414", "text": "def open_csv( filename=\"rps.csv\" ):\n f = open(filename, newline='')\n reader = csv.reader(f)\n\n LoL = []\n for row in reader:\n LoL.append( row )\n\n # print(\"LoL is\", LoL)\n f.close()\n\n return LoL", "title": "" }, { "docid": "e580b863735c5fd3ae84b79ad32ab50b", "score": "0.54035103", "text": "def importCsv(pathToCSVFile, schema, spark):\n return spark.read.option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"false\") \\\n .option(\"dateFormat\", \"dd-MMM-yyyy\") \\\n .option(\"schema\", schema) \\\n .csv(pathToCSVFile)", "title": "" }, { "docid": "75015706b7f05f6a5ffa8378706d9599", "score": "0.539899", "text": "def load_csv(self, file_path):\n if file_path is None:\n logger.error(\"Please supply a file_path parameter with the full path to the CSV file.\")\n return\n if hasattr(self, 'data_full'):\n logger.warning(\"Warning! Overwriting existing data for this species.\")\n\n logger.info(\"Loading data from: %s\" % file_path)\n f = open(file_path, 'r')\n try:\n dialect = csv.Sniffer().sniff(f.read(10240))\n self.data_full = pd.read_csv(file_path, sep=dialect.delimiter)\n # self.data_full.columns = map(str.lower, self.data_full.columns)\n # convert all column headers to lowercase\n self.data_full.columns = [x.lower() for x in self.data_full.columns]\n logger.info(\"Succesfully loaded previously saved CSV data.\")\n if 'specieskey' in self.data_full and self.data_full['specieskey'].unique().size == 1:\n self.ID = self.data_full['specieskey'].unique()[0]\n logger.info(\"Updated species ID: %s \" % self.ID)\n\n return self.data_full\n finally:\n f.close()", "title": "" }, { "docid": "00cdf08a5f8489f63ffe48d17d787431", "score": "0.53982407", "text": "def import_caesar_data_extracts(self, csv_source):\n return Caesar().http_post(f'{self._api_slug}/{self.id}/extracts/import', json={'file': csv_source})", "title": "" }, { "docid": "9a35df580859d8990c0d199e1af0773a", "score": "0.53871053", "text": "def import_from_file(self):\n\n if self.data_path.find(\"csv\") != -1:\n df_csv = pd.read_csv(self.data_path)\n self.data_path = self.data_path.replace(\"csv\", \"xlsx\")\n df_csv.to_excel(self.data_path)\n\n wb = xl.load_workbook(self.data_path)\n sheet = wb.worksheets[0]\n\n content = []\n\n tokens_name = []\n tokens_list = []\n tokens = {}\n\n firstcell = 1\n if sheet.cell(1, 1).value == None:\n firstcell = 2\n for row in range(2, sheet.max_row + 1):\n if sheet.cell(row, 1).value == None:\n break\n for column in range(firstcell, sheet.max_column + 1):\n if sheet.cell(row, column).value == None:\n break\n tokens_name.append(sheet.cell(1, column).value)\n value = sheet.cell(row, column).value\n if tokens_name[column - 2] == 'content':\n content.append(value)\n tokens[tokens_name[column - 2]] = value\n tokens_list.append(tokens)\n 
tokens = {}\n\n self.tokens_list = tokens_list\n\n if content != []:\n self.content = content", "title": "" }, { "docid": "a7c88d4dbe8eeb0eb5eb9784f216af69", "score": "0.5386123", "text": "def from_csv(cls, fname):\n if os.path.isfile(fname):\n with open(fname, 'r') as csvfile:\n size = int(csvfile.readline().strip())\n\n direct = re.match(r\"(false|true)\", csvfile.readline().strip(), re.I)\n\n graphtype = cls.DIRECTED if direct and direct.group(0).lower() == \"true\" else cls.UNDIRECTED\n\n graph_reader = csv.DictReader(csvfile, fieldnames=cls.FIELDNAMES)\n try:\n graph = cls(size, graphtype)\n for row in graph_reader:\n graph.parse_row(row)\n return graph\n except (ValueError, SyntaxError) as e:\n Graph.on_error_exit(str(e))\n else:\n Graph.on_error_exit(\"Invalid file name: %s\\nThe file does not exist or is not a file\" %fname)", "title": "" }, { "docid": "f3f3a0ab68dec03de4902444fadbdd53", "score": "0.53822356", "text": "def process_csv():\n df = load_raw_dataset()\n df.to_csv(os.path.join(config['data'], 'GEFCom2014/Load/gefcom2014.csv'), index=False)", "title": "" }, { "docid": "7500b40b74748f522cc9cbd2a381bcad", "score": "0.5381662", "text": "def load_from_csv(self, filename):\n start_time=time.time()\n\n new_df = pd.read_csv(filename)\n self.customer = new_df[self.keyname]\n self.y_train = new_df[self.def_ind]\n\n num_cust = len(self.customer)\n self.X_train = []\n for i in range(self.ndim):\n self.X_train.append(np.zeros(shape=(num_cust, self.maxlen), dtype=np.int32))\n\n for dim in range(self.ndim):\n for tm in range(self.maxlen):\n self.X_train[dim][:,tm] = new_df[ self.config['name_dim'+str(dim+1)]+'_'+str(tm) ]\n\n print(\"Time taken for Reading Data : \" , str(time.time() - start_time) , \" in sec.\")", "title": "" }, { "docid": "c3f46e1145e002f7688aa877824608a5", "score": "0.53815955", "text": "def import_assignments(self, path: str):\n self.context.solution = import_assignments_from_csv(self.context, path)", "title": "" }, { "docid": "17949a94605d64654feae76d64a30b57", "score": "0.537825", "text": "def managecsv(url):\n datas = scrapOne(url)\n checkfolderdata()\n if not datafileexist(datas[1]):\n createcsv(datas[1])\n managecsv(url)\n else:\n addcsv(datas[0], datas[1])", "title": "" }, { "docid": "c786e343ba86acf2a9aef7c9964ceec5", "score": "0.5377544", "text": "def test_import_no_csv(self):\n doc = self.go('/haiti/api/import')\n form = doc.last('form')\n doc = self.s.submit(form, key='test_key')\n assert 'Please specify at least one CSV file.' in doc.text", "title": "" } ]
2f3514ca783200d98c92a95c191f312e
Build a list of adjectives starting with the passed letter
[ { "docid": "6ebff2d920f2f0cc133ee2de8ed358e3", "score": "0.7244773", "text": "def get_adjectives_starting_with(first_letter):\n api_data = get_adjectives_data(first_letter)\n adjectives = parse_adjectives_data(api_data)\n return adjectives", "title": "" } ]
[ { "docid": "b38229f8f650f088818b96fc992bd8cb", "score": "0.6487598", "text": "def get_animals_starting_with(first_letter):\n animals_list = ANIMALS[first_letter]\n return animals_list", "title": "" }, { "docid": "902048fe510cff13baabc92782125940", "score": "0.6204445", "text": "def words_starting_with(self, letter):\n for word in self._dict.iterkeys():\n if word.startswith(letter):\n yield word", "title": "" }, { "docid": "0539f0e494b65353a287b39abd222249", "score": "0.61181724", "text": "def front_x(list):#This function will append a words but the start letter is x instead a.\n asd = []\n dsa = []\n\n for word in list:\n if word.startswith('x'):\n asd.append(word)\n else:\n dsa.append(word)\n\n return sorted(asd) + sorted(dsa)", "title": "" }, { "docid": "53d1cb4948f0aa57074f39b3ae53d8c1", "score": "0.60709494", "text": "def anagram(letter_list):\n results = []\n pat = '.*'.join(sorted(letter_list))\n for word in WORDS:\n sorted_word = ''.join(sorted(word))\n if re.search(pat, sorted_word):\n for letter in letter_list:\n pattern = re.compile('('+letter+')')\n word = pattern.sub(lambda pat: pat.group(1).upper(), word, 1)\n results.append(word)\n return map_results(sorted(results, key=len, reverse=False))", "title": "" }, { "docid": "2369fdc1dd7579ca40ecabddb6c9f2f1", "score": "0.59384316", "text": "def add_letters(aliases, prefix, alphabet = 'abcdefghijklmnopqrstuvwxyz', \n name_map = alpha_bravo, language = None):\n if language == None:\n language = all_languages\n for letter in alphabet:\n try:\n name = \"%s\" % name_map[letter]\n spoken = \"%s%s\" % (prefix, name)\n an_LSA = LSAlias(spoken_forms = [spoken], \n meanings = {language: letter}, new_symbol = 'start')\n aliases.add_lsa(an_LSA)\n except KeyError:\n pass", "title": "" }, { "docid": "3b7bae9433957b59d3eec795839990f3", "score": "0.5918187", "text": "def permute(letter_list):\n results = []\n re_str = '^'+''.join(map(lambda w: w+'?', sorted(letter_list)))+'$'\n for word in WORDS:\n letters = ''.join(sorted(word))\n if re.search(re_str, letters):\n results.append(word)\n return map_results(sorted(results, key=len, reverse=True))", "title": "" }, { "docid": "9e1ea29293f50057f692a5a565a305b5", "score": "0.5895219", "text": "def initialize_guess_word(word):\n guess_word = []\n for letter in word:\n guess_word.append('_')\n return guess_word", "title": "" }, { "docid": "0dc4cc056a53fde667d2e79125ad46c8", "score": "0.5859384", "text": "def word(num, alpha, solution):\n word_ = ''\n for letter in str(num):\n word_ += alpha[solution.find(letter)]\n return word_", "title": "" }, { "docid": "dd9d56cdc2a5d5328ab230ee6c846ed4", "score": "0.57951283", "text": "def makeLetters (testcase):\r\n letters = []\r\n complex = False\r\n for i in range (len(testcase)):\r\n if (testcase[i] == '('):\r\n complex = True\r\n word = \"\"\r\n continue\r\n if (testcase[i] == ')'):\r\n complex = False\r\n letters.append (word)\r\n continue\r\n if (complex is True):\r\n word = word + testcase[i]\r\n continue\r\n letters.append (testcase[i])\r\n \r\n return letters, len(letters)", "title": "" }, { "docid": "484732901dc68628d909053692a525c3", "score": "0.57394505", "text": "def find_acrostic(acrostic, worddict):\n\n words = []\n\n for letter in acrostic:\n try:\n words.append(rng().choice(worddict[letter]))\n except KeyError:\n sys.stderr.write(\"No words found starting with \" + letter + \"\\n\")\n sys.exit(1)\n return words", "title": "" }, { "docid": "6ad942c06503ca7b01fa7b26c1900008", "score": "0.57035214", "text": "def get_word_guessed(self, 
guessed_letters):\n return [c if c in guessed_letters else '_' for c in self.word]", "title": "" }, { "docid": "a26d79d94e4591061fb2aa2da47fb9e5", "score": "0.5684471", "text": "def getWordAlphabet(self, length):\n crossproduct = ['']\n for a in range(length):\n n = []\n for c in crossproduct:\n for m in self:\n n.append(m+c)\n crossproduct = n\n return Alphabet(crossproduct, MolType=self.MolType)", "title": "" }, { "docid": "4fb995b8df64b60510d395e1f2f55fc2", "score": "0.5664846", "text": "def make_lowercase(word_list):\n return [word.lower() for word in word_list]", "title": "" }, { "docid": "cfa6e4b094a59d85f2de5236c93c3780", "score": "0.5640064", "text": "def derivate_leet(word):\n leet = {\"a\": \"4\", \"b\": \"8\", \"e\": \"3\", \"g\": \"6\", \"i\": \"1\", \"l\": \"1\", \"o\": \"0\", \"r\": \"2\", \"s\": \"5\", \"t\": \"7\", \"y\": \"7\", \"z\": \"2\"}\n return [''.join(letters) for letters in itertools.product(*({c, leet.get(c, c)} for c in word))]", "title": "" }, { "docid": "cdb1c7a2a8e38c20d7451db125fb99fe", "score": "0.55992925", "text": "def iteration(candidate,sequence,wordsublist):\n \n result = []\n if candidate in wordsublist:\n result.append(candidate)\n from adjacency import newseqs\n newseqlist = newseqs(self.adjacent,sequence)\n \n for newseq in newseqlist:\n newcand = candidate + self.data[newseq[-1]]\n newsublist = [word for word in wordsublist if word.startswith(newcand)]\n if newsublist: \n r = iteration(newcand,newseq,wordsublist)\n if r:\n result += r\n return result", "title": "" }, { "docid": "cdc88af708fb6cf7320968d84fb987ca", "score": "0.55908495", "text": "def group_anagrams_ver1(strs: list) -> list:\n anagrams = []\n for i, word in enumerate(strs):\n if i == 0:\n anagrams.append([word])\n else:\n exist = False\n # Compare a word and the first element in anagram.\n for anagram in anagrams:\n chars = [char for char in word if char in anagram[0]]\n # Add a word to a anagram when have same letters and length\n if len(chars) == len(anagram[0]):\n anagram.append(word)\n exist = True\n else:\n continue\n\n # Add new anagram when a word can not belong to any anagram.\n if exist == False:\n anagrams.append([word])\n\n return anagrams", "title": "" }, { "docid": "eacbc49115492f1a788224ba87c011e6", "score": "0.55877626", "text": "def gen_all_strings(word):\r\n if not word:\r\n return [\"\"]\r\n else:\r\n first = word[0]\r\n rest_strings = gen_all_strings(word[1:]) # this is a recursive call in order to dive all the way to the last letter and then process everything on the way to front\r\n all_strings = [] # we are going to need an extra list to add all the possibilities\r\n\r\n for item in rest_strings: # this loop goes through all the collected items during the recursive process\r\n for index in range(len(item) + 1): # this lop will move through the item and will stick the first\r\n all_strings.append(item[:index] + first + item[index:]) # this is the key part, if the item='on' and first='h' then by moving the index we can put 'h' everywhere to get: 'hon', 'ohn', 'onh']\r\n return rest_strings + all_strings # this is where we merge the results from previous recursion with the current ones\r", "title": "" }, { "docid": "c2409ad79a88e20d1b30837d4993fe08", "score": "0.5550301", "text": "def keepWord(words, letter):\n return [word for word in words if letter in word]", "title": "" }, { "docid": "11853c24316d56c20aacb0370aff8140", "score": "0.5540242", "text": "def word_plays(hand, board_letters):\r\n # find prefix + L + suffix; L from board_letters, rest from hand\r\n 
results = set()\r\n for pre in find_prefixes(hand): #find_prefixes(hand, '', set()):\r\n for L in board_letters:\r\n add_suffixes(removed(hand, pre), pre+L, results)\r\n return results", "title": "" }, { "docid": "c63a758435ed544ff6d9c14847fe519f", "score": "0.55213124", "text": "def greenWord(words, letter, place):\n return [word for word in words if letter in word[place]]", "title": "" }, { "docid": "f880aae50196b92df4e966eba093ee6b", "score": "0.5514716", "text": "def find_anagrams(name, word_list):\n name_letter_map = Counter(name)\n anagrams = []\n for word in word_list:\n test = ''\n word_letter_map = Counter(word.lower())\n for letter in word:\n if word_letter_map[letter] <= name_letter_map[letter]:\n test += letter\n if Counter(test) == word_letter_map:\n anagrams.append(word)\n print(*anagrams, sep='\\n')\n print()\n print(f'Remaining letters = {name}')\n print(f'Number of remaining letters = {len(name)}')\n print(f'Number of remaining (real word) anagrams = {len(anagrams)}')", "title": "" }, { "docid": "28c13ad5d1c9fe542f11f49ccd7f6ef3", "score": "0.5500711", "text": "def partial_word(word, guessed_letters):\n result = ''\n for letter in word:\n if letter in guessed_letters:\n result = result + letter\n else:\n result = result + '_'\n return result", "title": "" }, { "docid": "70ac255489d43506b7b2a092e0079706", "score": "0.55004334", "text": "def getCiphered(self,text):\n res=''\n for letter in [let for let in text.lower() if let in alphabet]:\n res+=self.getCipheredLetter(letter)\n return res", "title": "" }, { "docid": "223a332a700b893416a7401f5498b259", "score": "0.5479251", "text": "def create_search_list(name):\n result = []\n addup = \"\"\n for x in name:\n addup += x\n result.append(addup)\n return result", "title": "" }, { "docid": "bb8e695b595d9f2b84c9ccf93034f987", "score": "0.54773134", "text": "def get_jokes(number, flag=1):\n\n nouns = [\"ะฐะฒั‚ะพะผะพะฑะธะปัŒ\", \"ะปะตั\", \"ะพะณะพะฝัŒ\", \"ะณะพั€ะพะด\", \"ะดะพะผ\"]\n adverbs = [\"ัะตะณะพะดะฝั\", \"ะฒั‡ะตั€ะฐ\", \"ะทะฐะฒั‚ั€ะฐ\", \"ะฟะพะทะฐะฒั‡ะตั€ะฐ\", \"ะฝะพั‡ัŒัŽ\"]\n adjectives = [\"ะฒะตัะตะปั‹ะน\", \"ัั€ะบะธะน\", \"ะทะตะปะตะฝั‹ะน\", \"ัƒั‚ะพะฟะธั‡ะฝั‹ะน\", \"ะผัะณะบะธะน\"]\n\n size = len(nouns)\n if number > size and not flag:\n return print(f\"Impossible to create phrases with non-repeated words. 
\"\n f\"Please, chose number less than {size + 1}.\")\n\n for i in range(number):\n idx_noun, idx_adv, idx_adj = r.randrange(size)\n\n noun = nouns[idx_noun]\n adverb = adverbs[idx_adv]\n adjective = adjectives[idx_adj]\n\n print(f'{noun} {adverb} {adjective}')\n\n if not flag and size:\n # replace selected element with the last element of the list\n # reduce size by one\n nouns[idx_noun] = nouns[size - 1]\n adverbs[idx_adv] = adverbs[size - 1]\n adjectives[idx_adj] = adjectives[size - 1]\n size -= 1", "title": "" }, { "docid": "e902bf9f4a1f3b48b7931e4aacc2f74e", "score": "0.54536694", "text": "def compile_word(word):\n # Your code here.\n result = ''\n i = 0\n for n in word:\n if n in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n if i == 0:\n i += 1\n result = ''.join(['((' for p in range(len(word))]) + result\n else:\n result += '+'\n result += n + ')*10)'\n else:\n result += n\n \n return result", "title": "" }, { "docid": "5ba39ae9f6d84b32188241c0369d4834", "score": "0.54395926", "text": "def gen_all_strings(word):\n if word == \"\":\n return [\"\"]\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n tmp = rest_strings[:]\n for letter in rest_strings:\n for idx in range(len(letter)+1):\n item = letter[:idx] + first + letter[idx:]\n tmp.append(item)\n return tmp", "title": "" }, { "docid": "1833794ca83c60d55e1ddf0c01a2d5ed", "score": "0.54316765", "text": "def generate_key_guesses():\n output = []\n for char_1 in LOWERCASE_ALPHABET:\n for char_2 in LOWERCASE_ALPHABET:\n for char_3 in LOWERCASE_ALPHABET:\n output.append(char_1 + char_2 + char_3)\n return output", "title": "" }, { "docid": "511ad5e26b0958a64ce51d4287c63237", "score": "0.541588", "text": "def _new_letter(self):\n i=0\n done=False\n\n if self._type=='abc':\n while i<26 and not done:\n e=\"%c\"%(i+97)\n if e not in self.positive_letters():\n done=True\n result=[e,\"%c\"%(i+65)]\n i+=1\n elif self._type=='x0':\n i=0\n done=False\n while not done:\n e=\"x%s\"%i\n if e not in self.positive_letters():\n done=True\n result=[e,\"X%i\"%i]\n i+=1\n i=0\n while not done:\n e=\"a%s\"%i\n if e not in self.positive_letters():\n done=True\n result=[e,\"A%i\"%i]\n i+=1\n\n\n return result", "title": "" }, { "docid": "857599f680ee619656fa1a9a24ecae80", "score": "0.540156", "text": "def generate_word_string(word, letters_guessed):\r\n output = []\r\n for letter in word:\r\n if letter in letters_guessed:\r\n output.append(letter.upper())\r\n else:\r\n output.append(\"_\")\r\n return \" \".join(output)", "title": "" }, { "docid": "a9f89ecd91a280afbee1fc6774f65b15", "score": "0.53785896", "text": "def gen_anagram(anagram, possible_words):\n for word in possible_words:\n x = check_word(anagram, word)\n if x == True:\n yield [word]\n elif x:\n for y in gen_anagram(x, possible_words):\n yield [word] + y", "title": "" }, { "docid": "74cbcf670a7dfe33694a3f7bc0572ded", "score": "0.53777665", "text": "def _my_alphabet(az: int):\n alphabet = string.ascii_uppercase\n extended_alphabet = [\"\".join(i) for i in list(itertools.permutations(alphabet, 2))]\n\n if az <= 25:\n sub = alphabet[az]\n else:\n j = az - 26\n sub = extended_alphabet[j]\n\n return sub", "title": "" }, { "docid": "9b2c695d1e09d8f9eeeecbddedaa8dff", "score": "0.53547233", "text": "def gen_candidate(word):\n global matcher, use_automata, bindex, word_dict, bindex_thresh, lang, cand_topk_cur\n if use_automata:\n cands = list(automata.find_all_matches(word, 2, matcher))\n else:\n word_aug = '$' + word + '$'\n idx = range(len(word_aug))\n query = set()\n for tup in 
itertools.izip(idx[:-1],idx[1:]):\n query.add(word_aug[tup[0]]+word_aug[tup[1]])\n candidate = {}\n query = [u for u in query if u in bindex]\n for item in query:\n lst = [word_dict[u] for u in bindex[item] if abs(len(word_dict[u])-len(word)) <= 2]\n for w in lst:\n if w in candidate:\n candidate[w] += 1\n else:\n candidate[w] = 1\n cands = []\n for key, cnt in candidate.iteritems():\n score = float(cnt) / float(len(key) + 1 + len(query) - cnt)\n if score >= bindex_thresh and edit_distance(key, word) <= 2:\n cands.append(key)\n if len(cands) == 0 and word in lang:\n cands.append(word)\n \n cands = [(cand, edit_distance_plus(cand, word)[1]) for cand in cands]\n cands = sorted(cands, key=lambda x: lang[x[0]] + x[1])\n cands = [cand for cand, score in cands[:cand_topk_cur]]\n return cands", "title": "" }, { "docid": "7ca18b8b7dde0b80898c548dbdff49f3", "score": "0.53510773", "text": "def make_namelist(hits, exclude=set(), search=\"\"):\n results = []\n first_letters = [] # List only containing letters in alphabetical order\n current_letterlist = [] # List containing entries starting with the same letter\n current_total = 0\n if search:\n max_len = current_app.config[\"SEARCH_RESULT_SIZE\"] - len(exclude)\n else:\n max_len = None\n for hit in hits[\"hits\"]:\n if hit[\"_id\"] in exclude:\n continue\n # Seperate names from linked names\n is_link = hit[\"_index\"].startswith(current_app.config[\"SKBL_LINKS\"])\n if is_link:\n name = hit[\"_source\"][\"name\"].get(\"sortname\", \"\")\n linked_name = join_name(hit[\"_source\"])\n else:\n name = join_name(hit[\"_source\"], mk_bold=True)\n linked_name = False\n\n liferange = get_life_range(hit[\"_source\"])\n subtitle = hit[\"_source\"].get(\"subtitle\", \"\")\n subtitle_eng = hit[\"_source\"].get(\"subtitle_eng\", \"\")\n subject_id = hit[\"_source\"].get(\"url\") or hit[\"_id\"]\n\n # Get first letter from sort[0]\n firstletter = hit[\"sort\"][1].upper()\n if firstletter not in first_letters:\n if current_letterlist:\n results.append(current_letterlist)\n current_letterlist = []\n first_letters.append(firstletter)\n current_letterlist.append((firstletter, is_link, name, linked_name, liferange, subtitle, subtitle_eng, subject_id))\n current_total += 1\n # Don't show more than SEARCH_RESULT_SIZE number of results\n if max_len and current_total >= max_len:\n break\n\n if current_letterlist:\n # Append last letterlist\n results.append(current_letterlist)\n\n return (first_letters, results)", "title": "" }, { "docid": "276c5ca796502be0a5546a93f513968d", "score": "0.53481287", "text": "def derivate_case(word):\n logging.debug(\"Adding all uppercase and lowercase combinations for '{}'\".format(word))\n return list(map(''.join, itertools.product(*zip(word.upper(), word.lower()))))", "title": "" }, { "docid": "f185b4624268cd129c41a57055df6b77", "score": "0.5336925", "text": "def __init__(self):\n self.alph = {} \n for i in 'abcdefghijklmnopqrstuvwxyz': \n self.alph[i] = []", "title": "" }, { "docid": "0671b93f8581aef2b0422329bf22cd2b", "score": "0.53313017", "text": "def sorted_alphabet(length: int):\n \n alphabet = [string.ascii_lowercase[i] for i in range(length)]\n return alphabet", "title": "" }, { "docid": "4d29a39966658a02fc1ed1f78cc26c0a", "score": "0.5315665", "text": "def gen_all_strings(word):\n if word == \"\":\n return [\"\"]\n else:\n first = word[0]\n rest = word[1:] \n rest_strings = gen_all_strings(rest)\n plus_first = [first]\n for string in rest_strings:\n if len(string) > 0:\n for char in range(len(string)):\n new_var = string[:char] + 
first + string[char:]\n plus_first.append(new_var)\n plus_first.append(string +first) \n return plus_first + rest_strings", "title": "" }, { "docid": "252fd9e85e4d09a49d8fd838b9e209d4", "score": "0.53152364", "text": "def prepositionalPhrase():\n return random.choice(prepositions) + \" \" + nounPhrase()", "title": "" }, { "docid": "36716015087e8636c7f05995eaa324b4", "score": "0.53145623", "text": "def build_lex():\n\tlex = []\n\tfor w, v in asc.items():\n\t\tfor idx, val in enumerate(v):\n\t\t\tif prob(val, sum(v), len(all_meanings))>TAU:\n\t\t\t\tlex.append((w,all_meanings[idx]))\n\treturn lex", "title": "" }, { "docid": "db4153d42eb686a5ce096d0898488d37", "score": "0.53067297", "text": "def take_player_prefix(words):\n return [players_from_lc[x] for x in\n takewhile(lambda x: x in players_from_lc, words)]", "title": "" }, { "docid": "bd932dfd7f39770b0232421bd8de9868", "score": "0.5305146", "text": "def find_words(starting_lv: dict, prefix: str, letters: str = '') -> list:\n answer = []\n for k, v in starting_lv.items():\n if k != 'end' and k != 'final' and k != 'size':\n answer += find_words(v, prefix + letters + k)\n if k == 'end':\n answer.append(prefix + letters)\n return answer", "title": "" }, { "docid": "f3b44a8d066b404f0c01198641bbe013", "score": "0.52978384", "text": "def add_name(self, name):\n if self.name.lower() == \"pinyin\": # for pinyin we can use the syllables instead of the raw letters. \n name = name.split(\" \") # they'll still fall into the \"letter\" category though\n \n for letter in name: \n if letter not in self.letter2index:\n self.letter2index[letter] = self.n_letters\n self.letter2count[letter] = 1\n self.index2letter[self.n_letters] = letter\n self.n_letters += 1\n else:\n self.letter2count[letter] += 1", "title": "" }, { "docid": "cc93dde004c17e3a40e89c19be6627b1", "score": "0.5291399", "text": "def create_gram_letters(content, n):\n gram_list = []\n for word in content:\n for i in range(0, len(word)):\n gram = ''\n for j in range(i, i + n):\n if j < len(word):\n gram += word[j]\n else:\n gram = ''\n gram_list.append(gram)\n return remove_empty_elements(gram_list)", "title": "" }, { "docid": "2b093e46ffe2ef774b0162f44aac1b62", "score": "0.52877545", "text": "def get_alphabet():\n\n alphabet = list()\n for a in range(26):\n alphabet.append(chr(97+a))\n\n return alphabet", "title": "" }, { "docid": "d50ee2f3529856157a6b2c8153517f54", "score": "0.5277904", "text": "def regex_builder(\n possible_letters: list, must_have_letters: defaultdict(int)\n) -> String:\n regex_str = \"\"\n for letter in must_have_letters:\n num_of_letters = f\".*[{letter}]\" * must_have_letters[letter]\n regex_str = regex_str + f\"(?={num_of_letters})\"\n for letters in possible_letters:\n regex_str = regex_str + f\"[{letters}]\"\n return regex_str", "title": "" }, { "docid": "72a1e4db795d2ac033eda2aad5ad9c34", "score": "0.5270549", "text": "def get_anagrams(self, letters):\n logging.debug(\"Looking up in anagram db: \" + letters)\n results = self.__look_up(letters)\n\n # don't return the letters themselves\n if letters in results:\n results.remove(letters)\n return results", "title": "" }, { "docid": "1015ffa85021cb9c9665f12ea7c3a398", "score": "0.5268602", "text": "def get_adjectives_data(first_letter):\n\n # mocked data \n data = [\"happy\", \"sad\", \"ornry\", \"lazy\"] \n\n url = \"%s&sp=%s*\" % (datamuse_api_url, first_letter) \n response = requests.get(url)\n \n status = response.status_code\n if status != 200:\n print(\"ERROR - API response status\" % (status))\n sys.exit(1) \n \n 
data = response.json()\n \n return data", "title": "" }, { "docid": "b727e75f9fe9fd1946d3ba2e5f15fda4", "score": "0.52600765", "text": "def find_likely_letters(coset, alpha, eng_freq):\n \n coset_freq = []\n differences = []\n # WRITE YOUR CODE HERE!\n\n \n firstletter = 0\n secondletter = 1\n # WRITE YOUR CODE HERE!\n \n \n letter1 = alpha[firstletter]\n letter2 = alpha[secondletter]\n return \"the most likely letter is: \" + letter1 + \" followed by: \" + letter2", "title": "" }, { "docid": "c91c0951548c6b5359a2006e6189ed2f", "score": "0.52523494", "text": "def hidden_word(taken_letter, word):\n k = 0\n for i in word: #Iterate over list \"word\"\n\n if taken_letter[k] == 1:\n print(word[k], end=\"\")\n else:\n print(\"_ \", end=\"\")\n k += 1", "title": "" }, { "docid": "43c69316f08809510647bdf70d2202be", "score": "0.52479917", "text": "def inserts(word):\r\n\r\n\tletters = \"ใ…‚ใ…ˆใ„ทใ„ฑใ……ใ…ใ„ดใ…‡ใ„นใ…Žใ…‹ใ…Œใ…Šใ…ใ…ƒใ…‰ใ„ธใ„ฒใ…†ใ…•ใ…‘ใ…ใ…’ใ…”ใ…–ใ…—ใ…›ใ…“ใ…ใ…ฃใ… ใ…œใ…ก\"\r\n\tinsertsret = []\r\n\tfor i in range(len(word)+1):\r\n\t\tleft = word[:i]\r\n\t\tright = word[i:]\r\n\t\tfor l in letters:\r\n\t\t\tinsertsret.append(left + l + right)\r\n\treturn(insertsret)", "title": "" }, { "docid": "0075e7cacbfe05bc4ee0e555d9dd1a2d", "score": "0.52478963", "text": "def build(self):\n while self.words[0] is None and self.words[1] is None:\n self.words[2], self.words[3] = get_homonyms(\"adj\")\n try:\n self.words[0] = Word(self.words[2]).synonyms(partOfSpeech=[\"adj\"])[0]\n except:\n self.words[0] = None\n self.words[1] = None\n continue\n try:\n self.words[1] = Word(self.words[3]).synonyms(partOfSpeech=[\"noun\"])[0]\n except:\n self.words[0] = None\n self.words[1] = None\n continue\n return \"What do you call a(n) \" + self.words[0] + \" \" + self.words[1] + \"? 
A(n) \" + self.words[2] + \" \" + self.words[3] + \".\"", "title": "" }, { "docid": "a19f94fab953eaabc42b8b29f05693d2", "score": "0.52401567", "text": "def replacer(wordlist: Sequence[Sequence[str]]):\n for word in wordlist:\n for i in range(len(word)):\n try:\n word[i] = letters[word[i]]\n except KeyError:\n continue\n return wordlist", "title": "" }, { "docid": "73219a1c6ce138c801684de71ba55f11", "score": "0.52278775", "text": "def actions(self, state):\n alph = \"abcdefghijklmnopqrstuvwxyz\"\n for i in range(len(state)):\n hold = list(state)\n for let in alph:\n hold[i] = let\n word = \"\".join(hold)\n if word in dictionary:\n yield [i,let]", "title": "" }, { "docid": "27dba17f51cc13bdf0b6eda46c4aca88", "score": "0.52242017", "text": "def add_single_letter_words(words):\n words['']=None\n words['i']=None\n words['a']=None\n return words", "title": "" }, { "docid": "271b253f5758e549eac7123dcfb28ecb", "score": "0.52234036", "text": "def prefindAcronym(words, acronym, stopwords):\n \n # Finding the position of the word in the list.''\n index = [i for i,s in enumerate(words) if acronym in s]\n\n if (index):\n indexAcronym = index[0]\n else:\n return \"NA\"\n\n # Find the pre-window\n preWindowFirstIndex = indexAcronym - 2 * len(acronym)\n \n if (preWindowFirstIndex < 0):\n preWindow = words[:indexAcronym]\n else:\n preWindow = words[preWindowFirstIndex:indexAcronym]\n\n #Separate hyphenated words in the list\n preWindowJoin = ' '.join(preWindow)\n preWindowS = re.findall(r'\\w+', preWindowJoin)\n hyphenatedWords = re.findall(r'\\w+-\\w+[-\\w+]*',preWindowJoin)\n # Find the leaders of the pre window\n leaders = [x[0].lower() for x in preWindowS]\n pretypes = []\n for x in preWindowS:\n flagStop = x in stopwords\n if (flagStop):\n pretypes.append('s')\n else:\n flagHyphen = 0\n for word in hyphenatedWords:\n listHyphen = ''.join(word).split('-')\n indexHyphen = None\n if (x in listHyphen):\n flagHyphen = 1\n indexHyphen = listHyphen.index(x)\n if (indexHyphen == 0):\n pretypes.append('H')\n else:\n pretypes.append('h')\n if (not flagHyphen):\n pretypes.append('w')\n\n #X and Y\n X = acronym.lower()\n Y = ''.join(leaders)\n\n # build LCS matrix\n c,b = buildLCSmatrix(X,Y)\n m = len(X)\n n = len(Y)\n \n if (c[m][n]< len(acronym)/2):\n return 'NA'\n PreVectors = parseLCSmatrix(b, 0, 0, m, n, c[m][n], [], [])\n\n precontext='A'\n #postcontext='A'\n PrechoiceVector= None\n #PostchoiceVector= None\n if (not PreVectors):\n return 'NA'\n\n else:\n # Choosing of vectors from the multiple vectors based on number of misses, stopcount, distance and size\n PrechoiceVector = PreVectors[0]\n \n for i in range(1, len(PreVectors)):\n PrechoiceVector = compareVectors(PrechoiceVector, PreVectors[i], pretypes,pretypes)\n\n if (PrechoiceVector== None):\n return'NA'\n\n finalList = []\n \n firstIndex, lastIndex = getFirstAndLastIndex(PrechoiceVector)\n\n countHyphen = 0\n textHyphen = \"\"\n for i,x in enumerate(PrechoiceVector):\n if (i>=firstIndex and i<=lastIndex):\n if (pretypes[i] == 'H' or pretypes[i] == 'h'):\n textHyphen += preWindowS[i]\n if (i+1 < len(pretypes) and pretypes[i+1] == 'h'):\n countHyphen += 1\n textHyphen += '-'\n continue\n\n #Reset the hyphen parameters\n if (countHyphen != 0):\n textJoin = textHyphen\n textHyphen = \"\"\n countHyphen = 0\n else:\n textJoin = preWindowS[i]\n finalList.append(textJoin)\n return ' '.join(finalList)", "title": "" }, { "docid": "e633a27f8e227a7c874617843e933d69", "score": "0.52138424", "text": "def letter(text: Iterable[Iterable[int]], true=1) -> str:\n rows 
= [_format_row(row, true) for row in text]\n text = '\\n'.join(rows)\n solution = mappings[text]\n return solution", "title": "" }, { "docid": "94f0797749a392f73a3379a5efd75374", "score": "0.5206022", "text": "def get_poss_words(self):\n\n for key, val in self.anchor_strings.items():\n row, col = key\n left, right, above, below = val\n # lword, rword, aword, bword = [], [], [], []\n hwords, vwords = [], []\n if left or (right and col + len(right) >= 14):\n hwords = self.check_board(key[0], key[1], \"H\")\n if above or (below and row + len(below) >= 14):\n vwords = self.check_board(key[0], key[1], \"V\")\n\n if any([hwords, vwords]):\n self.anchor_words[key] = [hwords, vwords]", "title": "" }, { "docid": "82a9b6b27bea60609a6d2675171f7b11", "score": "0.5202885", "text": "def bonus(word: str) -> List[str]:\n char_count = len(word) - 1\n search_section = WORDS[char_count]\n\n word_list = []\n\n seen = []\n\n for i in range(char_count):\n pfx = list(word)\n pfx[-1] = '' # neutralize last character\n pfx[i] = '' # neutralize the indexed character\n pfx = ''.join(pfx)\n if pfx not in seen:\n word_list += search_section.get(pfx, [])\n seen.append(pfx)\n\n results = [word2 for word2 in word_list if funnel(word, word2)]\n\n return results", "title": "" }, { "docid": "e831a71193e686cc294c5ef91d69c6ef", "score": "0.52015996", "text": "def find_anagrams(key, words):\n anagrams = []\n key = sorted(key.lower())\n for word in words:\n if key == sorted(word.lower()):\n anagrams.append(word)\n\n return anagrams", "title": "" }, { "docid": "c45c3969693cdc55dd90af570fc01960", "score": "0.5199592", "text": "def get_words(fff, letters):\n\n words = []\n dict = open(fff, 'r', encoding='utf-8')\n content = dict.read()\n words = content.split(\"\\n\")\n dict.close()\n # for i in range(20):\n # print(words[i])\n words_better = []\n # mark = 0\n # print(len(words))\n for i in range(len(words)-1):\n sigh = 0\n mark = 0\n for kill in range(len(words[i])):\n if mark == 32:\n sigh = kill\n break\n mark = ord(words[i][kill])\n if sigh < 7 and words[i][0] in letters:\n words_better.append(words[i])\n lett_word = []\n keys = []\n for i in range(len(words_better)):\n for kill in range(len(words_better[i])):\n mark = 0\n sigh\n for kill in range(len(words_better[i])):\n if mark == 32:\n break\n mark = ord(words_better[i][kill])\n lepta = words_better[i][:kill-1]\n lett_word.append(lepta)\n keys.append(words_better[i][kill:])\n for i in range(len(keys)):\n if \"/n\" in keys[i] or \"noun\" in keys[i]:\n keys[i] = \"noun\"\n elif \"/v\" in keys[i] or \"verb\" in keys[i]:\n keys[i] = \"verb\"\n elif \"adj\" in keys[i] or \"/adj\" in keys[i]:\n keys[i] = \"adjective\"\n elif \"adv\" in keys[i] or \"/adv\" in keys[i] and \"/v\" not in keys[i] and \"/adj\" not in keys[i]:\n keys[i] = \"adverb\"\n result = tuple(zip(lett_word, keys))\n final = []\n for i in range(len(result)):\n if \"noun\" in result[i] or \"adjective\" in result[i] or \"verb\" in result[i] or \"adverb\" in result[i]:\n final.append(result[i])\n return final, len(final)", "title": "" }, { "docid": "784fee70fd7e11049b7714a4fee64384", "score": "0.51947635", "text": "def anagrams_of(string: str):\n if len(string) == 1:\n # if the string is a single character, return a list containing that\n # single character.\n return [string[0]]\n # instantiate an empty placeholder\n collection = []\n # Get all the anagrams of the rest of the string, besides the first\n # character, until the end of the string.\n substring_anagrams = anagrams_of(string[1:])\n\n for substring_anagram 
in substring_anagrams:\n # now, while looping through this new list of sub-anagrams,\n for index in range(len(substring_anagram)+1):\n # loop through each character in this anagram, insert the first\n # character into a place dictated by the index of the sub-anagrams\n anagram = substring_anagram[:index] + \\\n string[0] + substring_anagram[index:]\n # append to our collection\n collection.append(anagram)\n # return the result\n return collection", "title": "" }, { "docid": "356dd960ed66df809ee822abd4bfde72", "score": "0.5183492", "text": "def make_alphabetic(hits, processname, sortnames=False, lang=\"sv\"):\n def fix_lastname(name):\n vonaf_pattern = re.compile(r\"^(%s) \" % \"|\".join(VONAV_LIST))\n name = re.sub(vonaf_pattern, r\"\", name)\n return name.replace(\" \", \"z\")\n\n results = []\n for hit in hits:\n processname(hit, results)\n\n letter_results = {}\n # Split the result into start letters\n for first_letter, result in results:\n if first_letter == \"ร˜\":\n first_letter = \"ร–\"\n if first_letter == \"ร†\":\n first_letter = \"ร„\"\n if first_letter == \"รœ\":\n first_letter = \"Y\"\n if lang == \"en\" and first_letter == \"ร–\":\n first_letter = \"O\"\n if lang == \"en\" and first_letter in \"ร„ร…\":\n first_letter = \"A\"\n if first_letter not in letter_results:\n letter_results[first_letter] = [result]\n else:\n letter_results[first_letter].append(result)\n\n # Sort result dictionary alphabetically into list\n if lang == \"en\":\n collator = icu.Collator.createInstance(icu.Locale(\"en_EN.UTF-8\"))\n else:\n collator = icu.Collator.createInstance(icu.Locale(\"sv_SE.UTF-8\"))\n for _n, items in list(letter_results.items()):\n if sortnames:\n items.sort(key=lambda x: collator.getSortKey(fix_lastname(x[0]) + \" \" + x[1]))\n else:\n items.sort(key=lambda x: collator.getSortKey(x[0]))\n\n letter_results = sorted(list(letter_results.items()), key=lambda x: collator.getSortKey(x[0]))\n return letter_results", "title": "" }, { "docid": "cbd0d847cdb9b03fe057c95bce02b43a", "score": "0.5183092", "text": "def autoHelper(strPattern,guessed, wordList, alphPattern = 'abcdefghijklmnopqrstuvwxyz'):\n guessList = list(guessed)\n alphPattern = guessReplace(guessList, alphPattern)\n alphPattern = \"[\" + alphPattern + \"]\"\n repPattern = strPattern.replace(\"_\",alphPattern)\n repPattern = repPattern + \"$\"\n suggestionWords = suggestionMaker(repPattern,wordList)\n return suggestionWords", "title": "" }, { "docid": "a35e1517bbcc81ca2c3107f1fcf5c5b8", "score": "0.51829505", "text": "def coding_strand_to_AA(dna):\n dna_codons = re.findall('...',dna)\n codon_dict = {}\n i= 0\n for i in range(len(aa)):\n for w in codons[i]:\n codon_dict[w] = aa[i]\n i += 1\n #print codon_dict\n #j = 0\n AA =''\n for w in dna_codons:\n AA = AA+codon_dict[w]\n return AA", "title": "" }, { "docid": "a91f2e5bcd14333623fb6ad8d27b2608", "score": "0.5181118", "text": "def pos_init(seq, aa):\n pos_list = []\n for a in aa:\n for i in range(count_len(seq) + 10):\n temp = a + str(i)\n pos_list.append(temp)\n return pos_list", "title": "" }, { "docid": "cc6a7b95cd15864ffcd8096995b09e3a", "score": "0.51759636", "text": "def conjunctions(s):\r\n cleaned = clean_text(s)\r\n listwords = cleaned.split()\r\n conjunction = []\r\n for i in listwords:\r\n if(i == 'and'):\r\n conjunction += [i]\r\n elif(i == 'for'):\r\n conjunction += [i]\r\n elif(i == 'but'):\r\n conjunction += [i]\r\n elif(i == 'yet'):\r\n conjunction += [i]\r\n elif(i == 'so'):\r\n conjunction += [i]\r\n elif(i == 'or'):\r\n conjunction += [i]\r\n elif(i == 
'nor'):\r\n conjunction += [i]\r\n return conjunction", "title": "" }, { "docid": "28ec14c852f2fdf31792277c5dcebb4c", "score": "0.5175523", "text": "def preprocess(sentence):\n tokens=nltk.word_tokenize(sentence)\n word_list=[]\n for token in tokens:\n for letter in token:\n if letter.isalpha(): \n word_list.append(token.lower())\n break\n return word_list", "title": "" }, { "docid": "d9e797c4491c5be4dff1ebeb3d7772b7", "score": "0.51734036", "text": "def translate(word):\r\n list_voyelle = [\"a\",\"e\",\"i\",\"o\",\"u\",\"y\"]\r\n if isinstance(word,str) and word:\r\n if word[0] in list_voyelle:\r\n return word + \"way\"\r\n else :\r\n result= deque()\r\n result.appendleft(\"ay\")\r\n index_first_vowel = 0\r\n list_consonant = [] #list of consonant to move before the ay\r\n for w in word:\r\n if w not in list_voyelle:\r\n list_consonant.append(w)\r\n index_first_vowel += 1\r\n else:\r\n break\r\n result.appendleft(''.join(list_consonant))\r\n result.appendleft(word[index_first_vowel:])\r\n return ''.join(d)\r\n return ''", "title": "" }, { "docid": "7a607f40764e30c1873fe3aa8408c50f", "score": "0.5172157", "text": "def anchor_generator() -> 'Iterable[str]':\n\n letters = \"abcdefghijklmnopqrstuvwxyz\"\n for letter in letters:\n yield letter\n\n for letter1 in letters:\n for letter2 in letters:\n if letter1 != letter2:\n yield f\"{letter1}{letter2}\"", "title": "" }, { "docid": "e16f9f65ea9aa3ceb93ade6844087102", "score": "0.5170915", "text": "def pet_filter(letter=\"a\"):\n # fmt: off\n pets = [\n \"dog\", \"goat\",\"pig\",\"sheep\",\"cattle\",\"zebu\",\"cat\",\"chicken\",\n \"guinea pig\",\"donkey\",\"duck\",\"water buffalo\",\"western honey bee\",\n \"dromedary camel\",\"horse\",\"silkmoth\",\"pigeon\",\"goose\",\"yak\",\n \"bactrian camel\",\"llama\",\"alpaca\",\"guineafowl\",\"ferret\",\n \"muscovy duck\",\"barbary dove\",\"bali cattle\",\"gayal\",\"turkey\",\n \"goldfish\",\"rabbit\",\"koi\",\"canary\",\"society finch\",\"fancy mouse\",\n \"siamese fighting fish\",\"fancy rat and lab rat\",\"mink\",\"red fox\",\n \"hedgehog\",\"guppy\",]\n # fmt: on\n filtered = []\n\n for i in range(len(pets)):\n if letter in pets[i]:\n filtered.append(pets[i])\n \n return filtered", "title": "" }, { "docid": "c9a49e30738106bddb4b15527baa76d2", "score": "0.5166652", "text": "def expandAcronym(acronymDict, tokens, POStags):\n newTweet = []\n newToken = []\n count = 0\n for i in range(len(tokens)):\n # word = tokens[i].lower().strip(specialChar)\n word = tokens[i].lower()\n if word:\n if word in acronymDict:\n count += 1\n newTweet += acronymDict[word][0]\n newToken += acronymDict[word][1]\n\n else:\n newTweet += [tokens[i]]\n newToken += [POStags[i]]\n return newToken,newTweet, count\n # return tokens, POStags, count", "title": "" }, { "docid": "b209a22f29f15c927a05c73d961f88cd", "score": "0.5160745", "text": "def get_alphabet(lexicon: List[List[str]]) -> List[str]:\r\n return topological_sort(parse_lexicon(lexicon))", "title": "" }, { "docid": "8de2dc60c9bf4c0ab0733d3e188e588e", "score": "0.515897", "text": "def to_letter_rep(self, x):\n if self.windowSize > len(x):\n print \"ERROR: input length too short comparing to the windowSize of \", self.windowSize\n return\n all_saxString = []\n pointers = []\n for i in range(len(x) - (self.windowSize-1)):\n sub_section = x[i:i + self.windowSize]\n #print i\n paaX = self.to_PAA(self.normalize(sub_section))\n #print \"PAA:\",paaX\n saxString = self.alphabetize(paaX)\n all_saxString.append(saxString)\n pointers.append(i)\n return all_saxString, pointers", 
"title": "" }, { "docid": "3cde0a93a19b42ce188fa353770c11fa", "score": "0.5143301", "text": "def ipa_list(self, words_in, stress_marks='place'):\n if type(words_in) == str:\n words = [preserve_punc(w.lower())[0] for w in words_in.split()]\n else:\n words = [preserve_punc(w.lower())[0] for w in words_in]\n cmu = self.get_cmu([w[1] for w in words])\n ipa = cmu_to_ipa(cmu, stress_marking=stress_marks)\n #if keep_punct:\n #ipa = _punct_replace_word(words, ipa)\n return ipa", "title": "" }, { "docid": "ef5ff9f950b5e2a77db51627e0c1cb4d", "score": "0.51346886", "text": "def compile_word(word):\n # Your code here.\n index = 1\n answer = ''\n split_reversed_word = ''.join(reversed(word))\n finding_letters = re.findall('[A-Z]', word)\n if len(word) != len(finding_letters):\n return str(word)\n\n for letter in split_reversed_word:\n multiple = str(10**(abs(index) - 1))\n answer = answer + \"+\" + letter + '*' + multiple\n index = index + 1\n if re.search('[+]', answer):\n answer = answer[1:]\n return str(answer)", "title": "" }, { "docid": "56fd01eeaee7f18a516e20148c3674aa", "score": "0.51302266", "text": "def words_one_ed(str1, input_set=string.ascii_letters):\n \"\"\" words_one_ed(\"abc\", \"xy\") == ['xabc', 'yabc', 'axbc', 'aybc', 'abxc', 'abyc', 'abcx', 'abcy', 'bc', 'ac', 'ab', 'xbc', 'ybc', 'axc', 'ayc', 'abx', 'aby']\"\"\"\n \"\"\" words_one_ed(\"\", \"xy\") == ['x', 'y'] \"\"\"\n \"\"\" words_one_ed(\"\", \"\") == [] \"\"\"\n\n # error checking\n if input_set == None:\n raise ValueError(\"argument input_set is invalid\")\n\n out = []\n\n n = len(str1)\n \n #additions\n for i in range(n+1):\n out.extend(add_char(str1, i, input_set)) # using extend because add_char returns a list\n\n #deletions\n for i in range(n):\n out.append(remove_char(str1, i)) # append because remove_char return a word\n\n #replacements\n for i in range(n):\n out.extend(replace_char(str1, i, input_set)) # using extend because replace_char returns a list\n\n \n return out", "title": "" }, { "docid": "ce41da55edadde8c6f8d27cf72eef381", "score": "0.5119761", "text": "def word(characters: Iterable[Any], true=1) -> str:\n #todo make this take 2D arrays\n characters = [item for item in characters if item != '\\n']\n rows = _chunks(characters, len(characters)//6)\n blocks = (_chunks(row, 5) for row in rows)\n answer = (letter(char, true) for char in zip(*blocks))\n return ''.join(answer)", "title": "" }, { "docid": "d418eb6feb6befd3c08779e155ac0565", "score": "0.5117618", "text": "def _find_words_by_adding(self, input_word):\n adder = lambda word, exp, i: word[:i] + exp + word[i:]\n return self._find_words_by_regex_wildcard_match(input_word, adder)", "title": "" }, { "docid": "2c82e33e50918beadc36f69cb40136fd", "score": "0.5108764", "text": "def _make_letters(self):\n for letter, (score, amount) in LETTERS.items():\n for _ in range(amount):\n letter_sprite = Letter(letter, score, scale=SCALE)\n self.pouch.append(letter_sprite)", "title": "" }, { "docid": "6516cea5bd8e874301925f6959f7dd38", "score": "0.51054734", "text": "def ipa_list_from_outside(cmu_in, stress_marks='place'):\n if type(cmu_in) == str:\n words = [[cmu_in.lower()]]\n else:\n for word in cmu_in:\n words.append(word.lower())\n ipa = cmu_to_ipa(words, stress_marking=stress_marks)\n return ipa", "title": "" }, { "docid": "0ac67cd2e5cf63b98848bcdf1a2aef9e", "score": "0.5103717", "text": "def sequentialWord(word):\r\n wlist = [ord(l) for l in list(word)]\r\n wlist[-1] += 1\r\n inc = False\r\n for i in range(1, len(wlist) + 1):\r\n if inc:\r\n inc = False\r\n wlist[-i] 
+= 1\r\n if wlist[-i] > L_Z:\r\n inc = True\r\n wlist[-i] -= ALPHA_SIZE\r\n rword = \"\".join([chr(l) for l in wlist])\r\n if inc:\r\n rword = \"A\" + rword\r\n return rword", "title": "" }, { "docid": "b89998e0a2e5b858dbf3e19629792460", "score": "0.5103682", "text": "def add_sentence(self, sentence):\n #I think this is where I'll make the \"tier\" system\n #so maybe I should make a dictionary which holds a list of strings, those strings each have\n #their own key - list bit, and that way we start getting a tier system\n\n #the first guy we want to add to the Fstart list, then using that word as a temp variable, we want\n #to look if there's a key with that word (if not then create) and if there is, add the next word to the list\n #then repeat\n\n givenList = sentence.split(\" \")\n #print(newList) to see the chains made\n #this gives us ['a', 'b', 'c', 'd', '.']\n newList = []\n for i in (givenList):\n i.lower()\n while (len(givenList) >= self.n):\n newList.append(givenList[0:self.n])\n givenList.pop(0)\n #this makes it so n is good once again\n #I can use pop to get the first index and then have the rest of the array\n #so make a method that takes in this givenList and demolishes it by putting it in a dictionary\n\n self.add_sentence_helper(newList)\n pass", "title": "" }, { "docid": "07d72bfc196e210e49b71875ac7bf29e", "score": "0.5101687", "text": "def concat_start(text):\n starts = [\"It sounds like \", \"I understand, seems like \", \"I get a sense that \", \"It seems like \", \"I see, so \"]\n return random.choice(starts) + text", "title": "" }, { "docid": "5efdc1e87d334a301228b6b715597f8d", "score": "0.5092662", "text": "def suggest_word(words: list, bias_non_repeats: bool = False) -> string:\n words_list = order_words(words)\n if bias_non_repeats:\n try:\n while letter_repeated(suggestion := words_list.pop(0)[0]):\n print(f\"Skipping Repeated Letters {suggestion}\")\n except IndexError:\n print(f\"End of list with {suggestion}\")\n return suggestion\n return words_list.pop(0)[0]", "title": "" }, { "docid": "6b21511cb2dcb942692c112ebd03d8ab", "score": "0.5090459", "text": "def buildCategoryKeywords(categoryname):\n catKeywords = []\n catKeywords2 =[]\n \n catKeywords = re.findall('[A-Z]+[^A-Z ]*', categoryname)\n\n for word in catKeywords:\n noSpaceWord = word.replace(\" \", \"\")\n catKeywords2.append(noSpaceWord)\n \n return catKeywords2", "title": "" }, { "docid": "f4135767f35fd5e0cc3853366f74c71f", "score": "0.5074687", "text": "def suggestionMaker(repPattern, wordList):\n if not wordList:\n return []\n else:\n if re.match(repPattern, wordList[0]) is None:\n return suggestionMaker(repPattern, wordList[1:])\n else:\n return [wordList[0]] + suggestionMaker(repPattern, wordList[1:])", "title": "" }, { "docid": "d8c5f5a386e1de11f42308057326f147", "score": "0.506348", "text": "def get_available_letters(guessed_letters):\n available_letters = \"\"\n for letter in string.lowercase:\n if letter not in guessed_letters:\n available_letters += letter\n return available_letters", "title": "" }, { "docid": "eefa39f9051c6eb6a43f52041a7ab0b1", "score": "0.50634325", "text": "def generate_prefix_list(self, name, pl):\n me = \" seq %d permit %s\"\n mne = \" seq %d permit %s le %d\"\n r = [\"no ip prefix-list %s\" % name]\n r += [\"ip prefix-list %s\" % name]\n seq = 5\n for prefix, min_len, max_len in pl:\n if min_len == max_len:\n r += [me % (seq, prefix)]\n else:\n r += [mne % (seq, prefix, max_len)]\n seq += 5\n r += [\" exit\"]\n return \"\\n\".join(r)", "title": "" }, { "docid": 
"84eff0141e8fdb797a0e10d4f6be0c5b", "score": "0.50634307", "text": "def setAlpabet(self, listAlpha):\n setAlpha = it.chain.from_iterable(it.combinations(listAlpha, n) for n in range(len(listAlpha)+1))\n for i in setAlpha:\n stringAlpha = \"{\"\n counter = len(i)\n test = 1\n for j in i:\n if counter == test:\n stringAlpha = stringAlpha + j\n continue\n stringAlpha = stringAlpha + j + \", \"\n test += 1\n stringAlpha = stringAlpha + \"}\"\n self.alphabet.append(stringAlpha)", "title": "" }, { "docid": "a8528fc663aad341a101116b8dcd0bab", "score": "0.50566375", "text": "def get_compound_words():\n compound_words = [mixed_words[i] for i in range(len(mixed_words)) if mixed_words[i] not in english_words]\n print(compound_words)", "title": "" }, { "docid": "cbd51e280e0870b3e6604727628f1d88", "score": "0.50561506", "text": "def produce(s): \n lowers =list(ascii_lowercase)\n l=[]\n for i in range(1000):\n ll=[]\n for x in range(len(s)):\n if randint(1,10) < 2 :\n ll.append(s[x])\n else:\n ll.append(choice(lowers ))\n l.append(''.join(ll))\n return l", "title": "" }, { "docid": "e2e329d11a2e1226f752d1fb2b56378f", "score": "0.5054693", "text": "def unscrabble_words(letters, board_letter, length=6):\n puzzle_letters = nltk.FreqDist(letters)\n letter = board_letter\n wordlist = nltk.corpus.words.words()\n return [word for word in wordlist\n if len(word) >= length\n and letter in word\n and nltk.FreqDist(word) <= puzzle_letters]", "title": "" }, { "docid": "d06f463e68cd53c818896916381e4721", "score": "0.5054659", "text": "def find_anagrams(word: str, candidates: list[str]) -> list[str]:\n return [\n candidate for candidate in candidates if is_anagram(word, candidate)\n ]", "title": "" }, { "docid": "d331281e8fcaae068dddfb7820b2bed3", "score": "0.50487", "text": "def dene(wordlist): \n\n lis=[]\n first=wordlist[0]\n lis.append(first)\n for i in wordlist:\n if not i in lis:\n lis.append(i)\n else:\n pass\n return lis\n\n\n doctest.testmod()", "title": "" }, { "docid": "a8c15b83be673147d29272e797a87c01", "score": "0.5045994", "text": "def alphabetize(self,paaX):\n alphabetizedX = np.zeros(len(paaX))\n for i in range(len(paaX)):\n alphabetizedX[i] = np.sum(self.beta<=paaX[i])+1\n \n return alphabetizedX", "title": "" }, { "docid": "9e4b05ec35202c504b0e47a160b2515b", "score": "0.50431824", "text": "def convert_to_word_mash(list_of_songs):\n all_words = []\n for i in range(len(list_of_songs)): #goes through the list of songs and gets the lyrics for each song\n lyric = all_songs[list_of_songs[i]]\n song_words = remove_repeats(re.findall(\"[a-zA-Z']+\",lyric))\n all_words +=song_words \n return all_words", "title": "" }, { "docid": "05c6e72fed5dfceb22bea6e87a516dee", "score": "0.50408363", "text": "def add_new_letter(self):\n new_letter=self._new_letter()\n self._positive.append(new_letter[0])\n self._negative.append(new_letter[1])\n self._inverse[new_letter[0]]=new_letter[1]\n self._inverse[new_letter[1]]=new_letter[0]\n return new_letter", "title": "" }, { "docid": "10139a03854db4a00ab657bc1d736a16", "score": "0.5039186", "text": "def preprocessing(sample):\n List, length = [], len(sample)\n \n for i in range(length):\n string = re.sub(r'[^a-zA-Z]', \"\", sample[i])\n\n if string:\n List.append(string)\n return List", "title": "" }, { "docid": "32822169b6f92cbfeb9d71250b51d8db", "score": "0.50382125", "text": "def nounPhrase():\n return random.choice(articles) + \" \" + random.choice(nouns)", "title": "" } ]
6bd4008d04365eca02543aa406daab14
Get or set str value for 'defaultVRDEExtPack'. The name of the extension pack providing the default VRDE. This attribute is for choosing between multiple extension packs providing VRDE. If only one is installed, it will automatically be the default one. The attribute value can be empty if no VRDE extension pack is installed. For details about the VirtualBox Remote Desktop Extension and how to implement one, please refer to the VirtualBox SDK.
[ { "docid": "9eb2b1cd771ed8dc51fd8f17732ada53", "score": "0.8275803", "text": "def default_vrde_ext_pack(self):\n ret = self._get_attr(\"defaultVRDEExtPack\")\n return ret", "title": "" } ]
[ { "docid": "003848579f7abc97ed2c7d5cde483606", "score": "0.6818636", "text": "def vrde_ext_pack(self):\n ret = self._get_attr(\"VRDEExtPack\")\n return ret", "title": "" }, { "docid": "18bc3517be47d423160a4da84adfadec", "score": "0.5376459", "text": "def part_component_default():\n\n return InvenTreeSetting.get_setting('PART_COMPONENT')", "title": "" }, { "docid": "eb43d73e5683dcf1f7b2eb6fb2a094fe", "score": "0.5016072", "text": "def _get_default_extension():\n return [\n \"cln\",\n \"clncb\",\n \"clnhd\",\n \"clndd\",\n \"clnib\",\n \"clncn\",\n \"clnmb\",\n ]", "title": "" }, { "docid": "cc21a84f0fadf3f7c27f5e12759801d0", "score": "0.4999456", "text": "def default() -> str:\n return config.get(\"garden.name\")", "title": "" }, { "docid": "6ec7633b629a695b1d5b6bd376efe1cb", "score": "0.48878378", "text": "def entity_registry_enabled_default(self):\n return STICK_API[self._api_type][ATTR_ENABLED_DEFAULT]", "title": "" }, { "docid": "0e20f75e560dc8929c4d53c3f97904fd", "score": "0.4806914", "text": "def shell32_SHAddDefaultPropertiesByExt(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pszExt\", \"pPropStore\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "title": "" }, { "docid": "6587f9f8f3c061a810551b474c782c51", "score": "0.4740945", "text": "def get_packager():\n common_packager = get_rc('common_packager')\n if common_packager:\n return common_packager\n #TODO:cache result by current env.host_string so we can handle multiple hosts with different OSes\n with settings(warn_only=True) as a, hide('running', 'stdout', 'stderr', 'warnings') as b:\n ret = _run('cat /etc/fedora-release')\n if ret.succeeded:\n common_packager = YUM\n else:\n ret = _run('cat /etc/lsb-release')\n if ret.succeeded:\n common_packager = APT\n else:\n for pn in PACKAGERS:\n ret = run_or_dryrun('which %s' % pn)\n if ret.succeeded:\n common_packager = pn\n break\n if not common_packager:\n raise Exception('Unable to determine packager.')\n set_rc('common_packager', common_packager)\n return common_packager", "title": "" }, { "docid": "d356f9cd60255073411650607ca6f4b5", "score": "0.47355983", "text": "def get_extension(name, default=0):\n # Decipher name and extension from name[EXT]\n search = re.search(r'(.*)\\[(.*)\\]', name)\n if search:\n bname, ext = search.groups()\n else:\n bname, ext = name, default\n\n try:\n ext = int(ext) # ext is an integer\n except ValueError:\n ext = ext.upper() # ext is a string\n\n return bname, ext", "title": "" }, { "docid": "255dde06e25349f0faaf2e5783663a86", "score": "0.4727744", "text": "def default_version(self) -> str:\n return pulumi.get(self, \"default_version\")", "title": "" }, { "docid": "ebfde7510d841cd7c2a5cb87a9329ea9", "score": "0.4722298", "text": "def extn(self):\n\t\tif self.lang:\n\t\t\treturn self.LANG_EXTS[self.lang]\n\n\t\treturn None", "title": "" }, { "docid": "af894fcecabc6847b072ce745357870d", "score": "0.4696472", "text": "def _get_hw_version(self) -> str:\n return self._quantsim_configs[ConfigDictKeys.DEFAULTS].get(ConfigDictKeys.HW_VERSION, 'default')", "title": "" }, { "docid": "61f16e21996314b0abcecb6d27525d65", "score": "0.468331", "text": "def get_default_version_hostname():\n\n\n\n\n\n return os.getenv('DEFAULT_VERSION_HOSTNAME')", "title": "" }, { "docid": "7aa5791f836330472b526a9a45888cbc", "score": "0.46653485", "text": "def _get_config_default_value(elem):\n if elem == \"base_paths\":\n em.showError(\"base_paths value needed\", let_copy=False)\n elif elem == \"ignored_folders\":\n return []\n 
elif elem == \"out_path\":\n return f\"{os.getcwd()}{os.sep}stored_data\"\n elif elem == \"file_extensions\":\n return [\"txt\", \"pdf\", \"docx\", \"doc\", \"csv\"]\n elif elem == \"size_threshold_in_MB\":\n return 50", "title": "" }, { "docid": "bdc8da788c2f6fb3e350d253d6a51e6f", "score": "0.46579105", "text": "def installed_version(self) -> str | None:\n return self._addon_data[ATTR_VERSION]", "title": "" }, { "docid": "cfcf7463b88eb8dc6d0f75cf4e766888", "score": "0.46538925", "text": "def default_frontend(self):\n ret = self._get_attr(\"defaultFrontend\")\n return ret", "title": "" }, { "docid": "cfcf7463b88eb8dc6d0f75cf4e766888", "score": "0.46538925", "text": "def default_frontend(self):\n ret = self._get_attr(\"defaultFrontend\")\n return ret", "title": "" }, { "docid": "fe7e534d4af948d4faf92484b2ab52af", "score": "0.46388832", "text": "def get_version(self):\r\n base = super(uss_port_comfort_ext, self).get_version()\r\n extend = \"Ext. Rev. %d\" % self._revision\r\n return \"%s %s\" % (base, extend)", "title": "" }, { "docid": "6c5fb2f328fd327e3c86401818db9742", "score": "0.46310738", "text": "def extension(self) -> Optional[str]:\n return pulumi.get(self, \"extension\")", "title": "" }, { "docid": "b549b4a9267e9cc113e8542faca1029e", "score": "0.46105722", "text": "def getExtensionString(self):\r\n pmceString = self.EXTENSION_NAME\r\n if self.acceptNoContextTakeover:\r\n pmceString += \"; client_no_context_takeover\"\r\n if self.acceptMaxWindowBits:\r\n pmceString += \"; client_max_window_bits\"\r\n if self.requestNoContextTakeover:\r\n pmceString += \"; server_no_context_takeover\"\r\n if self.requestMaxWindowBits != 0:\r\n pmceString += \"; server_max_window_bits=%d\" % self.requestMaxWindowBits\r\n return pmceString", "title": "" }, { "docid": "a98efd9eb247039b9b4dc35f267aee93", "score": "0.46057513", "text": "def installed_version(self) -> str:\n return self.robot.firmware", "title": "" }, { "docid": "95ed87e5efbf0d882059ddb10a740199", "score": "0.45730048", "text": "def get_packager(self):\n return self.os.packager", "title": "" }, { "docid": "1ca82067c67fea535093ef327125137b", "score": "0.4554257", "text": "def get_platform_default(self, descriptor):\r\n return platformcfg.get(descriptor)", "title": "" }, { "docid": "165d6ad0730d031446452564ce7a36e3", "score": "0.4526275", "text": "def GetEnvarDefaults(var):\n var = str(var)\n val = os.getenv(var)\n if not val:\n val = ENVAR_DEFAULTS.get(var)\n if val is not None:\n val = os.path.expandvars(val)\n return val", "title": "" }, { "docid": "85fbea45f1e39574a1b26249a7693a03", "score": "0.45257086", "text": "def getExtensionString(self):\r\n pmceString = self.EXTENSION_NAME\r\n if self.offer.requestNoContextTakeover:\r\n pmceString += \"; server_no_context_takeover\"\r\n if self.offer.requestMaxWindowBits != 0:\r\n pmceString += \"; server_max_window_bits=%d\" % self.offer.requestMaxWindowBits\r\n if self.requestNoContextTakeover:\r\n pmceString += \"; client_no_context_takeover\"\r\n if self.requestMaxWindowBits != 0:\r\n pmceString += \"; client_max_window_bits=%d\" % self.requestMaxWindowBits\r\n return pmceString", "title": "" }, { "docid": "0f53b3b68236d68044afbffd35e56b90", "score": "0.44952396", "text": "def part_purchaseable_default():\n\n return InvenTreeSetting.get_setting('PART_PURCHASEABLE')", "title": "" }, { "docid": "b38c606f84ed7166228bab0e60cdcf89", "score": "0.44872555", "text": "def get_extension(packaging):\n\n extensions = set([\n 'ejb3',\n 'ear',\n 'aar',\n 'apk',\n 'gem',\n 'jar',\n 'nar',\n 'pom',\n 'so',\n 
'swc',\n 'tar',\n 'tar.gz',\n 'war',\n 'xar',\n 'zip'\n ])\n\n if packaging in extensions:\n return packaging\n else:\n return 'jar'", "title": "" }, { "docid": "7c20c242d49afba28383e93ed82b8f5d", "score": "0.44521064", "text": "def binary_dv(self):\n return '{}{}'.format(self.path, self._vstr(consider_default_ver=True))", "title": "" }, { "docid": "5ad5e2b4fbf967104cd0a12a0e3b22c8", "score": "0.4443943", "text": "def get_default(cls):\n return cls(\"Unknown\", \"Unknown\", str(MimeType.get_default()))", "title": "" }, { "docid": "8e0e3cb023cc143bb45556a9758a3782", "score": "0.44284514", "text": "def vrde_module(self):\n ret = self._get_attr(\"VRDEModule\")\n return ret", "title": "" }, { "docid": "6a6b5af49c8d87d3dfd4acadd37af8f9", "score": "0.44211268", "text": "def pack_name(self):\n return os.path.splitext(self.current_pack)[0]", "title": "" }, { "docid": "c0619af7d0cf3528667162baa6e7974c", "score": "0.44193092", "text": "def min_version(self):\n try:\n mv = str(self._min_version)\n except AttributeError:\n mv = 'any'\n return mv", "title": "" }, { "docid": "2d2b196622eb0394174359ac4ca5ee58", "score": "0.44095", "text": "def loadExtensionSetting(self, name):\n # type: (str) -> Union[str,None]", "title": "" }, { "docid": "039f3d962bb14f2ecffb49b441d31a32", "score": "0.4407809", "text": "def value_or_global_default(self, python_setup: PythonSetup) -> Tuple[str, ...]:\n return python_setup.compatibility_or_constraints(self.value)", "title": "" }, { "docid": "039f3d962bb14f2ecffb49b441d31a32", "score": "0.4407809", "text": "def value_or_global_default(self, python_setup: PythonSetup) -> Tuple[str, ...]:\n return python_setup.compatibility_or_constraints(self.value)", "title": "" }, { "docid": "dcd607e9f15eec9e297db7f96151798f", "score": "0.44073936", "text": "def minimum_engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum_engine_version\")", "title": "" }, { "docid": "fb5e0b33fb9699f647ee4e5b1c4f43a3", "score": "0.4396496", "text": "def minimum_engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"minimum_engine_version\")", "title": "" }, { "docid": "8da49c0a2ce2c9901cad4652b572c82c", "score": "0.4386421", "text": "def recommended_virt_ex(self):\n ret = self._get_attr(\"recommendedVirtEx\")\n return ret", "title": "" }, { "docid": "5f93d114d104a8ad1501cfc6702ab6ce", "score": "0.43824837", "text": "def get(self, name: str, default=None):\n try:\n return self._local_env.__getattribute_(name)\n except Exception:\n return default", "title": "" }, { "docid": "886adb520eda98c6707724c77ec36a2e", "score": "0.43764082", "text": "def product_name(self) -> Optional[str]:\n return self.descriptor.product_name", "title": "" }, { "docid": "95245bad00c88f63bb99fb19b182d037", "score": "0.4375977", "text": "def extType(self):\r\n return ExtensionType.server_name", "title": "" }, { "docid": "5b8aa0f07b5f2f40582461e6091fd2b4", "score": "0.43613386", "text": "def get_value(self, default: Optional[str] = None) -> Optional[str]:\n return self._storage.get(self.key, default)", "title": "" }, { "docid": "a1143a6dfdf4bbe8f62897aee2446ddb", "score": "0.43558732", "text": "def get_extension (self):\n ext = self.filetype.split ('.', 1)[0]\n if ext == 'kindle':\n ext = 'mobi'\n return ext", "title": "" }, { "docid": "df8cc41530de42a3488d349e9fb54126", "score": "0.43500814", "text": "def _get_ext_community_set_name(self):\n return self.__ext_community_set_name", "title": "" }, { "docid": "c9377cdb0482cb5b12ecc8235c70a050", "score": "0.43322602", "text": "def 
extType(self):\r\n\r\n return ExtensionType.srp", "title": "" }, { "docid": "ef35420009a798a4a0fb33557dd7299f", "score": "0.43299398", "text": "def set_v3ext(self, value):\n self._config['v3ext'] = value\n return self", "title": "" }, { "docid": "f9d9bf93073235ec14b358fa2c2e8841", "score": "0.4323169", "text": "def noto_tools(default=''):\n if not default:\n default = DEFAULT_ROOT\n return values.get('noto_tools', default)", "title": "" }, { "docid": "7294a6613286e1b56c5891e2d31b2391", "score": "0.4320971", "text": "def get_opt_val(obj_pyxb, attr_str, default_val=None):\n try:\n return get_req_val(getattr(obj_pyxb, attr_str))\n except (ValueError, AttributeError):\n return default_val", "title": "" }, { "docid": "3c367a8bf397b9d437679ee2d9ed28ae", "score": "0.4320187", "text": "def getDefault(self, content):\n return self.field.get_value('default', content=content)", "title": "" }, { "docid": "c08fd1634b3475fb8d094afc762e2464", "score": "0.43154708", "text": "def part_salable_default():\n\n return InvenTreeSetting.get_setting('PART_SALABLE')", "title": "" }, { "docid": "bf468f69c619acbd0faba22538191b0b", "score": "0.4314294", "text": "def ext(self) -> str:\n return self._ext", "title": "" }, { "docid": "70eec2a2510aa3ca0d85a588e372d0a3", "score": "0.43084416", "text": "def installed_version(self) -> str | None:\n if isinstance(self.api.versions, dict):\n return self.entity_description.installed_version(self.api.versions)\n return None", "title": "" }, { "docid": "c0f3c8980878e6f0d4cb018dd469e8eb", "score": "0.43076506", "text": "def LsbReleaseValue(self, key, default):\n lines = self.GetFileContents('/etc/lsb-release').split('\\n')\n for l in lines:\n m = re.match(r'([^=]*)=(.*)', l)\n if m and m.group(1) == key:\n return m.group(2)\n return default", "title": "" }, { "docid": "050d559af9852123b7bb6c2e424bb497", "score": "0.42991483", "text": "def get_or_create_default_storage_group(\n self, serial_number, srp, slo, workload, extra_specs,\n do_disable_compression=False, is_re=False, rep_mode=None):\n storagegroup, storagegroup_name = (\n self.rest.get_vmax_default_storage_group(\n serial_number, srp, slo, workload, do_disable_compression,\n is_re, rep_mode))\n if storagegroup is None:\n self.provision.create_storage_group(\n serial_number, storagegroup_name, srp, slo, workload,\n extra_specs, do_disable_compression)\n else:\n # Check that SG is not part of a masking view\n LOG.info(\"Using existing default storage group\")\n masking_views = self.rest.get_masking_views_from_storage_group(\n serial_number, storagegroup_name)\n if masking_views:\n exception_message = (_(\n \"Default storage group %(sg_name)s is part of masking \"\n \"views %(mvs)s. 
Please remove it from all masking views\")\n % {'sg_name': storagegroup_name, 'mvs': masking_views})\n LOG.error(exception_message)\n raise exception.VolumeBackendAPIException(\n message=exception_message)\n # If qos exists, update storage group to reflect qos parameters\n if 'qos' in extra_specs:\n self.rest.update_storagegroup_qos(\n serial_number, storagegroup_name, extra_specs)\n\n return storagegroup_name", "title": "" }, { "docid": "f6f09f42a78c13f6afd78bb08a0aa291", "score": "0.42950386", "text": "def ext(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"ext\")", "title": "" }, { "docid": "05c3ba25858bbcc08a1b21c03ab751bd", "score": "0.42845514", "text": "def get_package(config):\n\n #split the device string with \"-\" and get the second value\n package = config[\"device\"].split(\"-\")[1]\n package = package.strip()\n return package", "title": "" }, { "docid": "bf04414b08e521f1f9660cf6399fe91b", "score": "0.4284026", "text": "def mediapackage(self) -> Optional[str]:\n return pulumi.get(self, \"mediapackage\")", "title": "" }, { "docid": "4f7251a11f8a751a777e1eb6776bcdc6", "score": "0.4282495", "text": "def recommended_firmware(self):\n ret = self._get_attr(\"recommendedFirmware\")\n return FirmwareType(ret)", "title": "" }, { "docid": "f510f8f07062e67c4f4bcb41803dd0f6", "score": "0.42763117", "text": "def getVersionToolkitString() -> \"char const *\":\n return _soqt.SoQt_getVersionToolkitString()", "title": "" }, { "docid": "ab42bcb8a354163616328f97b8b6a453", "score": "0.42755243", "text": "def ext(self) -> str:\n pass", "title": "" }, { "docid": "369cb8393b12ac1f0e9da879bc74b069", "score": "0.4267433", "text": "def getGenXML(self):\n\t\tgen_xml\t\t= self.getEnvironmentVariable( name='GEN_XML', default='NULL' )\n\t\treturn gen_xml", "title": "" }, { "docid": "4183f0d847c4cb25cb7542b544b17e4c", "score": "0.42558315", "text": "def _set_ext_community_set_name(self, v, load=False):\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"ext-community-set-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp-policy', defining_module='openconfig-bgp-policy', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ext_community_set_name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"ext-community-set-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp-policy', defining_module='openconfig-bgp-policy', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__ext_community_set_name = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "d00a0e36cf384bbf142d75cff5f8a2de", "score": "0.42479342", "text": "def get_package():\n pkg = vim.eval(\"s:ctx.swank_package\")\n if pkg == '':\n return 'nil'\n else:\n return requote(pkg)", "title": "" }, { "docid": "6a7cadd973d273d46b8a38e293702ea4", "score": "0.42373696", "text": "def firmware_version(self) -> Optional[str]:\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "4d030965021282263e5b16f5515240f7", "score": "0.42341885", "text": "def get_extension_name():\n\twith open('manifest.json', 'r', encoding='utf-8-sig') as file:\n\t\tmanifest = json.load(file)\n\n\tname = manifest['name'] if 'name' in manifest else 
DEFAULT_NAME\n\n\t# If the extension name is localized, grab the name from the default locale\n\tmatch = re.match(r'__MSG_(.+)__', name)\n\tif match:\n\t\tlocale = manifest['default_locale'] if 'default_locale' in manifest else 'en_US'\n\t\twith open(os.path.join('_locales', locale, 'messages.json'), 'r', encoding='utf-8-sig') as file:\n\t\t\tmessages = json.load(file)\n\n\t\tkey = match.group(1)\n\t\tname = messages[key]['message'] if key in messages else DEFAULT_NAME\n\n\treturn name", "title": "" }, { "docid": "e5563a057bd5a6fd77fda9781418d631", "score": "0.42310822", "text": "def pre_package_venv_disabled(configurator, question):\n default = get_mrbob_config_variable(\"package.venv.disabled\", home_path)\n if default and question:\n question.default = default", "title": "" }, { "docid": "2a32f8d80617211672cafa8670257352", "score": "0.4217432", "text": "def firmware_version(self) -> Optional[str]:\n raise NotImplementedError # pragma: nocover", "title": "" }, { "docid": "b76d5c0351673b343beb4e4422381c6b", "score": "0.42146048", "text": "def pkg_mgmt_driver(self):\n if not self.__pkg_mgmt_driver:\n self.__pkg_mgmt_driver = lm_drivers.EtsiPackageMgmtDriver(self.env.api_address, self.__get_lm_security_ctrl())\n return self.__pkg_mgmt_driver", "title": "" }, { "docid": "470c22ae5e0d45a52d787bbe253fc1d7", "score": "0.42055178", "text": "def get_attrib(self, entity: 'DXFEntity', key: str, default: Any = DXFValueError) -> 'TagValue':\n if self.xtype is XType.callback:\n return self.get_callback_value(entity)\n try: # No check if attribute is valid for DXF version of drawing, if it is there you get it\n return self._get_dxf_attrib(entity.tags)\n except DXFValueError:\n if default is DXFValueError:\n # no DXF default values if DXF version is incorrect\n if self.dxfversion > DXF12 and entity.drawing.dxfversion < self.dxfversion:\n msg = \"DXFAttrib '{0}' not supported by DXF version '{1}', requires at least DXF version '{2}'.\"\n raise DXFValueError(msg.format(key, entity.drawing.dxfversion, self.dxfversion))\n result = self.default # default value defined by DXF specs\n if result is not None:\n return result\n else:\n raise DXFValueError(\"DXFAttrib '%s' does not exist.\" % key)\n else:\n return default", "title": "" }, { "docid": "2710250b223f8e66740c1165fb93e6a2", "score": "0.42036238", "text": "def get_opt_attr(obj_pyxb, attr_str, default_val=None):\n v = getattr(obj_pyxb, attr_str, default_val)\n return v if v is not None else default_val", "title": "" }, { "docid": "9499bbf3818efae0ddb113e10d5c3155", "score": "0.42032725", "text": "def default_value(self) -> Optional[str]:\n return pulumi.get(self, \"default_value\")", "title": "" }, { "docid": "eefa2981b51ca58b2f10a7c4e9b2b51b", "score": "0.4199392", "text": "def get_default_option(cls) -> str:\n return cls.CYCLE_PARADE", "title": "" }, { "docid": "e234771b1a727101613545e5a26b5b6a", "score": "0.41908115", "text": "def _get_default_dev(self):\n routes = self._get_routes()\n for route in routes:\n if not route.startswith('default'):\n continue\n\n m = re.match(r'.+?\\bdev\\s+([^\\s]+?)\\b', route)\n if m is not None:\n return m.group(1)", "title": "" }, { "docid": "134fed625a55a332db5120d99febb43b", "score": "0.418747", "text": "def get_support_directory():\r\n from miro import app, prefs\r\n return app.config.get(prefs.SUPPORT_DIRECTORY)", "title": "" }, { "docid": "515577c45a8d8e87874b6c87eaecb858", "score": "0.41857684", "text": "def _get_string_tag(opt_bytes_value, default=None):\n if opt_bytes_value is None:\n return default\n try:\n 
return opt_bytes_value.decode()\n except UnicodeDecodeError:\n return opt_bytes_value.decode(encoding=sys.getdefaultencoding())", "title": "" }, { "docid": "944b1a2bdfe23e422edee00fcf003078", "score": "0.41857556", "text": "def _set_software_version(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"software-version\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform', defining_module='openconfig-platform', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"software_version must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"software-version\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform', defining_module='openconfig-platform', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__software_version = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "d19be2e1a132b0bb69bb382ddfa01976", "score": "0.41812828", "text": "def get_default_path(self):\n return self._ide.default_installed_path", "title": "" }, { "docid": "284b94b9cb8de2f4fdcecb61ba886bfa", "score": "0.41681948", "text": "def is_default_version(self) -> Optional[bool]:\n return pulumi.get(self, \"is_default_version\")", "title": "" }, { "docid": "a4f02d4cac304a7a00164dcb3e6e87f1", "score": "0.41680667", "text": "def devname(self):\n if self.__name is None:\n idn = self.qIDN().upper()\n if 'PI-E816' in idn:\n self.__name = 'E-816'\n elif 'DIGITAL PIEZO CONTROLLER' in idn:\n self.__name = 'E-710'\n else:\n self.__name = idn.split(',')[1].strip()\n debug('GCSCommands.devname: set to %r', self.__name)\n return self.__name", "title": "" }, { "docid": "5d9f0ebc71410114a31522c0f912251f", "score": "0.4165989", "text": "def packageName(self):\n if self.createPackageCheckBox.isChecked():\n return self.packageNameEdit.text()\n else:\n return \"\"", "title": "" }, { "docid": "d463e091e39cd226a6be25d394c8f120", "score": "0.4164496", "text": "def default_env_spec_name(self):\n return self._updated_cache().default_env_spec_name", "title": "" }, { "docid": "8021f815bab2a5f6a29eab373b142ce8", "score": "0.416154", "text": "def _set_firmware_version(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"firmware-version\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform', defining_module='openconfig-platform', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"firmware_version must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"firmware-version\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform', defining_module='openconfig-platform', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__firmware_version = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "66ac2e7dca21512d7bf2d0520ff45921", "score": 
"0.41609257", "text": "def get_ext_filename (self, ext_name):\r\n\r\n from distutils.sysconfig import get_config_var\r\n ext_path = string.split(ext_name, '.')\r\n # OS/2 has an 8 character module (extension) limit :-(\r\n if os.name == \"os2\":\r\n ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]\r\n # extensions in debug_mode are named 'module_d.pyd' under windows\r\n so_ext = get_config_var('SO')\r\n if os.name == 'nt' and self.debug:\r\n return apply(os.path.join, ext_path) + '_d' + so_ext\r\n return apply(os.path.join, ext_path) + so_ext", "title": "" }, { "docid": "26d09454edd78212381387feef18fe8d", "score": "0.41602537", "text": "def str_ydk_version(self):\n return \"%s.%s.%s\" % self.ydk_version", "title": "" }, { "docid": "c04d82124bd65937124e299abd51141f", "score": "0.41552472", "text": "def get_frontend():\r\n try:\r\n from miro import plat\r\n return plat.FRONTEND.lower()\r\n except (ImportError, AttributeError):\r\n return \"unknown\"", "title": "" }, { "docid": "1efb2f1bb1b3da74a2e9519a845419c5", "score": "0.4144774", "text": "def default(self):\n return self.config.get('default', None)", "title": "" }, { "docid": "0f7608da5c7de38f348fa7490f8a9279", "score": "0.41438654", "text": "def additional_extensions(self) -> Optional[Sequence['outputs.CertificateTemplatePredefinedValuesAdditionalExtension']]:\n return pulumi.get(self, \"additional_extensions\")", "title": "" }, { "docid": "4e709251ed10d2da89b4126a8b1c6876", "score": "0.41431698", "text": "def dmin(self):\n return self._cfg.get('material:dmin', DMIN_DFLT)", "title": "" }, { "docid": "d8d99f4d93d5351bbf54adcfcc8b3c1d", "score": "0.4142372", "text": "def use_server_default(self) -> Optional[str]:\n return pulumi.get(self, \"use_server_default\")", "title": "" }, { "docid": "dca3293b550509d9e7cfc461ee0439ef", "score": "0.4137285", "text": "def extType(self):\r\n return ExtensionType.tack", "title": "" }, { "docid": "77b4f01d88072ed272fb63551687e078", "score": "0.41369027", "text": "def shortname(self):\n return '%s-%s' % (self.product, self.version)", "title": "" }, { "docid": "50313cb039c2b999c310f49ae5afe506", "score": "0.41368568", "text": "def python_version(self):\n if self.environment_yaml:\n return super(PythonBuildPack, self).python_version\n else:\n return super(RBuildPack, self).python_version", "title": "" }, { "docid": "d5918c3e49e17efe336c099a08a29634", "score": "0.41354007", "text": "def default(self) -> Variant:\n for v in self._versions.values():\n if len(v) > 0:\n return v[0]\n\n raise ValueError(\"No default version for this context-aware source\")", "title": "" }, { "docid": "6c4ef4d3711bce908dd4d55fba54a27b", "score": "0.41330403", "text": "def _get_deployment_flavor():\r\n flavor = cfg.CONF.paste_deploy.flavor\r\n return '' if not flavor else ('-' + flavor)", "title": "" }, { "docid": "9be5ffaf3c21f80be7ebb0cef62838fe", "score": "0.41305384", "text": "def default_value(self) -> str:\n return pulumi.get(self, \"default_value\")", "title": "" }, { "docid": "44af18e37ec77710b91d657bd347da6b", "score": "0.41294044", "text": "def disk_driver_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_driver_version\")", "title": "" }, { "docid": "2a45db9e4e4372f3d648a83ab22969c8", "score": "0.4123765", "text": "def installed_version(self) -> str | None:\n return self._api.information.version_string # type: ignore[no-any-return]", "title": "" }, { "docid": "656246409e16a1e11fccd96f98ec7651", "score": "0.4123116", "text": "def defaultData(name):\n return '0' if name == 
'vx_enum' else TypeDef.get(name).getDefaultData()", "title": "" }, { "docid": "09a63336ecc8b199f618c18f14e1cc2e", "score": "0.41205427", "text": "def getProductName(self):\n if self.product_name is None:\n ctx = self.getContext()\n smgr = ctx.ServiceManager\n aConfigProvider = smgr.createInstanceWithContext(\n \"com.sun.star.configuration.ConfigurationProvider\", ctx)\n prop = util.createProp(\"nodepath\", \"/org.openoffice.Setup/Product\")\n oNode = aConfigProvider.createInstanceWithArguments(\n \"com.sun.star.configuration.ConfigurationAccess\", (prop,))\n self.product_name = oNode.getByName(\"ooName\")\n return self.product_name", "title": "" }, { "docid": "cdeb46cfb00711c317f56cf399ff0a71", "score": "0.4119529", "text": "def getManufacturer():\n\n return EInterface.sendCommand(\"AT+CGMI\")[0]", "title": "" }, { "docid": "7823a236c9df6c57c4abe6162963fcf8", "score": "0.41179103", "text": "def get_default(self):\n return self.default", "title": "" } ]
37bede708518e1fdcba27e6f1a95117d
Implement a CNN to embed the images in relation to their tags. For a walkthrough
[ { "docid": "2ead985af337a8143ff5bcb27e844058", "score": "0.6047527", "text": "def build_cnn(image_input, image_output, tag_size):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n\n x_image = tf.reshape(image_input, [-1, 127, 127, 1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n h_pool1_flat = tf.reshape(h_pool1, [-1, 32])\n\n W_fc1 = weight_variable([32, 1024])\n b_fc1 = bias_variable([1024])\n\n y_conv1 = tf.nn.softmax(tf.matmul(h_pool1_flat, W_fc1) + b_fc1)\n cnn_embedding = y_conv1\n\n W_fc2 = weight_variable([1024, tag_size])\n b_fc2 = bias_variable([tag_size])\n y_conv2 = tf.nn.softmax(tf.matmul(y_conv1, W_fc2) + b_fc2)\n\n #Include this as part of code.\n loss = -tf.reduce_sum(image_output*tf.log(y_conv2))\n optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)\n return (loss, optimizer, cnn_embedding)", "title": "" } ]
[ { "docid": "e2d9ed5733792dc2e0fd49d1b1ada69f", "score": "0.58495724", "text": "def tag_cloud(link=22656, \n lim_num_tags=200, \n image_dims=(400, 200),\n skip_tags = [],\n out_filepath=\"TagCloud.png\",\n ):\n\n info = taginfo(link=link, lim_num_tags=lim_num_tags) \n draw_taginfo(info, image_dims=image_dims, out_filepath=out_filepath, skip_tags = skip_tags)\n return", "title": "" }, { "docid": "d0ea4f883c9067650b18914b2b136e94", "score": "0.5837354", "text": "def call(self, img):\n\n for layer in self.vgg16:\n img = layer(img)\n\n for layer in self.head:\n img = layer(img)\n\n return img", "title": "" }, { "docid": "515f49eed00a860e2e353256ddfeaebc", "score": "0.58236605", "text": "def generate_tags(input_image_file):\n # Make sure that caffe is on the python path:\n caffe_root = '/opt/caffe/'\n sys.path.insert(0, caffe_root + 'python')\n # model location\n model_path = 'models/bvlc_reference_caffenet/'\n model_conf_name = 'deploy.prototxt'\n model_name = 'bvlc_reference_caffenet.caffemodel'\n model_configuration_path = caffe_root + model_path + model_conf_name\n model = caffe_root + model_path + model_name\n # download script\n download_script = '../scripts/download_model_binary.py'\n # mean pixel file\n mean_pixel_file_path = caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'\n # ImageNet labels filename\n imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'\n\n\n assert_file_exists(model)\n assert_file_exists(input_image_file)\n\n mode = caffe.TEST\n caffe.set_mode_cpu()\n net = caffe.Net(model_configuration_path, model, mode)\n\n batch_size =1\n channels = 3\n gray_range = 255\n image_size = 227\n\n transformer = create_transformer(net, mean_pixel_file_path, gray_range)\n net.blobs['data'].reshape(batch_size,channels, image_size, image_size)\n image = caffe.io.load_image(input_image_file)\n net.blobs['data'].data[...] 
= transformer.preprocess('data', image)\n\n out = net.forward()\n labels = load_labels(imagenet_labels_filename)\n\n top_k = net.blobs['prob'].data[0].flatten().argsort()[::-1]\n probs = net.blobs['prob'].data[0].flatten()\n\n min_p = 0.1 # Minimum acceptable probability for a tag\n return [(labels[i], probs[i]) for i in top_k if probs[i] > min_p]", "title": "" }, { "docid": "442b4b6d97ca4e8b6e79f7691bdf64e8", "score": "0.58228755", "text": "def train(self):\n photos = list(paths.list_images(\"./dataset\"))\n knownEmbeddings , knownNames = [], []\n\n for (i, photo) in enumerate(photos):\n\t print(\"Processing image\", i)\n\t name = photo.split(os.path.sep)[-2]\n\n\t image = cv2.imread(photo)\n\t (h, w) = image.shape[:2]\n\t B, G, R = cv2.split(image) \n\t B = np.mean(B)\n\t G = np.mean(G)\n\t R = np.mean(R)\n\n\t image_blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (R, G, B), swapRB=False, crop=False)\n\n\t self.__detector__.setInput(image_blob)\n\t detections = self.__detector__.forward()\n\n # Selecting photo with only one face\n\t if len(detections) == 1:\n\n\t\t confidence = detections[0, 0, 0, 2]\n # Selecting only strong setections\n\t\t if confidence > 0.5:\n\n\t\t\t face_box = detections[0, 0, 0, 3:7] * np.array([w, h, w, h])\n\t\t\t (startX, startY, endX, endY) = face_box.astype(\"int\")\n\n\t\t\t face = image[startY:endY, startX:endX]\n\t\t\t (fH, fW) = face.shape[:2]\n # Ensure tha sufficiency of the size of the face\n\t\t\t if fW < 20 or fH < 20:\n\t\t\t \tcontinue\n\n\t\t\t face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)\n\t\t\t self.__embedder__.setInput(face_blob)\n\t\t\t vec = self.__embedder__.forward()\n\n\t\t\t knownNames.append(name)\n\t\t\t knownEmbeddings.append(vec.flatten())\n\n le = LabelEncoder()\n labels = le.fit_transform(knownNames)\n\n print(\"Training model...\")\n recognizer = SVC(C=1.0, kernel=\"linear\", probability=True)\n recognizer.fit(knownEmbeddings, labels)\n\n os.remove(\"./trained_model/recognizer.pickle\")\n f = open(\"./trained_model/recognizer.pickle\", \"wb\")\n f.write(pickle.dumps(recognizer))\n f.close()\n\n os.remove(\"./trained_model/le.pickle\")\n f = open(\"./trained_model/le.pickle\", \"wb\")\n f.write(pickle.dumps(le))\n f.close()\n\n print(\"Training has finished successfully!\")", "title": "" }, { "docid": "fd7e6a723162d1567b58b0321ae0d8a6", "score": "0.5816327", "text": "def forward(self, images):\n # pylint: disable=arguments-differ\n # The arguments will differ from the base class since nn.Module is an abstract class.\n\n image_features = self.img_enc(images)\n captions, captions_logits, attn_maps = self.rnn(image_features)\n return captions, captions_logits, attn_maps", "title": "" }, { "docid": "f179cb878ea1121e4ec5b1edeb0dc1d2", "score": "0.58033836", "text": "def embed_imgs(self, imgloader, name = \"\"):\n # Setup progressbar\n pbar = MiscUtils.gen_pbar(max_value=len(imgloader), msg='Embedding %s: ' % name)\n que_emb = []\n que_lbl = []\n with torch.no_grad():\n for i, (samples, labels) in enumerate(imgloader):\n samples = samples.to(self.device)\n que_emb.append(self.model(samples)['feat'])\n que_lbl.append(labels)\n #Monitor progress\n pbar.update(i+1)\n pbar.finish()\n return que_emb, que_lbl", "title": "" }, { "docid": "838886f176ca82e58041664c86ca5819", "score": "0.5759869", "text": "def __getitem__(self, idx):\n \n caption = self.data.iloc[idx]['Findings']\n image_id = self.data.iloc[idx]['index']\n vocab = self.vocab\n\n image = Image.open(os.path.join(self.root, 
image_id + config.img_extension)).convert('RGB')\n if self.transform:\n image = self.transform(image)\n\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.tensor(caption)\n \n\n return image, target", "title": "" }, { "docid": "c766faa7176b80f5f37d7431eec1d103", "score": "0.56715834", "text": "def __getitem__(self, index):\n (image_fn, label_fn) = self.img_filenames[index], self.label_filenames[index]\n \"\"\" INPUT: image part \"\"\"\n image = Image.open(image_fn).convert('RGB')\n image = image.resize((448,448))\n if self.transform is not None:\n image = self.transform(image)\n #image.show()\n #print (image.format,image.size,image.mode)\n\n \"\"\" OUTPUT: label part \"\"\"\n labels=np.genfromtxt(label_fn, dtype='str')\n label_torch = torch.zeros((7,7,26))\n if np.size(labels) == 10:\n label=labels; label_torch = self.encoder(label_torch, label)\n else:\n for label in labels: label_torch = self.encoder(label_torch, label)\n return image, label_torch", "title": "" }, { "docid": "8837c2b45a36f8e9248c7b47c7f5691f", "score": "0.56138575", "text": "def __init__(self, embed_size, kenerl_size=5):\n super(CNNClassifier, self).__init__()\n self.embed_size = embed_size\n\n self.kenerl_size = kenerl_size\n self.embeddings = nn.Embedding(len(vocab.char2id), self.embed_char, padding_idx=pad_token_idx)\n self.cnn = CNN(self.embed_char, self.kenerl_size, self.embed_size)#self.embed_size is number of filter\n self.highway = Highway(self.embed_size, dropout_rate=0.3)", "title": "" }, { "docid": "ed965c0d1944597a54c6ee436f651780", "score": "0.5605078", "text": "def collate_fn(self, batch):\n\n images = list()\n boxes = list()\n labels = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels # tensor (N, 3, 300, 300), 3 lists of N tensors each", "title": "" }, { "docid": "6d0d6a813a828d80db437572f099dad3", "score": "0.5574822", "text": "def __getitem__(self, idx):\n # Get a sample - i.e. an annotated camera image\n sample = self.nuimages.sample[self.samples_with_objects[idx]]\n # Get the associated sample data, representing the image associated with the sample\n sd_token = sample['key_camera_token']\n sample_data = self.nuimages.get('sample_data', sd_token)\n\n # Read the image file\n image = Image.open(self.root_path / sample_data['filename']).convert(\"RGB\")\n\n # If this is the test split (no annotations), just return the image and None for target\n if not self.has_ann:\n return image, None\n\n # Get the object annotations corresponding to this sample data only\n object_anns = self.object_anns_dict[sd_token]\n\n # NOTE: Surface annotations in nuscenes lack bounding boxes and instance IDs. 
Skip for now.\n # if self.learn_surfaces:\n # surface_anns = [o for o in self.nuimages.surface_ann if o['sample_data_token'] == sd_token]\n # object_anns += surface_anns\n\n # Get bounding boxes\n # Note object_ann['bbox'] gives the bounding box as [xmin, ymin, xmax, ymax]\n boxes = torch.as_tensor([o['bbox'] for o in object_anns], dtype=torch.float32)\n\n # Get class labels for each bounding box\n category_tokens = [o['category_token'] for o in object_anns]\n categories = [self.nuimages.get('category', token) for token in category_tokens]\n labels = torch.as_tensor([self._category_name_to_id[cat['name']] for cat in categories],\n dtype=torch.int64)\n\n # Get nuimages segmentation masks\n # The nuimages instance mask is (H by W) where each value is 0 to N (N is number of object annotations)\n # Convert this to a single (N by H by W) array\n instance_mask = get_instance_mask(self.nuimages, image, object_anns)\n masks = np.array([instance_mask == i+1 for i in range(len(object_anns))]).astype(np.uint8)\n masks = torch.as_tensor(masks)\n\n # Use key camera token as image identifier\n # Convert key camera token from hexadecimal to an integer, and use it as the unique identifier\n image_id = torch.as_tensor([idx]).type(torch.int64)\n\n # Compute area\n if boxes.shape[0] == 0:\n area = torch.as_tensor([])\n else:\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n\n # Assume all instances are not crowd\n iscrowd = torch.zeros((len(object_anns),), dtype=torch.int64)\n\n target = {}\n target['boxes'] = boxes\n target['labels'] = labels\n target['masks'] = masks\n target['image_id'] = image_id\n target['area'] = area\n target['iscrowd'] = iscrowd\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target", "title": "" }, { "docid": "7bbc95702b6d5148fd4baca28203a408", "score": "0.55365556", "text": "def NDWI(image):", "title": "" }, { "docid": "17688c2669a961791b937fb778501d33", "score": "0.5533873", "text": "def __call__(self, name, batch):\n\n def _check_img(tag_img):\n tag, img = tag_img\n\n assert img.ndim == 2 or img.ndim == 3, 'Only 2D (HW) and 3D (CHW) images are accepted for display'\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=0)\n else:\n C = img.shape[0]\n assert C == 1 or C == 3, 'Only (1, H, W) or (3, H, W) images are supported'\n\n return tag, img\n\n tagged_images = self.process_batch(name, batch)\n\n return list(map(_check_img, tagged_images))", "title": "" }, { "docid": "665527421365567ec6e03d03cfb14d0a", "score": "0.55035067", "text": "def __getitem__(self, index):\n\n\n #attri = self.attri\n\n ann_id = self.ids[index]\n attrib=self.attri[int(ann_id[0:4])]\n\n #attrib = attri[ann_id]\n '''attrib=attri[index]'''\n attrib=attrib-1\n if attrib[0]==0 or attrib[0]==1:\n attrib[0]=0\n elif attrib[0]==2 or attrib[0]==3:\n attrib[0]=1\n\n #img_id = self.ids[ann_id]\n #img_id=int(ann_id)\n img_id=ann_id\n\n #path1 = str(img_id) + '.jpg'\n #path2= str(img_id) +'_occ'+ '.jpg'\n path=str(img_id)+'.jpg'\n\n image1 = Image.open(os.path.join(self.root1, path))\n image2 = Image.open(os.path.join(self.root2, path))\n image3 = Image.open(os.path.join(self.root3, path))\n image4= Image.open(os.path.join(self.root4, path))\n image5 = Image.open(os.path.join(self.root5, path))\n if self.transform is not None:\n #print(self.transform)\n image1 = self.transform(image1)\n image2 = self.transform(image2)\n image3 = self.transform(image3)\n image4 = self.transform(image4)\n image5 = self.transform1(image5)\n\n #target_attrib = 
torch.Tensor(attrib)\n target_veri=torch.Tensor([1,0])\n return image1,image2,image3,image4,image5,attrib,target_veri", "title": "" }, { "docid": "5ae66412b40d15146d2e82c4057f30d4", "score": "0.5500454", "text": "def source_cat(image,nsigma=2,kernel_size=(3,3),npixels=5,deblend=False,contrast=.001,targ_coord=None):\n\n # to be able to translate from ra/dec <--> pixels on image\n wcs,frame = WCS(image.header),image.header['RADESYS'].lower()\n hdr = image.header\n #L1mean,L1med,L1sigma,L1fwhm = hdr['L1MEAN'],hdr['L1MEDIAN'],hdr['L1SIGMA'],hdr['L1FWHM'] # counts, fwhm in arcsec \n #pixscale,saturate,maxlin = hdr['PIXSCALE'],hdr['SATURATE'],hdr['MAXLIN'] # arcsec/pixel, counts for saturation and non-linearity levels\n\n # detect threshold uses sigma clipped statistics to get bkg flux and set a threshold for detected sources as objs above nsigma*bkg\n # bkg also available in the hdr of file, either way is fine \n threshold = detect_threshold(image.data, nsigma=nsigma)\n sigma = 3.0 * gaussian_fwhm_to_sigma # FWHM = 3. pixels for kernel smoothing\n # optional ~ kernel smooths the image, using gaussian weighting with pixel size of 3\n kernel = Gaussian2DKernel(sigma, x_size=kernel_size[0], y_size=kernel_size[1])\n kernel.normalize()\n # make a segmentation map, id sources defined as n connected pixels above threshold (n*sigma + bkg)\n segm = detect_sources(image.data,\n threshold, npixels=npixels, filter_kernel=kernel)\n # deblend useful for very crowded image with many overlapping objects...\n # uses multi-level threshold and watershed segmentation to sep local peaks as ind obj\n # use the same number of pixels and filter as was used on original segmentation\n # contrast is fraction of source flux local pk has to be consider its own obj\n if deblend:\n segm = deblend_sources(image.data, \n segm, npixels=5,filter_kernel=kernel, \n nlevels=32,contrast=contrast)\n\n # need bkg subtracted to do photometry using source properties\n boxsize=100\n bkg = Background2D(image.data,boxsize) # sigma-clip stats for background est over image on boxsize, regions interpolated to give final map \n data_bkgsub = image.data - bkg.background\n cat = source_properties(data_bkgsub, segm,background=bkg.background,\n error=None,filter_kernel=kernel)\n \n # going to id the target lensing galaxy from source catalog\n # since this is ideal detection location where strong lens could provide multi-im\n # this is going to be area where we will most want to plant and study \n \n #CAT-RA = 'blah' / [HH:MM:SS.sss] Catalog RA of the object \n #CAT-DEC = 'blah' / [sDD:MM:SS.ss] Catalog Dec of the object\n if targ_coord == None:\n # the source images all have cat-ra cat-dec, will default grab target galaxy location from hdr\n ra = image.header['CAT-RA']\n dec = image.header['CAT-DEC']\n else:\n # if using the ref to detect source objs the target stuff isn't in there will need to provide tuple taken from source hdr \n ra,dec = targ_coord # unpack\n\n lensing_gal = SkyCoord(ra,dec,unit=(u.hourangle,u.deg))\n pix_gal = astropy.wcs.utils.skycoord_to_pixel(lensing_gal,wcs)\n\n # TODO all sources of error including poisson from sources\n tbl = cat.to_table()\n tbl['xcentroid'].info.format = '.2f' # optional format\n tbl['ycentroid'].info.format = '.2f'\n tbl['cxx'].info.format = '.2f'\n tbl['cxy'].info.format = '.2f'\n tbl['cyy'].info.format = '.2f'\n tbl['gini'].info.format = '.2f'\n\n # going to add a column of surface brightness so we can plant into the obj shapes according to those\n surf_brightnesses = []\n for obj in tbl:\n unit = 
1/obj['area'].unit\n surf_bright = obj['source_sum']/obj['area'].value # flux/pix^2\n surf_brightnesses.append(surf_bright) \n surf_brightnesses = Column(surf_brightnesses,name='surface_brightness',unit=unit)\n tbl.add_column(surf_brightnesses)\n\n # take a look at the brightest or most elliptical objs from phot on segm objs detected\n tbl.sort('ellipticity') #\n elliptical=tbl[-10:]\n #tbl.sort('source_sum') ('surface_brightness') \n\n # there is definitely a neater/cuter way to index table than this using loc to find obj of gal \n tmp = tbl[tbl['xcentroid'].value > pix_gal[0]-10]\n tmp = tmp[tmp['xcentroid'].value < pix_gal[0]+10]\n tmp = tmp[tmp['ycentroid'].value > pix_gal[1]-10]\n targ_obj = tmp[tmp['ycentroid'].value < pix_gal[1]+10] \n targ_sb = targ_obj['source_sum']/targ_obj['area']\n \n return cat,image,threshold,segm,targ_obj", "title": "" }, { "docid": "c31dae8c3e2269cf56d2e58f3fd0960f", "score": "0.54998785", "text": "def __init__(self):\n super(ItemImgFeature, self).__init__()\n self.dim = 4096\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256*8*8, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n)", "title": "" }, { "docid": "be9739f3d812f5e419bed0b0edaaa627", "score": "0.54800344", "text": "def __init__(self, vocab_size, label_size, text_length, batchsize, channels, embed_dim):\n super(DPCNN, self).__init__()\n self.vocab_size = vocab_size\n self.label_size = label_size\n self.channels = channels\n self.text_length = text_length\n self.batchsize = batchsize\n self.embed_dim = embed_dim\n\n self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)\n self.region_embedding = nn.Sequential(\n nn.Conv1d(1, self.channels, kernel_size=3, padding=1))\n # self.region_embedding = nn.Conv2d(1, self.channels, (3, self.embed_dim), stride=1)\n self.pre_conv1 = torch.nn.Conv1d(\n in_channels=self.channels,\n out_channels=self.channels,\n kernel_size=3,\n padding=1)\n self.pre_conv2 = torch.nn.Conv1d(\n in_channels=self.channels,\n out_channels=self.channels,\n kernel_size=3,\n padding=1)\n\n # self.rep_blocks = nn.Sequential(\n # DPCNN_rep_block(self.batchsize, self.channels),\n # DPCNN_rep_block(self.batchsize, self.channels),\n # DPCNN_rep_block(self.batchsize, self.channels),\n # DPCNN_rep_block(self.batchsize, self.channels),\n # DPCNN_rep_block(self.batchsize, self.channels),\n # DPCNN_rep_block(self.batchsize, self.channels),\n # )\n\n self.block1 = DPCNN_rep_block(self.batchsize, self.channels)\n self.block2 = DPCNN_rep_block(self.batchsize, self.channels)\n self.block3 = DPCNN_rep_block(self.batchsize, self.channels)\n self.block4 = DPCNN_rep_block(self.batchsize, self.channels)\n self.block5 = DPCNN_rep_block(self.batchsize, self.channels)\n self.block6 = DPCNN_rep_block(self.batchsize, self.channels)\n\n self.final_pool = torch.nn.MaxPool1d(kernel_size=2, stride=2)\n\n self.dropout = torch.nn.Dropout(p = .5)\n self.fc = torch.nn.Linear(self.channels, self.label_size)", 
"title": "" }, { "docid": "272f0c4c87ef6968dacdd6ca58f0c0d3", "score": "0.547565", "text": "def __getitem__(self, idx):\r\n \r\n #format index\r\n if torch.is_tensor(idx):\r\n idx = idx.tolist()\r\n \r\n # get coordinates\r\n OD = np.array([self.od.iloc[idx, 1], self.od.iloc[idx, 2] ]).astype('float')\r\n Fovea = np.array([self.fovea.iloc[idx, 1], self.fovea.iloc[idx, 2] ]).astype('float')\r\n img_name = self.od.iloc[idx, 0]\r\n \r\n # read image\r\n img_path = os.path.join(self.root_dir,img_name+'.jpg')\r\n image = Image.open(img_path)\r\n image = self.__augment__(image)\r\n\r\n \r\n # create the sample dictionary\r\n sample = {'image': image, 'OD': OD, 'Fovea': Fovea }\r\n # reshape the image and update coordinates\r\n sample, scale_factor = self.__reshape__(sample)\r\n \r\n\r\n \r\n # create bounding boxes\r\n boxes = []\r\n boxes.append(self.__get_boxes__(sample, tpe ='OD'))\r\n boxes.append(self.__get_boxes__(sample, tpe ='Fovea'))\r\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\r\n\r\n #create labels\r\n labels = torch.tensor([1,2], dtype=torch.int64)\r\n\r\n #image_id\r\n image_id = torch.tensor([idx])\r\n\r\n # suppose all instances are not crowd\r\n iscrowd = torch.zeros((2), dtype=torch.int64)\r\n\r\n # create target dictionary\r\n target = {}\r\n target[\"boxes\"] = boxes\r\n target[\"labels\"] = labels\r\n target[\"image_id\"] = image_id\r\n #target[\"area\"] = torch.tensor([60*60],dtype=torch.int64)\r\n target[\"area\"] = torch.tensor([self.box_width_OD[0]*self.box_width_OD[1], \\\r\n self.box_width_Fovea[0]*self.box_width_Fovea[1] ],\\\r\n dtype=torch.int64)\r\n target[\"iscrowd\"] = iscrowd\r\n \r\n # apply transformations\r\n img = sample['image']\r\n if self.transform is not None:\r\n img , target = self.transform(img, target)\r\n\r\n \r\n return img, target, scale_factor", "title": "" }, { "docid": "9b1b994d91609feff96e43d0635f2802", "score": "0.5471246", "text": "def add_node_images(self, fig, i_viz_layer, image_axes, nn, inputs):\n i_dense_layer = i_viz_layer + self.i_dense_layer_offset\n node_activities = nn.forward_pass(\n inputs, i_stop_layer=i_dense_layer)\n node_image_left = (\n self.left_border\n + self.input_image_width\n + i_viz_layer * self.node_image_width\n + (i_viz_layer + 1) * self.between_layer_gap\n )\n n_nodes = self.n_nodes[i_viz_layer]\n total_layer_height = (\n n_nodes * self.node_image_height\n + (n_nodes - 1) * self.between_node_gap\n )\n layer_bottom = (self.figure_height - total_layer_height) / 2\n layer_axes = []\n for i_node in range(n_nodes):\n node_signal = np.zeros((1, n_nodes))\n node_signal[0, i_node] = 1\n node_signature = nn.forward_pass(\n node_signal,\n i_start_layer=i_dense_layer,\n i_stop_layer=self.n_viz_layers + self.i_dense_layer_offset - 1,\n )\n node_image = node_signature.reshape(\n self.n_image_rows, self.n_image_cols)\n # node_image *= node_activities[i_node]\n node_image *= np.sign(node_activities[i_node])\n\n node_image_bottom = (\n layer_bottom + i_node * (\n self.node_image_height + self.between_node_gap))\n\n absolute_pos = (\n node_image_left,\n node_image_bottom,\n self.node_image_width,\n self.node_image_height)\n ax = self.add_image_axes(fig, image_axes, absolute_pos)\n ax.imshow(\n node_image,\n vmin=self.im_vmin,\n vmax=self.im_vmax,\n cmap=self.cmap,\n zorder=6,\n )\n layer_axes.append(ax)\n image_axes.append(layer_axes)", "title": "" }, { "docid": "f541dbe69d6eb9b6c6c8d8f9ff45e856", "score": "0.5470639", "text": "def give_CUB200_datasets(opt):\n image_sourcepath = opt.source_path+'/images'\n #Find 
available data classes.\n image_classes = sorted([x for x in os.listdir(image_sourcepath) if '._' not in x], key=lambda x: int(x.split('.')[0]))\n #Make a index-to-labelname conversion dict.\n conversion = {int(x.split('.')[0]):x.split('.')[-1] for x in image_classes}\n #Generate a list of tuples (class_label, image_path)\n image_list = {int(key.split('.')[0]):sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key) if '._' not in x]) for key in image_classes}\n image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]\n image_list = [x for y in image_list for x in y]\n\n #Image-dict of shape {class_idx:[list of paths to images belong to this class] ...}\n image_dict = {}\n for key, img_path in image_list:\n key = key-1\n if not key in image_dict.keys():\n image_dict[key] = []\n image_dict[key].append(img_path)\n\n keys = sorted(list(image_dict.keys()))\n\n #Following \"Deep Metric Learning via Lifted Structured Feature Embedding\", we use the first half of classes for training.\n train,test = keys[:len(keys)//2], keys[len(keys)//2:]\n train_image_dict, val_image_dict = {key:image_dict[key] for key in train},{key:image_dict[key] for key in test}\n\n\n train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)\n val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)\n eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)\n\n train_dataset.conversion = conversion\n val_dataset.conversion = conversion\n eval_dataset.conversion = conversion\n\n return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}", "title": "" }, { "docid": "de4bbf9d29e897ec3e4d4b28a8136203", "score": "0.54531157", "text": "def __init__(self, root, transforms=None, train=True, size=32, already=False):\r\n self.size = size\r\n self.datapkl_path = root+\"/dogs_{}.pickle\".format(\"train\" if train else \"eval\" )\r\n if transforms is None:\r\n self.transforms = T.Compose([\r\n T.ToTensor()\r\n ])\r\n if already:\r\n self.breed_dict = {}\r\n self.imgs = []\r\n \r\n if os.path.exists(path=self.datapkl_path):\r\n with open(self.datapkl_path, 'rb') as load_data:\r\n self.imgs, self.labels = pickle.load(load_data)\r\n for img, label in zip(self.imgs, self.labels):\r\n self.breed_dict[label] = img\r\n else:\r\n self.train = train\r\n self.breed_dict = {}\r\n self.imgs = []\r\n self.name = []\r\n self.labels = []\r\n annots = glob.glob(root + '/Annotation/*/*')\r\n print(annots[-1])\r\n\r\n for annot in annots:\r\n text = open(annot, 'r').read()\r\n annot_list = annot.split('/')\r\n rt = ET.fromstring(text)\r\n children = rt.getchildren()\r\n objects = children[5:]\r\n\r\n for object in objects:\r\n objChildren = object.getchildren()\r\n breed = objChildren[0].text\r\n\r\n img = cv2.imread(root + '/Images/' + annot_list[-2] + '/' + annot_list[-1] + '.jpg')\r\n self.name.append(root + '/Images/' + annot_list[-2] + '/' + annot_list[-1] + '.jpg')\r\n #print(\"Processing \" + root + '/Images/' + annot_list[-2] + '/' + annot_list[-1] + '.jpg')\r\n xmin = int(objChildren[4].getchildren()[0].text)\r\n xmax = int(objChildren[4].getchildren()[2].text)\r\n ymin = int(objChildren[4].getchildren()[1].text)\r\n ymax = int(objChildren[4].getchildren()[3].text)\r\n\r\n # cv2.imshow(\"img\", img[ymin:ymax, xmin:xmax])\r\n # cv2.waitKey(0)\r\n self.imgs.append(img[ymin:ymax, xmin:xmax])\r\n self.labels.append(breed)\r\n if breed in self.breed_dict.keys():\r\n 
self.breed_dict[breed].append(list(img[ymin:ymax, xmin:xmax]))\r\n else:\r\n self.breed_dict[breed] = [list(img[ymin:ymax, xmin:xmax])]\r\n self.save()", "title": "" }, { "docid": "aad226e461db4673f7be92771a06d13a", "score": "0.5451131", "text": "async def cat(self, ctx):\n try:\n async with self.session.get(\"https://api.thecatapi.com/v1/images/search?format=json\") as response:\n data = await response.json()\n embed = discord.Embed(title=\"Meow!\", color=0x00ff00)\n embed.set_image(url=data[0][\"url\"])\n await ctx.send(embed=embed)\n except Exception as e:\n await ctx.send(\"There was an exception. Please open an issue on Github\")", "title": "" }, { "docid": "fd2227050f8e144a4fca4d89c3b8ff90", "score": "0.54385674", "text": "def live_image(self):", "title": "" }, { "docid": "7f0510c117fc0cc2c674b505a4ba924c", "score": "0.5434894", "text": "def get_images(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "96735995846741a6a0360a9dfd87fd1a", "score": "0.5416487", "text": "def process_image(self):", "title": "" }, { "docid": "6c486bbcef51f94ac6ee2a50a4fed28d", "score": "0.5410554", "text": "def construct_cnn(train=False):\n all_char = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',\n 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n img_rows, img_cols = 30, 60\n np.random.seed(30)\n # path to all the images\n dataset_paths = glob.glob(\"dataset_characters/**/*.jpg\")\n x = []\n y = []\n # load images\n for image_path in dataset_paths[::]:\n label = image_path.split(os.path.sep)[-2]\n img_num = image_path.split(os.path.sep)[-1]\n image_path = os.path.join('./dataset_characters', str(label), str(img_num))\n img1 = cv2.imread(image_path)\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img1 = cv2.resize(img1, (img_rows, img_cols))\n x.append(img1)\n y.append(label)\n x = np.asarray(x)\n y = np.asarray(y)\n # split into 75% training and 25% test subsets\n x_train, x_test, y_train1, y_test1 = train_test_split(x, y, test_size=0.25)\n # reshape the image arrays in order to be an input of CNN model, 1 represents grayscale\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n # one hot encoding the label arrays\n y_train, y_test = one_hot_encoding(all_char, y_train1, y_test1)\n # create a Sequential model, refer to https://missinglink.ai/guides/convolutional-neural-networks/python\n # -convolutional-neural-network-creating-cnn-keras-tensorflow-plain-python/\n model = Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(img_rows, img_cols, 1)))\n model.add(Dropout(0.2))\n model.add(layers.Conv2D(32, (3, 3), activation='relu'))\n model.add(Dropout(0.2))\n model.add(layers.Conv2D(32, (3, 3), activation='relu'))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(layers.Dense(len(all_char), activation='softmax'))\n # model.summary()\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n if train:\n model.fit(x_train, y_train,\n batch_size=64,\n epochs=10)\n\n model.save('data_char.keras')\n print('CNN model has been saved to the file data_char.keras')\n model2 = load_model('data_char.keras')\n score = model2.evaluate(x_test, y_test, verbose=8)\n print('Test accuracy of CNN:' + str(score[1]))", "title": "" }, { "docid": "f75256400aee7a258162a356e010c8c0", "score": "0.539363", "text": "def forward(img, label, img_path, feat_dict):\n feat = 
vgg16.features(img.cuda())\n feat1 = vgg16.avgpool(feat)\n feat1 = torch.flatten(feat1, 1)\n feat_dict['feat'].append(feat1.cpu().detach().numpy())\n for i in range(4):\n feat1 = vgg16.classifier[i](feat1)\n if i == 0:\n print(vgg16.classifier[i])\n feat_dict['feat_lin1'].append(feat1.cpu().detach().numpy())\n if i == 3:\n print(vgg16.classifier[i])\n feat_dict['feat_lin2'].append(feat1.cpu().detach().numpy())\n feat_dict['labels'].append(label.cpu().numpy().item())\n feat_dict['paths'].append(img_path[0])", "title": "" }, { "docid": "7f9e409761e15798f5015b5862fdb7a3", "score": "0.5393397", "text": "def __call__(self, image, target):\n w, h = image.size\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj['iscrowd'] == 0]\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n boxes = boxes[keep]\n classes = classes[keep]\n # masks = masks[keep]\n if keypoints is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] for obj in anno])\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target", "title": "" }, { "docid": "1b35e4d74895a43a97745532e352c572", "score": "0.5377112", "text": "def __init__(self, conf):\n super(CNNpaper, self).__init__()\n\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 48, 5, padding=2),\n nn.BatchNorm2d(48),\n # Relu (min = 0) followed by maxpool2d is similar to maxout\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 1),\n nn.Dropout(0.2)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(48, 64, 5, padding=2),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(2, 1, 1),\n nn.Dropout(0.2)\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(64, 128, 5, padding=2),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 1),\n nn.Dropout(0.2)\n )\n self.conv4 = nn.Sequential(\n nn.Conv2d(128, 160, 5, padding=2),\n nn.BatchNorm2d(160),\n nn.ReLU(),\n nn.MaxPool2d(2, 1, 1),\n nn.Dropout(0.2)\n )\n self.conv5 = nn.Sequential(\n nn.Conv2d(160, 192, 5, padding=2),\n nn.BatchNorm2d(192),\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 1),\n nn.Dropout(0.2)\n )\n self.conv6 = nn.Sequential(\n nn.Conv2d(192, 192, 5, padding=2),\n nn.BatchNorm2d(192),\n nn.ReLU(),\n nn.MaxPool2d(2, 1, 1),\n nn.Dropout(0.2)\n )\n self.conv7 = nn.Sequential(\n nn.Conv2d(192, 192, 5, padding=2),\n nn.BatchNorm2d(192),\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 1),\n nn.Dropout(0.2)\n )\n self.conv8 = nn.Sequential(\n nn.Conv2d(192, 192, 5, padding=2),\n 
nn.BatchNorm2d(192),\n nn.ReLU(),\n nn.MaxPool2d(2, 1, 1),\n nn.Dropout(0.2)\n )\n self.fc1 = nn.Sequential(\n nn.Linear(9408, 3072),\n nn.ReLU()\n )\n self.fc2 = nn.Sequential(\n nn.Linear(3072, 3072),\n nn.ReLU()\n )\n\n self.fc = nn.Sequential(\n self.fc1,\n self.fc2\n )\n self.classify = nn.Sequential(nn.Linear(3072,\n conf.getint(\"num_classes\")))", "title": "" }, { "docid": "e8d0134acbcf24d0312309e80695733f", "score": "0.5369958", "text": "def generateImageLabels(images, metadata):\n\tmetadata['examId'] = metadata.subjectId+(\"_\")+metadata.examIndex.astype(str)\n images['examId'] = images.subjectId+(\"_\")+images.examIndex.astype(str)\n examIdsCancerL = metadata.examId[metadata.cancerL == 1]\n examIdsCancerR = metadata.examId[metadata.cancerR == 1]\n images['cancer'] = 0\n images.loc[(images.examId.isin(examIdsCancerL)) & (images.laterality == \"L\"), 'cancer'] = 1\n images.loc[(images.examId.isin(examIdsCancerR)) & (images.laterality == \"R\"), 'cancer'] = 1\n\t\n\treturn images[['filename', 'cancer']].copy()", "title": "" }, { "docid": "3bf2b05a5732c73fe0ac61bf9c33aa76", "score": "0.53623515", "text": "def detection_collate_tubelet(batch):\n\n targets = []\n imgs = []\n image_ids = []\n for sample in batch:\n imgs.append(sample[0])\n # targets.append([torch.FloatTensor(target_frame) for target_frame in sample[1]])\n targets.append([torch.FloatTensor(t) for t in sample[1]])\n\n image_ids.append(sample[2])\n\n return torch.stack(imgs, 0), targets, image_ids", "title": "" }, { "docid": "58603cf12ffe059fccfe01461982bd8f", "score": "0.5362141", "text": "def collate_fn(self, batch):\n\n labels = [x[0] for x in batch]\n images_1 = [x[1] for x in batch]\n images_2 = [x[2] for x in batch]\n\n batch_size = len(labels)\n inputs_1 = torch.zeros(batch_size, 3, self.img_size[1], self.img_size[0])\n inputs_2 = torch.zeros(batch_size, 3, self.img_size[1], self.img_size[0])\n\n targets = []\n\n for idx in range(batch_size):\n inputs_1[idx] = images_1[idx]\n inputs_2[idx] = images_2[idx]\n\n targets.append(labels[idx])\n\n return torch.stack(targets), inputs_1, inputs_2", "title": "" }, { "docid": "14a5b6e5a09ca59a1082042926355340", "score": "0.5338715", "text": "def forward(self, img_pixels_tensor, img_label_tensor):\n # Pass the input tensor through each of our operations\n img_pixels = self.hidden(img_pixels_tensor) # maps the 4096 sized image pixel tensor to a tensor of 300\n x = torch.cat((img_pixels.float(), img_label_tensor.float()), axis=1) # concatenate pixel and label\n x = self.output(x)\n return x", "title": "" }, { "docid": "63ad39370ad08ec6fc33e70346e94332", "score": "0.53287476", "text": "def build_adjacency_graph(tags, textcorpus):\n adjacency_graph = nx.Graph()\n\n # print \"tags\", take(1, tags.values())\n # print \"textcorpus\", take(1, textcorpus)\n\n tags_to_image = defaultdict(list)\n\n for image_key in tags.keys():\n adjacency_graph.add_node(image_key, type='image')\n\n for tg in tags[image_key]:\n tags_to_image[tg].append(image_key)\n\n for corpus in textcorpus:\n for word in corpus:\n adjacency_graph.add_node(word, type='text')\n\n if word in tags_to_image:\n for tg in tags_to_image[word]:\n adjacency_graph.add_edge(tg, word)\n\n return adjacency_graph", "title": "" }, { "docid": "f65284f11140943fee1a90f7a2f57157", "score": "0.5323988", "text": "def __call__(self, image, target):\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self.categories]\n if not self.remap:\n target[\"annotations\"] = anno\n return image, target\n anno = 
copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self.categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target", "title": "" }, { "docid": "addc603b22addd285e5a0b2b630fb586", "score": "0.53168917", "text": "def process_picture(tag, url):\n # Only present in Jimdo\n return [( cat_alt, \"static_content\" )]", "title": "" }, { "docid": "2c3bcdd7e0cfe4a5b7fef97581b1b0c6", "score": "0.53092027", "text": "def get_images(self):\n raise NotImplementedError", "title": "" }, { "docid": "fa4f013b7614e818e9b4fe0aec769ded", "score": "0.53086996", "text": "def __init__(self, image_width = 224, image_height = 168):\n self.__image_width = image_width\n self.__image_height = image_height\n self.__commands = [\"move_forward\", \"left_medium\", \"right_medium\", \"left_light\", \"right_light\"]\n # Label encoding\n # 0 = move_forward\n # 1 = left_medium\n # 2 = right_medium\n # 3 = left_light\n # 4 = right_light", "title": "" }, { "docid": "f8bc7c548e06b31537890c5f2b0ca2f0", "score": "0.53053963", "text": "def __getitem__(self, index):\n # Load image and mask\n image_path = self.image_paths[index]\n mask_path = os.path.join(self.annotation_dir, image_path.split('/')[-1].replace(\".jpg\", \".png\"))\n img = Image.open(image_path).convert(\"RGB\")\n img = F.to_tensor(img)\n mask = Image.open(mask_path).convert(\"L\")\n \n # Convert mask to numpy array\n mask = np.array(mask)\n \n # Labels are decoded as different colors\n mask_labels = np.unique(mask)\n \n boxes = []\n masks = []\n labels = []\n \n for mask_label in mask_labels:\n # Ignore the background/unlabeled\n if mask_label == self.background_index:\n continue\n \n # Extract the mask of the current label\n independent_mask = mask == mask_label\n \n # Extract instance in the current mask\n blobs, no_blobs = ndimage.label(independent_mask)\n \n # For each instance\n for i in range(1, no_blobs + 1):\n # Get bounding box\n pos = np.where(blobs == i)\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n if (xmax - xmin) * (ymax - ymin) == 0:\n continue\n boxes.append([xmin, ymin, xmax, ymax])\n \n # Get instance mask\n instance_mask = (blobs == i)\n masks.append(instance_mask)\n \n # Add label\n if mask_label not in self.label_indices: print(mask_label)\n labels.append(mask_label + 1)\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n labels = torch.as_tensor(labels, dtype=torch.int64)\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n \n image_id = torch.tensor([index])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) if len(boxes) > 0 else torch.as_tensor([], dtype=torch.float32)\n iscrowd = torch.zeros((len(boxes),), dtype=torch.int64)\n \n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n \n return img, target", "title": "" }, { "docid": "283785e4e9a42baa3a0bcf3da79834ab", "score": "0.52956474", "text": "def __init__(self, datasetRoot, transforms=None, train=True, already=False):\r\n if transforms is None:\r\n self.transforms = T.Compose([\r\n T.ToTensor()\r\n ])\r\n self.pklPath = \"tinyImageNet_{}.pkl\".format(\"train\" if train else \"val\")\r\n self.pklPath = os.path.join(datasetRoot,self.pklPath)\r\n if already:\r\n self.imgs = []\r\n if os.path.exists(path=self.pklPath):\r\n with open(self.pklPath,'rb') as load_data:\r\n self.imgs, self.labels = pickle.load(load_data)\r\n for img, label 
in zip(self.imgs, self.labels):\r\n self.breed_dict[label] = img\r\n \r\n else:\r\n self.train = train\r\n self.imgs = []\r\n self.labels = []\r\n self.classes = [] # the string nxxxxxx of different classes \r\n self.ids = {} # mapping the class string nxxxx to number start from 0\r\n wnids = os.path.join(datasetRoot,\"wnids.txt\")\r\n with open(wnids,\"r\") as f:\r\n ids = f.read()\r\n self.classes = ids.split()\r\n num = 0\r\n \r\n for c in self.classes:\r\n self.ids[c] = num\r\n num += 1\r\n print(\"TOTAL CLASSES:\", num)\r\n\r\n if self.train:\r\n imgpath = os.path.join(datasetRoot,\"train\")\r\n for c in self.classes:\r\n classpath = os.path.join(imgpath,c,\"images\") # another file in imgpath/c is boxes.txt\r\n for i in os.listdir(classpath):\r\n ipath = os.path.join(classpath,i)\r\n img = cv2.imread(ipath)\r\n self.imgs.append(img)\r\n self.labels.append(self.ids[c])\r\n else:\r\n imgpath = os.path.join(datasetRoot,\"val\") # the lay out in val folder is a images folder with all images in it \r\n # and a val_annotations.txt\r\n annota = os.path.join(imgpath,\"val_annotations.txt\")\r\n with open(annota,\"r\") as f:\r\n for l in f.readlines():\r\n iname,c,_,__,___,____ = l.split()\r\n ipath = os.path.join(imgpath,\"images\",iname)\r\n img = cv2.imread(ipath)\r\n self.imgs.append(img)\r\n self.labels.append(self.ids[c])", "title": "" }, { "docid": "c92724ed906c675a940a02ac66b2aae3", "score": "0.52739346", "text": "def _generate_examples(self, image_dir, annotation_dir, split_name):\n instance_filename = \"instances_{}.json\"\n\n # Load the annotations (label names, images metadata,...)\n instance_path = os.path.join(\n annotation_dir,\n \"annotations\",\n instance_filename.format(split_name),\n )\n coco_annotation = CocoAnnotationBBoxes(instance_path)\n # Each category is a dict:\n # {\n # 'id': 51, # From 1-91, some entry missing\n # 'name': 'bowl',\n # 'supercategory': 'kitchen',\n # }\n categories = coco_annotation.categories\n # Each image is a dict:\n # {\n # 'id': 262145,\n # 'file_name': 'COCO_train2017_000000262145.jpg'\n # 'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',\n # 'coco_url': 'http://images.cocodataset.org/train2017/xyz.jpg',\n # 'license': 2,\n # 'date_captured': '2013-11-20 02:07:55',\n # 'height': 427,\n # 'width': 640,\n # }\n images = coco_annotation.images\n\n objects_key = \"objects\"\n self.info.features[objects_key][\"label\"].names = [c[\"name\"] for c in categories]\n\n categories_id2name = {c[\"id\"]: c[\"name\"] for c in categories}\n\n # Iterate over all images\n for image_info in sorted(images, key=lambda x: x[\"id\"]):\n # Each instance annotation is a dict:\n # {\n # 'iscrowd': 0,\n # 'bbox': [116.95, 305.86, 285.3, 266.03],\n # 'image_id': 480023,\n # 'segmentation': [[312.29, 562.89, 402.25, ...]],\n # 'category_id': 58,\n # 'area': 54652.9556,\n # 'id': 86,\n # }\n instances = coco_annotation.get_annotations(img_id=image_info[\"id\"])\n\n if not instances:\n continue\n\n def build_bbox(x, y, width, height):\n # pylint: disable=cell-var-from-loop\n # build_bbox is only used within the loop so it is ok to use image_info\n return tfds.features.BBox(\n ymin=y / image_info[\"height\"],\n xmin=x / image_info[\"width\"],\n ymax=(y + height) / image_info[\"height\"],\n xmax=(x + width) / image_info[\"width\"],\n )\n # pylint: enable=cell-var-from-loop\n\n example = {\n \"image\": os.path.join(image_dir, split_name, image_info[\"file_name\"]),\n \"image/filename\": image_info[\"file_name\"],\n \"image/id\": image_info[\"id\"],\n objects_key: 
[\n { # pylint: disable=g-complex-comprehension\n \"id\": instance[\"id\"],\n \"area\": instance[\"area\"],\n \"bbox\": build_bbox(*instance[\"bbox\"]),\n \"label\": categories_id2name[instance[\"category_id\"]],\n \"is_crowd\": bool(instance[\"iscrowd\"]),\n }\n for instance in instances\n ],\n }\n\n yield image_info[\"file_name\"], example", "title": "" }, { "docid": "4f047c5c2a0b01817705ae22eb74e5b3", "score": "0.5271598", "text": "def collate_fn(data):\n # Sort a data list by caption length (descending order).\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions = zip(*data)\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end] \n \n return images, targets, lengths", "title": "" }, { "docid": "6081e488305b72b8e4f18724738d3e7c", "score": "0.5267669", "text": "def knnShow(k : int, n : int, starting_epoch: int, ending_epoch: int, step_epoch: int,\n device: torch.device, use_cuda: bool,\n data_type:str, version: str, data_classes: List,\n start_channels: int, img_size: int,\n nb_classes: int, trainset: Dataset) -> None:\n Z_hat = torch.randn(n*nb_classes, start_channels-nb_classes, device=device)\n Yk_hat = torch.zeros(n*nb_classes, nb_classes, device=device)\n for i in range(nb_classes):\n for j in range(n):\n Yk_hat[i*n +j][i] = 1\n Z_Yk_fixed = torch.cat([Z_hat, Yk_hat], dim=1)\n img_labels = torch.argmax(Yk_hat, dim=1)\n T = [i for i in range(starting_epoch, ending_epoch+1, step_epoch)]\n\n images = []\n\n epoch_pbar = tqdm(T, desc=\"Epoch: {}\".format(T[0]))\n for epoch in epoch_pbar:\n epoch_pbar.set_description(\"Epoch: {}\".format(epoch))\n epochImages = []\n\n step_artgan = ArtGAN(data_type, version, img_size, nb_classes,\n start_channels=start_channels, retrain_epoch=epoch,\n device=device)\n\n if use_cuda: step_artgan.cuda()\n step_artgan.eval()\n\n model_imgs = step_artgan.G(Z_Yk_fixed)\n model_imgs = model_imgs.cpu().detach()\n\n nn_pbar = tqdm(range(nb_classes), desc=\"Label: {}\".format(data_classes[0]))\n for i in nn_pbar:\n nn_pbar.set_description(\"Label: {}\".format(data_classes[i]))\n for j in range(n):\n gen_img = model_imgs[i*n+j]\n neighbors = knn(k , gen_img, trainset)\n neighbors = [trainset.__getitem__(idx)[0].numpy() for idx in neighbors]\n gen_img = np.transpose(gen_img, (1, 2, 0))\n neighbors = [ np.transpose(img, (1, 2, 0)) for img in neighbors]\n l = [gen_img] + neighbors\n epochImages.append(l)\n\n images.append(epochImages)\n\n for j in range(n*nb_classes):\n ncols = k+1\n nrows = len(T)\n\n fig = plt.figure(figsize=(4, 4))\n grid = ImageGrid(fig, 111, nrows_ncols=(nrows, ncols), axes_pad=(0.05, 0.3))\n\n row = 0\n col = 0\n for ax in grid:\n\n ax.imshow(images[row][j][col])\n ax.axis(\"off\")\n if col == 0:\n ax.set_title(\"Epoch {}\".format(T[row]))\n\n col += 1\n col = col%ncols\n if col == 0: row += 1\n\n # fig.suptitle(\"Generation of {} at different epochs\".format(data_classes[img_labels[j]]))\n plt.tight_layout()\n eval_folder = \"results/\" + data_type + \"_\" + version + \"/knn/\"\n if not os.path.exists(eval_folder):\n os.makedirs(eval_folder)\n path_to_file = eval_folder + \"neighbor_evo_{}.png\".format(data_classes[img_labels[j]])\n plt.savefig(path_to_file)\n plt.close()", "title": "" }, { "docid": "4d3242b4dd84997765fdec5aeee065f2", "score": 
"0.5263439", "text": "def build_seg_net(self, img ):\n\n # Q: convert rgb to bgr (?) \n self.conv1_1 = self.conv2d(img, \"conv1_1\")\n self.conv1_2 = self.conv2d(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, \"pool1\" ) \n \n self.conv2_1 = self.conv2d(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv2d(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, \"pool2\" )\n \n self.conv3_1 = self.conv2d(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv2d(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv2d(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, \"pool3\" )\n\n self.conv4_1 = self.conv2d(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv2d(self.conv4_1, \"conv4_2\") \n self.conv4_3 = self.conv2d(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, \"pool4\" )\n\n self.conv5_1 = self.conv2d(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv2d(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv2d(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, \"pool5\" )\n\n # fully conv \n self.fc6 = self.fc(self.pool5, \"fc6\" )\n self.fc7 = self.fc(self.fc6, \"fc7\" )\n self.score_fr = self.fc(self.fc7, \"score_fr\" )\n # upsampling : strided convolution\n self.score_pool4 = self.score_layer( self.pool4, \"score_pool4\")\n self.upscore2 = self.upsample_layer(self.score_fr, \"upscore2\", 2, self.score_pool4.get_shape().as_list() ) # Q: why not conv7 as in the paper? \n self.fuse_pool4 = tf.add( self.upscore2, self.score_pool4)\n \n self.score_pool3 = self.score_layer( self.pool3, \"score_pool3\")\n self.upscore4 = self.upsample_layer( self.fuse_pool4, \"upscore4\", 2, self.score_pool3.get_shape().as_list() )\n self.fuse_pool3 = tf.add( self.upscore4, self.score_pool3)\n \n imgshape = img.get_shape().as_list()\n target_shape = [ self.batch_size, imgshape[1], imgshape[2], self.num_classes ]\n self.upscore32 = self.upsample_layer( self.fuse_pool3, \"upscore32\", 8, target_shape ) # 8x upsampled prediction\n \n self.result = self.upscore32", "title": "" }, { "docid": "abeed966a78de12d605148a0c8b8f558", "score": "0.5261734", "text": "def __init__(self, embed_size, vocab: VocabEntry):\n super(ModelEmbeddings, self).__init__()\n\n ## A4 code\n #pad_token_idx = vocab.src['<pad>']\n #self.embeddings = nn.Embedding(len(vocab.src), embed_size, padding_idx=pad_token_idx)\n ## End A4 code\n\n ### YOUR CODE HERE for part 1j\n self.char_embed_size = 50\n self.word_embed_size = embed_size\n self.char_embedding = nn.Embedding(len(vocab.char2id), self.char_embed_size, padding_idx=vocab.char_pad)\n self.cnn = CNN(e_char=self.char_embed_size, e_word=embed_size)\n self.highway = Highway(word_embed_size=embed_size)\n self.dropout = nn.Dropout(0.3)\n ### END YOUR CODE", "title": "" }, { "docid": "6b034d40fff4d6f1298f59b2c34b8449", "score": "0.5253079", "text": "def get_images_no_storytext(self, article_body, article_url):\n images_dictionary = dict()\n gremlin_zapper = GremlinZapper()\n\n tables = article_body.findAll('table')\n\n for table in tables:\n # print item.parent\n add_to_story = False\n cells = table.find_all('td')\n image_text = None\n image_src = None\n image_height = None\n image_width = None\n\n if cells:\n for cell in cells:\n\n image = cell.find('img')\n if image:\n # print image['src']\n\n if image_src is not None:\n images_dictionary[image_src] = {\"image_text\": image_text,\n \"image_height\": str(image_height),\n \"image_width\": str(image_width),\n \"image_id\": str(self.get_next_index())}\n 
image_src = image['src']\n image_src = urljoin(article_url, image_src)\n try:\n image_width, image_height = self.get_image_dimens(image_src)\n\n if 'height' in image:\n image_height = image['height']\n if 'width' in image:\n image_width = image['width']\n\n image_text = None\n except IOError, urllib.URLError:\n if 'height' in image and 'width' in image:\n image_height = image['height']\n image_width = image['width']\n image_text = None\n else:\n image_width = None\n image_height = None\n image_text = None\n image_src = None\n else:\n image_src = image['src']\n image_src = urljoin(article_url, image_src)\n try:\n image_width, image_height = self.get_image_dimens(image_src)\n\n if 'height' in image:\n image_height = image['height']\n if 'width' in image:\n image_width = image['width']\n except IOError, urllib.URLError:\n if 'height' in image and 'width' in image:\n image_height = image['height']\n image_width = image['width']\n image_text = None\n else:\n image_width = None\n image_height = None\n image_text = None\n image_src = None\n\n else:\n image_text = cell.get_text()\n matches = self.word_regex.findall(image_text)\n image_text = ' '.join(matches)\n image_text = gremlin_zapper.zap_string(image_text)\n\n if image_src is not None:\n images_dictionary[image_src] = {\"image_text\": image_text,\n \"image_height\": str(image_height),\n \"image_width\": str(image_width),\n \"image_id\": str(self.get_next_index())}\n table.extract()\n return images_dictionary", "title": "" }, { "docid": "708afa2d64125c3b45e168d180f85a54", "score": "0.5243567", "text": "def __getitem__(self, idx):\n metadata = self.metadata_list[idx]\n img_path = DATA_DIR + metadata[\"image_path\"]\n class_label = metadata[\"class_label\"]\n image = cv2.imread(img_path)\n image = cv2.resize(image, (128, 256))\n label = CLOTH_CATEGORIES[class_label]\n\n if self.transform:\n image = self.transform(image)\n\n return image, label", "title": "" }, { "docid": "1ab0759cb111ffc5a766c5f8f269f21a", "score": "0.52283555", "text": "def __getitem__(self, index):\n image_id = self.image_ids[index]\n \n filename = self.image_id_to_filename[image_id]\n image_path = os.path.join(self.image_dir, filename)\n with open(image_path, 'rb') as f:\n with PIL.Image.open(f) as image:\n WW, HH = image.size\n image = self.transform(image.convert('RGB'))\n\n H, W = self.image_size\n objs, boxes, masks = [], [], []\n for object_data in self.image_id_to_objects[image_id]:\n objs.append(object_data['category_id'])\n x, y, w, h = object_data['bbox']\n x0 = x / WW\n y0 = y / HH\n x1 = (x + w) / WW\n y1 = (y + h) / HH\n boxes.append(torch.FloatTensor([x0, y0, x1, y1]))\n\n # This will give a numpy array of shape (HH, WW)\n mask = seg_to_mask(object_data['segmentation'], WW, HH)\n\n # Crop the mask according to the bounding box, being careful to\n # ensure that we don't crop a zero-area region\n mx0, mx1 = int(round(x)), int(round(x + w))\n my0, my1 = int(round(y)), int(round(y + h))\n mx1 = max(mx0 + 1, mx1)\n my1 = max(my0 + 1, my1)\n mask = mask[my0:my1, mx0:mx1]\n mask = imresize(255.0 * mask, (self.mask_size, self.mask_size),\n mode='constant')\n mask = torch.from_numpy((mask > 128).astype(np.int64))\n masks.append(mask)\n\n # Add dummy __image__ object\n objs.append(self.vocab['object_name_to_idx']['__image__'])\n boxes.append(torch.FloatTensor([0, 0, 1, 1]))\n masks.append(torch.ones(self.mask_size, self.mask_size).long())\n\n objs = torch.LongTensor(objs)\n boxes = torch.stack(boxes, dim=0)\n masks = torch.stack(masks, dim=0)\n\n box_areas = (boxes[:, 2] - 
boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n # Compute centers of all objects\n obj_centers = []\n _, MH, MW = masks.size()\n for i, obj_idx in enumerate(objs):\n x0, y0, x1, y1 = boxes[i]\n mask = (masks[i] == 1)\n xs = torch.linspace(x0, x1, MW).view(1, MW).expand(MH, MW)\n ys = torch.linspace(y0, y1, MH).view(MH, 1).expand(MH, MW)\n if mask.sum() == 0:\n mean_x = 0.5 * (x0 + x1)\n mean_y = 0.5 * (y0 + y1)\n else:\n mean_x = xs[mask].mean()\n mean_y = ys[mask].mean()\n obj_centers.append([mean_x, mean_y])\n obj_centers = torch.FloatTensor(obj_centers)\n\n # Add triples\n triples = []\n num_objs = objs.size(0)\n __image__ = self.vocab['object_name_to_idx']['__image__']\n real_objs = []\n if num_objs > 1:\n real_objs = (objs != __image__).nonzero().squeeze(1)\n for cur in real_objs:\n choices = [obj for obj in real_objs if obj != cur]\n if len(choices) == 0 or not self.include_relationships:\n break\n other = random.choice(choices)\n if random.random() > 0.5:\n s, o = cur, other\n else:\n s, o = other, cur\n\n # Check for inside / surrounding\n sx0, sy0, sx1, sy1 = boxes[s]\n ox0, oy0, ox1, oy1 = boxes[o]\n d = obj_centers[s] - obj_centers[o]\n theta = math.atan2(d[1], d[0])\n\n if sx0 < ox0 and sx1 > ox1 and sy0 < oy0 and sy1 > oy1:\n p = 'surrounding'\n elif sx0 > ox0 and sx1 < ox1 and sy0 > oy0 and sy1 < oy1:\n p = 'inside'\n elif theta >= 3 * math.pi / 4 or theta <= -3 * math.pi / 4:\n p = 'left of'\n elif -3 * math.pi / 4 <= theta < -math.pi / 4:\n p = 'above'\n elif -math.pi / 4 <= theta < math.pi / 4:\n p = 'right of'\n elif math.pi / 4 <= theta < 3 * math.pi / 4:\n p = 'below'\n p = self.vocab['pred_name_to_idx'][p]\n triples.append([s, p, o])\n\n # Add __in_image__ triples\n O = objs.size(0)\n in_image = self.vocab['pred_name_to_idx']['__in_image__']\n for i in range(O - 1):\n triples.append([i, in_image, O - 1])\n \n triples = torch.LongTensor(triples)\n return image, objs, boxes, masks, triples", "title": "" }, { "docid": "09b39e660ffc6911c6321ea121412c5d", "score": "0.5223177", "text": "def display_label_images(images, labels, label):\n print(labels)\n limit = 24 # show a max of 24 images.\n plt.figure(figsize=(15, 5))\n i = 1\n start = labels.index(label)\n end = start + labels.count(label)\n for image in images[start:end][:limit]:\n plt.subplot(3, 8, i) # 3 rows, 8 per row.\n plt.axis('off')\n i += 1\n plt.imshow(image)\n plt.show()", "title": "" }, { "docid": "c81c42c08fa592a9c6eb6bf774fee0d2", "score": "0.52221805", "text": "def detection_collate(self,batch):\n targets = []\n imgs = []\n if self.batch_count % 5 == 0:\n self.rescale_size = random.choice(range(3, 6, 1)) / 10\n #print(self.rescale_size)\n for _, sample in enumerate(batch):\n # print(\"sample:\",sample)\n for _, tup in enumerate(sample):\n if torch.is_tensor(tup):\n #tup = tup.astype(np.float32)\n tup=tup.numpy()\n #print(\"tup type\",type(tup))\n interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST,\n cv2.INTER_LANCZOS4]\n interp_method = interp_methods[random.randrange(5)]\n #print(tup.shape,self.rescale_size)\n #tup=cv2.resize(tup,None,fx=self.rescale_size,fy=self.rescale_size,interpolation=interp_method)\n tup = cv2.resize(tup, (int(my_img_dim*self.rescale_size),int(my_img_dim*self.rescale_size)), interpolation=interp_method)\n #tup0 = tup\n #print(\"tup:\",tup.shape)\n #cv2.imshow('tup',tup)\n #cv2.waitKey()\n tup-=(127, 127, 127)\n tup/= 127\n tup=tup.transpose(2,0,1)\n tup=torch.from_numpy(tup)\n #print(\"tup:\", tup.shape)\n imgs.append(tup)\n elif isinstance(tup, 
type(np.empty(0))):\n # tupa = tup\n # h, w, _ = tup0.shape\n # b_w_t = (tupa[:, 2] - tupa[:, 0]) * w\n # b_h_t = (tupa[:, 3] - tupa[:, 1]) * h\n # mask_b = np.minimum(b_w_t, b_h_t) > 4.0\n # tup = tup[mask_b]\n annos = torch.from_numpy(tup).float()\n #print(\"annos:\", annos.shape, annos)\n targets.append(annos)\n self.batch_count += 1\n #print(torch.stack(imgs, 0).shape)\n return (torch.stack(imgs, 0), targets)", "title": "" }, { "docid": "45689c5075227f7f5dcf975ef85978a9", "score": "0.5211282", "text": "def get_images_storytext(self, story_text, article_url):\n images_dictionary = dict()\n gremlin_zapper = GremlinZapper()\n\n tables = story_text.findAll('table')\n\n for table in tables:\n # print item.parent\n add_to_story = False\n cells = table.find_all('td')\n\n if cells:\n add_to_story = False\n\n for cell in cells:\n image = cell.find('img')\n if image:\n image_src = image['src']\n image_src = urljoin(article_url, image_src)\n try:\n image_width, image_height = self.get_image_dimens(image_src)\n if 'height' in image:\n image_height = image['height']\n if 'width' in image:\n image_width = image['width']\n image_text = cell.get_text()\n matches = self.word_regex.findall(image_text)\n image_text = ' '.join(matches)\n image_text = gremlin_zapper.zap_string(image_text)\n images_dictionary[image_src] = {\"image_text\": image_text,\n \"image_height\": str(image_height),\n \"image_width\": str(image_width),\n \"image_id\": str(self.get_next_index())}\n except IOError, urllib.URLError:\n if 'height' in image and 'width' in image:\n image_height = image['height']\n image_width = image['width']\n image_text = cell.get_text()\n matches = self.word_regex.findall(image_text)\n image_text = ' '.join(matches)\n image_text = gremlin_zapper.zap_string(image_text)\n images_dictionary[image_src] = {\"image_text\": image_text,\n \"image_height\": str(image_height),\n \"image_width\": str(image_width),\n \"image_id\": str(self.get_next_index())}\n\n table.extract()\n return images_dictionary", "title": "" }, { "docid": "ba0c8597ef7a59392ffe4dbc7954bf12", "score": "0.5211046", "text": "def single_image(self):", "title": "" }, { "docid": "0b149b7a4af204780b829fbef02dad67", "score": "0.521011", "text": "def create_coco_lists(ids_list, image_dir, annotations_dir, category_mapper):\n parser = XmlParser()\n\n images_list = []\n annotations_list = []\n count = 0\n\n for image_id in ids_list:\n\n image = cv2.imread(os.path.join(image_dir, image_id) + '.jpg')\n images_list.append({'id': image_id,\n 'width': image.shape[1],\n 'height': image.shape[0],\n 'file_name': image_id + '.jpg',\n 'license': 1})\n\n gt_frame = parser.load(\n os.path.join(\n annotations_dir,\n image_id) + '.xml')\n object_list = gt_frame['object']\n category_names = [object_inst['name'] for object_inst in object_list]\n\n # Convert British nouns used in PascalVOC to American nouns used in\n # COCO\n category_names = ['dining table' if category_name ==\n 'diningtable' else category_name for category_name in category_names]\n category_names = ['motorcycle' if category_name ==\n 'motorbike' else category_name for category_name in category_names]\n category_names = ['potted plant' if category_name ==\n 'pottedplant' else category_name for category_name in category_names]\n category_names = ['airplane' if category_name ==\n 'aeroplane' else category_name for category_name in category_names]\n category_names = ['tv' if category_name ==\n 'tvmonitor' else category_name for category_name in category_names]\n category_names = ['couch' if 
category_name ==\n 'sofa' else category_name for category_name in category_names]\n\n frame_boxes = np.array(\n [\n [\n object_inst['bndbox']['xmin'],\n object_inst['bndbox']['ymin'],\n object_inst['bndbox']['xmax'],\n object_inst['bndbox']['ymax']] for object_inst in object_list]).astype(\n np.float)\n\n for bbox, category_name in zip(frame_boxes, category_names):\n bbox_coco = [\n bbox[0],\n bbox[1],\n bbox[2] - bbox[0],\n bbox[3] - bbox[1]]\n\n annotations_list.append({'image_id': image_id,\n 'id': count,\n 'category_id': category_mapper[category_name],\n 'bbox': bbox_coco,\n 'area': bbox_coco[2] * bbox_coco[3],\n 'iscrowd': 0})\n count += 1\n\n return images_list, annotations_list", "title": "" }, { "docid": "27f71441c55c7852fd5f3a0672dd5cf8", "score": "0.52059144", "text": "def forward(self, images):\n \n with torch.no_grad():\n features = self.resnext(images)\n \n features = features.view(features.size(0), -1)\n features = self.embed(features)\n features = self.bn(features)\n\n return features", "title": "" }, { "docid": "9ba757ff0dc0ea5f35aba394e976c952", "score": "0.51933235", "text": "def __init__(self, images, labels=None, labeler=None, cast=False):\n self.images = images\n self.labels = labels\n self.labeler = labeler\n self.cast = cast", "title": "" }, { "docid": "90851cf5ad1300cbad370ba4637f841d", "score": "0.5188816", "text": "async def images(self, ctx, search=100):\n await self.do_removal(ctx, search, lambda e: len(e.embeds) or len(e.attachments))", "title": "" }, { "docid": "f18fb6f7411cc9bc5c428a51e51aad05", "score": "0.51870394", "text": "def dectect(img, neural_net, transform):\r\n height, width = img.shape[:2]\r\n ## convert from numpy array to torch tensor\r\n transformed_img = transform(img)[0]\r\n ## turn transformed image from a numpy array to a torch tensor\r\n ## and switch/permute the color channels from RGB (0, 1, 2) to BRG (2, 0, 1)\r\n x = torch.from_numpy(transformed_img).permute(2, 0, 1)\r\n ## add fake dimension corresponding to the batch and turn it into a torch Variable\r\n x = Variable(x.unsqueeze(0))\r\n y = neural_net(x) # feed torch Variable into the neural network\r\n ## get values of output y (torch Tensor)\r\n # detections = [batch, num of classes, num of occurence of classes, (score, x0, y0, x1, y1)]\r\n detections = y.data\r\n ## [width, height, width, height] = upper-left corner to lower-right corner\r\n scale = torch.Tensor([width, height, width, height])\r\n for i in range(detections.size(1)):\r\n occur = 0\r\n while detections[0, i, occur, 0] >= 0.6:\r\n ## get the points from the detection box\r\n pt = (detections[0, i, occur, 1:] * scale).numpy()\r\n ## draw the rectangle on the object in the image\r\n cv2.rectangle(img, (int(pt[0]), int(pt[1])),\r\n (int(pt[2]), int(pt[3])), COLORS[i % 3], 2)\r\n ## display the label on top\r\n cv2.putText(img, labelmap[i - 1], (int(pt[0]), int(pt[1])),\r\n cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\r\n occur += 1\r\n return img", "title": "" }, { "docid": "067f62e94083951f69038498e90ae1b5", "score": "0.5186599", "text": "def collate_fn(data):\n # Sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions, ids, img_ids = zip(*data)\n\n # Merge images (convert tuple of 3D tensor to 4D tensor)\n images = torch.stack(images, 0)\n\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = 
lengths[i]\n targets[i, :end] = cap[:end]\n\n return images, targets, lengths, ids", "title": "" }, { "docid": "2c93f826455995b9146c866eb69c45c3", "score": "0.5184367", "text": "def process_outputs( image_files, outputs, metadata ):\n\n for image_file, output in zip(image_files, outputs):\n\n basename = os.path.basename(image_file)[:-4] \n output = output['instances'].get_fields() # it has 4 keys here: 'pred_boxes', 'scores', 'pred_classes', 'pred_masks'\n\n total_detected_instances = len(output['scores'])\n\n\n if total_detected_instances > 0: # otherwise no detected instance\n\n sem = torch.zeros_like( output['pred_masks'][0] ).float()\n ins = torch.zeros_like( output['pred_masks'][0] ).float()\n\n current_instance = 0 \n for i in range( total_detected_instances-1, -1, -1 ): # backwards indexing (lowest score first). 'scores' are sorted from detectron2\n \n # these two are used to create sem and ins map\n current_class = output['pred_classes'][i] + 1 # I set the first class is 1 for ade, whereas in detectron2 they shift classes by 1 \n current_instance += 1 \n\n sem.masked_fill_( output['pred_masks'][i], current_class )\n ins.masked_fill_( output['pred_masks'][i], current_instance )\n\n # we assume there are only 255 instances at most in one iamge \n ins[ins>255] = 255 \n\n # then save sem and ins using basename \n ins = Image.fromarray( np.array(ins.cpu()).astype('uint8') )\n sem = Image.fromarray( np.array(sem.cpu()).astype('uint8') )\n\n sem.save( 'annotation/' + basename+'.png' )\n ins.save( 'annotation_instance/' + basename+'.png' )", "title": "" }, { "docid": "8a3389dbd5d9496b88199e5984db2243", "score": "0.5184072", "text": "def learning_iteration(self):\n # print 'DoCaffeTrainng'\n startTime = time.time()\n # print 'q_imStack.qsize() : ', self.q_imStack.qsize()\n # print 'q_labelStack.qsize() : ', self.q_labelStack.qsize()\n\n\n # if too few items in queue do not proceed with iterations\n if self.q_imStack.qsize() < 16*5:\n return None\n\n\n\n batchsize = self.solver.net.blobs['data'].data.shape[0]\n # print 'batchsize', batchsize\n # print \"self.solver.net.blobs['data'].data\", self.solver.net.blobs['data'].data.shape\n # print \"self.solver.net.blobs['label_x'].data\",self.solver.net.blobs['label_x'].data.shape\n for i in range(batchsize):\n im = self.q_imStack.get() #320x240x3\n y = self.q_labelStack.get()\n\n im_noisy = Noise.noisy( 'gauss', im )\n im_gry = np.mean( im_noisy, axis=2)\n\n\n # cv2.imwrite( str(i)+'__.png', x )\n\n cencusTR = ct.censusTransform( im_gry.astype('uint8') )\n edges_out = cv2.Canny(cv2.blur(im_gry.astype('uint8'),(3,3)),100,200)\n\n\n self.solver.net.blobs['data'].data[i,0,:,:] = self.zNormalized( im_gry.astype('float32') )\n self.solver.net.blobs['data'].data[i,1,:,:] = self.zNormalized( cencusTR.astype('float32') )\n self.solver.net.blobs['data'].data[i,1,:,:] = self.zNormalized( edges_out.astype('float32') )\n self.solver.net.blobs['label_x'].data[i,0] = y[0]\n self.solver.net.blobs['label_y'].data[i,0] = y[1]\n self.solver.net.blobs['label_z'].data[i,0] = y[2]\n self.solver.net.blobs['label_yaw'].data[i,0] = y[3]\n # print y[0], y[1], y[2], y[3]\n\n self.solver.step(1)\n self.caffeTrainingLossX[self.caffeIter] = self.solver.net.blobs['loss_x'].data\n self.caffeTrainingLossY[self.caffeIter] = self.solver.net.blobs['loss_y'].data\n self.caffeTrainingLossZ[self.caffeIter] = self.solver.net.blobs['loss_z'].data\n self.caffeTrainingLossYaw[self.caffeIter] = self.solver.net.blobs['loss_yaw'].data\n if self.caffeIter % 50 == 0 and self.caffeIter>0:\n 
print 'Writing File : train_loss.npy'\n np.save('train_loss_x.npy', self.caffeTrainingLossX[0:self.caffeIter])\n np.save('train_loss_y.npy', self.caffeTrainingLossY[0:self.caffeIter])\n np.save('train_loss_z.npy', self.caffeTrainingLossZ[0:self.caffeIter])\n np.save('train_loss_yaw.npy', self.caffeTrainingLossYaw[0:self.caffeIter])\n\n #time.sleep(.3)\n print 'my_iter=%d, solver_iter=%d, time=%f, loss_x=%f, lossYaw=%f' % (self.caffeIter, self.solver.iter, time.time() - startTime, self.caffeTrainingLossX[self.caffeIter], self.caffeTrainingLossYaw[self.caffeIter])\n self.caffeIter = self.caffeIter + 1", "title": "" }, { "docid": "f4e749d6cefaa58305d8a1353ab01e7a", "score": "0.51839095", "text": "def load_imagecorpus():\n matrix_dir = \"data/Mats\"\n image_tag_file = \"data/imagelist.txt\"\n\n images = {}\n tags = {}\n image_tag_file = open(image_tag_file, \"r\")\n for irow in image_tag_file:\n row = irow.split()\n iname = row[0]\n if len(row[1:] ) > 0:\n tags[iname] = row[1:]\n try :\n images[iname] = np.load(os.path.join(matrix_dir, iname.split(\"\\\\\")[0]+os.sep+iname.split(\"\\\\\")[1].split(\".\")[0])+\".npy\")\n except:\n #print \"Unexpected error:\", sys.exc_info()[1]\n pass\n\n return (tags,images)", "title": "" }, { "docid": "0f0eb7ef7aa06ff7d34be0fc56af6cb5", "score": "0.5181396", "text": "def img_tags(tree):\n return CSSSelector('img')(tree)", "title": "" }, { "docid": "dacee8c9a212f1dddee4e3e13922ac94", "score": "0.51696944", "text": "def detection_collate(batch):\n\n targets = []\n imgs = []\n image_ids = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n image_ids.append(sample[2])\n return torch.stack(imgs, 0), targets, image_ids", "title": "" }, { "docid": "57c458db7f3000a30b92329af87ea65e", "score": "0.51660115", "text": "def images_detection(self, imgs, orig_dim_list):\n args = self.detector_opt\n if not self.model:\n self.load_model()\n with torch.no_grad():\n imgs = imgs.to(args.device) if args else imgs.cuda()\n scaling_factors = torch.FloatTensor([1./min(self.inp_dim / orig_dim[0], self.inp_dim / orig_dim[1]) for orig_dim in orig_dim_list]).view(-1, 1)\n scaling_factors = scaling_factors.to(args.device) if args else scaling_factors.cuda()\n prediction = self.model(imgs, scaling_factors) \n #change the pred format to alphapose (nms has already been done in effdeteval model)\n prediction = prediction.cpu()\n write = False\n for index, sample in enumerate(prediction):\n for det in sample:\n score = float(det[4])\n if score < .001: # stop when below this threshold, scores in descending order\n break\n if int(det[5]) != 1 or score < self.confidence:\n continue\n det_new = prediction.new(1,8)\n det_new[0,0] = index #index of img\n det_new[0,1:3] = det[0:2] # bbox x1,y1\n det_new[0,3:5] = det[0:2] + det[2:4] # bbox x2,y2\n det_new[0,6:7] = det[4] # cls conf\n det_new[0,7] = det[5] # cls idx\n if not write:\n dets = det_new\n write = True\n else:\n dets = torch.cat((dets, det_new)) \n if not write:\n return 0\n\n orig_dim_list = torch.index_select(orig_dim_list, 0, dets[:, 0].long())\n for i in range(dets.shape[0]):\n dets[i, [1, 3]] = torch.clamp(dets[i, [1, 3]], 0.0, orig_dim_list[i, 0])\n dets[i, [2, 4]] = torch.clamp(dets[i, [2, 4]], 0.0, orig_dim_list[i, 1])\n\n return dets", "title": "" }, { "docid": "614315ce875879b2101174025ad56258", "score": "0.51655924", "text": "def run(self):\n uri = directives.uri(self.arguments[0])\n if uri.endswith('.svg'):\n # the ? 
at the end makes docutil output an <img> instead of an object for the svg, which colorbox requires\n self.arguments[0] = '.thumbnail'.join(os.path.splitext(uri)) + '?'\n else:\n self.arguments[0] = '.thumbnail'.join(os.path.splitext(uri))\n self.options['target'] = uri\n with Image2.open('.' + uri) as im:\n ow, oh = im.size\n attrs = {'data-orig-width': str(ow),\n 'data-orig-height': str(oh),\n 'data-target': uri}\n (orig_node,) = Image.run(self)\n html_doc = docutils.utils.new_document(\"\")\n html_doc.append(orig_node)\n html_root = parseString(docutils.core.publish_from_doctree(html_doc, writer_name='html').decode())\n html_node = html_root.getElementsByTagName(\"img\")[0]\n for key, val in attrs.items():\n html_node.setAttribute(key, val)\n html_node.setAttribute('class', \"{0} simpic\".format(html_node.getAttribute('class')))\n raw_html = html_node.toxml()\n node = docutils.nodes.raw('', raw_html, format='html')\n return [node]", "title": "" }, { "docid": "c500d2fbc0af3bb6f1e42c844391870b", "score": "0.51649487", "text": "def build_network(self):\n x = torch.zeros(self.input_shape)\n input_loc_size = self.args.A\n # input_loc_size = 2\n loc = torch.zeros(self.input_shape[0],input_loc_size)\n hidden = None\n input_act_size = 256\n if self.args.actOnElev:\n input_act_size += 1\n if self.args.actOnAzim:\n input_act_size += 1\n if self.args.actOnTime:\n input_act_size += 1\n out = x\n\n self.layer_dict = nn.ModuleDict()\n self.upscale_shapes.append(x.shape)\n\n for i in range(self.num_stages):\n self.layer_dict['conv{}'.format(i)] = MetaConvNormLayerReLU(input_shape=out.shape,\n num_filters=self.cnn_filters,\n kernel_size=self.kernel_size, stride=self.conv_stride,\n padding=self.args.conv_padding,\n args=self.args, normalization=True,\n meta_layer=self.meta_classifier,\n no_bn_learnable_params=False,\n device=self.device)\n out = self.layer_dict['conv{}'.format(i)](out, training=True, num_step=0)\n\n if self.args.max_pooling:\n out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)\n\n\n if not self.args.max_pooling:\n out = F.avg_pool2d(out, out.shape[2])\n\n # (1) feature encoder\n self.encoder_features_shape = list(out.shape)\n out = out.view(out.shape[0], -1)\n\n # (2) sense motion\n self.layer_dict['sense_motion'] = WordEmbed(input_shape=(out.shape[0],input_loc_size),\n num_filters=128) # input_size_loc = 2\n y = self.layer_dict['sense_motion'](loc)\n # y = F.relu(self.layer_dict['sense_motion'](loc))\n\n # (3) fuse the feature vector and sense location vector to give a fusion vector\n self.layer_dict['fuse'] = MetaLinearLayer(input_shape=(out.shape[0],out.shape[1]+self.args.embed_size),\n num_filters=256)\n out = torch.cat([out,y],dim=1)\n out = self.layer_dict['fuse'](out)\n # out = MetaLayerNormLayer(input_feature_shape=z.shape[1:])\n\n # (4) aggregate\n self.layer_dict['aggregate'] = SimpleRNN(input_size=(out.shape[0],256),hidden_size=(out.shape[0],256))\n \n out = self.layer_dict['aggregate'](out,hidden)\n\n # (5) act module\n if self.args.actorType == 'actor':\n act = out \n self.layer_dict['act'] = MetaLinearLayer(input_shape=(out.shape[0],input_act_size),num_filters=self.args.A)\n if self.args.actOnElev:\n act = torch.cat([act,torch.zeros(out.shape[0],1)],dim=1)\n if self.args.actOnAzim:\n act = torch.cat([act,torch.zeros(out.shape[0],1)],dim=1)\n if self.args.actOnTime:\n act = torch.cat([act,torch.zeros(out.shape[0],1)],dim=1)\n act = self.layer_dict['act'](act)\n \n\n \n # (6) classifier\n self.layer_dict['classifier'] = 
MetaLinearLayer(input_shape=out.shape,num_filters=self.num_output_classes)\n out = self.layer_dict['classifier'](out)\n\n print(\"VGGNetwork build\", out.shape)", "title": "" }, { "docid": "9021b2119a9c88f5d7af05a397d86156", "score": "0.51511747", "text": "def inference(self, images):\n \n \n \n \n #images = tf.reshape(images, [-1, self.image_size*self.image_size*3]) 448 448 3 \n # ๆญๅปบ็ฝ‘็ปœๆจกๅž‹\n #def _conv_layer(self,x,id,num_filters,filter_size,stride):\n net = self._conv_layer(images, 1, 64, 7, 2)# 224 224 64\n net = self._maxpool_layer(net, 1, 2, 2) # 112 112 63\n net = self._conv_layer(net, 2, 192, 3, 1) # 112 112 192\n net = self._maxpool_layer(net, 2, 2, 2) # 56 56 192\n net = self._conv_layer(net, 3, 128, 1, 1) # 56 56 128\n net = self._conv_layer(net, 4, 256, 3, 1) # 56 56 256\n net = self._conv_layer(net, 5, 256, 1, 1)# 56 56 256\n net = self._conv_layer(net, 6, 512, 3, 1)# 56 56 512\n net = self._maxpool_layer(net, 6, 2, 2) # 28 28 128\n net = self._conv_layer(net, 7, 256, 1, 1) # 28 28 256\n net = self._conv_layer(net, 8, 512, 3, 1)# 28 28 512\n net = self._conv_layer(net, 9, 256, 1, 1)# 28 28 256\n net = self._conv_layer(net, 10, 512, 3, 1)# 28 28 512\n net = self._conv_layer(net, 11, 256, 1, 1)# 28 28 256\n net = self._conv_layer(net, 12, 512, 3, 1)# 28 28 512\n net = self._conv_layer(net, 13, 256, 1, 1)# 28 28 256\n net = self._conv_layer(net, 14, 512, 3, 1)# 28 28 512\n net = self._conv_layer(net, 15, 512, 1, 1)# 28 28 512\n net = self._conv_layer(net, 16, 1024, 3, 1)# 28 28 1024\n net = self._maxpool_layer(net, 16, 2, 2) #14 14 1024\n net = self._conv_layer(net, 17, 512, 1, 1)#14 14 512\n net = self._conv_layer(net, 18, 1024, 3, 1)#14 14 1024\n net = self._conv_layer(net, 19, 512, 1, 1)#14 14 512\n net = self._conv_layer(net, 20, 1024, 3, 1)#14 14 1024\n net = self._conv_layer(net, 21, 1024, 3, 1)#14 14 1024\n net = self._conv_layer(net, 22, 1024, 3, 2)#7 7 1024\n net = self._conv_layer(net, 23, 1024, 3, 1)#7 7 1024\n net = self._conv_layer(net, 24, 1024, 3, 1)#7 7 1024\n net = self._flatten(net) # 7 * 7* 1024 = 50176\n \n #fully0 = self.fully('local0', net, self.image_size*self.image_size*3, 256)\n \n #def fully(self, scope, input, in_dimension, out_dimension, \n # leaky=True, pretrain=True, train=True):\n \"\"\"com2 TA Fully connection layer\n Args:\n scope: variable_scope name\n input: [batch_size, ???]\n out_dimension: int32\n Return:\n output: 2-D tensor [batch_size, out_dimension]\n \"\"\" \n fully1 = self.fully('local1', net, 50176, 512)\n\n fully2 = self.fully('local2', fully1, 512, 4096)\n\n fully3 = self.fully('local3', fully2, 4096, self.cell_size * self.cell_size * \n (self.num_classes + self.boxes_per_cell * 5), leaky=False,\n pretrain=False, train=True)\n# net = self._fc_layer(net, 25, 512, activation=leaky_relu)\n# net = self._fc_layer(net, 26, 4096, activation=leaky_relu)\n# net = self._fc_layer(net, 27, self.cell_size*self.cell_size\n# *(self.boxes_per_cell*5+20))\n \n n1 = self.cell_size * self.cell_size * self.num_classes #7 7 20\n\n n2 = n1 + self.cell_size * self.cell_size * self.boxes_per_cell # 7 7 (20 + 2)\n\n class_probs = tf.reshape(fully3[:, 0:n1], (-1, self.cell_size, self.cell_size, self.num_classes))\n scales = tf.reshape(fully3[:, n1:n2], (-1, self.cell_size, self.cell_size, self.boxes_per_cell))\n boxes = tf.reshape(fully3[:, n2:], (-1, self.cell_size, self.cell_size, self.boxes_per_cell * 4))\n \n predicts = tf.concat([class_probs, scales, boxes], 3)\n return predicts", "title": "" }, { "docid": "772265f931b0243e1c9a606b75d19aa0", 
"score": "0.5149088", "text": "def forward(self, images, is_training, reuse=False):\n pass", "title": "" }, { "docid": "772265f931b0243e1c9a606b75d19aa0", "score": "0.5149088", "text": "def forward(self, images, is_training, reuse=False):\n pass", "title": "" }, { "docid": "5ead77b7f4cb5d7cc7b4bf137b37022a", "score": "0.5147654", "text": "def caption_index_init_(self):\n\n # index caption 2 caption_id and caption 2 image_ids\n caption2id = {}\n id2caption = {}\n caption2imgids = {}\n for i, img in enumerate(self.imgs):\n for c in img['captions']:\n if not caption2id.has_key(c):\n id2caption[len(caption2id)] = c\n caption2id[c] = len(caption2id)\n caption2imgids[c] = []\n caption2imgids[c].append(i)\n self.caption2imgids = caption2imgids\n print (len(caption2imgids), 'unique cations')\n\n # parent captions are 1-word shorter than their children\n parent2children_captions = {}\n for c in caption2id.keys():\n for w in c.split():\n p = c.replace(w, '')\n p = p.replace(' ', ' ').strip()\n if not parent2children_captions.has_key(p):\n parent2children_captions[p] = []\n if c not in parent2children_captions[p]:\n parent2children_captions[p].append(c)\n self.parent2children_captions = parent2children_captions\n\n # identify parent captions for each image\n for img in self.imgs:\n img['modifiable'] = False\n img['parent_captions'] = []\n for p in parent2children_captions:\n if len(parent2children_captions[p]) >= 2:\n for c in parent2children_captions[p]:\n for imgid in caption2imgids[c]:\n self.imgs[imgid]['modifiable'] = True\n self.imgs[imgid]['parent_captions'] += [p]\n num_modifiable_imgs = 0\n for img in self.imgs:\n if img['modifiable']:\n num_modifiable_imgs += 1\n print ('Modifiable images', num_modifiable_imgs)", "title": "" }, { "docid": "8e8f37ca62d26bb3d1b111fae80b468e", "score": "0.5141748", "text": "def __getitem__(self, index):\n\n # Load PIL image\n source = Image.fromarray((self.data[\"noisy_images\"][index,:,:]/255.))\n target = Image.fromarray((self.data[\"clean_images\"][index,:,:]/255.))\n \n if self.transform:\n source = self.transform(source)\n \n# source = tvF.to_tensor(source)\n target = tvF.to_tensor(target)\n\n return source, target", "title": "" }, { "docid": "94dd1c90ca534af176607b51a2ba3eff", "score": "0.5141702", "text": "async def kitty(self):\n try:\n async with self.session.get(self.caturl) as r:\n result = await r.json()\n cat = discord.Embed(description=\"\\u2063\", color=discord.Color(0xffb6c1))\n cat.set_image(url=result['file'])\n # await self.bot.say(result['file'])\n await self.bot.say(embed=cat)\n except:\n await self.bot.say(\"Couldn't Get An Image\")", "title": "" }, { "docid": "547ff38af15e07d26a3cbd112b34dfee", "score": "0.51394206", "text": "def __init__(self, in_channels=4, num_actions=18, with_sa=True, with_ga=True, goal_embedding_as_feature=False,\n share_image_embedding=False, mean_pool_feature_map=False, residual_connection=False):\n super(GDNet, self).__init__()\n self.with_sa = with_sa\n self.with_ga = with_ga\n self.goal_embedding_as_feature = goal_embedding_as_feature\n self.share_image_embedding = share_image_embedding\n self.mean_pool_feature_map = mean_pool_feature_map\n self.residual_connection = residual_connection\n self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n\n # input feature channel size\n self.C = 64\n self.H = 7\n self.W = 7\n self.E = 32\n # input goal dim\n self.D = 2 * in_channels\n\n if 
with_sa:\n if not with_ga:\n raise ValueError('Self-attention is only applicable together with goal-driven attention')\n # self-attention\n self.theta = nn.Conv2d(self.C, self.E, 1)\n self.phi = nn.Conv2d(self.C, self.E, 1)\n self.g = nn.Conv2d(self.C, self.E, 1)\n self.recover_dim = nn.Conv2d(self.E, self.C, 1)\n\n if with_ga:\n # goal-driven attention\n if not self.share_image_embedding:\n self.theta2 = nn.Conv2d(self.C, self.E, 1)\n self.alpha = nn.Linear(self.D, self.E)\n image_feature_dim = self.C * 1 * 1\n else:\n if mean_pool_feature_map:\n image_feature_dim = self.C * 1 * 1\n else:\n image_feature_dim = self.C * self.H * self.W\n\n if goal_embedding_as_feature:\n goal_feature_dim = self.E\n else:\n goal_feature_dim = self.D\n\n # action classification layers\n self.fc4 = nn.Linear(image_feature_dim + goal_feature_dim, 256)\n self.fc5 = nn.Linear(256, 520)\n self.fc6 = nn.Linear(520, num_actions)\n\n # for visualization\n self.attention_weights = None", "title": "" }, { "docid": "4852ac90e2e670210e6e22e76cf417ac", "score": "0.5135648", "text": "def detection_collate(batch):\n targets = []\n imgs = []\n for _, sample in enumerate(batch):\n for _, tup in enumerate(sample):\n if torch.is_tensor(tup):\n print(\"tup:\",tup.shape)\n imgs.append(tup)\n elif isinstance(tup, type(np.empty(0))):\n annos = torch.from_numpy(tup).float()\n print(\"annos:\",annos.shape,annos)\n targets.append(annos)\n return (torch.stack(imgs, 0), targets)", "title": "" }, { "docid": "cfd264180371df8eb3935e2d36dff8f2", "score": "0.5131225", "text": "def __init__(self, embed_size, finetune=False, cnn_type='vgg19', use_abs=False, no_imgnorm=False):\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n self.cnn = self.get_cnn(cnn_type, True)\n for param in self.cnn.parameters():\n param.requires_grad = finetune\n if cnn_type.startswith('vgg'):\n self.fc = nn.Linear(self.cnn.classifier._modules['6'].in_features, embed_size)\n self.cnn.classifier = nn.Sequential(*list(self.cnn.classifier.children())[:-1])\n elif cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n self.init_weights()", "title": "" }, { "docid": "d50e0ecb0c70fa674d76bc260436c04b", "score": "0.51260835", "text": "def __call__(self, image, debug=False):\n # TODO: output should have length self.n_classes\n# conv_layers = self.depth * [hk.Conv2D(self.n_channels,\n# kernel_shape=3,\n# w_init=self.initializer,\n# b_init=self.initializer,\n# stride=2),\n# jax.nn.relu]\n# convnet = hk.Sequential(conv_layers + [hk.Flatten()])\n\n with_bias = False\n strides = [1,2,1,2,1,2]\n names = ['misc'] + ['conv']*5\n \n conv_layers = [\n [\n hk.Conv2D(self.n_channels,\n kernel_shape=3,\n w_init=self.initializer,\n b_init=self.initializer,\n with_bias=with_bias,\n stride=stride,\n name=name),\n jax.nn.relu,\n debug_layer(debug),\n ]\n for stride, name in zip(strides, names)\n ]\n\n conv_layers = [l for layer in conv_layers for l in layer]\n convnet = hk.Sequential(conv_layers + [\n hk.Flatten(),\n hk.Linear(self.n_classes,\n w_init=self.initializer,\n b_init=self.initializer,\n name='misc'),\n debug_layer(debug),\n ])\n\n return convnet(image)", "title": "" }, { "docid": "c46b3a7e98ccbbb19048b0fce0f52972", "score": "0.5122941", "text": "def collate_fn(self, batch):\n\n images = list()\n probabilities = list()\n\n for b in batch:\n images.append(b[0])\n probabilities.append(b[1])\n\n images = torch.stack(images, 
dim=0)\n probabilities = torch.stack(probabilities, dim=0)\n\n return images, probabilities # tensor (N, 3, 224, 224), 1 list of N tensors (40 items) each", "title": "" }, { "docid": "bf2473b8f12c931f8c036e7a5e0c9e5e", "score": "0.5122725", "text": "def __getitem__(self, i):\n # turn the i/5-th image to tensor format\n img = torch.FloatTensor(self.imgs[i // self.cpi] / 255.)\n\n if self.transform is not None:\n img = self.transform(img)\n\n caption = torch.LongTensor(self.captions[i]) #turn the i-th caption to tensor format\n caplen = torch.LongTensor([self.caplens[i]]) #turn the i-th caption length to tensor format\n\n if self.split is 'train': # for trainning\n return img, caption, caplen\n else: # For validation and testing,also return all the 5 captions for the image to get the BLEU-4 score\n all_captions = torch.LongTensor(\n self.captions[((i // self.cpi) * self.cpi):(((i // self.cpi) * self.cpi) + self.cpi)])\n return img, caption, caplen, all_captions", "title": "" }, { "docid": "dcfab8a0c8b4ebc485b8d869c672021c", "score": "0.51204824", "text": "def detection_collate(batch):\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets", "title": "" }, { "docid": "dcfab8a0c8b4ebc485b8d869c672021c", "score": "0.51204824", "text": "def detection_collate(batch):\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets", "title": "" }, { "docid": "5565e54710fb5f9121aeab92e46bd75d", "score": "0.51149994", "text": "def loop_forward(self,text_fts:torch.tensor,img_fts:torch.tensor,obj_fts:torch.tensor,ocr_fts:torch.tensor,ocr_tokens:torch.tensor) -> torch.tensor:\n\n\t\t# Combine Question and Grid Features\n\t\tout1 = self.net1(text_fts) # (IMG_DIM)\n\t\tassert(out1.size()[0]== (self.IMG_DIM)), \"Print {}, IMG_DIM : {}\".format(out1.size(),self.IMG_DIM)\n\t\tquestion_grid = F.sigmoid(out1)*img_fts # (IMG_DIM)\n\t\tassert(question_grid.size()[0]==(self.IMG_DIM))\n\n\t\t# Combine Question and Object Features\n\t\tout2 = self.net1(text_fts) # (IMG_DIM)\n\t\tassert(out2.size()[0]==(self.IMG_DIM))\n\t\tquestion_obj = F.sigmoid(out2)*obj_fts # (IMG_DIM)\n\t\tassert(question_obj.size()[0]==(self.IMG_DIM))\n\n\t\t# Combine Question and OCR Features\n\t\tout3 = self.net3(text_fts) # (IMG_DIM)\n\t\tassert(out3.size()[0]==(self.IMG_DIM))\n\t\tout4 = self.net4(ocr_fts) # (IMG_DIM)\n\t\tassert(out4.size()[0]==(self.IMG_DIM))\n\t\tquestion_ocr = F.sigmoid(out3)*out4 # (IMG_DIM)\n\t\tassert(question_ocr.size()[0]==(self.IMG_DIM))\n\n\t\t# Addition of all 3 vectors\n\t\tfinal_combined = question_grid + question_obj + question_ocr # (IMG_DIM)\n\n\t\t# Linear projection to answer space\n\t\tanswer_space = self.net5(final_combined) # (VOCAB_DIM)\n\t\tassert(answer_space.size()[0]==(self.VOCAB_DIM))\n\n\t\t# Output of copy net\n\t\tcopy_space = self.copy_net.loop_forward(final_combined,ocr_tokens) # (MAX_TOKENS)\n\t\tassert(copy_space.size()[0]==(self.MAX_TOKENS))\n\n\t\t# Final prediction\n\t\tprediction = F.softmax(torch.cat((answer_space,copy_space),dim=0),dim=1) # (VOCAB_DIM + MAX_TOKENS)\n\t\tassert(prediction.size()[0]==(self.VOCAB_DIM+self.MAX_TOKENS))\n\n\t\treturn prediction", "title": "" }, { "docid": "ec4b038850e24640e9f521f2970b637a", "score": "0.51075965", "text": "def forward(self, images):\n batch_size = images.shape[0]\n item_num = 5\n images = torch.reshape(images, (-1, 3, img_size, 
img_size))\n if self.need_rep:\n features, rep = self.cnn(images)\n else:\n features = self.cnn(images)\n features = features.reshape(batch_size, item_num, -1) # (32, 5, 1000)\n\n # Type specified representation\n masked = []\n masks = []\n for i in range(item_num):\n mask = F.relu(self.masks(torch.tensor(i).to(device)))\n masks.append(mask)\n masked.append(mask * features[:, i, :])\n masked = torch.stack(masked, dim=1)\n masks = torch.stack(masks, dim=0)\n\n # Non-local like matmul\n cross = torch.matmul(masked, masked.transpose(1, 2)) # (32, 5, 5)\n cross = self.bn(cross.view(batch_size, -1))\n\n out = F.relu(self.fc1(cross), inplace=True)\n out = self.sigmoid(self.fc2(out))\n if self.need_rep:\n return out, features, masks, rep\n else:\n return out, features, masks", "title": "" }, { "docid": "1e47392fc51a574d3376af70d21210c1", "score": "0.5105215", "text": "def give_CARS196_datasets(opt):\n image_sourcepath = opt.source_path+'/images'\n #Find available data classes.\n image_classes = sorted([x for x in os.listdir(image_sourcepath)])\n #Make a index-to-labelname conversion dict.\n conversion = {i:x for i,x in enumerate(image_classes)}\n #Generate a list of tuples (class_label, image_path)\n image_list = {i:sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key)]) for i,key in enumerate(image_classes)}\n image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]\n image_list = [x for y in image_list for x in y]\n\n #Image-dict of shape {class_idx:[list of paths to images belong to this class] ...}\n image_dict = {}\n for key, img_path in image_list:\n key = key\n # key = key-1\n if not key in image_dict.keys():\n image_dict[key] = []\n image_dict[key].append(img_path)\n\n keys = sorted(list(image_dict.keys()))\n\n #Following \"Deep Metric Learning via Lifted Structured Feature Embedding\", we use the first half of classes for training.\n train,test = keys[:len(keys)//2], keys[len(keys)//2:]\n train_image_dict, val_image_dict = {key:image_dict[key] for key in train},{key:image_dict[key] for key in test}\n\n train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)\n val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)\n eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)\n\n train_dataset.conversion = conversion\n val_dataset.conversion = conversion\n eval_dataset.conversion = conversion\n\n return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}", "title": "" }, { "docid": "57c190ced721f18326b0412c316ca441", "score": "0.51041454", "text": "def _collate_fn(batch):\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n centers = [b[2] for b in batch]\n # mask = [b[2] for b in batch]\n # kit_mask = [b[3] for b in batch]\n imgs = torch.stack(imgs, dim=0)\n max_num_label = labels[0].shape[0]\n for l in labels[1:]:\n if l.shape[0] > max_num_label:\n max_num_label = l.shape[0]\n new_labels = []\n for l in labels:\n if l.shape[0] < max_num_label:\n l_pad = torch.cat([l, torch.LongTensor([999]).repeat(max_num_label - l.shape[0], 6)], dim=0)\n new_labels.append(l_pad)\n else:\n new_labels.append(l)\n labels = torch.stack(new_labels, dim=0)\n return [imgs, labels, centers]", "title": "" }, { "docid": "7131047709eb5026893aed7990ecb518", "score": "0.5102052", "text": "def image_fetch(self, chan):\r\n self.img = chan", "title": "" }, { "docid": "b34ed4b11bac8a6846ea82ae9cea4139", "score": "0.5100212", "text": "def 
visualize2(label_list, img_list):\n n = len(label_list)\n plt.figure(figsize=(16, 5))\n for i in range(len(label_list)):\n name = label_list[i];\n image = img_list[i];\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "title": "" }, { "docid": "ed3de45687958f4761b5fe375105385b", "score": "0.50893164", "text": "def collate_fn(data):\n # Sort a data list by attribute length (descending order).\n data.sort(key=lambda x: len(x[3]), reverse=True)\n images1,images2,images3,images4,images5,attribs,veri = zip(*data)\n #print(images1.size())\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images1 = torch.stack(images1, 0)\n images2 = torch.stack(images2, 0)\n images3 = torch.stack(images3, 0)\n images4 = torch.stack(images4, 0)\n images5 = torch.stack(images5, 0)\n attribs = torch.from_numpy(np.asarray(attribs)).float()\n\n # Merge attributes (from tuple of 1D tensor to 2D tensor).\n lengths_attrib = [len(cap) for cap in attribs]\n targets_attrib = torch.zeros(len(attribs), max(lengths_attrib)).long()\n lengths_veri = [len(cap) for cap in veri]\n targets_veri = torch.zeros(len(veri), max(lengths_veri)).long()\n for i, cap in enumerate(attribs):\n end = lengths_attrib[i]\n targets_attrib[i, :end] = cap[:end]\n for i, cap in enumerate(veri):\n end = lengths_veri[i]\n targets_veri[i, :end] = cap[:end]\n return images1,images2,images3,images4,images5,targets_attrib,lengths_attrib,targets_veri,lengths_veri", "title": "" }, { "docid": "326d9db10f082c0f443b5a3c829ef530", "score": "0.50889677", "text": "def __init__(self, image_ids, label_ids, batch_size, augment=True, \n height=512, width=256, shuffle=True\n ):\n \n self.batch_size = batch_size\n self.augment = augment\n self.image_ids = image_ids\n self.label_ids = label_ids\n self.height = height\n self.width = width\n self.shuffle = shuffle\n self.aug = self._get_augmenter()\n self.on_epoch_end()", "title": "" }, { "docid": "8b30dbb6ea2d31107ec8a3f91d21cda7", "score": "0.50877404", "text": "def vgg(cfg, i, batch_norm=False):\r\n layers = []\r\n in_channels = i\r\n stage = 1\r\n pad = nn.Pad(((0, 0), (0, 0), (1, 1), (1, 1))).to_float(mindspore.dtype.float32)\r\n for v in cfg:\r\n if v == \"M\":\r\n stage += 1\r\n layers += [pad, nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"valid\")]\r\n else:\r\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, pad_mode=\"pad\", padding=1, has_bias=True)\r\n if batch_norm:\r\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]\r\n else:\r\n layers += [conv2d, nn.ReLU()]\r\n in_channels = v\r\n return layers", "title": "" }, { "docid": "97ac2cf400b9977bb9f11124e8b98949", "score": "0.5087333", "text": "def collate_fn(data):\n\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions = zip(*data)\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n\n for idx, cap in enumerate(captions):\n cap_end = lengths[idx]\n targets[idx, :cap_end] = cap[:cap_end]\n\n lengths = torch.tensor(lengths)\n\n return images, targets, lengths", "title": "" }, { "docid": "56fe8e905d107dc38869eb7e3fcc6591", "score": "0.5086771", "text": "def __getitem__(self, index):\n image_size = (256,256)\n segpair, segpair_slice = self.indexes[index]\n pair_slice = segpair.get_pair_slice(segpair_slice,\n self.slice_axis)\n# padding 
input\n \n\n pair_slice[\"input\"]=padding((256,256),pair_slice[\"input\"])\n# normalisation \n pair_slice[\"input\"]/=100\n# multi-label mask to binary mask\n\n pair_slice[\"gt\"] = (pair_slice['gt']>= 1).astype(np.int)\n #obj_ids = np.unique(pair_slice[\"gt\"])\n# padding mask \n pair_slice[\"gt\"]=padding((256,256),pair_slice[\"gt\"])\n pair_slice[\"gt\"].reshape(256,256)\n \n #boxes = [list(region.bbox) for region in regionprops(skimage.measure.label(pair_slice[\"gt\"])) if region.area>=6]\n\n #boxes = boxes + [[1,1,1,1] for _ in range(self.num_query-len(boxes))]\n \n\n\n\n# one hot encoding mask \n \n\n \n \n pair_slice[\"gt\"]=one_hot_encoding(pair_slice[\"gt\"],2)\n \n # if boxes == []:\n # #labels = [0]\n # #labels = [self.num_classes-2]\n # labels = [self.num_classes-2]+[self.num_classes]*(self.num_query-1)\n # print(len(labels))\n # else:\n # #labels = [self.num_classes-1]*len(boxes)\n # labels = [self.num_classes-1]*len(boxes)+[self.num_classes]*(self.num_query-len(boxes))\n # print(len(labels))\n\n # boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n # boxes[:, 2:] += boxes[:, :2]\n # boxes[:, 0::2].clamp_(min=0, max=256)\n # boxes[:, 1::2].clamp_(min=0, max=256)\n\n\n # labels = torch.as_tensor(labels, dtype=torch.int64)\n \n #keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n #print(keep)\n #boxes = boxes[keep]\n #labels = labels[keep]\n # there is only one class\n\n # Consistency with torchvision, returning PIL Image\n # Using the \"Float mode\" of PIL, the only mode\n # supporting unbounded float32 values\n\n input_img = pair_slice[\"input\"]\n\n # Handle unlabeled data\n if pair_slice[\"gt\"] is None:\n gt_img = None\n else:\n gt_img = pair_slice[\"gt\"]\n \n # target_data_dict = {\n # 'boxes':boxes.float(),\n # 'labels':labels,\n # }\n\n if self.transform is not None:\n data_dict = self.transform(data_dict)\n pet=torch.from_numpy(input_img[np.newaxis]).float()\n gt_img=torch.from_numpy(gt_img).float()\n\n #print(pet.shape,gt_img.shape)\n \n \n return pet,gt_img\n #return pet,gt_img,target_data_dict", "title": "" }, { "docid": "d00a18f453519b1c815fff65b53b7140", "score": "0.5086449", "text": "def __init__(self, word_embeddings,\n word_embedding_size,\n pos_size,\n pos_embedding_size,\n depend_size,\n depend_embedding_size,\n position_size,\n position_embedding_size,\n n_filters,\n filters_size,\n n_classes,\n drop_out=0.5,\n n_hidden=250):\n super(CNN, self).__init__()\n\n self.word_embedding = torch.nn.Embedding.from_pretrained(embeddings=torch.FloatTensor(word_embeddings),\n freeze=False)\n self.p1_embedding = torch.nn.Embedding(num_embeddings=position_size,embedding_dim=position_embedding_size)\n self.p2_embedding = torch.nn.Embedding(num_embeddings=position_size,embedding_dim=position_embedding_size)\n self.pos_embedding = torch.nn.Embedding(num_embeddings=pos_size, embedding_dim=pos_embedding_size)\n self.depend_embedding = torch.nn.Embedding(num_embeddings=depend_size,embedding_dim=depend_embedding_size)\n\n drop_out_rate = drop_out\n\n self.drop_out = torch.nn.Dropout(p=drop_out_rate)\n\n self.convs = []\n\n feature_dims = word_embedding_size + pos_embedding_size + 2* position_embedding_size + depend_embedding_size\n\n for filter_size in filters_size:\n conv = torch.nn.Conv2d(in_channels=1, out_channels=n_filters,\n kernel_size=(filter_size, feature_dims))\n\n self.convs.append(conv)\n\n flat_size = n_filters * len(filters_size)\n\n n_classes = n_classes\n\n self.linear0 = torch.nn.Linear(flat_size, n_hidden)\n\n self.linear = 
torch.nn.Linear(n_hidden, n_classes)\n\n # Binary cross entropy loss for binary classification problem\n self.loss = torch.nn.CrossEntropyLoss()", "title": "" }, { "docid": "849629ff9d17151e22d646aab5fe3b77", "score": "0.50811464", "text": "def __init__(self, num_cls=4, init_weights=False):\n super(Vgg10Conv, self).__init__()\n\n self.num_cls = num_cls\n \n self.features = nn.Sequential(\n # conv1\n nn.Conv2d(1, 64, 3, padding=1), nn.BatchNorm2d(64), nn.ReLU(), \n nn.Conv2d(64, 64, 3, padding=1), nn.BatchNorm2d(64), nn.ReLU(), \n # conv2\n nn.Conv2d(64, 128, 3, padding=1), nn.BatchNorm2d(128), nn.ReLU(), \n nn.Conv2d(128, 128, 3, padding=1), nn.BatchNorm2d(128), nn.ReLU(), \n nn.MaxPool2d(2, stride=2, return_indices=True),\n # conv3\n nn.Conv2d(128, 256, 3, padding=1), nn.BatchNorm2d(256), nn.ReLU(), \n nn.Conv2d(256, 256, 3, padding=1), nn.BatchNorm2d(256), nn.ReLU(), \n nn.Conv2d(256, 256, 3, padding=1), nn.BatchNorm2d(256), nn.ReLU(), \n nn.MaxPool2d(2, stride=2, return_indices=True))\n\n self.classifier = nn.Sequential(\n nn.Linear(256, 512), nn.ReLU(), nn.Dropout(),\n nn.Linear(512, 512), nn.ReLU(), nn.Dropout(),\n nn.Linear(512, num_cls)\n )\n\n # index of conv\n self.conv_layer_indices = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28]\n # feature maps\n self.feature_maps = OrderedDict()\n # switch\n self.pool_locs = OrderedDict()\n # initial weight\n if init_weights:\n self.init_weights()", "title": "" }, { "docid": "2efdc9c069dd7a30a5e05c758a4af0cd", "score": "0.50807637", "text": "def forward(self, images):\n # Pixel-wise depth classification\n batch_dict = {}\n ifn_result = self.ifn(images)\n\n for _idx, _layer in enumerate(self.model_cfg.args['feat_extract_layer']):\n image_features = ifn_result[_layer]\n # Channel reduce\n if self.reduce_blocks[_idx] is not None:\n image_features = self.reduce_blocks[_idx](image_features)\n\n batch_dict[_layer+\"_feat2d\"] = image_features\n \n if self.training:\n # detach feature from graph if not optimize\n if \"logits\" in ifn_result:\n ifn_result[\"logits\"].detach_()\n if not self.is_optimize:\n image_features.detach_()\n\n return batch_dict", "title": "" }, { "docid": "c1722bdbf79cbe5633e4033f8bd582a5", "score": "0.50803244", "text": "def sample_images():\n prev_time = time.time()\n image = cv2.imread(\"/media/arg_ws3/5E703E3A703E18EB/data/d435_mm/empty/0_depth.png\", cv2.IMREAD_ANYDEPTH)\n image = cv2.resize(image, (opt.img_height, opt.img_width))\n '''image = cv2.resize(image, (opt.img_height, opt.img_width))\n image = torch.tensor(image)\n image = image.permute(2, 0, 1).unsqueeze(dim=0)\n print(image.shape)\n my_img = Variable(image.type(Tensor))'''\n image = image/1000.\n depth_max = image.max()\n depth_min = image.min()\n image = (image - depth_min)/(depth_max - depth_min)\n # pil_im = Image.fromarray(image)\n # pil_im = data_transform(pil_im)\n # pil_im = pil_im.unsqueeze(0)\n pil_im = torch.tensor(image).unsqueeze(dim=0).unsqueeze(dim=0)\n\n my_img = Variable(pil_im.type(Tensor))\n my_img_fake = generator(my_img)\n my_img_fake = my_img_fake.squeeze(0).detach().cpu()\n\n pil_ = my_img_fake.mul(255).clamp(0, 255).byte().permute(1, 2, 0)\n #print(pil_)\n pil_ = np.array(pil_)\n pil_ = pil_[...,::-1]\n pil_ = cv2.resize(pil_, (640, 480))\n '''for i in range(640):\n for j in range(480):\n if pil_[j][i]!=0 and pil_[j][i]>5:\n print(pil_[j][i])'''\n print(pil_.dtype)\n #print(pil_)\n cv2.imwrite(\"mask_cv2.jpg\", pil_)\n print(\"Hz: \", 1./(time.time() - prev_time))\n save_image(my_img_fake.data, 'mask_pil2.png', nrow=1, 
normalize=False)", "title": "" }, { "docid": "f0f12a24efe18abe2053a49a18e1731f", "score": "0.50795746", "text": "def annotate(inpt_pth: str) -> Dataset:\n\n global cx, cy\n\n # Initialize\n annotations = []\n\n # Get path of all images\n inpt_pth = os.path.dirname(inpt_pth)\n imgs_pth = glob.glob(f'{inpt_pth}/*.png')\n\n # Create the image window\n cv2.namedWindow('Image')\n\n # Annotate each image\n for img_pth in tqdm(imgs_pth):\n img = cv2.imread(img_pth)\n\n cv2.setMouseCallback('Image', _on_click, img)\n\n while True:\n cv2.imshow('Image', img)\n\n key = cv2.waitKey(0) & 0xFF\n\n if key == 32: # space bar\n if cx is not None and cy is not None:\n annotations.append({\n 'image': img_pth,\n 'vp': [cx, cy]\n })\n\n cx, cy = None, None\n\n break\n\n return annotations", "title": "" }, { "docid": "258b2f5dbf2efa4915b024397c917090", "score": "0.50759006", "text": "def _caser_cnn(self):\n item_out = self._add_cnn(\n self.item_history_embedding, self.item_embedding_dim, \"item\"\n )\n tf.summary.histogram(\"item_out\", item_out)\n cate_out = self._add_cnn(\n self.cate_history_embedding, self.cate_embedding_dim, \"cate\"\n )\n tf.summary.histogram(\"cate_out\", cate_out)\n cnn_output = tf.concat([item_out, cate_out], 1)\n tf.summary.histogram(\"cnn_output\", cnn_output)\n return cnn_output", "title": "" }, { "docid": "8d7df66024e1b03b3f2ca55a65f1eb77", "score": "0.5075855", "text": "def __init__(self, image_dir, instances_json, stuff_json=None,\n stuff_only=True, image_size=(64, 64), mask_size=16,\n normalize_images=True, max_samples=None,\n include_relationships=True, min_object_size=0.02,\n min_objects_per_image=3, max_objects_per_image=8,\n include_other=False, instance_whitelist=None, stuff_whitelist=None):\n super(Dataset, self).__init__()\n\n if stuff_only and stuff_json is None:\n print('WARNING: Got stuff_only=True but stuff_json=None.')\n print('Falling back to stuff_only=False.')\n\n self.image_dir = image_dir\n self.mask_size = mask_size\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.include_relationships = include_relationships\n self.set_image_size(image_size)\n\n with open(instances_json, 'r') as f:\n instances_data = json.load(f)\n\n stuff_data = None\n if stuff_json is not None and stuff_json != '':\n with open(stuff_json, 'r') as f:\n stuff_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n for image_data in instances_data['images']:\n image_id = image_data['id']\n filename = image_data['file_name']\n width = image_data['width']\n height = image_data['height']\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n \n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n object_idx_to_name = {}\n all_instance_categories = []\n for category_data in instances_data['categories']:\n category_id = category_data['id']\n category_name = category_data['name']\n all_instance_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n all_stuff_categories = []\n if stuff_data:\n for category_data in stuff_data['categories']:\n category_name = category_data['name']\n category_id = category_data['id']\n all_stuff_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n 
instance_whitelist = all_instance_categories\n if stuff_whitelist is None:\n stuff_whitelist = all_stuff_categories\n category_whitelist = set(instance_whitelist) | set(stuff_whitelist)\n\n # Add object data from instances\n self.image_id_to_objects = defaultdict(list)\n for object_data in instances_data['annotations']:\n image_id = object_data['image_id']\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n # Add object data from stuff\n if stuff_data:\n image_ids_with_stuff = set()\n for object_data in stuff_data['annotations']:\n image_id = object_data['image_id']\n image_ids_with_stuff.add(image_id)\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n if stuff_only:\n new_image_ids = []\n for image_id in self.image_ids:\n if image_id in image_ids_with_stuff:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n all_image_ids = set(self.image_id_to_filename.keys())\n image_ids_to_remove = all_image_ids - image_ids_with_stuff\n for image_id in image_ids_to_remove:\n self.image_id_to_filename.pop(image_id, None)\n self.image_id_to_size.pop(image_id, None)\n self.image_id_to_objects.pop(image_id, None)\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n\n # Prune images that have too few or too many objects\n new_image_ids = []\n total_objs = 0\n for image_id in self.image_ids:\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects_per_image <= num_objs <= max_objects_per_image:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n self.vocab['pred_idx_to_name'] = [\n '__in_image__',\n 'left of',\n 'right of',\n 'above',\n 'below',\n 'inside',\n 'surrounding',\n ]\n self.vocab['pred_name_to_idx'] = {}\n for idx, name in enumerate(self.vocab['pred_idx_to_name']):\n self.vocab['pred_name_to_idx'][name] = idx", "title": "" }, { "docid": "78ae6e247dbad888d73d2bed0528253a", "score": "0.5072808", "text": "def __init__(self,\n num_tgt_embeddings,\n hidden_size,\n heads,\n num_layers,\n src_embedding,\n tgt_embedding,\n queries_dropout=0.,\n keys_dropout=0.,\n values_dropout=0.,\n label_smoothing=0.,\n causal=True,\n logits_per_slot=1,\n first_layer='region',\n final_layer='logits',\n decoder_pos_emb=False,\n dataset='captioning',\n **kwargs):\n\n # TODO: Sequential does not technically support nested inputs\n layers = []\n super(Transformer, self).__init__(layers)\n\n self.src_embedding = 
src_embedding\n self.tgt_embedding = tgt_embedding\n# # initialize embedding immediately since we need the entire\n# # embedding matrix\n# self.queries_embedding(tf.constant([1]))\n# self.values_embedding(tf.constant([1]))\n \n # the first layer in the transformer depends on the data modality\n # for image captioning using RCNN features select 'region'\n if first_layer == 'discrete':\n layers.extend([DiscreteFeature(\n hidden_size, \n self.src_embedding, self.tgt_embedding, mode='decoder', \n decoder_pos_emb=decoder_pos_emb, **kwargs)])\n if first_layer == 'continuous':\n layers.extend([ContinuousFeature(\n hidden_size,\n self.src_embedding, self.tgt_embedding, mode='decoder', \n decoder_pos_emb=decoder_pos_emb, **kwargs)])\n if first_layer == 'region':\n layers.extend([RegionFeature(\n hidden_size,\n self.src_embedding, self.tgt_embedding, mode='decoder', \n decoder_pos_emb=decoder_pos_emb, **kwargs)])\n\n # the encoder processes values and the decoder processes queries\n # build the encoder first in the stack\n # note that for captioning tasks, encoder doesn't have relative position\n # available\n if dataset == 'captioning' or decoder_pos_emb:\n layers.extend([EncoderLayer(\n hidden_size, hidden_size * 4, heads,\n queries_dropout=queries_dropout,\n keys_dropout=keys_dropout,\n values_dropout=values_dropout,\n causal=False, **kwargs) for _ in range(num_layers)])\n else:\n layers.extend([EncoderWithPositionLayer(\n hidden_size, hidden_size * 4, heads,\n queries_dropout=queries_dropout,\n keys_dropout=keys_dropout,\n values_dropout=values_dropout,\n causal=False, num_pos=1, **kwargs) for _ in range(num_layers)]) \n\n # depending on the type of network possibly condition on position\n # build the decoder second in the stack\n cls = (DecoderWithPositionLayer\n if final_layer == 'indigo' else DecoderLayer)\n layers.extend([cls(\n hidden_size, hidden_size * 4, heads,\n queries_dropout=queries_dropout,\n keys_dropout=keys_dropout,\n values_dropout=values_dropout,\n causal=causal, **kwargs) for _ in range(num_layers)])\n \n # the final layer in the transformer depends on the model purpose\n # to run Transformer-InDIGO select 'indigo'\n if final_layer == 'logits' or final_layer == 'indigo':\n layers.extend([Logits(hidden_size, self.tgt_embedding, label_smoothing, **kwargs)])\n if final_layer == 'indigo':\n layers.extend([PointerAfterLogits(\n hidden_size * 4, hidden_size, num_tgt_embeddings, self.tgt_embedding,\n causal=causal, logits_per_slot=logits_per_slot, **kwargs)])\n\n self.final_layer_obj = layers[-1]\n \n super(Transformer, self).__init__(layers)\n\n # these parameters need to be stored so that\n # tf.layers.model.save_model works\n self.num_tgt_embeddings = num_tgt_embeddings\n self.hidden_size = hidden_size\n self.heads = heads\n self.num_layers = num_layers\n self.queries_dropout = queries_dropout\n self.keys_dropout = keys_dropout\n self.values_dropout = values_dropout\n self.label_smoothing = label_smoothing\n self.causal = causal\n self.logits_per_slot = logits_per_slot\n self.first_layer = first_layer\n self.final_layer = final_layer\n self.decoder_pos_emb = decoder_pos_emb\n self.dataset = dataset\n self.kwargs = kwargs", "title": "" }, { "docid": "522985dce7bc21035b5b7aff83056bc3", "score": "0.50726205", "text": "def embed(self, inputs, mask):\n # Create CNN\n inp = inputs * tf.expand_dims(tf.cast(mask, tf.float32), -1)\n inp = tf.expand_dims(inp, -1)\n outputs = []\n for filter_width in self.filter_widths:\n w_filter, b = self.filters[filter_width]\n conv = tf.nn.conv2d(inp, 
w_filter, strides=[1, 1, 1, 1],\n padding=\"VALID\", name=\"conv\")\n conv_bias = tf.nn.bias_add(conv, b)\n # conv_bias = tf.contrib.layers.batch_norm(conv_bias)\n if self.nonlinearity == \"relu\":\n conv_bias = tf.nn.relu(conv_bias, name=\"relu\")\n else:\n conv_bias = tf.nn.tanh(conv_bias, name=\"tanh\")\n pooled = tf.nn.max_pool(\n conv_bias, ksize=[1, self.max_seq_len - filter_width + 1, 1, 1],\n strides=[1, 1, 1, 1], padding=\"VALID\", name=\"max_pool\"\n )\n\n outputs.append(pooled)\n n_out = self.num_filters * len(self.filter_widths)\n h_out = tf.concat(outputs, 3)\n conv_output = tf.reshape(h_out, [-1, n_out])\n\n proj_w, proj_b = self.proj_params\n conv_embedding = tf.nn.xw_plus_b(conv_output, proj_w, proj_b, name=\"affine\")\n out_mask = tf.greater(tf.reduce_sum(mask, -1, keep_dims=True), 0)\n out_mask = tf.cast(out_mask, tf.float32)\n conv_embedding = conv_embedding * out_mask\n output = tf.cond(\n self.is_train,\n lambda: tf.nn.dropout(conv_embedding, self.dropout, name=\"dropout\"),\n lambda: conv_embedding\n )\n return output", "title": "" } ]
c16c73e55a512a5510b33a0c0f6efe0a
Save progress in crack phase.
[ { "docid": "8f4c2960e773cbb6e6f06df7299d617f", "score": "0.0", "text": "def set_cracked(self, cracked):\n self.cracked = open(cracked, \"a\")", "title": "" } ]
[ { "docid": "609015f3ffca95ee235a70fb75c8cec3", "score": "0.6551519", "text": "def save_progress(block, addresses, new_addresses, progress_file, with_db):\n print(\"Saving progress... do not close/power off computer until process is\"\n \" complete.\")\n update_progress(block, addresses, progress_file)\n if with_db == 1:\n print(\"Updating database...\")\n update_db(new_addresses)\n else:\n print(\"Skipping database update.\")\n print(\"Progress saved. Last block scanned was {}.\".format(block))", "title": "" }, { "docid": "e42dbc0c68cf343cc2ecd67425c0e7a9", "score": "0.6303032", "text": "def checkpoint(self):\n save()", "title": "" }, { "docid": "39f547cafdade149c521df6df28fe335", "score": "0.6127377", "text": "def saving(self):\n self._log(\"Saving....\")", "title": "" }, { "docid": "dea36dcccc0e000f66d074ab977bb73e", "score": "0.61132705", "text": "def save_checkpoint(self):\n\n ckpt_filename = os.path.join(self._checkpoint_dir,\n f\"{self._checkpoint_index}.ckpt\")\n if self._state_changed:\n with open(ckpt_filename, 'w') as f:\n json.dump(self._current_state, f, default=self.default_encode)\n logger.info(f\"Saved checkpoint to {ckpt_filename}.\")\n\n self._checkpoint_index += 1\n self._state_changed = False\n else:\n logger.info(\n f\"No changes made to analyzer data, no checkpoint saved.\")", "title": "" }, { "docid": "d49b2a4eb9ac3adc1fab47fade5130f5", "score": "0.6052116", "text": "def saveCheckpoint(self):\n self.conn.send(b\"world.checkpoint.save\")", "title": "" }, { "docid": "2a34d703ae8dbc3e5bc3d93debf1669e", "score": "0.60305184", "text": "def save_progress_locally(self, progress_json):\n \n with open(self.save_path+'search_progress.json', 'w') as f:\n json.dump(progress_json, f)\n f.close()\n \n return", "title": "" }, { "docid": "d2ef3d8240f3064d033465143c9859e8", "score": "0.6014209", "text": "def progress(self):\n pass", "title": "" }, { "docid": "f6dc90f3ec847baa52b122d151529c61", "score": "0.5994925", "text": "def callback(_locals, _globals):\n global n_steps, best_mean_reward\n # Print stats every 1000 calls\n print(n_steps)\n if (n_steps + 1) % 1 == 0:\n difficulty = _locals['self'].num_timesteps / _locals['total_timesteps']\n print(\"########## Saving ##########\")\n\n\n\n # _locals['self'].save(log_dir + 'best_model_' + id + '.pkl')\n #with open('balboa/progress.txt', 'w') as file:\n\n #file.write(str(difficulty))\n n_steps += 1\n return True", "title": "" }, { "docid": "5e12cf0302596d780c98c51398d03427", "score": "0.59772056", "text": "def save(self):\n self.write(\"SAV\")\n self.check_errors()", "title": "" }, { "docid": "caf7b502057d34a098da0c9038bcd6ab", "score": "0.5918164", "text": "def save_batch(self):\n self._batch_counter += 1\n joblib.dump(self._batch_cases, os.path.join(\n self.crop.location, \"batches\", BTCH_NM.format(self._batch_counter))\n )\n self._batch_cases = []\n self._counter = 0", "title": "" }, { "docid": "e0d93db8c005a1fa14ee97863573d50b", "score": "0.5898644", "text": "def save_checkpoint(self):\n self.checkpoints.push(\n self.save_model(suffix='update{}'.format(self.uctr)))", "title": "" }, { "docid": "f19b0a2c975b792b43090bf412dc2180", "score": "0.58801925", "text": "def commit(self):\n #logmessage(\"commit\")\n if hasattr(self, 'number'):\n #logmessage(\"Committed \" + str(self.number))\n sf = SavedFile(self.number, fix=True)\n sf.finalize()", "title": "" }, { "docid": "0a3966a4a37a9a6e8f228032b6ec228a", "score": "0.5843663", "text": "def saved(self):\n self._log(\"Saved\")", "title": "" }, { "docid": "e1bf470ad4e9fbf7a151d4e1dd23a517", 
"score": "0.5818053", "text": "def save():", "title": "" }, { "docid": "e1bf470ad4e9fbf7a151d4e1dd23a517", "score": "0.5818053", "text": "def save():", "title": "" }, { "docid": "e1bf470ad4e9fbf7a151d4e1dd23a517", "score": "0.5818053", "text": "def save():", "title": "" }, { "docid": "1cfa584e422ff730d71fae467307a16a", "score": "0.5788943", "text": "def save_progress(self, survivor_dir: Path = None) -> None:\n assert self.state == State.REPOPULATED\n assert self.survivors is not None\n\n self.hall.update(self.survivors)\n if self.hall_dir is not None:\n self.hall.save(self.hall_dir)\n if survivor_dir is not None:\n self.survivors.save(survivor_dir)\n self.state = State.SAVED", "title": "" }, { "docid": "bdbf8866fb2c71a8f30323600cf304e4", "score": "0.57604057", "text": "def save_progress_remotely(self, progress_json):\n \n self.db_handler.upload_progress_document(progress_json)\n \n return", "title": "" }, { "docid": "67ec40df514aef59893fc07c6937c224", "score": "0.57396924", "text": "def save_file(self):\n current_score = score_service.get_score()\n score_repository.save_score(current_score)", "title": "" }, { "docid": "92cbda7ef04b2aa7278831ed756c886a", "score": "0.57141715", "text": "def progress(self):\n \n pass", "title": "" }, { "docid": "502826994689b2f417b6188b1146cee3", "score": "0.57124627", "text": "def save(self):\n self.write('save')", "title": "" }, { "docid": "4ed961df2aa64fba8e270affb8509672", "score": "0.5684331", "text": "def save_complete(self,par_alloc,result):\n self.completed += 1\n if self.webhook and self.verbose:\n data = '{{\"text\": \":white_check_mark: for configuration {0}\", \"icon_emoji\": \"{1}\", \"username\": \"{2}\"}}'\n self.send_message(data.format(par_alloc, self.icon, self.bot_name))", "title": "" }, { "docid": "b75709fc8e227c28c565a7822b6e6b6c", "score": "0.5672331", "text": "def save(self):\n now = time.time()\n diff = now - self.__last_save\n # Write file to disk if more than 30 changes or more than 600 ms passed\n if self.__changes > 30 or diff > 600:\n with open(self.__path, 'w') as _file:\n self.__last_save = time.time()\n self.__changes = 0\n json.dump(self.__counters, _file)\n LOGGER.debug(\"wrote counters.json file\")", "title": "" }, { "docid": "65c9531bc83cdab4b533043bd7f367e5", "score": "0.5644196", "text": "def checkpoint(self,step):\n checkpoint_file = os.path.join(FLAGS.log_dir, self.RUNID,'model.ckpt')\n self.saver.save(self.session, checkpoint_file, global_step=step)", "title": "" }, { "docid": "d622ac193196ede2268542977e80a119", "score": "0.56418175", "text": "def save_checkpoint(self, path):\n if self.hp.comm.rank == 0:\n path = Path(path)\n self.model.save_parameters(str(path / 'model.h5'))\n self.optim._solver.save_states(str(path / 'optim.h5'))\n with open(Path(self.hp.output_path) / 'checkpoint.json', 'w') as f:\n json.dump(dict(cur_epoch=self.cur_epoch,\n params_path=str(path),\n optim_n_iters=self.optim._iter), f)\n self.monitor.info(f\"Checkpoint saved: {str(path)}\\n\")", "title": "" }, { "docid": "64227581126e34a5d25dd26f8f13194a", "score": "0.5636113", "text": "def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)", "title": "" }, { "docid": "12de3782b2ff1abb47217ce31a115e03", "score": "0.56340265", "text": "def commit(self, acquire_progress, install_progress): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "5a102a08e1f497c5cadfbf2318a74fdb", "score": "0.5627696", "text": "def save(self, path):\n joblib.dump(self, path)", "title": "" }, { 
"docid": "6867f149b1d9701420645aa8f550d915", "score": "0.5622317", "text": "def save_curr_state(self):\n if self._save_func(self.iteration):\n self.model.save(os.path.join(self.model_path, \"%d.h5\" % self.iteration))\n\n if self.checkpoint_midis and self._midi_save_func(self.iteration):\n self.save_midis(\"t_%d.mid\" % self.iteration, \"v_%d.mid\" % self.iteration)\n\n if time.time() - self.last_save_time > self.save_secs or \"latest.h5\" not in os.listdir(self.model_path):\n self.model.save(os.path.join(self.model_path, \"latest.h5\"))\n if self.checkpoint_midis:\n self.save_midis(\"t_l.mid\", \"v_l.mid\")", "title": "" }, { "docid": "c94abde4c7ac2f419c9a81c0e205b8bc", "score": "0.56169593", "text": "def progress_log(self, symbol, progress=1, **keyargs):\n if not self._progress or self._part == 1:\n return\n data = self._data.get(symbol, None)\n if data is None:\n return\n data._counter += progress\n if data._counter == data._total:\n self.progress_end(symbol)\n elif data._partsize == 0 or \\\n int(data._counter / data._partsize) > data._lastpart:\n if data._partsize == 0 and self._part > 0:\n return\n # this means total is very small\n if data._partsize > 0:\n data._lastpart = data._counter / data._partsize\n done = data._counter / data._total\n t = int(time.time() - data._starttime)\n eta = int((t / done) - t)\n tstr= (\"Elapsed: {:02d}h {:02d}min {:02d}s\"\n .format(t//3600, t//60%60, t%60))\n etastr = (\"ETA: {:02d}h {:02d}min {:02d}s\"\n .format(eta//3600, eta//60%60, eta%60))\n donestr = \"{:.1f}\".format(done*100)\n keystr = \"\".join([ \"; {}: {}\".format(k, v) for k,v in keyargs.items()])\n string = \"{} {}% {} processed \" \\\n .format(self._pfx, donestr, data._units) + \\\n \"[{}; {}{}]\" \\\n .format(tstr, etastr, keystr)\n if len(string) > data._strlen:\n data._strlen = len(string)\n spacediff = \"\"\n else:\n spacediff = \" \"*(data._strlen-len(string))\n self._channel.write(\"\\r{}{}\".format(string, spacediff))\n self._channel.flush()", "title": "" }, { "docid": "2cd5c18660984be90ff684da195eb33a", "score": "0.5595566", "text": "def update_progress(self, val=None):\n self.progress_total_index += 1\n self.progress_total += self.progress_delta\n if val:\n self.progress_total = val\n if self.progress_total_index % self.latency == 0 or val is not None:\n self.progress = self.progress_total\n if self.progress_total >= 100:\n self.status = self.FILE_STATUS_COMPLETED\n self.save()", "title": "" }, { "docid": "fe6dac60b967a9bf8ba849ab04654bda", "score": "0.5547572", "text": "def on_upload_progress(self, bytes_sent):\n pass", "title": "" }, { "docid": "a01cc2ae58c884f3fb5bf731e8afe0a6", "score": "0.55307066", "text": "def _progressive_save(self, job) -> None:\n self._log.info(\"Saving results to '%s'\" % self.folder)\n path: str = self.folder + \"/\"\n if job['domain'] in self.saved:\n return\n job['start_time'] = str_datetime(job['start_time'])\n job['end_time'] = str_datetime(job['end_time'])\n jid: int = random.randint(100000, 999999)\n filename: str = \"%s_%s_%d_job.json\" % (self.project, job['domain'], jid)\n handle = open(path + filename, 'w')\n handle.write(json.dumps(job, indent=4))\n handle.close()\n\n filename = \"%s_%s_%d_emails.txt\" % (self.project, job['domain'], jid)\n handle = open(path + filename, 'w')\n for email in job['results']['emails']:\n handle.write(email + \"\\n\")\n handle.close()\n self.saved.append(job['domain'])", "title": "" }, { "docid": "e13d6e8c8ec4ede7c7271830c5260cd7", "score": "0.5528718", "text": "def fsave_checkpoint(self):\n torch.save({\n 
'grad_step' : self.global_step,\n 'model_state_dict' : self.model.state_dict(),\n 'optimizer_state_dict': self.optimiser.state_dict(),\n }, self.CHECKPOINT_NAME)", "title": "" }, { "docid": "cdab46082ea6b69cd20b2dce44667841", "score": "0.55124784", "text": "def checkpoint(self, step):\n if (step + 1) % self.opt.checkpoint_steps == 0:\n LOGGER.info('Checkpoint step {}...'.format(step))\n checkpoint = {'agent': self.agent.state_dict(),\n 'optimizer': self.optimizer}\n ckpt_name = 'checkpoint.{}.pt'.format(step)\n ckpt_path = os.path.join(self.opt.save_dir, ckpt_name)\n torch.save(checkpoint, ckpt_path)", "title": "" }, { "docid": "ea124880f81a6aef9924848873a5912b", "score": "0.5477519", "text": "def save_cmd(self):\n\t\tself._save()", "title": "" }, { "docid": "b76e6d8430bfa8e29cfb7b508aa7d8d5", "score": "0.54765105", "text": "def stage_done(self):\n with open(self.stage_file, \"a+\") as f:\n f.write(f\"{self!s}\\n\")", "title": "" }, { "docid": "5406a77460fe9c33f32d0327246255ac", "score": "0.54750776", "text": "def step_end(self, run_context):\n cb_params = run_context.original_args()\n if cb_params.cur_step_num % self.save_ckpt_step == 0:\n saved_ckpt_num = cb_params.cur_step_num / self.save_ckpt_step\n if saved_ckpt_num > self.max_ckpt_num:\n oldest_ckpt_index = saved_ckpt_num - self.max_ckpt_num\n path = os.path.join(self.output_dir, \"tiny_bert_{}_{}.ckpt\".format(int(oldest_ckpt_index),\n self.save_ckpt_step))\n if os.path.exists(path):\n os.remove(path)\n save_checkpoint(self.network, os.path.join(self.output_dir,\n \"tiny_bert_{}_{}.ckpt\".format(int(saved_ckpt_num),\n self.save_ckpt_step)))", "title": "" }, { "docid": "c489aeb4d684aa6264ad78d24fdc2313", "score": "0.54711276", "text": "def commit(self):\n di = self.selected_dataset()\n if di is not None:\n self.error(\"\")\n\n if self.__awaiting_state is not None:\n # disconnect from the __commit_complete\n self.__awaiting_state.watcher.done.disconnect(\n self.__commit_complete)\n # .. 
and connect to update_cached_state\n # self.__awaiting_state.watcher.done.connect(\n # self.__update_cached_state)\n # TODO: There are possible pending __progress_advance queued\n self.__awaiting_state.pb.advance.disconnect(\n self.__progress_advance)\n self.progressBarFinished(processEvents=None)\n self.__awaiting_state = None\n\n if not di.islocal:\n pr = progress()\n callback = lambda pr=pr: pr.advance.emit()\n pr.advance.connect(self.__progress_advance, Qt.QueuedConnection)\n\n self.progressBarInit(processEvents=None)\n self.setStatusMessage(\"Fetching...\")\n self.setBlocking(True)\n\n f = self._executor.submit(\n ensure_local, di.prefix, di.filename, force=di.outdated,\n progress_advance=callback)\n w = FutureWatcher(f, parent=self)\n w.done.connect(self.__commit_complete)\n self.__awaiting_state = _FetchState(f, w, pr)\n else:\n self.setStatusMessage(\"\")\n self.setBlocking(False)\n self.commit_cached(di.prefix, di.filename)\n else:\n self.send(\"Data\", None)", "title": "" }, { "docid": "b603c71f54988490f370295dc34c24e9", "score": "0.54581225", "text": "def save(self, checkpoint_dir, iteration_number):\n self.memory.save(checkpoint_dir, iteration_number)", "title": "" }, { "docid": "036abe735aabf74cfa969be3783971d4", "score": "0.5458058", "text": "def save(self, output_path_dir, cycle_id, task_id, task_total_steps):\n pass", "title": "" }, { "docid": "433fe82bad69f192e9364b6452f7d9ce", "score": "0.545317", "text": "async def save(self):\n await self.__write_file()", "title": "" }, { "docid": "3602111ac05dca0af3cbb8b7ce363213", "score": "0.5439", "text": "def save(self, loading):\n pass", "title": "" }, { "docid": "d9dd268c03193144f60814365c00de7d", "score": "0.54140013", "text": "def save(self, filename, step, **kwargs):\n data = {}\n data['model'] = self.model.state_dict()\n if self.optimizer:\n data['optimizer'] = self.optimizer.state_dict()\n if self.lr_scheduler:\n data['lr_scheduler'] = self.lr_scheduler.state_dict()\n data['step'] = step\n data.update(kwargs)\n\n filename = os.path.join(self.work_dir, filename)\n torch.save(data, filename)\n self.tag_last_checkpoint(filename)\n self.logger.info(f'Successfully saved checkpoint to {filename}!')", "title": "" }, { "docid": "9a031c8c6e7ef4f4a4c759b6bde63e34", "score": "0.539994", "text": "def save(self,\n sess,\n global_step):\n self.ckpt_saver.save(sess, self.ckpt_name, global_step=global_step)", "title": "" }, { "docid": "ec27dda4614fb783d07b945aaf4bfbc7", "score": "0.5387442", "text": "def Save(self):", "title": "" }, { "docid": "b4e5b341e4133b7d479779a73682e4d5", "score": "0.53821623", "text": "def save_result(self):\n logging.info(\"Saving result...\")\n if self.result_export.export():\n self.utils.save_best_pf(\n self.population,\n self.best_pf,\n self.result_export\n )\n logging.info(\"Done\")", "title": "" }, { "docid": "afc2ded4dd18312a239e84ed02240f4b", "score": "0.5373877", "text": "def calc_progress(self):\n if self.is_prepared():\n self._sync_info_from_disk()\n self._num_sown_batches = len(glob(\n os.path.join(self.location, \"batches\", BTCH_NM.format(\"*\"))))\n self._num_results = len(glob(\n os.path.join(self.location, \"results\", RSLT_NM.format(\"*\"))))\n else:\n self._num_sown_batches = -1\n self._num_results = -1", "title": "" }, { "docid": "74dca4422e1dbc8811f3cf3970652617", "score": "0.5358858", "text": "def save(self):\n # log\n self.__plugin.setPreferences(\n \"LogLimit\", self.logSpinBox.value())\n self.__plugin.setPreferences(\n \"LogSubjectColumnWidth\", self.logWidthSpinBox.value())\n 
self.__plugin.setPreferences(\n \"FindCopiesHarder\", self.findHarderCheckBox.isChecked())\n # commit\n self.__plugin.setPreferences(\n \"CommitMessages\", self.commitSpinBox.value())\n self.__plugin.setPreferences(\n \"CommitMessages\", self.commitIdSpinBox.value())\n # cleanup\n self.__plugin.setPreferences(\n \"CleanupPatterns\", self.cleanupPatternEdit.text())\n # repository optimization\n self.__plugin.setPreferences(\n \"AggressiveGC\", self.aggressiveCheckBox.isChecked())", "title": "" }, { "docid": "eccbd828b08e3391c4c0655d834fe9c1", "score": "0.5355023", "text": "def write_to_disc(self, update_hist = False):\n\t\twith open(self.status_file, \"wb\") as f:\n\t\t\tpickle.dump((self.best_model, self.n_start, self.length_hist, self.status_file, self.args), f)\n\t\tif update_hist:\n\t\t\twith open(self.status_file + \".hist\", \"wb\") as d:\n\t\t\t\tpickle.dump(self.ev_hist, d)", "title": "" }, { "docid": "dff71804ffe6a13c619df1ee60159bed", "score": "0.53392744", "text": "def progress(message, *args):\n _last_progress[:] = [message, args]\n _dump_progress()", "title": "" }, { "docid": "95211ed2e02499697b2bf5268af68eec", "score": "0.5339022", "text": "def save(self, session, step):\r\n if not os.path.isdir(self.checkpoint_path):\r\n os.mkdir(self.checkpoint_path)\r\n checkpoint_path = os.path.join(self.checkpoint_path, self.name)\r\n return self.saver.save(session, checkpoint_path, global_step=step)", "title": "" }, { "docid": "bd3d8f11135f79524c961f15c7fa2908", "score": "0.53195757", "text": "def save(self, filename):\n data = [\n VERSION_NUMBER,\n self.q,\n self.n_actions,\n self.max_wind,\n self.iteration\n ]\n\n pickle.dump(data, open(filename, \"wb\"))", "title": "" }, { "docid": "1484913cde067f5c8c0d24ff595d9964", "score": "0.53174186", "text": "def __work_done(self, result, log_lock, heading):\n\n # Process the results\n self.__results[result[1][0]][result[1][1]] = result.success\n _print(log_lock, \"{} Saving results from task-{}-{}\".format(heading, *result.indices))\n self.__write_object(result)", "title": "" }, { "docid": "de175d4be0f4a2aca53bab48f02adc8a", "score": "0.53152966", "text": "def save_state(self):\n\n #self.shelf.commit(str(datetime.utcnow().isoformat()[:15]))\n self.shelf.close()\n\n cache_file = self.issues_cache_file()\n cache_file_dir = os.path.dirname(cache_file)\n\n if not os.path.isdir(cache_file_dir):\n os.makedirs(cache_file_dir)\n\n #if not self.dirty:\n # return\n\n if not os.path.isfile(cache_file):\n fd = open(cache_file, 'wb')\n try:\n cPickle.dump(issueSet, fd)\n finally:\n fd.close()\n \n self.dirty = False", "title": "" }, { "docid": "a0db7e7417ae67d4ecf2a8d426768e28", "score": "0.5308569", "text": "def save(self, path):", "title": "" }, { "docid": "1e524b8079d79d58f02c4b7724e2b8f6", "score": "0.5305712", "text": "def save_for_later(self, smc):\n pass", "title": "" }, { "docid": "20c20805f0027ca647c5430dbd6503a1", "score": "0.5293873", "text": "def copyfileobj_sparse_progress(\n self,\n fsrc,\n fdst,\n length=16*1024,\n make_sparse=True,\n bar_length=40,\n quiet=True,\n ):\n i = 0\n fsrc.seek(0, 2) # move the cursor to the end of the file\n end_val = fsrc.tell()\n fsrc.seek(0, 0) # move back the cursor to the start of the file\n old_ipercent = -1\n while 1:\n buf = fsrc.read(length)\n if not buf:\n break\n if make_sparse and buf == '\\0'*len(buf):\n fdst.seek(len(buf), os.SEEK_CUR)\n else:\n fdst.write(buf)\n i += length\n percent = min(float(i) / end_val, 1.0)\n ipercent = int(round(percent * 100))\n if not quiet and ipercent > old_ipercent:\n 
old_ipercent = ipercent\n hashes = '#' * int(round(percent * bar_length))\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write(\n _(\n \"\\rUploading: [{h}] {n}%\".format(\n h=hashes + spaces,\n n=ipercent,\n )\n )\n )\n sys.stdout.flush()\n if make_sparse:\n # Make sure the file ends where it should, even if padded out.\n fdst.truncate()\n if not quiet:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "title": "" }, { "docid": "40705f948a49171fd564d2073ab27bf5", "score": "0.5290306", "text": "def progress(self, searcher_state: SearcherState) -> float:\n pass", "title": "" }, { "docid": "43ebbf1e67f4e3cec203653e61f60c7e", "score": "0.52814263", "text": "def _save_performance(self):\n pass", "title": "" }, { "docid": "f5c203e50e0dc03a98d60e65a14bc85e", "score": "0.527857", "text": "def save(self, botengine=None):\n botengine.get_logger().info(\"Sensibo: save()\")\n self.saved_state = self.is_on(botengine)", "title": "" }, { "docid": "46a3c46bc99c5aad02db233ccec351c2", "score": "0.52739036", "text": "def smartSave(self):\n self._updateSceneVars()\n self.scene.increment_and_save()", "title": "" }, { "docid": "31bfe22e67643dc0f6f191311631c2d8", "score": "0.52730966", "text": "def save(self, dst):", "title": "" }, { "docid": "3f963171376b33a6e256625f5757ad29", "score": "0.52697587", "text": "def final_save(self):\n if self.train_dir:\n # final saving\n dir_ = utils.makedir(os.path.join(self.train_dir, \"final\"))\n rank = os.environ.get(\"LOCAL_RANK\")\n if rank is None or rank == '0':\n try:\n torch.save(self.controller, os.path.join(dir_, \"controller.pt\"))\n except pickle.PicklingError as e:\n self.logger.warning(\"Final saving: torch.save(controller) fail, \"\n \"fallback to call `controller.save`: %s\", e)\n self.controller.save(os.path.join(dir_, \"controller\"))\n try:\n torch.save(self.evaluator, os.path.join(dir_, \"evaluator.pt\"))\n except pickle.PicklingError as e:\n self.logger.warning(\"Final saving: torch.save(evaluator) fail, \"\n \"fallback to call `evaluator.save`: %s\", e)\n self.evaluator.save(os.path.join(dir_, \"evaluator.pt\"))\n self.logger.info(\"Final Saving: Dump controller/evaluator to directory %s\", dir_)", "title": "" }, { "docid": "1e123bc8be50aa370601260d939ee072", "score": "0.5267966", "text": "def progress_end(self, symbol, **keyargs):\n if not self._progress:\n return\n data = self._data.get(symbol, None)\n if data is None:\n return\n t = int(time.time() - data._starttime)\n tstr= (\"Elapsed time: {:02d}h {:02d}min {:02d}s\"\n .format(t//3600, t//60%60, t%60))\n quantity = str(data._total) if self._part == 1 else \"100.0%\"\n keystr = \"\".join([ \"; {}: {}\".format(k, v) for k,v in keyargs.items()])\n string = \"{} {} {} processed \" \\\n .format(self._pfx, quantity, data._units) + \\\n \"[{}{}]\" \\\n .format(tstr, keystr)\n spacediff = \" \" * (max(data._strlen - len(string),0))\n if self._part != 1:\n self._channel.write(\"\\r\")\n self._channel.write(\"{}{}\\n\".format(string, spacediff))\n self._channel.flush()\n self._data.pop(symbol)", "title": "" }, { "docid": "90da92d07706846e3215e9d89bf52ed6", "score": "0.5265882", "text": "def save(self):\n torch.save(self.network.state_dict(), os.path.join(self.output_folder, \"params.net\"))\n torch.save(self.optimizer.state_dict(), os.path.join(self.output_folder, \"optimizer.data\"))\n pickle.dump(self.stats, open(os.path.join(self.output_folder, \"statsCkpt.pkl\"), \"wb\"), protocol=pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "0078f8d50dc2643e0343d426d8715aad", "score": "0.526479", "text": 
"def progress(self):\n\n # here the socket must pe put on read or write queue for select()\n\n self.protocol[self.status]()", "title": "" }, { "docid": "c8316358802c50733479f1008006aa01", "score": "0.525897", "text": "def save(self):\n self.logfile.write(self.log)\n self.log = \"\"\n self.logfile.flush()\n os.fsync(self.logfile.fileno())", "title": "" }, { "docid": "a42cdcec5d55c622d22c78768e14c370", "score": "0.5256228", "text": "def job_done(self, args):\n self.soak_results[args[\"handle\"]] = args[\"state\"]", "title": "" }, { "docid": "831b474dc11f8714ce89aabb11922956", "score": "0.5250563", "text": "def save_report(self):\n task_report = self.download_remote_report()\n if len(task_report) == 0:\n return\n self.report = {**self.get_state(), 'results': task_report[0]}\n with open(self.report_path(), 'w') as f:\n json.dump(self.report, f)", "title": "" }, { "docid": "d18da203748cdb86f932b53d92a0090f", "score": "0.52459615", "text": "def finalize(self):\n self.save_checkpoint()", "title": "" }, { "docid": "6ba393f6dcf58d72692878c3f52b02df", "score": "0.5236931", "text": "def save_checkpoint(self):\n os.makedirs(self.ckpt_dir, exist_ok=True)\n for subject_data in self.data:\n filename = os.path.join(\n self.ckpt_dir, '%s.pickle' % subject_data[KEY_ID])\n with open(filename, 'wb') as handle:\n pickle.dump(\n subject_data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print('Checkpoint saved at %s' % self.ckpt_dir)", "title": "" }, { "docid": "b8aa4a5ec7066b2ed74c7ca03fff5698", "score": "0.52302676", "text": "async def save(self):\n for status in self.results:\n pass\n msg = f'Save for endpoint \"{self.endpoint}\" is not yet implemented.'\n raise NotImplementedError(msg)", "title": "" }, { "docid": "99de5a8b1242dd6f2d531aedaf225018", "score": "0.5228136", "text": "def save(self, path):\n\t\tpickle.dump(self.pipeline, open(path, 'w'))", "title": "" }, { "docid": "c5d0955cb6d9fc7ad0248f643672c30c", "score": "0.52169687", "text": "def save_results(*args):\n\tpass", "title": "" }, { "docid": "52f4d6ab3f622d66425a25b127822e30", "score": "0.5216521", "text": "def save(self):\n self._saved_item_index = self._current_item_index\n self._saved_status = self.get_status()", "title": "" }, { "docid": "362e4bd8a34e66816945b095e3458bc2", "score": "0.5213916", "text": "def progress(filename, size, sent):\n sys.stdout.write(\"%s's progress: %.2f%% \\r\" % (filename, float(sent)/float(size)*100) )", "title": "" }, { "docid": "49d3477366920cf5ccdb6d1a8dd8aaa1", "score": "0.5212715", "text": "def save(self):\n gid = \"%.5d\" % self.itimestep\n \n g = self.steps.create_group(gid)\n g.attrs['_timestamp_'] = time.time()\n g.attrs['TIME'] = self.TIME\n g.attrs['istep'] = self.itimestep\n g.attrs['particle_weight'] = self.particle_weight()\n g.attrs['nparticles'] = self.nparticles\n\n for var in self.SAVED_ATTRS:\n value = getattr(self, var)\n g.create_dataset(var, data=value, compression='gzip')\n \n self.itimestep += 1\n\n self.root.attrs['nsteps'] = self.itimestep\n self.root.flush()\n logging.info(\"Timestep {} [TIME={}] saved.\".format(gid, self.TIME))", "title": "" }, { "docid": "1059530a342682118a04de95bc284ddc", "score": "0.5208805", "text": "def _save_state(self, action_result):\n # Updating the last run hash digest for scheduled/interval or manual polling\n self._state[\"last_run_hash_digests\"] = self._last_run_hash_digests\n\n # Check for manual poll or not\n if self._is_poll_now:\n return phantom.APP_SUCCESS\n\n # As end_alert_time has current time, we are saving current time as last run time for both 
alert and IoCs.\n for run_mode in self._run_mode:\n self._state[f\"last_run_{run_mode}_time\"] = self._time_dict.get(run_mode, {}).get(GC_END_TIME_KEY)\n\n return phantom.APP_SUCCESS", "title": "" }, { "docid": "ea55b18c3f98a29b6a37a1a74f2ecfbd", "score": "0.5207458", "text": "def pickle_output(self):\n\n # see if we need to write\n if hasattr(self,'pickle_time'):\n if self.analysis_time < self.pickle_time:\n return\n\n # save for later in a pickle, updating the pickle_time to now\n self.pickle_time = time.time()\n with open(self.pickle,'wb') as f:\n pickle.dump(self,f)", "title": "" }, { "docid": "568b1323bdf76cb00c265c615e27e1d1", "score": "0.5204068", "text": "def save(self, file_path):", "title": "" }, { "docid": "a83b8bc8d5d59626128dc9fe35ac1c8f", "score": "0.519728", "text": "def _on_step(self) -> bool:\n self.saved_reward.append(self.locals['rewards'])\n self.rolling_reward.append(self.locals['rewards'])\n self.every_step_outfile.write(str(self.locals['rewards'] * 1000) + '\\n')\n return True", "title": "" }, { "docid": "29b6d145982219e37c18f1f34fd28f77", "score": "0.5195233", "text": "def save_step(self, trajectory):\n self.trajectory.append(trajectory)", "title": "" }, { "docid": "e92f05fe264b87f73b2b038e58d669bc", "score": "0.51920456", "text": "def onPhaseSave(event):\n phase = event.info\n submissionModel = ModelImporter.model('submission', 'covalic')\n submissions = submissionModel.getAllSubmissions(phase)\n submissionModel.updateFolderAccess(phase, submissions)", "title": "" }, { "docid": "8c6d4c845accd6e8088cb22748b981f7", "score": "0.51890546", "text": "def save(self):\n if len(self._data) == 0:\n # There is no point in saving empty cache and we could get an error\n # because of nonexistent cache directory\n return\n\n with (self.directory / self._save_filename).open(\"wb\") as fp:\n pickler = pickle.Pickler(fp)\n pickler.dump(1), # Version\n pickler.dump(self.size_used)\n pickler.dump(self._data)\n pickler.dump(self._partial_hashes)", "title": "" }, { "docid": "c7a1be0fd4ddb7772c014bf1474071fd", "score": "0.5188822", "text": "def save(self, filename):\n # use pickle\n pass", "title": "" }, { "docid": "d978568b1728a53b30494db45d4f7a31", "score": "0.51871914", "text": "def save(self):\r\n pass", "title": "" }, { "docid": "7542b1f3c6183c8272e584d750d22fe3", "score": "0.5187162", "text": "def save(self, sess, step):\n self._saver().save(sess, self.__model, global_step=step)", "title": "" }, { "docid": "8ce4add96ccd0a8f369ba9a69714c95f", "score": "0.5185035", "text": "def progress_report(progress_step):\r\n progress_report.progress = 0\r\n def _(new):\r\n if new - progress_report.progress >= progress_step:\r\n progress_report.progress = new\r\n print \"Progress\", str(progress_report.progress) + \"%...\"\r\n return new\r\n return _", "title": "" }, { "docid": "e23b0d58b6c01e25549fd260a6e7e814", "score": "0.5183463", "text": "def save(self):\r\n if self._keyboardListener.saveFlag:\r\n print(\"Save data ...\")\r\n self._fileIO.markEndTime()\r\n self._saveLog()\r\n self._saveData()", "title": "" }, { "docid": "b72394bf0c7fe0551733c37ff90ba678", "score": "0.517915", "text": "def save_general_data(self, data, session):\n session = session if session else get_random_name()\n with click.progressbar(data, label='Collecting data for session: {}'.format(session)) as results:\n for d in results:\n d.update({\n \"time\": datetime.datetime.now(),\n \"session\": session})\n self.ctx.vlog(d)\n self.es.save_in_index(self.domain, d)\n self.ctx.log(\"\\nProject name: {}\".format(self.domain))\n 
self.ctx.log(\"\\nSession name: {}\".format(session))\n self.ctx.log(\"Results: {}\".format(len(data)))", "title": "" }, { "docid": "a2e41d154a3757660026f313b23e916a", "score": "0.51772875", "text": "def report_file_progress(self, prog, bytes_done):\r\n # The source file might not have a size attribute.\r\n if prog:\r\n done = prog.progress(bytes_done)\r\n print \"Copied %(item)s/%(items)s bytes (%(percentage)s%%) \" \\\r\n \"%(elapsed_time)s/%(total_time)s.\\r\" % done,\r\n else:\r\n print \"Copied %s bytes.\\r\" % (bytes_done),", "title": "" }, { "docid": "dec64bbf16bd4a4402be2cf814fa87ce", "score": "0.51771826", "text": "def _update_status(result):\n if nfile % 50 == 0 and nfile > 0:\n rate = nfile / (time() - t0)\n log.info('{}/{} files; {:.1f} files/sec'.format(nfile, nfiles, rate))\n nfile[...] += 1 # this is an in-place modification.\n return result", "title": "" }, { "docid": "b32e617a951307964cce58d10884be4c", "score": "0.5176856", "text": "def save(self, path=None):", "title": "" }, { "docid": "fa33493e2ea22c5aee0aa00925c9873d", "score": "0.516962", "text": "def _execute(self, session: Session) -> None:\n try:\n self._count += 1\n if self._count > 1 and self._count % self._transaction_size == 0:\n session.commit()\n LOGGER.info(f'Committed {self._count} records so far')\n\n if self._count > 1 and self._count % self._progress_report_frequency == 0:\n LOGGER.info(f'Processed {self._count} records so far')\n\n except Exception as e:\n LOGGER.exception('Failed to commit changes')\n raise e", "title": "" }, { "docid": "f3cf7b57e4437b2741cea5988b7dcb4a", "score": "0.51670665", "text": "def save(self, output, data):\n return", "title": "" }, { "docid": "23b5fca5a27454ccd4d6453dc105755a", "score": "0.5162262", "text": "def save(self, step=None):\n if step is None:\n step = self.session.run(self.global_step)\n self.saver.save(self.session, join(self.save_dir, 'step'), global_step=step)", "title": "" }, { "docid": "15f86995a7d94ab4ef199d99a9d0c479", "score": "0.51592493", "text": "def save(self):\t\t\t\n\t\ttorch.save(self.state_dict(), self.output_name+'.pkl')\n\t\tprint(\"[SAVED] Model saved to file as\", self.output_name)\n\t\treturn", "title": "" }, { "docid": "e6c6a734a6cc2e70c86177cc4fc95370", "score": "0.5159064", "text": "def save(self, includeProtoTxt = False):\n toSave = {\"SessionState\": self.state, \"Iteration\": self.iteration, \"MaxIter\": self.max_iter}\n toSave[\"ProjectID\"] = self.project.projectId\n\n self.__ensureDirectory(self.directory)\n Log.log(\"Saving current Session status to disk.\", self.getCallerId())\n if self.last_solverstate:\n toSave[\"LastSnapshot\"] = self.last_solverstate\n if self.getPretrainedWeights():\n toSave[\"PretrainedWeights\"] = self.getPretrainedWeights()\n if self.state_dictionary:\n serializedDict = copy.deepcopy(self.state_dictionary)\n if includeProtoTxt:\n if \"solver\" in self.state_dictionary:\n solver = self.buildSolverPrototxt()\n with open(self.getSolver(log=False), 'w') as f:\n f.write(solver)\n else:\n Log.error(\"Could not save a solver prototxt file, because no solver settings are defined.\", self.getCallerId())\n\n if \"network\" in serializedDict:\n if includeProtoTxt:\n net = self.buildNetPrototxt(internalVersion=False)\n with open(self.getOriginalNetFile(log=False), 'w') as f:\n f.write(net)\n net = self.buildNetPrototxt(internalVersion=True)\n with open(self.getInternalNetFile(log=False), 'w') as f:\n f.write(net)\n if \"layers\" in serializedDict[\"network\"]:\n layers = serializedDict[\"network\"][\"layers\"]\n for id in 
layers:\n del layers[id][\"type\"]\n else:\n Log.error(\"Could not save the network state because no state was defined.\", self.getCallerId())\n\n toSave[\"NetworkState\"] = serializedDict\n\n with open(baristaSessionFile(self.directory), \"w\") as f:\n json.dump(toSave, f, sort_keys=True, indent=4)", "title": "" }, { "docid": "27f4cdcfecd2a3e5efbd581b338ec37d", "score": "0.51557183", "text": "def save(self, fname):\n\n self._perform_checks()\n\n persist_dict = self._build_persist_dict()\n joblib.dump(persist_dict, fname)", "title": "" }, { "docid": "f396e89570ec1d580153003b6a9375f8", "score": "0.51498693", "text": "def on_progress(self, future, bytes_transferred, **kwargs):\n with self._lock:\n self._seen_so_far += bytes_transferred\n percentage = (self._seen_so_far / self._size) * 100\n sys.stdout.write(\n \"\\r%s %sMB / %sMB (%.2f%%)\"\n % (\n self._filename,\n int(self._seen_so_far / MB),\n int(self._size / MB),\n percentage,\n )\n )\n sys.stdout.flush()", "title": "" } ]