import json

import tensorflow as tf

tf.enable_eager_execution()


def test():
    """Train and evaluate a tiny LinearRegressor on a constant dataset."""
    regressor = tf.estimator.LinearRegressor(
        feature_columns=[tf.feature_column.numeric_column('feats')],
        optimizer='SGD'
    )

    def input_fn():
        # A single (features, label) pair, repeated and batched.
        dataset = tf.data.Dataset.from_tensors(({"feats": [1.]}, [1.]))
        return dataset.repeat(1000).batch(10)

    regressor.train(input_fn=input_fn, steps=20)
    regressor.evaluate(input_fn=input_fn)


def parse_from_json(config_path):
    """Parse feature column names from a feature config file.

    Args:
        config_path: string, path to a JSON feature config.

    Returns:
        The set of feature names, with any '#'-suffixed annotation stripped.
    """
    with open(config_path, "r") as f:
        config = json.load(f)

    feature_names = set()
    for feature in config["features"]:
        feature_name = feature['feature_name']
        # Names like "name#annotation" keep only the base name.
        if '#' in feature_name:
            feature_name = feature_name.split('#')[0]
        feature_names.add(feature_name)
    return feature_names
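
# Sketch of the config shape parse_from_json expects; only a top-level
# "features" list whose items carry "feature_name" is assumed (names here
# are hypothetical):
#
#   {"features": [{"feature_name": "uid"}, {"feature_name": "poi_id#seq"}]}
#
# On such a file it returns {"uid", "poi_id"}.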


def parse_model_2_txt(saved_model_dir, output_file):
    """Dump a SavedModel proto to a human-readable pbtxt file.

    Note: relies on the private loader_impl._parse_saved_model helper, which
    may change between TensorFlow versions.
    """
    from tensorflow.python.saved_model import loader_impl
    from google.protobuf import text_format

    saved_model = loader_impl._parse_saved_model(saved_model_dir)
    with open(output_file, 'w') as f:
        f.write(text_format.MessageToString(saved_model))
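
# Usage sketch (paths are placeholders):
#   parse_model_2_txt('./exported_model/1234567890', 'saved_model.pbtxt')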


def build_serving_input_new():
    """Inspect pickled feature descriptions (expects a local feature_desc.pkl)."""
    import pickle
    with tf.gfile.Open('feature_desc.pkl', mode='rb') as f:
        feature_desc = pickle.load(f)
    for name, desc in feature_desc.items():
        if 'sg_poi_click_time_gap_seq_2d' in name:
            print(desc)
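
# feature_desc.pkl is not included here; it is assumed to map feature names to
# parsing specs, e.g. (hypothetical):
#   {'sg_poi_click_time_gap_seq_2d': tf.FixedLenFeature([10], tf.int64)}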


def read_schema(file):
    """Read a space-separated "name type" schema file into a dict."""
    d = {}
    with open(file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            fds = line.split(' ')
            d[fds[0]] = fds[1]
    return d
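
# Example (hypothetical) schema file consumed by read_schema:
#
#   uid int64
#   poi_id string
#
# which yields {'uid': 'int64', 'poi_id': 'string'}.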


def sparse_tensor():
    """Build a small SparseTensor and convert it to dense (zeros fill gaps)."""
    indices_tf = tf.constant([[0, 0], [0, 1], [1, 1], [2, 2]], dtype=tf.int64)
    values_tf = tf.constant([1, 2, 3, 4], dtype=tf.int32)
    dense_shape_tf = tf.constant([3, 3], dtype=tf.int64)

    sparse_tf = tf.SparseTensor(indices=indices_tf,
                                values=values_tf,
                                dense_shape=dense_shape_tf)
    dense_tf = tf.sparse_tensor_to_dense(sparse_tf)
    print(dense_tf)
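
# Expected dense result:
#   [[1 2 0]
#    [0 3 0]
#    [0 0 4]]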


# Eager scratch demo: pair each nonzero entry of `b` with the value from the
# matching row of `a`.
user_tf = tf.constant([1, 2, 3], dtype=tf.int32, shape=[3, 1])  # unused scratch constant

b = tf.constant([[1, 2, 1], [0, 3, 2], [0, 0, 4]])

a = tf.constant([1, 2, 3], shape=[3, 1])
# Tile the column vector to b's shape so the two tensors align elementwise.
a = tf.tile(a, tf.constant([1, 3]))
print(a)

# Coordinates of b's nonzero entries, shape [num_nonzero, 2].
indices = tf.where(tf.not_equal(b, 0))
print(indices)

# Column 0: value from a at each nonzero coordinate; column 1: value from b.
c = tf.concat(values=[tf.expand_dims(tf.gather_nd(a, indices), axis=1),
                      tf.expand_dims(tf.gather_nd(b, indices), axis=1)],
              axis=1)
print(c)
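
# Expected pairs (value from a, value from b) for each nonzero entry of b:
#   [[1 1]
#    [1 2]
#    [1 1]
#    [2 3]
#    [2 2]
#    [3 4]]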


def kkv_attention(query, key, value, mask=None):
    """Batched dot-product attention.

    Expects query [B, Tq, d], key [B, Tk, d], value [B, Tk, d].
    """
    # [B, Tq, d] @ [B, d, Tk] -> [B, Tq, Tk]
    key_transpose = tf.transpose(key, perm=[0, 2, 1])
    logits = tf.matmul(query, key_transpose)

    # Additive mask, e.g. large negative values at padded positions.
    if mask is not None:
        logits += mask

    attention_scores = tf.nn.softmax(logits, axis=-1)

    # [B, Tq, Tk] @ [B, Tk, d] -> [B, Tq, d]
    context_vector = tf.matmul(attention_scores, value)

    return context_vector, attention_scores
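
# Sketch of a call on a toy batch (shapes follow the [B, T, d] layout assumed
# above):
#   q = tf.random.normal([2, 4, 8])
#   k = tf.random.normal([2, 6, 8])
#   v = tf.random.normal([2, 6, 8])
#   ctx, scores = kkv_attention(q, k, v)  # ctx: [2, 4, 8], scores: [2, 4, 6]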


def write_kkv_attention(query, key, value, mask=None):
    """Unbatched dot-product attention with `key` passed pre-transposed.

    Expects query [Tq, d], key [d, Tk], value [Tk, d].
    """
    # [Tq, d] @ [d, Tk] -> [Tq, Tk]; note key is used without a transpose.
    logits = tf.matmul(query, key)

    if mask is not None:
        logits += mask

    attention_scores = tf.nn.softmax(logits, axis=-1)

    # [Tq, Tk] @ [Tk, d] -> [Tq, d]
    context_vector = tf.matmul(attention_scores, value)

    return context_vector, attention_scores


def test_write_kkv_attention():
    query = tf.constant([[-0.1250, 0.0000, -0.5000, 0.5000, 0.0000]])

    key = tf.constant([[-0.1250, 0.0000, -0.5000, 0.5000, 0.0000],
                       [-0.5000, 0.0000, 0.5000, 0.5000, 0.0000],
                       [-0.2500, -0.5000, 0.0000, 0.5000, 0.2500],
                       [0.0000, 0.0000, 0.0000, 0.5000, 0.5000],
                       [0.5000, 0.5000, 0.0000, -0.5000, 0.5000]])

    value = tf.constant([[-0.5000, 0.0000, 0.5000, 0.5000, 0.0000],
                         [-0.5000, 0.0000, 0.5000, 0.5000, 0.0000],
                         [-0.5000, 0.0000, 0.5000, 0.5000, 0.0000],
                         [0.0000, 0.0000, 0.5000, 0.5000, 0.5000],
                         [0.5000, 0.5000, 0.0000, -0.5000, 0.5000]])

    mask = None

    context_vector, attention_scores = write_kkv_attention(query, key, value, mask)

    print(context_vector)
    print(attention_scores)
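

# Minimal driver; uncomment the calls you want to run (test() writes estimator
# checkpoints to a temporary model_dir).
if __name__ == '__main__':
    # test()
    sparse_tensor()
    test_write_kkv_attention()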