# NeuroKorzh: Streamlit demo that continues a user prompt in the style of the
# musician Max Korzh, using a fine-tuned ruGPT-3 medium (GPT-2) checkpoint.
import transformers
import torch
import tokenizers
import streamlit as st


# tokenizers.Tokenizer objects cannot be hashed by st.cache, so skip hashing them.
@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None}, suppress_st_warning=True)
def get_model(model_name, model_path):
    """Load the base tokenizer/model from the Hub and apply fine-tuned weights from a local checkpoint."""
    tokenizer = transformers.GPT2Tokenizer.from_pretrained(model_name)
    model = transformers.GPT2LMHeadModel.from_pretrained(model_name)
    # Overwrite the pretrained weights with the fine-tuned checkpoint (CPU-only inference).
    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
    model.eval()
    return model, tokenizer


@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None}, suppress_st_warning=True)
def predict(text, model, tokenizer, n_beams=5, temperature=2.5, top_p=0.8, max_length=300):
    """Generate a continuation of `text` using beam search combined with sampling (temperature + top-p)."""
    input_ids = tokenizer.encode(text, return_tensors="pt")
    with torch.no_grad():
        out = model.generate(input_ids,
                             do_sample=True,
                             num_beams=n_beams,
                             temperature=temperature,
                             top_p=top_p,
                             max_length=max_length,
                             )

    # Decode the generated sequences and return the first one.
    return list(map(tokenizer.decode, out))[0]


# ruGPT-3 medium base model plus the fine-tuned NeuroKorzh checkpoint.
model, tokenizer = get_model('sberbank-ai/rugpt3medium_based_on_gpt2', 'korzh-medium_30epochs_1bs.bin')

st.title("NeuroKorzh")
st.markdown("<img width=200px src='https://avatars.yandex.net/get-music-content/2399641/5d26d7e5.p.975699/m1000x1000'>",
            unsafe_allow_html=True)

st.markdown("\n")

text = st.text_area(label='Starting point for text generation', height=200)
button = st.button('Go')

if button:
    try:
        result = predict(text, model, tokenizer)
        st.subheader('Max Korzh:')
        st.write(result)
    except Exception:
        st.error("Oops, something went wrong. Please try again and report it to me, tg: @vladyur")