#!/usr/bin/env python3

# Copyright 2023 Yi Xie
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gradio as gr

import glob
import hashlib
import logging
import os
import shutil
import subprocess
import sys
import yaml

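# Scratch directory where converted models are written; it is cleared of old
# outputs at the start of every conversion.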
OUT_DIR = '/tmp'

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger()

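# known_models.yaml holds the metadata for every supported model. A sketch of
# the expected shape of one entry, inferred from the lookups below (all field
# values are hypothetical):
#
#   models:
#     - name: example-model-x4          # hypothetical
#       file: models/example-model-x4.pth
#       sourceLink: https://example.com/model-page
#       sha256: <digest of the .pth file>
#       type: <architecture type understood by converter.py>
#       scale: 4
#       description: Example description
#       source: Example source
#       author: Example author
#       license: Example license
#       # Optional keys: cuda, monochrome, features, blocks, convs, shuffle-factor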
with open('known_models.yaml', 'r') as f:
    known_models_yaml = yaml.safe_load(f)

def convert(input_model):
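    """Convert an uploaded .pth model into a .wifm package for waifu2x-ios.

    The upload is matched against known_models.yaml by SHA-256 digest; unknown
    models are rejected. Returns the path of the converted model, or None when
    no file was uploaded.
    """
    # Clear outputs from the previous run so only the latest result is served.
    # (Note: glob's root_dir parameter requires Python 3.10+.)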
    last_outputs = glob.glob('*.wifm', root_dir=OUT_DIR) + glob.glob('*.mlpackage', root_dir=OUT_DIR)
    for output in last_outputs:
        path = os.path.join(OUT_DIR, output)
        try:
            if os.path.isfile(path):
                os.remove(path)
            else:
                shutil.rmtree(path)
        except Exception as e:
            logger.error('Failed to remove last output file: %s', e)

    if input_model is None:
        return None

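    # Fingerprint the upload; models are identified purely by SHA-256 digest.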
    file = input_model.name
    if not file.endswith('.pth'):
        raise gr.Error('Uploaded file is not a PyTorch weights (.pth) file.')
    with open(file, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()

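    # Find the matching known model and assemble the converter.py command line.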
    for model in known_models_yaml['models']:
        if digest != model['sha256']:
            continue
        name = model['name']
        out_file = os.path.join(OUT_DIR, name + '.wifm')
        logger.info('Converting model: %s', name)
        command = [
            'python', 'converter.py',
            '--type', model['type'],
            '--name', name,
            '--scale', str(model['scale']),
            '--out-dir', OUT_DIR,
            '--description', model['description'],
            '--source', model['source'],
            '--author', model['author'],
            '--license', model['license']
        ]
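        # Optional architecture flags, passed through only when set in known_models.yaml.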
        if 'cuda' in model and model['cuda']:
            command += ['--has-cuda']
        if 'monochrome' in model and model['monochrome']:
            command += ['--monochrome']
        if 'features' in model:
            command += ['--num-features', str(model['features'])]
        if 'blocks' in model:
            command += ['--num-blocks', str(model['blocks'])]
        if 'convs' in model:
            command += ['--num-convs', str(model['convs'])]
        if 'shuffle-factor' in model:
            command += ['--shuffle-factor', str(model['shuffle-factor'])]
        command += [file]
        logger.debug('Command: %s', command)
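        # Stream converter.py's stdout byte by byte so progress appears in the logs in real time.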
        process = subprocess.Popen(command, stdout=subprocess.PIPE)
        for c in iter(lambda: process.stdout.read(1), b''):
            sys.stdout.buffer.write(c)
            sys.stdout.flush()
        process.wait()
        if process.returncode != 0:
            raise gr.Error('converter.py returned non-zero exit code ' + str(process.returncode))
        if not os.path.exists(out_file):
            raise gr.Error('Conversion finished but no output file was produced.')
        return out_file

    raise gr.Error('Unknown model. If it has a supported architecture, please create an issue at https://github.com/imxieyi/waifu2x-ios-model-converter.')

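# Render the supported-model list as a markdown table for the page footer.
# Newlines inside descriptions become <br/> so each row stays on one table line.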
models_string = '''
|File|Name|Scale|Description|License|
|---|---|---|---|---|
'''
for model in known_models_yaml['models']:
    models_string += '|[{}]({})|{}|{}|{}|{}|\n'.format(
        model['file'].split('/')[-1],
        model['sourceLink'],
        model['name'],
        model['scale'],
        model['description'].replace('\n', '<br/>'),
        model['license'])

iface = gr.Interface(
    fn=convert,
    inputs='file',
    outputs='file',
    title='Web waifu2x-ios Model Converter',
    description='''
Upload a `.pth` model file on the `input_model` panel. After uploading, wait until the converted `.wifm` model file appears on the `output` panel, then click `Download` to save the converted custom model.

Only the models listed below the converter are supported. To request another model, please create an issue [here](https://github.com/imxieyi/waifu2x-ios-model-converter/issues) or report it via app feedback.
''',
    article='''
Supported models (from [upscale.wiki Model Database](https://upscale.wiki/wiki/Model_Database)):
{}

Project: https://github.com/imxieyi/waifu2x-ios-model-converter

Report issues: https://github.com/imxieyi/waifu2x-ios-model-converter/issues
'''.format(models_string),
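    # live=True triggers convert() as soon as a file is uploaded; no submit button needed.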
    live=True,
)
iface.launch()