Upload 15 files
- .gitignore +140 -0
- .pre-commit-config.yaml +46 -0
- CODE_OF_CONDUCT.md +128 -0
- LICENSE +29 -0
- MANIFEST.in +8 -0
- README_CN.md +276 -0
- VERSION +1 -0
- app.py +149 -0
- cog.yaml +22 -0
- cog_predict.py +148 -0
- inference_realesrgan.py +166 -0
- inference_realesrgan_video.py +398 -0
- requirements.txt +9 -0
- setup.cfg +33 -0
- setup.py +107 -0
.gitignore
ADDED
@@ -0,0 +1,140 @@
# ignored folders
datasets/*
experiments/*
results/*
tb_logger/*
wandb/*
tmp/*
weights/*

version.py

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,46 @@
repos:
  # flake8
  - repo: https://github.com/PyCQA/flake8
    rev: 3.8.3
    hooks:
      - id: flake8
        args: ["--config=setup.cfg", "--ignore=W504, W503"]

  # modify known_third_party
  - repo: https://github.com/asottile/seed-isort-config
    rev: v2.2.0
    hooks:
      - id: seed-isort-config

  # isort
  - repo: https://github.com/timothycrosley/isort
    rev: 5.2.2
    hooks:
      - id: isort

  # yapf
  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.30.0
    hooks:
      - id: yapf

  # codespell
  - repo: https://github.com/codespell-project/codespell
    rev: v2.1.0
    hooks:
      - id: codespell

  # pre-commit-hooks
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      - id: trailing-whitespace  # Trim trailing whitespace
      - id: check-yaml  # Attempt to load all yaml files to verify syntax
      - id: check-merge-conflict  # Check for files that contain merge conflict strings
      - id: double-quote-string-fixer  # Replace double quoted strings with single quoted strings
      - id: end-of-file-fixer  # Make sure files end in a newline and only a newline
      - id: requirements-txt-fixer  # Sort entries in requirements.txt and remove incorrect entry for pkg-resources==0.0.0
      - id: fix-encoding-pragma  # Remove the coding pragma: # -*- coding: utf-8 -*-
        args: ["--remove"]
      - id: mixed-line-ending  # Replace or check mixed line ending
        args: ["--fix=lf"]
CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at

All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
LICENSE
ADDED
@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2021, Xintao Wang
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MANIFEST.in
ADDED
@@ -0,0 +1,8 @@
include assets/*
include inputs/*
include scripts/*.py
include inference_realesrgan.py
include VERSION
include LICENSE
include requirements.txt
include weights/README.md
README_CN.md
ADDED
@@ -0,0 +1,276 @@
<p align="center">
  <img src="assets/realesrgan_logo.png" height=120>
</p>

## <div align="center"><b><a href="README.md">English</a> | <a href="README_CN.md">简体中文</a></b></div>

[](https://github.com/xinntao/Real-ESRGAN/releases)
[](https://pypi.org/project/realesrgan/)
[](https://github.com/xinntao/Real-ESRGAN/issues)
[](https://github.com/xinntao/Real-ESRGAN/issues)
[](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE)
[](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/pylint.yml)
[](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/publish-pip.yml)

:fire: Updated the small **RealESRGAN AnimeVideo-v3** model for anime videos. More information is in [[anime video models](docs/anime_video_model.md)] and [[comparisons](docs/anime_comparisons_CN.md)].

1. [Colab Demo](https://colab.research.google.com/drive/1k2Zod6kSHEvraybHl50Lys0LerhyTMCo?usp=sharing) for Real-ESRGAN | [Colab Demo](https://colab.research.google.com/drive/1yNl9ORUxxlL4N0keJa2SEPB61imPQd1B?usp=sharing) for Real-ESRGAN **anime videos**
2. Portable executables that **support Intel/AMD/Nvidia GPUs**: [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [macOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip); see [this section](#portable-green-executable-files) for details. The NCNN implementation is in [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan).

The goal of Real-ESRGAN is to develop **practical algorithms for general image/video restoration**.<br>
We extend ESRGAN and train it with pure synthetic data so that it can be applied to real-world image restoration (hence the name: Real-ESRGAN).

:art: Real-ESRGAN needs and welcomes your contributions, such as new features, models, bug fixes, suggestions and maintenance. See [CONTRIBUTING.md](docs/CONTRIBUTING.md) for details; all contributors are listed [here](README_CN.md#hugs-acknowledgements).

:milky_way: Thanks for all the valuable feedback, which is gradually being collected in [this document](docs/feedback.md).

:question: Frequently asked questions are answered in [FAQ.md](docs/FAQ.md). (Well, it is still empty for now =-=||)

---

If Real-ESRGAN is helpful to you, please give this project a Star :star: or recommend it to your friends. Thanks! :blush: <br/>
Other recommended projects:<br/>
:arrow_forward: [GFPGAN](https://github.com/TencentARC/GFPGAN): A practical algorithm for real-world face restoration <br>
:arrow_forward: [BasicSR](https://github.com/xinntao/BasicSR): An open-source image and video restoration toolbox<br>
:arrow_forward: [facexlib](https://github.com/xinntao/facexlib): A collection of face-related utilities<br>
:arrow_forward: [HandyView](https://github.com/xinntao/HandyView): A PyQt5-based image viewer that is handy for viewing and comparing images <br>

---

<!---------------------------------- Updates --------------------------->
<details>
<summary>🚩<b>Updates</b></summary>

- ✅ Updated the small **RealESRGAN AnimeVideo-v3** model for anime videos. More information is in [anime video models](docs/anime_video_model.md) and [comparisons](docs/anime_comparisons.md).
- ✅ Added small models for anime videos; more information is in [anime video models](docs/anime_video_model.md).
- ✅ Added the ncnn implementation: [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan).
- ✅ Added [*RealESRGAN_x4plus_anime_6B.pth*](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth), which is optimized for anime images and has a smaller model size. Details and comparisons with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) are in [**anime_model.md**](docs/anime_model.md)
- ✅ Support fine-tuning on your own dataset: [details](docs/Training.md#Finetune-Real-ESRGAN-on-your-own-dataset)
- ✅ Support **face enhancement** with [GFPGAN](https://github.com/TencentARC/GFPGAN)
- ✅ Added to [Huggingface Spaces](https://huggingface.co/spaces) (an online platform for machine-learning apps) via [Gradio](https://github.com/gradio-app/gradio): [Gradio demo](https://huggingface.co/spaces/akhaliq/Real-ESRGAN). Thanks to [@AK391](https://github.com/AK391)
- ✅ Support arbitrary output scales with `--outscale` (the model output is further resized with `LANCZOS4`). Added the *RealESRGAN_x2plus.pth* model
- ✅ The [inference script](inference_realesrgan.py) supports: 1) **tile** processing; 2) images with **alpha channels**; 3) **gray** images; 4) **16-bit** images.
- ✅ The training code has been released; see [Training.md](docs/Training.md).

</details>

<!---------------------------------- Projects that use Real-ESRGAN --------------------------->
<details>
<summary>🧩<b>Projects that use Real-ESRGAN</b></summary>

👋 If you develop/use/integrate Real-ESRGAN, feel free to contact me to add your project here

- NCNN-Android: [RealSR-NCNN-Android](https://github.com/tumuyan/RealSR-NCNN-Android) by [tumuyan](https://github.com/tumuyan)
- VapourSynth: [vs-realesrgan](https://github.com/HolyWu/vs-realesrgan) by [HolyWu](https://github.com/HolyWu)
- NCNN: [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan)

**Easy-to-use GUIs**

- [Waifu2x-Extension-GUI](https://github.com/AaronFeng753/Waifu2x-Extension-GUI) by [AaronFeng753](https://github.com/AaronFeng753)
- [Squirrel-RIFE](https://github.com/Justin62628/Squirrel-RIFE) by [Justin62628](https://github.com/Justin62628)
- [Real-GUI](https://github.com/scifx/Real-GUI) by [scifx](https://github.com/scifx)
- [Real-ESRGAN_GUI](https://github.com/net2cn/Real-ESRGAN_GUI) by [net2cn](https://github.com/net2cn)
- [Real-ESRGAN-EGUI](https://github.com/WGzeyu/Real-ESRGAN-EGUI) by [WGzeyu](https://github.com/WGzeyu)
- [anime_upscaler](https://github.com/shangar21/anime_upscaler) by [shangar21](https://github.com/shangar21)
- [RealESRGAN-GUI](https://github.com/Baiyuetribe/paper2gui/blob/main/Video%20Super%20Resolution/RealESRGAN-GUI.md) by [Baiyuetribe](https://github.com/Baiyuetribe)

</details>

<details>
<summary>👀<b>Demo videos (Bilibili)</b></summary>

- [Clip from *Havoc in Heaven*](https://www.bilibili.com/video/BV1ja41117zb)

</details>

### :book: Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data

> [[Paper](https://arxiv.org/abs/2107.10833)]   [Project page]   [[YouTube video](https://www.youtube.com/watch?v=fxHWoDSSvSc)]   [[Bilibili video](https://www.bilibili.com/video/BV1H34y1m7sS/)]   [[Poster](https://xinntao.github.io/projects/RealESRGAN_src/RealESRGAN_poster.pdf)]   [[PPT](https://docs.google.com/presentation/d/1QtW6Iy8rm8rGLsJ0Ldti6kP-7Qyzy6XL/edit?usp=sharing&ouid=109799856763657548160&rtpof=true&sd=true)]<br>
> [Xintao Wang](https://xinntao.github.io/), Liangbin Xie, [Chao Dong](https://scholar.google.com.hk/citations?user=OSDCB0UAAAAJ), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en) <br>
> Tencent ARC Lab; Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences

<p align="center">
  <img src="assets/teaser.jpg">
</p>

---

We provide a trained model (*RealESRGAN_x4plus.pth*) for 4x super-resolution.<br>
**Real-ESRGAN can still fail in some cases, because real-world degradations are really complicated.**<br>
Moreover, it does not perform very well on **faces and text**, but we will keep improving it.<br>

Real-ESRGAN will be supported in the long run; I will keep maintaining and updating it in my spare time.

Here are some planned new features:

- [ ] Better handling of faces
- [ ] Better handling of text
- [x] Optimized for anime images
- [ ] Support more super-resolution scales
- [ ] Adjustable restoration strength

If you have good ideas or demands, please open an issue or a discussion.<br/>
If you have photos on which Real-ESRGAN fails, you are also welcome to post them in an issue or a discussion. I will keep an eye on them (but may not necessarily solve them :stuck_out_tongue:). If necessary, I will open a dedicated page to record the images that remain to be solved.

---

### Portable (green) executable files

You can download the portable executables that **support Intel/AMD/Nvidia GPUs**: [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [macOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip).

"Portable (green)" means that these executables can be run directly (you can even copy them onto a USB stick), because all the required files and models are already bundled. They do not need a CUDA or PyTorch runtime.<br>

You can run the following command (the Windows example; see the README.md of the corresponding release for more information):

```bash
./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n model_name
```

We provide four models:

1. realesrgan-x4plus (default)
2. realesrnet-x4plus
3. realesrgan-x4plus-anime (optimized for anime/illustration images, with a smaller model size)
4. realesr-animevideov3 (for anime videos)

You can switch models with the `-n` argument, for example, `./realesrgan-ncnn-vulkan.exe -i anime.jpg -o anime_out.png -n realesrgan-x4plus-anime`

### Usage of the executable files

1. See [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan#computer-usages) for more details.
2. Note that the executables do not support all the features of the Python script `inference_realesrgan.py`, such as the `outscale` option.

```console
Usage: realesrgan-ncnn-vulkan.exe -i infile -o outfile [options]...

 -h                   show this help
 -i input-path        input image path (jpg/png/webp) or directory
 -o output-path       output image path (jpg/png/webp) or directory
 -s scale             upscale ratio (can be 2, 3, 4. default=4)
 -t tile-size         tile size (>=32/0=auto, default=0) can be 0,0,0 for multi-gpu
 -m model-path        folder path to the pre-trained models. default=models
 -n model-name        model name (default=realesr-animevideov3, can be realesr-animevideov3 | realesrgan-x4plus | realesrgan-x4plus-anime | realesrnet-x4plus)
 -g gpu-id            gpu device to use (default=auto) can be 0,1,2 for multi-gpu
 -j load:proc:save    thread count for load/proc/save (default=1:2:2) can be 1:2,2,2:2 for multi-gpu
 -x                   enable tta mode
 -f format            output image format (jpg/png/webp, default=ext/png)
 -v                   verbose output
```

Because these executables split the image into tiles, process the tiles separately and then merge them, the output may show slight seams (and may differ a little from the PyTorch output).

---

## :wrench: Dependencies and installation

- Python >= 3.7 ([Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) is recommended)
- [PyTorch >= 1.7](https://pytorch.org/)

#### Installation

1. Clone the repo

```bash
git clone https://github.com/xinntao/Real-ESRGAN.git
cd Real-ESRGAN
```

2. Install dependent packages

```bash
# Install basicsr - https://github.com/xinntao/BasicSR
# We use BasicSR for both training and inference
pip install basicsr
# facexlib and gfpgan are for face enhancement
pip install facexlib
pip install gfpgan
pip install -r requirements.txt
python setup.py develop
```

## :zap: Quick inference

### General images

Download the pre-trained model: [RealESRGAN_x4plus.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth)

```bash
wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P weights
```

Inference!

```bash
python inference_realesrgan.py -n RealESRGAN_x4plus -i inputs --face_enhance
```

Results are in the `results` folder.

### Anime images

<p align="center">
  <img src="https://raw.githubusercontent.com/xinntao/public-figures/master/Real-ESRGAN/cmp_realesrgan_anime_1.png">
</p>

Pre-trained model: [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth)<br>
More information and comparisons with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) are in [**anime_model.md**](docs/anime_model.md).

```bash
# download the model
wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P weights
# inference
python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i inputs
```

Results are in the `results` folder.

### Usage of the Python script

1. Although you use the x4 model, you can **output images with arbitrary scales** by using the `outscale` argument; the program further resizes the model output.

```console
Usage: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile -o outfile [options]...

A common command: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile --outscale 3.5 --face_enhance

 -h                   show this help
 -i --input           Input image or folder. Default: inputs
 -o --output          Output folder. Default: results
 -n --model_name      Model name. Default: RealESRGAN_x4plus
 -s, --outscale       The final upsampling scale of the image. Default: 4
 --suffix             Suffix of the restored image. Default: out
 -t, --tile           Tile size, 0 for no tile during testing. Default: 0
 --face_enhance       Whether to use GFPGAN to enhance face. Default: False
 --fp32               Use fp32 precision during inference. Default: fp16 (half precision)
 --ext                Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto
```

## :european_castle: Model zoo

Please see [docs/model_zoo.md](docs/model_zoo.md)

## :computer: Training and fine-tuning on your own dataset

A detailed guide can be found in [Training.md](docs/Training.md).

## BibTeX

    @Article{wang2021realesrgan,
        title={Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data},
        author={Xintao Wang and Liangbin Xie and Chao Dong and Ying Shan},
        journal={arXiv:2107.10833},
        year={2021}
    }

## :e-mail: Contact

If you have any questions, please contact us at `[email protected]` or `[email protected]`.

## :hugs: Acknowledgements

Thanks to all the contributors!

- [AK391](https://github.com/AK391): Added Real-ESRGAN to [Huggingface Spaces](https://huggingface.co/spaces) (an online platform for machine-learning apps) via [Gradio](https://github.com/gradio-app/gradio): [Gradio demo](https://huggingface.co/spaces/akhaliq/Real-ESRGAN).
- [Asiimoviet](https://github.com/Asiimoviet): Translated README.md into Chinese.
- [2ji3150](https://github.com/2ji3150): Thanks for the detailed and valuable [feedback and suggestions](https://github.com/xinntao/Real-ESRGAN/issues/131).
- [Jared-02](https://github.com/Jared-02): Translated Training.md into Chinese.
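For readers who prefer the Python API over the command line, the sketch below drives `RealESRGANer` directly, mirroring what `inference_realesrgan.py` (included later in this upload) does internally. It assumes the packages from the installation section are installed and that `RealESRGAN_x4plus.pth` has been downloaded to `weights/`; the input and output paths are only examples.

```python
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

# x4 RRDBNet backbone, matching the RealESRGAN_x4plus checkpoint
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
upsampler = RealESRGANer(
    scale=4,
    model_path='weights/RealESRGAN_x4plus.pth',
    model=model,
    tile=0,        # set e.g. 400 if you hit CUDA out-of-memory
    tile_pad=10,
    pre_pad=0,
    half=False)    # half=True requires a CUDA GPU

img = cv2.imread('inputs/example.jpg', cv2.IMREAD_UNCHANGED)  # example path
output, _ = upsampler.enhance(img, outscale=4)
cv2.imwrite('results/example_out.png', output)
```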
VERSION
ADDED
@@ -0,0 +1 @@
0.3.0
app.py
ADDED
@@ -0,0 +1,149 @@
import gradio as gr
import argparse
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
import os
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url


def Generate(img, model_name):

    global output
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
    parser.add_argument(
        '-dn',
        '--denoise_strength',
        type=float,
        default=0.5,
        help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. '
              'Only used for the realesr-general-x4v3 model'))
    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
    parser.add_argument(
        '--model_path', type=str, default=None, help='[Option] Model path. Usually, you do not need to specify it')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
    parser.add_argument(
        '--fp32', action='store_true', default=True, help='Use fp32 precision during inference. Default: fp16 (half precision).')
    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    parser.add_argument(
        '-g', '--gpu-id', type=int, default=None, help='gpu device to use (default=None) can be 0,1,2 for multi-gpu')

    args = parser.parse_args()

    if model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
    elif model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
    elif model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
    elif model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
    elif model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
    elif model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        netscale = 4
        file_url = [
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        ]

    model_path = os.path.join('weights', model_name + '.pth')
    print(model_path)
    if not os.path.isfile(model_path):
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        for url in file_url:
            # model_path will be updated
            model_path = load_file_from_url(
                url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)
    dni_weight = None
    if model_name == 'realesr-general-x4v3' and args.denoise_strength != 1:
        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
        model_path = [model_path, wdn_model_path]
        dni_weight = [args.denoise_strength, 1 - args.denoise_strength]

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=dni_weight,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=not args.fp32,
        gpu_id=args.gpu_id)

    if args.face_enhance:  # Use GFPGAN for face enhancement
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
            upscale=args.outscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)
    os.makedirs(args.output, exist_ok=True)

    try:
        if args.face_enhance:
            _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
        else:
            output, _ = upsampler.enhance(img, outscale=args.outscale)
        print("生成成功")

    except RuntimeError as error:
        print('Error', error)
        print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        output = None

    return output


with gr.Blocks() as demo:

    gr.Markdown(
        """
        # <center> Real-ESRGAN 在线体验程序
        """)
    gr.Markdown("""
        1. **项目模型运行在CPU上,等待时间略长**
        2. **原工程项目旨在对图片进行修复**
        3. **项目源地址为:[Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN)**
        """)

    with gr.Row():
        with gr.Column():
            img = gr.Image(type="numpy", label="输入图片")
            model_name = gr.Dropdown(["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B", "RealESRGAN_x2plus",
                                      "realesr-animevideov3", "realesr-general-x4v3"], info="选择模型")
        with gr.Column():
            img_out = gr.Image(type="numpy", label="输出图片")

    btn = gr.Button("Generate")

    btn.click(Generate, inputs=[img, model_name], outputs=[img_out])

if __name__ == "__main__":
    demo.launch()
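The `Generate()` function above can also be exercised without the Gradio UI, which is handy for debugging the Space locally. A minimal sketch, assuming the same environment as the Space (weights are fetched on the first call) and an example image path; note that `Generate()` calls `parser.parse_args()`, so run this as a plain script without extra command-line arguments:

```python
import cv2
from app import Generate  # importing app builds the Blocks UI but does not launch it

img = cv2.imread('inputs/example.jpg', cv2.IMREAD_UNCHANGED)  # example path
output = Generate(img, 'RealESRGAN_x4plus')  # numpy array, or None on RuntimeError
if output is not None:
    cv2.imwrite('results/example_out.png', output)
```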
cog.yaml
ADDED
@@ -0,0 +1,22 @@
# This file is used for constructing replicate env
image: "r8.im/tencentarc/realesrgan"

build:
  gpu: true
  python_version: "3.8"
  system_packages:
    - "libgl1-mesa-glx"
    - "libglib2.0-0"
  python_packages:
    - "torch==1.7.1"
    - "torchvision==0.8.2"
    - "numpy==1.21.1"
    - "lmdb==1.2.1"
    - "opencv-python==4.5.3.56"
    - "PyYAML==5.4.1"
    - "tqdm==4.62.2"
    - "yapf==0.31.0"
    - "basicsr==1.4.2"
    - "facexlib==0.2.5"

predict: "cog_predict.py:Predictor"
cog_predict.py
ADDED
@@ -0,0 +1,148 @@
# flake8: noqa
# This file is used for deploying replicate models
# running: cog predict -i img=@inputs/00017_gray.png -i version='General - v3' -i scale=2 -i face_enhance=True -i tile=0
# push: cog push r8.im/xinntao/realesrgan

import os

os.system('pip install gfpgan')
os.system('python setup.py develop')

import cv2
import shutil
import tempfile
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.archs.srvgg_arch import SRVGGNetCompact

from realesrgan.utils import RealESRGANer

try:
    from cog import BasePredictor, Input, Path
    from gfpgan import GFPGANer
except Exception:
    print('please install cog and realesrgan package')


class Predictor(BasePredictor):

    def setup(self):
        os.makedirs('output', exist_ok=True)
        # download weights
        if not os.path.exists('weights/realesr-general-x4v3.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./weights'
            )
        if not os.path.exists('weights/GFPGANv1.4.pth'):
            os.system('wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./weights')
        if not os.path.exists('weights/RealESRGAN_x4plus.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./weights'
            )
        if not os.path.exists('weights/RealESRGAN_x4plus_anime_6B.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P ./weights'
            )
        if not os.path.exists('weights/realesr-animevideov3.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth -P ./weights'
            )

    def choose_model(self, scale, version, tile=0):
        half = True if torch.cuda.is_available() else False
        if version == 'General - RealESRGANplus':
            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
            model_path = 'weights/RealESRGAN_x4plus.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        elif version == 'General - v3':
            model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
            model_path = 'weights/realesr-general-x4v3.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        elif version == 'Anime - anime6B':
            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
            model_path = 'weights/RealESRGAN_x4plus_anime_6B.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        elif version == 'AnimeVideo - v3':
            model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
            model_path = 'weights/realesr-animevideov3.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)

        self.face_enhancer = GFPGANer(
            model_path='weights/GFPGANv1.4.pth',
            upscale=scale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=self.upsampler)

    def predict(
            self,
            img: Path = Input(description='Input'),
            version: str = Input(
                description='RealESRGAN version. Please see [Readme] below for more descriptions',
                choices=['General - RealESRGANplus', 'General - v3', 'Anime - anime6B', 'AnimeVideo - v3'],
                default='General - v3'),
            scale: float = Input(description='Rescaling factor', default=2),
            face_enhance: bool = Input(
                description='Enhance faces with GFPGAN. Note that it does not work for anime images/vidoes', default=False),
            tile: int = Input(
                description=
                'Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200',
                default=0)
    ) -> Path:
        if tile <= 100 or tile is None:
            tile = 0
        print(f'img: {img}. version: {version}. scale: {scale}. face_enhance: {face_enhance}. tile: {tile}.')
        try:
            extension = os.path.splitext(os.path.basename(str(img)))[1]
            img = cv2.imread(str(img), cv2.IMREAD_UNCHANGED)
            if len(img.shape) == 3 and img.shape[2] == 4:
                img_mode = 'RGBA'
            elif len(img.shape) == 2:
                img_mode = None
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            else:
                img_mode = None

            h, w = img.shape[0:2]
            if h < 300:
                img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)

            self.choose_model(scale, version, tile)

            try:
                if face_enhance:
                    _, _, output = self.face_enhancer.enhance(
                        img, has_aligned=False, only_center_face=False, paste_back=True)
                else:
                    output, _ = self.upsampler.enhance(img, outscale=scale)
            except RuntimeError as error:
                print('Error', error)
                print('If you encounter CUDA out of memory, try to set "tile" to a smaller size, e.g., 400.')

            if img_mode == 'RGBA':  # RGBA images should be saved in png format
                extension = 'png'
            # save_path = f'output/out.{extension}'
            # cv2.imwrite(save_path, output)
            out_path = Path(tempfile.mkdtemp()) / f'out.{extension}'
            cv2.imwrite(str(out_path), output)
        except Exception as error:
            print('global exception: ', error)
        finally:
            clean_folder('output')
        return out_path


def clean_folder(folder):
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print(f'Failed to delete {file_path}. Reason: {e}')
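Besides the `cog predict` invocation shown in the header comment, the `Predictor` above can be smoke-tested locally. A rough sketch, assuming `cog`, `torch`, `basicsr`, `realesrgan` and `gfpgan` are installed (importing the module runs its top-level `pip install`/`setup.py develop` commands, and `setup()` shells out to `wget` to fetch the weights); the image path is only an example:

```python
import cv2
from cog_predict import Predictor  # side effect: runs the module-level install commands

p = Predictor()
p.setup()                                        # downloads weights into ./weights
p.choose_model(scale=2, version='General - v3')  # builds self.upsampler and self.face_enhancer

img = cv2.imread('inputs/example.jpg', cv2.IMREAD_UNCHANGED)  # example path
output, _ = p.upsampler.enhance(img, outscale=2)
cv2.imwrite('output/example_out.png', output)
```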
inference_realesrgan.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import cv2
|
| 3 |
+
import glob
|
| 4 |
+
import os
|
| 5 |
+
from basicsr.archs.rrdbnet_arch import RRDBNet
|
| 6 |
+
from basicsr.utils.download_util import load_file_from_url
|
| 7 |
+
|
| 8 |
+
from realesrgan import RealESRGANer
|
| 9 |
+
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def main():
|
| 13 |
+
"""Inference demo for Real-ESRGAN.
|
| 14 |
+
"""
|
| 15 |
+
parser = argparse.ArgumentParser()
|
| 16 |
+
parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
|
| 17 |
+
parser.add_argument(
|
| 18 |
+
'-n',
|
| 19 |
+
'--model_name',
|
| 20 |
+
type=str,
|
| 21 |
+
default='RealESRGAN_x4plus',
|
| 22 |
+
help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
|
| 23 |
+
'realesr-animevideov3 | realesr-general-x4v3'))
|
| 24 |
+
parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
|
| 25 |
+
parser.add_argument(
|
| 26 |
+
'-dn',
|
| 27 |
+
'--denoise_strength',
|
| 28 |
+
type=float,
|
| 29 |
+
default=0.5,
|
| 30 |
+
help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. '
|
| 31 |
+
'Only used for the realesr-general-x4v3 model'))
|
| 32 |
+
parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
|
| 33 |
+
parser.add_argument(
|
| 34 |
+
        '--model_path', type=str, default=None, help='[Option] Model path. Usually, you do not need to specify it')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
    parser.add_argument(
        '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).')
    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    parser.add_argument(
        '-g', '--gpu-id', type=int, default=None, help='gpu device to use (default=None) can be 0,1,2 for multi-gpu')

    args = parser.parse_args()

    # determine models according to model names
    args.model_name = args.model_name.split('.')[0]
    if args.model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
    elif args.model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
    elif args.model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
    elif args.model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
    elif args.model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
    elif args.model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        netscale = 4
        file_url = [
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        ]

    # determine model paths
    if args.model_path is not None:
        model_path = args.model_path
    else:
        model_path = os.path.join('weights', args.model_name + '.pth')
        if not os.path.isfile(model_path):
            ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
            for url in file_url:
                # model_path will be updated
                model_path = load_file_from_url(
                    url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)

    # use dni to control the denoise strength
    dni_weight = None
    if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1:
        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
        model_path = [model_path, wdn_model_path]
        dni_weight = [args.denoise_strength, 1 - args.denoise_strength]

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=dni_weight,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=not args.fp32,
        gpu_id=args.gpu_id)

    if args.face_enhance:  # Use GFPGAN for face enhancement
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
            upscale=args.outscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)
    os.makedirs(args.output, exist_ok=True)

    if os.path.isfile(args.input):
        paths = [args.input]
    else:
        paths = sorted(glob.glob(os.path.join(args.input, '*')))

    for idx, path in enumerate(paths):
        imgname, extension = os.path.splitext(os.path.basename(path))
        print('Testing', idx, imgname)

        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        else:
            img_mode = None

        try:
            if args.face_enhance:
                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
            else:
                output, _ = upsampler.enhance(img, outscale=args.outscale)
        except RuntimeError as error:
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            if args.ext == 'auto':
                extension = extension[1:]
            else:
                extension = args.ext
            if img_mode == 'RGBA':  # RGBA images should be saved in png format
                extension = 'png'
            if args.suffix == '':
                save_path = os.path.join(args.output, f'{imgname}.{extension}')
            else:
                save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}')
            cv2.imwrite(save_path, output)


if __name__ == '__main__':
    main()
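The restorer that this script assembles from command-line flags can also be driven directly from Python. The sketch below uses only calls that already appear above (RRDBNet, RealESRGANer, upsampler.enhance). It is a minimal sketch, not part of the repository: the paths inputs/example.png and results/ are placeholder assumptions, and it presumes the RealESRGAN_x4plus weights have already been downloaded to weights/ as the script does.

# Minimal programmatic sketch (assumptions: weights/RealESRGAN_x4plus.pth already exists,
# and inputs/example.png is a placeholder input path).
import os

import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet

from realesrgan import RealESRGANer

model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
upsampler = RealESRGANer(
    scale=4,
    model_path='weights/RealESRGAN_x4plus.pth',
    model=model,
    tile=0,  # set a smaller tile size (e.g. 256) if CUDA runs out of memory
    tile_pad=10,
    pre_pad=0,
    half=True)  # fp16, matching the script's default (no --fp32)

os.makedirs('results', exist_ok=True)
img = cv2.imread('inputs/example.png', cv2.IMREAD_UNCHANGED)
output, _ = upsampler.enhance(img, outscale=4)
cv2.imwrite('results/example_out.png', output)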
inference_realesrgan_video.py
ADDED
@@ -0,0 +1,398 @@
import argparse
import cv2
import glob
import mimetypes
import numpy as np
import os
import shutil
import subprocess
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from os import path as osp
from tqdm import tqdm

from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

try:
    import ffmpeg
except ImportError:
    import pip
    pip.main(['install', '--user', 'ffmpeg-python'])
    import ffmpeg


def get_video_meta_info(video_path):
    ret = {}
    probe = ffmpeg.probe(video_path)
    video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
    has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams'])
    ret['width'] = video_streams[0]['width']
    ret['height'] = video_streams[0]['height']
    ret['fps'] = eval(video_streams[0]['avg_frame_rate'])
    ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None
    ret['nb_frames'] = int(video_streams[0]['nb_frames'])
    return ret


def get_sub_video(args, num_process, process_idx):
    if num_process == 1:
        return args.input
    meta = get_video_meta_info(args.input)
    duration = int(meta['nb_frames'] / meta['fps'])
    part_time = duration // num_process
    print(f'duration: {duration}, part_time: {part_time}')
    os.makedirs(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'), exist_ok=True)
    out_path = osp.join(args.output, f'{args.video_name}_inp_tmp_videos', f'{process_idx:03d}.mp4')
    cmd = [
        args.ffmpeg_bin, f'-i {args.input}', '-ss', f'{part_time * process_idx}',
        f'-to {part_time * (process_idx + 1)}' if process_idx != num_process - 1 else '', '-async 1', out_path, '-y'
    ]
    print(' '.join(cmd))
    subprocess.call(' '.join(cmd), shell=True)
    return out_path


class Reader:

    def __init__(self, args, total_workers=1, worker_idx=0):
        self.args = args
        input_type = mimetypes.guess_type(args.input)[0]
        self.input_type = 'folder' if input_type is None else input_type
        self.paths = []  # for image&folder type
        self.audio = None
        self.input_fps = None
        if self.input_type.startswith('video'):
            video_path = get_sub_video(args, total_workers, worker_idx)
            self.stream_reader = (
                ffmpeg.input(video_path).output('pipe:', format='rawvideo', pix_fmt='bgr24',
                                                loglevel='error').run_async(
                                                    pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))
            meta = get_video_meta_info(video_path)
            self.width = meta['width']
            self.height = meta['height']
            self.input_fps = meta['fps']
            self.audio = meta['audio']
            self.nb_frames = meta['nb_frames']

        else:
            if self.input_type.startswith('image'):
                self.paths = [args.input]
            else:
                paths = sorted(glob.glob(os.path.join(args.input, '*')))
                tot_frames = len(paths)
                num_frame_per_worker = tot_frames // total_workers + (1 if tot_frames % total_workers else 0)
                self.paths = paths[num_frame_per_worker * worker_idx:num_frame_per_worker * (worker_idx + 1)]

            self.nb_frames = len(self.paths)
            assert self.nb_frames > 0, 'empty folder'
            from PIL import Image
            tmp_img = Image.open(self.paths[0])
            self.width, self.height = tmp_img.size
        self.idx = 0

    def get_resolution(self):
        return self.height, self.width

    def get_fps(self):
        if self.args.fps is not None:
            return self.args.fps
        elif self.input_fps is not None:
            return self.input_fps
        return 24

    def get_audio(self):
        return self.audio

    def __len__(self):
        return self.nb_frames

    def get_frame_from_stream(self):
        img_bytes = self.stream_reader.stdout.read(self.width * self.height * 3)  # 3 bytes for one pixel
        if not img_bytes:
            return None
        img = np.frombuffer(img_bytes, np.uint8).reshape([self.height, self.width, 3])
        return img

    def get_frame_from_list(self):
        if self.idx >= self.nb_frames:
            return None
        img = cv2.imread(self.paths[self.idx])
        self.idx += 1
        return img

    def get_frame(self):
        if self.input_type.startswith('video'):
            return self.get_frame_from_stream()
        else:
            return self.get_frame_from_list()

    def close(self):
        if self.input_type.startswith('video'):
            self.stream_reader.stdin.close()
            self.stream_reader.wait()


class Writer:

    def __init__(self, args, audio, height, width, video_save_path, fps):
        out_width, out_height = int(width * args.outscale), int(height * args.outscale)
        if out_height > 2160:
            print('You are generating a video that is larger than 4K, which will be very slow due to IO speed.',
                  'We highly recommend decreasing the outscale (i.e., -s).')

        if audio is not None:
            self.stream_writer = (
                ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}',
                             framerate=fps).output(
                                 audio,
                                 video_save_path,
                                 pix_fmt='yuv420p',
                                 vcodec='libx264',
                                 loglevel='error',
                                 acodec='copy').overwrite_output().run_async(
                                     pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))
        else:
            self.stream_writer = (
                ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}',
                             framerate=fps).output(
                                 video_save_path, pix_fmt='yuv420p', vcodec='libx264',
                                 loglevel='error').overwrite_output().run_async(
                                     pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))

    def write_frame(self, frame):
        frame = frame.astype(np.uint8).tobytes()
        self.stream_writer.stdin.write(frame)

    def close(self):
        self.stream_writer.stdin.close()
        self.stream_writer.wait()


def inference_video(args, video_save_path, device=None, total_workers=1, worker_idx=0):
    # ---------------------- determine models according to model names ---------------------- #
    args.model_name = args.model_name.split('.pth')[0]
    if args.model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
    elif args.model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
    elif args.model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
    elif args.model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
    elif args.model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
    elif args.model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        netscale = 4
        file_url = [
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        ]

    # ---------------------- determine model paths ---------------------- #
    model_path = os.path.join('weights', args.model_name + '.pth')
    if not os.path.isfile(model_path):
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        for url in file_url:
            # model_path will be updated
            model_path = load_file_from_url(
                url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)

    # use dni to control the denoise strength
    dni_weight = None
    if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1:
        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
        model_path = [model_path, wdn_model_path]
        dni_weight = [args.denoise_strength, 1 - args.denoise_strength]

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=dni_weight,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=not args.fp32,
        device=device,
    )

    if 'anime' in args.model_name and args.face_enhance:
        print('face_enhance is not supported in anime models, so we turned this option off for you. '
              'If you insist on turning it on, please manually comment out the relevant lines of code.')
        args.face_enhance = False

    if args.face_enhance:  # Use GFPGAN for face enhancement
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
            upscale=args.outscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)  # TODO support custom device
    else:
        face_enhancer = None

    reader = Reader(args, total_workers, worker_idx)
    audio = reader.get_audio()
    height, width = reader.get_resolution()
    fps = reader.get_fps()
    writer = Writer(args, audio, height, width, video_save_path, fps)

    pbar = tqdm(total=len(reader), unit='frame', desc='inference')
    while True:
        img = reader.get_frame()
        if img is None:
            break

        try:
            if args.face_enhance:
                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
            else:
                output, _ = upsampler.enhance(img, outscale=args.outscale)
        except RuntimeError as error:
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            writer.write_frame(output)

        torch.cuda.synchronize(device)
        pbar.update(1)

    reader.close()
    writer.close()


def run(args):
    args.video_name = osp.splitext(os.path.basename(args.input))[0]
    video_save_path = osp.join(args.output, f'{args.video_name}_{args.suffix}.mp4')

    if args.extract_frame_first:
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        os.makedirs(tmp_frames_folder, exist_ok=True)
        os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {tmp_frames_folder}/frame%08d.png')
        args.input = tmp_frames_folder

    num_gpus = torch.cuda.device_count()
    num_process = num_gpus * args.num_process_per_gpu
    if num_process == 1:
        inference_video(args, video_save_path)
        return

    ctx = torch.multiprocessing.get_context('spawn')
    pool = ctx.Pool(num_process)
    os.makedirs(osp.join(args.output, f'{args.video_name}_out_tmp_videos'), exist_ok=True)
    pbar = tqdm(total=num_process, unit='sub_video', desc='inference')
    for i in range(num_process):
        sub_video_save_path = osp.join(args.output, f'{args.video_name}_out_tmp_videos', f'{i:03d}.mp4')
        pool.apply_async(
            inference_video,
            args=(args, sub_video_save_path, torch.device(i % num_gpus), num_process, i),
            callback=lambda arg: pbar.update(1))
    pool.close()
    pool.join()

    # combine sub videos
    # prepare vidlist.txt
    with open(f'{args.output}/{args.video_name}_vidlist.txt', 'w') as f:
        for i in range(num_process):
            f.write(f'file \'{args.video_name}_out_tmp_videos/{i:03d}.mp4\'\n')

    cmd = [
        args.ffmpeg_bin, '-f', 'concat', '-safe', '0', '-i', f'{args.output}/{args.video_name}_vidlist.txt', '-c',
        'copy', f'{video_save_path}'
    ]
    print(' '.join(cmd))
    subprocess.call(cmd)
    shutil.rmtree(osp.join(args.output, f'{args.video_name}_out_tmp_videos'))
    if osp.exists(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')):
        shutil.rmtree(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'))
    os.remove(f'{args.output}/{args.video_name}_vidlist.txt')


def main():
    """Inference demo for Real-ESRGAN.
    It is mainly for restoring anime videos.

    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input video, image or folder')
    parser.add_argument(
        '-n',
        '--model_name',
        type=str,
        default='realesr-animevideov3',
        help=('Model names: realesr-animevideov3 | RealESRGAN_x4plus_anime_6B | RealESRGAN_x4plus | RealESRNet_x4plus |'
              ' RealESRGAN_x2plus | realesr-general-x4v3. '
              'Default: realesr-animevideov3'))
    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
    parser.add_argument(
        '-dn',
        '--denoise_strength',
        type=float,
        default=0.5,
        help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. '
              'Only used for the realesr-general-x4v3 model'))
    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
    parser.add_argument(
        '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).')
    parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
    parser.add_argument('--ffmpeg_bin', type=str, default='ffmpeg', help='The path to ffmpeg')
    parser.add_argument('--extract_frame_first', action='store_true')
    parser.add_argument('--num_process_per_gpu', type=int, default=1)

    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    args = parser.parse_args()

    args.input = args.input.rstrip('/').rstrip('\\')
    os.makedirs(args.output, exist_ok=True)

    if mimetypes.guess_type(args.input)[0] is not None and mimetypes.guess_type(args.input)[0].startswith('video'):
        is_video = True
    else:
        is_video = False

    if is_video and args.input.endswith('.flv'):
        mp4_path = args.input.replace('.flv', '.mp4')
        os.system(f'ffmpeg -i {args.input} -codec copy {mp4_path}')
        args.input = mp4_path

    if args.extract_frame_first and not is_video:
        args.extract_frame_first = False

    run(args)

    if args.extract_frame_first:
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        shutil.rmtree(tmp_frames_folder)


if __name__ == '__main__':
    main()
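Both inference scripts hand RealESRGANer a pair of checkpoints together with dni_weight = [denoise_strength, 1 - denoise_strength] when the realesr-general-x4v3 model is selected. The idea is deep network interpolation: the two state dicts are blended per parameter before inference. The snippet below is only an illustrative sketch of that blending, not the loader inside RealESRGANer, and the 'params' checkpoint key is an assumption about how the paired x4v3 / wdn-x4v3 weights are stored.

# Illustrative sketch of the denoise-strength blend (not RealESRGANer's own loader).
# Assumptions: both checkpoints share identical parameter names and store them under 'params'.
import torch


def blend_state_dicts(path_a, path_b, weight_a):
    net_a = torch.load(path_a, map_location='cpu')['params']
    net_b = torch.load(path_b, map_location='cpu')['params']
    # weight_a is the fraction taken from the first checkpoint, mirroring
    # dni_weight = [denoise_strength, 1 - denoise_strength] in the scripts.
    return {k: weight_a * net_a[k] + (1 - weight_a) * net_b[k] for k in net_a.keys()}


# blended = blend_state_dicts('weights/realesr-general-x4v3.pth',
#                             'weights/realesr-general-wdn-x4v3.pth', 0.5)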
requirements.txt
ADDED
@@ -0,0 +1,9 @@
basicsr==1.4.2
facexlib>=0.2.5
gfpgan>=1.3.5
numpy
opencv-python
Pillow
torch>=1.7
torchvision
tqdm
setup.cfg
ADDED
@@ -0,0 +1,33 @@
[flake8]
ignore =
    # line break before binary operator (W503)
    W503,
    # line break after binary operator (W504)
    W504,
max-line-length=120

[yapf]
based_on_style = pep8
column_limit = 120
blank_line_before_nested_class_or_def = true
split_before_expression_after_opening_paren = true

[isort]
line_length = 120
multi_line_output = 0
known_standard_library = pkg_resources,setuptools
known_first_party = realesrgan
known_third_party = PIL,basicsr,cv2,numpy,pytest,torch,torchvision,tqdm,yaml
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY

[codespell]
skip = .git,./docs/build
count =
quiet-level = 3

[aliases]
test=pytest

[tool:pytest]
addopts=tests/
setup.py
ADDED
@@ -0,0 +1,107 @@
#!/usr/bin/env python

from setuptools import find_packages, setup

import os
import subprocess
import time

version_file = 'realesrgan/version.py'


def readme():
    with open('README.md', encoding='utf-8') as f:
        content = f.read()
    return content


def get_git_hash():

    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'

    return sha


def get_hash():
    if os.path.exists('.git'):
        sha = get_git_hash()[:7]
    else:
        sha = 'unknown'

    return sha


def write_version_py():
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])

    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)


def get_version():
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']


def get_requirements(filename='requirements.txt'):
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        requires = [line.replace('\n', '') for line in f.readlines()]
    return requires


if __name__ == '__main__':
    write_version_py()
    setup(
        name='realesrgan',
        version=get_version(),
        description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='[email protected]',
        keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan',
        url='https://github.com/xinntao/Real-ESRGAN',
        include_package_data=True,
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='BSD-3-Clause License',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
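write_version_py() turns the plain-text VERSION file into the version_info tuple by quoting any component that is not purely numeric. A quick worked example of that transformation (the value 0.3.0 is only a hypothetical example, not necessarily the repository's actual VERSION contents):

# Hypothetical walk-through of the VERSION -> version_info transformation above.
SHORT_VERSION = '0.3.0'  # assumed example value
VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
print(VERSION_INFO)  # -> 0, 3, 0   (a non-numeric part such as '0rc1' would appear quoted)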