# datasets/simple_extractor_dataset.py

import cv2
import numpy as np
from torch.utils import data

from utils.transforms import get_affine_transform


class SimpleFileDataset(data.Dataset):
    """Dataset wrapping a single image file for inference.

    input_size is interpreted as [height, width].
    """

    def __init__(self, img_path, input_size=(512, 512), transform=None):
        self.img_path = img_path  # path to a single image file
        self.transform = transform
        # Target width-to-height ratio of the network input.
        self.aspect_ratio = input_size[1] * 1.0 / input_size[0]
        self.input_size = np.asarray(input_size)

    def __len__(self):
        return 1  # a single image, so the dataset length is 1

    def _box2cs(self, box):
        # Convert an (x, y, w, h) box to a center/scale pair.
        x, y, w, h = box[:4]
        return self._xywh2cs(x, y, w, h)

    def _xywh2cs(self, x, y, w, h):
        # Box center.
        center = np.zeros(2, dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5
        # Pad the shorter side so the box matches the target aspect ratio.
        if w > self.aspect_ratio * h:
            h = w * 1.0 / self.aspect_ratio
        elif w < self.aspect_ratio * h:
            w = h * self.aspect_ratio
        scale = np.array([w, h], dtype=np.float32)
        return center, scale
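
    # Worked example for _xywh2cs (illustrative, not from the original file):
    # for a 640x480 image with a square 512x512 input, aspect_ratio == 1.0
    # and the full-image box is (0, 0, 639, 479).  Since 639 > 1.0 * 479,
    # h is padded to 639.0, giving center == (319.5, 239.5) and
    # scale == (639.0, 639.0): a square region centered on the image.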

    def __getitem__(self, index):
        img = cv2.imread(self.img_path, cv2.IMREAD_COLOR)
        if img is None:
            raise FileNotFoundError(f"Failed to read image: {self.img_path}")
        h, w, _ = img.shape

        # Treat the whole image as the person box and derive its center/scale.
        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
        r = 0  # rotation is fixed to zero at inference time
        # Affine transform mapping the scaled box onto the network input.
        trans = get_affine_transform(person_center, s, r, self.input_size)
        input_img = cv2.warpAffine(
            img,
            trans,
            (int(self.input_size[1]), int(self.input_size[0])),  # (width, height)
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=(0, 0, 0))
        if self.transform is not None:
            input_img = self.transform(input_img)
        # Metadata needed to map the prediction back onto the original image.
        meta = {
            'name': self.img_path,
            'center': person_center,
            'height': h,
            'width': w,
            'scale': s,
            'rotation': r
        }
        return input_img, meta
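

if __name__ == '__main__':
    # Usage sketch (added for illustration; not part of the original file).
    # 'demo.jpg' is a hypothetical path, and the BGR mean/std values below
    # are an assumption modeled on common cv2-based normalization pipelines,
    # not taken from this repository.
    from torch.utils.data import DataLoader
    import torchvision.transforms as transforms

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229]),
    ])
    dataset = SimpleFileDataset('demo.jpg', input_size=(473, 473),
                                transform=transform)
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    for image, meta in loader:
        print(image.shape, meta['name'])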