emmm… school is about to start, and all I have is an ID photo with a red background, but the college requires a blue one. What to do? Too lazy to download Photoshop, so let's roll our own.
After annotating with labelme you get a JSON file; the first step is to convert that JSON into a mask image.
# code from https://blog.csdn.net/hello_dear_you/article/details/120130155
import json
import numpy as np
import cv2
# read json file
with open("origin_json/mypic.json", "r") as f:
    data = f.read()
# convert str to json objs
data = json.loads(data)
# get the points
points = data["shapes"][0]["points"]
points = np.array(points, dtype=np.int32) # tip: point coordinates must be int32
# read image to get shape
image = cv2.imread("origin_png/person.jpg")
# create a blank image
mask = np.zeros_like(image, dtype=np.uint8)
# fill the contour with 255
cv2.fillPoly(mask, [points], (255, 255, 255))
# save the mask
cv2.imwrite("mask/person_mask.png", mask)
The resulting mask is roughly the person's silhouette filled in white on a black background.
Then use this mask to generate the new image.
# adapted from: https://www.jianshu.com/p/1961aa0c02ee
import cv2
import numpy as np

origin_png = 'origin_png/person.jpg'
# maskPath = 'mask/person_mask.png'
maskPath = 'mask/bmv2.png'
result_png = 'result_png/result_png.png'

maskImg = cv2.imread(maskPath)
img = cv2.imread(origin_png)
assert maskImg.shape == img.shape, 'maskImg.shape != origin_png.shape'

h, w = img.shape[0], img.shape[1]
print('image height: {}, width: {}'.format(h, w))

rgb = (19, 122, 171)            # target blue background
bgr = (rgb[2], rgb[1], rgb[0])  # OpenCV stores pixels as (B, G, R)

for i in range(h):
    for j in range(w):
        # pixels where the mask is black belong to the background
        if (maskImg[i, j] == 0).all():
            img[i, j] = bgr

cv2.imwrite(result_png, img)
print('image written to {}'.format(result_png))
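Incidentally, the double Python loop above isn't necessary: OpenCV images are just numpy arrays, so the same replacement can be done with boolean indexing. A minimal sketch under the same file layout:

import cv2

img = cv2.imread('origin_png/person.jpg')
maskImg = cv2.imread('mask/bmv2.png')

# True where all three mask channels are zero, i.e. background
background = (maskImg == 0).all(axis=2)
img[background] = (171, 122, 19)  # target blue in (B, G, R) order
cv2.imwrite('result_png/result_png.png', img)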
Since I'm only average-looking, I won't post the result…
Drawbacks:
Annotating with labelme is fairly laborious, and it's hard to avoid leftover red pixels along the edge between the person and the background.
The next method instead decides whether a pixel is background by comparing its RGB value against the background's RGB value.
import cv2
import numpy as np

def mean_square_loss(a_np, b_np):
    # cast to int32 first: subtracting uint8 values would wrap around
    sl = np.square(a_np.astype(np.int32) - np.asarray(b_np, dtype=np.int32))
    return np.mean(sl)

def change_red2blue(origin_png, result_png):
    img = cv2.imread(origin_png)
    h, w = img.shape[0], img.shape[1]
    print('image height: {}, width: {}'.format(h, w))

    origin_rgb = (168, 36, 32)   # background RGB, e.g. picked with a browser dev-tools color picker
    origin_bgr = (origin_rgb[2], origin_rgb[1], origin_rgb[0])
    target_rgb = (19, 122, 171)  # target blue RGB
    target_bgr = (target_rgb[2], target_rgb[1], target_rgb[0])

    for i in range(h):
        for j in range(w):
            # img[i, j] is (B, G, R)
            if mean_square_loss(img[i, j], origin_bgr) < 50:
                img[i, j] = target_bgr

    cv2.imwrite(result_png, img)
    print('image written to {}'.format(result_png))

if __name__ == '__main__':
    # origin_png = 'result_png/result_png.png'
    origin_png = 'origin_png/person.jpg'
    result_png = 'result_png/result_refine.png'
    change_red2blue(origin_png, result_png)
The result still has residual red pixels along the edge between the person and the background. Here is a variant of the same idea in PyTorch, which builds an alpha matte and composites the new background in floating point:
from torchvision.transforms.functional import to_tensor, to_pil_image
from PIL import Image
import torch
import time

def mean_square_loss(a_ts, b_ts):
    # actually the *sum* of squared channel differences; the threshold below is tuned for this
    sl = (a_ts - b_ts) ** 2
    return sl.sum()

def change_red2blue(origin_png, result_png):
    src = Image.open(origin_png)
    src = to_tensor(src)
    # print(src.shape)  # torch.Size([3, 800, 600])
    # channel order: (R, G, B), values scaled to [0, 1]
    h, w = src.shape[1], src.shape[2]
    pha = torch.ones(h, w, 3)
    bg = torch.tensor([168, 36, 32]) / 255
    target_bg = torch.tensor([19, 122, 171]) / 255

    # C, H, W -> H, W, C
    src = src.permute(1, 2, 0)
    for i in range(h):
        for j in range(w):
            if mean_square_loss(src[i][j], bg) < 0.025:  # 0.025 is a threshold hyperparameter
                pha[i][j] = torch.tensor([0.0, 0.0, 0.0])

    # H, W, C -> C, H, W
    src = src.permute(2, 0, 1)
    pha = pha.permute(2, 0, 1)
    com = pha * src + (1 - pha) * target_bg.view(3, 1, 1)
    to_pil_image(com).save(result_png)

if __name__ == '__main__':
    origin_png = 'origin_png/person.jpg'
    result_png = 'result_png/com.png'
    start_time = time.time()
    change_red2blue(origin_png, result_png)
    spend_time = round(time.time() - start_time, 2)
    print('done, took {} seconds'.format(spend_time))
This method produces decent quality, but takes about 12 seconds per image.
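Almost all of that time goes into the per-pixel Python loop; the same thresholding can be done with vectorized tensor ops. A minimal sketch under the same assumptions (same background color and 0.025 threshold; the output name com_fast.png is made up):

from torchvision.transforms.functional import to_tensor, to_pil_image
from PIL import Image
import torch

src = to_tensor(Image.open('origin_png/person.jpg'))          # (3, H, W), RGB in [0, 1]
bg = torch.tensor([168, 36, 32]).view(3, 1, 1) / 255          # red background
target_bg = torch.tensor([19, 122, 171]).view(3, 1, 1) / 255  # target blue

# per-pixel sum of squared channel differences, shape (H, W)
dist = ((src - bg) ** 2).sum(dim=0)
# alpha matte: 1 where the pixel differs enough from the background color
pha = (dist >= 0.025).float().unsqueeze(0)

com = pha * src + (1 - pha) * target_bg
to_pil_image(com).save('result_png/com_fast.png')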
Real-Time High-Resolution Background Matting (CVPR 2021 oral)
Paper: https://arxiv.org/abs/2012.07810
Code: https://github.com/PeterL1n/BackgroundMattingV2
The GitHub README links to a Colab notebook for inference; you can run the model there.
This paper's model takes two inputs: a photo with the subject in it (say, a person standing on a lawn) and a photo of the bare background (the same lawn without the person), and it then mattes the person out.
So here I need to synthesize a background image.
First, I used Firefox's color picker to grab the background color as a hex value, then converted it to RGB with an online tool.
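(The online tool is optional; a small Python snippet does the same hex-to-RGB conversion. The hex value #a82420 below is an assumption, chosen to match the RGB (168, 36, 32) used throughout this post.)

hex_color = '#a82420'  # assumed hex value picked from the photo's red background
# skip the '#', then parse each pair of hex digits as one channel
rgb = tuple(int(hex_color[i:i + 2], 16) for i in (1, 3, 5))
print(rgb)  # (168, 36, 32)

Then generate a solid background image: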
import cv2
import numpy as np
image = cv2.imread("origin_png/person.jpg")
origin_rgb = (168,36,32) # background RGB, e.g. extracted with a browser dev-tools color picker
origin_bgr = (origin_rgb[2], origin_rgb[1], origin_rgb[0])
image[:, :] = origin_bgr
cv2.imwrite("mask/bg.png", image)
Then feed the photo and this synthesized background to the model, and swap in the new background color.
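For the final swap, the model's inference (per the repo's README) returns an alpha matte pha and a foreground fgr; compositing them over solid blue is then a one-liner. A minimal sketch, where composite_over_blue is a hypothetical helper and pha/fgr are assumed to come from a BackgroundMattingV2 forward pass:

import torch
from torchvision.transforms.functional import to_pil_image

def composite_over_blue(pha: torch.Tensor, fgr: torch.Tensor, out_path: str):
    # pha: (1, 1, H, W) alpha matte, fgr: (1, 3, H, W) foreground,
    # both in [0, 1], as produced by a BackgroundMattingV2 forward pass
    target_bg = torch.tensor([19, 122, 171]).view(1, 3, 1, 1) / 255  # target blue (RGB)
    com = pha * fgr + (1 - pha) * target_bg
    to_pil_image(com[0]).save(out_path)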
And out comes the result. The quality is remarkably good; no wonder it's an oral.
The original paper can even achieve hair-strand-level mattes.