Artistic Portrait Drawing Generative Adversarial Network (APDrawingGAN)

Source: Ran Yi, Yong-Jin Liu, Yu-Kun Lai, Paul L. Rosin. "APDrawingGAN: Generating Artistic Portrait Drawings from Face Photos with Hierarchical GANs." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 10743–10752.

Demo ArtLine

import fastai
from fastai.vision import *
from fastai.utils.mem import *
from fastai.vision import open_image, load_learner, image, torch
import numpy as np
import urllib.request
import PIL.Image
from io import BytesIO
import torchvision.transforms as T
from PIL import Image
import requests
class FeatureLoss(nn.Module):
    """Perceptual loss: pixel loss + per-layer feature losses + Gram-matrix losses.

    Hooks the requested layers of a feature extractor `m_feat` and, on each
    forward pass, compares the hooked activations of the network input against
    those of the target, weighting each layer by `layer_wgts`.
    """

    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        # detach=False so gradients flow back through the hooked activations.
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        names = ['pixel']
        names += [f'feat_{i}' for i in range(len(layer_ids))]
        names += [f'gram_{i}' for i in range(len(layer_ids))]
        self.metric_names = names

    def make_features(self, x, clone=False):
        # Run the extractor purely for its side effect of filling the hooks.
        self.m_feat(x)
        if clone:
            return [o.clone() for o in self.hooks.stored]
        return [o for o in self.hooks.stored]

    def forward(self, input, target):
        # Target features are cloned so the second extractor pass (on `input`)
        # does not overwrite them via the shared hooks.
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        losses = [base_loss(input, target)]
        for f_in, f_out, w in zip(in_feat, out_feat, self.wgts):
            losses.append(base_loss(f_in, f_out) * w)
        for f_in, f_out, w in zip(in_feat, out_feat, self.wgts):
            # Gram term scaled by w**2 * 5e3, matching the style-loss recipe.
            losses.append(base_loss(gram_matrix(f_in), gram_matrix(f_out)) * w ** 2 * 5e3)
        self.feat_losses = losses
        self.metrics = dict(zip(self.metric_names, losses))
        return sum(losses)

    def __del__(self):
        # Remove the forward hooks when the loss object is garbage-collected.
        self.hooks.remove()
def add_margin(pil_img, top, right, bottom, left, color):
    """Return a copy of *pil_img* padded with the given margins.

    A new canvas of the enlarged size is created in the same mode, filled
    with *color*, and the original image is pasted at offset (left, top).
    """
    w, h = pil_img.size
    padded = Image.new(pil_img.mode, (w + left + right, h + top + bottom), color)
    padded.paste(pil_img, (left, top))
    return padded
# Load the exported ArtLine generator with fastai.
# NOTE(review): hard-coded local Windows path — adjust for your machine.
learn=load_learner('C:/Users/LENOVO/Downloads/ArtLine/ArtLine-main', 'ArtLine_920.pkl')
# Path to the input face photo to be converted to a line drawing.
img = "C:/Users/LENOVO/Downloads/ArtLine/Image/2212111952.png"
# Display the source image (notebook-style bare expression).
open_image(img)
Source: https://assets.pikiran-rakyat.com/crop/3x282:688x907/x/photo/2020/10/10/2212111952.png
# Unpack the predict result; img_hr is the generated image rendered below.
p,img_hr,b = learn.predict(open_image(img))
# Show the line-drawing output at 9x9 inches.
show_image(img_hr, figsize=(9,9), interpolation='nearest');
import fastai
from fastai.vision import *
from fastai.utils.mem import *
from fastai.vision import open_image, load_learner, image, torch
import numpy as np
import urllib.request
import PIL.Image
from io import BytesIO
import torchvision.transforms as T
from PIL import Image
import requests
class FeatureLoss(nn.Module):
    """Combined perceptual loss (pixel + feature + Gram-matrix terms).

    `m_feat` is a feature extractor; the layers selected by `layer_ids`
    are hooked and their activations compared between input and target,
    each weighted by the corresponding entry of `layer_wgts`.
    """

    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        # Keep gradients through the hooked activations (detach=False).
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        n = len(layer_ids)
        self.metric_names = (['pixel']
                             + [f'feat_{i}' for i in range(n)]
                             + [f'gram_{i}' for i in range(n)])

    def make_features(self, x, clone=False):
        # Forward pass only to populate the hooks' stored activations.
        self.m_feat(x)
        stored = self.hooks.stored
        return [o.clone() for o in stored] if clone else [o for o in stored]

    def forward(self, input, target):
        # Clone target activations first: the hooks are overwritten by the
        # subsequent pass over `input`.
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        terms = [base_loss(input, target)]
        terms += [base_loss(fi, fo) * w
                  for fi, fo, w in zip(in_feat, out_feat, self.wgts)]
        # Gram-matrix (style) terms, scaled by w**2 * 5e3.
        terms += [base_loss(gram_matrix(fi), gram_matrix(fo)) * w ** 2 * 5e3
                  for fi, fo, w in zip(in_feat, out_feat, self.wgts)]
        self.feat_losses = terms
        self.metrics = dict(zip(self.metric_names, terms))
        return sum(terms)

    def __del__(self):
        # Detach the forward hooks on destruction.
        self.hooks.remove()

def add_margin(pil_img, top, right, bottom, left, color):
    """Pad *pil_img* by the given per-side margins and return the new image.

    The result is a fresh image of the same mode, sized to fit the margins,
    pre-filled with *color*, with the original pasted at (left, top).
    """
    width, height = pil_img.size
    new_size = (width + left + right, height + top + bottom)
    canvas = Image.new(pil_img.mode, new_size, color)
    canvas.paste(pil_img, (left, top))
    return canvas
# Load both exported models: the ArtLine line-drawing generator and the
# Toon-Me cartoon generator.
# NOTE(review): hard-coded local Windows paths — adjust for your machine.
learn=load_learner('C:/Users/LENOVO/Downloads/ArtLine/ArtLine-main', 'ArtLine_920.pkl')
learn_c=load_learner('C:/Users/LENOVO/Downloads/ArtLine/ArtLine-main', 'Toon-Me_820.pkl')
# Input photo for the Toon-Me model.
img = "C:/Users/LENOVO/Downloads/ArtLine/Image/Capture1.png"
# Display the source image (notebook-style bare expression).
open_image(img)
# Unpack the predict result; img_hr is the generated image rendered below.
p,img_hr,b = learn_c.predict(open_image(img))
show_image(img_hr, figsize=(9,9), interpolation='nearest');
import cv2
import time

# Grab one frame from the default webcam, save it to disk, and run the
# ArtLine model on the saved file.
# Defect fixed: the extracted source lost all indentation, leaving the loop
# body syntactically broken; the structure is reconstructed here. The
# original single-shot behavior (unconditional break after the first
# successfully read frame) is preserved.
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera unavailable or read failed — stop without writing a frame.
        break
    cv2.imwrite('Frame0.jpg', frame)
    # Re-read the saved frame from its absolute path for the model.
    frame = "C:/Users/LENOVO/Downloads/ArtLine/ArtLine-main/Frame0.jpg"
    p, img_hr, b = learn.predict(open_image(frame))
    show_image(img_hr, figsize=(9, 9), interpolation='nearest')
    cv2.destroyAllWindows()
    time.sleep(1)
    break  # single-shot capture: exit after the first frame

# Release the camera handle.
cap.release()
# Destroy all the windows
# Also run the Toon-Me model on the earlier still image `img`.
p, img_hr, b = learn_c.predict(open_image(img))
show_image(img_hr, figsize=(9, 9), interpolation='nearest')

--

--

Love podcasts or audiobooks? Learn on the go with our new app.

Get the Medium app

A button that says 'Download on the App Store', and if clicked it will lead you to the iOS App store
A button that says 'Get it on, Google Play', and if clicked it will lead you to the Google Play store