See the Pen yLOQNKZ by Cartelet Cydius (@cartelet-cydius) on CodePen.
Uzura Info himself tweeted a request that people refrain from publishing anime lists in his format, so except for the single sample near the beginning of this article, the anime lists I release from now on will use an original design of my own (I have received his permission for this). If you want an Uzura-Info-style image, please run the program posted below.
You can generate it on Colab from here: [Example anime-list auto-generation script](https://colab.research.google.com/github/Cartelet0423/animeListGen/blob/master/%E4%BE%8B%E3%81%AE%E3%82%A2%E3%83%8B%E3%83%A1%E3%83%AA%E3%82%B9%E3%83%88%E8%87%AA%E5%8B%95%E7%94%9F%E6%88%90%E3%82%B9%E3%82%AF%E3%83%AA%E3%83%97%E3%83%88.ipynb)
Do you know Uzura Info? If you like anime, you may well have relied on his work at some point without knowing the name. From around 2011 until now, he has produced an anime list like the one below for every cour (broadcast season).
Mr. Uzura Info seems to finish the production of the animation list in this term (2020 summer animation). → Notice of discontinuation of list production Thank you for what you have done.
However, there is a large list that can be seen at a glance, and many people, including myself, feel that it is necessary.
** I wrote a program that creates images like this without permission. **
Since all I have to do is run the program, I would like to update it every period unless I forget it or the source animateTimes finishes updating the list. (Updates are at the top of the page).
Explanatory image on the upper left Everything else is manufactured within the program. (This image can also be created in the program, but it is troublesome to adjust the position of the characters, so I made it with Photoshop.)
・ Generate base image material
· Get information from ** animateTimes **
・ Format information into dictionary type
・ Separate sentences that are too long on one line, and insert line breaks so that they do not break at strange positions.
-Transform the base image to fit the character size-> insert characters-> transform it into the desired shape-> place it in the desired position of another base image
・ Get an image from an address
・ Face recognition with OpenCV (requires lbpcascade_animeface.xml
)
Reference: Anime face detection with OpenCV
-Cut out the image so that the average of the recognized face positions is (close to) the center.
・ Place in the desired position of the base image
・ While doing the above work for all animations, arrange them in a grid of $ 6 \ times N $
-Reproduce the shadow in the extra space
・ Export
The code may be hard to read, since it is just functions written ad hoc in Jupyter. (9/23: improved the character-insertion routine.)
from requests import get
import re
from bs4 import BeautifulSoup
from math import ceil
from janome.tokenizer import Tokenizer
from PIL import Image, ImageFont, ImageDraw, ImageFilter
import matplotlib.pyplot as plt
from io import BytesIO
import unicodedata
import numpy as np
import cv2
# Anime-face detector (requires lbpcascade_animeface.xml next to this script).
classifier = cv2.CascadeClassifier('lbpcascade_animeface.xml')
# Japanese morphological tokenizer, used to choose natural line-break positions.
t = Tokenizer()
# Blank card: one 158x332 dark-grey cell of the output grid.
template = Image.new('RGB', (158, 332), (71, 71, 71))
# 256x256 vertical grey gradient (130.5 -> 84.5, top to bottom) used as the
# background tile behind most text fields.
part = Image.fromarray(np.r_[[[np.linspace(130.5, 84.5, 256)] * 256] *
                             3].T.astype(np.uint8))
# Final (width, height) of each text area on a card, keyed by field name.
aimsize = {
    "title": (157, 39),
    "Production contractor": (70, 39),
    "staff": (86, 99),
    "cast": (70, 112),
    "Broadcast schedule": (86, 52),
    "Original": (157, 37),
}
# Upper-left paste position of each text area on a card.
aimpoint = {
    "title": (0, 103),
    "Production contractor": (0, 142),
    "staff": (71, 195),
    "cast": (0, 182),
    "Broadcast schedule": (71, 142),
    "Original": (0, 295),
}
def get_data(url: str) -> dict:
    """Scrape an animateTimes season-tag page into a per-title info dict.

    Returns a dict keyed by work title; each value is a dict with keys
    "img" (key-visual URL), "Original", "cast", "Production contractor",
    "Broadcast schedule", "staff", and — when found — "href" (official site).

    Side effect: sets the module-level global ``Title`` to the page title
    (with a newline inserted after the leading year digits).
    """
    global Title
    html = get(url).text
    soup = BeautifulSoup(html, 'html.parser')
    # Turn <br> tags into real newlines so .text keeps the line structure.
    for i in soup.select("br"):
        i.replace_with("\n")
    # Page title up to the first "|" (full-width bars normalised to ASCII
    # first); "2020 summer ..." becomes "2020\nsummer ..." for 2-line display.
    Title = re.sub("(\d+)(.+)", "\\1\n\\2",
                   soup.title.text.replace("|", "|").split("|")[0])
    #"  (kept: closes confused editor string-highlighting after the full-width bar)
    li = []
    headingh2 = soup.find_all('h2', class_='c-heading-h2')
    # Drop a leading heading that is not the first numbered entry.
    if headingh2[0].get("id") != "1":
        headingh2.pop(0)
    # One <h2>/<table> pair per work.  Each collected entry ends up as
    # [img_url, title, <remaining th texts...>, first_th_text].
    for i, j in zip(headingh2, soup.find_all('table')):
        a = [k.text for k in j.select("th")]
        a.append(a[0])  # first <th> is re-used at the end (broadcast schedule)
        a[0] = i.text   # ...and replaced by the work title from the heading
        aa = []
        # First <img> after the heading is taken as the key visual.
        for e in i.next_elements:
            if e.name == "img":
                aa.append(e["src"])
                break
        for k in a:
            if k:
                if k[0] == "\n":
                    k = k[1:]  # strip a leading newline introduced by <br>
            aa.append(k)
        li.append(aa)
    data = {}
    for i in li:
        # i[0]=img URL, i[1]=title, i[2]=cast block, i[3]=staff block,
        # i[-1]=broadcast schedule (assumed table layout — TODO confirm
        # against the live page if scraping breaks).
        d = {"img": "", "Original": "", "cast": "", "Production contractor": "", "Broadcast schedule": ""}
        data[i[1]] = d
        d["img"] = i[0]
        d["Broadcast schedule"] = i[-1]
        # "role: name" lines -> names only (full-width colons normalised).
        d["cast"] = "\n".join(re.findall(".+:(.+)", i[2].replace(":", ":")))
        staff = []
        for j in i[3].splitlines():
            j = j.replace(":", ":")
            if len(j.split(':')) < 2: continue  # skip lines without a role
            if "Original" in j:
                d["Original"] = " ".join(j.split(':')[1:])
            elif "Production" in j:
                d["Production contractor"] = j.split(':')[1]
            else:
                staff.append("\n".join(j.split(':')))
        d["staff"] = "\n".join(staff)
        # Walk backwards from the per-work "latest articles" anchor text to
        # find the official-site link for this work.
        for j in soup.find(text=f"『{i[1]}] Latest article / related video list").previous_elements:
            if j.name == "a" and "site" in j.text:
                data[i[1]]['href'] = j["href"]
                break
    return data
def len_(text):
    """Return the display width of *text* in terminal columns.

    Characters whose East Asian width class is Fullwidth, Wide, or
    Ambiguous count as two columns; everything else counts as one.
    """
    return sum(
        2 if unicodedata.east_asian_width(ch) in 'FWA' else 1
        for ch in text
    )
def nn(text, w):
    """Re-wrap *text* at morpheme boundaries so no line exceeds *w* chars.

    Tokenizes with the module-level janome ``Tokenizer`` so breaks fall
    between words rather than mid-word; pre-existing newline tokens reset
    the running width.  Double newlines are collapsed at the end.
    """
    pieces = []
    width = 0
    for token in t.tokenize(text, wakati=True):
        if width + len(token) > w:
            # Token would overflow the current line: break before it.
            pieces.append("\n")
            width = 0
        elif token == "\n":
            width = 0
        pieces.append(token)
        width += len(token)
    return "".join(pieces).replace("\n\n", "\n")
def mojiire(text, font_path, tmp, aimsize, aimpoint, case, hopt):
    """Render *text* onto a background tile and paste it into card *tmp*.

    The text is drawn large (100 pt) onto a working tile, then the tile is
    resized down to *aimsize* and pasted at *aimpoint* — i.e. the glyphs
    are squeezed to fit the target box rather than the font size chosen.

    Parameters:
        text:      field text to draw (may be empty).
        font_path: path to a TrueType font file.
        tmp:       destination PIL image; modified in place via paste().
        aimsize:   final (width, height) of the tile on the card.
        aimpoint:  upper-left paste position on the card.
        case:      layout variant — 1 = title (solid dark tile, wrap at 14);
                   2 = broadcast schedule (last 3 lines, wrap at 10, plus a
                   grey separator line); 3 = original-work credit (wrap at
                   20, right-padded to display width 20); any other value =
                   staff/cast (first 8 lines, wrap at 10).
        hopt:      minimum number of text lines; shorter text is padded
                   with blank lines so all tiles scale consistently.
    """
    # Drop the space that nn() wrapping can leave at a line start.
    text = text.replace("\n ", "\n")
    if case == 1:
        tm = Image.new('RGB', (256, 256), (66, 58, 59))
        if text:
            text = nn(text, 14)
    elif case == 2:
        tm = part.copy()
        if text:
            text = nn("\n".join(text.splitlines()[-3:]), 10) + "\n "
    elif case == 3:
        tm = part.copy()
        if text:
            text = nn(text, 20)
            # Pad with spaces so the tile keeps a constant aspect ratio.
            while len_(text) < 20:
                text += " "
    else:
        tm = part.copy()
        if text:
            text = nn("\n".join(text.splitlines()[:8]), 10)
    if text:
        # Pad up to the minimum line count for this field.
        while len(text.splitlines()) < hopt:
            text += "\n "
    font = ImageFont.truetype(font_path, 100)
    draw = ImageDraw.Draw(tm)
    # NOTE(review): ImageDraw.textsize was deprecated in Pillow 9.2 and
    # removed in 10.0 — this code assumes an older Pillow; confirm the
    # pinned version or migrate to textbbox/multiline_textbbox.
    x, y = draw.textsize(text, font=font, spacing=1)
    # Grow the tile to the rendered text size (plus a 15 px margin all
    # round), draw, then squeeze the whole tile down to the target box.
    tm = tm.resize((x + 30, y + 30))
    draw = ImageDraw.Draw(tm)
    draw.text((15, 15), text, font=font, spacing=1)
    tm = tm.resize(aimsize)
    if case == 2:
        # Separator under the header row of the schedule box.
        draw = ImageDraw.Draw(tm)
        draw.line((0, 39, aimsize[0], 39), fill=(179, 179, 179), width=1)
    else:
        tm = tm.resize(aimsize)  # no-op: already resized above
    tmp.paste(tm, aimpoint)
def main(url, font_title, font_main):
    """Generate the season's anime-list grid image and save it as "<Title>.png".

    Parameters:
        url:        animateTimes tag page to scrape (passed to get_data()).
        font_title: TrueType font path used for the work-title field.
        font_main:  TrueType font path used for every other field.

    Side effects: uses the module-level ``Title`` global set by get_data(),
    downloads each work's key visual over the network, and writes the final
    PNG into the current directory.

    Fixes vs. the original: the output filename no longer has a trailing
    space ("....png " -> "....png"), and the two bare ``except:`` clauses
    around the row/image accumulators now catch only the intended
    ``NameError`` (first card of a row / first row of the image).
    """
    data = get_data(url)
    titles = list(data.keys())
    inList = True  # True until the first filler cell after the last title
    # Grid is 6 cards wide; cell 0 is reserved for the legend image.
    for x in range(ceil((len(titles) + 1) / 6)):
        for y in range(6):
            i = x * 6 + y - 1  # index into titles; -1 marks the legend cell
            tmp = template.copy()
            if i == -1:
                # Legend cell: hand-made explanation image with the season
                # title drawn on top.  NOTE(review): placeholder path — must
                # be replaced with a real file before running.
                tmp = Image.open("Path of the explanation image on the upper left").convert("RGB")
                font = ImageFont.truetype(font_main, 20)
                draw = ImageDraw.Draw(tmp)
                draw.text((2, 2), Title, (96, 167, 200), font=font, spacing=1)
                tmp = np.array(tmp)
            elif i < len(titles):
                # Render each text field of this work onto the card.
                for kw in aimsize.keys():
                    if kw == "title":
                        case = 1
                        hopt = 1
                    elif kw == "Broadcast schedule":
                        case = 2
                        hopt = 3
                    elif kw == "Original":
                        case = 3
                        hopt = 2
                    else:
                        case = 0
                        if kw == "Production contractor":
                            hopt = 2
                        else:
                            hopt = 6
                    mojiire(titles[i] if kw == "title" else data[titles[i]][kw],
                            font_title if kw == "title" else font_main, tmp,
                            aimsize[kw], aimpoint[kw], case, hopt)
                # Best effort: fetch the key visual, centre the crop on the
                # detected anime faces, and paste it into the top of the card.
                try:
                    img = Image.open(
                        BytesIO(get(
                            data[titles[i]]["img"]).content)).convert("RGB")
                    gray_image = cv2.cvtColor(np.array(img),
                                              cv2.COLOR_BGR2GRAY)
                    faces = classifier.detectMultiScale(gray_image)
                    h, w = img.height, img.width
                    if len(faces):
                        # Average of face centres weighted by face size
                        # squared, so larger (closer) faces dominate.
                        x_, y_ = (
                            np.r_[[faces[:, 3]**2 /
                                   (faces[:, 3]**2).sum()]].T *
                            (faces[:, :2] + faces[:, 2:] * .5)).sum(axis=0,
                                                                    dtype=int)
                    else:
                        # No face found: fall back to slightly above centre.
                        x_, y_ = 0.5 * w, 0.45 * h
                    if w > 1.5 * h:
                        # Landscape image: crop a 1.5*h-wide window around
                        # x_, shifted back inside the image if it overflows.
                        cropped_image = img.crop(
                            (max(0, int(x_ - .75 * h)) -
                             max(0,
                                 int(x_ + .75 * h) - w), 0,
                             min(w, int(x_ + .75 * h)) +
                             max(0, -int(x_ - .75 * h)), h))
                    else:
                        # Tall image: crop a (2/3)*w-tall window around y_,
                        # clamped to the image bounds the same way.
                        cropped_image = img.crop(
                            (0, max(0, int(y_ - (1 / 3) * w)) -
                             max(0,
                                 int(y_ + (1 / 3) * w) - h), w,
                             min(h, int(y_ + (1 / 3) * w)) +
                             max(0, -int(y_ - (1 / 3) * w))))
                    tmp = np.array(tmp)
                    # Last pixel column is left untouched as the card border.
                    tmp[:103, :-1] = np.array(cropped_image.resize((157, 103)))
                except Exception as e:
                    # Network fetch / decode / detection can fail per show;
                    # log and keep building the rest of the grid.
                    print(e)
            elif inList:
                # First empty cell after the list: blurred card with a fake
                # page-edge shadow down the left side.
                foundation = np.array(template)
                foundation[20:, :10] = 0
                foundation[:2] = 0
                tmp = np.array(
                    Image.fromarray(foundation).filter(
                        ImageFilter.GaussianBlur(10.0)))
                inList = False
            else:
                # Remaining filler cells: blurred card, top shadow only.
                foundation = np.array(template)
                foundation[:2] = 0
                tmp = np.array(
                    Image.fromarray(foundation).filter(
                        ImageFilter.GaussianBlur(10.0)))
            # Append the card to the current row; `line` does not exist for
            # the first card of each row (deleted below), hence NameError.
            try:
                line = np.r_["1", line, tmp]
            except NameError:
                line = tmp.copy()
        # Stack the finished row under the image built so far.
        try:
            image = np.r_["0", image, line]
        except NameError:
            image = line.copy()
        del line
    plt.imsave(f"{''.join(Title.splitlines())}.png", image)
if __name__ == "__main__":
    url = "https://www.animatetimes.com/tag/details.php?id=5947"  # animateTimes tag page for the target season
    font_title = "C:\\Windows\\Fonts\\YuGothB.ttc"  # font used for the work-title field
    font_main = "C:\\Windows\\Fonts\\YuGothM.ttc"  # font used for every other field
    main(url, font_title, font_main)
Known weaknesses: the script is fragile against changes in the structure of the source site, and when no face is recognized — or the composition has faces scattered all over — the automatic cropping can turn out poorly.
The details are far from handmade, but I think that the necessary information as a minimum list has been suppressed.
Recommended Posts