from PIL import Image
import numpy as np
import cv2
import zlib
import yaml
from io import BytesIO
from base64 import b64encode

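# NOTE: this script appears to target an in-browser Python runtime
# (e.g. PyScript): `document` and `ceibytes` are not defined in this file
# and are assumed to be provided by the hosting page.
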
def get_handle(text):
    """Build a human-readable title from a CEI tag string."""
    if not isinstance(text, str):
        text = " ".join(text)
    # If the first character is a letter, tags are space-separated;
    # otherwise the first character itself is the delimiter.
    if text[0].lower() != text[0].upper():
        delimit = " "
    else:
        delimit = text[0]
        text = text[1:]
    d = {"tags": []}
    for tag in text.split(delimit):
        if ":" in tag:
            key, value = tag.split(":", 1)
            d[key] = value
        else:
            d["tags"].append(tag)
    d["tags"] = " ".join(d["tags"])

    if "name" in d and "author" in d:
        base = f"{d['name']} by {d['author']}"
    elif "name" in d:
        base = f"{d['name']}"
    else:
        base = "CEI file"
    additional = ", tags: " + d["tags"] if d["tags"] else ""
    return base + additional

def string_to_bytes(sobj):
    # Decode a hex string (two characters per byte) into bytes,
    # equivalent to bytes.fromhex for plain hex input.
    bobj = bytes([])
    for n in range(0, len(sobj), 2):
        bobj += bytes([int(sobj[n:n + 2], 16)])
    return bobj

def np_LSS_procedural(LSS, obj=[]):
    # Convert an LSS image back to RGB in 16 horizontal strips, pushing each
    # strip into the page as soon as it is ready.  `obj` is used as an output
    # parameter: the caller's list is filled in place with the strip images.
    L, S_1, S_2 = cv2.split(LSS.astype(np.float32))
    vnp_lss = np.vectorize(np_lss)
    parts = 16

    obj[:] = [None for i in range(parts)]

    length = LSS.shape[0]
    L_list = [L[int(length * i / parts):int(length * (i + 1) / parts), :] for i in range(parts)]
    S_1_list = [S_1[int(length * i / parts):int(length * (i + 1) / parts), :] for i in range(parts)]
    S_2_list = [S_2[int(length * i / parts):int(length * (i + 1) / parts), :] for i in range(parts)]

    for n in range(parts):
        l, s1, s2 = [i[n] for i in [L_list, S_1_list, S_2_list]]
        merged = cv2.merge(vnp_lss(l, s1, s2))
        obj[n] = Image.fromarray(merged.astype(np.uint8))
        # Encode the strip as a PNG data URL and assign it to the matching
        # <img> element on the page.
        img_io = BytesIO()
        obj[n].save(img_io, "PNG")
        img_io.seek(0)
        dataurl = 'data:image/png;base64,' + b64encode(img_io.getvalue()).decode('ascii')
        document.getElementById(f"cei-image-{n}").src = dataurl

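# Example (illustrative): for header byte 0b01101100 and image size (100, 50),
# from_ibyte returns ((16, 16), (4, 7), 28): a full 16x16 sector stored as a
# 4x7 reduced block, so 28 luma bytes follow the header byte.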
def from_ibyte(ibyte, size):
    # Decode a luma sector header byte: the first three bits give the reduced
    # width minus one, the fourth bit flags a partial-width edge sector, the
    # next three bits give the reduced height minus one and the last bit flags
    # a partial-height edge sector.  Returns the sector size on the canvas,
    # the reduced (stored) size and the number of luma bytes that follow.
    ib = bin(ibyte)[2:].zfill(8)
    rw, rh = int(ib[:3], base=2) + 1, int(ib[-4:-1], base=2) + 1
    volume = rw * rh
    x = 16 if not int(ib[3]) else size[0] % 16
    y = 16 if not int(ib[-1]) else size[1] % 16
    return (x, y), (rw, rh), volume

def cei_dict(bd):
    # Parse the top-level key/value structure of a CEI file into a dict.
    l = sepget(bd)
    d = dict()
    for n in range(0, len(l), 2):
        match str(l[n], "utf-8"):
            case "version":
                value = int.from_bytes(l[n + 1], "big")
            case "size" | "csize":
                value = tuple([int.from_bytes(i, "big") for i in sepget(l[n + 1])])
            case "luminocity" | "chromaticity" | "palette":
                value = zlib.decompress(l[n + 1])
            case "tags":
                value = str(l[n + 1], "utf-8")  # tuple([str(i, "utf-8") for i in sepget(l[n+1])])
            case _:
                value = l[n + 1]  # unknown keys are kept as raw bytes

        d[str(l[n], "utf-8")] = value
    return d

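# Example (illustrative): sepget(b"ab" + b"\x02\x01" + b"hello" + b"\x05\x01")
# returns [b"ab", b"hello"]: each item is followed by its big-endian length
# and one final byte giving the size of that length field.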
def sepget(sequence):
    # Split a length-prefixed byte sequence into its items, reading from the
    # end: the last byte gives the size of the length field, the length field
    # gives the size of the item that precedes it.
    items = []
    while len(sequence):
        prelen = sequence[-1]
        sequence = sequence[:-1]
        length = int.from_bytes(sequence[-prelen:], "big")
        sequence = sequence[:-len(sequence[-prelen:])]
        items = [sequence[-length:]] + items
        sequence = sequence[:-length]

    return items

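# Decoding pipeline: cei_dict() parses the container, the "luminocity" stream
# is rebuilt sector by sector into a full-size luma plane, the "palette" and
# "chromaticity" streams are expanded into a chroma plane at "csize" and
# upscaled to "size", and np_LSS() converts the merged LSS image to RGB.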
def open_cei(fname, preview=False):
    # `fname` may be a path to a .cei file or the raw file bytes
    # (the entry point below passes bytes decoded from a hex string).
    if isinstance(fname, (bytes, bytearray)):
        d = cei_dict(fname)
    else:
        with open(fname, "rb") as f:
            d = cei_dict(f.read())
    luma = d["luminocity"]
    focus = 0
    sectors = []
    luma_bg = np.ndarray((d["size"][1], d["size"][0]), dtype=np.uint8)
    # Decode the luma stream sector by sector: each sector starts with a
    # header byte describing its reduced size, followed by the reduced pixels.
    while focus < len(luma):
        sec_size, red_size, volume = from_ibyte(luma[focus], d["size"])
        bytes_ = luma[focus + 1:focus + volume + 1]
        array = np.array([int(i) for i in bytes_], dtype=np.uint8).reshape((red_size[1], red_size[0]))
        sectors.append(cv2.resize(array, sec_size, interpolation=cv2.INTER_LINEAR))
        focus += volume + 1
    # Paste the decoded sectors back onto the full-size luma plane.
    for y in range(0, d["size"][1], 16):
        for x in range(0, d["size"][0], 16):
            sector = sectors.pop(0)
            h, w = sector.shape
            luma_bg[y:y + h, x:x + w] = sector
    if preview:
        return Image.fromarray(luma_bg), d["tags"]
    # Rebuild the palette: each entry holds the two chroma components.
    pal_ind = 0
    palette = dict()
    for n in range(0, len(d["palette"]), 2):
        palette[pal_ind] = tuple(d["palette"][n:n + 2])
        pal_ind += 1

    # Expand the chromaticity indices into a low-resolution chroma plane.
    colors = np.array([0, palette[0][0], palette[0][1]], dtype=np.uint8)
    csize = d["csize"]
    for n, index in enumerate(d["chromaticity"]):
        if n:
            colors = np.append(colors, np.array([0, palette[index][0], palette[index][1]], dtype=np.uint8), axis=0)

    # Upscale the chroma plane to the full image size and drop in the luma.
    lss = colors.reshape(csize[1], csize[0], 3)
    lss = cv2.resize(lss, d["size"], interpolation=cv2.INTER_LINEAR)
    lss[:, :, 0] = luma_bg
    return np_LSS(lss), d["tags"]

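# From the inverse formulas below, the LSS encoding appears to be
# L = (R + G + B) / 3, S_1 = 255 * R / (R + G), S_2 = 255 * G / (G + B);
# np_lss() inverts this per pixel and rescales into 0..255 on overflow.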
def np_lss(L, S_1, S_2):
    # Convert one LSS pixel back to RGB.  The inputs arrive as numpy float32
    # scalars (via np.vectorize), which do not raise ZeroDivisionError, so
    # zero denominators are checked explicitly instead of with try/except.
    s1 = S_1 / (255 - S_1) if S_1 != 255 else S_1
    s2 = (255 - S_2) / S_2 if S_2 != 0 else 255
    G = 3 * L / (s1 + s2 + 1)
    R = (S_1 * G) / (255 - S_1) if S_1 != 255 else S_1 * G
    B = G * (255 - S_2) / S_2 if S_2 != 0 else G * (255 - S_2)
    # Rescale so the brightest component fits into 8 bits.
    maximal = max([R, G, B])
    if maximal > 255:
        multiplier = 255 / maximal
        R, G, B = R * multiplier, G * multiplier, B * multiplier
    R, G, B = round(R), round(G), round(B)
    return R, G, B

def np_LSS(LSS):
    # Convert a whole LSS image back to an RGB PIL image.
    L, S_1, S_2 = cv2.split(LSS.astype(np.float32))
    vnp_lss = np.vectorize(np_lss)
    merged = cv2.merge(vnp_lss(L, S_1, S_2))
    return Image.fromarray(merged.astype(np.uint8))

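# Illustrative use outside the browser (hypothetical file name):
#     img, tags = open_cei("picture.cei")
#     img.save("picture.png")
#     print(get_handle(tags))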
if __name__=="__main__":
|
|
pil_im, text = open_cei(string_to_bytes(ceibytes))
|
|
img_io = BytesIO()
|
|
pil_im.save(img_io, "PNG")
|
|
img_io.seek(0)
|
|
dataurl = 'data:image/png;base64,' + b64encode(img_io.getvalue()).decode('ascii')
|
|
document.getElementById("cei-image").src = dataurl
|
|
document.getElementById("title").innerHTML = get_handle(text)
|