70af4845af
new file: inpaint/__main__.py new file: inpaint/api.py new file: inpaint/batch_processing.py new file: inpaint/benchmark.py new file: inpaint/cli.py new file: inpaint/const.py new file: inpaint/download.py new file: inpaint/file_manager/__init__.py new file: inpaint/file_manager/file_manager.py new file: inpaint/file_manager/storage_backends.py new file: inpaint/file_manager/utils.py new file: inpaint/helper.py new file: inpaint/installer.py new file: inpaint/model/__init__.py new file: inpaint/model/anytext/__init__.py new file: inpaint/model/anytext/anytext_model.py new file: inpaint/model/anytext/anytext_pipeline.py new file: inpaint/model/anytext/anytext_sd15.yaml new file: inpaint/model/anytext/cldm/__init__.py new file: inpaint/model/anytext/cldm/cldm.py new file: inpaint/model/anytext/cldm/ddim_hacked.py new file: inpaint/model/anytext/cldm/embedding_manager.py new file: inpaint/model/anytext/cldm/hack.py new file: inpaint/model/anytext/cldm/model.py new file: inpaint/model/anytext/cldm/recognizer.py new file: inpaint/model/anytext/ldm/__init__.py new file: inpaint/model/anytext/ldm/models/__init__.py new file: inpaint/model/anytext/ldm/models/autoencoder.py new file: inpaint/model/anytext/ldm/models/diffusion/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/ddim.py new file: inpaint/model/anytext/ldm/models/diffusion/ddpm.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py new file: inpaint/model/anytext/ldm/models/diffusion/plms.py new file: inpaint/model/anytext/ldm/models/diffusion/sampling_util.py new file: inpaint/model/anytext/ldm/modules/__init__.py new file: inpaint/model/anytext/ldm/modules/attention.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/model.py new file: 
inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/util.py new file: inpaint/model/anytext/ldm/modules/distributions/__init__.py new file: inpaint/model/anytext/ldm/modules/distributions/distributions.py new file: inpaint/model/anytext/ldm/modules/ema.py new file: inpaint/model/anytext/ldm/modules/encoders/__init__.py new file: inpaint/model/anytext/ldm/modules/encoders/modules.py new file: inpaint/model/anytext/ldm/util.py new file: inpaint/model/anytext/main.py new file: inpaint/model/anytext/ocr_recog/RNN.py new file: inpaint/model/anytext/ocr_recog/RecCTCHead.py new file: inpaint/model/anytext/ocr_recog/RecModel.py new file: inpaint/model/anytext/ocr_recog/RecMv1_enhance.py new file: inpaint/model/anytext/ocr_recog/RecSVTR.py new file: inpaint/model/anytext/ocr_recog/__init__.py new file: inpaint/model/anytext/ocr_recog/common.py new file: inpaint/model/anytext/ocr_recog/en_dict.txt new file: inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt new file: inpaint/model/anytext/utils.py new file: inpaint/model/base.py new file: inpaint/model/brushnet/__init__.py new file: inpaint/model/brushnet/brushnet.py new file: inpaint/model/brushnet/brushnet_unet_forward.py new file: inpaint/model/brushnet/brushnet_wrapper.py new file: inpaint/model/brushnet/pipeline_brushnet.py new file: inpaint/model/brushnet/unet_2d_blocks.py new file: inpaint/model/controlnet.py new file: inpaint/model/ddim_sampler.py new file: inpaint/model/fcf.py new file: inpaint/model/helper/__init__.py new file: inpaint/model/helper/controlnet_preprocess.py new file: inpaint/model/helper/cpu_text_encoder.py new file: inpaint/model/helper/g_diffuser_bot.py new file: inpaint/model/instruct_pix2pix.py new file: inpaint/model/kandinsky.py new file: inpaint/model/lama.py new file: inpaint/model/ldm.py new file: inpaint/model/manga.py new file: 
inpaint/model/mat.py new file: inpaint/model/mi_gan.py new file: inpaint/model/opencv2.py new file: inpaint/model/original_sd_configs/__init__.py new file: inpaint/model/original_sd_configs/sd_xl_base.yaml new file: inpaint/model/original_sd_configs/sd_xl_refiner.yaml new file: inpaint/model/original_sd_configs/v1-inference.yaml new file: inpaint/model/original_sd_configs/v2-inference-v.yaml new file: inpaint/model/paint_by_example.py new file: inpaint/model/plms_sampler.py new file: inpaint/model/power_paint/__init__.py new file: inpaint/model/power_paint/pipeline_powerpaint.py new file: inpaint/model/power_paint/power_paint.py new file: inpaint/model/power_paint/power_paint_v2.py new file: inpaint/model/power_paint/powerpaint_tokenizer.py
112 lines
3.6 KiB
Python
112 lines
3.6 KiB
Python
import torch
|
|
import einops
|
|
|
|
import iopaint.model.anytext.ldm.modules.encoders.modules
|
|
import iopaint.model.anytext.ldm.modules.attention
|
|
|
|
from transformers import logging
|
|
from iopaint.model.anytext.ldm.modules.attention import default
|
|
|
|
|
|
def disable_verbosity():
    """Silence the transformers library's logger, keeping only errors."""
    logging.set_verbosity_error()
    print('logging improved.')
|
|
|
|
|
|
def enable_sliced_attention():
    """Monkey-patch CrossAttention.forward with the memory-saving sliced variant."""
    attention_module = iopaint.model.anytext.ldm.modules.attention
    attention_module.CrossAttention.forward = _hacked_sliced_attentin_forward
    print('Enabled sliced_attention.')
|
|
|
|
|
|
def hack_everything(clip_skip=0):
    """Apply all hacks: quiet logging plus the long-prompt CLIP forward.

    Args:
        clip_skip: stored on FrozenCLIPEmbedder; values > 1 make the patched
            forward use an earlier hidden state (see _hacked_clip_forward).
    """
    disable_verbosity()
    embedder_cls = iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder
    embedder_cls.forward = _hacked_clip_forward
    embedder_cls.clip_skip = clip_skip
    print('Enabled clip hacks.')
|
|
|
|
|
|
# Written by Lvmin
def _hacked_clip_forward(self, text):
    """Replacement FrozenCLIPEmbedder.forward supporting long prompts.

    Tokenizes without special tokens, splits each prompt's ids into three
    consecutive 75-token windows (anything past 225 ids is dropped), wraps
    each window in BOS/EOS, pads every window to 77 ids, encodes all windows
    in one batch, and concatenates the three encoded windows along the
    sequence axis.
    """
    pad_id = self.tokenizer.pad_token_id
    eos_id = self.tokenizer.eos_token_id
    bos_id = self.tokenizer.bos_token_id

    def tokenize(t):
        # Raw ids only — BOS/EOS/PAD are inserted manually per window below.
        return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"]

    def transformer_encode(t):
        if self.clip_skip > 1:
            # clip-skip: take an earlier hidden state and re-apply the final norm.
            out = self.transformer(input_ids=t, output_hidden_states=True)
            return self.transformer.text_model.final_layer_norm(
                out.hidden_states[-self.clip_skip]
            )
        else:
            return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state

    def split(x):
        # Three consecutive 75-token windows.
        return x[0:75], x[75:150], x[150:225]

    def pad(x, p, i):
        # Truncate to i entries, or right-pad with p up to i entries.
        return x[:i] if len(x) >= i else x + [p] * (i - len(x))

    tokens_list = []
    for raw_tokens in tokenize(text):
        windows = [
            pad([bos_id] + window + [eos_id], pad_id, 77)
            for window in split(raw_tokens)
        ]
        tokens_list.append(windows)

    ids = torch.IntTensor(tokens_list).to(self.device)

    # Fold the three windows into the batch, encode once, then re-join them
    # along the sequence axis: (b, 3, 77) -> (3b, 77) -> (3b, 77, c) -> (b, 231, c).
    feed = einops.rearrange(ids, 'b f i -> (b f) i')
    encoded = transformer_encode(feed)
    z = einops.rearrange(encoded, '(b f) i c -> b (f i) c', f=3)

    return z
|
|
|
|
|
|
# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py
def _hacked_sliced_attentin_forward(self, x, context=None, mask=None):
    """Memory-frugal CrossAttention.forward.

    Computes attention one batch-head slice at a time, writing each slice
    into a preallocated output buffer and freeing intermediates eagerly so
    the full (b*h, n, n) similarity matrix is never materialized at once.
    `mask` is accepted for signature compatibility but not used.
    """
    heads = self.heads

    q = self.to_q(x)
    context = default(context, x)
    k = self.to_k(context)
    v = self.to_v(context)
    del context, x

    # Fold the head dimension into the batch dimension.
    q, k, v = (einops.rearrange(t, 'b n (h d) -> (b h) n d', h=heads) for t in (q, k, v))

    limit = k.shape[0]
    att_step = 1
    q_parts = list(torch.tensor_split(q, limit // att_step, dim=0))
    k_parts = list(torch.tensor_split(k, limit // att_step, dim=0))
    v_parts = list(torch.tensor_split(v, limit // att_step, dim=0))

    # Output buffer, filled slice by slice.
    out = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
    del k, q, v

    for idx, (q_part, k_part, v_part) in enumerate(zip(q_parts, k_parts, v_parts)):
        scores = torch.einsum('b i d, b j d -> b i j', q_part, k_part) * self.scale
        del q_part, k_part
        # attention, what we cannot get enough of, by chunks
        scores = scores.softmax(dim=-1)
        attended = torch.einsum('b i j, b j d -> b i d', scores, v_part)
        del v_part, scores
        out[idx * att_step:(idx + 1) * att_step, :, :] = attended
        del attended

    out = einops.rearrange(out, '(b h) n d -> b n (h d)', h=heads)
    return self.to_out(out)
|