70af4845af
new file: inpaint/__main__.py new file: inpaint/api.py new file: inpaint/batch_processing.py new file: inpaint/benchmark.py new file: inpaint/cli.py new file: inpaint/const.py new file: inpaint/download.py new file: inpaint/file_manager/__init__.py new file: inpaint/file_manager/file_manager.py new file: inpaint/file_manager/storage_backends.py new file: inpaint/file_manager/utils.py new file: inpaint/helper.py new file: inpaint/installer.py new file: inpaint/model/__init__.py new file: inpaint/model/anytext/__init__.py new file: inpaint/model/anytext/anytext_model.py new file: inpaint/model/anytext/anytext_pipeline.py new file: inpaint/model/anytext/anytext_sd15.yaml new file: inpaint/model/anytext/cldm/__init__.py new file: inpaint/model/anytext/cldm/cldm.py new file: inpaint/model/anytext/cldm/ddim_hacked.py new file: inpaint/model/anytext/cldm/embedding_manager.py new file: inpaint/model/anytext/cldm/hack.py new file: inpaint/model/anytext/cldm/model.py new file: inpaint/model/anytext/cldm/recognizer.py new file: inpaint/model/anytext/ldm/__init__.py new file: inpaint/model/anytext/ldm/models/__init__.py new file: inpaint/model/anytext/ldm/models/autoencoder.py new file: inpaint/model/anytext/ldm/models/diffusion/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/ddim.py new file: inpaint/model/anytext/ldm/models/diffusion/ddpm.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py new file: inpaint/model/anytext/ldm/models/diffusion/plms.py new file: inpaint/model/anytext/ldm/models/diffusion/sampling_util.py new file: inpaint/model/anytext/ldm/modules/__init__.py new file: inpaint/model/anytext/ldm/modules/attention.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/model.py new file: 
inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/util.py new file: inpaint/model/anytext/ldm/modules/distributions/__init__.py new file: inpaint/model/anytext/ldm/modules/distributions/distributions.py new file: inpaint/model/anytext/ldm/modules/ema.py new file: inpaint/model/anytext/ldm/modules/encoders/__init__.py new file: inpaint/model/anytext/ldm/modules/encoders/modules.py new file: inpaint/model/anytext/ldm/util.py new file: inpaint/model/anytext/main.py new file: inpaint/model/anytext/ocr_recog/RNN.py new file: inpaint/model/anytext/ocr_recog/RecCTCHead.py new file: inpaint/model/anytext/ocr_recog/RecModel.py new file: inpaint/model/anytext/ocr_recog/RecMv1_enhance.py new file: inpaint/model/anytext/ocr_recog/RecSVTR.py new file: inpaint/model/anytext/ocr_recog/__init__.py new file: inpaint/model/anytext/ocr_recog/common.py new file: inpaint/model/anytext/ocr_recog/en_dict.txt new file: inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt new file: inpaint/model/anytext/utils.py new file: inpaint/model/base.py new file: inpaint/model/brushnet/__init__.py new file: inpaint/model/brushnet/brushnet.py new file: inpaint/model/brushnet/brushnet_unet_forward.py new file: inpaint/model/brushnet/brushnet_wrapper.py new file: inpaint/model/brushnet/pipeline_brushnet.py new file: inpaint/model/brushnet/unet_2d_blocks.py new file: inpaint/model/controlnet.py new file: inpaint/model/ddim_sampler.py new file: inpaint/model/fcf.py new file: inpaint/model/helper/__init__.py new file: inpaint/model/helper/controlnet_preprocess.py new file: inpaint/model/helper/cpu_text_encoder.py new file: inpaint/model/helper/g_diffuser_bot.py new file: inpaint/model/instruct_pix2pix.py new file: inpaint/model/kandinsky.py new file: inpaint/model/lama.py new file: inpaint/model/ldm.py new file: inpaint/model/manga.py new file: 
inpaint/model/mat.py new file: inpaint/model/mi_gan.py new file: inpaint/model/opencv2.py new file: inpaint/model/original_sd_configs/__init__.py new file: inpaint/model/original_sd_configs/sd_xl_base.yaml new file: inpaint/model/original_sd_configs/sd_xl_refiner.yaml new file: inpaint/model/original_sd_configs/v1-inference.yaml new file: inpaint/model/original_sd_configs/v2-inference-v.yaml new file: inpaint/model/paint_by_example.py new file: inpaint/model/plms_sampler.py new file: inpaint/model/power_paint/__init__.py new file: inpaint/model/power_paint/pipeline_powerpaint.py new file: inpaint/model/power_paint/power_paint.py new file: inpaint/model/power_paint/power_paint_v2.py new file: inpaint/model/power_paint/powerpaint_tokenizer.py
110 lines
3.1 KiB
Python
110 lines
3.1 KiB
Python
#!/usr/bin/env python3
import argparse
import os
import time

import numpy as np
import nvidia_smi
import psutil
import torch

from inpaint.model_manager import ModelManager
from inpaint.schema import InpaintRequest, HDStrategy, SDSampler

# Disable all TorchScript JIT fusers so measured latency reflects plain eager
# execution instead of fuser warm-up/compilation behavior.
try:
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_nvfuser_enabled(False)
except Exception:
    # These are private torch APIs and differ between versions; best-effort
    # only. (Was a bare `except:`, which would also swallow KeyboardInterrupt
    # and SystemExit.)
    pass

# Pin the BLAS/OpenMP thread pools to a fixed size so CPU-side numbers are
# comparable across machines.
NUM_THREADS = str(4)

os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
# Redirect torch's model/weight cache when the caller provides CACHE_DIR.
if os.environ.get("CACHE_DIR"):
    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
|
|
|
|
|
|
def run_model(model, size):
    """Run one inpainting inference on a random image/mask pair of `size`.

    `model` is called as model(image, mask, config); the result is discarded —
    this exists purely to be timed by `benchmark`.
    """
    height, width = size
    # Random RGB image plus a random single-channel mask.
    image = np.random.randint(0, 256, (height, width, 3)).astype(np.uint8)
    mask = np.random.randint(0, 255, (height, width)).astype(np.uint8)

    config = InpaintRequest(
        ldm_steps=2,
        hd_strategy=HDStrategy.ORIGINAL,
        hd_strategy_crop_margin=128,
        hd_strategy_crop_trigger_size=128,
        hd_strategy_resize_limit=128,
        prompt="a fox is sitting on a bench",
        sd_steps=5,
        sd_sampler=SDSampler.ddim,
    )
    model(image, mask, config)
|
|
|
|
|
|
def benchmark(model, times: int, empty_cache: bool):
    """Benchmark `model` and print latency / RAM / GPU-memory statistics.

    Args:
        model: callable invoked via run_model(model, size).
        times: number of timed iterations per image size.
        empty_cache: when True, empty the CUDA allocator cache before each
            iteration so every run starts from a cold cache. (Fix: this flag
            was previously accepted but never used.)
    """
    sizes = [(512, 512)]

    nvidia_smi.nvmlInit()
    device_id = 0
    handle = nvidia_smi.nvmlDeviceGetHandleByIndex(device_id)

    # Renamed from `format`, which shadowed the builtin.
    def fmt(metrics):
        return f"{np.mean(metrics):.2f} ± {np.std(metrics):.2f}"

    process = psutil.Process(os.getpid())
    # For each size, report GPU memory and host memory usage metrics.
    for size in sizes:
        torch.cuda.empty_cache()
        time_metrics = []
        cpu_metrics = []
        memory_metrics = []
        gpu_memory_metrics = []
        for _ in range(times):
            if empty_cache:
                torch.cuda.empty_cache()
            start = time.time()
            run_model(model, size)
            # Wait for queued CUDA kernels so the measurement is real latency.
            torch.cuda.synchronize()

            # cpu_metrics.append(process.cpu_percent())
            time_metrics.append((time.time() - start) * 1000)
            memory_metrics.append(process.memory_info().rss / 1024 / 1024)
            gpu_memory_metrics.append(
                nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024
            )

        print(f"size: {size}".center(80, "-"))
        # print(f"cpu: {fmt(cpu_metrics)}")
        print(f"latency: {fmt(time_metrics)}ms")
        print(f"memory: {fmt(memory_metrics)} MB")
        print(f"gpu memory: {fmt(gpu_memory_metrics)} MB")

    nvidia_smi.nvmlShutdown()
|
|
|
|
|
|
def get_args_parser():
    """Build the benchmark CLI parser and return the parsed arguments."""
    p = argparse.ArgumentParser()
    p.add_argument("--name")
    p.add_argument("--device", type=str, default="cuda")
    p.add_argument("--times", type=int, default=10)
    p.add_argument("--empty-cache", action="store_true")
    return p.parse_args()
|
|
|
|
|
|
if __name__ == "__main__":
    # Parse the CLI, load the requested model, then run the benchmark loop.
    args = get_args_parser()
    model = ModelManager(
        name=args.name,
        device=torch.device(args.device),
        disable_nsfw=True,
        sd_cpu_textencoder=True,
    )
    benchmark(model, args.times, args.empty_cache)
|