From 70af4845afc617ca5d3ffcd3a55c80cb76d5537e Mon Sep 17 00:00:00 2001 From: root Date: Tue, 20 Aug 2024 21:17:33 +0200 Subject: [PATCH] new file: inpaint/__init__.py new file: inpaint/__main__.py new file: inpaint/api.py new file: inpaint/batch_processing.py new file: inpaint/benchmark.py new file: inpaint/cli.py new file: inpaint/const.py new file: inpaint/download.py new file: inpaint/file_manager/__init__.py new file: inpaint/file_manager/file_manager.py new file: inpaint/file_manager/storage_backends.py new file: inpaint/file_manager/utils.py new file: inpaint/helper.py new file: inpaint/installer.py new file: inpaint/model/__init__.py new file: inpaint/model/anytext/__init__.py new file: inpaint/model/anytext/anytext_model.py new file: inpaint/model/anytext/anytext_pipeline.py new file: inpaint/model/anytext/anytext_sd15.yaml new file: inpaint/model/anytext/cldm/__init__.py new file: inpaint/model/anytext/cldm/cldm.py new file: inpaint/model/anytext/cldm/ddim_hacked.py new file: inpaint/model/anytext/cldm/embedding_manager.py new file: inpaint/model/anytext/cldm/hack.py new file: inpaint/model/anytext/cldm/model.py new file: inpaint/model/anytext/cldm/recognizer.py new file: inpaint/model/anytext/ldm/__init__.py new file: inpaint/model/anytext/ldm/models/__init__.py new file: inpaint/model/anytext/ldm/models/autoencoder.py new file: inpaint/model/anytext/ldm/models/diffusion/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/ddim.py new file: inpaint/model/anytext/ldm/models/diffusion/ddpm.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py new file: inpaint/model/anytext/ldm/models/diffusion/plms.py new file: inpaint/model/anytext/ldm/models/diffusion/sampling_util.py new file: inpaint/model/anytext/ldm/modules/__init__.py new file: inpaint/model/anytext/ldm/modules/attention.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/model.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/util.py new file: inpaint/model/anytext/ldm/modules/distributions/__init__.py new file: inpaint/model/anytext/ldm/modules/distributions/distributions.py new file: inpaint/model/anytext/ldm/modules/ema.py new file: inpaint/model/anytext/ldm/modules/encoders/__init__.py new file: inpaint/model/anytext/ldm/modules/encoders/modules.py new file: inpaint/model/anytext/ldm/util.py new file: inpaint/model/anytext/main.py new file: inpaint/model/anytext/ocr_recog/RNN.py new file: inpaint/model/anytext/ocr_recog/RecCTCHead.py new file: inpaint/model/anytext/ocr_recog/RecModel.py new file: inpaint/model/anytext/ocr_recog/RecMv1_enhance.py new file: inpaint/model/anytext/ocr_recog/RecSVTR.py new file: inpaint/model/anytext/ocr_recog/__init__.py new file: inpaint/model/anytext/ocr_recog/common.py new file: inpaint/model/anytext/ocr_recog/en_dict.txt new file: inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt new file: inpaint/model/anytext/utils.py new file: inpaint/model/base.py new file: inpaint/model/brushnet/__init__.py new file: inpaint/model/brushnet/brushnet.py new file: inpaint/model/brushnet/brushnet_unet_forward.py new file: inpaint/model/brushnet/brushnet_wrapper.py new 
file: inpaint/model/brushnet/pipeline_brushnet.py new file: inpaint/model/brushnet/unet_2d_blocks.py new file: inpaint/model/controlnet.py new file: inpaint/model/ddim_sampler.py new file: inpaint/model/fcf.py new file: inpaint/model/helper/__init__.py new file: inpaint/model/helper/controlnet_preprocess.py new file: inpaint/model/helper/cpu_text_encoder.py new file: inpaint/model/helper/g_diffuser_bot.py new file: inpaint/model/instruct_pix2pix.py new file: inpaint/model/kandinsky.py new file: inpaint/model/lama.py new file: inpaint/model/ldm.py new file: inpaint/model/manga.py new file: inpaint/model/mat.py new file: inpaint/model/mi_gan.py new file: inpaint/model/opencv2.py new file: inpaint/model/original_sd_configs/__init__.py new file: inpaint/model/original_sd_configs/sd_xl_base.yaml new file: inpaint/model/original_sd_configs/sd_xl_refiner.yaml new file: inpaint/model/original_sd_configs/v1-inference.yaml new file: inpaint/model/original_sd_configs/v2-inference-v.yaml new file: inpaint/model/paint_by_example.py new file: inpaint/model/plms_sampler.py new file: inpaint/model/power_paint/__init__.py new file: inpaint/model/power_paint/pipeline_powerpaint.py new file: inpaint/model/power_paint/power_paint.py new file: inpaint/model/power_paint/power_paint_v2.py new file: inpaint/model/power_paint/powerpaint_tokenizer.py --- inpaint/__init__.py | 23 + inpaint/__main__.py | 4 + inpaint/api.py | 398 + inpaint/batch_processing.py | 128 + inpaint/benchmark.py | 109 + inpaint/cli.py | 232 + inpaint/const.py | 128 + inpaint/download.py | 313 + inpaint/file_manager/__init__.py | 1 + inpaint/file_manager/file_manager.py | 218 + inpaint/file_manager/storage_backends.py | 46 + inpaint/file_manager/utils.py | 65 + inpaint/helper.py | 408 + inpaint/installer.py | 10 + inpaint/model/__init__.py | 37 + inpaint/model/anytext/__init__.py | 0 inpaint/model/anytext/anytext_model.py | 73 + inpaint/model/anytext/anytext_pipeline.py | 403 + inpaint/model/anytext/anytext_sd15.yaml | 99 + inpaint/model/anytext/cldm/__init__.py | 0 inpaint/model/anytext/cldm/cldm.py | 630 ++ inpaint/model/anytext/cldm/ddim_hacked.py | 486 ++ .../model/anytext/cldm/embedding_manager.py | 165 + inpaint/model/anytext/cldm/hack.py | 111 + inpaint/model/anytext/cldm/model.py | 40 + inpaint/model/anytext/cldm/recognizer.py | 300 + inpaint/model/anytext/ldm/__init__.py | 0 inpaint/model/anytext/ldm/models/__init__.py | 0 .../model/anytext/ldm/models/autoencoder.py | 218 + .../anytext/ldm/models/diffusion/__init__.py | 0 .../anytext/ldm/models/diffusion/ddim.py | 354 + .../anytext/ldm/models/diffusion/ddpm.py | 2380 ++++++ .../models/diffusion/dpm_solver/__init__.py | 1 + .../models/diffusion/dpm_solver/dpm_solver.py | 1154 +++ .../models/diffusion/dpm_solver/sampler.py | 87 + .../anytext/ldm/models/diffusion/plms.py | 244 + .../ldm/models/diffusion/sampling_util.py | 22 + inpaint/model/anytext/ldm/modules/__init__.py | 0 .../model/anytext/ldm/modules/attention.py | 360 + .../ldm/modules/diffusionmodules/__init__.py | 0 .../ldm/modules/diffusionmodules/model.py | 973 +++ .../modules/diffusionmodules/openaimodel.py | 786 ++ .../ldm/modules/diffusionmodules/upscaling.py | 81 + .../ldm/modules/diffusionmodules/util.py | 271 + .../ldm/modules/distributions/__init__.py | 0 .../modules/distributions/distributions.py | 92 + inpaint/model/anytext/ldm/modules/ema.py | 80 + .../anytext/ldm/modules/encoders/__init__.py | 0 .../anytext/ldm/modules/encoders/modules.py | 411 + inpaint/model/anytext/ldm/util.py | 197 + inpaint/model/anytext/main.py 
| 45 + inpaint/model/anytext/ocr_recog/RNN.py | 210 + inpaint/model/anytext/ocr_recog/RecCTCHead.py | 48 + inpaint/model/anytext/ocr_recog/RecModel.py | 45 + .../model/anytext/ocr_recog/RecMv1_enhance.py | 232 + inpaint/model/anytext/ocr_recog/RecSVTR.py | 591 ++ inpaint/model/anytext/ocr_recog/__init__.py | 0 inpaint/model/anytext/ocr_recog/common.py | 74 + inpaint/model/anytext/ocr_recog/en_dict.txt | 95 + .../model/anytext/ocr_recog/ppocr_keys_v1.txt | 6623 +++++++++++++++++ inpaint/model/anytext/utils.py | 151 + inpaint/model/base.py | 405 + inpaint/model/brushnet/__init__.py | 0 inpaint/model/brushnet/brushnet.py | 931 +++ .../model/brushnet/brushnet_unet_forward.py | 322 + inpaint/model/brushnet/brushnet_wrapper.py | 157 + inpaint/model/brushnet/pipeline_brushnet.py | 1279 ++++ inpaint/model/brushnet/unet_2d_blocks.py | 388 + inpaint/model/controlnet.py | 194 + inpaint/model/ddim_sampler.py | 193 + inpaint/model/fcf.py | 1737 +++++ inpaint/model/helper/__init__.py | 0 inpaint/model/helper/controlnet_preprocess.py | 68 + inpaint/model/helper/cpu_text_encoder.py | 41 + inpaint/model/helper/g_diffuser_bot.py | 62 + inpaint/model/instruct_pix2pix.py | 64 + inpaint/model/kandinsky.py | 65 + inpaint/model/lama.py | 57 + inpaint/model/ldm.py | 336 + inpaint/model/manga.py | 97 + inpaint/model/mat.py | 1945 +++++ inpaint/model/mi_gan.py | 110 + inpaint/model/opencv2.py | 29 + inpaint/model/original_sd_configs/__init__.py | 19 + .../model/original_sd_configs/sd_xl_base.yaml | 93 + .../original_sd_configs/sd_xl_refiner.yaml | 86 + .../original_sd_configs/v1-inference.yaml | 70 + .../original_sd_configs/v2-inference-v.yaml | 68 + inpaint/model/paint_by_example.py | 68 + inpaint/model/plms_sampler.py | 225 + inpaint/model/power_paint/__init__.py | 0 .../model/power_paint/pipeline_powerpaint.py | 1243 ++++ inpaint/model/power_paint/power_paint.py | 101 + inpaint/model/power_paint/power_paint_v2.py | 186 + .../model/power_paint/powerpaint_tokenizer.py | 254 + inpaint/model/power_paint/v2/BrushNet_CA.py | 1094 +++ inpaint/model/power_paint/v2/__init__.py | 0 .../v2/pipeline_PowerPaint_Brushnet_CA.py | 1690 +++++ .../model/power_paint/v2/unet_2d_blocks.py | 342 + .../model/power_paint/v2/unet_2d_condition.py | 402 + inpaint/model/sd.py | 129 + inpaint/model/sdxl.py | 110 + inpaint/model/utils.py | 1033 +++ inpaint/model/zits.py | 476 ++ inpaint/model_manager.py | 260 + inpaint/plugins/__init__.py | 74 + inpaint/plugins/anime_seg.py | 462 ++ inpaint/plugins/base_plugin.py | 30 + inpaint/plugins/basicsr/LICENSE | 201 + inpaint/plugins/basicsr/__init__.py | 22 + inpaint/plugins/basicsr/arch_util.py | 80 + inpaint/plugins/basicsr/img_util.py | 172 + inpaint/plugins/basicsr/rrdbnet_arch.py | 133 + inpaint/plugins/briarmbg.py | 512 ++ inpaint/plugins/facexlib/.gitignore | 135 + inpaint/plugins/facexlib/__init__.py | 3 + .../plugins/facexlib/detection/__init__.py | 31 + .../plugins/facexlib/detection/align_trans.py | 219 + .../facexlib/detection/matlab_cp2tform.py | 317 + .../plugins/facexlib/detection/retinaface.py | 419 ++ .../facexlib/detection/retinaface_net.py | 196 + .../facexlib/detection/retinaface_utils.py | 421 ++ inpaint/plugins/facexlib/parsing/__init__.py | 24 + inpaint/plugins/facexlib/parsing/bisenet.py | 140 + inpaint/plugins/facexlib/parsing/parsenet.py | 194 + inpaint/plugins/facexlib/parsing/resnet.py | 69 + inpaint/plugins/facexlib/utils/__init__.py | 7 + .../facexlib/utils/face_restoration_helper.py | 473 ++ inpaint/plugins/facexlib/utils/face_utils.py | 208 + 
inpaint/plugins/facexlib/utils/misc.py | 118 + .../gfpgan/archs/gfpganv1_clean_arch.py | 322 + .../gfpgan/archs/restoreformer_arch.py | 759 ++ .../gfpgan/archs/stylegan2_clean_arch.py | 434 ++ inpaint/plugins/gfpgan_plugin.py | 61 + inpaint/plugins/gfpganer.py | 156 + inpaint/plugins/interactive_seg.py | 130 + inpaint/plugins/realesrgan.py | 468 ++ inpaint/plugins/remove_bg.py | 71 + inpaint/plugins/restoreformer.py | 44 + inpaint/plugins/segment_anything/__init__.py | 16 + inpaint/plugins/segment_anything/build_sam.py | 269 + .../segment_anything/modeling/__init__.py | 11 + .../segment_anything/modeling/common.py | 43 + .../modeling/image_encoder.py | 395 + .../modeling/image_encoder_hq.py | 422 ++ .../segment_anything/modeling/mask_decoder.py | 410 + .../modeling/prompt_encoder.py | 214 + .../plugins/segment_anything/modeling/sam.py | 174 + .../segment_anything/modeling/sam_hq.py | 177 + .../segment_anything/modeling/tiny_vit_sam.py | 822 ++ .../segment_anything/modeling/transformer.py | 240 + inpaint/plugins/segment_anything/predictor.py | 285 + .../plugins/segment_anything/predictor_hq.py | 292 + .../segment_anything/utils/__init__.py | 5 + .../segment_anything/utils/transforms.py | 112 + inpaint/plugins/segment_anything2/__init__.py | 5 + .../plugins/segment_anything2/build_sam.py | 262 + .../segment_anything2/modeling/__init__.py | 5 + .../modeling/backbones/__init__.py | 5 + .../modeling/backbones/hieradet.py | 295 + .../modeling/backbones/image_encoder.py | 133 + .../modeling/backbones/utils.py | 95 + .../modeling/memory_attention.py | 169 + .../modeling/memory_encoder.py | 181 + .../modeling/position_encoding.py | 216 + .../modeling/sam/__init__.py | 5 + .../modeling/sam/mask_decoder.py | 295 + .../modeling/sam/prompt_encoder.py | 182 + .../modeling/sam/transformer.py | 327 + .../segment_anything2/modeling/sam2_base.py | 832 +++ .../segment_anything2/modeling/sam2_utils.py | 149 + .../segment_anything2/sam2_image_predictor.py | 445 ++ .../segment_anything2/utils/__init__.py | 5 + .../plugins/segment_anything2/utils/misc.py | 90 + .../segment_anything2/utils/transforms.py | 77 + inpaint/runtime.py | 86 + inpaint/schema.py | 491 ++ inpaint/tests/.gitignore | 2 + inpaint/tests/__init__.py | 0 inpaint/tests/anime_test.png | Bin 0 -> 491736 bytes inpaint/tests/anytext_mask.jpg | Bin 0 -> 6860 bytes inpaint/tests/anytext_ref.jpg | Bin 0 -> 105967 bytes inpaint/tests/bunny.jpeg | Bin 0 -> 52557 bytes inpaint/tests/cat.png | Bin 0 -> 492745 bytes inpaint/tests/icc_profile_test.jpg | Bin 0 -> 219998 bytes inpaint/tests/icc_profile_test.png | Bin 0 -> 311946 bytes inpaint/tests/image.png | Bin 0 -> 132172 bytes inpaint/tests/mask.png | Bin 0 -> 7916 bytes .../tests/overture-creations-5sI6fQgYIuo.png | Bin 0 -> 404753 bytes .../overture-creations-5sI6fQgYIuo_mask.png | Bin 0 -> 12106 bytes ...erture-creations-5sI6fQgYIuo_mask_blur.png | Bin 0 -> 39225 bytes inpaint/tests/png_parameter_test.png | Bin 0 -> 70526 bytes inpaint/tests/test_adjust_mask.py | 17 + inpaint/tests/test_anytext.py | 45 + inpaint/tests/test_brushnet.py | 110 + inpaint/tests/test_controlnet.py | 118 + inpaint/tests/test_instruct_pix2pix.py | 40 + inpaint/tests/test_load_img.py | 19 + inpaint/tests/test_low_mem.py | 102 + inpaint/tests/test_match_histograms.py | 36 + inpaint/tests/test_model.py | 160 + inpaint/tests/test_model_md5.py | 16 + inpaint/tests/test_model_switch.py | 70 + inpaint/tests/test_outpainting.py | 137 + inpaint/tests/test_paint_by_example.py | 58 + inpaint/tests/test_plugins.py | 125 + 
inpaint/tests/test_save_exif.py | 59 + inpaint/tests/test_sd_model.py | 240 + inpaint/tests/test_sdxl.py | 118 + inpaint/tests/utils.py | 77 + .../web_app/assets/Inter-Black-jiII8dog.woff2 | Bin 0 -> 108748 bytes .../assets/Inter-BlackItalic-1413vuen.woff2 | Bin 0 -> 115364 bytes .../web_app/assets/Inter-Bold-srYz_-1B.woff2 | Bin 0 -> 111040 bytes .../assets/Inter-BoldItalic-dE_gZyur.woff2 | Bin 0 -> 118392 bytes .../assets/Inter-ExtraBold-TduDdwUu.woff2 | Bin 0 -> 111360 bytes .../Inter-ExtraBoldItalic-BJafRE5I.woff2 | Bin 0 -> 118604 bytes .../assets/Inter-ExtraLight-w5HAp5iF.woff2 | Bin 0 -> 110176 bytes .../Inter-ExtraLightItalic-ZptecSuc.woff2 | Bin 0 -> 116296 bytes .../assets/Inter-Italic-f6M78thn.woff2 | Bin 0 -> 114576 bytes .../web_app/assets/Inter-Light-DFhX0qo-.woff2 | Bin 0 -> 109992 bytes .../assets/Inter-LightItalic-fu56_DRc.woff2 | Bin 0 -> 116516 bytes .../assets/Inter-Medium-dDRaJ8tM.woff2 | Bin 0 -> 111380 bytes .../assets/Inter-MediumItalic-zr3roggP.woff2 | Bin 0 -> 118392 bytes .../assets/Inter-Regular-dEFHw1tF.woff2 | Bin 0 -> 108488 bytes .../assets/Inter-SemiBold-PyS8DO2L.woff2 | Bin 0 -> 111588 bytes .../Inter-SemiBoldItalic-uIDb7hsH.woff2 | Bin 0 -> 118216 bytes .../web_app/assets/Inter-Thin-eKObIkJC.woff2 | Bin 0 -> 106620 bytes .../assets/Inter-ThinItalic-L6uBn3RP.woff2 | Bin 0 -> 113384 bytes inpaint/web_app/assets/index-7L_lPAh0.css | 1 + inpaint/web_app/assets/index-VrFIcmY_.js | 165 + inpaint/web_app/index.html | 13 + inpaint/web_config.py | 319 + 232 files changed, 54070 insertions(+) create mode 100644 inpaint/__init__.py create mode 100644 inpaint/__main__.py create mode 100644 inpaint/api.py create mode 100644 inpaint/batch_processing.py create mode 100644 inpaint/benchmark.py create mode 100644 inpaint/cli.py create mode 100644 inpaint/const.py create mode 100644 inpaint/download.py create mode 100644 inpaint/file_manager/__init__.py create mode 100644 inpaint/file_manager/file_manager.py create mode 100644 inpaint/file_manager/storage_backends.py create mode 100644 inpaint/file_manager/utils.py create mode 100644 inpaint/helper.py create mode 100644 inpaint/installer.py create mode 100644 inpaint/model/__init__.py create mode 100644 inpaint/model/anytext/__init__.py create mode 100644 inpaint/model/anytext/anytext_model.py create mode 100644 inpaint/model/anytext/anytext_pipeline.py create mode 100644 inpaint/model/anytext/anytext_sd15.yaml create mode 100644 inpaint/model/anytext/cldm/__init__.py create mode 100644 inpaint/model/anytext/cldm/cldm.py create mode 100644 inpaint/model/anytext/cldm/ddim_hacked.py create mode 100644 inpaint/model/anytext/cldm/embedding_manager.py create mode 100644 inpaint/model/anytext/cldm/hack.py create mode 100644 inpaint/model/anytext/cldm/model.py create mode 100755 inpaint/model/anytext/cldm/recognizer.py create mode 100644 inpaint/model/anytext/ldm/__init__.py create mode 100644 inpaint/model/anytext/ldm/models/__init__.py create mode 100644 inpaint/model/anytext/ldm/models/autoencoder.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/__init__.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/ddim.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/ddpm.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py create mode 100644 
inpaint/model/anytext/ldm/models/diffusion/plms.py create mode 100644 inpaint/model/anytext/ldm/models/diffusion/sampling_util.py create mode 100644 inpaint/model/anytext/ldm/modules/__init__.py create mode 100644 inpaint/model/anytext/ldm/modules/attention.py create mode 100644 inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py create mode 100644 inpaint/model/anytext/ldm/modules/diffusionmodules/model.py create mode 100644 inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py create mode 100644 inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py create mode 100644 inpaint/model/anytext/ldm/modules/diffusionmodules/util.py create mode 100644 inpaint/model/anytext/ldm/modules/distributions/__init__.py create mode 100644 inpaint/model/anytext/ldm/modules/distributions/distributions.py create mode 100644 inpaint/model/anytext/ldm/modules/ema.py create mode 100644 inpaint/model/anytext/ldm/modules/encoders/__init__.py create mode 100644 inpaint/model/anytext/ldm/modules/encoders/modules.py create mode 100644 inpaint/model/anytext/ldm/util.py create mode 100644 inpaint/model/anytext/main.py create mode 100755 inpaint/model/anytext/ocr_recog/RNN.py create mode 100755 inpaint/model/anytext/ocr_recog/RecCTCHead.py create mode 100755 inpaint/model/anytext/ocr_recog/RecModel.py create mode 100644 inpaint/model/anytext/ocr_recog/RecMv1_enhance.py create mode 100644 inpaint/model/anytext/ocr_recog/RecSVTR.py create mode 100644 inpaint/model/anytext/ocr_recog/__init__.py create mode 100644 inpaint/model/anytext/ocr_recog/common.py create mode 100644 inpaint/model/anytext/ocr_recog/en_dict.txt create mode 100644 inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt create mode 100644 inpaint/model/anytext/utils.py create mode 100644 inpaint/model/base.py create mode 100644 inpaint/model/brushnet/__init__.py create mode 100644 inpaint/model/brushnet/brushnet.py create mode 100644 inpaint/model/brushnet/brushnet_unet_forward.py create mode 100644 inpaint/model/brushnet/brushnet_wrapper.py create mode 100644 inpaint/model/brushnet/pipeline_brushnet.py create mode 100644 inpaint/model/brushnet/unet_2d_blocks.py create mode 100644 inpaint/model/controlnet.py create mode 100644 inpaint/model/ddim_sampler.py create mode 100644 inpaint/model/fcf.py create mode 100644 inpaint/model/helper/__init__.py create mode 100644 inpaint/model/helper/controlnet_preprocess.py create mode 100644 inpaint/model/helper/cpu_text_encoder.py create mode 100644 inpaint/model/helper/g_diffuser_bot.py create mode 100644 inpaint/model/instruct_pix2pix.py create mode 100644 inpaint/model/kandinsky.py create mode 100644 inpaint/model/lama.py create mode 100644 inpaint/model/ldm.py create mode 100644 inpaint/model/manga.py create mode 100644 inpaint/model/mat.py create mode 100644 inpaint/model/mi_gan.py create mode 100644 inpaint/model/opencv2.py create mode 100644 inpaint/model/original_sd_configs/__init__.py create mode 100644 inpaint/model/original_sd_configs/sd_xl_base.yaml create mode 100644 inpaint/model/original_sd_configs/sd_xl_refiner.yaml create mode 100644 inpaint/model/original_sd_configs/v1-inference.yaml create mode 100644 inpaint/model/original_sd_configs/v2-inference-v.yaml create mode 100644 inpaint/model/paint_by_example.py create mode 100644 inpaint/model/plms_sampler.py create mode 100644 inpaint/model/power_paint/__init__.py create mode 100644 inpaint/model/power_paint/pipeline_powerpaint.py create mode 100644 inpaint/model/power_paint/power_paint.py create mode 100644 
inpaint/model/power_paint/power_paint_v2.py create mode 100644 inpaint/model/power_paint/powerpaint_tokenizer.py create mode 100644 inpaint/model/power_paint/v2/BrushNet_CA.py create mode 100644 inpaint/model/power_paint/v2/__init__.py create mode 100644 inpaint/model/power_paint/v2/pipeline_PowerPaint_Brushnet_CA.py create mode 100644 inpaint/model/power_paint/v2/unet_2d_blocks.py create mode 100644 inpaint/model/power_paint/v2/unet_2d_condition.py create mode 100644 inpaint/model/sd.py create mode 100644 inpaint/model/sdxl.py create mode 100644 inpaint/model/utils.py create mode 100644 inpaint/model/zits.py create mode 100644 inpaint/model_manager.py create mode 100644 inpaint/plugins/__init__.py create mode 100644 inpaint/plugins/anime_seg.py create mode 100644 inpaint/plugins/base_plugin.py create mode 100644 inpaint/plugins/basicsr/LICENSE create mode 100644 inpaint/plugins/basicsr/__init__.py create mode 100644 inpaint/plugins/basicsr/arch_util.py create mode 100644 inpaint/plugins/basicsr/img_util.py create mode 100644 inpaint/plugins/basicsr/rrdbnet_arch.py create mode 100644 inpaint/plugins/briarmbg.py create mode 100644 inpaint/plugins/facexlib/.gitignore create mode 100644 inpaint/plugins/facexlib/__init__.py create mode 100644 inpaint/plugins/facexlib/detection/__init__.py create mode 100644 inpaint/plugins/facexlib/detection/align_trans.py create mode 100644 inpaint/plugins/facexlib/detection/matlab_cp2tform.py create mode 100644 inpaint/plugins/facexlib/detection/retinaface.py create mode 100644 inpaint/plugins/facexlib/detection/retinaface_net.py create mode 100644 inpaint/plugins/facexlib/detection/retinaface_utils.py create mode 100644 inpaint/plugins/facexlib/parsing/__init__.py create mode 100644 inpaint/plugins/facexlib/parsing/bisenet.py create mode 100644 inpaint/plugins/facexlib/parsing/parsenet.py create mode 100644 inpaint/plugins/facexlib/parsing/resnet.py create mode 100644 inpaint/plugins/facexlib/utils/__init__.py create mode 100644 inpaint/plugins/facexlib/utils/face_restoration_helper.py create mode 100644 inpaint/plugins/facexlib/utils/face_utils.py create mode 100644 inpaint/plugins/facexlib/utils/misc.py create mode 100644 inpaint/plugins/gfpgan/archs/gfpganv1_clean_arch.py create mode 100644 inpaint/plugins/gfpgan/archs/restoreformer_arch.py create mode 100644 inpaint/plugins/gfpgan/archs/stylegan2_clean_arch.py create mode 100644 inpaint/plugins/gfpgan_plugin.py create mode 100644 inpaint/plugins/gfpganer.py create mode 100644 inpaint/plugins/interactive_seg.py create mode 100644 inpaint/plugins/realesrgan.py create mode 100644 inpaint/plugins/remove_bg.py create mode 100644 inpaint/plugins/restoreformer.py create mode 100644 inpaint/plugins/segment_anything/__init__.py create mode 100644 inpaint/plugins/segment_anything/build_sam.py create mode 100644 inpaint/plugins/segment_anything/modeling/__init__.py create mode 100644 inpaint/plugins/segment_anything/modeling/common.py create mode 100644 inpaint/plugins/segment_anything/modeling/image_encoder.py create mode 100644 inpaint/plugins/segment_anything/modeling/image_encoder_hq.py create mode 100644 inpaint/plugins/segment_anything/modeling/mask_decoder.py create mode 100644 inpaint/plugins/segment_anything/modeling/prompt_encoder.py create mode 100644 inpaint/plugins/segment_anything/modeling/sam.py create mode 100644 inpaint/plugins/segment_anything/modeling/sam_hq.py create mode 100644 inpaint/plugins/segment_anything/modeling/tiny_vit_sam.py create mode 100644 
inpaint/plugins/segment_anything/modeling/transformer.py create mode 100644 inpaint/plugins/segment_anything/predictor.py create mode 100644 inpaint/plugins/segment_anything/predictor_hq.py create mode 100644 inpaint/plugins/segment_anything/utils/__init__.py create mode 100644 inpaint/plugins/segment_anything/utils/transforms.py create mode 100644 inpaint/plugins/segment_anything2/__init__.py create mode 100644 inpaint/plugins/segment_anything2/build_sam.py create mode 100644 inpaint/plugins/segment_anything2/modeling/__init__.py create mode 100644 inpaint/plugins/segment_anything2/modeling/backbones/__init__.py create mode 100644 inpaint/plugins/segment_anything2/modeling/backbones/hieradet.py create mode 100644 inpaint/plugins/segment_anything2/modeling/backbones/image_encoder.py create mode 100644 inpaint/plugins/segment_anything2/modeling/backbones/utils.py create mode 100644 inpaint/plugins/segment_anything2/modeling/memory_attention.py create mode 100644 inpaint/plugins/segment_anything2/modeling/memory_encoder.py create mode 100644 inpaint/plugins/segment_anything2/modeling/position_encoding.py create mode 100644 inpaint/plugins/segment_anything2/modeling/sam/__init__.py create mode 100644 inpaint/plugins/segment_anything2/modeling/sam/mask_decoder.py create mode 100644 inpaint/plugins/segment_anything2/modeling/sam/prompt_encoder.py create mode 100644 inpaint/plugins/segment_anything2/modeling/sam/transformer.py create mode 100644 inpaint/plugins/segment_anything2/modeling/sam2_base.py create mode 100644 inpaint/plugins/segment_anything2/modeling/sam2_utils.py create mode 100644 inpaint/plugins/segment_anything2/sam2_image_predictor.py create mode 100644 inpaint/plugins/segment_anything2/utils/__init__.py create mode 100644 inpaint/plugins/segment_anything2/utils/misc.py create mode 100644 inpaint/plugins/segment_anything2/utils/transforms.py create mode 100644 inpaint/runtime.py create mode 100644 inpaint/schema.py create mode 100644 inpaint/tests/.gitignore create mode 100644 inpaint/tests/__init__.py create mode 100644 inpaint/tests/anime_test.png create mode 100644 inpaint/tests/anytext_mask.jpg create mode 100644 inpaint/tests/anytext_ref.jpg create mode 100644 inpaint/tests/bunny.jpeg create mode 100644 inpaint/tests/cat.png create mode 100644 inpaint/tests/icc_profile_test.jpg create mode 100644 inpaint/tests/icc_profile_test.png create mode 100644 inpaint/tests/image.png create mode 100644 inpaint/tests/mask.png create mode 100644 inpaint/tests/overture-creations-5sI6fQgYIuo.png create mode 100644 inpaint/tests/overture-creations-5sI6fQgYIuo_mask.png create mode 100644 inpaint/tests/overture-creations-5sI6fQgYIuo_mask_blur.png create mode 100644 inpaint/tests/png_parameter_test.png create mode 100644 inpaint/tests/test_adjust_mask.py create mode 100644 inpaint/tests/test_anytext.py create mode 100644 inpaint/tests/test_brushnet.py create mode 100644 inpaint/tests/test_controlnet.py create mode 100644 inpaint/tests/test_instruct_pix2pix.py create mode 100644 inpaint/tests/test_load_img.py create mode 100644 inpaint/tests/test_low_mem.py create mode 100644 inpaint/tests/test_match_histograms.py create mode 100644 inpaint/tests/test_model.py create mode 100644 inpaint/tests/test_model_md5.py create mode 100644 inpaint/tests/test_model_switch.py create mode 100644 inpaint/tests/test_outpainting.py create mode 100644 inpaint/tests/test_paint_by_example.py create mode 100644 inpaint/tests/test_plugins.py create mode 100644 inpaint/tests/test_save_exif.py create mode 100644 
inpaint/tests/test_sd_model.py create mode 100644 inpaint/tests/test_sdxl.py create mode 100644 inpaint/tests/utils.py create mode 100644 inpaint/web_app/assets/Inter-Black-jiII8dog.woff2 create mode 100644 inpaint/web_app/assets/Inter-BlackItalic-1413vuen.woff2 create mode 100644 inpaint/web_app/assets/Inter-Bold-srYz_-1B.woff2 create mode 100644 inpaint/web_app/assets/Inter-BoldItalic-dE_gZyur.woff2 create mode 100644 inpaint/web_app/assets/Inter-ExtraBold-TduDdwUu.woff2 create mode 100644 inpaint/web_app/assets/Inter-ExtraBoldItalic-BJafRE5I.woff2 create mode 100644 inpaint/web_app/assets/Inter-ExtraLight-w5HAp5iF.woff2 create mode 100644 inpaint/web_app/assets/Inter-ExtraLightItalic-ZptecSuc.woff2 create mode 100644 inpaint/web_app/assets/Inter-Italic-f6M78thn.woff2 create mode 100644 inpaint/web_app/assets/Inter-Light-DFhX0qo-.woff2 create mode 100644 inpaint/web_app/assets/Inter-LightItalic-fu56_DRc.woff2 create mode 100644 inpaint/web_app/assets/Inter-Medium-dDRaJ8tM.woff2 create mode 100644 inpaint/web_app/assets/Inter-MediumItalic-zr3roggP.woff2 create mode 100644 inpaint/web_app/assets/Inter-Regular-dEFHw1tF.woff2 create mode 100644 inpaint/web_app/assets/Inter-SemiBold-PyS8DO2L.woff2 create mode 100644 inpaint/web_app/assets/Inter-SemiBoldItalic-uIDb7hsH.woff2 create mode 100644 inpaint/web_app/assets/Inter-Thin-eKObIkJC.woff2 create mode 100644 inpaint/web_app/assets/Inter-ThinItalic-L6uBn3RP.woff2 create mode 100644 inpaint/web_app/assets/index-7L_lPAh0.css create mode 100644 inpaint/web_app/assets/index-VrFIcmY_.js create mode 100644 inpaint/web_app/index.html create mode 100644 inpaint/web_config.py diff --git a/inpaint/__init__.py b/inpaint/__init__.py new file mode 100644 index 0000000..d8e11fe --- /dev/null +++ b/inpaint/__init__.py @@ -0,0 +1,23 @@ +import os + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +# https://github.com/pytorch/pytorch/issues/27971#issuecomment-1768868068 +os.environ["ONEDNN_PRIMITIVE_CACHE_CAPACITY"] = "1" +os.environ["LRU_CACHE_CAPACITY"] = "1" +# prevent CPU memory leak when run model on GPU +# https://github.com/pytorch/pytorch/issues/98688#issuecomment-1869288431 +# https://github.com/pytorch/pytorch/issues/108334#issuecomment-1752763633 +os.environ["TORCH_CUDNN_V8_API_LRU_CACHE_LIMIT"] = "1" + + +import warnings + +warnings.simplefilter("ignore", UserWarning) + + +def entry_point(): + # To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers + # https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18 + from inpaint.cli import typer_app + + typer_app() diff --git a/inpaint/__main__.py b/inpaint/__main__.py new file mode 100644 index 0000000..57fc6ae --- /dev/null +++ b/inpaint/__main__.py @@ -0,0 +1,4 @@ +from inpaint import entry_point + +if __name__ == "__main__": + entry_point() diff --git a/inpaint/api.py b/inpaint/api.py new file mode 100644 index 0000000..51e1329 --- /dev/null +++ b/inpaint/api.py @@ -0,0 +1,398 @@ +import asyncio +import os +import threading +import time +import traceback +from pathlib import Path +from typing import Optional, Dict, List + +import cv2 +import numpy as np +import socketio +import torch + +try: + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(False) +except: + pass + +import uvicorn +from PIL import Image +from fastapi import APIRouter, FastAPI, Request, UploadFile +from 
fastapi.encoders import jsonable_encoder +from fastapi.exceptions import HTTPException +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse, FileResponse, Response +from fastapi.staticfiles import StaticFiles +from loguru import logger +from socketio import AsyncServer + +from inpaint.file_manager import FileManager +from inpaint.helper import ( + load_img, + decode_base64_to_image, + pil_to_bytes, + numpy_to_bytes, + concat_alpha_channel, + gen_frontend_mask, + adjust_mask, +) +from inpaint.model.utils import torch_gc +from inpaint.model_manager import ModelManager +from inpaint.plugins import build_plugins, RealESRGANUpscaler, InteractiveSeg +from inpaint.plugins.base_plugin import BasePlugin +from inpaint.plugins.remove_bg import RemoveBG +from inpaint.schema import ( + GenInfoResponse, + ApiConfig, + ServerConfigResponse, + SwitchModelRequest, + InpaintRequest, + RunPluginRequest, + SDSampler, + PluginInfo, + AdjustMaskRequest, + RemoveBGModel, + SwitchPluginModelRequest, + ModelInfo, + InteractiveSegModel, + RealESRGANModel, +) + +CURRENT_DIR = Path(__file__).parent.absolute().resolve() +WEB_APP_DIR = CURRENT_DIR / "web_app" + + +def api_middleware(app: FastAPI): + rich_available = False + try: + if os.environ.get("WEBUI_RICH_EXCEPTIONS", None) is not None: + import anyio # importing just so it can be placed on silent list + import starlette # importing just so it can be placed on silent list + from rich.console import Console + + console = Console() + rich_available = True + except Exception: + pass + + def handle_exception(request: Request, e: Exception): + err = { + "error": type(e).__name__, + "detail": vars(e).get("detail", ""), + "body": vars(e).get("body", ""), + "errors": str(e), + } + if not isinstance( + e, HTTPException + ): # do not print backtrace on known httpexceptions + message = f"API error: {request.method}: {request.url} {err}" + if rich_available: + print(message) + console.print_exception( + show_locals=True, + max_frames=2, + extra_lines=1, + suppress=[anyio, starlette], + word_wrap=False, + width=min([console.width, 200]), + ) + else: + traceback.print_exc() + return JSONResponse( + status_code=vars(e).get("status_code", 500), content=jsonable_encoder(err) + ) + + @app.middleware("http") + async def exception_handling(request: Request, call_next): + try: + return await call_next(request) + except Exception as e: + return handle_exception(request, e) + + @app.exception_handler(Exception) + async def fastapi_exception_handler(request: Request, e: Exception): + return handle_exception(request, e) + + @app.exception_handler(HTTPException) + async def http_exception_handler(request: Request, e: HTTPException): + return handle_exception(request, e) + + cors_options = { + "allow_methods": ["*"], + "allow_headers": ["*"], + "allow_origins": ["*"], + "allow_credentials": True, + "expose_headers": ["X-Seed"], + } + app.add_middleware(CORSMiddleware, **cors_options) + + +global_sio: AsyncServer = None + + +def diffuser_callback(pipe, step: int, timestep: int, callback_kwargs: Dict = {}): + # self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict + # logger.info(f"diffusion callback: step={step}, timestep={timestep}") + + # We use asyncio loos for task processing. Perhaps in the future, we can add a processing queue similar to InvokeAI, + # but for now let's just start a separate event loop. 
It shouldn't make a difference for single person use + asyncio.run(global_sio.emit("diffusion_progress", {"step": step})) + return {} + + +class Api: + def __init__(self, app: FastAPI, config: ApiConfig): + self.app = app + self.config = config + self.router = APIRouter() + self.queue_lock = threading.Lock() + api_middleware(self.app) + + self.file_manager = self._build_file_manager() + self.plugins = self._build_plugins() + self.model_manager = self._build_model_manager() + + # fmt: off + self.add_api_route("/api/v1/gen-info", self.api_geninfo, methods=["POST"], response_model=GenInfoResponse) + self.add_api_route("/api/v1/server-config", self.api_server_config, methods=["GET"], + response_model=ServerConfigResponse) + self.add_api_route("/api/v1/model", self.api_current_model, methods=["GET"], response_model=ModelInfo) + self.add_api_route("/api/v1/model", self.api_switch_model, methods=["POST"], response_model=ModelInfo) + self.add_api_route("/api/v1/inputimage", self.api_input_image, methods=["GET"]) + self.add_api_route("/api/v1/inpaint", self.api_inpaint, methods=["POST"]) + self.add_api_route("/api/v1/switch_plugin_model", self.api_switch_plugin_model, methods=["POST"]) + self.add_api_route("/api/v1/run_plugin_gen_mask", self.api_run_plugin_gen_mask, methods=["POST"]) + self.add_api_route("/api/v1/run_plugin_gen_image", self.api_run_plugin_gen_image, methods=["POST"]) + self.add_api_route("/api/v1/samplers", self.api_samplers, methods=["GET"]) + self.add_api_route("/api/v1/adjust_mask", self.api_adjust_mask, methods=["POST"]) + self.add_api_route("/api/v1/save_image", self.api_save_image, methods=["POST"]) + self.app.mount("/", StaticFiles(directory=WEB_APP_DIR, html=True), name="assets") + # fmt: on + + global global_sio + self.sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*") + self.combined_asgi_app = socketio.ASGIApp(self.sio, self.app) + self.app.mount("/ws", self.combined_asgi_app) + global_sio = self.sio + + def add_api_route(self, path: str, endpoint, **kwargs): + return self.app.add_api_route(path, endpoint, **kwargs) + + def api_save_image(self, file: UploadFile): + filename = file.filename + origin_image_bytes = file.file.read() + with open(self.config.output_dir / filename, "wb") as fw: + fw.write(origin_image_bytes) + + def api_current_model(self) -> ModelInfo: + return self.model_manager.current_model + + def api_switch_model(self, req: SwitchModelRequest) -> ModelInfo: + if req.name == self.model_manager.name: + return self.model_manager.current_model + self.model_manager.switch(req.name) + return self.model_manager.current_model + + def api_switch_plugin_model(self, req: SwitchPluginModelRequest): + if req.plugin_name in self.plugins: + self.plugins[req.plugin_name].switch_model(req.model_name) + if req.plugin_name == RemoveBG.name: + self.config.remove_bg_model = req.model_name + if req.plugin_name == RealESRGANUpscaler.name: + self.config.realesrgan_model = req.model_name + if req.plugin_name == InteractiveSeg.name: + self.config.interactive_seg_model = req.model_name + torch_gc() + + def api_server_config(self) -> ServerConfigResponse: + plugins = [] + for it in self.plugins.values(): + plugins.append( + PluginInfo( + name=it.name, + support_gen_image=it.support_gen_image, + support_gen_mask=it.support_gen_mask, + ) + ) + + return ServerConfigResponse( + plugins=plugins, + modelInfos=self.model_manager.scan_models(), + removeBGModel=self.config.remove_bg_model, + removeBGModels=RemoveBGModel.values(), + 
realesrganModel=self.config.realesrgan_model, + realesrganModels=RealESRGANModel.values(), + interactiveSegModel=self.config.interactive_seg_model, + interactiveSegModels=InteractiveSegModel.values(), + enableFileManager=self.file_manager is not None, + enableAutoSaving=self.config.output_dir is not None, + enableControlnet=self.model_manager.enable_controlnet, + controlnetMethod=self.model_manager.controlnet_method, + disableModelSwitch=False, + isDesktop=False, + samplers=self.api_samplers(), + ) + + def api_input_image(self) -> FileResponse: + if self.config.input and self.config.input.is_file(): + return FileResponse(self.config.input) + raise HTTPException(status_code=404, detail="Input image not found") + + def api_geninfo(self, file: UploadFile) -> GenInfoResponse: + _, _, info = load_img(file.file.read(), return_info=True) + parts = info.get("parameters", "").split("Negative prompt: ") + prompt = parts[0].strip() + negative_prompt = "" + if len(parts) > 1: + negative_prompt = parts[1].split("\n")[0].strip() + return GenInfoResponse(prompt=prompt, negative_prompt=negative_prompt) + + def api_inpaint(self, req: InpaintRequest): + image, alpha_channel, infos = decode_base64_to_image(req.image) + mask, _, _ = decode_base64_to_image(req.mask, gray=True) + + mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1] + if image.shape[:2] != mask.shape[:2]: + raise HTTPException( + 400, + detail=f"Image size({image.shape[:2]}) and mask size({mask.shape[:2]}) not match.", + ) + + if req.paint_by_example_example_image: + paint_by_example_image, _, _ = decode_base64_to_image( + req.paint_by_example_example_image + ) + + start = time.time() + rgb_np_img = self.model_manager(image, mask, req) + logger.info(f"process time: {(time.time() - start) * 1000:.2f}ms") + torch_gc() + + rgb_np_img = cv2.cvtColor(rgb_np_img.astype(np.uint8), cv2.COLOR_BGR2RGB) + rgb_res = concat_alpha_channel(rgb_np_img, alpha_channel) + + ext = "png" + res_img_bytes = pil_to_bytes( + Image.fromarray(rgb_res), + ext=ext, + quality=self.config.quality, + infos=infos, + ) + + asyncio.run(self.sio.emit("diffusion_finish")) + + return Response( + content=res_img_bytes, + media_type=f"image/{ext}", + headers={"X-Seed": str(req.sd_seed)}, + ) + + def api_run_plugin_gen_image(self, req: RunPluginRequest): + ext = "png" + if req.name not in self.plugins: + raise HTTPException(status_code=422, detail="Plugin not found") + if not self.plugins[req.name].support_gen_image: + raise HTTPException( + status_code=422, detail="Plugin does not support output image" + ) + rgb_np_img, alpha_channel, infos = decode_base64_to_image(req.image) + bgr_or_rgba_np_img = self.plugins[req.name].gen_image(rgb_np_img, req) + torch_gc() + + if bgr_or_rgba_np_img.shape[2] == 4: + rgba_np_img = bgr_or_rgba_np_img + else: + rgba_np_img = cv2.cvtColor(bgr_or_rgba_np_img, cv2.COLOR_BGR2RGB) + rgba_np_img = concat_alpha_channel(rgba_np_img, alpha_channel) + + return Response( + content=pil_to_bytes( + Image.fromarray(rgba_np_img), + ext=ext, + quality=self.config.quality, + infos=infos, + ), + media_type=f"image/{ext}", + ) + + def api_run_plugin_gen_mask(self, req: RunPluginRequest): + if req.name not in self.plugins: + raise HTTPException(status_code=422, detail="Plugin not found") + if not self.plugins[req.name].support_gen_mask: + raise HTTPException( + status_code=422, detail="Plugin does not support output image" + ) + rgb_np_img, alpha_channel, infos = decode_base64_to_image(req.image) + bgr_or_gray_mask = self.plugins[req.name].gen_mask(rgb_np_img, 
req) + torch_gc() + res_mask = gen_frontend_mask(bgr_or_gray_mask) + return Response( + content=numpy_to_bytes(res_mask, "png"), + media_type="image/png", + ) + + def api_samplers(self) -> List[str]: + return [member.value for member in SDSampler.__members__.values()] + + def api_adjust_mask(self, req: AdjustMaskRequest): + mask, _, _ = decode_base64_to_image(req.mask, gray=True) + mask = adjust_mask(mask, req.kernel_size, req.operate) + return Response(content=numpy_to_bytes(mask, "png"), media_type="image/png") + + def launch(self): + self.app.include_router(self.router) + uvicorn.run( + self.combined_asgi_app, + host=self.config.host, + port=self.config.port, + timeout_keep_alive=999999999, + ) + + def _build_file_manager(self) -> Optional[FileManager]: + if self.config.input and self.config.input.is_dir(): + logger.info( + f"Input is directory, initialize file manager {self.config.input}" + ) + + return FileManager( + app=self.app, + input_dir=self.config.input, + mask_dir=self.config.mask_dir, + output_dir=self.config.output_dir, + ) + return None + + def _build_plugins(self) -> Dict[str, BasePlugin]: + return build_plugins( + self.config.enable_interactive_seg, + self.config.interactive_seg_model, + self.config.interactive_seg_device, + self.config.enable_remove_bg, + self.config.remove_bg_model, + self.config.enable_anime_seg, + self.config.enable_realesrgan, + self.config.realesrgan_device, + self.config.realesrgan_model, + self.config.enable_gfpgan, + self.config.gfpgan_device, + self.config.enable_restoreformer, + self.config.restoreformer_device, + self.config.no_half, + ) + + def _build_model_manager(self): + return ModelManager( + name=self.config.model, + device=torch.device(self.config.device), + no_half=self.config.no_half, + low_mem=self.config.low_mem, + disable_nsfw=self.config.disable_nsfw_checker, + sd_cpu_textencoder=self.config.cpu_textencoder, + local_files_only=self.config.local_files_only, + cpu_offload=self.config.cpu_offload, + callback=diffuser_callback, + ) diff --git a/inpaint/batch_processing.py b/inpaint/batch_processing.py new file mode 100644 index 0000000..2430010 --- /dev/null +++ b/inpaint/batch_processing.py @@ -0,0 +1,128 @@ +import json +from pathlib import Path +from typing import Dict, Optional + +import cv2 +import numpy as np +from PIL import Image +from loguru import logger +from rich.console import Console +from rich.progress import ( + Progress, + SpinnerColumn, + TimeElapsedColumn, + MofNCompleteColumn, + TextColumn, + BarColumn, + TaskProgressColumn, +) + +from inpaint.helper import pil_to_bytes +from inpaint.model.utils import torch_gc +from inpaint.model_manager import ModelManager +from inpaint.schema import InpaintRequest + + +def glob_images(path: Path) -> Dict[str, Path]: + # png/jpg/jpeg + if path.is_file(): + return {path.stem: path} + elif path.is_dir(): + res = {} + for it in path.glob("*.*"): + if it.suffix.lower() in [".png", ".jpg", ".jpeg"]: + res[it.stem] = it + return res + + +def batch_inpaint( + model: str, + device, + image: Path, + mask: Path, + output: Path, + config: Optional[Path] = None, + concat: bool = False, +): + if image.is_dir() and output.is_file(): + logger.error( + "invalid --output: when image is a directory, output should be a directory" + ) + exit(-1) + output.mkdir(parents=True, exist_ok=True) + + image_paths = glob_images(image) + mask_paths = glob_images(mask) + if len(image_paths) == 0: + logger.error("invalid --image: empty image folder") + exit(-1) + if len(mask_paths) == 0: + logger.error("invalid 
--mask: empty mask folder") + exit(-1) + + if config is None: + inpaint_request = InpaintRequest() + logger.info(f"Using default config: {inpaint_request}") + else: + with open(config, "r", encoding="utf-8") as f: + inpaint_request = InpaintRequest(**json.load(f)) + logger.info(f"Using config: {inpaint_request}") + + model_manager = ModelManager(name=model, device=device) + first_mask = list(mask_paths.values())[0] + + console = Console() + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TaskProgressColumn(), + MofNCompleteColumn(), + TimeElapsedColumn(), + console=console, + transient=False, + ) as progress: + task = progress.add_task("Batch processing...", total=len(image_paths)) + for stem, image_p in image_paths.items(): + if stem not in mask_paths and mask.is_dir(): + progress.log(f"mask for {image_p} not found") + progress.update(task, advance=1) + continue + mask_p = mask_paths.get(stem, first_mask) + + infos = Image.open(image_p).info + + img = np.array(Image.open(image_p).convert("RGB")) + mask_img = np.array(Image.open(mask_p).convert("L")) + + if mask_img.shape[:2] != img.shape[:2]: + progress.log( + f"resize mask {mask_p.name} to image {image_p.name} size: {img.shape[:2]}" + ) + mask_img = cv2.resize( + mask_img, + (img.shape[1], img.shape[0]), + interpolation=cv2.INTER_NEAREST, + ) + mask_img[mask_img >= 127] = 255 + mask_img[mask_img < 127] = 0 + + # bgr + inpaint_result = model_manager(img, mask_img, inpaint_request) + inpaint_result = cv2.cvtColor(inpaint_result, cv2.COLOR_BGR2RGB) + if concat: + mask_img = cv2.cvtColor(mask_img, cv2.COLOR_GRAY2RGB) + inpaint_result = cv2.hconcat([img, mask_img, inpaint_result]) + + img_bytes = pil_to_bytes(Image.fromarray(inpaint_result), "png", 100, infos) + save_p = output / f"{stem}.png" + with open(save_p, "wb") as fw: + fw.write(img_bytes) + + progress.update(task, advance=1) + torch_gc() + # pid = psutil.Process().pid + # memory_info = psutil.Process(pid).memory_info() + # memory_in_mb = memory_info.rss / (1024 * 1024) + # print(f"原图大小:{img.shape},当前进程的内存占用:{memory_in_mb}MB") diff --git a/inpaint/benchmark.py b/inpaint/benchmark.py new file mode 100644 index 0000000..9a98a3c --- /dev/null +++ b/inpaint/benchmark.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 + +import argparse +import os +import time + +import numpy as np +import nvidia_smi +import psutil +import torch + +from inpaint.model_manager import ModelManager +from inpaint.schema import InpaintRequest, HDStrategy, SDSampler + +try: + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(False) +except: + pass + +NUM_THREADS = str(4) + +os.environ["OMP_NUM_THREADS"] = NUM_THREADS +os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS +os.environ["MKL_NUM_THREADS"] = NUM_THREADS +os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS +os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS +if os.environ.get("CACHE_DIR"): + os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"] + + +def run_model(model, size): + # RGB + image = np.random.randint(0, 256, (size[0], size[1], 3)).astype(np.uint8) + mask = np.random.randint(0, 255, size).astype(np.uint8) + + config = InpaintRequest( + ldm_steps=2, + hd_strategy=HDStrategy.ORIGINAL, + hd_strategy_crop_margin=128, + hd_strategy_crop_trigger_size=128, + hd_strategy_resize_limit=128, + prompt="a fox is sitting on a bench", + sd_steps=5, + sd_sampler=SDSampler.ddim, + ) + 
model(image, mask, config) + + +def benchmark(model, times: int, empty_cache: bool): + sizes = [(512, 512)] + + nvidia_smi.nvmlInit() + device_id = 0 + handle = nvidia_smi.nvmlDeviceGetHandleByIndex(device_id) + + def format(metrics): + return f"{np.mean(metrics):.2f} ± {np.std(metrics):.2f}" + + process = psutil.Process(os.getpid()) + # Report GPU memory and RAM usage metrics for each size + for size in sizes: + torch.cuda.empty_cache() + time_metrics = [] + cpu_metrics = [] + memory_metrics = [] + gpu_memory_metrics = [] + for _ in range(times): + start = time.time() + run_model(model, size) + torch.cuda.synchronize() + + # cpu_metrics.append(process.cpu_percent()) + time_metrics.append((time.time() - start) * 1000) + memory_metrics.append(process.memory_info().rss / 1024 / 1024) + gpu_memory_metrics.append( + nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024 + ) + + print(f"size: {size}".center(80, "-")) + # print(f"cpu: {format(cpu_metrics)}") + print(f"latency: {format(time_metrics)}ms") + print(f"memory: {format(memory_metrics)} MB") + print(f"gpu memory: {format(gpu_memory_metrics)} MB") + + nvidia_smi.nvmlShutdown() + + +def get_args_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("--name") + parser.add_argument("--device", default="cuda", type=str) + parser.add_argument("--times", default=10, type=int) + parser.add_argument("--empty-cache", action="store_true") + return parser.parse_args() + + +if __name__ == "__main__": + args = get_args_parser() + device = torch.device(args.device) + model = ModelManager( + name=args.name, + device=device, + disable_nsfw=True, + sd_cpu_textencoder=True, + ) + benchmark(model, args.times, args.empty_cache) diff --git a/inpaint/cli.py b/inpaint/cli.py new file mode 100644 index 0000000..fb8e94a --- /dev/null +++ b/inpaint/cli.py @@ -0,0 +1,232 @@ +import webbrowser +from contextlib import asynccontextmanager +from pathlib import Path +from typing import Optional + +import typer +from fastapi import FastAPI +from loguru import logger +from typer import Option +from typer_config import use_json_config + +from inpaint.const import * +from inpaint.runtime import setup_model_dir, dump_environment_info, check_device +from inpaint.schema import InteractiveSegModel, Device, RealESRGANModel, RemoveBGModel + +typer_app = typer.Typer(pretty_exceptions_show_locals=False, add_completion=False) + + +@typer_app.command(help="Install all plugin dependencies") +def install_plugins_packages(): + from inpaint.installer import install_plugins_package + + install_plugins_package() + + +@typer_app.command(help="Download SD/SDXL normal/inpainting model from HuggingFace") +def download( + model: str = Option( + ..., help="Model id on HuggingFace, e.g. runwayml/stable-diffusion-inpainting" + ), + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + file_okay=False, + callback=setup_model_dir, + ), +): + from inpaint.download import cli_download_model + + cli_download_model(model) + + +@typer_app.command(name="list", help="List downloaded models") +def list_model( + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + file_okay=False, + callback=setup_model_dir, + ), +): + from inpaint.download import scan_models + + scanned_models = scan_models() + for it in scanned_models: + print(it.name) + + +@typer_app.command(help="Batch process images") +def run( + model: str = Option("lama"), + device: Device = Option(Device.cpu), + image: Path = Option(..., help="Image folders or file path"), + mask: Path = Option( + ..., + help="Mask 
folders or file path. " + "If it is a directory, the mask images in the directory should have the same name as the original image. " "If it is a file, all images will use this mask. " "Mask will automatically resize to the same size as the original image.", + ), + output: Path = Option(..., help="Output directory or file path"), + config: Path = Option( + None, help="Config file path. You can use dump command to create a base config." + ), + concat: bool = Option( + False, help="Concat original image, mask and output images into one image" + ), + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + file_okay=False, + callback=setup_model_dir, + ), +): + from inpaint.download import cli_download_model, scan_models + + scanned_models = scan_models() + if model not in [it.name for it in scanned_models]: + logger.info(f"{model} not found in {model_dir}, trying to download it") + cli_download_model(model) + + from inpaint.batch_processing import batch_inpaint + + batch_inpaint(model, device, image, mask, output, config, concat) + + +@typer_app.command(help="Start IOPaint server") +@use_json_config() +def start( + host: str = Option("127.0.0.1"), + port: int = Option(8080), + inbrowser: bool = Option(False, help=INBROWSER_HELP), + model: str = Option( + DEFAULT_MODEL, + help=f"Erase models: [{', '.join(AVAILABLE_MODELS)}].\n" + f"Diffusion models: [{', '.join(DIFFUSION_MODELS)}] or any SD/SDXL normal/inpainting models on HuggingFace.", + ), + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + dir_okay=True, + file_okay=False, + callback=setup_model_dir, + ), + low_mem: bool = Option(False, help=LOW_MEM_HELP), + no_half: bool = Option(False, help=NO_HALF_HELP), + cpu_offload: bool = Option(False, help=CPU_OFFLOAD_HELP), + disable_nsfw_checker: bool = Option(False, help=DISABLE_NSFW_HELP), + cpu_textencoder: bool = Option(False, help=CPU_TEXTENCODER_HELP), + local_files_only: bool = Option(False, help=LOCAL_FILES_ONLY_HELP), + device: Device = Option(Device.cpu), + input: Optional[Path] = Option(None, help=INPUT_HELP), + mask_dir: Optional[Path] = Option( + None, help=MASK_DIR_HELP, dir_okay=True, file_okay=False + ), + output_dir: Optional[Path] = Option( + None, help=OUTPUT_DIR_HELP, dir_okay=True, file_okay=False + ), + quality: int = Option(95, help=QUALITY_HELP), + enable_interactive_seg: bool = Option(False, help=INTERACTIVE_SEG_HELP), + interactive_seg_model: InteractiveSegModel = Option( + InteractiveSegModel.vit_b, help=INTERACTIVE_SEG_MODEL_HELP + ), + interactive_seg_device: Device = Option(Device.cpu), + enable_remove_bg: bool = Option(False, help=REMOVE_BG_HELP), + remove_bg_model: RemoveBGModel = Option(RemoveBGModel.briaai_rmbg_1_4), + enable_anime_seg: bool = Option(False, help=ANIMESEG_HELP), + enable_realesrgan: bool = Option(False), + realesrgan_device: Device = Option(Device.cpu), + realesrgan_model: RealESRGANModel = Option(RealESRGANModel.realesr_general_x4v3), + enable_gfpgan: bool = Option(False), + gfpgan_device: Device = Option(Device.cpu), + enable_restoreformer: bool = Option(False), + restoreformer_device: Device = Option(Device.cpu), +): + dump_environment_info() + device = check_device(device) + if input and not input.exists(): + logger.error(f"invalid --input: {input} does not exist") + exit(-1) + if mask_dir and not mask_dir.exists(): + logger.error(f"invalid --mask-dir: {mask_dir} does not exist") + exit(-1) + if input and input.is_dir() and not output_dir: + logger.error("invalid --output-dir: --output-dir must be set when --input 
is a directory") + exit(-1) + if output_dir: + output_dir = output_dir.expanduser().absolute() + logger.info(f"Images will be saved to {output_dir}") + if not output_dir.exists(): + logger.info(f"Creating output directory {output_dir}") + output_dir.mkdir(parents=True) + if mask_dir: + mask_dir = mask_dir.expanduser().absolute() + + model_dir = model_dir.expanduser().absolute() + + if local_files_only: + os.environ["TRANSFORMERS_OFFLINE"] = "1" + os.environ["HF_HUB_OFFLINE"] = "1" + + from inpaint.download import cli_download_model, scan_models + + scanned_models = scan_models() + if model not in [it.name for it in scanned_models]: + logger.info(f"{model} not found in {model_dir}, trying to download it") + cli_download_model(model) + + from inpaint.api import Api + from inpaint.schema import ApiConfig + + @asynccontextmanager + async def lifespan(app: FastAPI): + if inbrowser: + webbrowser.open(f"http://localhost:{port}", new=0, autoraise=True) + yield + + app = FastAPI(lifespan=lifespan) + + api_config = ApiConfig( + host=host, + port=port, + inbrowser=inbrowser, + model=model, + no_half=no_half, + low_mem=low_mem, + cpu_offload=cpu_offload, + disable_nsfw_checker=disable_nsfw_checker, + local_files_only=local_files_only, + cpu_textencoder=cpu_textencoder if device == Device.cuda else False, + device=device, + input=input, + mask_dir=mask_dir, + output_dir=output_dir, + quality=quality, + enable_interactive_seg=enable_interactive_seg, + interactive_seg_model=interactive_seg_model, + interactive_seg_device=interactive_seg_device, + enable_remove_bg=enable_remove_bg, + remove_bg_model=remove_bg_model, + enable_anime_seg=enable_anime_seg, + enable_realesrgan=enable_realesrgan, + realesrgan_device=realesrgan_device, + realesrgan_model=realesrgan_model, + enable_gfpgan=enable_gfpgan, + gfpgan_device=gfpgan_device, + enable_restoreformer=enable_restoreformer, + restoreformer_device=restoreformer_device, + ) + print(api_config.model_dump_json(indent=4)) + api = Api(app, api_config) + api.launch() + + +@typer_app.command(help="Start IOPaint web config page") +def start_web_config( + config_file: Path = Option("config.json"), +): + dump_environment_info() + from inpaint.web_config import main + + main(config_file) diff --git a/inpaint/const.py b/inpaint/const.py new file mode 100644 index 0000000..b18254b --- /dev/null +++ b/inpaint/const.py @@ -0,0 +1,128 @@ +import os +from typing import List + +INSTRUCT_PIX2PIX_NAME = "timbrooks/instruct-pix2pix" +KANDINSKY22_NAME = "kandinsky-community/kandinsky-2-2-decoder-inpaint" +POWERPAINT_NAME = "Sanster/PowerPaint-V1-stable-diffusion-inpainting" +ANYTEXT_NAME = "Sanster/AnyText" + +DIFFUSERS_SD_CLASS_NAME = "StableDiffusionPipeline" +DIFFUSERS_SD_INPAINT_CLASS_NAME = "StableDiffusionInpaintPipeline" +DIFFUSERS_SDXL_CLASS_NAME = "StableDiffusionXLPipeline" +DIFFUSERS_SDXL_INPAINT_CLASS_NAME = "StableDiffusionXLInpaintPipeline" + +MPS_UNSUPPORT_MODELS = [ + "lama", + "ldm", + "zits", + "mat", + "fcf", + "cv2", + "manga", +] + +DEFAULT_MODEL = "lama" +AVAILABLE_MODELS = ["lama", "ldm", "zits", "mat", "fcf", "manga", "cv2", "migan"] +DIFFUSION_MODELS = [ + "runwayml/stable-diffusion-inpainting", + "Uminosachi/realisticVisionV51_v51VAE-inpainting", + "redstonehero/dreamshaper-inpainting", + "Sanster/anything-4.0-inpainting", + "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "Fantasy-Studio/Paint-by-Example", + POWERPAINT_NAME, + ANYTEXT_NAME, +] + +NO_HALF_HELP = """ +Use the full precision (fp32) model. 
+If your diffusion model always generates black or green results, use this argument. +""" + +CPU_OFFLOAD_HELP = """ +Offload the diffusion model's weights to CPU RAM, significantly reducing vRAM usage. +""" + +LOW_MEM_HELP = "Enable attention slicing and VAE tiling to save memory." + +DISABLE_NSFW_HELP = """ +Disable the NSFW checker for diffusion models. +""" + +CPU_TEXTENCODER_HELP = """ +Run the diffusion model's text encoder on CPU to reduce vRAM usage. +""" + +SD_CONTROLNET_CHOICES: List[str] = [ + "lllyasviel/control_v11p_sd15_canny", + # "lllyasviel/control_v11p_sd15_seg", + "lllyasviel/control_v11p_sd15_openpose", + "lllyasviel/control_v11p_sd15_inpaint", + "lllyasviel/control_v11f1p_sd15_depth", +] + +SD_BRUSHNET_CHOICES: List[str] = [ + "Sanster/brushnet_random_mask", + "Sanster/brushnet_segmentation_mask", +] + +SD2_CONTROLNET_CHOICES = [ + "thibaud/controlnet-sd21-canny-diffusers", + "thibaud/controlnet-sd21-depth-diffusers", + "thibaud/controlnet-sd21-openpose-diffusers", +] + +SDXL_CONTROLNET_CHOICES = [ + "thibaud/controlnet-openpose-sdxl-1.0", + "destitech/controlnet-inpaint-dreamer-sdxl", + "diffusers/controlnet-canny-sdxl-1.0", + "diffusers/controlnet-canny-sdxl-1.0-mid", + "diffusers/controlnet-canny-sdxl-1.0-small", + "diffusers/controlnet-depth-sdxl-1.0", + "diffusers/controlnet-depth-sdxl-1.0-mid", + "diffusers/controlnet-depth-sdxl-1.0-small", +] + +LOCAL_FILES_ONLY_HELP = """ +When loading diffusion models, use local files only and do not connect to the HuggingFace server. +""" + +DEFAULT_MODEL_DIR = os.path.abspath( + os.getenv("XDG_CACHE_HOME", os.path.join(os.path.expanduser("~"), ".cache")) +) + +MODEL_DIR_HELP = f""" +Model download directory (set the XDG_CACHE_HOME environment variable to change it). By default models are downloaded to {DEFAULT_MODEL_DIR} +""" + +OUTPUT_DIR_HELP = """ +Result images will be saved to the output directory automatically. +""" + +MASK_DIR_HELP = """ +You can view masks in the FileManager +""" + +INPUT_HELP = """ +If the input is an image, it will be loaded by default. +If the input is a directory, you can browse and select images in the file manager. +""" + +GUI_HELP = """ +Launch Lama Cleaner as a desktop app +""" + +QUALITY_HELP = """ +Quality of image encoding, 0-100. Default is 95; higher quality produces larger files. +""" + +INTERACTIVE_SEG_HELP = "Enable interactive segmentation using Segment Anything." +INTERACTIVE_SEG_MODEL_HELP = "Model size: mobile_sam < vit_b < vit_l < vit_h. A bigger model gives better segmentation but runs slower." +REMOVE_BG_HELP = "Enable the remove background plugin. Always runs on CPU" +ANIMESEG_HELP = "Enable the anime segmentation plugin. Always runs on CPU" +REALESRGAN_HELP = "Enable RealESRGAN super resolution" +GFPGAN_HELP = "Enable GFPGAN face restoration. To also enhance the background, use with --enable-realesrgan" +RESTOREFORMER_HELP = "Enable RestoreFormer face restoration. To also enhance the background, use with --enable-realesrgan" +GIF_HELP = "Enable GIF plugin.
Make GIF to compare original and cleaned image" + +INBROWSER_HELP = "Automatically launch IOPaint in a new tab on the default browser" diff --git a/inpaint/download.py b/inpaint/download.py new file mode 100644 index 0000000..c0a099f --- /dev/null +++ b/inpaint/download.py @@ -0,0 +1,313 @@ +import glob +import json +import os +from functools import lru_cache +from typing import List, Optional + +from inpaint.schema import ModelType, ModelInfo +from loguru import logger +from pathlib import Path + +from inpaint.const import ( + DEFAULT_MODEL_DIR, + DIFFUSERS_SD_CLASS_NAME, + DIFFUSERS_SD_INPAINT_CLASS_NAME, + DIFFUSERS_SDXL_CLASS_NAME, + DIFFUSERS_SDXL_INPAINT_CLASS_NAME, + ANYTEXT_NAME, +) +from inpaint.model.original_sd_configs import get_config_files + + +def cli_download_model(model: str): + from inpaint.model import models + from inpaint.model.utils import handle_from_pretrained_exceptions + + if model in models and models[model].is_erase_model: + logger.info(f"Downloading {model}...") + models[model].download() + logger.info("Done.") + elif model == ANYTEXT_NAME: + logger.info(f"Downloading {model}...") + models[model].download() + logger.info("Done.") + else: + logger.info(f"Downloading model from Huggingface: {model}") + from diffusers import DiffusionPipeline + + downloaded_path = handle_from_pretrained_exceptions( + DiffusionPipeline.download, + pretrained_model_name=model, + variant="fp16", + resume_download=True, + ) + logger.info(f"Done. Downloaded to {downloaded_path}") + + +def folder_name_to_show_name(name: str) -> str: + return name.replace("models--", "").replace("--", "/") + + +@lru_cache(maxsize=512) +def get_sd_model_type(model_abs_path: str) -> Optional[ModelType]: + if "inpaint" in Path(model_abs_path).name.lower(): + model_type = ModelType.DIFFUSERS_SD_INPAINT + else: + # load once to check num_in_channels + from diffusers import StableDiffusionInpaintPipeline + + try: + StableDiffusionInpaintPipeline.from_single_file( + model_abs_path, + load_safety_checker=False, + num_in_channels=9, + original_config_file=get_config_files()['v1'] + ) + model_type = ModelType.DIFFUSERS_SD_INPAINT + except ValueError as e: + if "[320, 4, 3, 3]" in str(e): + model_type = ModelType.DIFFUSERS_SD + else: + logger.info(f"Ignore non sdxl file: {model_abs_path}") + return + except Exception as e: + logger.error(f"Failed to load {model_abs_path}: {e}") + return + return model_type + + +@lru_cache() +def get_sdxl_model_type(model_abs_path: str) -> Optional[ModelType]: + if "inpaint" in model_abs_path: + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + else: + # load once to check num_in_channels + from diffusers import StableDiffusionXLInpaintPipeline + + try: + model = StableDiffusionXLInpaintPipeline.from_single_file( + model_abs_path, + load_safety_checker=False, + num_in_channels=9, + original_config_file=get_config_files()['xl'], + ) + if model.unet.config.in_channels == 9: + # https://github.com/huggingface/diffusers/issues/6610 + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + else: + model_type = ModelType.DIFFUSERS_SDXL + except ValueError as e: + if "[320, 4, 3, 3]" in str(e): + model_type = ModelType.DIFFUSERS_SDXL + else: + logger.info(f"Ignore non sdxl file: {model_abs_path}") + return + except Exception as e: + logger.error(f"Failed to load {model_abs_path}: {e}") + return + return model_type + + +def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]: + cache_dir = Path(cache_dir) + stable_diffusion_dir = cache_dir / "stable_diffusion" + cache_file = 
stable_diffusion_dir / "iopaint_cache.json" + model_type_cache = {} + if cache_file.exists(): + try: + with open(cache_file, "r", encoding="utf-8") as f: + model_type_cache = json.load(f) + assert isinstance(model_type_cache, dict) + except: + pass + + res = [] + for it in stable_diffusion_dir.glob("*.*"): + if it.suffix not in [".safetensors", ".ckpt"]: + continue + model_abs_path = str(it.absolute()) + model_type = model_type_cache.get(it.name) + if model_type is None: + model_type = get_sd_model_type(model_abs_path) + if model_type is None: + continue + + model_type_cache[it.name] = model_type + res.append( + ModelInfo( + name=it.name, + path=model_abs_path, + model_type=model_type, + is_single_file_diffusers=True, + ) + ) + if stable_diffusion_dir.exists(): + with open(cache_file, "w", encoding="utf-8") as fw: + json.dump(model_type_cache, fw, indent=2, ensure_ascii=False) + + stable_diffusion_xl_dir = cache_dir / "stable_diffusion_xl" + sdxl_cache_file = stable_diffusion_xl_dir / "iopaint_cache.json" + sdxl_model_type_cache = {} + if sdxl_cache_file.exists(): + try: + with open(sdxl_cache_file, "r", encoding="utf-8") as f: + sdxl_model_type_cache = json.load(f) + assert isinstance(sdxl_model_type_cache, dict) + except: + pass + + for it in stable_diffusion_xl_dir.glob("*.*"): + if it.suffix not in [".safetensors", ".ckpt"]: + continue + model_abs_path = str(it.absolute()) + model_type = sdxl_model_type_cache.get(it.name) + if model_type is None: + model_type = get_sdxl_model_type(model_abs_path) + if model_type is None: + continue + + sdxl_model_type_cache[it.name] = model_type + if stable_diffusion_xl_dir.exists(): + with open(sdxl_cache_file, "w", encoding="utf-8") as fw: + json.dump(sdxl_model_type_cache, fw, indent=2, ensure_ascii=False) + + res.append( + ModelInfo( + name=it.name, + path=model_abs_path, + model_type=model_type, + is_single_file_diffusers=True, + ) + ) + return res + + +def scan_inpaint_models(model_dir: Path) -> List[ModelInfo]: + res = [] + from inpaint.model import models + + # logger.info(f"Scanning inpaint models in {model_dir}") + + for name, m in models.items(): + if m.is_erase_model and m.is_downloaded(): + res.append( + ModelInfo( + name=name, + path=name, + model_type=ModelType.INPAINT, + ) + ) + return res + + +def scan_diffusers_models() -> List[ModelInfo]: + from huggingface_hub.constants import HF_HUB_CACHE + + available_models = [] + cache_dir = Path(HF_HUB_CACHE) + # logger.info(f"Scanning diffusers models in {cache_dir}") + diffusers_model_names = [] + model_index_files = glob.glob(os.path.join(cache_dir, "**/*", "model_index.json"), recursive=True) + for it in model_index_files: + it = Path(it) + with open(it, "r", encoding="utf-8") as f: + try: + data = json.load(f) + except: + continue + + _class_name = data["_class_name"] + name = folder_name_to_show_name(it.parent.parent.parent.name) + if name in diffusers_model_names: + continue + if "PowerPaint" in name: + model_type = ModelType.DIFFUSERS_OTHER + elif _class_name == DIFFUSERS_SD_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SD + elif _class_name == DIFFUSERS_SD_INPAINT_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SD_INPAINT + elif _class_name == DIFFUSERS_SDXL_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SDXL + elif _class_name == DIFFUSERS_SDXL_INPAINT_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + elif _class_name in [ + "StableDiffusionInstructPix2PixPipeline", + "PaintByExamplePipeline", + "KandinskyV22InpaintPipeline", + "AnyText", + ]: + model_type = 
ModelType.DIFFUSERS_OTHER + else: + continue + + diffusers_model_names.append(name) + available_models.append( + ModelInfo( + name=name, + path=name, + model_type=model_type, + ) + ) + return available_models + + +def _scan_converted_diffusers_models(cache_dir) -> List[ModelInfo]: + cache_dir = Path(cache_dir) + available_models = [] + diffusers_model_names = [] + model_index_files = glob.glob(os.path.join(cache_dir, "**/*", "model_index.json"), recursive=True) + for it in model_index_files: + it = Path(it) + with open(it, "r", encoding="utf-8") as f: + try: + data = json.load(f) + except: + logger.error( + f"Failed to load {it}, please try revert from original model or fix model_index.json by hand." + ) + continue + + _class_name = data["_class_name"] + name = folder_name_to_show_name(it.parent.name) + if name in diffusers_model_names: + continue + elif _class_name == DIFFUSERS_SD_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SD + elif _class_name == DIFFUSERS_SD_INPAINT_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SD_INPAINT + elif _class_name == DIFFUSERS_SDXL_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SDXL + elif _class_name == DIFFUSERS_SDXL_INPAINT_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + else: + continue + + diffusers_model_names.append(name) + available_models.append( + ModelInfo( + name=name, + path=str(it.parent.absolute()), + model_type=model_type, + ) + ) + return available_models + + +def scan_converted_diffusers_models(cache_dir) -> List[ModelInfo]: + cache_dir = Path(cache_dir) + available_models = [] + stable_diffusion_dir = cache_dir / "stable_diffusion" + stable_diffusion_xl_dir = cache_dir / "stable_diffusion_xl" + available_models.extend(_scan_converted_diffusers_models(stable_diffusion_dir)) + available_models.extend(_scan_converted_diffusers_models(stable_diffusion_xl_dir)) + return available_models + + +def scan_models() -> List[ModelInfo]: + model_dir = os.getenv("XDG_CACHE_HOME", DEFAULT_MODEL_DIR) + available_models = [] + available_models.extend(scan_inpaint_models(model_dir)) + available_models.extend(scan_single_file_diffusion_models(model_dir)) + available_models.extend(scan_diffusers_models()) + available_models.extend(scan_converted_diffusers_models(model_dir)) + return available_models diff --git a/inpaint/file_manager/__init__.py b/inpaint/file_manager/__init__.py new file mode 100644 index 0000000..1a24998 --- /dev/null +++ b/inpaint/file_manager/__init__.py @@ -0,0 +1 @@ +from .file_manager import FileManager diff --git a/inpaint/file_manager/file_manager.py b/inpaint/file_manager/file_manager.py new file mode 100644 index 0000000..c24f54f --- /dev/null +++ b/inpaint/file_manager/file_manager.py @@ -0,0 +1,218 @@ +import os +from io import BytesIO +from pathlib import Path +from typing import List + +from PIL import Image, ImageOps, PngImagePlugin +from fastapi import FastAPI, HTTPException +from starlette.responses import FileResponse + +from ..schema import MediasResponse, MediaTab + +LARGE_ENOUGH_NUMBER = 100 +PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2) +from .storage_backends import FilesystemStorageBackend +from .utils import aspect_to_string, generate_filename, glob_img + + +class FileManager: + def __init__(self, app: FastAPI, input_dir: Path, mask_dir: Path, output_dir: Path): + self.app = app + self.input_dir: Path = input_dir + self.mask_dir: Path = mask_dir + self.output_dir: Path = output_dir + + self.image_dir_filenames = [] + self.output_dir_filenames = [] + if not 
self.thumbnail_directory.exists(): + self.thumbnail_directory.mkdir(parents=True) + + # fmt: off + self.app.add_api_route("/api/v1/medias", self.api_medias, methods=["GET"], response_model=List[MediasResponse]) + self.app.add_api_route("/api/v1/media_file", self.api_media_file, methods=["GET"]) + self.app.add_api_route("/api/v1/media_thumbnail_file", self.api_media_thumbnail_file, methods=["GET"]) + # fmt: on + + def api_medias(self, tab: MediaTab) -> List[MediasResponse]: + img_dir = self._get_dir(tab) + return self._media_names(img_dir) + + def api_media_file(self, tab: MediaTab, filename: str) -> FileResponse: + file_path = self._get_file(tab, filename) + return FileResponse(file_path, media_type="image/png") + + # tab=${tab}?filename=${filename.name}?width=${width}&height=${height} + def api_media_thumbnail_file( + self, tab: MediaTab, filename: str, width: int, height: int + ) -> FileResponse: + img_dir = self._get_dir(tab) + thumb_filename, (width, height) = self.get_thumbnail( + img_dir, filename, width=width, height=height + ) + thumbnail_filepath = self.thumbnail_directory / thumb_filename + return FileResponse( + thumbnail_filepath, + headers={ + "X-Width": str(width), + "X-Height": str(height), + }, + media_type="image/jpeg", + ) + + def _get_dir(self, tab: MediaTab) -> Path: + if tab == "input": + return self.input_dir + elif tab == "output": + return self.output_dir + elif tab == "mask": + return self.mask_dir + else: + raise HTTPException(status_code=422, detail=f"tab not found: {tab}") + + def _get_file(self, tab: MediaTab, filename: str) -> Path: + file_path = self._get_dir(tab) / filename + if not file_path.exists(): + raise HTTPException(status_code=422, detail=f"file not found: {file_path}") + return file_path + + @property + def thumbnail_directory(self) -> Path: + return self.output_dir / "thumbnails" + + @staticmethod + def _media_names(directory: Path) -> List[MediasResponse]: + names = sorted([it.name for it in glob_img(directory)]) + res = [] + for name in names: + path = os.path.join(directory, name) + img = Image.open(path) + res.append( + MediasResponse( + name=name, + height=img.height, + width=img.width, + ctime=os.path.getctime(path), + mtime=os.path.getmtime(path), + ) + ) + return res + + def get_thumbnail( + self, directory: Path, original_filename: str, width, height, **options + ): + directory = Path(directory) + storage = FilesystemStorageBackend(self.app) + crop = options.get("crop", "fit") + background = options.get("background") + quality = options.get("quality", 90) + + original_path, original_filename = os.path.split(original_filename) + original_filepath = os.path.join(directory, original_path, original_filename) + image = Image.open(BytesIO(storage.read(original_filepath))) + + # keep ratio resize + if not width and not height: + width = 256 + + if width != 0: + height = int(image.height * width / image.width) + else: + width = int(image.width * height / image.height) + + thumbnail_size = (width, height) + + thumbnail_filename = generate_filename( + directory, + original_filename, + aspect_to_string(thumbnail_size), + crop, + background, + quality, + ) + + thumbnail_filepath = os.path.join( + self.thumbnail_directory, original_path, thumbnail_filename + ) + + if storage.exists(thumbnail_filepath): + return thumbnail_filepath, (width, height) + + try: + image.load() + except (IOError, OSError): + self.app.logger.warning("Thumbnail not load image: %s", original_filepath) + return thumbnail_filepath, (width, height) + + # get original image format + 
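The FileManager above exposes exactly three GET routes; the thumbnail route takes the target size as query parameters and reports the size it actually produced in the X-Width/X-Height response headers. A minimal client sketch, assuming a server started with the default host/port of the start command and using the requests library (both are assumptions for illustration, not part of this patch):

    import requests  # illustrative client dependency, not required by the patch

    BASE = "http://127.0.0.1:8080"  # assumed: defaults of the `start` command

    # list images in the input directory
    medias = requests.get(f"{BASE}/api/v1/medias", params={"tab": "input"}).json()

    if medias:
        name = medias[0]["name"]
        # width=256, height=0 lets get_thumbnail keep the aspect ratio
        r = requests.get(
            f"{BASE}/api/v1/media_thumbnail_file",
            params={"tab": "input", "filename": name, "width": 256, "height": 0},
        )
        print(r.headers.get("X-Width"), r.headers.get("X-Height"))
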
options["format"] = options.get("format", image.format) + + image = self._create_thumbnail( + image, thumbnail_size, crop, background=background + ) + + raw_data = self.get_raw_data(image, **options) + storage.save(thumbnail_filepath, raw_data) + + return thumbnail_filepath, (width, height) + + def get_raw_data(self, image, **options): + data = { + "format": self._get_format(image, **options), + "quality": options.get("quality", 90), + } + + _file = BytesIO() + image.save(_file, **data) + return _file.getvalue() + + @staticmethod + def colormode(image, colormode="RGB"): + if colormode == "RGB" or colormode == "RGBA": + if image.mode == "RGBA": + return image + if image.mode == "LA": + return image.convert("RGBA") + return image.convert(colormode) + + if colormode == "GRAY": + return image.convert("L") + + return image.convert(colormode) + + @staticmethod + def background(original_image, color=0xFF): + size = (max(original_image.size),) * 2 + image = Image.new("L", size, color) + image.paste( + original_image, + tuple(map(lambda x: (x[0] - x[1]) / 2, zip(size, original_image.size))), + ) + + return image + + def _get_format(self, image, **options): + if options.get("format"): + return options.get("format") + if image.format: + return image.format + + return "JPEG" + + def _create_thumbnail(self, image, size, crop="fit", background=None): + try: + resample = Image.Resampling.LANCZOS + except AttributeError: # pylint: disable=raise-missing-from + resample = Image.ANTIALIAS + + if crop == "fit": + image = ImageOps.fit(image, size, resample) + else: + image = image.copy() + image.thumbnail(size, resample=resample) + + if background is not None: + image = self.background(image) + + image = self.colormode(image) + + return image diff --git a/inpaint/file_manager/storage_backends.py b/inpaint/file_manager/storage_backends.py new file mode 100644 index 0000000..3f453ad --- /dev/null +++ b/inpaint/file_manager/storage_backends.py @@ -0,0 +1,46 @@ +# Copy from https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/storage_backends.py +import errno +import os +from abc import ABC, abstractmethod + + +class BaseStorageBackend(ABC): + def __init__(self, app=None): + self.app = app + + @abstractmethod + def read(self, filepath, mode="rb", **kwargs): + raise NotImplementedError + + @abstractmethod + def exists(self, filepath): + raise NotImplementedError + + @abstractmethod + def save(self, filepath, data): + raise NotImplementedError + + +class FilesystemStorageBackend(BaseStorageBackend): + def read(self, filepath, mode="rb", **kwargs): + with open(filepath, mode) as f: # pylint: disable=unspecified-encoding + return f.read() + + def exists(self, filepath): + return os.path.exists(filepath) + + def save(self, filepath, data): + directory = os.path.dirname(filepath) + + if not os.path.exists(directory): + try: + os.makedirs(directory) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + if not os.path.isdir(directory): + raise IOError("{} is not a directory".format(directory)) + + with open(filepath, "wb") as f: + f.write(data) diff --git a/inpaint/file_manager/utils.py b/inpaint/file_manager/utils.py new file mode 100644 index 0000000..f6890af --- /dev/null +++ b/inpaint/file_manager/utils.py @@ -0,0 +1,65 @@ +# Copy from: https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/utils.py +import hashlib +from pathlib import Path + +from typing import Union + + +def generate_filename(directory: Path, original_filename, *options) -> str: + text = 
str(directory.absolute()) + original_filename + for v in options: + text += "%s" % v + md5_hash = hashlib.md5() + md5_hash.update(text.encode("utf-8")) + return md5_hash.hexdigest() + ".jpg" + + +def parse_size(size): + if isinstance(size, int): + # If the size parameter is a single number, assume square aspect. + return [size, size] + + if isinstance(size, (tuple, list)): + if len(size) == 1: + # If single value tuple/list is provided, exand it to two elements + return size + type(size)(size) + return size + + try: + thumbnail_size = [int(x) for x in size.lower().split("x", 1)] + except ValueError: + raise ValueError( # pylint: disable=raise-missing-from + "Bad thumbnail size format. Valid format is INTxINT." + ) + + if len(thumbnail_size) == 1: + # If the size parameter only contains a single integer, assume square aspect. + thumbnail_size.append(thumbnail_size[0]) + + return thumbnail_size + + +def aspect_to_string(size): + if isinstance(size, str): + return size + + return "x".join(map(str, size)) + + +IMG_SUFFIX = {".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG"} + + +def glob_img(p: Union[Path, str], recursive: bool = False): + p = Path(p) + if p.is_file() and p.suffix in IMG_SUFFIX: + yield p + else: + if recursive: + files = Path(p).glob("**/*.*") + else: + files = Path(p).glob("*.*") + + for it in files: + if it.suffix not in IMG_SUFFIX: + continue + yield it diff --git a/inpaint/helper.py b/inpaint/helper.py new file mode 100644 index 0000000..c2c0c48 --- /dev/null +++ b/inpaint/helper.py @@ -0,0 +1,408 @@ +import base64 +import imghdr +import io +import os +import sys +from typing import List, Optional, Dict, Tuple + +from urllib.parse import urlparse +import cv2 +from PIL import Image, ImageOps, PngImagePlugin +import numpy as np +import torch +from inpaint.const import MPS_UNSUPPORT_MODELS +from loguru import logger +from torch.hub import download_url_to_file, get_dir +import hashlib + + +def md5sum(filename): + md5 = hashlib.md5() + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(128 * md5.block_size), b""): + md5.update(chunk) + return md5.hexdigest() + + +def switch_mps_device(model_name, device): + if model_name in MPS_UNSUPPORT_MODELS and str(device) == "mps": + logger.info(f"{model_name} not support mps, switch to cpu") + return torch.device("cpu") + return device + + +def get_cache_path_by_url(url): + parts = urlparse(url) + hub_dir = get_dir() + model_dir = os.path.join(hub_dir, "checkpoints") + if not os.path.isdir(model_dir): + os.makedirs(model_dir) + filename = os.path.basename(parts.path) + cached_file = os.path.join(model_dir, filename) + return cached_file + + +def download_model(url, model_md5: str = None): + if os.path.exists(url): + cached_file = url + else: + cached_file = get_cache_path_by_url(url) + if not os.path.exists(cached_file): + sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + download_url_to_file(url, cached_file, hash_prefix, progress=True) + if model_md5: + _md5 = md5sum(cached_file) + if model_md5 == _md5: + logger.info(f"Download model success, md5: {_md5}") + else: + try: + os.remove(cached_file) + logger.error( + f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint." + f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n" + ) + except: + logger.error( + f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint." 
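download_model above stores checkpoints under torch.hub's checkpoint directory, keyed by the file name in the URL, and removes the file again when the md5 check fails. A small sketch of how the cache path is derived (the URL is a placeholder and the printed path depends on TORCH_HOME/XDG_CACHE_HOME):

    import os
    from urllib.parse import urlparse

    from torch.hub import get_dir

    url = "https://example.com/models/big-lama.pt"  # placeholder URL
    filename = os.path.basename(urlparse(url).path)
    cached_file = os.path.join(get_dir(), "checkpoints", filename)
    print(cached_file)  # e.g. ~/.cache/torch/hub/checkpoints/big-lama.pt
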
+ ) + exit(-1) + + return cached_file + + +def ceil_modulo(x, mod): + if x % mod == 0: + return x + return (x // mod + 1) * mod + + +def handle_error(model_path, model_md5, e): + _md5 = md5sum(model_path) + if _md5 != model_md5: + try: + os.remove(model_path) + logger.error( + f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint." + f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n" + ) + except: + logger.error( + f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint." + ) + else: + logger.error( + f"Failed to load model {model_path}," + f"please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:\n{e}" + ) + exit(-1) + + +def load_jit_model(url_or_path, device, model_md5: str): + if os.path.exists(url_or_path): + model_path = url_or_path + else: + model_path = download_model(url_or_path, model_md5) + + logger.info(f"Loading model from: {model_path}") + try: + model = torch.jit.load(model_path, map_location="cpu").to(device) + except Exception as e: + handle_error(model_path, model_md5, e) + model.eval() + return model + + +def load_model(model: torch.nn.Module, url_or_path, device, model_md5): + if os.path.exists(url_or_path): + model_path = url_or_path + else: + model_path = download_model(url_or_path, model_md5) + + try: + logger.info(f"Loading model from: {model_path}") + state_dict = torch.load(model_path, map_location="cpu") + model.load_state_dict(state_dict, strict=True) + model.to(device) + except Exception as e: + handle_error(model_path, model_md5, e) + model.eval() + return model + + +def numpy_to_bytes(image_numpy: np.ndarray, ext: str) -> bytes: + data = cv2.imencode( + f".{ext}", + image_numpy, + [int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0], + )[1] + image_bytes = data.tobytes() + return image_bytes + + +def pil_to_bytes(pil_img, ext: str, quality: int = 95, infos={}) -> bytes: + with io.BytesIO() as output: + kwargs = {k: v for k, v in infos.items() if v is not None} + if ext == "jpg": + ext = "jpeg" + if "png" == ext.lower() and "parameters" in kwargs: + pnginfo_data = PngImagePlugin.PngInfo() + pnginfo_data.add_text("parameters", kwargs["parameters"]) + kwargs["pnginfo"] = pnginfo_data + + pil_img.save(output, format=ext, quality=quality, **kwargs) + image_bytes = output.getvalue() + return image_bytes + + +def load_img(img_bytes, gray: bool = False, return_info: bool = False): + alpha_channel = None + image = Image.open(io.BytesIO(img_bytes)) + + if return_info: + infos = image.info + + try: + image = ImageOps.exif_transpose(image) + except: + pass + + if gray: + image = image.convert("L") + np_img = np.array(image) + else: + if image.mode == "RGBA": + np_img = np.array(image) + alpha_channel = np_img[:, :, -1] + np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB) + else: + image = image.convert("RGB") + np_img = np.array(image) + + if return_info: + return np_img, alpha_channel, infos + return np_img, alpha_channel + + +def norm_img(np_img): + if len(np_img.shape) == 2: + np_img = np_img[:, :, np.newaxis] + np_img = np.transpose(np_img, (2, 0, 1)) + np_img = np_img.astype("float32") / 255 + return np_img + + +def resize_max_size( + np_img, size_limit: int, interpolation=cv2.INTER_CUBIC +) -> np.ndarray: + # Resize image's longer size to size_limit if longer size larger than size_limit + h, w = np_img.shape[:2] + if max(h, 
w) > size_limit: + ratio = size_limit / max(h, w) + new_w = int(w * ratio + 0.5) + new_h = int(h * ratio + 0.5) + return cv2.resize(np_img, dsize=(new_w, new_h), interpolation=interpolation) + else: + return np_img + + +def pad_img_to_modulo( + img: np.ndarray, mod: int, square: bool = False, min_size: Optional[int] = None +): + """ + + Args: + img: [H, W, C] + mod: + square: 是否为正方形 + min_size: + + Returns: + + """ + if len(img.shape) == 2: + img = img[:, :, np.newaxis] + height, width = img.shape[:2] + out_height = ceil_modulo(height, mod) + out_width = ceil_modulo(width, mod) + + if min_size is not None: + assert min_size % mod == 0 + out_width = max(min_size, out_width) + out_height = max(min_size, out_height) + + if square: + max_size = max(out_height, out_width) + out_height = max_size + out_width = max_size + + return np.pad( + img, + ((0, out_height - height), (0, out_width - width), (0, 0)), + mode="symmetric", + ) + + +def boxes_from_mask(mask: np.ndarray) -> List[np.ndarray]: + """ + Args: + mask: (h, w, 1) 0~255 + + Returns: + + """ + height, width = mask.shape[:2] + _, thresh = cv2.threshold(mask, 127, 255, 0) + contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + boxes = [] + for cnt in contours: + x, y, w, h = cv2.boundingRect(cnt) + box = np.array([x, y, x + w, y + h]).astype(int) + + box[::2] = np.clip(box[::2], 0, width) + box[1::2] = np.clip(box[1::2], 0, height) + boxes.append(box) + + return boxes + + +def only_keep_largest_contour(mask: np.ndarray) -> List[np.ndarray]: + """ + Args: + mask: (h, w) 0~255 + + Returns: + + """ + _, thresh = cv2.threshold(mask, 127, 255, 0) + contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + max_area = 0 + max_index = -1 + for i, cnt in enumerate(contours): + area = cv2.contourArea(cnt) + if area > max_area: + max_area = area + max_index = i + + if max_index != -1: + new_mask = np.zeros_like(mask) + return cv2.drawContours(new_mask, contours, max_index, 255, -1) + else: + return mask + + +def is_mac(): + return sys.platform == "darwin" + + +def get_image_ext(img_bytes): + w = imghdr.what("", img_bytes) + if w is None: + w = "jpeg" + return w + + +def decode_base64_to_image( + encoding: str, gray=False +) -> Tuple[np.array, Optional[np.array], Dict]: + if encoding.startswith("data:image/") or encoding.startswith( + "data:application/octet-stream;base64," + ): + encoding = encoding.split(";")[1].split(",")[1] + image = Image.open(io.BytesIO(base64.b64decode(encoding))) + + alpha_channel = None + try: + image = ImageOps.exif_transpose(image) + except: + pass + # exif_transpose will remove exif rotate info,we must call image.info after exif_transpose + infos = image.info + + if gray: + image = image.convert("L") + np_img = np.array(image) + else: + if image.mode == "RGBA": + np_img = np.array(image) + alpha_channel = np_img[:, :, -1] + np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB) + else: + image = image.convert("RGB") + np_img = np.array(image) + + return np_img, alpha_channel, infos + + +def encode_pil_to_base64(image: Image, quality: int, infos: Dict) -> bytes: + img_bytes = pil_to_bytes( + image, + "png", + quality=quality, + infos=infos, + ) + return base64.b64encode(img_bytes) + + +def concat_alpha_channel(rgb_np_img, alpha_channel) -> np.ndarray: + if alpha_channel is not None: + if alpha_channel.shape[:2] != rgb_np_img.shape[:2]: + alpha_channel = cv2.resize( + alpha_channel, dsize=(rgb_np_img.shape[1], rgb_np_img.shape[0]) + ) + rgb_np_img = np.concatenate( + 
(rgb_np_img, alpha_channel[:, :, np.newaxis]), axis=-1 + ) + return rgb_np_img + + +def adjust_mask(mask: np.ndarray, kernel_size: int, operate): + # fronted brush color "ffcc00bb" + # kernel_size = kernel_size*2+1 + mask[mask >= 127] = 255 + mask[mask < 127] = 0 + + if operate == "reverse": + mask = 255 - mask + else: + kernel = cv2.getStructuringElement( + cv2.MORPH_ELLIPSE, (2 * kernel_size + 1, 2 * kernel_size + 1) + ) + if operate == "expand": + mask = cv2.dilate( + mask, + kernel, + iterations=1, + ) + else: + mask = cv2.erode( + mask, + kernel, + iterations=1, + ) + res_mask = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8) + res_mask[mask > 128] = [255, 203, 0, int(255 * 0.73)] + res_mask = cv2.cvtColor(res_mask, cv2.COLOR_BGRA2RGBA) + return res_mask + + +def gen_frontend_mask(bgr_or_gray_mask): + if len(bgr_or_gray_mask.shape) == 3 and bgr_or_gray_mask.shape[2] != 1: + bgr_or_gray_mask = cv2.cvtColor(bgr_or_gray_mask, cv2.COLOR_BGR2GRAY) + + # fronted brush color "ffcc00bb" + # TODO: how to set kernel size? + kernel_size = 9 + bgr_or_gray_mask = cv2.dilate( + bgr_or_gray_mask, + np.ones((kernel_size, kernel_size), np.uint8), + iterations=1, + ) + res_mask = np.zeros( + (bgr_or_gray_mask.shape[0], bgr_or_gray_mask.shape[1], 4), dtype=np.uint8 + ) + res_mask[bgr_or_gray_mask > 128] = [255, 203, 0, int(255 * 0.73)] + res_mask = cv2.cvtColor(res_mask, cv2.COLOR_BGRA2RGBA) + return res_mask diff --git a/inpaint/installer.py b/inpaint/installer.py new file mode 100644 index 0000000..01506d9 --- /dev/null +++ b/inpaint/installer.py @@ -0,0 +1,10 @@ +import subprocess +import sys + + +def install(package): + subprocess.check_call([sys.executable, "-m", "pip", "install", package]) + + +def install_plugins_package(): + install("rembg") diff --git a/inpaint/model/__init__.py b/inpaint/model/__init__.py new file mode 100644 index 0000000..799e2ec --- /dev/null +++ b/inpaint/model/__init__.py @@ -0,0 +1,37 @@ +from .anytext.anytext_model import AnyText +from .controlnet import ControlNet +from .fcf import FcF +from .instruct_pix2pix import InstructPix2Pix +from .kandinsky import Kandinsky22 +from .lama import LaMa +from .ldm import LDM +from .manga import Manga +from .mat import MAT +from .mi_gan import MIGAN +from .opencv2 import OpenCV2 +from .paint_by_example import PaintByExample +from .power_paint.power_paint import PowerPaint +from .sd import SD15, SD2, Anything4, RealisticVision14, SD +from .sdxl import SDXL +from .zits import ZITS + +models = { + LaMa.name: LaMa, + LDM.name: LDM, + ZITS.name: ZITS, + MAT.name: MAT, + FcF.name: FcF, + OpenCV2.name: OpenCV2, + Manga.name: Manga, + MIGAN.name: MIGAN, + SD15.name: SD15, + Anything4.name: Anything4, + RealisticVision14.name: RealisticVision14, + SD2.name: SD2, + PaintByExample.name: PaintByExample, + InstructPix2Pix.name: InstructPix2Pix, + Kandinsky22.name: Kandinsky22, + SDXL.name: SDXL, + PowerPaint.name: PowerPaint, + AnyText.name: AnyText, +} diff --git a/inpaint/model/anytext/__init__.py b/inpaint/model/anytext/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/anytext_model.py b/inpaint/model/anytext/anytext_model.py new file mode 100644 index 0000000..7a687d5 --- /dev/null +++ b/inpaint/model/anytext/anytext_model.py @@ -0,0 +1,73 @@ +import torch +from huggingface_hub import hf_hub_download + +from inpaint.const import ANYTEXT_NAME +from inpaint.model.anytext.anytext_pipeline import AnyTextPipeline +from inpaint.model.base import DiffusionInpaintModel +from inpaint.model.utils 
import get_torch_dtype, is_local_files_only +from inpaint.schema import InpaintRequest + + +class AnyText(DiffusionInpaintModel): + name = ANYTEXT_NAME + pad_mod = 64 + is_erase_model = False + + @staticmethod + def download(local_files_only=False): + hf_hub_download( + repo_id=ANYTEXT_NAME, + filename="model_index.json", + local_files_only=local_files_only, + ) + ckpt_path = hf_hub_download( + repo_id=ANYTEXT_NAME, + filename="pytorch_model.fp16.safetensors", + local_files_only=local_files_only, + ) + font_path = hf_hub_download( + repo_id=ANYTEXT_NAME, + filename="SourceHanSansSC-Medium.otf", + local_files_only=local_files_only, + ) + return ckpt_path, font_path + + def init_model(self, device, **kwargs): + local_files_only = is_local_files_only(**kwargs) + ckpt_path, font_path = self.download(local_files_only) + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + self.model = AnyTextPipeline( + ckpt_path=ckpt_path, + font_path=font_path, + device=device, + use_fp16=torch_dtype == torch.float16, + ) + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to inpainting + return: BGR IMAGE + """ + height, width = image.shape[:2] + mask = mask.astype("float32") / 255.0 + masked_image = image * (1 - mask) + + # list of rgb ndarray + results, rtn_code, rtn_warning = self.model( + image=image, + masked_image=masked_image, + prompt=config.prompt, + negative_prompt=config.negative_prompt, + num_inference_steps=config.sd_steps, + strength=config.sd_strength, + guidance_scale=config.sd_guidance_scale, + height=height, + width=width, + seed=config.sd_seed, + sort_priority="y", + callback=self.callback + ) + inpainted_rgb_image = results[0][..., ::-1] + return inpainted_rgb_image diff --git a/inpaint/model/anytext/anytext_pipeline.py b/inpaint/model/anytext/anytext_pipeline.py new file mode 100644 index 0000000..8571728 --- /dev/null +++ b/inpaint/model/anytext/anytext_pipeline.py @@ -0,0 +1,403 @@ +""" +AnyText: Multilingual Visual Text Generation And Editing +Paper: https://arxiv.org/abs/2311.03054 +Code: https://github.com/tyxsspa/AnyText +Copyright (c) Alibaba, Inc. and its affiliates. 
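AnyText.forward above only normalizes the mask to [0, 1] and blanks the masked area before handing image and masked_image to the pipeline. The same preprocessing in isolation, with dummy arrays standing in for a real image and mask:

    import numpy as np

    # dummy 64x64 RGB image and a 0/255 single-channel mask (placeholders)
    image = np.full((64, 64, 3), 200, dtype=np.uint8)
    mask = np.zeros((64, 64, 1), dtype=np.uint8)
    mask[16:48, 16:48] = 255  # area to inpaint

    mask = mask.astype("float32") / 255.0   # 0.0 = keep, 1.0 = inpaint
    masked_image = image * (1 - mask)       # masked pixels are zeroed out
    print(masked_image[32, 32], masked_image[0, 0])  # [0. 0. 0.] [200. 200. 200.]
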
+""" +import os +from pathlib import Path + +from inpaint.model.utils import set_seed +from safetensors.torch import load_file + +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" +import torch +import re +import numpy as np +import cv2 +import einops +from PIL import ImageFont +from inpaint.model.anytext.cldm.model import create_model, load_state_dict +from inpaint.model.anytext.cldm.ddim_hacked import DDIMSampler +from inpaint.model.anytext.utils import ( + check_channels, + draw_glyph, + draw_glyph2, +) + + +BBOX_MAX_NUM = 8 +PLACE_HOLDER = "*" +max_chars = 20 + +ANYTEXT_CFG = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "anytext_sd15.yaml" +) + + +def check_limits(tensor): + float16_min = torch.finfo(torch.float16).min + float16_max = torch.finfo(torch.float16).max + + # 检查张量中是否有值小于float16的最小值或大于float16的最大值 + is_below_min = (tensor < float16_min).any() + is_above_max = (tensor > float16_max).any() + + return is_below_min or is_above_max + + +class AnyTextPipeline: + def __init__(self, ckpt_path, font_path, device, use_fp16=True): + self.cfg_path = ANYTEXT_CFG + self.font_path = font_path + self.use_fp16 = use_fp16 + self.device = device + + self.font = ImageFont.truetype(font_path, size=60) + self.model = create_model( + self.cfg_path, + device=self.device, + use_fp16=self.use_fp16, + ) + if self.use_fp16: + self.model = self.model.half() + if Path(ckpt_path).suffix == ".safetensors": + state_dict = load_file(ckpt_path, device="cpu") + else: + state_dict = load_state_dict(ckpt_path, location="cpu") + self.model.load_state_dict(state_dict, strict=False) + self.model = self.model.eval().to(self.device) + self.ddim_sampler = DDIMSampler(self.model, device=self.device) + + def __call__( + self, + prompt: str, + negative_prompt: str, + image: np.ndarray, + masked_image: np.ndarray, + num_inference_steps: int, + strength: float, + guidance_scale: float, + height: int, + width: int, + seed: int, + sort_priority: str = "y", + callback=None, + ): + """ + + Args: + prompt: + negative_prompt: + image: + masked_image: + num_inference_steps: + strength: + guidance_scale: + height: + width: + seed: + sort_priority: x: left-right, y: top-down + + Returns: + result: list of images in numpy.ndarray format + rst_code: 0: normal -1: error 1:warning + rst_info: string of error or warning + + """ + set_seed(seed) + str_warning = "" + + mode = "text-editing" + revise_pos = False + img_count = 1 + ddim_steps = num_inference_steps + w = width + h = height + strength = strength + cfg_scale = guidance_scale + eta = 0.0 + + prompt, texts = self.modify_prompt(prompt) + if prompt is None and texts is None: + return ( + None, + -1, + "You have input Chinese prompt but the translator is not loaded!", + "", + ) + n_lines = len(texts) + if mode in ["text-generation", "gen"]: + edit_image = np.ones((h, w, 3)) * 127.5 # empty mask image + elif mode in ["text-editing", "edit"]: + if masked_image is None or image is None: + return ( + None, + -1, + "Reference image and position image are needed for text editing!", + "", + ) + if isinstance(image, str): + image = cv2.imread(image)[..., ::-1] + assert image is not None, f"Can't read ori_image image from{image}!" 
+ elif isinstance(image, torch.Tensor): + image = image.cpu().numpy() + else: + assert isinstance( + image, np.ndarray + ), f"Unknown format of ori_image: {type(image)}" + edit_image = image.clip(1, 255) # for mask reason + edit_image = check_channels(edit_image) + # edit_image = resize_image( + # edit_image, max_length=768 + # ) # make w h multiple of 64, resize if w or h > max_length + h, w = edit_image.shape[:2] # change h, w by input ref_img + # preprocess pos_imgs(if numpy, make sure it's white pos in black bg) + if masked_image is None: + pos_imgs = np.zeros((w, h, 1)) + if isinstance(masked_image, str): + masked_image = cv2.imread(masked_image)[..., ::-1] + assert ( + masked_image is not None + ), f"Can't read draw_pos image from{masked_image}!" + pos_imgs = 255 - masked_image + elif isinstance(masked_image, torch.Tensor): + pos_imgs = masked_image.cpu().numpy() + else: + assert isinstance( + masked_image, np.ndarray + ), f"Unknown format of draw_pos: {type(masked_image)}" + pos_imgs = 255 - masked_image + pos_imgs = pos_imgs[..., 0:1] + pos_imgs = cv2.convertScaleAbs(pos_imgs) + _, pos_imgs = cv2.threshold(pos_imgs, 254, 255, cv2.THRESH_BINARY) + # seprate pos_imgs + pos_imgs = self.separate_pos_imgs(pos_imgs, sort_priority) + if len(pos_imgs) == 0: + pos_imgs = [np.zeros((h, w, 1))] + if len(pos_imgs) < n_lines: + if n_lines == 1 and texts[0] == " ": + pass # text-to-image without text + else: + raise RuntimeError( + f"{n_lines} text line to draw from prompt, not enough mask area({len(pos_imgs)}) on images" + ) + elif len(pos_imgs) > n_lines: + str_warning = f"Warning: found {len(pos_imgs)} positions that > needed {n_lines} from prompt." + # get pre_pos, poly_list, hint that needed for anytext + pre_pos = [] + poly_list = [] + for input_pos in pos_imgs: + if input_pos.mean() != 0: + input_pos = ( + input_pos[..., np.newaxis] + if len(input_pos.shape) == 2 + else input_pos + ) + poly, pos_img = self.find_polygon(input_pos) + pre_pos += [pos_img / 255.0] + poly_list += [poly] + else: + pre_pos += [np.zeros((h, w, 1))] + poly_list += [None] + np_hint = np.sum(pre_pos, axis=0).clip(0, 1) + # prepare info dict + info = {} + info["glyphs"] = [] + info["gly_line"] = [] + info["positions"] = [] + info["n_lines"] = [len(texts)] * img_count + gly_pos_imgs = [] + for i in range(len(texts)): + text = texts[i] + if len(text) > max_chars: + str_warning = ( + f'"{text}" length > max_chars: {max_chars}, will be cut off...' + ) + text = text[:max_chars] + gly_scale = 2 + if pre_pos[i].mean() != 0: + gly_line = draw_glyph(self.font, text) + glyphs = draw_glyph2( + self.font, + text, + poly_list[i], + scale=gly_scale, + width=w, + height=h, + add_space=False, + ) + gly_pos_img = cv2.drawContours( + glyphs * 255, [poly_list[i] * gly_scale], 0, (255, 255, 255), 1 + ) + if revise_pos: + resize_gly = cv2.resize( + glyphs, (pre_pos[i].shape[1], pre_pos[i].shape[0]) + ) + new_pos = cv2.morphologyEx( + (resize_gly * 255).astype(np.uint8), + cv2.MORPH_CLOSE, + kernel=np.ones( + (resize_gly.shape[0] // 10, resize_gly.shape[1] // 10), + dtype=np.uint8, + ), + iterations=1, + ) + new_pos = ( + new_pos[..., np.newaxis] if len(new_pos.shape) == 2 else new_pos + ) + contours, _ = cv2.findContours( + new_pos, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE + ) + if len(contours) != 1: + str_warning = f"Fail to revise position {i} to bounding rect, remain position unchanged..." 
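The loop above keeps one normalized position map per text line in pre_pos, and np_hint is simply the clipped sum of those maps, i.e. the union of all text regions. With dummy masks:

    import numpy as np

    h, w = 64, 64
    pos_a = np.zeros((h, w, 1), dtype=np.float32)
    pos_a[10:20, 5:60] = 255   # first text line
    pos_b = np.zeros((h, w, 1), dtype=np.float32)
    pos_b[40:50, 5:60] = 255   # second text line

    pre_pos = [pos_a / 255.0, pos_b / 255.0]      # one map per line
    np_hint = np.sum(pre_pos, axis=0).clip(0, 1)  # union of all line positions
    print(np_hint.shape, np_hint[15, 30, 0], np_hint[0, 0, 0])  # (64, 64, 1) 1.0 0.0
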
+ else: + rect = cv2.minAreaRect(contours[0]) + poly = np.int0(cv2.boxPoints(rect)) + pre_pos[i] = ( + cv2.drawContours(new_pos, [poly], -1, 255, -1) / 255.0 + ) + gly_pos_img = cv2.drawContours( + glyphs * 255, [poly * gly_scale], 0, (255, 255, 255), 1 + ) + gly_pos_imgs += [gly_pos_img] # for show + else: + glyphs = np.zeros((h * gly_scale, w * gly_scale, 1)) + gly_line = np.zeros((80, 512, 1)) + gly_pos_imgs += [ + np.zeros((h * gly_scale, w * gly_scale, 1)) + ] # for show + pos = pre_pos[i] + info["glyphs"] += [self.arr2tensor(glyphs, img_count)] + info["gly_line"] += [self.arr2tensor(gly_line, img_count)] + info["positions"] += [self.arr2tensor(pos, img_count)] + # get masked_x + masked_img = ((edit_image.astype(np.float32) / 127.5) - 1.0) * (1 - np_hint) + masked_img = np.transpose(masked_img, (2, 0, 1)) + masked_img = torch.from_numpy(masked_img.copy()).float().to(self.device) + if self.use_fp16: + masked_img = masked_img.half() + encoder_posterior = self.model.encode_first_stage(masked_img[None, ...]) + masked_x = self.model.get_first_stage_encoding(encoder_posterior).detach() + if self.use_fp16: + masked_x = masked_x.half() + info["masked_x"] = torch.cat([masked_x for _ in range(img_count)], dim=0) + + hint = self.arr2tensor(np_hint, img_count) + cond = self.model.get_learned_conditioning( + dict( + c_concat=[hint], + c_crossattn=[[prompt] * img_count], + text_info=info, + ) + ) + un_cond = self.model.get_learned_conditioning( + dict( + c_concat=[hint], + c_crossattn=[[negative_prompt] * img_count], + text_info=info, + ) + ) + shape = (4, h // 8, w // 8) + self.model.control_scales = [strength] * 13 + samples, intermediates = self.ddim_sampler.sample( + ddim_steps, + img_count, + shape, + cond, + verbose=False, + eta=eta, + unconditional_guidance_scale=cfg_scale, + unconditional_conditioning=un_cond, + callback=callback + ) + if self.use_fp16: + samples = samples.half() + x_samples = self.model.decode_first_stage(samples) + x_samples = ( + (einops.rearrange(x_samples, "b c h w -> b h w c") * 127.5 + 127.5) + .cpu() + .numpy() + .clip(0, 255) + .astype(np.uint8) + ) + results = [x_samples[i] for i in range(img_count)] + # if ( + # mode == "edit" and False + # ): # replace backgound in text editing but not ideal yet + # results = [r * np_hint + edit_image * (1 - np_hint) for r in results] + # results = [r.clip(0, 255).astype(np.uint8) for r in results] + # if len(gly_pos_imgs) > 0 and show_debug: + # glyph_bs = np.stack(gly_pos_imgs, axis=2) + # glyph_img = np.sum(glyph_bs, axis=2) * 255 + # glyph_img = glyph_img.clip(0, 255).astype(np.uint8) + # results += [np.repeat(glyph_img, 3, axis=2)] + rst_code = 1 if str_warning else 0 + return results, rst_code, str_warning + + def modify_prompt(self, prompt): + prompt = prompt.replace("“", '"') + prompt = prompt.replace("”", '"') + p = '"(.*?)"' + strs = re.findall(p, prompt) + if len(strs) == 0: + strs = [" "] + else: + for s in strs: + prompt = prompt.replace(f'"{s}"', f" {PLACE_HOLDER} ", 1) + # if self.is_chinese(prompt): + # if self.trans_pipe is None: + # return None, None + # old_prompt = prompt + # prompt = self.trans_pipe(input=prompt + " .")["translation"][:-1] + # print(f"Translate: {old_prompt} --> {prompt}") + return prompt, strs + + # def is_chinese(self, text): + # text = checker._clean_text(text) + # for char in text: + # cp = ord(char) + # if checker._is_chinese_char(cp): + # return True + # return False + + def separate_pos_imgs(self, img, sort_priority, gap=102): + num_labels, labels, stats, centroids = 
cv2.connectedComponentsWithStats(img) + components = [] + for label in range(1, num_labels): + component = np.zeros_like(img) + component[labels == label] = 255 + components.append((component, centroids[label])) + if sort_priority == "y": + fir, sec = 1, 0 # top-down first + elif sort_priority == "x": + fir, sec = 0, 1 # left-right first + components.sort(key=lambda c: (c[1][fir] // gap, c[1][sec] // gap)) + sorted_components = [c[0] for c in components] + return sorted_components + + def find_polygon(self, image, min_rect=False): + contours, hierarchy = cv2.findContours( + image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE + ) + max_contour = max(contours, key=cv2.contourArea) # get contour with max area + if min_rect: + # get minimum enclosing rectangle + rect = cv2.minAreaRect(max_contour) + poly = np.int0(cv2.boxPoints(rect)) + else: + # get approximate polygon + epsilon = 0.01 * cv2.arcLength(max_contour, True) + poly = cv2.approxPolyDP(max_contour, epsilon, True) + n, _, xy = poly.shape + poly = poly.reshape(n, xy) + cv2.drawContours(image, [poly], -1, 255, -1) + return poly, image + + def arr2tensor(self, arr, bs): + arr = np.transpose(arr, (2, 0, 1)) + _arr = torch.from_numpy(arr.copy()).float().to(self.device) + if self.use_fp16: + _arr = _arr.half() + _arr = torch.stack([_arr for _ in range(bs)], dim=0) + return _arr diff --git a/inpaint/model/anytext/anytext_sd15.yaml b/inpaint/model/anytext/anytext_sd15.yaml new file mode 100644 index 0000000..d727594 --- /dev/null +++ b/inpaint/model/anytext/anytext_sd15.yaml @@ -0,0 +1,99 @@ +model: + target: iopaint.model.anytext.cldm.cldm.ControlLDM + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "img" + cond_stage_key: "caption" + control_key: "hint" + glyph_key: "glyphs" + position_key: "positions" + image_size: 64 + channels: 4 + cond_stage_trainable: true # need be true when embedding_manager is valid + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + only_mid_control: False + loss_alpha: 0 # perceptual loss, 0.003 + loss_beta: 0 # ctc loss + latin_weight: 1.0 # latin text line may need smaller weigth + with_step_weight: true + use_vae_upsample: true + embedding_manager_config: + target: iopaint.model.anytext.cldm.embedding_manager.EmbeddingManager + params: + valid: true # v6 + emb_type: ocr # ocr, vit, conv + glyph_channels: 1 + position_channels: 1 + add_pos: false + placeholder_string: '*' + + control_stage_config: + target: iopaint.model.anytext.cldm.cldm.ControlNet + params: + image_size: 32 # unused + in_channels: 4 + model_channels: 320 + glyph_channels: 1 + position_channels: 1 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + unet_config: + target: iopaint.model.anytext.cldm.cldm.ControlledUnetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: iopaint.model.anytext.ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + 
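separate_pos_imgs above splits the combined position mask into one image per connected region and orders the regions top-down (or left-right) by bucketing their centroids with a 102-pixel gap. A reduced sketch of the same idea on a dummy mask:

    import cv2
    import numpy as np

    img = np.zeros((128, 128), dtype=np.uint8)  # placeholder position mask
    img[10:30, 10:50] = 255
    img[80:100, 10:50] = 255

    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img)
    components = []
    for label in range(1, num_labels):  # label 0 is the background
        comp = np.zeros_like(img)
        comp[labels == label] = 255
        components.append((comp, centroids[label]))

    gap = 102
    # sort_priority="y": top-down first, then left-right within a band
    components.sort(key=lambda c: (c[1][1] // gap, c[1][0] // gap))
    print([tuple(int(v) for v in c[1]) for c in components])
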
out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedderT3 + params: + version: openai/clip-vit-large-patch14 + use_vision: false # v6 diff --git a/inpaint/model/anytext/cldm/__init__.py b/inpaint/model/anytext/cldm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/cldm/cldm.py b/inpaint/model/anytext/cldm/cldm.py new file mode 100644 index 0000000..ad9692a --- /dev/null +++ b/inpaint/model/anytext/cldm/cldm.py @@ -0,0 +1,630 @@ +import os +from pathlib import Path + +import einops +import torch +import torch as th +import torch.nn as nn +import copy +from easydict import EasyDict as edict + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + conv_nd, + linear, + zero_module, + timestep_embedding, +) + +from einops import rearrange, repeat +from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer +from iopaint.model.anytext.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock +from iopaint.model.anytext.ldm.models.diffusion.ddpm import LatentDiffusion +from iopaint.model.anytext.ldm.util import log_txt_as_img, exists, instantiate_from_config +from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler +from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution +from .recognizer import TextRecognizer, create_predictor + +CURRENT_DIR = Path(os.path.dirname(os.path.abspath(__file__))) + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +class ControlledUnetModel(UNetModel): + def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): + hs = [] + with torch.no_grad(): + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + if self.use_fp16: + t_emb = t_emb.half() + emb = self.time_embed(t_emb) + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + + if control is not None: + h += control.pop() + + for i, module in enumerate(self.output_blocks): + if only_mid_control or control is None: + h = torch.cat([h, hs.pop()], dim=1) + else: + h = torch.cat([h, hs.pop() + control.pop()], dim=1) + h = module(h, emb, context) + + h = h.type(x.dtype) + return self.out(h) + + +class ControlNet(nn.Module): + def __init__( + self, + image_size, + in_channels, + model_channels, + glyph_channels, + position_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None, + disable_middle_self_attn=False, + use_linear_in_transformer=False, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim 
is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' + from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + self.dims = dims + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError("provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult") + self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) + print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set.") + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.use_fp16 = use_fp16 + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) + + self.glyph_block = TimestepEmbedSequential( + conv_nd(dims, glyph_channels, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 16, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 16, 16, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 16, 32, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 32, 32, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 32, 96, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 96, 96, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 96, 256, 3, padding=1, stride=2), + nn.SiLU(), + ) + + self.position_block = TimestepEmbedSequential( + conv_nd(dims, position_channels, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 16, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 16, 16, 3, padding=1), + nn.SiLU(), + 
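ControlledUnetModel.forward above injects the control signal by popping residuals off a list: the last entry is added to the middle-block output and the rest are summed into the skip connections. Those residuals come out of zero-initialized convolutions, so right after initialization the control branch leaves the frozen UNet unchanged. A minimal sketch of that property in plain torch (the 320 channels mirror model_channels in the config above):

    import torch
    import torch.nn as nn

    def zero_module(module: nn.Module) -> nn.Module:
        # same behaviour as the zero_module helper imported above:
        # start every parameter at exactly zero
        for p in module.parameters():
            nn.init.zeros_(p)
        return module

    zero_conv = zero_module(nn.Conv2d(320, 320, kernel_size=1, padding=0))
    h = torch.randn(1, 320, 32, 32)   # stand-in for a UNet feature map
    control = zero_conv(h)            # all zeros right after init

    print(bool(torch.all(control == 0)))  # True
    print(torch.equal(h + control, h))    # True: the control branch is a no-op
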
conv_nd(dims, 16, 32, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 32, 32, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 32, 64, 3, padding=1, stride=2), + nn.SiLU(), + ) + + self.fuse_block = zero_module(conv_nd(dims, 256+64+4, model_channels, 3, padding=1)) + + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self.zero_convs.append(self.make_zero_conv(ch)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + self.zero_convs.append(self.make_zero_conv(ch)) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self.middle_block_out = self.make_zero_conv(ch) + self._feature_size += ch + + def make_zero_conv(self, channels): + return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0))) + + def forward(self, x, 
hint, text_info, timesteps, context, **kwargs): + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + if self.use_fp16: + t_emb = t_emb.half() + emb = self.time_embed(t_emb) + + # guided_hint from text_info + B, C, H, W = x.shape + glyphs = torch.cat(text_info['glyphs'], dim=1).sum(dim=1, keepdim=True) + positions = torch.cat(text_info['positions'], dim=1).sum(dim=1, keepdim=True) + enc_glyph = self.glyph_block(glyphs, emb, context) + enc_pos = self.position_block(positions, emb, context) + guided_hint = self.fuse_block(torch.cat([enc_glyph, enc_pos, text_info['masked_x']], dim=1)) + + outs = [] + + h = x.type(self.dtype) + for module, zero_conv in zip(self.input_blocks, self.zero_convs): + if guided_hint is not None: + h = module(h, emb, context) + h += guided_hint + guided_hint = None + else: + h = module(h, emb, context) + outs.append(zero_conv(h, emb, context)) + + h = self.middle_block(h, emb, context) + outs.append(self.middle_block_out(h, emb, context)) + + return outs + + +class ControlLDM(LatentDiffusion): + + def __init__(self, control_stage_config, control_key, glyph_key, position_key, only_mid_control, loss_alpha=0, loss_beta=0, with_step_weight=False, use_vae_upsample=False, latin_weight=1.0, embedding_manager_config=None, *args, **kwargs): + self.use_fp16 = kwargs.pop('use_fp16', False) + super().__init__(*args, **kwargs) + self.control_model = instantiate_from_config(control_stage_config) + self.control_key = control_key + self.glyph_key = glyph_key + self.position_key = position_key + self.only_mid_control = only_mid_control + self.control_scales = [1.0] * 13 + self.loss_alpha = loss_alpha + self.loss_beta = loss_beta + self.with_step_weight = with_step_weight + self.use_vae_upsample = use_vae_upsample + self.latin_weight = latin_weight + + if embedding_manager_config is not None and embedding_manager_config.params.valid: + self.embedding_manager = self.instantiate_embedding_manager(embedding_manager_config, self.cond_stage_model) + for param in self.embedding_manager.embedding_parameters(): + param.requires_grad = True + else: + self.embedding_manager = None + if self.loss_alpha > 0 or self.loss_beta > 0 or self.embedding_manager: + if embedding_manager_config.params.emb_type == 'ocr': + self.text_predictor = create_predictor().eval() + args = edict() + args.rec_image_shape = "3, 48, 320" + args.rec_batch_num = 6 + args.rec_char_dict_path = str(CURRENT_DIR.parent / "ocr_recog" / "ppocr_keys_v1.txt") + args.use_fp16 = self.use_fp16 + self.cn_recognizer = TextRecognizer(args, self.text_predictor) + for param in self.text_predictor.parameters(): + param.requires_grad = False + if self.embedding_manager: + self.embedding_manager.recog = self.cn_recognizer + + @torch.no_grad() + def get_input(self, batch, k, bs=None, *args, **kwargs): + if self.embedding_manager is None: # fill in full caption + self.fill_caption(batch) + x, c, mx = super().get_input(batch, self.first_stage_key, mask_k='masked_img', *args, **kwargs) + control = batch[self.control_key] # for log_images and loss_alpha, not real control + if bs is not None: + control = control[:bs] + control = control.to(self.device) + control = einops.rearrange(control, 'b h w c -> b c h w') + control = control.to(memory_format=torch.contiguous_format).float() + + inv_mask = batch['inv_mask'] + if bs is not None: + inv_mask = inv_mask[:bs] + inv_mask = inv_mask.to(self.device) + inv_mask = einops.rearrange(inv_mask, 'b h w c -> b c h w') + inv_mask = 
inv_mask.to(memory_format=torch.contiguous_format).float() + + glyphs = batch[self.glyph_key] + gly_line = batch['gly_line'] + positions = batch[self.position_key] + n_lines = batch['n_lines'] + language = batch['language'] + texts = batch['texts'] + assert len(glyphs) == len(positions) + for i in range(len(glyphs)): + if bs is not None: + glyphs[i] = glyphs[i][:bs] + gly_line[i] = gly_line[i][:bs] + positions[i] = positions[i][:bs] + n_lines = n_lines[:bs] + glyphs[i] = glyphs[i].to(self.device) + gly_line[i] = gly_line[i].to(self.device) + positions[i] = positions[i].to(self.device) + glyphs[i] = einops.rearrange(glyphs[i], 'b h w c -> b c h w') + gly_line[i] = einops.rearrange(gly_line[i], 'b h w c -> b c h w') + positions[i] = einops.rearrange(positions[i], 'b h w c -> b c h w') + glyphs[i] = glyphs[i].to(memory_format=torch.contiguous_format).float() + gly_line[i] = gly_line[i].to(memory_format=torch.contiguous_format).float() + positions[i] = positions[i].to(memory_format=torch.contiguous_format).float() + info = {} + info['glyphs'] = glyphs + info['positions'] = positions + info['n_lines'] = n_lines + info['language'] = language + info['texts'] = texts + info['img'] = batch['img'] # nhwc, (-1,1) + info['masked_x'] = mx + info['gly_line'] = gly_line + info['inv_mask'] = inv_mask + return x, dict(c_crossattn=[c], c_concat=[control], text_info=info) + + def apply_model(self, x_noisy, t, cond, *args, **kwargs): + assert isinstance(cond, dict) + diffusion_model = self.model.diffusion_model + _cond = torch.cat(cond['c_crossattn'], 1) + _hint = torch.cat(cond['c_concat'], 1) + if self.use_fp16: + x_noisy = x_noisy.half() + control = self.control_model(x=x_noisy, timesteps=t, context=_cond, hint=_hint, text_info=cond['text_info']) + control = [c * scale for c, scale in zip(control, self.control_scales)] + eps = diffusion_model(x=x_noisy, timesteps=t, context=_cond, control=control, only_mid_control=self.only_mid_control) + + return eps + + def instantiate_embedding_manager(self, config, embedder): + model = instantiate_from_config(config, embedder=embedder) + return model + + @torch.no_grad() + def get_unconditional_conditioning(self, N): + return self.get_learned_conditioning(dict(c_crossattn=[[""] * N], text_info=None)) + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + if self.embedding_manager is not None and c['text_info'] is not None: + self.embedding_manager.encode_text(c['text_info']) + if isinstance(c, dict): + cond_txt = c['c_crossattn'][0] + else: + cond_txt = c + if self.embedding_manager is not None: + cond_txt = self.cond_stage_model.encode(cond_txt, embedding_manager=self.embedding_manager) + else: + cond_txt = self.cond_stage_model.encode(cond_txt) + if isinstance(c, dict): + c['c_crossattn'][0] = cond_txt + else: + c = cond_txt + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def fill_caption(self, batch, place_holder='*'): + bs = len(batch['n_lines']) + cond_list = copy.deepcopy(batch[self.cond_stage_key]) + for i in range(bs): + n_lines = batch['n_lines'][i] + if n_lines == 0: + continue + cur_cap = cond_list[i] + for j in range(n_lines): + r_txt = batch['texts'][j][i] + cur_cap = cur_cap.replace(place_holder, f'"{r_txt}"', 1) + cond_list[i] = 
cur_cap + batch[self.cond_stage_key] = cond_list + + @torch.no_grad() + def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs): + use_ddim = ddim_steps is not None + + log = dict() + z, c = self.get_input(batch, self.first_stage_key, bs=N) + if self.cond_stage_trainable: + with torch.no_grad(): + c = self.get_learned_conditioning(c) + c_crossattn = c["c_crossattn"][0][:N] + c_cat = c["c_concat"][0][:N] + text_info = c["text_info"] + text_info['glyphs'] = [i[:N] for i in text_info['glyphs']] + text_info['gly_line'] = [i[:N] for i in text_info['gly_line']] + text_info['positions'] = [i[:N] for i in text_info['positions']] + text_info['n_lines'] = text_info['n_lines'][:N] + text_info['masked_x'] = text_info['masked_x'][:N] + text_info['img'] = text_info['img'][:N] + + N = min(z.shape[0], N) + n_row = min(z.shape[0], n_row) + log["reconstruction"] = self.decode_first_stage(z) + log["masked_image"] = self.decode_first_stage(text_info['masked_x']) + log["control"] = c_cat * 2.0 - 1.0 + log["img"] = text_info['img'].permute(0, 3, 1, 2) # log source image if needed + # get glyph + glyph_bs = torch.stack(text_info['glyphs']) + glyph_bs = torch.sum(glyph_bs, dim=0) * 2.0 - 1.0 + log["glyph"] = torch.nn.functional.interpolate(glyph_bs, size=(512, 512), mode='bilinear', align_corners=True,) + # fill caption + if not self.embedding_manager: + self.fill_caption(batch) + captions = batch[self.cond_stage_key] + log["conditioning"] = log_txt_as_img((512, 512), captions, size=16) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c], "text_info": text_info}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_cross = self.get_unconditional_conditioning(N) + uc_cat = c_cat # torch.zeros_like(c_cat) + uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross['c_crossattn'][0]], "text_info": text_info} + samples_cfg, tmps = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c_crossattn], "text_info": text_info}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc_full, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + 
log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + pred_x0 = False # wether log pred_x0 + if pred_x0: + for idx in range(len(tmps['pred_x0'])): + pred_x0 = self.decode_first_stage(tmps['pred_x0'][idx]) + log[f"pred_x0_{tmps['index'][idx]}"] = pred_x0 + + return log + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + ddim_sampler = DDIMSampler(self) + b, c, h, w = cond["c_concat"][0].shape + shape = (self.channels, h // 8, w // 8) + samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, log_every_t=5, **kwargs) + return samples, intermediates + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.control_model.parameters()) + if self.embedding_manager: + params += list(self.embedding_manager.embedding_parameters()) + if not self.sd_locked: + # params += list(self.model.diffusion_model.input_blocks.parameters()) + # params += list(self.model.diffusion_model.middle_block.parameters()) + params += list(self.model.diffusion_model.output_blocks.parameters()) + params += list(self.model.diffusion_model.out.parameters()) + if self.unlockKV: + nCount = 0 + for name, param in self.model.diffusion_model.named_parameters(): + if 'attn2.to_k' in name or 'attn2.to_v' in name: + params += [param] + nCount += 1 + print(f'Cross attention is unlocked, and {nCount} Wk or Wv are added to potimizers!!!') + + opt = torch.optim.AdamW(params, lr=lr) + return opt + + def low_vram_shift(self, is_diffusing): + if is_diffusing: + self.model = self.model.cuda() + self.control_model = self.control_model.cuda() + self.first_stage_model = self.first_stage_model.cpu() + self.cond_stage_model = self.cond_stage_model.cpu() + else: + self.model = self.model.cpu() + self.control_model = self.control_model.cpu() + self.first_stage_model = self.first_stage_model.cuda() + self.cond_stage_model = self.cond_stage_model.cuda() diff --git a/inpaint/model/anytext/cldm/ddim_hacked.py b/inpaint/model/anytext/cldm/ddim_hacked.py new file mode 100644 index 0000000..b23a883 --- /dev/null +++ b/inpaint/model/anytext/cldm/ddim_hacked.py @@ -0,0 +1,486 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + make_ddim_sampling_parameters, + make_ddim_timesteps, + noise_like, + extract_into_tensor, +) + + +class DDIMSampler(object): + def __init__(self, model, device, schedule="linear", **kwargs): + super().__init__() + self.device = device + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device(self.device): + attr = attr.to(torch.device(self.device)) + setattr(self, name, attr) + + def make_schedule( + self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True + ): + self.ddim_timesteps = make_ddim_timesteps( + ddim_discr_method=ddim_discretize, + num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps, + verbose=verbose, + ) + alphas_cumprod = self.model.alphas_cumprod + assert ( + alphas_cumprod.shape[0] == self.ddpm_num_timesteps + ), "alphas have to be defined for each timestep" + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device) + + self.register_buffer("betas", to_torch(self.model.betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) + self.register_buffer( + 
"alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) + ) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer( + "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", + to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", + to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), + ) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( + alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta, + verbose=verbose, + ) + self.register_buffer("ddim_sigmas", ddim_sigmas) + self.register_buffer("ddim_alphas", ddim_alphas) + self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) + self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) + / (1 - self.alphas_cumprod) + * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) + ) + self.register_buffer( + "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps + ) + + @torch.no_grad() + def sample( + self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0.0, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ dynamic_threshold=None, + ucg_schedule=None, + **kwargs, + ): + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): + ctmp = ctmp[0] + cbs = ctmp.shape[0] + if cbs != batch_size: + print( + f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" + ) + + elif isinstance(conditioning, list): + for ctmp in conditioning: + if ctmp.shape[0] != batch_size: + print( + f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" + ) + + else: + if conditioning.shape[0] != batch_size: + print( + f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}" + ) + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f"Data shape for DDIM sampling is {size}, eta {eta}") + + samples, intermediates = self.ddim_sampling( + conditioning, + size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, + x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ucg_schedule=ucg_schedule, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling( + self, + cond, + shape, + x_T=None, + ddim_use_original_steps=False, + callback=None, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + log_every_t=100, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + dynamic_threshold=None, + ucg_schedule=None, + ): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = ( + self.ddpm_num_timesteps + if ddim_use_original_steps + else self.ddim_timesteps + ) + elif timesteps is not None and not ddim_use_original_steps: + subset_end = ( + int( + min(timesteps / self.ddim_timesteps.shape[0], 1) + * self.ddim_timesteps.shape[0] + ) + - 1 + ) + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {"x_inter": [img], "pred_x0": [img]} + time_range = ( + reversed(range(0, timesteps)) + if ddim_use_original_steps + else np.flip(timesteps) + ) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample( + x0, ts + ) # TODO: deterministic forward pass? 
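+ # note (editor): where mask == 1 the latent is replaced with the re-noised x0 (img_orig = q_sample(x0, ts)); where mask == 0 the current DDIM sample is kept, so known regions stay consistent during masked sampling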
+ img = img_orig * mask + (1.0 - mask) * img + + if ucg_schedule is not None: + assert len(ucg_schedule) == len(time_range) + unconditional_guidance_scale = ucg_schedule[i] + + outs = self.p_sample_ddim( + img, + cond, + ts, + index=index, + use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, + temperature=temperature, + noise_dropout=noise_dropout, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ) + img, pred_x0 = outs + if callback: + callback(None, i, None, None) + if img_callback: + img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates["x_inter"].append(img) + intermediates["pred_x0"].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim( + self, + x, + c, + t, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + dynamic_threshold=None, + ): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.0: + model_output = self.model.apply_model(x, t, c) + else: + model_t = self.model.apply_model(x, t, c) + model_uncond = self.model.apply_model(x, t, unconditional_conditioning) + model_output = model_uncond + unconditional_guidance_scale * ( + model_t - model_uncond + ) + + if self.model.parameterization == "v": + e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) + else: + e_t = model_output + + if score_corrector is not None: + assert self.model.parameterization == "eps", "not implemented" + e_t = score_corrector.modify_score( + self.model, e_t, x, t, c, **corrector_kwargs + ) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = ( + self.model.alphas_cumprod_prev + if use_original_steps + else self.ddim_alphas_prev + ) + sqrt_one_minus_alphas = ( + self.model.sqrt_one_minus_alphas_cumprod + if use_original_steps + else self.ddim_sqrt_one_minus_alphas + ) + sigmas = ( + self.model.ddim_sigmas_for_original_num_steps + if use_original_steps + else self.ddim_sigmas + ) + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full( + (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device + ) + + # current prediction for x_0 + if self.model.parameterization != "v": + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + else: + pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) + + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + + if dynamic_threshold is not None: + raise NotImplementedError() + + # direction pointing to x_t + dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def encode( + self, + x0, + c, + t_enc, + 
use_original_steps=False, + return_intermediates=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + callback=None, + ): + timesteps = ( + np.arange(self.ddpm_num_timesteps) + if use_original_steps + else self.ddim_timesteps + ) + num_reference_steps = timesteps.shape[0] + + assert t_enc <= num_reference_steps + num_steps = t_enc + + if use_original_steps: + alphas_next = self.alphas_cumprod[:num_steps] + alphas = self.alphas_cumprod_prev[:num_steps] + else: + alphas_next = self.ddim_alphas[:num_steps] + alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) + + x_next = x0 + intermediates = [] + inter_steps = [] + for i in tqdm(range(num_steps), desc="Encoding Image"): + t = torch.full( + (x0.shape[0],), timesteps[i], device=self.model.device, dtype=torch.long + ) + if unconditional_guidance_scale == 1.0: + noise_pred = self.model.apply_model(x_next, t, c) + else: + assert unconditional_conditioning is not None + e_t_uncond, noise_pred = torch.chunk( + self.model.apply_model( + torch.cat((x_next, x_next)), + torch.cat((t, t)), + torch.cat((unconditional_conditioning, c)), + ), + 2, + ) + noise_pred = e_t_uncond + unconditional_guidance_scale * ( + noise_pred - e_t_uncond + ) + + xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next + weighted_noise_pred = ( + alphas_next[i].sqrt() + * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) + * noise_pred + ) + x_next = xt_weighted + weighted_noise_pred + if ( + return_intermediates + and i % (num_steps // return_intermediates) == 0 + and i < num_steps - 1 + ): + intermediates.append(x_next) + inter_steps.append(i) + elif return_intermediates and i >= num_steps - 2: + intermediates.append(x_next) + inter_steps.append(i) + if callback: + callback(i) + + out = {"x_encoded": x_next, "intermediate_steps": inter_steps} + if return_intermediates: + out.update({"intermediates": intermediates}) + return x_next, out + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return ( + extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise + ) + + @torch.no_grad() + def decode( + self, + x_latent, + cond, + t_start, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + callback=None, + ): + timesteps = ( + np.arange(self.ddpm_num_timesteps) + if use_original_steps + else self.ddim_timesteps + ) + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc="Decoding image", total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full( + (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long + ) + x_dec, _ = self.p_sample_ddim( + x_dec, + cond, + ts, + index=index, + use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + 
unconditional_conditioning=unconditional_conditioning, + ) + if callback: + callback(i) + return x_dec diff --git a/inpaint/model/anytext/cldm/embedding_manager.py b/inpaint/model/anytext/cldm/embedding_manager.py new file mode 100644 index 0000000..6ccf8a9 --- /dev/null +++ b/inpaint/model/anytext/cldm/embedding_manager.py @@ -0,0 +1,165 @@ +''' +Copyright (c) Alibaba, Inc. and its affiliates. +''' +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import conv_nd, linear + + +def get_clip_token_for_string(tokenizer, string): + batch_encoding = tokenizer(string, truncation=True, max_length=77, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"] + assert torch.count_nonzero(tokens - 49407) == 2, f"String '{string}' maps to more than a single token. Please use another string" + return tokens[0, 1] + + +def get_bert_token_for_string(tokenizer, string): + token = tokenizer(string) + assert torch.count_nonzero(token) == 3, f"String '{string}' maps to more than a single token. Please use another string" + token = token[0, 1] + return token + + +def get_clip_vision_emb(encoder, processor, img): + _img = img.repeat(1, 3, 1, 1)*255 + inputs = processor(images=_img, return_tensors="pt") + inputs['pixel_values'] = inputs['pixel_values'].to(img.device) + outputs = encoder(**inputs) + emb = outputs.image_embeds + return emb + + +def get_recog_emb(encoder, img_list): + _img_list = [(img.repeat(1, 3, 1, 1)*255)[0] for img in img_list] + encoder.predictor.eval() + _, preds_neck = encoder.pred_imglist(_img_list, show_debug=False) + return preds_neck + + +def pad_H(x): + _, _, H, W = x.shape + p_top = (W - H) // 2 + p_bot = W - H - p_top + return F.pad(x, (0, 0, p_top, p_bot)) + + +class EncodeNet(nn.Module): + def __init__(self, in_channels, out_channels): + super(EncodeNet, self).__init__() + chan = 16 + n_layer = 4 # downsample + + self.conv1 = conv_nd(2, in_channels, chan, 3, padding=1) + self.conv_list = nn.ModuleList([]) + _c = chan + for i in range(n_layer): + self.conv_list.append(conv_nd(2, _c, _c*2, 3, padding=1, stride=2)) + _c *= 2 + self.conv2 = conv_nd(2, _c, out_channels, 3, padding=1) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.act = nn.SiLU() + + def forward(self, x): + x = self.act(self.conv1(x)) + for layer in self.conv_list: + x = self.act(layer(x)) + x = self.act(self.conv2(x)) + x = self.avgpool(x) + x = x.view(x.size(0), -1) + return x + + +class EmbeddingManager(nn.Module): + def __init__( + self, + embedder, + valid=True, + glyph_channels=20, + position_channels=1, + placeholder_string='*', + add_pos=False, + emb_type='ocr', + **kwargs + ): + super().__init__() + if hasattr(embedder, 'tokenizer'): # using Stable Diffusion's CLIP encoder + get_token_for_string = partial(get_clip_token_for_string, embedder.tokenizer) + token_dim = 768 + if hasattr(embedder, 'vit'): + assert emb_type == 'vit' + self.get_vision_emb = partial(get_clip_vision_emb, embedder.vit, embedder.processor) + self.get_recog_emb = None + else: # using LDM's BERT encoder + get_token_for_string = partial(get_bert_token_for_string, embedder.tknz_fn) + token_dim = 1280 + self.token_dim = token_dim + self.emb_type = emb_type + + self.add_pos = add_pos + if add_pos: + self.position_encoder = EncodeNet(position_channels, token_dim) + if emb_type == 'ocr': + self.proj = linear(40*64, token_dim) + if emb_type == 'conv': + 
self.glyph_encoder = EncodeNet(glyph_channels, token_dim) + + self.placeholder_token = get_token_for_string(placeholder_string) + + def encode_text(self, text_info): + if self.get_recog_emb is None and self.emb_type == 'ocr': + self.get_recog_emb = partial(get_recog_emb, self.recog) + + gline_list = [] + pos_list = [] + for i in range(len(text_info['n_lines'])): # sample index in a batch + n_lines = text_info['n_lines'][i] + for j in range(n_lines): # line + gline_list += [text_info['gly_line'][j][i:i+1]] + if self.add_pos: + pos_list += [text_info['positions'][j][i:i+1]] + + if len(gline_list) > 0: + if self.emb_type == 'ocr': + recog_emb = self.get_recog_emb(gline_list) + enc_glyph = self.proj(recog_emb.reshape(recog_emb.shape[0], -1)) + elif self.emb_type == 'vit': + enc_glyph = self.get_vision_emb(pad_H(torch.cat(gline_list, dim=0))) + elif self.emb_type == 'conv': + enc_glyph = self.glyph_encoder(pad_H(torch.cat(gline_list, dim=0))) + if self.add_pos: + enc_pos = self.position_encoder(torch.cat(gline_list, dim=0)) + enc_glyph = enc_glyph+enc_pos + + self.text_embs_all = [] + n_idx = 0 + for i in range(len(text_info['n_lines'])): # sample index in a batch + n_lines = text_info['n_lines'][i] + text_embs = [] + for j in range(n_lines): # line + text_embs += [enc_glyph[n_idx:n_idx+1]] + n_idx += 1 + self.text_embs_all += [text_embs] + + def forward( + self, + tokenized_text, + embedded_text, + ): + b, device = tokenized_text.shape[0], tokenized_text.device + for i in range(b): + idx = tokenized_text[i] == self.placeholder_token.to(device) + if sum(idx) > 0: + if i >= len(self.text_embs_all): + print('truncation for log images...') + break + text_emb = torch.cat(self.text_embs_all[i], dim=0) + if sum(idx) != len(text_emb): + print('truncation for long caption...') + embedded_text[i][idx] = text_emb[:sum(idx)] + return embedded_text + + def embedding_parameters(self): + return self.parameters() diff --git a/inpaint/model/anytext/cldm/hack.py b/inpaint/model/anytext/cldm/hack.py new file mode 100644 index 0000000..05afe5f --- /dev/null +++ b/inpaint/model/anytext/cldm/hack.py @@ -0,0 +1,111 @@ +import torch +import einops + +import iopaint.model.anytext.ldm.modules.encoders.modules +import iopaint.model.anytext.ldm.modules.attention + +from transformers import logging +from iopaint.model.anytext.ldm.modules.attention import default + + +def disable_verbosity(): + logging.set_verbosity_error() + print('logging improved.') + return + + +def enable_sliced_attention(): + iopaint.model.anytext.ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward + print('Enabled sliced_attention.') + return + + +def hack_everything(clip_skip=0): + disable_verbosity() + iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward + iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip + print('Enabled clip hacks.') + return + + +# Written by Lvmin +def _hacked_clip_forward(self, text): + PAD = self.tokenizer.pad_token_id + EOS = self.tokenizer.eos_token_id + BOS = self.tokenizer.bos_token_id + + def tokenize(t): + return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"] + + def transformer_encode(t): + if self.clip_skip > 1: + rt = self.transformer(input_ids=t, output_hidden_states=True) + return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip]) + else: + return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state + + def split(x): + 
return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3] + + def pad(x, p, i): + return x[:i] if len(x) >= i else x + [p] * (i - len(x)) + + raw_tokens_list = tokenize(text) + tokens_list = [] + + for raw_tokens in raw_tokens_list: + raw_tokens_123 = split(raw_tokens) + raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123] + raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123] + tokens_list.append(raw_tokens_123) + + tokens_list = torch.IntTensor(tokens_list).to(self.device) + + feed = einops.rearrange(tokens_list, 'b f i -> (b f) i') + y = transformer_encode(feed) + z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3) + + return z + + +# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py +def _hacked_sliced_attentin_forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + del context, x + + q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + limit = k.shape[0] + att_step = 1 + q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0)) + k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0)) + v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0)) + + q_chunks.reverse() + k_chunks.reverse() + v_chunks.reverse() + sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) + del k, q, v + for i in range(0, limit, att_step): + q_buffer = q_chunks.pop() + k_buffer = k_chunks.pop() + v_buffer = v_chunks.pop() + sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale + + del k_buffer, q_buffer + # attention, what we cannot get enough of, by chunks + + sim_buffer = sim_buffer.softmax(dim=-1) + + sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) + del v_buffer + sim[i:i + att_step, :, :] = sim_buffer + + del sim_buffer + sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h) + return self.to_out(sim) diff --git a/inpaint/model/anytext/cldm/model.py b/inpaint/model/anytext/cldm/model.py new file mode 100644 index 0000000..688f2ed --- /dev/null +++ b/inpaint/model/anytext/cldm/model.py @@ -0,0 +1,40 @@ +import os +import torch + +from omegaconf import OmegaConf +from iopaint.model.anytext.ldm.util import instantiate_from_config + + +def get_state_dict(d): + return d.get("state_dict", d) + + +def load_state_dict(ckpt_path, location="cpu"): + _, extension = os.path.splitext(ckpt_path) + if extension.lower() == ".safetensors": + import safetensors.torch + + state_dict = safetensors.torch.load_file(ckpt_path, device=location) + else: + state_dict = get_state_dict( + torch.load(ckpt_path, map_location=torch.device(location)) + ) + state_dict = get_state_dict(state_dict) + print(f"Loaded state_dict from [{ckpt_path}]") + return state_dict + + +def create_model(config_path, device, cond_stage_path=None, use_fp16=False): + config = OmegaConf.load(config_path) + # if cond_stage_path: + # config.model.params.cond_stage_config.params.version = ( + # cond_stage_path # use pre-downloaded ckpts, in case blocked + # ) + config.model.params.cond_stage_config.params.device = str(device) + if use_fp16: + config.model.params.use_fp16 = True + config.model.params.control_stage_config.params.use_fp16 = True + config.model.params.unet_config.params.use_fp16 = True + model = instantiate_from_config(config.model).cpu() + print(f"Loaded model config from [{config_path}]") + 
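# note (editor): this only builds the module graph from the config (moved to CPU); checkpoint weights are applied separately by the caller, e.g. via load_state_dict() defined above
+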
return model diff --git a/inpaint/model/anytext/cldm/recognizer.py b/inpaint/model/anytext/cldm/recognizer.py new file mode 100755 index 0000000..0621512 --- /dev/null +++ b/inpaint/model/anytext/cldm/recognizer.py @@ -0,0 +1,300 @@ +""" +Copyright (c) Alibaba, Inc. and its affiliates. +""" +import os +import cv2 +import numpy as np +import math +import traceback +from easydict import EasyDict as edict +import time +from iopaint.model.anytext.ocr_recog.RecModel import RecModel +import torch +import torch.nn.functional as F + + +def min_bounding_rect(img): + ret, thresh = cv2.threshold(img, 127, 255, 0) + contours, hierarchy = cv2.findContours( + thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + if len(contours) == 0: + print("Bad contours, using fake bbox...") + return np.array([[0, 0], [100, 0], [100, 100], [0, 100]]) + max_contour = max(contours, key=cv2.contourArea) + rect = cv2.minAreaRect(max_contour) + box = cv2.boxPoints(rect) + box = np.int0(box) + # sort + x_sorted = sorted(box, key=lambda x: x[0]) + left = x_sorted[:2] + right = x_sorted[2:] + left = sorted(left, key=lambda x: x[1]) + (tl, bl) = left + right = sorted(right, key=lambda x: x[1]) + (tr, br) = right + if tl[1] > bl[1]: + (tl, bl) = (bl, tl) + if tr[1] > br[1]: + (tr, br) = (br, tr) + return np.array([tl, tr, br, bl]) + + +def create_predictor(model_dir=None, model_lang="ch", is_onnx=False): + model_file_path = model_dir + if model_file_path is not None and not os.path.exists(model_file_path): + raise ValueError("not find model file path {}".format(model_file_path)) + + if is_onnx: + import onnxruntime as ort + + sess = ort.InferenceSession( + model_file_path, providers=["CPUExecutionProvider"] + ) # 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider' + return sess + else: + if model_lang == "ch": + n_class = 6625 + elif model_lang == "en": + n_class = 97 + else: + raise ValueError(f"Unsupported OCR recog model_lang: {model_lang}") + rec_config = edict( + in_channels=3, + backbone=edict( + type="MobileNetV1Enhance", + scale=0.5, + last_conv_stride=[1, 2], + last_pool_type="avg", + ), + neck=edict( + type="SequenceEncoder", + encoder_type="svtr", + dims=64, + depth=2, + hidden_dims=120, + use_guide=True, + ), + head=edict( + type="CTCHead", + fc_decay=0.00001, + out_channels=n_class, + return_feats=True, + ), + ) + + rec_model = RecModel(rec_config) + if model_file_path is not None: + rec_model.load_state_dict(torch.load(model_file_path, map_location="cpu")) + rec_model.eval() + return rec_model.eval() + + +def _check_image_file(path): + img_end = {"jpg", "bmp", "png", "jpeg", "rgb", "tif", "tiff"} + return any([path.lower().endswith(e) for e in img_end]) + + +def get_image_file_list(img_file): + imgs_lists = [] + if img_file is None or not os.path.exists(img_file): + raise Exception("not found any img file in {}".format(img_file)) + if os.path.isfile(img_file) and _check_image_file(img_file): + imgs_lists.append(img_file) + elif os.path.isdir(img_file): + for single_file in os.listdir(img_file): + file_path = os.path.join(img_file, single_file) + if os.path.isfile(file_path) and _check_image_file(file_path): + imgs_lists.append(file_path) + if len(imgs_lists) == 0: + raise Exception("not found any img file in {}".format(img_file)) + imgs_lists = sorted(imgs_lists) + return imgs_lists + + +class TextRecognizer(object): + def __init__(self, args, predictor): + self.rec_image_shape = [int(v) for v in args.rec_image_shape.split(",")] + self.rec_batch_num = args.rec_batch_num + 
self.predictor = predictor + self.chars = self.get_char_dict(args.rec_char_dict_path) + self.char2id = {x: i for i, x in enumerate(self.chars)} + self.is_onnx = not isinstance(self.predictor, torch.nn.Module) + self.use_fp16 = args.use_fp16 + + # img: CHW + def resize_norm_img(self, img, max_wh_ratio): + imgC, imgH, imgW = self.rec_image_shape + assert imgC == img.shape[0] + imgW = int((imgH * max_wh_ratio)) + + h, w = img.shape[1:] + ratio = w / float(h) + if math.ceil(imgH * ratio) > imgW: + resized_w = imgW + else: + resized_w = int(math.ceil(imgH * ratio)) + resized_image = torch.nn.functional.interpolate( + img.unsqueeze(0), + size=(imgH, resized_w), + mode="bilinear", + align_corners=True, + ) + resized_image /= 255.0 + resized_image -= 0.5 + resized_image /= 0.5 + padding_im = torch.zeros((imgC, imgH, imgW), dtype=torch.float32).to(img.device) + padding_im[:, :, 0:resized_w] = resized_image[0] + return padding_im + + # img_list: list of tensors with shape chw 0-255 + def pred_imglist(self, img_list, show_debug=False, is_ori=False): + img_num = len(img_list) + assert img_num > 0 + # Calculate the aspect ratio of all text bars + width_list = [] + for img in img_list: + width_list.append(img.shape[2] / float(img.shape[1])) + # Sorting can speed up the recognition process + indices = torch.from_numpy(np.argsort(np.array(width_list))) + batch_num = self.rec_batch_num + preds_all = [None] * img_num + preds_neck_all = [None] * img_num + for beg_img_no in range(0, img_num, batch_num): + end_img_no = min(img_num, beg_img_no + batch_num) + norm_img_batch = [] + + imgC, imgH, imgW = self.rec_image_shape[:3] + max_wh_ratio = imgW / imgH + for ino in range(beg_img_no, end_img_no): + h, w = img_list[indices[ino]].shape[1:] + if h > w * 1.2: + img = img_list[indices[ino]] + img = torch.transpose(img, 1, 2).flip(dims=[1]) + img_list[indices[ino]] = img + h, w = img.shape[1:] + # wh_ratio = w * 1.0 / h + # max_wh_ratio = max(max_wh_ratio, wh_ratio) # comment to not use different ratio + for ino in range(beg_img_no, end_img_no): + norm_img = self.resize_norm_img(img_list[indices[ino]], max_wh_ratio) + if self.use_fp16: + norm_img = norm_img.half() + norm_img = norm_img.unsqueeze(0) + norm_img_batch.append(norm_img) + norm_img_batch = torch.cat(norm_img_batch, dim=0) + if show_debug: + for i in range(len(norm_img_batch)): + _img = norm_img_batch[i].permute(1, 2, 0).detach().cpu().numpy() + _img = (_img + 0.5) * 255 + _img = _img[:, :, ::-1] + file_name = f"{indices[beg_img_no + i]}" + file_name = file_name + "_ori" if is_ori else file_name + cv2.imwrite(file_name + ".jpg", _img) + if self.is_onnx: + input_dict = {} + input_dict[self.predictor.get_inputs()[0].name] = ( + norm_img_batch.detach().cpu().numpy() + ) + outputs = self.predictor.run(None, input_dict) + preds = {} + preds["ctc"] = torch.from_numpy(outputs[0]) + preds["ctc_neck"] = [torch.zeros(1)] * img_num + else: + preds = self.predictor(norm_img_batch) + for rno in range(preds["ctc"].shape[0]): + preds_all[indices[beg_img_no + rno]] = preds["ctc"][rno] + preds_neck_all[indices[beg_img_no + rno]] = preds["ctc_neck"][rno] + + return torch.stack(preds_all, dim=0), torch.stack(preds_neck_all, dim=0) + + def get_char_dict(self, character_dict_path): + character_str = [] + with open(character_dict_path, "rb") as fin: + lines = fin.readlines() + for line in lines: + line = line.decode("utf-8").strip("\n").strip("\r\n") + character_str.append(line) + dict_character = list(character_str) + dict_character = ["sos"] + dict_character + [" "] # eos is 
space + return dict_character + + def get_text(self, order): + char_list = [self.chars[text_id] for text_id in order] + return "".join(char_list) + + def decode(self, mat): + text_index = mat.detach().cpu().numpy().argmax(axis=1) + ignored_tokens = [0] + selection = np.ones(len(text_index), dtype=bool) + selection[1:] = text_index[1:] != text_index[:-1] + for ignored_token in ignored_tokens: + selection &= text_index != ignored_token + return text_index[selection], np.where(selection)[0] + + def get_ctcloss(self, preds, gt_text, weight): + if not isinstance(weight, torch.Tensor): + weight = torch.tensor(weight).to(preds.device) + ctc_loss = torch.nn.CTCLoss(reduction="none") + log_probs = preds.log_softmax(dim=2).permute(1, 0, 2) # NTC-->TNC + targets = [] + target_lengths = [] + for t in gt_text: + targets += [self.char2id.get(i, len(self.chars) - 1) for i in t] + target_lengths += [len(t)] + targets = torch.tensor(targets).to(preds.device) + target_lengths = torch.tensor(target_lengths).to(preds.device) + input_lengths = torch.tensor([log_probs.shape[0]] * (log_probs.shape[1])).to( + preds.device + ) + loss = ctc_loss(log_probs, targets, input_lengths, target_lengths) + loss = loss / input_lengths * weight + return loss + + +def main(): + rec_model_dir = "./ocr_weights/ppv3_rec.pth" + predictor = create_predictor(rec_model_dir) + args = edict() + args.rec_image_shape = "3, 48, 320" + args.rec_char_dict_path = "./ocr_weights/ppocr_keys_v1.txt" + args.rec_batch_num = 6 + text_recognizer = TextRecognizer(args, predictor) + image_dir = "./test_imgs_cn" + gt_text = ["韩国小馆"] * 14 + + image_file_list = get_image_file_list(image_dir) + valid_image_file_list = [] + img_list = [] + + for image_file in image_file_list: + img = cv2.imread(image_file) + if img is None: + print("error in loading image:{}".format(image_file)) + continue + valid_image_file_list.append(image_file) + img_list.append(torch.from_numpy(img).permute(2, 0, 1).float()) + try: + tic = time.time() + times = [] + for i in range(10): + preds, _ = text_recognizer.pred_imglist(img_list) # get text + preds_all = preds.softmax(dim=2) + times += [(time.time() - tic) * 1000.0] + tic = time.time() + print(times) + print(np.mean(times[1:]) / len(preds_all)) + weight = np.ones(len(gt_text)) + loss = text_recognizer.get_ctcloss(preds, gt_text, weight) + for i in range(len(valid_image_file_list)): + pred = preds_all[i] + order, idx = text_recognizer.decode(pred) + text = text_recognizer.get_text(order) + print( + f'{valid_image_file_list[i]}: pred/gt="{text}"/"{gt_text[i]}", loss={loss[i]:.2f}' + ) + except Exception as E: + print(traceback.format_exc(), E) + + +if __name__ == "__main__": + main() diff --git a/inpaint/model/anytext/ldm/__init__.py b/inpaint/model/anytext/ldm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/models/__init__.py b/inpaint/model/anytext/ldm/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/models/autoencoder.py b/inpaint/model/anytext/ldm/models/autoencoder.py new file mode 100644 index 0000000..20d52e9 --- /dev/null +++ b/inpaint/model/anytext/ldm/models/autoencoder.py @@ -0,0 +1,218 @@ +import torch +import torch.nn.functional as F +from contextlib import contextmanager + +from iopaint.model.anytext.ldm.modules.diffusionmodules.model import Encoder, Decoder +from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from iopaint.model.anytext.ldm.util import 
instantiate_from_config +from iopaint.model.anytext.ldm.modules.ema import LitEma + + +class AutoencoderKL(torch.nn.Module): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ema_decay=None, + learn_logvar=False + ): + super().__init__() + self.learn_logvar = learn_logvar + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + + self.use_ema = ema_decay is not None + if self.use_ema: + self.ema_decay = ema_decay + assert 0. < ema_decay < 1. + self.model_ema = LitEma(self, decay=ema_decay) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, 
prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, postfix=""): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val"+postfix) + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val"+postfix) + + self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list( + self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()) + if self.learn_logvar: + print(f"{self.__class__.__name__}: Learning logvar") + ae_params_list.append(self.loss.logvar) + opt_ae = torch.optim.Adam(ae_params_list, + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + if log_ema or self.use_ema: + with self.ema_scope(): + xrec_ema, posterior_ema = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec_ema.shape[1] > 3 + xrec_ema = self.to_rgb(xrec_ema) + log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample())) + log["reconstructions_ema"] = xrec_ema + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
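+ # note (editor): min-max rescale the randomly projected segmentation into [-1, 1], matching the (-1, 1) image range used elsewhere in the pipeline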
+ return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x + diff --git a/inpaint/model/anytext/ldm/models/diffusion/__init__.py b/inpaint/model/anytext/ldm/models/diffusion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/models/diffusion/ddim.py b/inpaint/model/anytext/ldm/models/diffusion/ddim.py new file mode 100644 index 0000000..f8bbaff --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/ddim.py @@ -0,0 +1,354 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + dynamic_threshold=None, + ucg_schedule=None, + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): ctmp = ctmp[0] + cbs = ctmp.shape[0] + # cbs = len(ctmp[0]) + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + + elif isinstance(conditioning, list): + for ctmp in conditioning: + if ctmp.shape[0] != batch_size: + print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}") + + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ucg_schedule=ucg_schedule + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, + ucg_schedule=None): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img], "index": [10000]} + time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i,
step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. - mask) * img + + if ucg_schedule is not None: + assert len(ucg_schedule) == len(time_range) + unconditional_guidance_scale = ucg_schedule[i] + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold) + img, pred_x0 = outs + if callback: + callback(i) + if img_callback: + img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + intermediates['index'].append(index) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, + dynamic_threshold=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + model_output = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + if isinstance(c, dict): + assert isinstance(unconditional_conditioning, dict) + c_in = dict() + for k in c: + if isinstance(c[k], list): + c_in[k] = [torch.cat([ + unconditional_conditioning[k][i], + c[k][i]]) for i in range(len(c[k]))] + elif isinstance(c[k], dict): + c_in[k] = dict() + for key in c[k]: + if isinstance(c[k][key], list): + if not isinstance(c[k][key][0], torch.Tensor): + continue + c_in[k][key] = [torch.cat([ + unconditional_conditioning[k][key][i], + c[k][key][i]]) for i in range(len(c[k][key]))] + else: + c_in[k][key] = torch.cat([ + unconditional_conditioning[k][key], + c[k][key]]) + + else: + c_in[k] = torch.cat([ + unconditional_conditioning[k], + c[k]]) + elif isinstance(c, list): + c_in = list() + assert isinstance(unconditional_conditioning, list) + for i in range(len(c)): + c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) + else: + c_in = torch.cat([unconditional_conditioning, c]) + model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) + + if self.model.parameterization == "v": + e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) + else: + e_t = model_output + + if score_corrector is not None: + assert self.model.parameterization == "eps", 'not implemented' + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding 
to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + if self.model.parameterization != "v": + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + else: + pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) + + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + + if dynamic_threshold is not None: + raise NotImplementedError() + + # direction pointing to x_t + dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, + unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): + num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] + + assert t_enc <= num_reference_steps + num_steps = t_enc + + if use_original_steps: + alphas_next = self.alphas_cumprod[:num_steps] + alphas = self.alphas_cumprod_prev[:num_steps] + else: + alphas_next = self.ddim_alphas[:num_steps] + alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) + + x_next = x0 + intermediates = [] + inter_steps = [] + for i in tqdm(range(num_steps), desc='Encoding Image'): + t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) + if unconditional_guidance_scale == 1.: + noise_pred = self.model.apply_model(x_next, t, c) + else: + assert unconditional_conditioning is not None + e_t_uncond, noise_pred = torch.chunk( + self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), + torch.cat((unconditional_conditioning, c))), 2) + noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) + + xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next + weighted_noise_pred = alphas_next[i].sqrt() * ( + (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred + x_next = xt_weighted + weighted_noise_pred + if return_intermediates and i % ( + num_steps // return_intermediates) == 0 and i < num_steps - 1: + intermediates.append(x_next) + inter_steps.append(i) + elif return_intermediates and i >= num_steps - 2: + intermediates.append(x_next) + inter_steps.append(i) + if callback: callback(i) + + out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} + if return_intermediates: + out.update({'intermediates': intermediates}) + return x_next, out + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * 
noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False, callback=None): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + if callback: callback(i) + return x_dec \ No newline at end of file diff --git a/inpaint/model/anytext/ldm/models/diffusion/ddpm.py b/inpaint/model/anytext/ldm/models/diffusion/ddpm.py new file mode 100644 index 0000000..9f48918 --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/ddpm.py @@ -0,0 +1,2380 @@ +""" +Part of the implementation is borrowed and modified from ControlNet, publicly available at https://github.com/lllyasviel/ControlNet/blob/main/ldm/models/diffusion/ddpm.py +""" + +import torch +import torch.nn as nn +import numpy as np +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager, nullcontext +from functools import partial +import itertools +from tqdm import tqdm +from torchvision.utils import make_grid +from omegaconf import ListConfig + +from iopaint.model.anytext.ldm.util import ( + log_txt_as_img, + exists, + default, + ismap, + isimage, + mean_flat, + count_params, + instantiate_from_config, +) +from iopaint.model.anytext.ldm.modules.ema import LitEma +from iopaint.model.anytext.ldm.modules.distributions.distributions import ( + normal_kl, + DiagonalGaussianDistribution, +) +from iopaint.model.anytext.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + make_beta_schedule, + extract_into_tensor, + noise_like, +) +from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler +import cv2 + + +__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} + +PRINT_DEBUG = False + + +def print_grad(grad): + # print('Gradient:', grad) + # print(grad.shape) + a = grad.max() + b = grad.min() + # print(f'mean={grad.mean():.4f}, max={a:.4f}, min={b:.4f}') + s = 255.0 / (a - b) + c = 255 * (-b / (a - b)) + grad = grad * s + c + # print(f'mean={grad.mean():.4f}, max={grad.max():.4f}, min={grad.min():.4f}') + img = grad[0].permute(1, 2, 0).detach().cpu().numpy() + if img.shape[0] == 512: + cv2.imwrite("grad-img.jpg", img) + elif img.shape[0] == 64: + cv2.imwrite("grad-latent.jpg", img) + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(torch.nn.Module): + # classic DDPM with Gaussian diffusion, in image space + def __init__( + self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + 
load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0.0, + v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1.0, + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0.0, + make_it_fit=False, + ucg_training=None, + reset_ema=False, + reset_num_ema_updates=False, + ): + super().__init__() + assert parameterization in [ + "eps", + "x0", + "v", + ], 'currently only supporting "eps" and "x0" and "v"' + self.parameterization = parameterization + print( + f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" + ) + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + self.make_it_fit = make_it_fit + if reset_ema: + assert exists(ckpt_path) + if ckpt_path is not None: + self.init_from_ckpt( + ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet + ) + if reset_ema: + assert self.use_ema + print( + f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint." 
+ ) + self.model_ema = LitEma(self.model) + if reset_num_ema_updates: + print( + " +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ " + ) + assert self.use_ema + self.model_ema.reset_num_updates() + + self.register_schedule( + given_betas=given_betas, + beta_schedule=beta_schedule, + timesteps=timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + else: + self.register_buffer("logvar", logvar) + + self.ucg_training = ucg_training or dict() + if self.ucg_training: + self.ucg_prng = np.random.RandomState() + + def register_schedule( + self, + given_betas=None, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule( + beta_schedule, + timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + # np.save('1.npy', alphas_cumprod) + alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) + + (timesteps,) = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert ( + alphas_cumprod.shape[0] == self.num_timesteps + ), "alphas have to be defined for each timestep" + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer("betas", to_torch(betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) + self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) + ) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * ( + 1.0 - alphas_cumprod_prev + ) / (1.0 - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer("posterior_variance", to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer( + "posterior_log_variance_clipped", + to_torch(np.log(np.maximum(posterior_variance, 1e-20))), + ) + self.register_buffer( + "posterior_mean_coef1", + to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), + ) + self.register_buffer( + "posterior_mean_coef2", + to_torch( + (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) + ), + ) + + if self.parameterization == "eps": + lvlb_weights = self.betas**2 / ( + 2 + * self.posterior_variance + * to_torch(alphas) + * (1 - self.alphas_cumprod) + ) + elif self.parameterization == "x0": + lvlb_weights = ( + 0.5 + * np.sqrt(torch.Tensor(alphas_cumprod)) + / (2.0 * 1 - torch.Tensor(alphas_cumprod)) + ) + elif self.parameterization == "v": + lvlb_weights = torch.ones_like( + self.betas**2 + / ( + 2 + * self.posterior_variance + * to_torch(alphas) + * (1 - self.alphas_cumprod) + ) + ) + else: + raise NotImplementedError("mu not supported") + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + @torch.no_grad() + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + if self.make_it_fit: + n_params = len( + [ + name + for name, _ in itertools.chain( + self.named_parameters(), self.named_buffers() + ) + ] + ) + for name, param in tqdm( + itertools.chain(self.named_parameters(), self.named_buffers()), + desc="Fitting old weights to new weights", + total=n_params, + ): + if not name in sd: + continue + old_shape = sd[name].shape + new_shape = param.shape + assert len(old_shape) == len(new_shape) + if len(new_shape) > 2: + # we only modify first two axes + assert new_shape[2:] == old_shape[2:] + # assumes first axis corresponds to output dim + if not new_shape == old_shape: + new_param = param.clone() + old_param = sd[name] + if len(new_shape) == 1: + for i in range(new_param.shape[0]): + new_param[i] = old_param[i % old_shape[0]] + elif len(new_shape) >= 2: + for i in range(new_param.shape[0]): + for j in range(new_param.shape[1]): + new_param[i, j] = old_param[ + i % old_shape[0], j % old_shape[1] + ] + + n_used_old = torch.ones(old_shape[1]) + for j in range(new_param.shape[1]): + n_used_old[j % old_shape[1]] += 1 + n_used_new = torch.zeros(new_shape[1]) + for j in range(new_param.shape[1]): + n_used_new[j] = n_used_old[j % old_shape[1]] + + n_used_new = n_used_new[None, :] + while len(n_used_new.shape) < len(new_shape): + n_used_new = n_used_new.unsqueeze(-1) + new_param /= n_used_new + + sd[name] = new_param + + missing, unexpected = ( + self.load_state_dict(sd, strict=False) + if not 
only_model + else self.model.load_state_dict(sd, strict=False) + ) + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys:\n {missing}") + if len(unexpected) > 0: + print(f"\nUnexpected Keys:\n {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor( + self.log_one_minus_alphas_cumprod, t, x_start.shape + ) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + * noise + ) + + def predict_start_from_z_and_v(self, x_t, t, v): + # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v + ) + + def predict_eps_from_z_and_v(self, x_t, t, v): + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) + * x_t + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor( + self.posterior_log_variance_clipped, t, x_t.shape + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1.0, 1.0) + + model_mean, posterior_variance, posterior_log_variance = self.q_posterior( + x_start=x_recon, x_t=x, t=t + ) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance( + x=x, t=t, clip_denoised=clip_denoised + ) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm( + reversed(range(0, self.num_timesteps)), + desc="Sampling t", + total=self.num_timesteps, + ): + img = self.p_sample( + img, + torch.full((b,), i, device=device, 
dtype=torch.long), + clip_denoised=self.clip_denoised, + ) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop( + (batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates, + ) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise + ) + + def get_v(self, x, noise, t): + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x + ) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == "l1": + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == "l2": + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction="none") + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + elif self.parameterization == "v": + target = self.get_v(x_start, noise, t) + else: + raise NotImplementedError( + f"Parameterization {self.parameterization} not yet supported" + ) + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = "train" if self.training else "val" + + loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f"{log_prefix}/loss": loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint( + 0, self.num_timesteps, (x.shape[0],), device=self.device + ).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, "b h w c -> b c h w") + x = x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + for k in self.ucg_training: + p = self.ucg_training[k]["p"] + val = self.ucg_training[k]["val"] + if val is None: + val = "" + for i in range(len(batch[k])): + if self.ucg_prng.choice(2, p=[1 - p, p]): + batch[k][i] = val + + loss, loss_dict = self.shared_step(batch) + + self.log_dict( + loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True + ) + + self.log( + "global_step", + self.global_step, + prog_bar=True, + 
logger=True, + on_step=True, + on_epoch=False, + ) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]["lr"] + self.log( + "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False + ) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict( + loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True + ) + self.log_dict( + loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True + ) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, "n b c h w -> b n c h w") + denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample( + batch_size=N, return_intermediates=True + ) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + + def __init__( + self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + force_null_conditioning=False, + *args, + **kwargs, + ): + self.force_null_conditioning = force_null_conditioning + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs["timesteps"] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = "concat" if concat_mode else "crossattn" + if ( + cond_stage_config == "__is_unconditional__" + and not self.force_null_conditioning + ): + conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + reset_ema = kwargs.pop("reset_ema", False) + reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) + ignore_keys = kwargs.pop("ignore_keys", []) + 
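For reference, the schedule buffers registered in register_schedule above encode the standard DDPM forward process, and q_sample / get_v are direct transcriptions of the usual formulas (Ho et al. notation, with \bar\alpha_t = \prod_{s \le t}(1-\beta_s)):

    x_t = \sqrt{\bar\alpha_t}\, x_0 + \sqrt{1-\bar\alpha_t}\,\epsilon, \qquad \epsilon \sim \mathcal{N}(0,\mathbf{I}), \qquad v_t = \sqrt{\bar\alpha_t}\,\epsilon - \sqrt{1-\bar\alpha_t}\, x_0,

so the "eps", "x0" and "v" parameterizations handled in p_losses differ only in which of \epsilon, x_0, or v_t the network is trained to regress.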
super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer("scale_factor", torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + if reset_ema: + assert self.use_ema + print( + f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint." + ) + self.model_ema = LitEma(self.model) + if reset_num_ema_updates: + print( + " +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ " + ) + assert self.use_ema + self.model_ema.reset_num_updates() + + def make_cond_schedule( + self, + ): + self.cond_ids = torch.full( + size=(self.num_timesteps,), + fill_value=self.num_timesteps - 1, + dtype=torch.long, + ) + ids = torch.round( + torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) + ).long() + self.cond_ids[: self.num_timesteps_cond] = ids + + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if ( + self.scale_by_std + and self.current_epoch == 0 + and self.global_step == 0 + and batch_idx == 0 + and not self.restarted_from_ckpt + ): + assert ( + self.scale_factor == 1.0 + ), "rather not use custom rescaling and std-rescaling simultaneously" + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer("scale_factor", 1.0 / z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule( + self, + given_betas=None, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + super().register_schedule( + given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s + ) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = 
False + else: + assert config != "__is_first_stage__" + assert config != "__is_unconditional__" + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list( + self, samples, desc="", force_no_decoder_quantization=False + ): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append( + self.decode_first_stage( + zd.to(self.device), force_not_quantize=force_no_decoder_quantization + ) + ) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") + denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError( + f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" + ) + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, "encode") and callable( + self.cond_stage_model.encode + ): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min( + torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 + )[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip( + weighting, + self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], + ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip( + L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"], + ) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold( + self, x, kernel_size, stride, uf=1, df=1 + ): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = 
torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting( + kernel_size[0], kernel_size[1], Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict( + kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, + padding=0, + stride=(stride[0] * uf, stride[1] * uf), + ) + fold = torch.nn.Fold( + output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 + ) + + weighting = self.get_weighting( + kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h * uf, w * uf + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) + ) + + elif df > 1 and uf == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict( + kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, + padding=0, + stride=(stride[0] // df, stride[1] // df), + ) + fold = torch.nn.Fold( + output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 + ) + + weighting = self.get_weighting( + kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h // df, w // df + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) + ) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input( + self, + batch, + k, + return_first_stage_outputs=False, + force_c_encode=False, + cond_key=None, + return_original_cond=False, + bs=None, + return_x=False, + mask_k=None, + ): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if mask_k is not None: + mx = super().get_input(batch, mask_k) + if bs is not None: + mx = mx[:bs] + mx = mx.to(self.device) + encoder_posterior = self.encode_first_stage(mx) + mx = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None and not self.force_null_conditioning: + if cond_key is None: + cond_key = self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ["caption", "coordinates_bbox", "txt"]: + xc = batch[cond_key] + elif cond_key in ["class_label", "cls"]: + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, "pos_x": pos_x, "pos_y": pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {"pos_x": pos_x, "pos_y": pos_y} + 
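As a reading aid for the crop helpers above (get_weighting / get_fold_unfold): the normalization tensor divides out how strongly each output pixel is covered by overlapping crops. Below is a minimal, self-contained sketch of the same blending idea with uniform weights; shapes and variable names are illustrative and not taken from this patch:

import torch

x = torch.randn(1, 3, 64, 64)                    # any feature map
ks, stride = (32, 32), (16, 16)                  # overlapping crops
unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

patches = unfold(x)                              # (1, 3*32*32, L), one column per crop
summed = fold(patches)                           # overlapping crops are summed back together
coverage = fold(unfold(torch.ones_like(x)))      # how often each pixel was covered
blended = summed / coverage                      # recovers x exactly; a non-uniform weighting
                                                 # (as in get_weighting) gives a smooth blend instead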
out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_x: + out.extend([x]) + if return_original_cond: + out.append(xc) + if mask_k: + out.append(mx) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, "b h w c -> b c h w").contiguous() + + z = 1.0 / self.scale_factor * z + return self.first_stage_model.decode(z) + + def decode_first_stage_grad(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, "b h w c -> b c h w").contiguous() + + z = 1.0 / self.scale_factor * z + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint( + 0, self.num_timesteps, (x.shape[0],), device=self.device + ).long() + # t = torch.randint(500, 501, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def apply_model(self, x_noisy, t, cond, return_ids=False): + if isinstance(cond, dict): + # hybrid case, cond is expected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = ( + "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" + ) + cond = {key: cond} + + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - pred_xstart + ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. 
+ """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl( + mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 + ) + return mean_flat(kl_prior) / np.log(2.0) + + def p_mean_variance( + self, + x, + c, + t, + clip_denoised: bool, + return_codebook_ids=False, + quantize_denoised=False, + return_x0=False, + score_corrector=None, + corrector_kwargs=None, + ): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score( + self, model_out, x, t, c, **corrector_kwargs + ) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1.0, 1.0) + if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior( + x_start=x_recon, x_t=x, t=t + ) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample( + self, + x, + c, + t, + clip_denoised=False, + repeat_noise=False, + return_codebook_ids=False, + quantize_denoised=False, + return_x0=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + ): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance( + x=x, + c=c, + t=t, + clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + ) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * ( + 0.5 * model_log_variance + ).exp() * noise, logits.argmax(dim=1) + if return_x0: + return ( + model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, + x0, + ) + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising( + self, + cond, + shape, + verbose=True, + callback=None, + quantize_denoised=False, + img_callback=None, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + batch_size=None, + x_T=None, + start_T=None, + log_every_t=None, + ): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size 
= shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = { + key: cond[key][:batch_size] + if not isinstance(cond[key], list) + else list(map(lambda x: x[:batch_size], cond[key])) + for key in cond + } + else: + cond = ( + [c[:batch_size] for c in cond] + if isinstance(cond, list) + else cond[:batch_size] + ) + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = ( + tqdm( + reversed(range(0, timesteps)), + desc="Progressive Generation", + total=timesteps, + ) + if verbose + else reversed(range(0, timesteps)) + ) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != "hybrid" + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample( + img, + cond, + ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, + return_x0=True, + temperature=temperature[i], + noise_dropout=noise_dropout, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + ) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1.0 - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop( + self, + cond, + shape, + return_intermediates=False, + x_T=None, + verbose=True, + callback=None, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + start_T=None, + log_every_t=None, + ): + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = ( + tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) + if verbose + else reversed(range(0, timesteps)) + ) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != "hybrid" + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample( + img, + cond, + ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, + ) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1.0 - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample( + self, + cond, + batch_size=16, + return_intermediates=False, + x_T=None, + verbose=True, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + shape=None, + **kwargs, + ): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: 
+ if isinstance(cond, dict): + cond = { + key: cond[key][:batch_size] + if not isinstance(cond[key], list) + else list(map(lambda x: x[:batch_size], cond[key])) + for key in cond + } + else: + cond = ( + [c[:batch_size] for c in cond] + if isinstance(cond, list) + else cond[:batch_size] + ) + return self.p_sample_loop( + cond, + shape, + return_intermediates=return_intermediates, + x_T=x_T, + verbose=verbose, + timesteps=timesteps, + quantize_denoised=quantize_denoised, + mask=mask, + x0=x0, + ) + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates = ddim_sampler.sample( + ddim_steps, batch_size, shape, cond, verbose=False, **kwargs + ) + + else: + samples, intermediates = self.sample( + cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs + ) + + return samples, intermediates + + @torch.no_grad() + def get_unconditional_conditioning(self, batch_size, null_label=None): + if null_label is not None: + xc = null_label + if isinstance(xc, ListConfig): + xc = list(xc) + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + if hasattr(xc, "to"): + xc = xc.to(self.device) + c = self.get_learned_conditioning(xc) + else: + if self.cond_stage_key in ["class_label", "cls"]: + xc = self.cond_stage_model.get_unconditional_conditioning( + batch_size, device=self.device + ) + return self.get_learned_conditioning(xc) + else: + raise NotImplementedError("todo") + if isinstance(c, list): # in case the encoder gives us a list + for i in range(len(c)): + c[i] = repeat(c[i], "1 ... -> b ...", b=batch_size).to(self.device) + else: + c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) + return c + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=50, + ddim_eta=0.0, + return_keys=None, + quantize_denoised=True, + inpaint=True, + plot_denoise_rows=False, + plot_progressive_rows=True, + plot_diffusion_rows=True, + unconditional_guidance_scale=1.0, + unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs, + ): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N, + ) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch[self.cond_stage_key], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif self.cond_stage_key in ["class_label", "cls"]: + try: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch["human_label"], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + except KeyError: + # probably no "human_label" in batch + pass + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if ( + quantize_denoised + and not isinstance(self.first_stage_model, AutoencoderKL) + and not isinstance(self.first_stage_model, IdentityFirstStage) + ): + # also display when quantizing x0 while sampling + with ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + quantize_denoised=True, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if unconditional_guidance_scale > 1.0: + uc = self.get_unconditional_conditioning(N, 
unconditional_guidance_label) + if self.model.conditioning_key == "crossattn-adm": + uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]} + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[ + f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" + ] = x_samples_cfg + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 + mask = mask[:, None, ...] + with ema_scope("Plotting Inpaint"): + samples, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + eta=ddim_eta, + ddim_steps=ddim_steps, + x0=z[:N], + mask=mask, + ) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + mask = 1.0 - mask + with ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + eta=ddim_eta, + ddim_steps=ddim_steps, + x0=z[:N], + mask=mask, + ) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising( + c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N, + ) + prog_row = self._get_denoise_row_from_list( + progressives, desc="Progressive Generation" + ) + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print("Diffusion model optimizing logvar") + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert "target" in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + "scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, + } + ] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 + return x + + +class DiffusionWrapper(torch.nn.Module): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.sequential_cross_attn = diff_model_config.pop( + "sequential_crossattn", False + ) + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [ + None, + "concat", + "crossattn", + "hybrid", + "adm", + "hybrid-adm", + "crossattn-adm", + ] + + def forward( + self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None + ): + if self.conditioning_key is None: 
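+ # no conditioning key set: plain unconditional U-Net forward pass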
+ out = self.diffusion_model(x, t) + elif self.conditioning_key == "concat": + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == "crossattn": + if not self.sequential_cross_attn: + cc = torch.cat(c_crossattn, 1) + else: + cc = c_crossattn + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == "hybrid": + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == "hybrid-adm": + assert c_adm is not None + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc, y=c_adm) + elif self.conditioning_key == "crossattn-adm": + assert c_adm is not None + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc, y=c_adm) + elif self.conditioning_key == "adm": + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class LatentUpscaleDiffusion(LatentDiffusion): + def __init__( + self, + *args, + low_scale_config, + low_scale_key="LR", + noise_level_key=None, + **kwargs, + ): + super().__init__(*args, **kwargs) + # assumes that neither the cond_stage nor the low_scale_model contain trainable params + assert not self.cond_stage_trainable + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key + self.noise_level_key = noise_level_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): + if not log_mode: + z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) + else: + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + x_low = batch[self.low_scale_key][:bs] + x_low = rearrange(x_low, "b h w c -> b c h w") + x_low = x_low.to(memory_format=torch.contiguous_format).float() + zx, noise_level = self.low_scale_model(x_low) + if self.noise_level_key is not None: + # get noise level from batch instead, e.g. 
when extracting a custom noise level for bsr + raise NotImplementedError("TODO") + + all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} + if log_mode: + # TODO: maybe disable if too expensive + x_low_rec = self.low_scale_model.decode(zx) + return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level + return z, all_conds + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=200, + ddim_eta=1.0, + return_keys=None, + plot_denoise_rows=False, + plot_progressive_rows=True, + plot_diffusion_rows=True, + unconditional_guidance_scale=1.0, + unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs, + ): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input( + batch, self.first_stage_key, bs=N, log_mode=True + ) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + log["x_lr"] = x_low + log[ + f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}" + ] = x_low_rec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch[self.cond_stage_key], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif self.cond_stage_key in ["class_label", "cls"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch["human_label"], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_tmp = self.get_unconditional_conditioning( + N, unconditional_guidance_label + ) + # TODO explore better "unconditional" choices for the other keys + # maybe guide away from empty text label and highest noise level and maximally degraded zx? + uc = dict() + for k in c: + if k == "c_crossattn": + assert isinstance(c[k], list) and len(c[k]) == 1 + uc[k] = [uc_tmp] + elif k == "c_adm": # todo: only run with text-based guidance? 
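+ # reuse the conditional noise-level embedding as the "unconditional" input; the commented-out
+ # alternative below would instead guide away from the maximum noise level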
+ assert isinstance(c[k], torch.Tensor) + # uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level + uc[k] = c[k] + elif isinstance(c[k], list): + uc[k] = [c[k][i] for i in range(len(c[k]))] + else: + uc[k] = c[k] + + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[ + f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" + ] = x_samples_cfg + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising( + c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N, + ) + prog_row = self._get_denoise_row_from_list( + progressives, desc="Progressive Generation" + ) + log["progressive_row"] = prog_row + + return log + + +class LatentFinetuneDiffusion(LatentDiffusion): + """ + Basis for different finetunes, such as inpainting or depth2image + To disable finetuning mode, set finetune_keys to None + """ + + def __init__( + self, + concat_keys: tuple, + finetune_keys=( + "model.diffusion_model.input_blocks.0.0.weight", + "model_ema.diffusion_modelinput_blocks00weight", + ), + keep_finetune_dims=4, + # if model was trained without concat mode before and we would like to keep these channels + c_concat_log_start=None, # to log reconstruction of c_concat codes + c_concat_log_end=None, + *args, + **kwargs, + ): + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", list()) + super().__init__(*args, **kwargs) + self.finetune_keys = finetune_keys + self.concat_keys = concat_keys + self.keep_dims = keep_finetune_dims + self.c_concat_log_start = c_concat_log_start + self.c_concat_log_end = c_concat_log_end + if exists(self.finetune_keys): + assert exists(ckpt_path), "can only finetune from a given checkpoint" + if exists(ckpt_path): + self.init_from_ckpt(ckpt_path, ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + + # make it explicit, finetune by including extra input channels + if exists(self.finetune_keys) and k in self.finetune_keys: + new_entry = None + for name, param in self.named_parameters(): + if name in self.finetune_keys: + print( + f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only" + ) + new_entry = torch.zeros_like(param) # zero init + assert exists(new_entry), "did not find matching parameter to modify" + new_entry[:, : self.keep_dims, ...]
= sd[k] + sd[k] = new_entry + + missing, unexpected = ( + self.load_state_dict(sd, strict=False) + if not only_model + else self.model.load_state_dict(sd, strict=False) + ) + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=200, + ddim_eta=1.0, + return_keys=None, + quantize_denoised=True, + inpaint=True, + plot_denoise_rows=False, + plot_progressive_rows=True, + plot_diffusion_rows=True, + unconditional_guidance_scale=1.0, + unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs, + ): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input( + batch, self.first_stage_key, bs=N, return_first_stage_outputs=True + ) + c_cat, c = c["c_concat"][0], c["c_crossattn"][0] + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch[self.cond_stage_key], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif self.cond_stage_key in ["class_label", "cls"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch["human_label"], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if not (self.c_concat_log_start is None and self.c_concat_log_end is None): + log["c_concat_decoded"] = self.decode_first_stage( + c_cat[:, self.c_concat_log_start : self.c_concat_log_end] + ) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log( + cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_cross = self.get_unconditional_conditioning( + N, unconditional_guidance_label + ) + uc_cat = c_cat + uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} + with ema_scope("Sampling with 
classifier-free guidance"): + samples_cfg, _ = self.sample_log( + cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc_full, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[ + f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" + ] = x_samples_cfg + + return log + + +class LatentInpaintDiffusion(LatentFinetuneDiffusion): + """ + can either run as pure inpainting model (only concat mode) or with mixed conditionings, + e.g. mask as concat and text via cross-attn. + To disable finetuning mode, set finetune_keys to None + """ + + def __init__( + self, + concat_keys=("mask", "masked_image"), + masked_image_key="masked_image", + *args, + **kwargs, + ): + super().__init__(concat_keys, *args, **kwargs) + self.masked_image_key = masked_image_key + assert self.masked_image_key in concat_keys + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for inpainting" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + c_cat = list() + for ck in self.concat_keys: + cc = ( + rearrange(batch[ck], "b h w c -> b c h w") + .to(memory_format=torch.contiguous_format) + .float() + ) + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + bchw = z.shape + if ck != self.masked_image_key: + cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) + else: + cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs) + log["masked_image"] = ( + rearrange(args[0]["masked_image"], "b h w c -> b c h w") + .to(memory_format=torch.contiguous_format) + .float() + ) + return log + + +class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion): + """ + condition on monocular depth estimation + """ + + def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs): + super().__init__(concat_keys=concat_keys, *args, **kwargs) + self.depth_model = instantiate_from_config(depth_stage_config) + self.depth_stage_key = concat_keys[0] + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for depth2img" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + assert len(self.concat_keys) == 1 + c_cat = list() + for ck in self.concat_keys: + cc = batch[ck] + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + cc = self.depth_model(cc) + cc = torch.nn.functional.interpolate( + cc, + size=z.shape[2:], + mode="bicubic", + align_corners=False, + ) + + depth_min, depth_max = 
torch.amin( + cc, dim=[1, 2, 3], keepdim=True + ), torch.amax(cc, dim=[1, 2, 3], keepdim=True) + cc = 2.0 * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.0 + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super().log_images(*args, **kwargs) + depth = self.depth_model(args[0][self.depth_stage_key]) + depth_min, depth_max = torch.amin( + depth, dim=[1, 2, 3], keepdim=True + ), torch.amax(depth, dim=[1, 2, 3], keepdim=True) + log["depth"] = 2.0 * (depth - depth_min) / (depth_max - depth_min) - 1.0 + return log + + +class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion): + """ + condition on low-res image (and optionally on some spatial noise augmentation) + """ + + def __init__( + self, + concat_keys=("lr",), + reshuffle_patch_size=None, + low_scale_config=None, + low_scale_key=None, + *args, + **kwargs, + ): + super().__init__(concat_keys=concat_keys, *args, **kwargs) + self.reshuffle_patch_size = reshuffle_patch_size + self.low_scale_model = None + if low_scale_config is not None: + print("Initializing a low-scale model") + assert exists(low_scale_key) + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for upscaling-ft" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + assert len(self.concat_keys) == 1 + # optionally make spatial noise_level here + c_cat = list() + noise_level = None + for ck in self.concat_keys: + cc = batch[ck] + cc = rearrange(cc, "b h w c -> b c h w") + if exists(self.reshuffle_patch_size): + assert isinstance(self.reshuffle_patch_size, int) + cc = rearrange( + cc, + "b c (p1 h) (p2 w) -> b (p1 p2 c) h w", + p1=self.reshuffle_patch_size, + p2=self.reshuffle_patch_size, + ) + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + if exists(self.low_scale_model) and ck == self.low_scale_key: + cc, noise_level = self.low_scale_model(cc) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + if exists(noise_level): + all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level} + else: + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super().log_images(*args, **kwargs) + log["lr"] = rearrange(args[0]["lr"], "b h w c -> b c h w") + return log diff --git a/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py b/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py new file mode 100644 index 0000000..7427f38 --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py @@ -0,0 +1 @@ +from .sampler import 
DPMSolverSampler \ No newline at end of file diff --git a/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py b/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py new file mode 100644 index 0000000..095e5ba --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py @@ -0,0 +1,1154 @@ +import torch +import torch.nn.functional as F +import math +from tqdm import tqdm + + +class NoiseScheduleVP: + def __init__( + self, + schedule='discrete', + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20., + ): + """Create a wrapper class for the forward SDE (VP type). + *** + Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. + We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. + *** + The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). + We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). + Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have: + log_alpha_t = self.marginal_log_mean_coeff(t) + sigma_t = self.marginal_std(t) + lambda_t = self.marginal_lambda(t) + Moreover, as lambda(t) is an invertible function, we also support its inverse function: + t = self.inverse_lambda(lambda_t) + =============================================================== + We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). + 1. For discrete-time DPMs: + For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: + t_i = (i + 1) / N + e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. + We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. + Args: + betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) + alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) + Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. + **Important**: Please pay special attention for the args for `alphas_cumprod`: + The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that + q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). + Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have + alpha_{t_n} = \sqrt{\hat{alpha_n}}, + and + log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). + 2. For continuous-time DPMs: + We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise + schedule are the default settings in DDPM and improved-DDPM: + Args: + beta_min: A `float` number. The smallest beta for the linear schedule. + beta_max: A `float` number. The largest beta for the linear schedule. + cosine_s: A `float` number. The hyperparameter in the cosine schedule. + cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. + T: A `float` number. The ending time of the forward process. + =============================================================== + Args: + schedule: A `str`. The noise schedule of the forward SDE. 
'discrete' for discrete-time DPMs, + 'linear' or 'cosine' for continuous-time DPMs. + Returns: + A wrapper object of the forward SDE (VP type). + + =============================================================== + Example: + # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', betas=betas) + # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + # For continuous-time DPMs (VPSDE), linear schedule: + >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) + """ + + if schedule not in ['discrete', 'linear', 'cosine']: + raise ValueError( + "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format( + schedule)) + + self.schedule = schedule + if schedule == 'discrete': + if betas is not None: + log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) + else: + assert alphas_cumprod is not None + log_alphas = 0.5 * torch.log(alphas_cumprod) + self.total_N = len(log_alphas) + self.T = 1. + self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * ( + 1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), + self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. + """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. 
* lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0 ** 2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), + torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * ( + 1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. + We support four types of the diffusion model by setting `model_type`: + 1. "noise": noise prediction model. (Trained by predicting noise). + 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). + 3. "v": velocity prediction model. (Trained by predicting the velocity). + The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. + [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." + arXiv preprint arXiv:2202.00512 (2022). + [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." + arXiv preprint arXiv:2210.02303 (2022). + + 4. "score": marginal score function. (Trained by denoising score matching). + Note that the score function and the noise prediction model follows a simple relationship: + ``` + noise(x_t, t) = -sigma_t * score(x_t, t) + ``` + We support three types of guided sampling by DPMs by setting `guidance_type`: + 1. "uncond": unconditional sampling by DPMs. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + The input `classifier_fn` has the following format: + `` + classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) + `` + [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," + in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. + 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. + The input `model` has the following format: + `` + model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score + `` + And if cond == `unconditional_condition`, the model output is the unconditional DPM output. + [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." + arXiv preprint arXiv:2207.12598 (2022). + + The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) + or continuous-time labels (i.e. epsilon to T). 
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: + `` + def model_fn(x, t_continuous) -> noise: + t_input = get_model_input_time(t_continuous) + return noise_pred(model, x, t_input, **model_kwargs) + `` + where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. + =============================================================== + Args: + model: A diffusion model with the corresponding format described above. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + model_type: A `str`. The parameterization type of the diffusion model. + "noise" or "x_start" or "v" or "score". + model_kwargs: A `dict`. A dict for the other inputs of the model function. + guidance_type: A `str`. The type of the guidance for sampling. + "uncond" or "classifier" or "classifier-free". + condition: A pytorch tensor. The condition for the guided sampling. + Only used for "classifier" or "classifier-free" guidance type. + unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. + Only used for "classifier-free" guidance type. + guidance_scale: A `float`. The scale for the guided sampling. + classifier_fn: A classifier function. Only used for the classifier guidance. + classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. + Returns: + A noise prediction model that accepts the noised data and the continuous time as the inputs. + """ + + def get_model_input_time(t_continuous): + """ + Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. + For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. + For continuous-time DPMs, we just use `t_continuous`. + """ + if noise_schedule.schedule == 'discrete': + return (t_continuous - 1. / noise_schedule.total_N) * 1000. + else: + return t_continuous + + def noise_pred_fn(x, t_continuous, cond=None): + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + t_input = get_model_input_time(t_continuous) + if cond is None: + output = model(x, t_input, **model_kwargs) + else: + output = model(x, t_input, cond, **model_kwargs) + if model_type == "noise": + return output + elif model_type == "x_start": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) + elif model_type == "v": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x + elif model_type == "score": + sigma_t = noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return -expand_dims(sigma_t, dims) * output + + def cond_grad_fn(x, t_input): + """ + Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). + """ + with torch.enable_grad(): + x_in = x.detach().requires_grad_(True) + log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) + return torch.autograd.grad(log_prob.sum(), x_in)[0] + + def model_fn(x, t_continuous): + """ + The noise predicition model function that is used for DPM-Solver. 
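+ It dispatches between the 'uncond', 'classifier', and 'classifier-free' guidance branches configured above.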
+ """ + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1. or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v"] + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): + """Construct a DPM-Solver. + We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). + If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. + thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. + max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. + + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = model_fn + self.noise_schedule = noise_schedule + self.predict_x0 = predict_x0 + self.thresholding = thresholding + self.max_val = max_val + + def noise_prediction_fn(self, x, t): + """ + Return the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the data prediction model (with thresholding). + """ + noise = self.noise_prediction_fn(x, t) + dims = x.dim() + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) + if self.thresholding: + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. 
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.predict_x0: + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: A `int`. The total number of the spacing of the time steps. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,). + """ + if skip_type == 'logSNR': + lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) + lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) + logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) + return self.noise_schedule.inverse_lambda(logSNR_steps) + elif skip_type == 'time_uniform': + return torch.linspace(t_T, t_0, N + 1).to(device) + elif skip_type == 'time_quadratic': + t_order = 2 + t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device) + return t + else: + raise ValueError( + "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + + def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): + """ + Get the order of each step for sampling by the singlestep DPM-Solver. + We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". + Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: + - If order == 1: + We take `steps` of DPM-Solver-1 (i.e. DDIM). + - If order == 2: + - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of DPM-Solver-2. + - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) 
+ t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. + """ + if order == 3: + K = steps // 3 + 1 + if steps % 3 == 0: + orders = [3, ] * (K - 2) + [2, 1] + elif steps % 3 == 1: + orders = [3, ] * (K - 1) + [1] + else: + orders = [3, ] * (K - 1) + [2] + elif order == 2: + if steps % 2 == 0: + K = steps // 2 + orders = [2, ] * K + else: + K = steps // 2 + 1 + orders = [2, ] * (K - 1) + [1] + elif order == 1: + K = 1 + orders = [1, ] * steps + else: + raise ValueError("'order' must be '1' or '2' or '3'.") + if skip_type == 'logSNR': + # To reproduce the results in DPM-Solver paper + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) + else: + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ + torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)] + return timesteps_outer, orders + + def denoise_to_zero_fn(self, x, s): + """ + Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization. + """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + if self.predict_x0: + phi_1 = torch.expm1(-h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + else: + phi_1 = torch.expm1(h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + + def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, + solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-2 from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the second-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`.
If true, also return the model value at time `s` and `s1` (the intermediate time). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff( + s1), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * ( + model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None, + return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). 
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 1. / 3. + if r2 is None: + r2 = 2. / 3. + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff( + s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std( + s2), ns.marginal_std(t) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. + phi_2 = phi_1 / h + 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(sigma_s2 / sigma_s, dims) * x + - expand_dims(alpha_s2 * phi_12, dims) * model_s + + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + expand_dims(alpha_t * phi_2, dims) * D1 + - expand_dims(alpha_t * phi_3, dims) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. + phi_2 = phi_1 / h - 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x + - expand_dims(sigma_s2 * phi_12, dims) * model_s + - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. 
/ r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - expand_dims(sigma_t * phi_2, dims) * D1 + - expand_dims(sigma_t * phi_3, dims) * D2 + ) + + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + ns = self.noise_schedule + dims = x.dim() + model_prev_1, model_prev_0 = model_prev_list + t_prev_1, t_prev_0 = t_prev_list + lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( + t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + if self.predict_x0: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 + ) + else: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. 
The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda( + t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) + D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) + D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) + if self.predict_x0: + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 + - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2 + ) + else: + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 + - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2 + ) + return x_t + + def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, + r2=None): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
+ """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, + solver_type=solver_type, r1=r1) + elif order == 3: + return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, + solver_type=solver_type, r1=r1, r2=r2) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, + solver_type='dpm_solver'): + """ + The adaptive step size solver based on singlestep DPM-Solver. + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. 
+ """ + ns = self.noise_schedule + s = t_T * torch.ones((x.shape[0],)).to(x) + lambda_s = ns.marginal_lambda(s) + lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) + h = h_init * torch.ones_like(s).to(x) + x_prev = x + nfe = 0 + if order == 2: + r1 = 0.5 + lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, + solver_type=solver_type, + **kwargs) + elif order == 3: + r1, r2 = 1. / 3., 2. / 3. + lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, + return_intermediate=True, + solver_type=solver_type) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, + solver_type=solver_type, + **kwargs) + else: + raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) + while torch.abs((s - t_0)).mean() > t_err: + t = ns.inverse_lambda(lambda_s + h) + x_lower, lower_noise_kwargs = lower_update(x, s, t) + x_higher = higher_update(x, s, t, **lower_noise_kwargs) + delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) + norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) + E = norm_fn((x_higher - x_lower) / delta).max() + if torch.all(E <= 1.): + x = x_higher + s = t + x_prev = x_lower + lambda_s = ns.marginal_lambda(s) + h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) + nfe += order + print('adaptive solver nfe', nfe) + return x + + def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', + method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', + atol=0.0078, rtol=0.05, + ): + """ + Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. + ===================================================== + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. 
+ Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We first use 1 step of DPM-Solver-1, then use (K - 1) steps of multistep DPM-Solver-2. + - If `order` == 3: + - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3. + - 'singlestep_fixed': + Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). + We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. + - 'adaptive': + Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). + We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. + You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs + (NFE) and the sample quality. + - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. + - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. + ===================================================== + Some advice for choosing the algorithm: + - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: + Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, + skip_type='time_uniform', method='singlestep') + - For **guided sampling with large guidance scale** by DPMs: + Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, + skip_type='time_uniform', method='multistep') + We support three types of `skip_type`: + - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** + - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. + - 'time_quadratic': quadratic time for the time steps. + ===================================================== + Args: + x: A pytorch tensor. The initial value at time `t_start` + e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. + steps: A `int`. The total number of function evaluations (NFE). + t_start: A `float`. The starting time of the sampling. + If `t_start` is None, we use self.noise_schedule.T (default is 1.0). + t_end: A `float`. The ending time of the sampling. + If `t_end` is None, we use 1. / self.noise_schedule.total_N. + e.g. if total_N == 1000, we have `t_end` == 1e-3. + For discrete-time DPMs: + - We recommend `t_end` == 1. / self.noise_schedule.total_N. + For continuous-time DPMs: + - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. + order: A `int`. The order of DPM-Solver. + skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. + method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. + denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. + Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
+ This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and + score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID + for diffusion models sampling by diffusion SDEs for low-resolutional images + (such as CIFAR-10). However, we observed that such trick does not matter for + high-resolutional images. As it needs an additional NFE, we do not recommend + it for high-resolutional images. + lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. + Only valid for `method=multistep` and `steps < 15`. We empirically find that + this trick is a key to stabilizing the sampling by DPM-Solver with very few steps + (especially for steps <= 10). So we recommend to set it to be `True`. + solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. + atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + """ + t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + device = x.device + if method == 'adaptive': + with torch.no_grad(): + x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, + solver_type=solver_type) + elif method == 'multistep': + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + with torch.no_grad(): + vec_t = timesteps[0].expand((x.shape[0])) + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + # Init the first `order` values by lower order multistep DPM-Solver. + for init_order in tqdm(range(1, order), desc="DPM init order"): + vec_t = timesteps[init_order].expand(x.shape[0]) + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, + solver_type=solver_type) + model_prev_list.append(self.model_fn(x, vec_t)) + t_prev_list.append(vec_t) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in tqdm(range(order, steps + 1), desc="DPM multistep"): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, + solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. 
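+ # (it is never used by a later update, so skipping it saves one model evaluation)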
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, + skip_type=skip_type, + t_T=t_T, t_0=t_0, + device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order, ] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), + N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. + """ + N, K = x.shape[0], xp.shape[1] + all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) + sorted_all_x, x_indices = torch.sort(all_x, dim=2) + x_idx = torch.argmin(x_indices, dim=2) + cand_start_idx = x_idx - 1 + start_idx = torch.where( + torch.eq(x_idx, 0), + torch.tensor(1, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) + start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) + end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) + start_idx2 = torch.where( + torch.eq(x_idx, 0), + torch.tensor(0, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) + start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) + end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) + cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) + return cand + + +def expand_dims(v, dims): + """ + Expand the tensor `v` to the dim `dims`. + Args: + `v`: a PyTorch tensor with shape [N]. + `dim`: a `int`. + Returns: + a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
+ """ + return v[(...,) + (None,) * (dims - 1)] \ No newline at end of file diff --git a/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py b/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py new file mode 100644 index 0000000..7d137b8 --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py @@ -0,0 +1,87 @@ +"""SAMPLING ONLY.""" +import torch + +from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver + + +MODEL_TYPES = { + "eps": "noise", + "v": "v" +} + + +class DPMSolverSampler(object): + def __init__(self, model, **kwargs): + super().__init__() + self.model = model + to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) + self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') + + device = self.model.betas.device + if x_T is None: + img = torch.randn(size, device=device) + else: + img = x_T + + ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + + model_fn = model_wrapper( + lambda x, t, c: self.model.apply_model(x, t, c), + ns, + model_type=MODEL_TYPES[self.model.parameterization], + guidance_type="classifier-free", + condition=conditioning, + unconditional_condition=unconditional_conditioning, + guidance_scale=unconditional_guidance_scale, + ) + + dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) + x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) + + return x.to(device), None \ No newline at end of file diff --git a/inpaint/model/anytext/ldm/models/diffusion/plms.py b/inpaint/model/anytext/ldm/models/diffusion/plms.py new file mode 100644 index 0000000..5f35d55 --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/plms.py @@ -0,0 +1,244 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like +from iopaint.model.anytext.ldm.models.diffusion.sampling_util import norm_thresholding + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule 
+ + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ dynamic_threshold=None, + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, + dynamic_threshold=None): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next, + dynamic_threshold=dynamic_threshold) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, + dynamic_threshold=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + if dynamic_threshold is not None: + pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3rd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4th order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/inpaint/model/anytext/ldm/models/diffusion/sampling_util.py b/inpaint/model/anytext/ldm/models/diffusion/sampling_util.py new file mode 100644 index 0000000..7eff02b --- /dev/null +++ b/inpaint/model/anytext/ldm/models/diffusion/sampling_util.py @@ -0,0 +1,22 @@ +import torch +import numpy as np + + +def append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions. + From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" + dims_to_append = target_dims - x.ndim + if dims_to_append < 0: + raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') + return x[(...,) + (None,) * dims_to_append] + + +def norm_thresholding(x0, value): + s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) + return x0 * (value / s) + + +def spatial_norm_thresholding(x0, value): + # b c h w + s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) + return x0 * (value / s) \ No newline at end of file diff --git a/inpaint/model/anytext/ldm/modules/__init__.py b/inpaint/model/anytext/ldm/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/modules/attention.py b/inpaint/model/anytext/ldm/modules/attention.py new file mode 100644 index 0000000..df92aa7 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/attention.py @@ -0,0 +1,360 @@ +from inspect import isfunction +import math +import torch +import torch.nn.functional as F +from torch import nn, einsum +from einops import rearrange, repeat +from typing import Optional, Any + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import checkpoint + + +# CrossAttn precision handling +import os + +_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32") + + +def exists(val): + return val is not None + + +def uniq(arr): + return {el: True for el in arr}.keys() + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def max_neg_value(t): + return -torch.finfo(t.dtype).max + + +def init_(tensor): + dim = tensor.shape[-1] + std = 1 / math.sqrt(dim) + tensor.uniform_(-std, std) + return tensor + + +# feedforward +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim,
dim_out=None, mult=4, glu=False, dropout=0.0): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = ( + nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) + if not glu + else GEGLU(dim, inner_dim) + ) + + self.net = nn.Sequential( + project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm( + num_groups=32, num_channels=in_channels, eps=1e-6, affine=True + ) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = rearrange(q, "b c h w -> b (h w) c") + k = rearrange(k, "b c h w -> b c (h w)") + w_ = torch.einsum("bij,bjk->bik", q, k) + + w_ = w_ * (int(c) ** (-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, "b c h w -> b c (h w)") + w_ = rearrange(w_, "b i j -> b j i") + h_ = torch.einsum("bij,bjk->bik", v, w_) + h_ = rearrange(h_, "b c (h w) -> b c h w", h=h) + h_ = self.proj_out(h_) + + return x + h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head**-0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v)) + + # force cast to fp32 to avoid overflowing + if _ATTN_PRECISION == "fp32": + with torch.autocast(enabled=False, device_type="cuda"): + q, k = q.float(), k.float() + sim = einsum("b i d, b j d -> b i j", q, k) * self.scale + else: + sim = einsum("b i d, b j d -> b i j", q, k) * self.scale + + del q, k + + if exists(mask): + mask = rearrange(mask, "b ... 
-> b (...)") + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, "b j -> (b h) () j", h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + sim = sim.softmax(dim=-1) + + out = einsum("b i j, b j d -> b i d", sim, v) + out = rearrange(out, "(b h) n d -> b n (h d)", h=h) + return self.to_out(out) + + +class SDPACrossAttention(CrossAttention): + def forward(self, x, context=None, mask=None): + batch_size, sequence_length, inner_dim = x.shape + + if mask is not None: + mask = self.prepare_attention_mask(mask, sequence_length, batch_size) + mask = mask.view(batch_size, self.heads, -1, mask.shape[-1]) + + h = self.heads + q_in = self.to_q(x) + context = default(context, x) + + k_in = self.to_k(context) + v_in = self.to_v(context) + + head_dim = inner_dim // h + q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + + del q_in, k_in, v_in + + dtype = q.dtype + if _ATTN_PRECISION == "fp32": + q, k, v = q.float(), k.float(), v.float() + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + hidden_states = torch.nn.functional.scaled_dot_product_attention( + q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, h * head_dim + ) + hidden_states = hidden_states.to(dtype) + + # linear proj + hidden_states = self.to_out[0](hidden_states) + # dropout + hidden_states = self.to_out[1](hidden_states) + return hidden_states + + +class BasicTransformerBlock(nn.Module): + def __init__( + self, + dim, + n_heads, + d_head, + dropout=0.0, + context_dim=None, + gated_ff=True, + checkpoint=True, + disable_self_attn=False, + ): + super().__init__() + + if hasattr(torch.nn.functional, "scaled_dot_product_attention"): + attn_cls = SDPACrossAttention + else: + attn_cls = CrossAttention + + self.disable_self_attn = disable_self_attn + self.attn1 = attn_cls( + query_dim=dim, + heads=n_heads, + dim_head=d_head, + dropout=dropout, + context_dim=context_dim if self.disable_self_attn else None, + ) # is a self-attention if not self.disable_self_attn + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = attn_cls( + query_dim=dim, + context_dim=context_dim, + heads=n_heads, + dim_head=d_head, + dropout=dropout, + ) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint( + self._forward, (x, context), self.parameters(), self.checkpoint + ) + + def _forward(self, x, context=None): + x = ( + self.attn1( + self.norm1(x), context=context if self.disable_self_attn else None + ) + + x + ) + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. 
+ Finally, reshape to image + NEW: use_linear for more efficiency instead of the 1x1 convs + """ + + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + dropout=0.0, + context_dim=None, + disable_self_attn=False, + use_linear=False, + use_checkpoint=True, + ): + super().__init__() + if exists(context_dim) and not isinstance(context_dim, list): + context_dim = [context_dim] + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + if not use_linear: + self.proj_in = nn.Conv2d( + in_channels, inner_dim, kernel_size=1, stride=1, padding=0 + ) + else: + self.proj_in = nn.Linear(in_channels, inner_dim) + + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + n_heads, + d_head, + dropout=dropout, + context_dim=context_dim[d], + disable_self_attn=disable_self_attn, + checkpoint=use_checkpoint, + ) + for d in range(depth) + ] + ) + if not use_linear: + self.proj_out = zero_module( + nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_out = zero_module(nn.Linear(in_channels, inner_dim)) + self.use_linear = use_linear + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = rearrange(x, "b c h w -> b (h w) c").contiguous() + if self.use_linear: + x = self.proj_in(x) + for i, block in enumerate(self.transformer_blocks): + x = block(x, context=context[i]) + if self.use_linear: + x = self.proj_out(x) + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out(x) + return x + x_in diff --git a/inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py b/inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/modules/diffusionmodules/model.py b/inpaint/model/anytext/ldm/modules/diffusionmodules/model.py new file mode 100644 index 0000000..3472824 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,973 @@ +# pytorch_diffusion + derived encoder decoder +import math + +import numpy as np +import torch +import torch.nn as nn + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". 
+ """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +def nonlinearity(x): + # swish + return x * torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm( + num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True + ) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=2, padding=0 + ) + + def forward(self, x): + if self.with_conv: + pad = (0, 1, 0, 1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + dropout, + temb_channels=512, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d( + out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + else: + self.nin_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x + h + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + 
self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = q.reshape(b, c, h * w) + q = q.permute(0, 2, 1) # b,hw,c + k = k.reshape(b, c, h * w) # b,c,hw + w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c) ** (-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b, c, h * w) + w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v, w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b, c, h, w) + + h_ = self.proj_out(h_) + + return x + h_ + + +class AttnBlock2_0(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + # output: [1, 512, 64, 64] + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + + # q = q.reshape(b, c, h * w).transpose() + # q = q.permute(0, 2, 1) # b,hw,c + # k = k.reshape(b, c, h * w) # b,c,hw + q = q.transpose(1, 2) + k = k.transpose(1, 2) + v = v.transpose(1, 2) + # (batch, num_heads, seq_len, head_dim) + hidden_states = torch.nn.functional.scaled_dot_product_attention( + q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False + ) + hidden_states = hidden_states.transpose(1, 2) + hidden_states = hidden_states.to(q.dtype) + + h_ = self.proj_out(hidden_states) + + return x + h_ + + +def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): + assert attn_type in [ + "vanilla", + "vanilla-xformers", + "memory-efficient-cross-attn", + "linear", + "none", + ], f"attn_type {attn_type} unknown" + assert attn_kwargs is None + if hasattr(torch.nn.functional, "scaled_dot_product_attention"): + # print(f"Using torch.nn.functional.scaled_dot_product_attention") + return AttnBlock2_0(in_channels) + return AttnBlock(in_channels) + + +class Model(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + use_timestep=True, + use_linear_attn=False, + attn_type="vanilla", + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch * 4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList( + [ + torch.nn.Linear(self.ch, self.temb_ch), + torch.nn.Linear(self.temb_ch, self.temb_ch), + ] + ) + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = 
nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + skip_in = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + if i_block == self.num_res_blocks: + skip_in = ch * in_ch_mult[i_level] + block.append( + ResnetBlock( + in_channels=block_in + skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x, t=None, context=None): + # assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb + ) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + 
resolution, + z_channels, + double_z=True, + use_linear_attn=False, + attn_type="vanilla", + **ignore_kwargs, + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, + 2 * z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1, + ) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + z_channels, + give_pre_end=False, + tanh_out=False, + use_linear_attn=False, + attn_type="vanilla", + **ignorekwargs, + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,) + tuple(ch_mult) + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + print( + "Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape) + ) + ) + + # z to block_in + self.conv_in = torch.nn.Conv2d( + 
z_channels, block_in, kernel_size=3, stride=1, padding=1 + ) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, z): + # assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList( + [ + nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock( + in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, + dropout=0.0, + ), + ResnetBlock( + in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, + dropout=0.0, + ), + ResnetBlock( + in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, + dropout=0.0, + ), + nn.Conv2d(2 * in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True), + ] + ) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1, 2, 3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + ch, + num_res_blocks, + resolution, + ch_mult=(2, 2), + dropout=0.0, + ): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] 
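+ # each resolution level stacks num_res_blocks + 1 ResnetBlocks; every level except the last is then followed by an Upsample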
+ block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = nn.Conv2d( + in_channels, mid_channels, kernel_size=3, stride=1, padding=1 + ) + self.res_block1 = nn.ModuleList( + [ + ResnetBlock( + in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0, + ) + for _ in range(depth) + ] + ) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList( + [ + ResnetBlock( + in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0, + ) + for _ in range(depth) + ] + ) + + self.conv_out = nn.Conv2d( + mid_channels, + out_channels, + kernel_size=1, + ) + + def forward(self, x): + x = self.conv_in(x) + for block in self.res_block1: + x = block(x, None) + x = torch.nn.functional.interpolate( + x, + size=( + int(round(x.shape[2] * self.factor)), + int(round(x.shape[3] * self.factor)), + ), + ) + x = self.attn(x) + for block in self.res_block2: + x = block(x, None) + x = self.conv_out(x) + return x + + +class MergedRescaleEncoder(nn.Module): + def __init__( + self, + in_channels, + ch, + resolution, + out_ch, + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + ch_mult=(1, 2, 4, 8), + rescale_factor=1.0, + rescale_module_depth=1, + ): + super().__init__() + intermediate_chn = ch * ch_mult[-1] + self.encoder = Encoder( + in_channels=in_channels, + num_res_blocks=num_res_blocks, + ch=ch, + ch_mult=ch_mult, + z_channels=intermediate_chn, + double_z=False, + resolution=resolution, + attn_resolutions=attn_resolutions, + dropout=dropout, + resamp_with_conv=resamp_with_conv, + out_ch=None, + ) + self.rescaler = LatentRescaler( + factor=rescale_factor, + in_channels=intermediate_chn, + mid_channels=intermediate_chn, + out_channels=out_ch, + depth=rescale_module_depth, + ) + + def forward(self, x): + x = self.encoder(x) + x = self.rescaler(x) + return x + + +class MergedRescaleDecoder(nn.Module): + def __init__( + self, + z_channels, + out_ch, + resolution, + num_res_blocks, + attn_resolutions, + ch, + ch_mult=(1, 2, 4, 8), + dropout=0.0, + resamp_with_conv=True, + rescale_factor=1.0, + rescale_module_depth=1, + ): + super().__init__() + tmp_chn = z_channels * ch_mult[-1] + self.decoder = Decoder( + out_ch=out_ch, + z_channels=tmp_chn, + attn_resolutions=attn_resolutions, + dropout=dropout, + resamp_with_conv=resamp_with_conv, + in_channels=None, + num_res_blocks=num_res_blocks, + ch_mult=ch_mult, + resolution=resolution, + ch=ch, + 
) + self.rescaler = LatentRescaler( + factor=rescale_factor, + in_channels=z_channels, + mid_channels=tmp_chn, + out_channels=tmp_chn, + depth=rescale_module_depth, + ) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Upsampler(nn.Module): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): + super().__init__() + assert out_size >= in_size + num_blocks = int(np.log2(out_size // in_size)) + 1 + factor_up = 1.0 + (out_size % in_size) + print( + f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}" + ) + self.rescaler = LatentRescaler( + factor=factor_up, + in_channels=in_channels, + mid_channels=2 * in_channels, + out_channels=in_channels, + ) + self.decoder = Decoder( + out_ch=out_channels, + resolution=out_size, + z_channels=in_channels, + num_res_blocks=2, + attn_resolutions=[], + in_channels=None, + ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)], + ) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Resize(nn.Module): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): + super().__init__() + self.with_conv = learned + self.mode = mode + if self.with_conv: + print( + f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode" + ) + raise NotImplementedError() + assert in_channels is not None + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=4, stride=2, padding=1 + ) + + def forward(self, x, scale_factor=1.0): + if scale_factor == 1.0: + return x + else: + x = torch.nn.functional.interpolate( + x, mode=self.mode, align_corners=False, scale_factor=scale_factor + ) + return x diff --git a/inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py b/inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 0000000..fd3d6be --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,786 @@ +from abc import abstractmethod +import math + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer +from iopaint.model.anytext.ldm.util import exists + + +# dummy replace +def convert_module_to_f16(x): + pass + +def convert_module_to_f32(x): + pass + + +## go +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + + def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) + x = self.qkv_proj(x) + x = self.attention(x) + x =
self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. 
+ :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None, + disable_middle_self_attn=False, + use_linear_in_transformer=False, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError("provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult") + self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) + print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set.") + self.use_fp16 = use_fp16 + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + if isinstance(self.num_classes, int): + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + elif self.num_classes == "continuous": + print("setting up linear c_adm embedding layer") + self.label_emb = nn.Linear(1, time_embed_dim) + else: + raise ValueError() + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + 
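+ # add attention at this block only when num_attention_blocks does not cap it; a SpatialTransformer is used for cross-attention, otherwise a plain AttentionBlock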
if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(self.num_res_blocks[level] + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or i < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + if level and i == self.num_res_blocks[level]: + out_ch = ch + layers.append( + ResBlock( 
+ ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. + """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) diff --git a/inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py b/inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py new file mode 100644 index 0000000..5f92630 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py @@ -0,0 +1,81 @@ +import torch +import torch.nn as nn +import numpy as np +from functools import partial + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule +from iopaint.model.anytext.ldm.util import default + + +class AbstractLowScaleModel(nn.Module): + # for concatenating a downsampled image to the latent representation + def __init__(self, noise_schedule_config=None): + super(AbstractLowScaleModel, self).__init__() + if noise_schedule_config is not None: + self.register_schedule(**noise_schedule_config) + + def register_schedule(self, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def forward(self, x): + return x, None + + def decode(self, x): + return x + + +class SimpleImageConcat(AbstractLowScaleModel): + # no noise level conditioning + def __init__(self): + super(SimpleImageConcat, self).__init__(noise_schedule_config=None) + self.max_noise_level = 0 + + def forward(self, x): + # fix to constant noise level + return x, torch.zeros(x.shape[0], device=x.device).long() + + +class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel): + def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False): + super().__init__(noise_schedule_config=noise_schedule_config) + self.max_noise_level = max_noise_level + + def forward(self, x, noise_level=None): + if noise_level is None: + noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() + else: + assert isinstance(noise_level, torch.Tensor) + z = self.q_sample(x, noise_level) + return z, noise_level + + + diff --git a/inpaint/model/anytext/ldm/modules/diffusionmodules/util.py b/inpaint/model/anytext/ldm/modules/diffusionmodules/util.py new file mode 100644 index 0000000..da29c72 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,271 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
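+# This module collects the small diffusion utilities shared by the UNet code: beta schedules, DDIM timestep/sigma selection, sinusoidal timestep embeddings, gradient checkpointing, and the conv/linear/pooling factory helpers.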
+ + +import os +import math +import torch +import torch.nn as nn +import numpy as np +from einops import repeat + +from iopaint.model.anytext.ldm.util import instantiate_from_config + + +def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if schedule == "linear": + betas = ( + torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 + ) + + elif schedule == "cosine": + timesteps = ( + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s + ) + alphas = timesteps / (1 + cosine_s) * np.pi / 2 + alphas = torch.cos(alphas).pow(2) + alphas = alphas / alphas[0] + betas = 1 - alphas[1:] / alphas[:-1] + betas = np.clip(betas, a_min=0, a_max=0.999) + + elif schedule == "sqrt_linear": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) + elif schedule == "sqrt": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 + else: + raise ValueError(f"schedule '{schedule}' unknown.") + return betas.numpy() + + +def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): + if ddim_discr_method == 'uniform': + c = num_ddpm_timesteps // num_ddim_timesteps + ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + elif ddim_discr_method == 'quad': + ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) + else: + raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') + + # assert ddim_timesteps.shape[0] == num_ddim_timesteps + # add one to get the final alpha values right (the ones from first scale to data during sampling) + steps_out = ddim_timesteps + 1 + if verbose: + print(f'Selected timesteps for ddim sampler: {steps_out}') + return steps_out + + +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): + # select alphas for computing the variance schedule + alphas = alphacums[ddim_timesteps] + alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) + + # according the the formula provided in https://arxiv.org/abs/2010.02502 + sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) + if verbose: + print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') + print(f'For the chosen value of eta, which is {eta}, ' + f'this results in the following sigma_t schedule for ddim sampler {sigmas}') + return sigmas.to(torch.float32), alphas.to(torch.float32), alphas_prev.astype(np.float32) + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(), + "dtype": torch.get_autocast_gpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled()} + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(), \ + torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. 
+ """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. + """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + # return super().forward(x.float()).type(x.dtype) + return super().forward(x).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/inpaint/model/anytext/ldm/modules/distributions/__init__.py b/inpaint/model/anytext/ldm/modules/distributions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/modules/distributions/distributions.py b/inpaint/model/anytext/ldm/modules/distributions/distributions.py new file mode 100644 index 0000000..f2b8ef9 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if 
self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/inpaint/model/anytext/ldm/modules/ema.py b/inpaint/model/anytext/ldm/modules/ema.py new file mode 100644 index 0000000..bded250 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/ema.py @@ -0,0 +1,80 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates + else torch.tensor(-1, dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + # remove as '.'-character is not allowed in buffers + s_name = name.replace('.', '') + self.m_name2s_name.update({name: s_name}) + self.register_buffer(s_name, p.clone().detach().data) + + self.collected_params = [] + + def reset_num_updates(self): + del self.num_updates + self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int)) + + def forward(self, model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name 
+ + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. + """ + for c_param, param in zip(self.collected_params, parameters): + param.data.copy_(c_param.data) diff --git a/inpaint/model/anytext/ldm/modules/encoders/__init__.py b/inpaint/model/anytext/ldm/modules/encoders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ldm/modules/encoders/modules.py b/inpaint/model/anytext/ldm/modules/encoders/modules.py new file mode 100644 index 0000000..ceac395 --- /dev/null +++ b/inpaint/model/anytext/ldm/modules/encoders/modules.py @@ -0,0 +1,411 @@ +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from transformers import ( + T5Tokenizer, + T5EncoderModel, + CLIPTokenizer, + CLIPTextModel, + AutoProcessor, + CLIPVisionModelWithProjection, +) + +from iopaint.model.anytext.ldm.util import count_params + + +def _expand_mask(mask, dtype, tgt_len=None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill( + inverted_mask.to(torch.bool), torch.finfo(dtype).min + ) + + +def _build_causal_attention_mask(bsz, seq_len, dtype): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) # zero out the lower diagonal + mask = mask.unsqueeze(1) # expand mask + return mask + + +class AbstractEncoder(nn.Module): + def __init__(self): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + + +class IdentityEncoder(AbstractEncoder): + def encode(self, x): + return x + + +class ClassEmbedder(nn.Module): + def __init__(self, embed_dim, n_classes=1000, key="class", ucg_rate=0.1): + super().__init__() + self.key = key + self.embedding = nn.Embedding(n_classes, embed_dim) + self.n_classes = n_classes + self.ucg_rate = ucg_rate + + def forward(self, batch, key=None, disable_dropout=False): + if key is None: + key = self.key + # this is for use in crossattn + c = batch[key][:, None] + if self.ucg_rate > 0.0 and not disable_dropout: + mask = 1.0 - torch.bernoulli(torch.ones_like(c) * self.ucg_rate) + c = mask * c + (1 - mask) * torch.ones_like(c) * (self.n_classes - 1) + c = c.long() + c = self.embedding(c) + return c + + def get_unconditional_conditioning(self, bs, device="cuda"): + uc_class = ( + self.n_classes - 1 + ) # 1000 classes --> 0 ... 
999, one extra class for ucg (class 1000) + uc = torch.ones((bs,), device=device) * uc_class + uc = {self.key: uc} + return uc + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class FrozenT5Embedder(AbstractEncoder): + """Uses the T5 transformer encoder for text""" + + def __init__( + self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True + ): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl + super().__init__() + self.tokenizer = T5Tokenizer.from_pretrained(version) + self.transformer = T5EncoderModel.from_pretrained(version) + self.device = device + self.max_length = max_length # TODO: typical value? + if freeze: + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + # self.train = disabled_train + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer(input_ids=tokens) + + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + + +class FrozenCLIPEmbedder(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from huggingface)""" + + LAYERS = ["last", "pooled", "hidden"] + + def __init__( + self, + version="openai/clip-vit-large-patch14", + device="cuda", + max_length=77, + freeze=True, + layer="last", + layer_idx=None, + ): # clip-vit-base-patch32 + super().__init__() + assert layer in self.LAYERS + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + self.layer = layer + self.layer_idx = layer_idx + if layer == "hidden": + assert layer_idx is not None + assert 0 <= abs(layer_idx) <= 12 + + def freeze(self): + self.transformer = self.transformer.eval() + # self.train = disabled_train + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer( + input_ids=tokens, output_hidden_states=self.layer == "hidden" + ) + if self.layer == "last": + z = outputs.last_hidden_state + elif self.layer == "pooled": + z = outputs.pooler_output[:, None, :] + else: + z = outputs.hidden_states[self.layer_idx] + return z + + def encode(self, text): + return self(text) + + +class FrozenCLIPT5Encoder(AbstractEncoder): + def __init__( + self, + clip_version="openai/clip-vit-large-patch14", + t5_version="google/t5-v1_1-xl", + device="cuda", + clip_max_length=77, + t5_max_length=77, + ): + super().__init__() + self.clip_encoder = FrozenCLIPEmbedder( + clip_version, device, max_length=clip_max_length + ) + self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length) + print( + f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, " + f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params." 
+ ) + + def encode(self, text): + return self(text) + + def forward(self, text): + clip_z = self.clip_encoder.encode(text) + t5_z = self.t5_encoder.encode(text) + return [clip_z, t5_z] + + +class FrozenCLIPEmbedderT3(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from Hugging Face)""" + + def __init__( + self, + version="openai/clip-vit-large-patch14", + device="cuda", + max_length=77, + freeze=True, + use_vision=False, + ): + super().__init__() + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + if use_vision: + self.vit = CLIPVisionModelWithProjection.from_pretrained(version) + self.processor = AutoProcessor.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + + def embedding_forward( + self, + input_ids=None, + position_ids=None, + inputs_embeds=None, + embedding_manager=None, + ): + seq_length = ( + input_ids.shape[-1] + if input_ids is not None + else inputs_embeds.shape[-2] + ) + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + if embedding_manager is not None: + inputs_embeds = embedding_manager(input_ids, inputs_embeds) + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + return embeddings + + self.transformer.text_model.embeddings.forward = embedding_forward.__get__( + self.transformer.text_model.embeddings + ) + + def encoder_forward( + self, + inputs_embeds, + attention_mask=None, + causal_attention_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + return hidden_states + + self.transformer.text_model.encoder.forward = encoder_forward.__get__( + self.transformer.text_model.encoder + ) + + def text_encoder_forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + embedding_manager=None, + ): + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + if input_ids is None: + raise ValueError("You have to specify either input_ids") + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + 
hidden_states = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + embedding_manager=embedding_manager, + ) + bsz, seq_len = input_shape + # CLIP's text model uses causal mask, prepare it here. + # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = _build_causal_attention_mask( + bsz, seq_len, hidden_states.dtype + ).to(hidden_states.device) + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + last_hidden_state = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = self.final_layer_norm(last_hidden_state) + return last_hidden_state + + self.transformer.text_model.forward = text_encoder_forward.__get__( + self.transformer.text_model + ) + + def transformer_forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + embedding_manager=None, + ): + return self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + embedding_manager=embedding_manager, + ) + + self.transformer.forward = transformer_forward.__get__(self.transformer) + + def freeze(self): + self.transformer = self.transformer.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text, **kwargs): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + z = self.transformer(input_ids=tokens, **kwargs) + return z + + def encode(self, text, **kwargs): + return self(text, **kwargs) diff --git a/inpaint/model/anytext/ldm/util.py b/inpaint/model/anytext/ldm/util.py new file mode 100644 index 0000000..d456a86 --- /dev/null +++ b/inpaint/model/anytext/ldm/util.py @@ -0,0 +1,197 @@ +import importlib + +import torch +from torch import optim +import numpy as np + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('font/Arial_Unicode.ttf', size=size) + nc = int(32 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. 
Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x,torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config, **kwargs): + if "target" not in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +class AdamWwithEMAandWings(optim.Optimizer): + # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 + def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: check hyperparameters before using + weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code + ema_power=1., param_names=()): + """AdamW that saves EMA versions of the parameters.""" + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= ema_decay <= 1.0: + raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, + ema_power=ema_power, param_names=param_names) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + ema_params_with_grad = [] + state_sums = [] + max_exp_avg_sqs = [] + state_steps = [] + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + ema_decay = group['ema_decay'] + ema_power = group['ema_power'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('AdamW does not support sparse gradients') + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of parameter values + state['param_exp_avg'] = p.detach().float().clone() + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + ema_params_with_grad.append(state['param_exp_avg']) + + if amsgrad: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + + # update the steps for each param group update + state['step'] += 1 + # record the step after step update + state_steps.append(state['step']) + + optim._functional.adamw(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + maximize=False) + + cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power) + for param, ema_param in zip(params_with_grad, ema_params_with_grad): + ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay) + + return loss \ No newline at end of file diff --git a/inpaint/model/anytext/main.py b/inpaint/model/anytext/main.py new file mode 100644 index 0000000..f7b2d2e --- /dev/null +++ b/inpaint/model/anytext/main.py @@ -0,0 +1,45 @@ +import cv2 +import os + +from anytext_pipeline import AnyTextPipeline +from utils import save_images + +seed = 66273235 +# seed_everything(seed) + +pipe = AnyTextPipeline( + ckpt_path="/Users/cwq/code/github/IOPaint/iopaint/model/anytext/anytext_v1.1_fp16.ckpt", + font_path="/Users/cwq/code/github/AnyText/anytext/font/SourceHanSansSC-Medium.otf", + use_fp16=False, + device="mps", +) + +img_save_folder = "SaveImages" +rgb_image = cv2.imread( + "/Users/cwq/code/github/AnyText/anytext/example_images/ref7.jpg" +)[..., ::-1] + +masked_image = cv2.imread( + "/Users/cwq/code/github/AnyText/anytext/example_images/edit7.png" +)[..., ::-1] + +rgb_image = cv2.resize(rgb_image, (512, 512)) +masked_image = cv2.resize(masked_image, (512, 512)) + +# results: list of rgb ndarray +results, rtn_code, rtn_warning = pipe( + prompt='A cake with colorful characters that reads "EVERYDAY", best quality, extremely detailed,4k, HD, supper legible text, clear text edges, clear strokes, neat writing, no watermarks', + negative_prompt="low-res, bad anatomy, extra digit, fewer digits, cropped, worst quality, low quality, watermark, unreadable text, messy words, distorted text, disorganized writing, advertising picture", + image=rgb_image, + masked_image=masked_image, 
+ num_inference_steps=20, + strength=1.0, + guidance_scale=9.0, + height=rgb_image.shape[0], + width=rgb_image.shape[1], + seed=seed, + sort_priority="y", +) +if rtn_code >= 0: + save_images(results, img_save_folder) + print(f"Done, result images are saved in: {img_save_folder}") diff --git a/inpaint/model/anytext/ocr_recog/RNN.py b/inpaint/model/anytext/ocr_recog/RNN.py new file mode 100755 index 0000000..cf16855 --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/RNN.py @@ -0,0 +1,210 @@ +from torch import nn +import torch +from .RecSVTR import Block + +class Swish(nn.Module): + def __int__(self): + super(Swish, self).__int__() + + def forward(self,x): + return x*torch.sigmoid(x) + +class Im2Im(nn.Module): + def __init__(self, in_channels, **kwargs): + super().__init__() + self.out_channels = in_channels + + def forward(self, x): + return x + +class Im2Seq(nn.Module): + def __init__(self, in_channels, **kwargs): + super().__init__() + self.out_channels = in_channels + + def forward(self, x): + B, C, H, W = x.shape + # assert H == 1 + x = x.reshape(B, C, H * W) + x = x.permute((0, 2, 1)) + return x + +class EncoderWithRNN(nn.Module): + def __init__(self, in_channels,**kwargs): + super(EncoderWithRNN, self).__init__() + hidden_size = kwargs.get('hidden_size', 256) + self.out_channels = hidden_size * 2 + self.lstm = nn.LSTM(in_channels, hidden_size, bidirectional=True, num_layers=2,batch_first=True) + + def forward(self, x): + self.lstm.flatten_parameters() + x, _ = self.lstm(x) + return x + +class SequenceEncoder(nn.Module): + def __init__(self, in_channels, encoder_type='rnn', **kwargs): + super(SequenceEncoder, self).__init__() + self.encoder_reshape = Im2Seq(in_channels) + self.out_channels = self.encoder_reshape.out_channels + self.encoder_type = encoder_type + if encoder_type == 'reshape': + self.only_reshape = True + else: + support_encoder_dict = { + 'reshape': Im2Seq, + 'rnn': EncoderWithRNN, + 'svtr': EncoderWithSVTR + } + assert encoder_type in support_encoder_dict, '{} must in {}'.format( + encoder_type, support_encoder_dict.keys()) + + self.encoder = support_encoder_dict[encoder_type]( + self.encoder_reshape.out_channels,**kwargs) + self.out_channels = self.encoder.out_channels + self.only_reshape = False + + def forward(self, x): + if self.encoder_type != 'svtr': + x = self.encoder_reshape(x) + if not self.only_reshape: + x = self.encoder(x) + return x + else: + x = self.encoder(x) + x = self.encoder_reshape(x) + return x + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=0, + bias_attr=False, + groups=1, + act=nn.GELU): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()), + bias=bias_attr) + self.norm = nn.BatchNorm2d(out_channels) + self.act = Swish() + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + out = self.act(out) + return out + + +class EncoderWithSVTR(nn.Module): + def __init__( + self, + in_channels, + dims=64, # XS + depth=2, + hidden_dims=120, + use_guide=False, + num_heads=8, + qkv_bias=True, + mlp_ratio=2.0, + drop_rate=0.1, + attn_drop_rate=0.1, + drop_path=0., + qk_scale=None): + super(EncoderWithSVTR, self).__init__() + self.depth = depth + self.use_guide = use_guide + self.conv1 = ConvBNLayer( + in_channels, in_channels // 8, padding=1, act='swish') + 
self.conv2 = ConvBNLayer( + in_channels // 8, hidden_dims, kernel_size=1, act='swish') + + self.svtr_block = nn.ModuleList([ + Block( + dim=hidden_dims, + num_heads=num_heads, + mixer='Global', + HW=None, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer='swish', + attn_drop=attn_drop_rate, + drop_path=drop_path, + norm_layer='nn.LayerNorm', + epsilon=1e-05, + prenorm=False) for i in range(depth) + ]) + self.norm = nn.LayerNorm(hidden_dims, eps=1e-6) + self.conv3 = ConvBNLayer( + hidden_dims, in_channels, kernel_size=1, act='swish') + # last conv-nxn, the input is concat of input tensor and conv3 output tensor + self.conv4 = ConvBNLayer( + 2 * in_channels, in_channels // 8, padding=1, act='swish') + + self.conv1x1 = ConvBNLayer( + in_channels // 8, dims, kernel_size=1, act='swish') + self.out_channels = dims + self.apply(self._init_weights) + + def _init_weights(self, m): + # weight initialization + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + def forward(self, x): + # for use guide + if self.use_guide: + z = x.clone() + z.stop_gradient = True + else: + z = x + # for short cut + h = z + # reduce dim + z = self.conv1(z) + z = self.conv2(z) + # SVTR global block + B, C, H, W = z.shape + z = z.flatten(2).permute(0, 2, 1) + + for blk in self.svtr_block: + z = blk(z) + + z = self.norm(z) + # last stage + z = z.reshape([-1, H, W, C]).permute(0, 3, 1, 2) + z = self.conv3(z) + z = torch.cat((h, z), dim=1) + z = self.conv1x1(self.conv4(z)) + + return z + +if __name__=="__main__": + svtrRNN = EncoderWithSVTR(56) + print(svtrRNN) \ No newline at end of file diff --git a/inpaint/model/anytext/ocr_recog/RecCTCHead.py b/inpaint/model/anytext/ocr_recog/RecCTCHead.py new file mode 100755 index 0000000..867ede9 --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/RecCTCHead.py @@ -0,0 +1,48 @@ +from torch import nn + + +class CTCHead(nn.Module): + def __init__(self, + in_channels, + out_channels=6625, + fc_decay=0.0004, + mid_channels=None, + return_feats=False, + **kwargs): + super(CTCHead, self).__init__() + if mid_channels is None: + self.fc = nn.Linear( + in_channels, + out_channels, + bias=True,) + else: + self.fc1 = nn.Linear( + in_channels, + mid_channels, + bias=True, + ) + self.fc2 = nn.Linear( + mid_channels, + out_channels, + bias=True, + ) + + self.out_channels = out_channels + self.mid_channels = mid_channels + self.return_feats = return_feats + + def forward(self, x, labels=None): + if self.mid_channels is None: + predicts = self.fc(x) + else: + x = self.fc1(x) + predicts = self.fc2(x) + + if self.return_feats: + result = dict() + result['ctc'] = predicts + result['ctc_neck'] = x + else: + result = predicts + + return result diff --git a/inpaint/model/anytext/ocr_recog/RecModel.py b/inpaint/model/anytext/ocr_recog/RecModel.py new file mode 100755 index 0000000..c2313bf --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/RecModel.py @@ -0,0 +1,45 @@ +from torch import nn +from .RNN import SequenceEncoder, Im2Seq, Im2Im +from 
.RecMv1_enhance import MobileNetV1Enhance + +from .RecCTCHead import CTCHead + +backbone_dict = {"MobileNetV1Enhance":MobileNetV1Enhance} +neck_dict = {'SequenceEncoder': SequenceEncoder, 'Im2Seq': Im2Seq,'None':Im2Im} +head_dict = {'CTCHead':CTCHead} + + +class RecModel(nn.Module): + def __init__(self, config): + super().__init__() + assert 'in_channels' in config, 'in_channels must in model config' + backbone_type = config.backbone.pop('type') + assert backbone_type in backbone_dict, f'backbone.type must in {backbone_dict}' + self.backbone = backbone_dict[backbone_type](config.in_channels, **config.backbone) + + neck_type = config.neck.pop('type') + assert neck_type in neck_dict, f'neck.type must in {neck_dict}' + self.neck = neck_dict[neck_type](self.backbone.out_channels, **config.neck) + + head_type = config.head.pop('type') + assert head_type in head_dict, f'head.type must in {head_dict}' + self.head = head_dict[head_type](self.neck.out_channels, **config.head) + + self.name = f'RecModel_{backbone_type}_{neck_type}_{head_type}' + + def load_3rd_state_dict(self, _3rd_name, _state): + self.backbone.load_3rd_state_dict(_3rd_name, _state) + self.neck.load_3rd_state_dict(_3rd_name, _state) + self.head.load_3rd_state_dict(_3rd_name, _state) + + def forward(self, x): + x = self.backbone(x) + x = self.neck(x) + x = self.head(x) + return x + + def encode(self, x): + x = self.backbone(x) + x = self.neck(x) + x = self.head.ctc_encoder(x) + return x diff --git a/inpaint/model/anytext/ocr_recog/RecMv1_enhance.py b/inpaint/model/anytext/ocr_recog/RecMv1_enhance.py new file mode 100644 index 0000000..7529b4a --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/RecMv1_enhance.py @@ -0,0 +1,232 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from .common import Activation + + +class ConvBNLayer(nn.Module): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + act='hard_swish'): + super(ConvBNLayer, self).__init__() + self.act = act + self._conv = nn.Conv2d( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + bias=False) + + self._batch_norm = nn.BatchNorm2d( + num_filters, + ) + if self.act is not None: + self._act = Activation(act_type=act, inplace=True) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class DepthwiseSeparable(nn.Module): + def __init__(self, + num_channels, + num_filters1, + num_filters2, + num_groups, + stride, + scale, + dw_size=3, + padding=1, + use_se=False): + super(DepthwiseSeparable, self).__init__() + self.use_se = use_se + self._depthwise_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=int(num_filters1 * scale), + filter_size=dw_size, + stride=stride, + padding=padding, + num_groups=int(num_groups * scale)) + if use_se: + self._se = SEModule(int(num_filters1 * scale)) + self._pointwise_conv = ConvBNLayer( + num_channels=int(num_filters1 * scale), + filter_size=1, + num_filters=int(num_filters2 * scale), + stride=1, + padding=0) + + def forward(self, inputs): + y = self._depthwise_conv(inputs) + if self.use_se: + y = self._se(y) + y = self._pointwise_conv(y) + return y + + +class MobileNetV1Enhance(nn.Module): + def __init__(self, + in_channels=3, + scale=0.5, + last_conv_stride=1, + last_pool_type='max', + **kwargs): + super().__init__() + self.scale = scale + self.block_list = [] + + 
self.conv1 = ConvBNLayer( + num_channels=in_channels, + filter_size=3, + channels=3, + num_filters=int(32 * scale), + stride=2, + padding=1) + + conv2_1 = DepthwiseSeparable( + num_channels=int(32 * scale), + num_filters1=32, + num_filters2=64, + num_groups=32, + stride=1, + scale=scale) + self.block_list.append(conv2_1) + + conv2_2 = DepthwiseSeparable( + num_channels=int(64 * scale), + num_filters1=64, + num_filters2=128, + num_groups=64, + stride=1, + scale=scale) + self.block_list.append(conv2_2) + + conv3_1 = DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=128, + num_groups=128, + stride=1, + scale=scale) + self.block_list.append(conv3_1) + + conv3_2 = DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=256, + num_groups=128, + stride=(2, 1), + scale=scale) + self.block_list.append(conv3_2) + + conv4_1 = DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=256, + num_groups=256, + stride=1, + scale=scale) + self.block_list.append(conv4_1) + + conv4_2 = DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=512, + num_groups=256, + stride=(2, 1), + scale=scale) + self.block_list.append(conv4_2) + + for _ in range(5): + conv5 = DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=512, + num_groups=512, + stride=1, + dw_size=5, + padding=2, + scale=scale, + use_se=False) + self.block_list.append(conv5) + + conv5_6 = DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=1024, + num_groups=512, + stride=(2, 1), + dw_size=5, + padding=2, + scale=scale, + use_se=True) + self.block_list.append(conv5_6) + + conv6 = DepthwiseSeparable( + num_channels=int(1024 * scale), + num_filters1=1024, + num_filters2=1024, + num_groups=1024, + stride=last_conv_stride, + dw_size=5, + padding=2, + use_se=True, + scale=scale) + self.block_list.append(conv6) + + self.block_list = nn.Sequential(*self.block_list) + if last_pool_type == 'avg': + self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) + else: + self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + self.out_channels = int(1024 * scale) + + def forward(self, inputs): + y = self.conv1(inputs) + y = self.block_list(y) + y = self.pool(y) + return y + +def hardsigmoid(x): + return F.relu6(x + 3., inplace=True) / 6. 
+ +class SEModule(nn.Module): + def __init__(self, channel, reduction=4): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0, + bias=True) + self.conv2 = nn.Conv2d( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0, + bias=True) + + def forward(self, inputs): + outputs = self.avg_pool(inputs) + outputs = self.conv1(outputs) + outputs = F.relu(outputs) + outputs = self.conv2(outputs) + outputs = hardsigmoid(outputs) + x = torch.mul(inputs, outputs) + + return x diff --git a/inpaint/model/anytext/ocr_recog/RecSVTR.py b/inpaint/model/anytext/ocr_recog/RecSVTR.py new file mode 100644 index 0000000..484b3df --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/RecSVTR.py @@ -0,0 +1,591 @@ +import torch +import torch.nn as nn +import numpy as np +from torch.nn.init import trunc_normal_, zeros_, ones_ +from torch.nn import functional + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = torch.tensor(1 - drop_prob) + shape = (x.size()[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype) + random_tensor = torch.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class Swish(nn.Module): + def __int__(self): + super(Swish, self).__int__() + + def forward(self,x): + return x*torch.sigmoid(x) + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=0, + bias_attr=False, + groups=1, + act=nn.GELU): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()), + bias=bias_attr) + self.norm = nn.BatchNorm2d(out_channels) + self.act = act() + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + out = self.act(out) + return out + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Identity(nn.Module): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +class Mlp(nn.Module): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + if isinstance(act_layer, str): + self.act = Swish() + else: + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class ConvMixer(nn.Module): + def __init__( + self, + dim, + num_heads=8, + HW=(8, 25), + local_k=(3, 3), ): + super().__init__() + self.HW = HW + self.dim = dim + self.local_mixer = nn.Conv2d( + dim, + dim, + local_k, + 1, (local_k[0] // 2, local_k[1] // 2), + groups=num_heads, + # weight_attr=ParamAttr(initializer=KaimingNormal()) + ) + + def forward(self, x): + h = self.HW[0] + w = self.HW[1] + x = x.transpose([0, 2, 1]).reshape([0, self.dim, h, w]) + x = self.local_mixer(x) + x = x.flatten(2).transpose([0, 2, 1]) + return x + + +class Attention(nn.Module): + def __init__(self, + dim, + num_heads=8, + mixer='Global', + HW=(8, 25), + local_k=(7, 11), + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.HW = HW + if HW is not None: + H = HW[0] + W = HW[1] + self.N = H * W + self.C = dim + if mixer == 'Local' and HW is not None: + hk = local_k[0] + wk = local_k[1] + mask = torch.ones([H * W, H + hk - 1, W + wk - 1]) + for h in range(0, H): + for w in range(0, W): + mask[h * W + w, h:h + hk, w:w + wk] = 0. 
+ mask_paddle = mask[:, hk // 2:H + hk // 2, wk // 2:W + wk // + 2].flatten(1) + mask_inf = torch.full([H * W, H * W],fill_value=float('-inf')) + mask = torch.where(mask_paddle < 1, mask_paddle, mask_inf) + self.mask = mask[None,None,:] + # self.mask = mask.unsqueeze([0, 1]) + self.mixer = mixer + + def forward(self, x): + if self.HW is not None: + N = self.N + C = self.C + else: + _, N, C = x.shape + qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C //self.num_heads)).permute((2, 0, 3, 1, 4)) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q.matmul(k.permute((0, 1, 3, 2)))) + if self.mixer == 'Local': + attn += self.mask + attn = functional.softmax(attn, dim=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).permute((0, 2, 1, 3)).reshape((-1, N, C)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, + dim, + num_heads, + mixer='Global', + local_mixer=(7, 11), + HW=(8, 25), + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer='nn.LayerNorm', + epsilon=1e-6, + prenorm=True): + super().__init__() + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, eps=epsilon) + else: + self.norm1 = norm_layer(dim) + if mixer == 'Global' or mixer == 'Local': + + self.mixer = Attention( + dim, + num_heads=num_heads, + mixer=mixer, + HW=HW, + local_k=local_mixer, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + elif mixer == 'Conv': + self.mixer = ConvMixer( + dim, num_heads=num_heads, HW=HW, local_k=local_mixer) + else: + raise TypeError("The mixer must be one of [Global, Local, Conv]") + + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, eps=epsilon) + else: + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp_ratio = mlp_ratio + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + self.prenorm = prenorm + + def forward(self, x): + if self.prenorm: + x = self.norm1(x + self.drop_path(self.mixer(x))) + x = self.norm2(x + self.drop_path(self.mlp(x))) + else: + x = x + self.drop_path(self.mixer(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=(32, 100), + in_channels=3, + embed_dim=768, + sub_num=2): + super().__init__() + num_patches = (img_size[1] // (2 ** sub_num)) * \ + (img_size[0] // (2 ** sub_num)) + self.img_size = img_size + self.num_patches = num_patches + self.embed_dim = embed_dim + self.norm = None + if sub_num == 2: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False)) + if sub_num == 3: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 4, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False), + ConvBNLayer( + in_channels=embed_dim // 4, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + 
act=nn.GELU, + bias_attr=False)) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).permute(0, 2, 1) + return x + + +class SubSample(nn.Module): + def __init__(self, + in_channels, + out_channels, + types='Pool', + stride=(2, 1), + sub_norm='nn.LayerNorm', + act=None): + super().__init__() + self.types = types + if types == 'Pool': + self.avgpool = nn.AvgPool2d( + kernel_size=(3, 5), stride=stride, padding=(1, 2)) + self.maxpool = nn.MaxPool2d( + kernel_size=(3, 5), stride=stride, padding=(1, 2)) + self.proj = nn.Linear(in_channels, out_channels) + else: + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + # weight_attr=ParamAttr(initializer=KaimingNormal()) + ) + self.norm = eval(sub_norm)(out_channels) + if act is not None: + self.act = act() + else: + self.act = None + + def forward(self, x): + + if self.types == 'Pool': + x1 = self.avgpool(x) + x2 = self.maxpool(x) + x = (x1 + x2) * 0.5 + out = self.proj(x.flatten(2).permute((0, 2, 1))) + else: + x = self.conv(x) + out = x.flatten(2).permute((0, 2, 1)) + out = self.norm(out) + if self.act is not None: + out = self.act(out) + + return out + + +class SVTRNet(nn.Module): + def __init__( + self, + img_size=[48, 100], + in_channels=3, + embed_dim=[64, 128, 256], + depth=[3, 6, 3], + num_heads=[2, 4, 8], + mixer=['Local'] * 6 + ['Global'] * + 6, # Local atten, Global atten, Conv + local_mixer=[[7, 11], [7, 11], [7, 11]], + patch_merging='Conv', # Conv, Pool, None + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + last_drop=0.1, + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer='nn.LayerNorm', + sub_norm='nn.LayerNorm', + epsilon=1e-6, + out_channels=192, + out_char_num=25, + block_unit='Block', + act='nn.GELU', + last_stage=True, + sub_num=2, + prenorm=True, + use_lenhead=False, + **kwargs): + super().__init__() + self.img_size = img_size + self.embed_dim = embed_dim + self.out_channels = out_channels + self.prenorm = prenorm + patch_merging = None if patch_merging != 'Conv' and patch_merging != 'Pool' else patch_merging + self.patch_embed = PatchEmbed( + img_size=img_size, + in_channels=in_channels, + embed_dim=embed_dim[0], + sub_num=sub_num) + num_patches = self.patch_embed.num_patches + self.HW = [img_size[0] // (2**sub_num), img_size[1] // (2**sub_num)] + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0])) + # self.pos_embed = self.create_parameter( + # shape=[1, num_patches, embed_dim[0]], default_initializer=zeros_) + + # self.add_parameter("pos_embed", self.pos_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + Block_unit = eval(block_unit) + + dpr = np.linspace(0, drop_path_rate, sum(depth)) + self.blocks1 = nn.ModuleList( + [ + Block_unit( + dim=embed_dim[0], + num_heads=num_heads[0], + mixer=mixer[0:depth[0]][i], + HW=self.HW, + local_mixer=local_mixer[0], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[0:depth[0]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[0]) + ] + ) + if patch_merging is not None: + self.sub_sample1 = SubSample( + embed_dim[0], + embed_dim[1], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 2, self.HW[1]] + else: + HW = self.HW + self.patch_merging = 
patch_merging + self.blocks2 = nn.ModuleList([ + Block_unit( + dim=embed_dim[1], + num_heads=num_heads[1], + mixer=mixer[depth[0]:depth[0] + depth[1]][i], + HW=HW, + local_mixer=local_mixer[1], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0]:depth[0] + depth[1]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[1]) + ]) + if patch_merging is not None: + self.sub_sample2 = SubSample( + embed_dim[1], + embed_dim[2], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 4, self.HW[1]] + else: + HW = self.HW + self.blocks3 = nn.ModuleList([ + Block_unit( + dim=embed_dim[2], + num_heads=num_heads[2], + mixer=mixer[depth[0] + depth[1]:][i], + HW=HW, + local_mixer=local_mixer[2], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0] + depth[1]:][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[2]) + ]) + self.last_stage = last_stage + if last_stage: + self.avg_pool = nn.AdaptiveAvgPool2d((1, out_char_num)) + self.last_conv = nn.Conv2d( + in_channels=embed_dim[2], + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.hardswish = nn.Hardswish() + self.dropout = nn.Dropout(p=last_drop) + if not prenorm: + self.norm = eval(norm_layer)(embed_dim[-1], epsilon=epsilon) + self.use_lenhead = use_lenhead + if use_lenhead: + self.len_conv = nn.Linear(embed_dim[2], self.out_channels) + self.hardswish_len = nn.Hardswish() + self.dropout_len = nn.Dropout( + p=last_drop) + + trunc_normal_(self.pos_embed,std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight,std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks1: + x = blk(x) + if self.patch_merging is not None: + x = self.sub_sample1( + x.permute([0, 2, 1]).reshape( + [-1, self.embed_dim[0], self.HW[0], self.HW[1]])) + for blk in self.blocks2: + x = blk(x) + if self.patch_merging is not None: + x = self.sub_sample2( + x.permute([0, 2, 1]).reshape( + [-1, self.embed_dim[1], self.HW[0] // 2, self.HW[1]])) + for blk in self.blocks3: + x = blk(x) + if not self.prenorm: + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.use_lenhead: + len_x = self.len_conv(x.mean(1)) + len_x = self.dropout_len(self.hardswish_len(len_x)) + if self.last_stage: + if self.patch_merging is not None: + h = self.HW[0] // 4 + else: + h = self.HW[0] + x = self.avg_pool( + x.permute([0, 2, 1]).reshape( + [-1, self.embed_dim[2], h, self.HW[1]])) + x = self.last_conv(x) + x = self.hardswish(x) + x = self.dropout(x) + if self.use_lenhead: + return x, len_x + return x + + +if __name__=="__main__": + a = torch.rand(1,3,48,100) + svtr = SVTRNet() + + out = svtr(a) + print(svtr) + print(out.size()) \ No newline at end of file diff --git a/inpaint/model/anytext/ocr_recog/__init__.py b/inpaint/model/anytext/ocr_recog/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/anytext/ocr_recog/common.py b/inpaint/model/anytext/ocr_recog/common.py new file mode 100644 index 
0000000..a328bb0 --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/common.py @@ -0,0 +1,74 @@ + + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Hswish(nn.Module): + def __init__(self, inplace=True): + super(Hswish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x * F.relu6(x + 3., inplace=self.inplace) / 6. + +# out = max(0, min(1, slop*x+offset)) +# paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None) +class Hsigmoid(nn.Module): + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + # torch: F.relu6(x + 3., inplace=self.inplace) / 6. + # paddle: F.relu6(1.2 * x + 3., inplace=self.inplace) / 6. + return F.relu6(1.2 * x + 3., inplace=self.inplace) / 6. + +class GELU(nn.Module): + def __init__(self, inplace=True): + super(GELU, self).__init__() + self.inplace = inplace + + def forward(self, x): + return torch.nn.functional.gelu(x) + + +class Swish(nn.Module): + def __init__(self, inplace=True): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + if self.inplace: + x.mul_(torch.sigmoid(x)) + return x + else: + return x*torch.sigmoid(x) + + +class Activation(nn.Module): + def __init__(self, act_type, inplace=True): + super(Activation, self).__init__() + act_type = act_type.lower() + if act_type == 'relu': + self.act = nn.ReLU(inplace=inplace) + elif act_type == 'relu6': + self.act = nn.ReLU6(inplace=inplace) + elif act_type == 'sigmoid': + raise NotImplementedError + elif act_type == 'hard_sigmoid': + self.act = Hsigmoid(inplace) + elif act_type == 'hard_swish': + self.act = Hswish(inplace=inplace) + elif act_type == 'leakyrelu': + self.act = nn.LeakyReLU(inplace=inplace) + elif act_type == 'gelu': + self.act = GELU(inplace=inplace) + elif act_type == 'swish': + self.act = Swish(inplace=inplace) + else: + raise NotImplementedError + + def forward(self, inputs): + return self.act(inputs) \ No newline at end of file diff --git a/inpaint/model/anytext/ocr_recog/en_dict.txt b/inpaint/model/anytext/ocr_recog/en_dict.txt new file mode 100644 index 0000000..7677d31 --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/en_dict.txt @@ -0,0 +1,95 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ + diff --git a/inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt b/inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt new file mode 100644 index 0000000..84b885d --- /dev/null +++ b/inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt @@ -0,0 +1,6623 @@ +' +疗 +绚 +诚 +娇 +溜 +题 +贿 +者 +廖 +更 +纳 +加 +奉 +公 +一 +就 +汴 +计 +与 +路 +房 +原 +妇 +2 +0 +8 +- +7 +其 +> +: +] +, +, +骑 +刈 +全 +消 +昏 +傈 +安 +久 +钟 +嗅 +不 +影 +处 +驽 +蜿 +资 +关 +椤 +地 +瘸 +专 +问 +忖 +票 +嫉 +炎 +韵 +要 +月 +田 +节 +陂 +鄙 +捌 +备 +拳 +伺 +眼 +网 +盎 +大 +傍 +心 +东 +愉 +汇 +蹿 +科 +每 +业 +里 +航 +晏 +字 +平 +录 +先 +1 +3 +彤 +鲶 +产 +稍 +督 +腴 +有 +象 +岳 +注 +绍 +在 +泺 +文 +定 +核 +名 +水 +过 +理 +让 +偷 +率 +等 +这 +发 +” +为 +含 +肥 +酉 +相 +鄱 +七 +编 +猥 +锛 +日 +镀 +蒂 +掰 +倒 +辆 +栾 +栗 +综 +涩 +州 +雌 +滑 +馀 +了 +机 +块 +司 +宰 +甙 +兴 +矽 +抚 +保 +用 +沧 +秩 +如 +收 +息 +滥 +页 +疑 +埠 +! +! 
+姥 +异 +橹 +钇 +向 +下 +跄 +的 +椴 +沫 +国 +绥 +獠 +报 +开 +民 +蜇 +何 +分 +凇 +长 +讥 +藏 +掏 +施 +羽 +中 +讲 +派 +嘟 +人 +提 +浼 +间 +世 +而 +古 +多 +倪 +唇 +饯 +控 +庚 +首 +赛 +蜓 +味 +断 +制 +觉 +技 +替 +艰 +溢 +潮 +夕 +钺 +外 +摘 +枋 +动 +双 +单 +啮 +户 +枇 +确 +锦 +曜 +杜 +或 +能 +效 +霜 +盒 +然 +侗 +电 +晁 +放 +步 +鹃 +新 +杖 +蜂 +吒 +濂 +瞬 +评 +总 +隍 +对 +独 +合 +也 +是 +府 +青 +天 +诲 +墙 +组 +滴 +级 +邀 +帘 +示 +已 +时 +骸 +仄 +泅 +和 +遨 +店 +雇 +疫 +持 +巍 +踮 +境 +只 +亨 +目 +鉴 +崤 +闲 +体 +泄 +杂 +作 +般 +轰 +化 +解 +迂 +诿 +蛭 +璀 +腾 +告 +版 +服 +省 +师 +小 +规 +程 +线 +海 +办 +引 +二 +桧 +牌 +砺 +洄 +裴 +修 +图 +痫 +胡 +许 +犊 +事 +郛 +基 +柴 +呼 +食 +研 +奶 +律 +蛋 +因 +葆 +察 +戏 +褒 +戒 +再 +李 +骁 +工 +貂 +油 +鹅 +章 +啄 +休 +场 +给 +睡 +纷 +豆 +器 +捎 +说 +敏 +学 +会 +浒 +设 +诊 +格 +廓 +查 +来 +霓 +室 +溆 +¢ +诡 +寥 +焕 +舜 +柒 +狐 +回 +戟 +砾 +厄 +实 +翩 +尿 +五 +入 +径 +惭 +喹 +股 +宇 +篝 +| +; +美 +期 +云 +九 +祺 +扮 +靠 +锝 +槌 +系 +企 +酰 +阊 +暂 +蚕 +忻 +豁 +本 +羹 +执 +条 +钦 +H +獒 +限 +进 +季 +楦 +于 +芘 +玖 +铋 +茯 +未 +答 +粘 +括 +样 +精 +欠 +矢 +甥 +帷 +嵩 +扣 +令 +仔 +风 +皈 +行 +支 +部 +蓉 +刮 +站 +蜡 +救 +钊 +汗 +松 +嫌 +成 +可 +. +鹤 +院 +从 +交 +政 +怕 +活 +调 +球 +局 +验 +髌 +第 +韫 +谗 +串 +到 +圆 +年 +米 +/ +* +友 +忿 +检 +区 +看 +自 +敢 +刃 +个 +兹 +弄 +流 +留 +同 +没 +齿 +星 +聆 +轼 +湖 +什 +三 +建 +蛔 +儿 +椋 +汕 +震 +颧 +鲤 +跟 +力 +情 +璺 +铨 +陪 +务 +指 +族 +训 +滦 +鄣 +濮 +扒 +商 +箱 +十 +召 +慷 +辗 +所 +莞 +管 +护 +臭 +横 +硒 +嗓 +接 +侦 +六 +露 +党 +馋 +驾 +剖 +高 +侬 +妪 +幂 +猗 +绺 +骐 +央 +酐 +孝 +筝 +课 +徇 +缰 +门 +男 +西 +项 +句 +谙 +瞒 +秃 +篇 +教 +碲 +罚 +声 +呐 +景 +前 +富 +嘴 +鳌 +稀 +免 +朋 +啬 +睐 +去 +赈 +鱼 +住 +肩 +愕 +速 +旁 +波 +厅 +健 +茼 +厥 +鲟 +谅 +投 +攸 +炔 +数 +方 +击 +呋 +谈 +绩 +别 +愫 +僚 +躬 +鹧 +胪 +炳 +招 +喇 +膨 +泵 +蹦 +毛 +结 +5 +4 +谱 +识 +陕 +粽 +婚 +拟 +构 +且 +搜 +任 +潘 +比 +郢 +妨 +醪 +陀 +桔 +碘 +扎 +选 +哈 +骷 +楷 +亿 +明 +缆 +脯 +监 +睫 +逻 +婵 +共 +赴 +淝 +凡 +惦 +及 +达 +揖 +谩 +澹 +减 +焰 +蛹 +番 +祁 +柏 +员 +禄 +怡 +峤 +龙 +白 +叽 +生 +闯 +起 +细 +装 +谕 +竟 +聚 +钙 +上 +导 +渊 +按 +艾 +辘 +挡 +耒 +盹 +饪 +臀 +记 +邮 +蕙 +受 +各 +医 +搂 +普 +滇 +朗 +茸 +带 +翻 +酚 +( +光 +堤 +墟 +蔷 +万 +幻 +〓 +瑙 +辈 +昧 +盏 +亘 +蛀 +吉 +铰 +请 +子 +假 +闻 +税 +井 +诩 +哨 +嫂 +好 +面 +琐 +校 +馊 +鬣 +缂 +营 +访 +炖 +占 +农 +缀 +否 +经 +钚 +棵 +趟 +张 +亟 +吏 +茶 +谨 +捻 +论 +迸 +堂 +玉 +信 +吧 +瞠 +乡 +姬 +寺 +咬 +溏 +苄 +皿 +意 +赉 +宝 +尔 +钰 +艺 +特 +唳 +踉 +都 +荣 +倚 +登 +荐 +丧 +奇 +涵 +批 +炭 +近 +符 +傩 +感 +道 +着 +菊 +虹 +仲 +众 +懈 +濯 +颞 +眺 +南 +释 +北 +缝 +标 +既 +茗 +整 +撼 +迤 +贲 +挎 +耱 +拒 +某 +妍 +卫 +哇 +英 +矶 +藩 +治 +他 +元 +领 +膜 +遮 +穗 +蛾 +飞 +荒 +棺 +劫 +么 +市 +火 +温 +拈 +棚 +洼 +转 +果 +奕 +卸 +迪 +伸 +泳 +斗 +邡 +侄 +涨 +屯 +萋 +胭 +氡 +崮 +枞 +惧 +冒 +彩 +斜 +手 +豚 +随 +旭 +淑 +妞 +形 +菌 +吲 +沱 +争 +驯 +歹 +挟 +兆 +柱 +传 +至 +包 +内 +响 +临 +红 +功 +弩 +衡 +寂 +禁 +老 +棍 +耆 +渍 +织 +害 +氵 +渑 +布 +载 +靥 +嗬 +虽 +苹 +咨 +娄 +库 +雉 +榜 +帜 +嘲 +套 +瑚 +亲 +簸 +欧 +边 +6 +腿 +旮 +抛 +吹 +瞳 +得 +镓 +梗 +厨 +继 +漾 +愣 +憨 +士 +策 +窑 +抑 +躯 +襟 +脏 +参 +贸 +言 +干 +绸 +鳄 +穷 +藜 +音 +折 +详 +) +举 +悍 +甸 +癌 +黎 +谴 +死 +罩 +迁 +寒 +驷 +袖 +媒 +蒋 +掘 +模 +纠 +恣 +观 +祖 +蛆 +碍 +位 +稿 +主 +澧 +跌 +筏 +京 +锏 +帝 +贴 +证 +糠 +才 +黄 +鲸 +略 +炯 +饱 +四 +出 +园 +犀 +牧 +容 +汉 +杆 +浈 +汰 +瑷 +造 +虫 +瘩 +怪 +驴 +济 +应 +花 +沣 +谔 +夙 +旅 +价 +矿 +以 +考 +s +u +呦 +晒 +巡 +茅 +准 +肟 +瓴 +詹 +仟 +褂 +译 +桌 +混 +宁 +怦 +郑 +抿 +些 +余 +鄂 +饴 +攒 +珑 +群 +阖 +岔 +琨 +藓 +预 +环 +洮 +岌 +宀 +杲 +瀵 +最 +常 +囡 +周 +踊 +女 +鼓 +袭 +喉 +简 +范 +薯 +遐 +疏 +粱 +黜 +禧 +法 +箔 +斤 +遥 +汝 +奥 +直 +贞 +撑 +置 +绱 +集 +她 +馅 +逗 +钧 +橱 +魉 +[ +恙 +躁 +唤 +9 +旺 +膘 +待 +脾 +惫 +购 +吗 +依 +盲 +度 +瘿 +蠖 +俾 +之 +镗 +拇 +鲵 +厝 +簧 +续 +款 +展 +啃 +表 +剔 +品 +钻 +腭 +损 +清 +锶 +统 +涌 +寸 +滨 +贪 +链 +吠 +冈 +伎 +迥 +咏 +吁 +览 +防 +迅 +失 +汾 +阔 +逵 +绀 +蔑 +列 +川 +凭 +努 +熨 +揪 +利 +俱 +绉 +抢 +鸨 +我 +即 +责 +膦 +易 +毓 +鹊 +刹 +玷 +岿 +空 +嘞 +绊 +排 +术 +估 +锷 +违 +们 +苟 +铜 +播 +肘 +件 +烫 +审 +鲂 +广 +像 +铌 +惰 +铟 +巳 +胍 +鲍 +康 +憧 +色 +恢 +想 +拷 +尤 +疳 +知 +S +Y +F +D +A +峄 +裕 +帮 +握 +搔 +氐 +氘 +难 +墒 +沮 +雨 +叁 +缥 +悴 +藐 +湫 +娟 +苑 +稠 +颛 +簇 +后 +阕 +闭 +蕤 +缚 +怎 +佞 +码 +嘤 +蔡 +痊 +舱 +螯 +帕 +赫 +昵 +升 +烬 +岫 +、 +疵 +蜻 +髁 +蕨 +隶 +烛 +械 +丑 +盂 +梁 +强 +鲛 +由 +拘 +揉 +劭 +龟 +撤 +钩 +呕 +孛 +费 +妻 +漂 +求 +阑 +崖 +秤 +甘 +通 +深 +补 +赃 +坎 +床 +啪 +承 +吼 +量 +暇 +钼 +烨 +阂 +擎 +脱 +逮 +称 +P +神 +属 +矗 +华 +届 +狍 +葑 +汹 +育 +患 +窒 +蛰 +佼 +静 +槎 +运 +鳗 +庆 +逝 +曼 +疱 +克 +代 +官 +此 +麸 +耧 +蚌 +晟 +例 +础 +榛 +副 +测 +唰 +缢 +迹 +灬 +霁 +身 +岁 +赭 
+扛 +又 +菡 +乜 +雾 +板 +读 +陷 +徉 +贯 +郁 +虑 +变 +钓 +菜 +圾 +现 +琢 +式 +乐 +维 +渔 +浜 +左 +吾 +脑 +钡 +警 +T +啵 +拴 +偌 +漱 +湿 +硕 +止 +骼 +魄 +积 +燥 +联 +踢 +玛 +则 +窿 +见 +振 +畿 +送 +班 +钽 +您 +赵 +刨 +印 +讨 +踝 +籍 +谡 +舌 +崧 +汽 +蔽 +沪 +酥 +绒 +怖 +财 +帖 +肱 +私 +莎 +勋 +羔 +霸 +励 +哼 +帐 +将 +帅 +渠 +纪 +婴 +娩 +岭 +厘 +滕 +吻 +伤 +坝 +冠 +戊 +隆 +瘁 +介 +涧 +物 +黍 +并 +姗 +奢 +蹑 +掣 +垸 +锴 +命 +箍 +捉 +病 +辖 +琰 +眭 +迩 +艘 +绌 +繁 +寅 +若 +毋 +思 +诉 +类 +诈 +燮 +轲 +酮 +狂 +重 +反 +职 +筱 +县 +委 +磕 +绣 +奖 +晋 +濉 +志 +徽 +肠 +呈 +獐 +坻 +口 +片 +碰 +几 +村 +柿 +劳 +料 +获 +亩 +惕 +晕 +厌 +号 +罢 +池 +正 +鏖 +煨 +家 +棕 +复 +尝 +懋 +蜥 +锅 +岛 +扰 +队 +坠 +瘾 +钬 +@ +卧 +疣 +镇 +譬 +冰 +彷 +频 +黯 +据 +垄 +采 +八 +缪 +瘫 +型 +熹 +砰 +楠 +襁 +箐 +但 +嘶 +绳 +啤 +拍 +盥 +穆 +傲 +洗 +盯 +塘 +怔 +筛 +丿 +台 +恒 +喂 +葛 +永 +¥ +烟 +酒 +桦 +书 +砂 +蚝 +缉 +态 +瀚 +袄 +圳 +轻 +蛛 +超 +榧 +遛 +姒 +奘 +铮 +右 +荽 +望 +偻 +卡 +丶 +氰 +附 +做 +革 +索 +戚 +坨 +桷 +唁 +垅 +榻 +岐 +偎 +坛 +莨 +山 +殊 +微 +骇 +陈 +爨 +推 +嗝 +驹 +澡 +藁 +呤 +卤 +嘻 +糅 +逛 +侵 +郓 +酌 +德 +摇 +※ +鬃 +被 +慨 +殡 +羸 +昌 +泡 +戛 +鞋 +河 +宪 +沿 +玲 +鲨 +翅 +哽 +源 +铅 +语 +照 +邯 +址 +荃 +佬 +顺 +鸳 +町 +霭 +睾 +瓢 +夸 +椁 +晓 +酿 +痈 +咔 +侏 +券 +噎 +湍 +签 +嚷 +离 +午 +尚 +社 +锤 +背 +孟 +使 +浪 +缦 +潍 +鞅 +军 +姹 +驶 +笑 +鳟 +鲁 +》 +孽 +钜 +绿 +洱 +礴 +焯 +椰 +颖 +囔 +乌 +孔 +巴 +互 +性 +椽 +哞 +聘 +昨 +早 +暮 +胶 +炀 +隧 +低 +彗 +昝 +铁 +呓 +氽 +藉 +喔 +癖 +瑗 +姨 +权 +胱 +韦 +堑 +蜜 +酋 +楝 +砝 +毁 +靓 +歙 +锲 +究 +屋 +喳 +骨 +辨 +碑 +武 +鸠 +宫 +辜 +烊 +适 +坡 +殃 +培 +佩 +供 +走 +蜈 +迟 +翼 +况 +姣 +凛 +浔 +吃 +飘 +债 +犟 +金 +促 +苛 +崇 +坂 +莳 +畔 +绂 +兵 +蠕 +斋 +根 +砍 +亢 +欢 +恬 +崔 +剁 +餐 +榫 +快 +扶 +‖ +濒 +缠 +鳜 +当 +彭 +驭 +浦 +篮 +昀 +锆 +秸 +钳 +弋 +娣 +瞑 +夷 +龛 +苫 +拱 +致 +% +嵊 +障 +隐 +弑 +初 +娓 +抉 +汩 +累 +蓖 +" +唬 +助 +苓 +昙 +押 +毙 +破 +城 +郧 +逢 +嚏 +獭 +瞻 +溱 +婿 +赊 +跨 +恼 +璧 +萃 +姻 +貉 +灵 +炉 +密 +氛 +陶 +砸 +谬 +衔 +点 +琛 +沛 +枳 +层 +岱 +诺 +脍 +榈 +埂 +征 +冷 +裁 +打 +蹴 +素 +瘘 +逞 +蛐 +聊 +激 +腱 +萘 +踵 +飒 +蓟 +吆 +取 +咙 +簋 +涓 +矩 +曝 +挺 +揣 +座 +你 +史 +舵 +焱 +尘 +苏 +笈 +脚 +溉 +榨 +诵 +樊 +邓 +焊 +义 +庶 +儋 +蟋 +蒲 +赦 +呷 +杞 +诠 +豪 +还 +试 +颓 +茉 +太 +除 +紫 +逃 +痴 +草 +充 +鳕 +珉 +祗 +墨 +渭 +烩 +蘸 +慕 +璇 +镶 +穴 +嵘 +恶 +骂 +险 +绋 +幕 +碉 +肺 +戳 +刘 +潞 +秣 +纾 +潜 +銮 +洛 +须 +罘 +销 +瘪 +汞 +兮 +屉 +r +林 +厕 +质 +探 +划 +狸 +殚 +善 +煊 +烹 +〒 +锈 +逯 +宸 +辍 +泱 +柚 +袍 +远 +蹋 +嶙 +绝 +峥 +娥 +缍 +雀 +徵 +认 +镱 +谷 += +贩 +勉 +撩 +鄯 +斐 +洋 +非 +祚 +泾 +诒 +饿 +撬 +威 +晷 +搭 +芍 +锥 +笺 +蓦 +候 +琊 +档 +礁 +沼 +卵 +荠 +忑 +朝 +凹 +瑞 +头 +仪 +弧 +孵 +畏 +铆 +突 +衲 +车 +浩 +气 +茂 +悖 +厢 +枕 +酝 +戴 +湾 +邹 +飚 +攘 +锂 +写 +宵 +翁 +岷 +无 +喜 +丈 +挑 +嗟 +绛 +殉 +议 +槽 +具 +醇 +淞 +笃 +郴 +阅 +饼 +底 +壕 +砚 +弈 +询 +缕 +庹 +翟 +零 +筷 +暨 +舟 +闺 +甯 +撞 +麂 +茌 +蔼 +很 +珲 +捕 +棠 +角 +阉 +媛 +娲 +诽 +剿 +尉 +爵 +睬 +韩 +诰 +匣 +危 +糍 +镯 +立 +浏 +阳 +少 +盆 +舔 +擘 +匪 +申 +尬 +铣 +旯 +抖 +赘 +瓯 +居 +ˇ +哮 +游 +锭 +茏 +歌 +坏 +甚 +秒 +舞 +沙 +仗 +劲 +潺 +阿 +燧 +郭 +嗖 +霏 +忠 +材 +奂 +耐 +跺 +砀 +输 +岖 +媳 +氟 +极 +摆 +灿 +今 +扔 +腻 +枝 +奎 +药 +熄 +吨 +话 +q +额 +慑 +嘌 +协 +喀 +壳 +埭 +视 +著 +於 +愧 +陲 +翌 +峁 +颅 +佛 +腹 +聋 +侯 +咎 +叟 +秀 +颇 +存 +较 +罪 +哄 +岗 +扫 +栏 +钾 +羌 +己 +璨 +枭 +霉 +煌 +涸 +衿 +键 +镝 +益 +岢 +奏 +连 +夯 +睿 +冥 +均 +糖 +狞 +蹊 +稻 +爸 +刿 +胥 +煜 +丽 +肿 +璃 +掸 +跚 +灾 +垂 +樾 +濑 +乎 +莲 +窄 +犹 +撮 +战 +馄 +软 +络 +显 +鸢 +胸 +宾 +妲 +恕 +埔 +蝌 +份 +遇 +巧 +瞟 +粒 +恰 +剥 +桡 +博 +讯 +凯 +堇 +阶 +滤 +卖 +斌 +骚 +彬 +兑 +磺 +樱 +舷 +两 +娱 +福 +仃 +差 +找 +桁 +÷ +净 +把 +阴 +污 +戬 +雷 +碓 +蕲 +楚 +罡 +焖 +抽 +妫 +咒 +仑 +闱 +尽 +邑 +菁 +爱 +贷 +沥 +鞑 +牡 +嗉 +崴 +骤 +塌 +嗦 +订 +拮 +滓 +捡 +锻 +次 +坪 +杩 +臃 +箬 +融 +珂 +鹗 +宗 +枚 +降 +鸬 +妯 +阄 +堰 +盐 +毅 +必 +杨 +崃 +俺 +甬 +状 +莘 +货 +耸 +菱 +腼 +铸 +唏 +痤 +孚 +澳 +懒 +溅 +翘 +疙 +杷 +淼 +缙 +骰 +喊 +悉 +砻 +坷 +艇 +赁 +界 +谤 +纣 +宴 +晃 +茹 +归 +饭 +梢 +铡 +街 +抄 +肼 +鬟 +苯 +颂 +撷 +戈 +炒 +咆 +茭 +瘙 +负 +仰 +客 +琉 +铢 +封 +卑 +珥 +椿 +镧 +窨 +鬲 +寿 +御 +袤 +铃 +萎 +砖 +餮 +脒 +裳 +肪 +孕 +嫣 +馗 +嵇 +恳 +氯 +江 +石 +褶 +冢 +祸 +阻 +狈 +羞 +银 +靳 +透 +咳 +叼 +敷 +芷 +啥 +它 +瓤 +兰 +痘 +懊 +逑 +肌 +往 +捺 +坊 +甩 +呻 +〃 +沦 +忘 +膻 +祟 +菅 +剧 +崆 +智 +坯 +臧 +霍 +墅 +攻 +眯 +倘 +拢 +骠 +铐 +庭 +岙 +瓠 +′ +缺 +泥 +迢 +捶 +? +? 
+郏 +喙 +掷 +沌 +纯 +秘 +种 +听 +绘 +固 +螨 +团 +香 +盗 +妒 +埚 +蓝 +拖 +旱 +荞 +铀 +血 +遏 +汲 +辰 +叩 +拽 +幅 +硬 +惶 +桀 +漠 +措 +泼 +唑 +齐 +肾 +念 +酱 +虚 +屁 +耶 +旗 +砦 +闵 +婉 +馆 +拭 +绅 +韧 +忏 +窝 +醋 +葺 +顾 +辞 +倜 +堆 +辋 +逆 +玟 +贱 +疾 +董 +惘 +倌 +锕 +淘 +嘀 +莽 +俭 +笏 +绑 +鲷 +杈 +择 +蟀 +粥 +嗯 +驰 +逾 +案 +谪 +褓 +胫 +哩 +昕 +颚 +鲢 +绠 +躺 +鹄 +崂 +儒 +俨 +丝 +尕 +泌 +啊 +萸 +彰 +幺 +吟 +骄 +苣 +弦 +脊 +瑰 +〈 +诛 +镁 +析 +闪 +剪 +侧 +哟 +框 +螃 +守 +嬗 +燕 +狭 +铈 +缮 +概 +迳 +痧 +鲲 +俯 +售 +笼 +痣 +扉 +挖 +满 +咋 +援 +邱 +扇 +歪 +便 +玑 +绦 +峡 +蛇 +叨 +〖 +泽 +胃 +斓 +喋 +怂 +坟 +猪 +该 +蚬 +炕 +弥 +赞 +棣 +晔 +娠 +挲 +狡 +创 +疖 +铕 +镭 +稷 +挫 +弭 +啾 +翔 +粉 +履 +苘 +哦 +楼 +秕 +铂 +土 +锣 +瘟 +挣 +栉 +习 +享 +桢 +袅 +磨 +桂 +谦 +延 +坚 +蔚 +噗 +署 +谟 +猬 +钎 +恐 +嬉 +雒 +倦 +衅 +亏 +璩 +睹 +刻 +殿 +王 +算 +雕 +麻 +丘 +柯 +骆 +丸 +塍 +谚 +添 +鲈 +垓 +桎 +蚯 +芥 +予 +飕 +镦 +谌 +窗 +醚 +菀 +亮 +搪 +莺 +蒿 +羁 +足 +J +真 +轶 +悬 +衷 +靛 +翊 +掩 +哒 +炅 +掐 +冼 +妮 +l +谐 +稚 +荆 +擒 +犯 +陵 +虏 +浓 +崽 +刍 +陌 +傻 +孜 +千 +靖 +演 +矜 +钕 +煽 +杰 +酗 +渗 +伞 +栋 +俗 +泫 +戍 +罕 +沾 +疽 +灏 +煦 +芬 +磴 +叱 +阱 +榉 +湃 +蜀 +叉 +醒 +彪 +租 +郡 +篷 +屎 +良 +垢 +隗 +弱 +陨 +峪 +砷 +掴 +颁 +胎 +雯 +绵 +贬 +沐 +撵 +隘 +篙 +暖 +曹 +陡 +栓 +填 +臼 +彦 +瓶 +琪 +潼 +哪 +鸡 +摩 +啦 +俟 +锋 +域 +耻 +蔫 +疯 +纹 +撇 +毒 +绶 +痛 +酯 +忍 +爪 +赳 +歆 +嘹 +辕 +烈 +册 +朴 +钱 +吮 +毯 +癜 +娃 +谀 +邵 +厮 +炽 +璞 +邃 +丐 +追 +词 +瓒 +忆 +轧 +芫 +谯 +喷 +弟 +半 +冕 +裙 +掖 +墉 +绮 +寝 +苔 +势 +顷 +褥 +切 +衮 +君 +佳 +嫒 +蚩 +霞 +佚 +洙 +逊 +镖 +暹 +唛 +& +殒 +顶 +碗 +獗 +轭 +铺 +蛊 +废 +恹 +汨 +崩 +珍 +那 +杵 +曲 +纺 +夏 +薰 +傀 +闳 +淬 +姘 +舀 +拧 +卷 +楂 +恍 +讪 +厩 +寮 +篪 +赓 +乘 +灭 +盅 +鞣 +沟 +慎 +挂 +饺 +鼾 +杳 +树 +缨 +丛 +絮 +娌 +臻 +嗳 +篡 +侩 +述 +衰 +矛 +圈 +蚜 +匕 +筹 +匿 +濞 +晨 +叶 +骋 +郝 +挚 +蚴 +滞 +增 +侍 +描 +瓣 +吖 +嫦 +蟒 +匾 +圣 +赌 +毡 +癞 +恺 +百 +曳 +需 +篓 +肮 +庖 +帏 +卿 +驿 +遗 +蹬 +鬓 +骡 +歉 +芎 +胳 +屐 +禽 +烦 +晌 +寄 +媾 +狄 +翡 +苒 +船 +廉 +终 +痞 +殇 +々 +畦 +饶 +改 +拆 +悻 +萄 +£ +瓿 +乃 +訾 +桅 +匮 +溧 +拥 +纱 +铍 +骗 +蕃 +龋 +缬 +父 +佐 +疚 +栎 +醍 +掳 +蓄 +x +惆 +颜 +鲆 +榆 +〔 +猎 +敌 +暴 +谥 +鲫 +贾 +罗 +玻 +缄 +扦 +芪 +癣 +落 +徒 +臾 +恿 +猩 +托 +邴 +肄 +牵 +春 +陛 +耀 +刊 +拓 +蓓 +邳 +堕 +寇 +枉 +淌 +啡 +湄 +兽 +酷 +萼 +碚 +濠 +萤 +夹 +旬 +戮 +梭 +琥 +椭 +昔 +勺 +蜊 +绐 +晚 +孺 +僵 +宣 +摄 +冽 +旨 +萌 +忙 +蚤 +眉 +噼 +蟑 +付 +契 +瓜 +悼 +颡 +壁 +曾 +窕 +颢 +澎 +仿 +俑 +浑 +嵌 +浣 +乍 +碌 +褪 +乱 +蔟 +隙 +玩 +剐 +葫 +箫 +纲 +围 +伐 +决 +伙 +漩 +瑟 +刑 +肓 +镳 +缓 +蹭 +氨 +皓 +典 +畲 +坍 +铑 +檐 +塑 +洞 +倬 +储 +胴 +淳 +戾 +吐 +灼 +惺 +妙 +毕 +珐 +缈 +虱 +盖 +羰 +鸿 +磅 +谓 +髅 +娴 +苴 +唷 +蚣 +霹 +抨 +贤 +唠 +犬 +誓 +逍 +庠 +逼 +麓 +籼 +釉 +呜 +碧 +秧 +氩 +摔 +霄 +穸 +纨 +辟 +妈 +映 +完 +牛 +缴 +嗷 +炊 +恩 +荔 +茆 +掉 +紊 +慌 +莓 +羟 +阙 +萁 +磐 +另 +蕹 +辱 +鳐 +湮 +吡 +吩 +唐 +睦 +垠 +舒 +圜 +冗 +瞿 +溺 +芾 +囱 +匠 +僳 +汐 +菩 +饬 +漓 +黑 +霰 +浸 +濡 +窥 +毂 +蒡 +兢 +驻 +鹉 +芮 +诙 +迫 +雳 +厂 +忐 +臆 +猴 +鸣 +蚪 +栈 +箕 +羡 +渐 +莆 +捍 +眈 +哓 +趴 +蹼 +埕 +嚣 +骛 +宏 +淄 +斑 +噜 +严 +瑛 +垃 +椎 +诱 +压 +庾 +绞 +焘 +廿 +抡 +迄 +棘 +夫 +纬 +锹 +眨 +瞌 +侠 +脐 +竞 +瀑 +孳 +骧 +遁 +姜 +颦 +荪 +滚 +萦 +伪 +逸 +粳 +爬 +锁 +矣 +役 +趣 +洒 +颔 +诏 +逐 +奸 +甭 +惠 +攀 +蹄 +泛 +尼 +拼 +阮 +鹰 +亚 +颈 +惑 +勒 +〉 +际 +肛 +爷 +刚 +钨 +丰 +养 +冶 +鲽 +辉 +蔻 +画 +覆 +皴 +妊 +麦 +返 +醉 +皂 +擀 +〗 +酶 +凑 +粹 +悟 +诀 +硖 +港 +卜 +z +杀 +涕 +± +舍 +铠 +抵 +弛 +段 +敝 +镐 +奠 +拂 +轴 +跛 +袱 +e +t +沉 +菇 +俎 +薪 +峦 +秭 +蟹 +历 +盟 +菠 +寡 +液 +肢 +喻 +染 +裱 +悱 +抱 +氙 +赤 +捅 +猛 +跑 +氮 +谣 +仁 +尺 +辊 +窍 +烙 +衍 +架 +擦 +倏 +璐 +瑁 +币 +楞 +胖 +夔 +趸 +邛 +惴 +饕 +虔 +蝎 +§ +哉 +贝 +宽 +辫 +炮 +扩 +饲 +籽 +魏 +菟 +锰 +伍 +猝 +末 +琳 +哚 +蛎 +邂 +呀 +姿 +鄞 +却 +歧 +仙 +恸 +椐 +森 +牒 +寤 +袒 +婆 +虢 +雅 +钉 +朵 +贼 +欲 +苞 +寰 +故 +龚 +坭 +嘘 +咫 +礼 +硷 +兀 +睢 +汶 +’ +铲 +烧 +绕 +诃 +浃 +钿 +哺 +柜 +讼 +颊 +璁 +腔 +洽 +咐 +脲 +簌 +筠 +镣 +玮 +鞠 +谁 +兼 +姆 +挥 +梯 +蝴 +谘 +漕 +刷 +躏 +宦 +弼 +b +垌 +劈 +麟 +莉 +揭 +笙 +渎 +仕 +嗤 +仓 +配 +怏 +抬 +错 +泯 +镊 +孰 +猿 +邪 +仍 +秋 +鼬 +壹 +歇 +吵 +炼 +< +尧 +射 +柬 +廷 +胧 +霾 +凳 +隋 +肚 +浮 +梦 +祥 +株 +堵 +退 +L +鹫 +跎 +凶 +毽 +荟 +炫 +栩 +玳 +甜 +沂 +鹿 +顽 +伯 +爹 +赔 +蛴 +徐 +匡 +欣 +狰 +缸 +雹 +蟆 +疤 +默 +沤 +啜 +痂 +衣 +禅 +w +i +h +辽 +葳 +黝 +钗 +停 +沽 +棒 +馨 +颌 +肉 +吴 +硫 +悯 +劾 +娈 +马 +啧 +吊 +悌 +镑 +峭 +帆 +瀣 +涉 +咸 +疸 +滋 +泣 +翦 +拙 +癸 +钥 +蜒 ++ +尾 +庄 +凝 +泉 +婢 +渴 +谊 +乞 +陆 +锉 +糊 +鸦 +淮 +I +B +N +晦 +弗 +乔 +庥 +葡 +尻 +席 +橡 +傣 +渣 +拿 +惩 +麋 +斛 +缃 +矮 +蛏 +岘 +鸽 +姐 +膏 +催 +奔 +镒 +喱 +蠡 +摧 +钯 +胤 +柠 +拐 +璋 +鸥 +卢 +荡 +倾 +^ +_ +珀 +逄 +萧 +塾 +掇 +贮 +笆 +聂 +圃 +冲 +嵬 +M +滔 +笕 +值 
+炙 +偶 +蜱 +搐 +梆 +汪 +蔬 +腑 +鸯 +蹇 +敞 +绯 +仨 +祯 +谆 +梧 +糗 +鑫 +啸 +豺 +囹 +猾 +巢 +柄 +瀛 +筑 +踌 +沭 +暗 +苁 +鱿 +蹉 +脂 +蘖 +牢 +热 +木 +吸 +溃 +宠 +序 +泞 +偿 +拜 +檩 +厚 +朐 +毗 +螳 +吞 +媚 +朽 +担 +蝗 +橘 +畴 +祈 +糟 +盱 +隼 +郜 +惜 +珠 +裨 +铵 +焙 +琚 +唯 +咚 +噪 +骊 +丫 +滢 +勤 +棉 +呸 +咣 +淀 +隔 +蕾 +窈 +饨 +挨 +煅 +短 +匙 +粕 +镜 +赣 +撕 +墩 +酬 +馁 +豌 +颐 +抗 +酣 +氓 +佑 +搁 +哭 +递 +耷 +涡 +桃 +贻 +碣 +截 +瘦 +昭 +镌 +蔓 +氚 +甲 +猕 +蕴 +蓬 +散 +拾 +纛 +狼 +猷 +铎 +埋 +旖 +矾 +讳 +囊 +糜 +迈 +粟 +蚂 +紧 +鲳 +瘢 +栽 +稼 +羊 +锄 +斟 +睁 +桥 +瓮 +蹙 +祉 +醺 +鼻 +昱 +剃 +跳 +篱 +跷 +蒜 +翎 +宅 +晖 +嗑 +壑 +峻 +癫 +屏 +狠 +陋 +袜 +途 +憎 +祀 +莹 +滟 +佶 +溥 +臣 +约 +盛 +峰 +磁 +慵 +婪 +拦 +莅 +朕 +鹦 +粲 +裤 +哎 +疡 +嫖 +琵 +窟 +堪 +谛 +嘉 +儡 +鳝 +斩 +郾 +驸 +酊 +妄 +胜 +贺 +徙 +傅 +噌 +钢 +栅 +庇 +恋 +匝 +巯 +邈 +尸 +锚 +粗 +佟 +蛟 +薹 +纵 +蚊 +郅 +绢 +锐 +苗 +俞 +篆 +淆 +膀 +鲜 +煎 +诶 +秽 +寻 +涮 +刺 +怀 +噶 +巨 +褰 +魅 +灶 +灌 +桉 +藕 +谜 +舸 +薄 +搀 +恽 +借 +牯 +痉 +渥 +愿 +亓 +耘 +杠 +柩 +锔 +蚶 +钣 +珈 +喘 +蹒 +幽 +赐 +稗 +晤 +莱 +泔 +扯 +肯 +菪 +裆 +腩 +豉 +疆 +骜 +腐 +倭 +珏 +唔 +粮 +亡 +润 +慰 +伽 +橄 +玄 +誉 +醐 +胆 +龊 +粼 +塬 +陇 +彼 +削 +嗣 +绾 +芽 +妗 +垭 +瘴 +爽 +薏 +寨 +龈 +泠 +弹 +赢 +漪 +猫 +嘧 +涂 +恤 +圭 +茧 +烽 +屑 +痕 +巾 +赖 +荸 +凰 +腮 +畈 +亵 +蹲 +偃 +苇 +澜 +艮 +换 +骺 +烘 +苕 +梓 +颉 +肇 +哗 +悄 +氤 +涠 +葬 +屠 +鹭 +植 +竺 +佯 +诣 +鲇 +瘀 +鲅 +邦 +移 +滁 +冯 +耕 +癔 +戌 +茬 +沁 +巩 +悠 +湘 +洪 +痹 +锟 +循 +谋 +腕 +鳃 +钠 +捞 +焉 +迎 +碱 +伫 +急 +榷 +奈 +邝 +卯 +辄 +皲 +卟 +醛 +畹 +忧 +稳 +雄 +昼 +缩 +阈 +睑 +扌 +耗 +曦 +涅 +捏 +瞧 +邕 +淖 +漉 +铝 +耦 +禹 +湛 +喽 +莼 +琅 +诸 +苎 +纂 +硅 +始 +嗨 +傥 +燃 +臂 +赅 +嘈 +呆 +贵 +屹 +壮 +肋 +亍 +蚀 +卅 +豹 +腆 +邬 +迭 +浊 +} +童 +螂 +捐 +圩 +勐 +触 +寞 +汊 +壤 +荫 +膺 +渌 +芳 +懿 +遴 +螈 +泰 +蓼 +蛤 +茜 +舅 +枫 +朔 +膝 +眙 +避 +梅 +判 +鹜 +璜 +牍 +缅 +垫 +藻 +黔 +侥 +惚 +懂 +踩 +腰 +腈 +札 +丞 +唾 +慈 +顿 +摹 +荻 +琬 +~ +斧 +沈 +滂 +胁 +胀 +幄 +莜 +Z +匀 +鄄 +掌 +绰 +茎 +焚 +赋 +萱 +谑 +汁 +铒 +瞎 +夺 +蜗 +野 +娆 +冀 +弯 +篁 +懵 +灞 +隽 +芡 +脘 +俐 +辩 +芯 +掺 +喏 +膈 +蝈 +觐 +悚 +踹 +蔗 +熠 +鼠 +呵 +抓 +橼 +峨 +畜 +缔 +禾 +崭 +弃 +熊 +摒 +凸 +拗 +穹 +蒙 +抒 +祛 +劝 +闫 +扳 +阵 +醌 +踪 +喵 +侣 +搬 +仅 +荧 +赎 +蝾 +琦 +买 +婧 +瞄 +寓 +皎 +冻 +赝 +箩 +莫 +瞰 +郊 +笫 +姝 +筒 +枪 +遣 +煸 +袋 +舆 +痱 +涛 +母 +〇 +启 +践 +耙 +绲 +盘 +遂 +昊 +搞 +槿 +诬 +纰 +泓 +惨 +檬 +亻 +越 +C +o +憩 +熵 +祷 +钒 +暧 +塔 +阗 +胰 +咄 +娶 +魔 +琶 +钞 +邻 +扬 +杉 +殴 +咽 +弓 +〆 +髻 +】 +吭 +揽 +霆 +拄 +殖 +脆 +彻 +岩 +芝 +勃 +辣 +剌 +钝 +嘎 +甄 +佘 +皖 +伦 +授 +徕 +憔 +挪 +皇 +庞 +稔 +芜 +踏 +溴 +兖 +卒 +擢 +饥 +鳞 +煲 +‰ +账 +颗 +叻 +斯 +捧 +鳍 +琮 +讹 +蛙 +纽 +谭 +酸 +兔 +莒 +睇 +伟 +觑 +羲 +嗜 +宜 +褐 +旎 +辛 +卦 +诘 +筋 +鎏 +溪 +挛 +熔 +阜 +晰 +鳅 +丢 +奚 +灸 +呱 +献 +陉 +黛 +鸪 +甾 +萨 +疮 +拯 +洲 +疹 +辑 +叙 +恻 +谒 +允 +柔 +烂 +氏 +逅 +漆 +拎 +惋 +扈 +湟 +纭 +啕 +掬 +擞 +哥 +忽 +涤 +鸵 +靡 +郗 +瓷 +扁 +廊 +怨 +雏 +钮 +敦 +E +懦 +憋 +汀 +拚 +啉 +腌 +岸 +f +痼 +瞅 +尊 +咀 +眩 +飙 +忌 +仝 +迦 +熬 +毫 +胯 +篑 +茄 +腺 +凄 +舛 +碴 +锵 +诧 +羯 +後 +漏 +汤 +宓 +仞 +蚁 +壶 +谰 +皑 +铄 +棰 +罔 +辅 +晶 +苦 +牟 +闽 +\ +烃 +饮 +聿 +丙 +蛳 +朱 +煤 +涔 +鳖 +犁 +罐 +荼 +砒 +淦 +妤 +黏 +戎 +孑 +婕 +瑾 +戢 +钵 +枣 +捋 +砥 +衩 +狙 +桠 +稣 +阎 +肃 +梏 +诫 +孪 +昶 +婊 +衫 +嗔 +侃 +塞 +蜃 +樵 +峒 +貌 +屿 +欺 +缫 +阐 +栖 +诟 +珞 +荭 +吝 +萍 +嗽 +恂 +啻 +蜴 +磬 +峋 +俸 +豫 +谎 +徊 +镍 +韬 +魇 +晴 +U +囟 +猜 +蛮 +坐 +囿 +伴 +亭 +肝 +佗 +蝠 +妃 +胞 +滩 +榴 +氖 +垩 +苋 +砣 +扪 +馏 +姓 +轩 +厉 +夥 +侈 +禀 +垒 +岑 +赏 +钛 +辐 +痔 +披 +纸 +碳 +“ +坞 +蠓 +挤 +荥 +沅 +悔 +铧 +帼 +蒌 +蝇 +a +p +y +n +g +哀 +浆 +瑶 +凿 +桶 +馈 +皮 +奴 +苜 +佤 +伶 +晗 +铱 +炬 +优 +弊 +氢 +恃 +甫 +攥 +端 +锌 +灰 +稹 +炝 +曙 +邋 +亥 +眶 +碾 +拉 +萝 +绔 +捷 +浍 +腋 +姑 +菖 +凌 +涞 +麽 +锢 +桨 +潢 +绎 +镰 +殆 +锑 +渝 +铬 +困 +绽 +觎 +匈 +糙 +暑 +裹 +鸟 +盔 +肽 +迷 +綦 +『 +亳 +佝 +俘 +钴 +觇 +骥 +仆 +疝 +跪 +婶 +郯 +瀹 +唉 +脖 +踞 +针 +晾 +忒 +扼 +瞩 +叛 +椒 +疟 +嗡 +邗 +肆 +跆 +玫 +忡 +捣 +咧 +唆 +艄 +蘑 +潦 +笛 +阚 +沸 +泻 +掊 +菽 +贫 +斥 +髂 +孢 +镂 +赂 +麝 +鸾 +屡 +衬 +苷 +恪 +叠 +希 +粤 +爻 +喝 +茫 +惬 +郸 +绻 +庸 +撅 +碟 +宄 +妹 +膛 +叮 +饵 +崛 +嗲 +椅 +冤 +搅 +咕 +敛 +尹 +垦 +闷 +蝉 +霎 +勰 +败 +蓑 +泸 +肤 +鹌 +幌 +焦 +浠 +鞍 +刁 +舰 +乙 +竿 +裔 +。 +茵 +函 +伊 +兄 +丨 +娜 +匍 +謇 +莪 +宥 +似 +蝽 +翳 +酪 +翠 +粑 +薇 +祢 +骏 +赠 +叫 +Q +噤 +噻 +竖 +芗 +莠 +潭 +俊 +羿 +耜 +O +郫 +趁 +嗪 +囚 +蹶 +芒 +洁 +笋 +鹑 +敲 +硝 +啶 +堡 +渲 +揩 +』 +携 +宿 +遒 +颍 +扭 +棱 +割 +萜 +蔸 +葵 +琴 +捂 +饰 +衙 +耿 +掠 +募 +岂 +窖 +涟 +蔺 +瘤 +柞 +瞪 +怜 +匹 +距 +楔 +炜 +哆 +秦 +缎 +幼 +茁 +绪 +痨 +恨 +楸 +娅 +瓦 +桩 +雪 +嬴 +伏 +榔 +妥 +铿 +拌 +眠 +雍 +缇 +‘ +卓 +搓 +哌 +觞 +噩 +屈 +哧 +髓 +咦 +巅 +娑 +侑 +淫 +膳 +祝 +勾 +姊 +莴 
+胄 +疃 +薛 +蜷 +胛 +巷 +芙 +芋 +熙 +闰 +勿 +窃 +狱 +剩 +钏 +幢 +陟 +铛 +慧 +靴 +耍 +k +浙 +浇 +飨 +惟 +绗 +祜 +澈 +啼 +咪 +磷 +摞 +诅 +郦 +抹 +跃 +壬 +吕 +肖 +琏 +颤 +尴 +剡 +抠 +凋 +赚 +泊 +津 +宕 +殷 +倔 +氲 +漫 +邺 +涎 +怠 +$ +垮 +荬 +遵 +俏 +叹 +噢 +饽 +蜘 +孙 +筵 +疼 +鞭 +羧 +牦 +箭 +潴 +c +眸 +祭 +髯 +啖 +坳 +愁 +芩 +驮 +倡 +巽 +穰 +沃 +胚 +怒 +凤 +槛 +剂 +趵 +嫁 +v +邢 +灯 +鄢 +桐 +睽 +檗 +锯 +槟 +婷 +嵋 +圻 +诗 +蕈 +颠 +遭 +痢 +芸 +怯 +馥 +竭 +锗 +徜 +恭 +遍 +籁 +剑 +嘱 +苡 +龄 +僧 +桑 +潸 +弘 +澶 +楹 +悲 +讫 +愤 +腥 +悸 +谍 +椹 +呢 +桓 +葭 +攫 +阀 +翰 +躲 +敖 +柑 +郎 +笨 +橇 +呃 +魁 +燎 +脓 +葩 +磋 +垛 +玺 +狮 +沓 +砜 +蕊 +锺 +罹 +蕉 +翱 +虐 +闾 +巫 +旦 +茱 +嬷 +枯 +鹏 +贡 +芹 +汛 +矫 +绁 +拣 +禺 +佃 +讣 +舫 +惯 +乳 +趋 +疲 +挽 +岚 +虾 +衾 +蠹 +蹂 +飓 +氦 +铖 +孩 +稞 +瑜 +壅 +掀 +勘 +妓 +畅 +髋 +W +庐 +牲 +蓿 +榕 +练 +垣 +唱 +邸 +菲 +昆 +婺 +穿 +绡 +麒 +蚱 +掂 +愚 +泷 +涪 +漳 +妩 +娉 +榄 +讷 +觅 +旧 +藤 +煮 +呛 +柳 +腓 +叭 +庵 +烷 +阡 +罂 +蜕 +擂 +猖 +咿 +媲 +脉 +【 +沏 +貅 +黠 +熏 +哲 +烁 +坦 +酵 +兜 +× +潇 +撒 +剽 +珩 +圹 +乾 +摸 +樟 +帽 +嗒 +襄 +魂 +轿 +憬 +锡 +〕 +喃 +皆 +咖 +隅 +脸 +残 +泮 +袂 +鹂 +珊 +囤 +捆 +咤 +误 +徨 +闹 +淙 +芊 +淋 +怆 +囗 +拨 +梳 +渤 +R +G +绨 +蚓 +婀 +幡 +狩 +麾 +谢 +唢 +裸 +旌 +伉 +纶 +裂 +驳 +砼 +咛 +澄 +樨 +蹈 +宙 +澍 +倍 +貔 +操 +勇 +蟠 +摈 +砧 +虬 +够 +缁 +悦 +藿 +撸 +艹 +摁 +淹 +豇 +虎 +榭 +ˉ +吱 +d +° +喧 +荀 +踱 +侮 +奋 +偕 +饷 +犍 +惮 +坑 +璎 +徘 +宛 +妆 +袈 +倩 +窦 +昂 +荏 +乖 +K +怅 +撰 +鳙 +牙 +袁 +酞 +X +痿 +琼 +闸 +雁 +趾 +荚 +虻 +涝 +《 +杏 +韭 +偈 +烤 +绫 +鞘 +卉 +症 +遢 +蓥 +诋 +杭 +荨 +匆 +竣 +簪 +辙 +敕 +虞 +丹 +缭 +咩 +黟 +m +淤 +瑕 +咂 +铉 +硼 +茨 +嶂 +痒 +畸 +敬 +涿 +粪 +窘 +熟 +叔 +嫔 +盾 +忱 +裘 +憾 +梵 +赡 +珙 +咯 +娘 +庙 +溯 +胺 +葱 +痪 +摊 +荷 +卞 +乒 +髦 +寐 +铭 +坩 +胗 +枷 +爆 +溟 +嚼 +羚 +砬 +轨 +惊 +挠 +罄 +竽 +菏 +氧 +浅 +楣 +盼 +枢 +炸 +阆 +杯 +谏 +噬 +淇 +渺 +俪 +秆 +墓 +泪 +跻 +砌 +痰 +垡 +渡 +耽 +釜 +讶 +鳎 +煞 +呗 +韶 +舶 +绷 +鹳 +缜 +旷 +铊 +皱 +龌 +檀 +霖 +奄 +槐 +艳 +蝶 +旋 +哝 +赶 +骞 +蚧 +腊 +盈 +丁 +` +蜚 +矸 +蝙 +睨 +嚓 +僻 +鬼 +醴 +夜 +彝 +磊 +笔 +拔 +栀 +糕 +厦 +邰 +纫 +逭 +纤 +眦 +膊 +馍 +躇 +烯 +蘼 +冬 +诤 +暄 +骶 +哑 +瘠 +」 +臊 +丕 +愈 +咱 +螺 +擅 +跋 +搏 +硪 +谄 +笠 +淡 +嘿 +骅 +谧 +鼎 +皋 +姚 +歼 +蠢 +驼 +耳 +胬 +挝 +涯 +狗 +蒽 +孓 +犷 +凉 +芦 +箴 +铤 +孤 +嘛 +坤 +V +茴 +朦 +挞 +尖 +橙 +诞 +搴 +碇 +洵 +浚 +帚 +蜍 +漯 +柘 +嚎 +讽 +芭 +荤 +咻 +祠 +秉 +跖 +埃 +吓 +糯 +眷 +馒 +惹 +娼 +鲑 +嫩 +讴 +轮 +瞥 +靶 +褚 +乏 +缤 +宋 +帧 +删 +驱 +碎 +扑 +俩 +俄 +偏 +涣 +竹 +噱 +皙 +佰 +渚 +唧 +斡 +# +镉 +刀 +崎 +筐 +佣 +夭 +贰 +肴 +峙 +哔 +艿 +匐 +牺 +镛 +缘 +仡 +嫡 +劣 +枸 +堀 +梨 +簿 +鸭 +蒸 +亦 +稽 +浴 +{ +衢 +束 +槲 +j +阁 +揍 +疥 +棋 +潋 +聪 +窜 +乓 +睛 +插 +冉 +阪 +苍 +搽 +「 +蟾 +螟 +幸 +仇 +樽 +撂 +慢 +跤 +幔 +俚 +淅 +覃 +觊 +溶 +妖 +帛 +侨 +曰 +妾 +泗 +· +: +瀘 +風 +Ë +( +) +∶ +紅 +紗 +瑭 +雲 +頭 +鶏 +財 +許 +• +¥ +樂 +焗 +麗 +— +; +滙 +東 +榮 +繪 +興 +… +門 +業 +π +楊 +國 +顧 +é +盤 +寳 +Λ +龍 +鳳 +島 +誌 +緣 +結 +銭 +萬 +勝 +祎 +璟 +優 +歡 +臨 +時 +購 += +★ +藍 +昇 +鐵 +觀 +勅 +農 +聲 +畫 +兿 +術 +發 +劉 +記 +專 +耑 +園 +書 +壴 +種 +Ο +● +褀 +號 +銀 +匯 +敟 +锘 +葉 +橪 +廣 +進 +蒄 +鑽 +阝 +祙 +貢 +鍋 +豊 +夬 +喆 +團 +閣 +開 +燁 +賓 +館 +酡 +沔 +順 ++ +硚 +劵 +饸 +陽 +車 +湓 +復 +萊 +氣 +軒 +華 +堃 +迮 +纟 +戶 +馬 +學 +裡 +電 +嶽 +獨 +マ +シ +サ +ジ +燘 +袪 +環 +❤ +臺 +灣 +専 +賣 +孖 +聖 +攝 +線 +▪ +α +傢 +俬 +夢 +達 +莊 +喬 +貝 +薩 +劍 +羅 +壓 +棛 +饦 +尃 +璈 +囍 +醫 +G +I +A +# +N +鷄 +髙 +嬰 +啓 +約 +隹 +潔 +賴 +藝 +~ +寶 +籣 +麺 +  +嶺 +√ +義 +網 +峩 +長 +∧ +魚 +機 +構 +② +鳯 +偉 +L +B +㙟 +畵 +鴿 +' +詩 +溝 +嚞 +屌 +藔 +佧 +玥 +蘭 +織 +1 +3 +9 +0 +7 +點 +砭 +鴨 +鋪 +銘 +廳 +弍 +‧ +創 +湯 +坶 +℃ +卩 +骝 +& +烜 +荘 +當 +潤 +扞 +係 +懷 +碶 +钅 +蚨 +讠 +☆ +叢 +爲 +埗 +涫 +塗 +→ +楽 +現 +鯨 +愛 +瑪 +鈺 +忄 +悶 +藥 +飾 +樓 +視 +孬 +ㆍ +燚 +苪 +師 +① +丼 +锽 +│ +韓 +標 +è +兒 +閏 +匋 +張 +漢 +Ü +髪 +會 +閑 +檔 +習 +裝 +の +峯 +菘 +輝 +И +雞 +釣 +億 +浐 +K +O +R +8 +H +E +P +T +W +D +S +C +M +F +姌 +饹 +» +晞 +廰 +ä +嵯 +鷹 +負 +飲 +絲 +冚 +楗 +澤 +綫 +區 +❋ +← +質 +靑 +揚 +③ +滬 +統 +産 +協 +﹑ +乸 +畐 +經 +運 +際 +洺 +岽 +為 +粵 +諾 +崋 +豐 +碁 +ɔ +V +2 +6 +齋 +誠 +訂 +´ +勑 +雙 +陳 +無 +í +泩 +媄 +夌 +刂 +i +c +t +o +r +a +嘢 +耄 +燴 +暃 +壽 +媽 +靈 +抻 +體 +唻 +É +冮 +甹 +鎮 +錦 +ʌ +蜛 +蠄 +尓 +駕 +戀 +飬 +逹 +倫 +貴 +極 +Я +Й +寬 +磚 +嶪 +郎 +職 +| +間 +n +d +剎 +伈 +課 +飛 +橋 +瘊 +№ +譜 +骓 +圗 +滘 +縣 +粿 +咅 +養 +濤 +彳 +® +% +Ⅱ +啰 +㴪 +見 +矞 +薬 +糁 +邨 +鲮 +顔 +罱 +З +選 +話 +贏 +氪 +俵 +競 +瑩 +繡 +枱 +β +綉 +á +獅 +爾 +™ +麵 +戋 +淩 +徳 +個 +劇 +場 +務 +簡 +寵 +h +實 +膠 +轱 +圖 +築 +嘣 +樹 +㸃 +營 +耵 +孫 +饃 +鄺 +飯 +麯 +遠 +輸 +坫 +孃 +乚 
+閃 +鏢 +㎡ +題 +廠 +關 +↑ +爺 +將 +軍 +連 +篦 +覌 +參 +箸 +- +窠 +棽 +寕 +夀 +爰 +歐 +呙 +閥 +頡 +熱 +雎 +垟 +裟 +凬 +勁 +帑 +馕 +夆 +疌 +枼 +馮 +貨 +蒤 +樸 +彧 +旸 +靜 +龢 +暢 +㐱 +鳥 +珺 +鏡 +灡 +爭 +堷 +廚 +Ó +騰 +診 +┅ +蘇 +褔 +凱 +頂 +豕 +亞 +帥 +嘬 +⊥ +仺 +桖 +複 +饣 +絡 +穂 +顏 +棟 +納 +▏ +濟 +親 +設 +計 +攵 +埌 +烺 +ò +頤 +燦 +蓮 +撻 +節 +講 +濱 +濃 +娽 +洳 +朿 +燈 +鈴 +護 +膚 +铔 +過 +補 +Z +U +5 +4 +坋 +闿 +䖝 +餘 +缐 +铞 +貿 +铪 +桼 +趙 +鍊 +[ +㐂 +垚 +菓 +揸 +捲 +鐘 +滏 +𣇉 +爍 +輪 +燜 +鴻 +鮮 +動 +鹞 +鷗 +丄 +慶 +鉌 +翥 +飮 +腸 +⇋ +漁 +覺 +來 +熘 +昴 +翏 +鲱 +圧 +鄉 +萭 +頔 +爐 +嫚 +г +貭 +類 +聯 +幛 +輕 +訓 +鑒 +夋 +锨 +芃 +珣 +䝉 +扙 +嵐 +銷 +處 +ㄱ +語 +誘 +苝 +歸 +儀 +燒 +楿 +內 +粢 +葒 +奧 +麥 +礻 +滿 +蠔 +穵 +瞭 +態 +鱬 +榞 +硂 +鄭 +黃 +煙 +祐 +奓 +逺 +* +瑄 +獲 +聞 +薦 +讀 +這 +樣 +決 +問 +啟 +們 +執 +説 +轉 +單 +隨 +唘 +帶 +倉 +庫 +還 +贈 +尙 +皺 +■ +餅 +產 +○ +∈ +報 +狀 +楓 +賠 +琯 +嗮 +禮 +` +傳 +> +≤ +嗞 +Φ +≥ +換 +咭 +∣ +↓ +曬 +ε +応 +寫 +″ +終 +様 +純 +費 +療 +聨 +凍 +壐 +郵 +ü +黒 +∫ +製 +塊 +調 +軽 +確 +撃 +級 +馴 +Ⅲ +涇 +繹 +數 +碼 +證 +狒 +処 +劑 +< +晧 +賀 +衆 +] +櫥 +兩 +陰 +絶 +對 +鯉 +憶 +◎ +p +e +Y +蕒 +煖 +頓 +測 +試 +鼽 +僑 +碩 +妝 +帯 +≈ +鐡 +舖 +權 +喫 +倆 +ˋ +該 +悅 +ā +俫 +. +f +s +b +m +k +g +u +j +貼 +淨 +濕 +針 +適 +備 +l +/ +給 +謢 +強 +觸 +衛 +與 +⊙ +$ +緯 +變 +⑴ +⑵ +⑶ +㎏ +殺 +∩ +幚 +─ +價 +▲ +離 +ú +ó +飄 +烏 +関 +閟 +﹝ +﹞ +邏 +輯 +鍵 +驗 +訣 +導 +歷 +屆 +層 +▼ +儱 +錄 +熳 +ē +艦 +吋 +錶 +辧 +飼 +顯 +④ +禦 +販 +気 +対 +枰 +閩 +紀 +幹 +瞓 +貊 +淚 +△ +眞 +墊 +Ω +獻 +褲 +縫 +緑 +亜 +鉅 +餠 +{ +} +◆ +蘆 +薈 +█ +◇ +溫 +彈 +晳 +粧 +犸 +穩 +訊 +崬 +凖 +熥 +П +舊 +條 +紋 +圍 +Ⅳ +筆 +尷 +難 +雜 +錯 +綁 +識 +頰 +鎖 +艶 +□ +殁 +殼 +⑧ +├ +▕ +鵬 +ǐ +ō +ǒ +糝 +綱 +▎ +μ +盜 +饅 +醬 +籤 +蓋 +釀 +鹽 +據 +à +ɡ +辦 +◥ +彐 +┌ +婦 +獸 +鲩 +伱 +ī +蒟 +蒻 +齊 +袆 +腦 +寧 +凈 +妳 +煥 +詢 +偽 +謹 +啫 +鯽 +騷 +鱸 +損 +傷 +鎻 +髮 +買 +冏 +儥 +両 +﹢ +∞ +載 +喰 +z +羙 +悵 +燙 +曉 +員 +組 +徹 +艷 +痠 +鋼 +鼙 +縮 +細 +嚒 +爯 +≠ +維 +" +鱻 +壇 +厍 +帰 +浥 +犇 +薡 +軎 +² +應 +醜 +刪 +緻 +鶴 +賜 +噁 +軌 +尨 +镔 +鷺 +槗 +彌 +葚 +濛 +請 +溇 +緹 +賢 +訪 +獴 +瑅 +資 +縤 +陣 +蕟 +栢 +韻 +祼 +恁 +伢 +謝 +劃 +涑 +總 +衖 +踺 +砋 +凉 +籃 +駿 +苼 +瘋 +昽 +紡 +驊 +腎 +﹗ +響 +杋 +剛 +嚴 +禪 +歓 +槍 +傘 +檸 +檫 +炣 +勢 +鏜 +鎢 +銑 +尐 +減 +奪 +惡 +θ +僮 +婭 +臘 +ū +ì +殻 +鉄 +∑ +蛲 +焼 +緖 +續 +紹 +懮 \ No newline at end of file diff --git a/inpaint/model/anytext/utils.py b/inpaint/model/anytext/utils.py new file mode 100644 index 0000000..c9f55b8 --- /dev/null +++ b/inpaint/model/anytext/utils.py @@ -0,0 +1,151 @@ +import os +import datetime +import cv2 +import numpy as np +from PIL import Image, ImageDraw + + +def save_images(img_list, folder): + if not os.path.exists(folder): + os.makedirs(folder) + now = datetime.datetime.now() + date_str = now.strftime("%Y-%m-%d") + folder_path = os.path.join(folder, date_str) + if not os.path.exists(folder_path): + os.makedirs(folder_path) + time_str = now.strftime("%H_%M_%S") + for idx, img in enumerate(img_list): + image_number = idx + 1 + filename = f"{time_str}_{image_number}.jpg" + save_path = os.path.join(folder_path, filename) + cv2.imwrite(save_path, img[..., ::-1]) + + +def check_channels(image): + channels = image.shape[2] if len(image.shape) == 3 else 1 + if channels == 1: + image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) + elif channels > 3: + image = image[:, :, :3] + return image + + +def resize_image(img, max_length=768): + height, width = img.shape[:2] + max_dimension = max(height, width) + + if max_dimension > max_length: + scale_factor = max_length / max_dimension + new_width = int(round(width * scale_factor)) + new_height = int(round(height * scale_factor)) + new_size = (new_width, new_height) + img = cv2.resize(img, new_size) + height, width = img.shape[:2] + img = cv2.resize(img, (width - (width % 64), height - (height % 64))) + return img + + +def insert_spaces(string, nSpace): + if nSpace == 0: + return string + new_string = "" + for char in string: + new_string += char + " " * nSpace + return new_string[:-nSpace] + + +def 
draw_glyph(font, text): + g_size = 50 + W, H = (512, 80) + new_font = font.font_variant(size=g_size) + img = Image.new(mode="1", size=(W, H), color=0) + draw = ImageDraw.Draw(img) + left, top, right, bottom = new_font.getbbox(text) + text_width = max(right - left, 5) + text_height = max(bottom - top, 5) + ratio = min(W * 0.9 / text_width, H * 0.9 / text_height) + new_font = font.font_variant(size=int(g_size * ratio)) + + text_width, text_height = new_font.getsize(text) + offset_x, offset_y = new_font.getoffset(text) + x = (img.width - text_width) // 2 + y = (img.height - text_height) // 2 - offset_y // 2 + draw.text((x, y), text, font=new_font, fill="white") + img = np.expand_dims(np.array(img), axis=2).astype(np.float64) + return img + + +def draw_glyph2( + font, text, polygon, vertAng=10, scale=1, width=512, height=512, add_space=True +): + enlarge_polygon = polygon * scale + rect = cv2.minAreaRect(enlarge_polygon) + box = cv2.boxPoints(rect) + box = np.int0(box) + w, h = rect[1] + angle = rect[2] + if angle < -45: + angle += 90 + angle = -angle + if w < h: + angle += 90 + + vert = False + if abs(angle) % 90 < vertAng or abs(90 - abs(angle) % 90) % 90 < vertAng: + _w = max(box[:, 0]) - min(box[:, 0]) + _h = max(box[:, 1]) - min(box[:, 1]) + if _h >= _w: + vert = True + angle = 0 + + img = np.zeros((height * scale, width * scale, 3), np.uint8) + img = Image.fromarray(img) + + # infer font size + image4ratio = Image.new("RGB", img.size, "white") + draw = ImageDraw.Draw(image4ratio) + _, _, _tw, _th = draw.textbbox(xy=(0, 0), text=text, font=font) + text_w = min(w, h) * (_tw / _th) + if text_w <= max(w, h): + # add space + if len(text) > 1 and not vert and add_space: + for i in range(1, 100): + text_space = insert_spaces(text, i) + _, _, _tw2, _th2 = draw.textbbox(xy=(0, 0), text=text_space, font=font) + if min(w, h) * (_tw2 / _th2) > max(w, h): + break + text = insert_spaces(text, i - 1) + font_size = min(w, h) * 0.80 + else: + shrink = 0.75 if vert else 0.85 + font_size = min(w, h) / (text_w / max(w, h)) * shrink + new_font = font.font_variant(size=int(font_size)) + + left, top, right, bottom = new_font.getbbox(text) + text_width = right - left + text_height = bottom - top + + layer = Image.new("RGBA", img.size, (0, 0, 0, 0)) + draw = ImageDraw.Draw(layer) + if not vert: + draw.text( + (rect[0][0] - text_width // 2, rect[0][1] - text_height // 2 - top), + text, + font=new_font, + fill=(255, 255, 255, 255), + ) + else: + x_s = min(box[:, 0]) + _w // 2 - text_height // 2 + y_s = min(box[:, 1]) + for c in text: + draw.text((x_s, y_s), c, font=new_font, fill=(255, 255, 255, 255)) + _, _t, _, _b = new_font.getbbox(c) + y_s += _b + + rotated_layer = layer.rotate(angle, expand=1, center=(rect[0][0], rect[0][1])) + + x_offset = int((img.width - rotated_layer.width) / 2) + y_offset = int((img.height - rotated_layer.height) / 2) + img.paste(rotated_layer, (x_offset, y_offset), rotated_layer) + img = np.expand_dims(np.array(img.convert("1")), axis=2).astype(np.float64) + return img diff --git a/inpaint/model/base.py b/inpaint/model/base.py new file mode 100644 index 0000000..433ad68 --- /dev/null +++ b/inpaint/model/base.py @@ -0,0 +1,405 @@ +import abc +from typing import Optional + +import cv2 +import torch +import numpy as np +from loguru import logger + +from iopaint.helper import ( + boxes_from_mask, + resize_max_size, + pad_img_to_modulo, + switch_mps_device, +) +from iopaint.schema import InpaintRequest, HDStrategy, SDSampler +from .helper.g_diffuser_bot import expand_image +from .utils 
import get_scheduler + + +class InpaintModel: + name = "base" + min_size: Optional[int] = None + pad_mod = 8 + pad_to_square = False + is_erase_model = False + + def __init__(self, device, **kwargs): + """ + + Args: + device: + """ + device = switch_mps_device(self.name, device) + self.device = device + self.init_model(device, **kwargs) + + @abc.abstractmethod + def init_model(self, device, **kwargs): ... + + @staticmethod + @abc.abstractmethod + def is_downloaded() -> bool: + return False + + @abc.abstractmethod + def forward(self, image, mask, config: InpaintRequest): + """Input images and output images have same size + images: [H, W, C] RGB + masks: [H, W, 1] 255 为 masks 区域 + return: BGR IMAGE + """ + ... + + @staticmethod + def download(): ... + + def _pad_forward(self, image, mask, config: InpaintRequest): + origin_height, origin_width = image.shape[:2] + pad_image = pad_img_to_modulo( + image, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size + ) + pad_mask = pad_img_to_modulo( + mask, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size + ) + + # logger.info(f"final forward pad size: {pad_image.shape}") + + image, mask = self.forward_pre_process(image, mask, config) + + result = self.forward(pad_image, pad_mask, config) + result = result[0:origin_height, 0:origin_width, :] + + result, image, mask = self.forward_post_process(result, image, mask, config) + + if config.sd_keep_unmasked_area: + mask = mask[:, :, np.newaxis] + result = result * (mask / 255) + image[:, :, ::-1] * (1 - (mask / 255)) + return result + + def forward_pre_process(self, image, mask, config): + return image, mask + + def forward_post_process(self, result, image, mask, config): + return result, image, mask + + @torch.no_grad() + def __call__(self, image, mask, config: InpaintRequest): + """ + images: [H, W, C] RGB, not normalized + masks: [H, W] + return: BGR IMAGE + """ + inpaint_result = None + # logger.info(f"hd_strategy: {config.hd_strategy}") + if config.hd_strategy == HDStrategy.CROP: + if max(image.shape) > config.hd_strategy_crop_trigger_size: + logger.info("Run crop strategy") + boxes = boxes_from_mask(mask) + crop_result = [] + for box in boxes: + crop_image, crop_box = self._run_box(image, mask, box, config) + crop_result.append((crop_image, crop_box)) + + inpaint_result = image[:, :, ::-1] + for crop_image, crop_box in crop_result: + x1, y1, x2, y2 = crop_box + inpaint_result[y1:y2, x1:x2, :] = crop_image + + elif config.hd_strategy == HDStrategy.RESIZE: + if max(image.shape) > config.hd_strategy_resize_limit: + origin_size = image.shape[:2] + downsize_image = resize_max_size( + image, size_limit=config.hd_strategy_resize_limit + ) + downsize_mask = resize_max_size( + mask, size_limit=config.hd_strategy_resize_limit + ) + + logger.info( + f"Run resize strategy, origin size: {image.shape} forward size: {downsize_image.shape}" + ) + inpaint_result = self._pad_forward( + downsize_image, downsize_mask, config + ) + + # only paste masked area result + inpaint_result = cv2.resize( + inpaint_result, + (origin_size[1], origin_size[0]), + interpolation=cv2.INTER_CUBIC, + ) + original_pixel_indices = mask < 127 + inpaint_result[original_pixel_indices] = image[:, :, ::-1][ + original_pixel_indices + ] + + if inpaint_result is None: + inpaint_result = self._pad_forward(image, mask, config) + + return inpaint_result + + def _crop_box(self, image, mask, box, config: InpaintRequest): + """ + + Args: + image: [H, W, C] RGB + mask: [H, W, 1] + box: [left,top,right,bottom] + + Returns: 
+ BGR IMAGE, (l, r, r, b) + """ + box_h = box[3] - box[1] + box_w = box[2] - box[0] + cx = (box[0] + box[2]) // 2 + cy = (box[1] + box[3]) // 2 + img_h, img_w = image.shape[:2] + + w = box_w + config.hd_strategy_crop_margin * 2 + h = box_h + config.hd_strategy_crop_margin * 2 + + _l = cx - w // 2 + _r = cx + w // 2 + _t = cy - h // 2 + _b = cy + h // 2 + + l = max(_l, 0) + r = min(_r, img_w) + t = max(_t, 0) + b = min(_b, img_h) + + # try to get more context when crop around image edge + if _l < 0: + r += abs(_l) + if _r > img_w: + l -= _r - img_w + if _t < 0: + b += abs(_t) + if _b > img_h: + t -= _b - img_h + + l = max(l, 0) + r = min(r, img_w) + t = max(t, 0) + b = min(b, img_h) + + crop_img = image[t:b, l:r, :] + crop_mask = mask[t:b, l:r] + + # logger.info(f"box size: ({box_h},{box_w}) crop size: {crop_img.shape}") + + return crop_img, crop_mask, [l, t, r, b] + + def _calculate_cdf(self, histogram): + cdf = histogram.cumsum() + normalized_cdf = cdf / float(cdf.max()) + return normalized_cdf + + def _calculate_lookup(self, source_cdf, reference_cdf): + lookup_table = np.zeros(256) + lookup_val = 0 + for source_index, source_val in enumerate(source_cdf): + for reference_index, reference_val in enumerate(reference_cdf): + if reference_val >= source_val: + lookup_val = reference_index + break + lookup_table[source_index] = lookup_val + return lookup_table + + def _match_histograms(self, source, reference, mask): + transformed_channels = [] + if len(mask.shape) == 3: + mask = mask[:, :, -1] + + for channel in range(source.shape[-1]): + source_channel = source[:, :, channel] + reference_channel = reference[:, :, channel] + + # only calculate histograms for non-masked parts + source_histogram, _ = np.histogram(source_channel[mask == 0], 256, [0, 256]) + reference_histogram, _ = np.histogram( + reference_channel[mask == 0], 256, [0, 256] + ) + + source_cdf = self._calculate_cdf(source_histogram) + reference_cdf = self._calculate_cdf(reference_histogram) + + lookup = self._calculate_lookup(source_cdf, reference_cdf) + + transformed_channels.append(cv2.LUT(source_channel, lookup)) + + result = cv2.merge(transformed_channels) + result = cv2.convertScaleAbs(result) + + return result + + def _apply_cropper(self, image, mask, config: InpaintRequest): + img_h, img_w = image.shape[:2] + l, t, w, h = ( + config.croper_x, + config.croper_y, + config.croper_width, + config.croper_height, + ) + r = l + w + b = t + h + + l = max(l, 0) + r = min(r, img_w) + t = max(t, 0) + b = min(b, img_h) + + crop_img = image[t:b, l:r, :] + crop_mask = mask[t:b, l:r] + return crop_img, crop_mask, (l, t, r, b) + + def _run_box(self, image, mask, box, config: InpaintRequest): + """ + + Args: + image: [H, W, C] RGB + mask: [H, W, 1] + box: [left,top,right,bottom] + + Returns: + BGR IMAGE + """ + crop_img, crop_mask, [l, t, r, b] = self._crop_box(image, mask, box, config) + + return self._pad_forward(crop_img, crop_mask, config), [l, t, r, b] + + +class DiffusionInpaintModel(InpaintModel): + def __init__(self, device, **kwargs): + self.model_info = kwargs["model_info"] + self.model_id_or_path = self.model_info.path + super().__init__(device, **kwargs) + + @torch.no_grad() + def __call__(self, image, mask, config: InpaintRequest): + """ + images: [H, W, C] RGB, not normalized + masks: [H, W] + return: BGR IMAGE + """ + # boxes = boxes_from_mask(mask) + if config.use_croper: + crop_img, crop_mask, (l, t, r, b) = self._apply_cropper(image, mask, config) + crop_image = self._scaled_pad_forward(crop_img, crop_mask, config) + 
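As a side note on the _match_histograms helper above: it corrects color drift between the diffusion result and the original image by building per-channel intensity CDFs over the unmasked pixels only, then mapping each source intensity to the first reference intensity whose CDF is at least as large. Below is a minimal standalone sketch of that same idea, with the explicit double loop replaced by a vectorized searchsorted; the function and variable names are illustrative and not part of the patch.

import cv2
import numpy as np

def match_channel(source: np.ndarray, reference: np.ndarray, mask: np.ndarray) -> np.ndarray:
    # histograms over unmasked pixels only (mask == 0 means "pixel was not inpainted")
    src_hist, _ = np.histogram(source[mask == 0], 256, [0, 256])
    ref_hist, _ = np.histogram(reference[mask == 0], 256, [0, 256])
    src_cdf = src_hist.cumsum() / max(float(src_hist.sum()), 1.0)
    ref_cdf = ref_hist.cumsum() / max(float(ref_hist.sum()), 1.0)
    # for each source intensity, first reference intensity whose CDF reaches the source CDF
    lookup = np.searchsorted(ref_cdf, src_cdf).clip(0, 255).astype(np.uint8)
    return cv2.LUT(source, lookup)

# usage on 8-bit images:
# matched = cv2.merge([match_channel(res[..., c], orig[..., c], mask) for c in range(3)])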
inpaint_result = image[:, :, ::-1] + inpaint_result[t:b, l:r, :] = crop_image + elif config.use_extender: + inpaint_result = self._do_outpainting(image, config) + else: + inpaint_result = self._scaled_pad_forward(image, mask, config) + + return inpaint_result + + def _do_outpainting(self, image, config: InpaintRequest): + # cropper 和 image 在同一个坐标系下,croper_x/y 可能为负数 + # 从 image 中 crop 出 outpainting 区域 + image_h, image_w = image.shape[:2] + cropper_l = config.extender_x + cropper_t = config.extender_y + cropper_r = config.extender_x + config.extender_width + cropper_b = config.extender_y + config.extender_height + image_l = 0 + image_t = 0 + image_r = image_w + image_b = image_h + + # 类似求 IOU + l = max(cropper_l, image_l) + t = max(cropper_t, image_t) + r = min(cropper_r, image_r) + b = min(cropper_b, image_b) + + assert ( + 0 <= l < r and 0 <= t < b + ), f"cropper and image not overlap, {l},{t},{r},{b}" + + cropped_image = image[t:b, l:r, :] + padding_l = max(0, image_l - cropper_l) + padding_t = max(0, image_t - cropper_t) + padding_r = max(0, cropper_r - image_r) + padding_b = max(0, cropper_b - image_b) + + expanded_image, mask_image = expand_image( + cropped_image, + left=padding_l, + top=padding_t, + right=padding_r, + bottom=padding_b, + ) + + # 最终扩大了的 image, BGR + expanded_cropped_result_image = self._scaled_pad_forward( + expanded_image, mask_image, config + ) + + # RGB -> BGR + outpainting_image = cv2.copyMakeBorder( + image, + left=padding_l, + top=padding_t, + right=padding_r, + bottom=padding_b, + borderType=cv2.BORDER_CONSTANT, + value=0, + )[:, :, ::-1] + + # 把 cropped_result_image 贴到 outpainting_image 上,这一步不需要 blend + paste_t = 0 if config.extender_y < 0 else config.extender_y + paste_l = 0 if config.extender_x < 0 else config.extender_x + + outpainting_image[ + paste_t : paste_t + expanded_cropped_result_image.shape[0], + paste_l : paste_l + expanded_cropped_result_image.shape[1], + :, + ] = expanded_cropped_result_image + return outpainting_image + + def _scaled_pad_forward(self, image, mask, config: InpaintRequest): + longer_side_length = int(config.sd_scale * max(image.shape[:2])) + origin_size = image.shape[:2] + downsize_image = resize_max_size(image, size_limit=longer_side_length) + downsize_mask = resize_max_size(mask, size_limit=longer_side_length) + if config.sd_scale != 1: + logger.info( + f"Resize image to do sd inpainting: {image.shape} -> {downsize_image.shape}" + ) + inpaint_result = self._pad_forward(downsize_image, downsize_mask, config) + # only paste masked area result + inpaint_result = cv2.resize( + inpaint_result, + (origin_size[1], origin_size[0]), + interpolation=cv2.INTER_CUBIC, + ) + + return inpaint_result + + def set_scheduler(self, config: InpaintRequest): + scheduler_config = self.model.scheduler.config + sd_sampler = config.sd_sampler + if config.sd_lcm_lora and self.model_info.support_lcm_lora: + sd_sampler = SDSampler.lcm + logger.info(f"LCM Lora enabled, use {sd_sampler} sampler") + scheduler = get_scheduler(sd_sampler, scheduler_config) + self.model.scheduler = scheduler + + def forward_pre_process(self, image, mask, config): + if config.sd_mask_blur != 0: + k = 2 * config.sd_mask_blur + 1 + mask = cv2.GaussianBlur(mask, (k, k), 0) + + return image, mask + + def forward_post_process(self, result, image, mask, config): + if config.sd_match_histograms: + result = self._match_histograms(result, image[:, :, ::-1], mask) + + if config.use_extender and config.sd_mask_blur != 0: + k = 2 * config.sd_mask_blur + 1 + mask = cv2.GaussianBlur(mask, (k, 
k), 0) + return result, image, mask diff --git a/inpaint/model/brushnet/__init__.py b/inpaint/model/brushnet/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/brushnet/brushnet.py b/inpaint/model/brushnet/brushnet.py new file mode 100644 index 0000000..b3a045b --- /dev/null +++ b/inpaint/model/brushnet/brushnet.py @@ -0,0 +1,931 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.utils import BaseOutput, logging +from diffusers.models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from diffusers.models.embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, \ + TimestepEmbedding, Timesteps +from diffusers.models.modeling_utils import ModelMixin +from diffusers.models.unets.unet_2d_blocks import ( + CrossAttnDownBlock2D, + DownBlock2D, get_down_block, get_up_block, +) + +from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel +from .unet_2d_blocks import MidBlock2D + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class BrushNetOutput(BaseOutput): + """ + The output of [`BrushNetModel`]. + + Args: + up_block_res_samples (`tuple[torch.Tensor]`): + A tuple of upsample activations at different resolutions for each upsampling block. Each tensor should + be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be + used to condition the original UNet's upsampling activations. + down_block_res_samples (`tuple[torch.Tensor]`): + A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should + be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be + used to condition the original UNet's downsampling activations. + mid_down_block_re_sample (`torch.Tensor`): + The activation of the midde block (the lowest sample resolution). Each tensor should be of shape + `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`. + Output can be used to condition the original UNet's middle block activation. + """ + + up_block_res_samples: Tuple[torch.Tensor] + down_block_res_samples: Tuple[torch.Tensor] + mid_block_res_sample: torch.Tensor + + +class BrushNetModel(ModelMixin, ConfigMixin): + """ + A BrushNet model. + + Args: + in_channels (`int`, defaults to 4): + The number of channels in the input sample. + flip_sin_to_cos (`bool`, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, defaults to 0): + The frequency shift to apply to the time embedding. + down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): + Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or + `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. 
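A brief aside on _do_outpainting above, before the BrushNet sources continue: the extender rectangle and the image live in one coordinate system, so the method first intersects the two rectangles and then derives how much border must be synthesized on each side. The arithmetic is summarized in this self-contained sketch with illustrative names and toy numbers, not taken from the patch.

def outpaint_geometry(image_w, image_h, ext_x, ext_y, ext_w, ext_h):
    # intersection of the extender rectangle with the image rectangle
    l, t = max(ext_x, 0), max(ext_y, 0)
    r, b = min(ext_x + ext_w, image_w), min(ext_y + ext_h, image_h)
    assert 0 <= l < r and 0 <= t < b, "extender and image do not overlap"
    # border that has to be generated on each side of the cropped region
    pad_l, pad_t = max(0, -ext_x), max(0, -ext_y)
    pad_r = max(0, ext_x + ext_w - image_w)
    pad_b = max(0, ext_y + ext_h - image_h)
    return (l, t, r, b), (pad_l, pad_t, pad_r, pad_b)

# A 512x512 image extended 128 px to the left and 64 px upwards:
# outpaint_geometry(512, 512, -128, -64, 640, 576)
# -> ((0, 0, 512, 512), (128, 64, 0, 0)): keep the whole image, synthesize a
#    128 px left border and a 64 px top border, then paste the result back.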
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): + block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, defaults to 2): + The number of layers per block. + downsample_padding (`int`, defaults to 1): + The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, defaults to 1): + The scale factor to use for the mid block. + act_fn (`str`, defaults to "silu"): + The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups to use for the normalization. If None, normalization and activation layers is skipped + in post-processing. + norm_eps (`float`, defaults to 1e-5): + The epsilon to use for the normalization. + cross_attention_dim (`int`, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], + [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. + encoder_hid_dim (`int`, *optional*, defaults to None): + If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` + dimension to `cross_attention_dim`. + encoder_hid_dim_type (`str`, *optional*, defaults to `None`): + If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text + embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. + attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): + The dimension of the attention heads. + use_linear_projection (`bool`, defaults to `False`): + class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None, + `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. + addition_embed_type (`str`, *optional*, defaults to `None`): + Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or + "text". "text" will use the `TextTimeEmbedding` layer. + num_class_embeds (`int`, *optional*, defaults to 0): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing + class conditioning with `class_embed_type` equal to `None`. + upcast_attention (`bool`, defaults to `False`): + resnet_time_scale_shift (`str`, defaults to `"default"`): + Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. + projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): + The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when + `class_embed_type="projection"`. + brushnet_conditioning_channel_order (`str`, defaults to `"rgb"`): + The channel order of conditional image. Will convert to `rgb` if it's `bgr`. + conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`): + The tuple of output channel for each block in the `conditioning_embedding` layer. + global_pool_conditions (`bool`, defaults to `False`): + TODO(Patrick) - unused parameter. + addition_embed_type_num_heads (`int`, defaults to 64): + The number of heads to use for the `TextTimeEmbedding` layer. 
+ """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 4, + conditioning_channels: int = 5, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str, ...] = ( + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + ), + mid_block_type: Optional[str] = "UNetMidBlock2D", + up_block_types: Tuple[str, ...] = ( + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + ), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), + layers_per_block: int = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: int = 1280, + transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + attention_head_dim: Union[int, Tuple[int, ...]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + addition_embed_type: Optional[str] = None, + addition_time_embed_dim: Optional[int] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + projection_class_embeddings_input_dim: Optional[int] = None, + brushnet_conditioning_channel_order: str = "rgb", + conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), + global_pool_conditions: bool = False, + addition_embed_type_num_heads: int = 64, + ): + super().__init__() + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
+ ) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + # input + conv_in_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in_condition = nn.Conv2d( + in_channels + conditioning_channels, block_out_channels[0], kernel_size=conv_in_kernel, + padding=conv_in_padding + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + ) + + if encoder_hid_dim_type is None and encoder_hid_dim is not None: + encoder_hid_dim_type = "text_proj" + self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) + logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") + + if encoder_hid_dim is None and encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." + ) + + if encoder_hid_dim_type == "text_proj": + self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) + elif encoder_hid_dim_type == "text_image_proj": + # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` + self.encoder_hid_proj = TextImageProjection( + text_embed_dim=encoder_hid_dim, + image_embed_dim=cross_attention_dim, + cross_attention_dim=cross_attention_dim, + ) + + elif encoder_hid_dim_type is not None: + raise ValueError( + f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." + ) + else: + self.encoder_hid_proj = None + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + if addition_embed_type == "text": + if encoder_hid_dim is not None: + text_time_embedding_from_dim = encoder_hid_dim + else: + text_time_embedding_from_dim = cross_attention_dim + + self.add_embedding = TextTimeEmbedding( + text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads + ) + elif addition_embed_type == "text_image": + # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` + self.add_embedding = TextImageTimeEmbedding( + text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim + ) + elif addition_embed_type == "text_time": + self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) + self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + + elif addition_embed_type is not None: + raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") + + self.down_blocks = nn.ModuleList([]) + self.brushnet_down_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + + brushnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_down_blocks.append(brushnet_block) + + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[i], + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + downsample_padding=downsample_padding, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + + self.down_blocks.append(down_block) + + for _ in range(layers_per_block): + brushnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_down_blocks.append(brushnet_block) + + if not is_final_block: + brushnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_down_blocks.append(brushnet_block) + + # mid + mid_block_channel = block_out_channels[-1] + + brushnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) + 
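Worth noting: every brushnet_block appended here, and the mid/up projection blocks that follow, is a 1x1 convolution passed through zero_module, so the BrushNet branch injects exactly nothing at initialization and only learns to contribute during training (the same zero-convolution trick ControlNet uses). A compact sketch of the pattern, mirroring the zero_module helper defined at the end of this file:

import torch
from torch import nn

def zero_module(module: nn.Module) -> nn.Module:
    # zero every parameter so the block outputs zeros at initialization
    for p in module.parameters():
        nn.init.zeros_(p)
    return module

proj = zero_module(nn.Conv2d(320, 320, kernel_size=1))
feat = torch.randn(1, 320, 64, 64)
assert proj(feat).abs().max().item() == 0.0  # no contribution before training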
brushnet_block = zero_module(brushnet_block) + self.brushnet_mid_block = brushnet_block + + self.mid_block = MidBlock2D( + in_channels=mid_block_channel, + temb_channels=time_embed_dim, + dropout=0.0, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_transformer_layers_per_block = (list(reversed(transformer_layers_per_block))) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + + self.up_blocks = nn.ModuleList([]) + self.brushnet_up_blocks = nn.ModuleList([]) + + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resolution_idx=i, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=reversed_num_attention_heads[i], + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + ) + + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + for _ in range(layers_per_block + 1): + brushnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_up_blocks.append(brushnet_block) + + if not is_final_block: + brushnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_up_blocks.append(brushnet_block) + + @classmethod + def from_unet( + cls, + unet: UNet2DConditionModel, + brushnet_conditioning_channel_order: str = "rgb", + conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), + load_weights_from_unet: bool = True, + conditioning_channels: int = 5, + ): + r""" + Instantiate a [`BrushNetModel`] from [`UNet2DConditionModel`]. + + Parameters: + unet (`UNet2DConditionModel`): + The UNet model weights to copy to the [`BrushNetModel`]. All configuration options are also copied + where applicable. 
+ """ + transformer_layers_per_block = ( + unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 + ) + encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None + encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None + addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None + addition_time_embed_dim = ( + unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None + ) + + brushnet = cls( + in_channels=unet.config.in_channels, + conditioning_channels=conditioning_channels, + flip_sin_to_cos=unet.config.flip_sin_to_cos, + freq_shift=unet.config.freq_shift, + down_block_types=['DownBlock2D', 'DownBlock2D', 'DownBlock2D', 'DownBlock2D'], + mid_block_type='MidBlock2D', + up_block_types=['UpBlock2D', 'UpBlock2D', 'UpBlock2D', 'UpBlock2D'], + only_cross_attention=unet.config.only_cross_attention, + block_out_channels=unet.config.block_out_channels, + layers_per_block=unet.config.layers_per_block, + downsample_padding=unet.config.downsample_padding, + mid_block_scale_factor=unet.config.mid_block_scale_factor, + act_fn=unet.config.act_fn, + norm_num_groups=unet.config.norm_num_groups, + norm_eps=unet.config.norm_eps, + cross_attention_dim=unet.config.cross_attention_dim, + transformer_layers_per_block=transformer_layers_per_block, + encoder_hid_dim=encoder_hid_dim, + encoder_hid_dim_type=encoder_hid_dim_type, + attention_head_dim=unet.config.attention_head_dim, + num_attention_heads=unet.config.num_attention_heads, + use_linear_projection=unet.config.use_linear_projection, + class_embed_type=unet.config.class_embed_type, + addition_embed_type=addition_embed_type, + addition_time_embed_dim=addition_time_embed_dim, + num_class_embeds=unet.config.num_class_embeds, + upcast_attention=unet.config.upcast_attention, + resnet_time_scale_shift=unet.config.resnet_time_scale_shift, + projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, + brushnet_conditioning_channel_order=brushnet_conditioning_channel_order, + conditioning_embedding_out_channels=conditioning_embedding_out_channels, + ) + + if load_weights_from_unet: + conv_in_condition_weight = torch.zeros_like(brushnet.conv_in_condition.weight) + conv_in_condition_weight[:, :4, ...] = unet.conv_in.weight + conv_in_condition_weight[:, 4:8, ...] = unet.conv_in.weight + brushnet.conv_in_condition.weight = torch.nn.Parameter(conv_in_condition_weight) + brushnet.conv_in_condition.bias = unet.conv_in.bias + + brushnet.time_proj.load_state_dict(unet.time_proj.state_dict()) + brushnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) + + if brushnet.class_embedding: + brushnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) + + brushnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False) + brushnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False) + brushnet.up_blocks.load_state_dict(unet.up_blocks.state_dict(), strict=False) + + return brushnet + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. 
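A note on the weight transfer in from_unet above: conv_in_condition sees 4 noisy-latent channels plus 5 conditioning channels (presumably masked-image latents plus a mask channel), so the pretrained 4-channel conv_in kernel is copied into both the first and the second group of four input channels while the remaining channel starts at zero. The following is a hedged sketch of that initialization using the default channel counts from the configuration above; it is an illustration, not code from the patch.

import torch
from torch import nn

unet_conv_in = nn.Conv2d(4, 320, kernel_size=3, padding=1)           # stand-in for unet.conv_in
conv_in_condition = nn.Conv2d(4 + 5, 320, kernel_size=3, padding=1)  # noisy latent + conditioning

w = torch.zeros_like(conv_in_condition.weight)   # shape (320, 9, 3, 3)
w[:, :4, ...] = unet_conv_in.weight              # noisy-latent channels
w[:, 4:8, ...] = unet_conv_in.weight             # conditioning latents reuse the same kernel
conv_in_condition.weight = nn.Parameter(w)       # channel 8 stays zero-initialized
conv_in_condition.bias = unet_conv_in.bias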
+ """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice + def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. 
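Both attn_processors and set_attn_processor above rely on the same recursion over named_children(), keyed by dotted module paths. A stripped-down sketch of that traversal on a toy module follows; the helper name and the toy network are illustrative, not from the patch.

from torch import nn

def collect_by_path(root: nn.Module, predicate):
    # dotted path -> module, for every submodule matching `predicate`
    found = {}
    def recurse(name: str, module: nn.Module):
        if predicate(module):
            found[name] = module
        for sub_name, child in module.named_children():
            recurse(f"{name}.{sub_name}" if name else sub_name, child)
    recurse("", root)
    return found

toy = nn.Sequential(nn.Conv2d(3, 8, 1), nn.Sequential(nn.Conv2d(8, 8, 1), nn.ReLU()))
print(list(collect_by_path(toy, lambda m: isinstance(m, nn.Conv2d))))  # ['0', '1.0']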
If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. + """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): + module.gradient_checkpointing = value + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + brushnet_cond: torch.FloatTensor, + conditioning_scale: float = 1.0, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guess_mode: bool = False, + return_dict: bool = True, + ) -> Union[BrushNetOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]: + """ + The [`BrushNetModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor. + timestep (`Union[torch.Tensor, float, int]`): + The number of timesteps to denoise an input. + encoder_hidden_states (`torch.Tensor`): + The encoder hidden states. + brushnet_cond (`torch.FloatTensor`): + The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. + conditioning_scale (`float`, defaults to `1.0`): + The scale factor for BrushNet outputs. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. 
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): + Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the + timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep + embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + added_cond_kwargs (`dict`): + Additional conditions for the Stable Diffusion XL UNet. + cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): + A kwargs dictionary that if specified is passed along to the `AttnProcessor`. + guess_mode (`bool`, defaults to `False`): + In this mode, the BrushNet encoder tries its best to recognize the input content of the input even if + you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. + return_dict (`bool`, defaults to `True`): + Whether or not to return a [`~models.brushnet.BrushNetOutput`] instead of a plain tuple. + + Returns: + [`~models.brushnet.BrushNetOutput`] **or** `tuple`: + If `return_dict` is `True`, a [`~models.brushnet.BrushNetOutput`] is returned, otherwise a tuple is + returned where the first element is the sample tensor. + """ + # check channel order + channel_order = self.config.brushnet_conditioning_channel_order + + if channel_order == "rgb": + # in rgb order by default + ... + elif channel_order == "bgr": + brushnet_cond = torch.flip(brushnet_cond, dims=[1]) + else: + raise ValueError(f"unknown `brushnet_conditioning_channel_order`: {channel_order}") + + # prepare attention_mask + if attention_mask is not None: + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. 
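The timestep handling just above accepts a Python number, a 0-d tensor, or a batched tensor and normalizes all of them to a 1-D tensor expanded to the batch size before time_proj is applied. A short sketch of the same normalization outside the model; the MPS-specific dtype selection is omitted and the names are illustrative.

import torch

def normalize_timesteps(timestep, sample: torch.Tensor) -> torch.Tensor:
    if not torch.is_tensor(timestep):
        dtype = torch.float64 if isinstance(timestep, float) else torch.int64
        timestep = torch.tensor([timestep], dtype=dtype, device=sample.device)
    elif timestep.ndim == 0:
        timestep = timestep[None].to(sample.device)
    # broadcast to the batch dimension (ONNX/Core ML friendly, no per-sample loop)
    return timestep.expand(sample.shape[0])

latents = torch.randn(2, 4, 64, 64)
print(normalize_timesteps(999, latents))               # tensor([999, 999])
print(normalize_timesteps(torch.tensor(10), latents))  # tensor([10, 10])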
+ t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) + emb = emb + class_emb + + if self.config.addition_embed_type is not None: + if self.config.addition_embed_type == "text": + aug_emb = self.add_embedding(encoder_hidden_states) + + elif self.config.addition_embed_type == "text_time": + if "text_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if "time_ids" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + time_embeds = self.add_time_proj(time_ids.flatten()) + time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) + + add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) + add_embeds = add_embeds.to(emb.dtype) + aug_emb = self.add_embedding(add_embeds) + + emb = emb + aug_emb if aug_emb is not None else emb + + # 2. pre-process + brushnet_cond = torch.concat([sample, brushnet_cond], 1) + sample = self.conv_in_condition(brushnet_cond) + + # 3. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. PaintingNet down blocks + brushnet_down_block_res_samples = () + for down_block_res_sample, brushnet_down_block in zip(down_block_res_samples, self.brushnet_down_blocks): + down_block_res_sample = brushnet_down_block(down_block_res_sample) + brushnet_down_block_res_samples = brushnet_down_block_res_samples + (down_block_res_sample,) + + # 5. mid + if self.mid_block is not None: + if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = self.mid_block(sample, emb) + + # 6. BrushNet mid blocks + brushnet_mid_block_res_sample = self.brushnet_mid_block(sample) + + # 7. 
up + up_block_res_samples = () + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets):] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample, up_res_samples = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + return_res_samples=True + ) + else: + sample, up_res_samples = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + return_res_samples=True + ) + + up_block_res_samples += up_res_samples + + # 8. BrushNet up blocks + brushnet_up_block_res_samples = () + for up_block_res_sample, brushnet_up_block in zip(up_block_res_samples, self.brushnet_up_blocks): + up_block_res_sample = brushnet_up_block(up_block_res_sample) + brushnet_up_block_res_samples = brushnet_up_block_res_samples + (up_block_res_sample,) + + # 6. scaling + if guess_mode and not self.config.global_pool_conditions: + scales = torch.logspace(-1, 0, + len(brushnet_down_block_res_samples) + 1 + len(brushnet_up_block_res_samples), + device=sample.device) # 0.1 to 1.0 + scales = scales * conditioning_scale + + brushnet_down_block_res_samples = [sample * scale for sample, scale in zip(brushnet_down_block_res_samples, + scales[:len( + brushnet_down_block_res_samples)])] + brushnet_mid_block_res_sample = brushnet_mid_block_res_sample * scales[len(brushnet_down_block_res_samples)] + brushnet_up_block_res_samples = [sample * scale for sample, scale in zip(brushnet_up_block_res_samples, + scales[ + len(brushnet_down_block_res_samples) + 1:])] + else: + brushnet_down_block_res_samples = [sample * conditioning_scale for sample in + brushnet_down_block_res_samples] + brushnet_mid_block_res_sample = brushnet_mid_block_res_sample * conditioning_scale + brushnet_up_block_res_samples = [sample * conditioning_scale for sample in brushnet_up_block_res_samples] + + if self.config.global_pool_conditions: + brushnet_down_block_res_samples = [ + torch.mean(sample, dim=(2, 3), keepdim=True) for sample in brushnet_down_block_res_samples + ] + brushnet_mid_block_res_sample = torch.mean(brushnet_mid_block_res_sample, dim=(2, 3), keepdim=True) + brushnet_up_block_res_samples = [ + torch.mean(sample, dim=(2, 3), keepdim=True) for sample in brushnet_up_block_res_samples + ] + + if not return_dict: + return (brushnet_down_block_res_samples, brushnet_mid_block_res_sample, brushnet_up_block_res_samples) + + return BrushNetOutput( + down_block_res_samples=brushnet_down_block_res_samples, + mid_block_res_sample=brushnet_mid_block_res_sample, + up_block_res_samples=brushnet_up_block_res_samples + ) + + +def zero_module(module): + for p in module.parameters(): + nn.init.zeros_(p) + return module + + +if __name__ == "__main__": + BrushNetModel.from_pretrained("/Users/cwq/data/models/brushnet/brushnet_random_mask", variant='fp16', + use_safetensors=True) diff --git a/inpaint/model/brushnet/brushnet_unet_forward.py b/inpaint/model/brushnet/brushnet_unet_forward.py new file 
mode 100644 index 0000000..04e8f0a --- /dev/null +++ b/inpaint/model/brushnet/brushnet_unet_forward.py @@ -0,0 +1,322 @@ +from typing import Union, Optional, Dict, Any, Tuple + +import torch +from diffusers.models.unet_2d_condition import UNet2DConditionOutput +from diffusers.utils import USE_PEFT_BACKEND, unscale_lora_layers, deprecate, scale_lora_layers + + +def brushnet_unet_forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + down_block_add_samples: Optional[Tuple[torch.Tensor]] = None, + mid_block_add_sample: Optional[Tuple[torch.Tensor]] = None, + up_block_add_samples: Optional[Tuple[torch.Tensor]] = None, +) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`UNet2DConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of down unet blocks. + mid_block_additional_residual: (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle unet block. + encoder_attention_mask (`torch.Tensor`): + A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If + `True` the mask is kept, otherwise if `False` it is discarded. 
Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + additional residuals to be added to UNet long skip connections from down blocks to up blocks for + example from ControlNet side model(s) + mid_block_additional_residual (`torch.Tensor`, *optional*): + additional residual to be added to UNet mid block output, for example from ControlNet side model + down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s) + + Returns: + [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """
+ # By default samples have to be at least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2 ** self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + for dim in sample.shape[-2:]: + if dim % default_overall_up_factor != 0: + # Forward upsample size to force interpolation output size. + forward_upsample_size = True + break
+ + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1)
+ + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1.
time + t_emb = self.get_time_embed(sample=sample, timestep=timestep) + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + class_emb = self.get_class_embed(sample=sample, class_labels=class_labels) + if class_emb is not None: + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + aug_emb = self.get_aug_embed( + emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs + ) + if self.config.addition_embed_type == "image_hint": + aug_emb, hint = aug_emb + sample = torch.cat([sample, hint], dim=1) + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + encoder_hidden_states = self.process_encoder_hidden_states( + encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs + ) + + # 2. pre-process + sample = self.conv_in(sample) + + # 2.5 GLIGEN position net + if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: + cross_attention_kwargs = cross_attention_kwargs.copy() + gligen_args = cross_attention_kwargs.pop("gligen") + cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} + + # 3. down + lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + + is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None + # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets + is_adapter = down_intrablock_additional_residuals is not None + # maintain backward compatibility for legacy usage, where + # T2I-Adapter and ControlNet both use down_block_additional_residuals arg + # but can only use one or the other + is_brushnet = down_block_add_samples is not None and mid_block_add_sample is not None and up_block_add_samples is not None + if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: + deprecate( + "T2I should not use down_block_additional_residuals", + "1.3.0", + "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ + and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ + for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. 
", + standard_warn=False, + ) + down_intrablock_additional_residuals = down_block_additional_residuals + is_adapter = True + + down_block_res_samples = (sample,) + + if is_brushnet: + sample = sample + down_block_add_samples.pop(0) + + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + # For t2i-adapter CrossAttnDownBlock2D + additional_residuals = {} + if is_adapter and len(down_intrablock_additional_residuals) > 0: + additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) + + if is_brushnet and len(down_block_add_samples) > 0: + additional_residuals["down_block_add_samples"] = [down_block_add_samples.pop(0) + for _ in range( + len(downsample_block.resnets) + (downsample_block.downsamplers != None))] + + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + additional_residuals = {} + if is_brushnet and len(down_block_add_samples) > 0: + additional_residuals["down_block_add_samples"] = [down_block_add_samples.pop(0) + for _ in range( + len(downsample_block.resnets) + (downsample_block.downsamplers != None))] + + sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale, + **additional_residuals) + if is_adapter and len(down_intrablock_additional_residuals) > 0: + sample += down_intrablock_additional_residuals.pop(0) + + down_block_res_samples += res_samples + + if is_controlnet: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = down_block_res_sample + down_block_additional_residual + new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = self.mid_block(sample, emb) + + # To support T2I-Adapter-XL + if ( + is_adapter + and len(down_intrablock_additional_residuals) > 0 + and sample.shape == down_intrablock_additional_residuals[0].shape + ): + sample += down_intrablock_additional_residuals.pop(0) + + if is_controlnet: + sample = sample + mid_block_additional_residual + + if is_brushnet: + sample = sample + mid_block_add_sample + + # 5. 
up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets):] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + additional_residuals = {} + if is_brushnet and len(up_block_add_samples) > 0: + additional_residuals["up_block_add_samples"] = [up_block_add_samples.pop(0) + for _ in range( + len(upsample_block.resnets) + (upsample_block.upsamplers != None))] + + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + additional_residuals = {} + if is_brushnet and len(up_block_add_samples) > 0: + additional_residuals["up_block_add_samples"] = [up_block_add_samples.pop(0) + for _ in range( + len(upsample_block.resnets) + (upsample_block.upsamplers != None))] + + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + scale=lora_scale, + **additional_residuals, + ) + + # 6. post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) diff --git a/inpaint/model/brushnet/brushnet_wrapper.py b/inpaint/model/brushnet/brushnet_wrapper.py new file mode 100644 index 0000000..c7343d2 --- /dev/null +++ b/inpaint/model/brushnet/brushnet_wrapper.py @@ -0,0 +1,157 @@ +import PIL.Image +import cv2 +import torch +from loguru import logger +import numpy as np + +from ..base import DiffusionInpaintModel +from ..helper.cpu_text_encoder import CPUTextEncoderWrapper +from ..original_sd_configs import get_config_files +from ..utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) +from .brushnet import BrushNetModel +from .brushnet_unet_forward import brushnet_unet_forward +from .unet_2d_blocks import CrossAttnDownBlock2D_forward, DownBlock2D_forward, CrossAttnUpBlock2D_forward, \ + UpBlock2D_forward +from ...schema import InpaintRequest, ModelType + + +class BrushNetWrapper(DiffusionInpaintModel): + pad_mod = 8 + min_size = 512 + + def init_model(self, device: torch.device, **kwargs): + from .pipeline_brushnet import StableDiffusionBrushNetPipeline + self.model_info = kwargs["model_info"] + self.brushnet_method = kwargs["brushnet_method"] + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + self.torch_dtype = torch_dtype + + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + self.local_files_only = model_kwargs["local_files_only"] + + disable_nsfw_checker = kwargs["disable_nsfw"] or kwargs.get( + "cpu_offload", False + ) + if disable_nsfw_checker: + logger.info("Disable Stable 
Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + logger.info(f"Loading BrushNet model from {self.brushnet_method}") + brushnet = BrushNetModel.from_pretrained(self.brushnet_method, torch_dtype=torch_dtype) + + if self.model_info.is_single_file_diffusers: + if self.model_info.model_type == ModelType.DIFFUSERS_SD: + model_kwargs["num_in_channels"] = 4 + else: + model_kwargs["num_in_channels"] = 9 + + self.model = StableDiffusionBrushNetPipeline.from_single_file( + self.model_id_or_path, + torch_dtype=torch_dtype, + load_safety_checker=not disable_nsfw_checker, + original_config_file=get_config_files()['v1'], + brushnet=brushnet, + **model_kwargs, + ) + else: + self.model = handle_from_pretrained_exceptions( + StableDiffusionBrushNetPipeline.from_pretrained, + pretrained_model_name_or_path=self.model_id_or_path, + variant="fp16", + torch_dtype=torch_dtype, + brushnet=brushnet, + **model_kwargs, + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + # Monkey patch the forward method of the UNet to use the brushnet_unet_forward method + self.model.unet.forward = brushnet_unet_forward.__get__(self.model.unet, self.model.unet.__class__) + + for down_block in self.model.brushnet.down_blocks: + down_block.forward = DownBlock2D_forward.__get__(down_block, down_block.__class__) + for up_block in self.model.brushnet.up_blocks: + up_block.forward = UpBlock2D_forward.__get__(up_block, up_block.__class__) + + # Monkey patch unet down_blocks to use CrossAttnDownBlock2D_forward + for down_block in self.model.unet.down_blocks: + if down_block.__class__.__name__ == "CrossAttnDownBlock2D": + down_block.forward = CrossAttnDownBlock2D_forward.__get__(down_block, down_block.__class__) + else: + down_block.forward = DownBlock2D_forward.__get__(down_block, down_block.__class__) + + for up_block in self.model.unet.up_blocks: + if up_block.__class__.__name__ == "CrossAttnUpBlock2D": + up_block.forward = CrossAttnUpBlock2D_forward.__get__(up_block, up_block.__class__) + else: + up_block.forward = UpBlock2D_forward.__get__(up_block, up_block.__class__) + + def switch_brushnet_method(self, new_method: str): + self.brushnet_method = new_method + brushnet = BrushNetModel.from_pretrained( + new_method, + resume_download=True, + local_files_only=self.local_files_only, + torch_dtype=self.torch_dtype, + ).to(self.model.device) + self.model.brushnet = brushnet + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + normalized_mask = mask[:, :].astype("float32") / 255.0 + image = image * (1 - normalized_mask) + image = image.astype(np.uint8) + output = self.model( + image=PIL.Image.fromarray(image), + prompt=config.prompt, + negative_prompt=config.negative_prompt, + mask=PIL.Image.fromarray(mask[:, :, -1], mode="L").convert("RGB"), + num_inference_steps=config.sd_steps, + # 
strength=config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + brushnet_conditioning_scale=config.brushnet_conditioning_scale, + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/brushnet/pipeline_brushnet.py b/inpaint/model/brushnet/pipeline_brushnet.py new file mode 100644 index 0000000..2826e77 --- /dev/null +++ b/inpaint/model/brushnet/pipeline_brushnet.py @@ -0,0 +1,1279 @@ +# https://github.com/TencentARC/BrushNet +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from diffusers.image_processor import PipelineImageInput, VaeImageProcessor +from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from diffusers.models.lora import adjust_lora_scale_text_encoder +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + +from .brushnet import BrushNetModel + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import StableDiffusionBrushNetPipeline, BrushNetModel, UniPCMultistepScheduler + from diffusers.utils import load_image + import torch + import cv2 + import numpy as np + from PIL import Image + + base_model_path = "runwayml/stable-diffusion-v1-5" + brushnet_path = "ckpt_path" + + brushnet = BrushNetModel.from_pretrained(brushnet_path, torch_dtype=torch.float16) + pipe = StableDiffusionBrushNetPipeline.from_pretrained( + base_model_path, brushnet=brushnet, torch_dtype=torch.float16, low_cpu_mem_usage=False + ) + + # speed up diffusion process with faster scheduler and memory optimization + pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + # remove following line if xformers is not installed or when using Torch 2.0. + # pipe.enable_xformers_memory_efficient_attention() + # memory optimization. + pipe.enable_model_cpu_offload() + + image_path="examples/brushnet/src/test_image.jpg" + mask_path="examples/brushnet/src/test_mask.jpg" + caption="A cake on the table." 
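+ # the mask below is binarized (non-zero = region to repaint) and the masked area is blanked
+ # out of init_image before it is passed to the pipeline, matching the convention used by
+ # `BrushNetWrapper.forward` above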
+ + init_image = cv2.imread(image_path) + mask_image = 1.*(cv2.imread(mask_path).sum(-1)>255)[:,:,np.newaxis] + init_image = init_image * (1-mask_image) + + init_image = Image.fromarray(init_image.astype(np.uint8)).convert("RGB") + mask_image = Image.fromarray(mask_image.astype(np.uint8).repeat(3,-1)*255).convert("RGB") + + generator = torch.Generator("cuda").manual_seed(1234) + + image = pipe( + caption, + init_image, + mask_image, + num_inference_steps=50, + generator=generator, + paintingnet_conditioning_scale=1.0 + ).images[0] + image.save("output.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionBrushNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with BrushNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + brushnet ([`BrushNetModel`]`): + Provides additional conditioning to the `unet` during the denoising process. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + brushnet: BrushNetModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + brushnet=brushnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1: -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
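+ # e.g. `clip_skip=1` selects `hidden_states[-2]`, the output of the penultimate transformer
+ # layer; the explicit `final_layer_norm` call below reproduces the normalization the text
+ # encoder would otherwise apply only to its last layer's output.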
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + mask, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + brushnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.brushnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.brushnet, BrushNetModel) + or is_compiled + and isinstance(self.brushnet._orig_mod, BrushNetModel) + ): + self.check_image(image, mask, prompt, prompt_embeds) + else: + assert False + + # Check `brushnet_conditioning_scale` + if ( + isinstance(self.brushnet, BrushNetModel) + or is_compiled + and isinstance(self.brushnet._orig_mod, BrushNetModel) + ): + if not isinstance(brushnet_conditioning_scale, float): + raise TypeError("For single brushnet: `brushnet_conditioning_scale` must be type `float`.") + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def check_image(self, image, mask, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + mask_is_pil = isinstance(mask, PIL.Image.Image) + mask_is_tensor = isinstance(mask, torch.Tensor) + mask_is_np = isinstance(mask, np.ndarray) + mask_is_pil_list = isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image) + mask_is_tensor_list = isinstance(mask, list) and isinstance(mask[0], torch.Tensor) + mask_is_np_list = isinstance(mask, list) and isinstance(mask[0], np.ndarray) + + if ( + not mask_is_pil + and not mask_is_tensor + and not mask_is_np + and not mask_is_pil_list + and not mask_is_tensor_list + and not mask_is_np_list + ): + raise TypeError( + f"mask must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(mask)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + noise = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = noise * self.scheduler.init_noise_sigma + return latents, noise + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
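+ # In the denoising loop this typically means running the UNet on the concatenated
+ # [negative, positive] prompt embeddings (see `_encode_prompt` above) and combining the two
+ # predictions as: noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond).
+ # `time_cond_proj_dim` is typically set only for guidance-distilled UNets (e.g. LCM), which embed the
+ # guidance scale directly via `get_guidance_scale_embedding`, so explicit CFG is skipped for them.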
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + mask: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + brushnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The BrushNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.FloatTensor`, it is passed to BrushNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. If multiple BrushNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single BrushNet. When `prompt` is a list, and if a list of images is passed for a single BrushNet, + each will be paired with each prompt in the `prompt` list. This also applies to multiple BrushNets, + where a list of image lists can be passed to batch for each prompt and each BrushNet. + mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The BrushNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.FloatTensor`, it is passed to BrushNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. 
If multiple BrushNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single BrushNet. When `prompt` is a list, and if a list of images is passed for a single BrushNet, + each will be paired with each prompt in the `prompt` list. This also applies to multiple BrushNets, + where a list of image lists can be passed to batch for each prompt and each BrushNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. 
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that is called every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that, if specified, is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + brushnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the BrushNet are multiplied by `brushnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple BrushNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The BrushNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the BrushNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the BrushNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content.
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + brushnet = self.brushnet._orig_mod if is_compiled_module(self.brushnet) else self.brushnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + control_guidance_start, control_guidance_end = ( + [control_guidance_start], + [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + image, + mask, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + brushnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + global_pool_conditions = ( + brushnet.config.global_pool_conditions + if isinstance(brushnet, BrushNetModel) + else brushnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(brushnet, BrushNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=brushnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + original_mask = self.prepare_image( + image=mask, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=brushnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + original_mask = (original_mask.sum(1)[:, None, :, :] < 0).to(image.dtype) + height, width = image.shape[-2:] + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents, noise = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.1 prepare condition latents + conditioning_latents = self.vae.encode(image).latent_dist.sample() * self.vae.config.scaling_factor + mask = torch.nn.functional.interpolate( + original_mask, + size=( + conditioning_latents.shape[-2], + conditioning_latents.shape[-1] + ) + ) + conditioning_latents = torch.concat([conditioning_latents, mask], 1) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which brushnets to keep + brushnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + brushnet_keep.append(keeps[0] if isinstance(brushnet, BrushNetModel) else keeps) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + is_unet_compiled = is_compiled_module(self.unet) + is_brushnet_compiled = is_compiled_module(self.brushnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_brushnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # brushnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer BrushNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + brushnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + brushnet_prompt_embeds = prompt_embeds + + if isinstance(brushnet_keep[i], list): + cond_scale = [c * s for c, s in zip(brushnet_conditioning_scale, brushnet_keep[i])] + else: + brushnet_cond_scale = brushnet_conditioning_scale + if isinstance(brushnet_cond_scale, list): + brushnet_cond_scale = brushnet_cond_scale[0] + cond_scale = brushnet_cond_scale * brushnet_keep[i] + + down_block_res_samples, mid_block_res_sample, up_block_res_samples = self.brushnet( + control_model_input, + t, + encoder_hidden_states=brushnet_prompt_embeds, + brushnet_cond=conditioning_latents, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # BrushNet was only inferred for the conditional batch. + # To apply the output of BrushNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged.
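+ # (Each residual `d` below is produced for the conditional batch only, so it has shape
+ # [B, C, H, W]; `torch.cat([torch.zeros_like(d), d])` prepends a zero tensor along the
+ # batch dimension, giving [2B, C, H, W] so the result matches the CFG-doubled
+ # `latent_model_input` while leaving the unconditional half untouched.)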
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + up_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in up_block_res_samples] + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_add_samples=down_block_res_samples, + mid_block_add_sample=mid_block_res_sample, + up_block_add_samples=up_block_res_samples, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and brushnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.brushnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/inpaint/model/brushnet/unet_2d_blocks.py b/inpaint/model/brushnet/unet_2d_blocks.py new file mode 100644 index 0000000..dcaae8e --- /dev/null +++ b/inpaint/model/brushnet/unet_2d_blocks.py @@ -0,0 +1,388 @@ +from typing import Dict, Any, Optional, Tuple + +import torch +from diffusers.models.resnet import ResnetBlock2D +from diffusers.utils import is_torch_version +from diffusers.utils.torch_utils import apply_freeu +from torch import nn + + +class MidBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = 
"swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + use_linear_projection: bool = False, + ): + super().__init__() + + self.has_cross_attention = False + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + + for i in range(num_layers): + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + lora_scale = 1.0 + hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale) + for resnet in self.resnets[1:]: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + hidden_states = resnet(hidden_states, temb, scale=lora_scale) + + return hidden_states + + +def DownBlock2D_forward( + self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, + down_block_add_samples: Optional[torch.FloatTensor] = None, +) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb, scale=scale) + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop(0) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, scale=scale) + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop(0) # todo: add before or after + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +def CrossAttnDownBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + 
attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + additional_residuals: Optional[torch.FloatTensor] = None, + down_block_add_samples: Optional[torch.FloatTensor] = None, +) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 + + blocks = list(zip(self.resnets, self.attentions)) + + for i, (resnet, attn) in enumerate(blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb, scale=lora_scale) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + # apply additional residuals to the output of the last pair of resnet and attention blocks + if i == len(blocks) - 1 and additional_residuals is not None: + hidden_states = hidden_states + additional_residuals + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop(0) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, scale=lora_scale) + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop(0) # todo: add before or after + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +def CrossAttnUpBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + return_res_samples: Optional[bool] = False, + up_block_add_samples: Optional[torch.FloatTensor] = None, +) -> torch.FloatTensor: + lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + if return_res_samples: + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if 
is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb, scale=lora_scale) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop(0) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size, scale=lora_scale) + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop(0) + + if return_res_samples: + return hidden_states, output_states + else: + return hidden_states + + +def UpBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + scale: float = 1.0, + return_res_samples: Optional[bool] = False, + up_block_add_samples: Optional[torch.FloatTensor] = None, +) -> torch.FloatTensor: + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + if return_res_samples: + output_states = () + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb, scale=scale) + + if return_res_samples: + 
output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop(0) # todo: add before or after + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size, scale=scale) + + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop(0) # todo: add before or after + + if return_res_samples: + return hidden_states, output_states + else: + return hidden_states diff --git a/inpaint/model/controlnet.py b/inpaint/model/controlnet.py new file mode 100644 index 0000000..7b4d243 --- /dev/null +++ b/inpaint/model/controlnet.py @@ -0,0 +1,194 @@ +import PIL.Image +import cv2 +import torch +from diffusers import ControlNetModel +from loguru import logger +from iopaint.schema import InpaintRequest, ModelType + +from .base import DiffusionInpaintModel +from .helper.controlnet_preprocess import ( + make_canny_control_image, + make_openpose_control_image, + make_depth_control_image, + make_inpaint_control_image, +) +from .helper.cpu_text_encoder import CPUTextEncoderWrapper +from .original_sd_configs import get_config_files +from .utils import ( + get_scheduler, + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) + + +class ControlNet(DiffusionInpaintModel): + name = "controlnet" + pad_mod = 8 + min_size = 512 + + @property + def lcm_lora_id(self): + if self.model_info.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SD_INPAINT, + ]: + return "latent-consistency/lcm-lora-sdv1-5" + if self.model_info.model_type in [ + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SDXL_INPAINT, + ]: + return "latent-consistency/lcm-lora-sdxl" + raise NotImplementedError(f"Unsupported controlnet lcm model {self.model_info}") + + def init_model(self, device: torch.device, **kwargs): + model_info = kwargs["model_info"] + controlnet_method = kwargs["controlnet_method"] + + self.model_info = model_info + self.controlnet_method = controlnet_method + + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + self.local_files_only = model_kwargs["local_files_only"] + + disable_nsfw_checker = kwargs["disable_nsfw"] or kwargs.get( + "cpu_offload", False + ) + if disable_nsfw_checker: + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + self.torch_dtype = torch_dtype + + original_config_file_name = "v1" + if model_info.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SD_INPAINT, + ]: + from diffusers import ( + StableDiffusionControlNetInpaintPipeline as PipeClass, + ) + original_config_file_name = "v1" + + elif model_info.model_type in [ + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SDXL_INPAINT, + ]: + from diffusers import ( + StableDiffusionXLControlNetInpaintPipeline as PipeClass, + ) + original_config_file_name = "xl" + + controlnet = ControlNetModel.from_pretrained( + pretrained_model_name_or_path=controlnet_method, + resume_download=True, + local_files_only=model_kwargs["local_files_only"], + torch_dtype=self.torch_dtype, + ) + if model_info.is_single_file_diffusers: + if self.model_info.model_type == 
ModelType.DIFFUSERS_SD: + model_kwargs["num_in_channels"] = 4 + else: + model_kwargs["num_in_channels"] = 9 + + self.model = PipeClass.from_single_file( + model_info.path, + controlnet=controlnet, + load_safety_checker=not disable_nsfw_checker, + torch_dtype=torch_dtype, + original_config_file=get_config_files()[original_config_file_name], + **model_kwargs, + ) + else: + self.model = handle_from_pretrained_exceptions( + PipeClass.from_pretrained, + pretrained_model_name_or_path=model_info.path, + controlnet=controlnet, + variant="fp16", + torch_dtype=torch_dtype, + **model_kwargs, + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def switch_controlnet_method(self, new_method: str): + self.controlnet_method = new_method + controlnet = ControlNetModel.from_pretrained( + new_method, + resume_download=True, + local_files_only=self.local_files_only, + torch_dtype=self.torch_dtype, + ).to(self.model.device) + self.model.controlnet = controlnet + + def _get_control_image(self, image, mask): + if "canny" in self.controlnet_method: + control_image = make_canny_control_image(image) + elif "openpose" in self.controlnet_method: + control_image = make_openpose_control_image(image) + elif "depth" in self.controlnet_method: + control_image = make_depth_control_image(image) + elif "inpaint" in self.controlnet_method: + control_image = make_inpaint_control_image(image, mask) + else: + raise NotImplementedError(f"{self.controlnet_method} not implemented") + return control_image + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + scheduler_config = self.model.scheduler.config + scheduler = get_scheduler(config.sd_sampler, scheduler_config) + self.model.scheduler = scheduler + + img_h, img_w = image.shape[:2] + control_image = self._get_control_image(image, mask) + mask_image = PIL.Image.fromarray(mask[:, :, -1], mode="L") + image = PIL.Image.fromarray(image) + + output = self.model( + image=image, + mask_image=mask_image, + control_image=control_image, + prompt=config.prompt, + negative_prompt=config.negative_prompt, + num_inference_steps=config.sd_steps, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + controlnet_conditioning_scale=config.controlnet_conditioning_scale, + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/ddim_sampler.py b/inpaint/model/ddim_sampler.py new file mode 100644 index 0000000..a3f44fd --- /dev/null +++ b/inpaint/model/ddim_sampler.py @@ -0,0 +1,193 @@ +import torch +import numpy as np +from tqdm import tqdm + +from .utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like + +from loguru import logger + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear"): + super().__init__() + self.model = model + 
self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + setattr(self, name, attr) + + def make_schedule( + self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True + ): + self.ddim_timesteps = make_ddim_timesteps( + ddim_discr_method=ddim_discretize, + num_ddim_timesteps=ddim_num_steps, + # array([1]) + num_ddpm_timesteps=self.ddpm_num_timesteps, + verbose=verbose, + ) + alphas_cumprod = self.model.alphas_cumprod # torch.Size([1000]) + assert ( + alphas_cumprod.shape[0] == self.ddpm_num_timesteps + ), "alphas have to be defined for each timestep" + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer("betas", to_torch(self.model.betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) + self.register_buffer( + "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) + ) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer( + "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", + to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", + to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), + ) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( + alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta, + verbose=verbose, + ) + self.register_buffer("ddim_sigmas", ddim_sigmas) + self.register_buffer("ddim_alphas", ddim_alphas) + self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) + self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) + / (1 - self.alphas_cumprod) + * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) + ) + self.register_buffer( + "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps + ) + + @torch.no_grad() + def sample(self, steps, conditioning, batch_size, shape): + self.make_schedule(ddim_num_steps=steps, ddim_eta=0, verbose=False) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + # samples: 1,3,128,128 + return self.ddim_sampling( + conditioning, + size, + quantize_denoised=False, + ddim_use_original_steps=False, + noise_dropout=0, + temperature=1.0, + ) + + @torch.no_grad() + def ddim_sampling( + self, + cond, + shape, + ddim_use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + ): + device = self.model.betas.device + b = shape[0] + img = torch.randn(shape, device=device, dtype=cond.dtype) + timesteps = ( + self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + ) + + time_range = ( + reversed(range(0, timesteps)) + if ddim_use_original_steps + else np.flip(timesteps) + ) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + logger.info(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + outs = self.p_sample_ddim( + 
img, + cond, + ts, + index=index, + use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, + temperature=temperature, + noise_dropout=noise_dropout, + ) + img, _ = outs + + return img + + @torch.no_grad() + def p_sample_ddim( + self, + x, + c, + t, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + ): + b, *_, device = *x.shape, x.device + e_t = self.model.apply_model(x, t, c) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = ( + self.model.alphas_cumprod_prev + if use_original_steps + else self.ddim_alphas_prev + ) + sqrt_one_minus_alphas = ( + self.model.sqrt_one_minus_alphas_cumprod + if use_original_steps + else self.ddim_sqrt_one_minus_alphas + ) + sigmas = ( + self.model.ddim_sigmas_for_original_num_steps + if use_original_steps + else self.ddim_sigmas + ) + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full( + (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device + ) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: # 没用 + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.0: # 没用 + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 diff --git a/inpaint/model/fcf.py b/inpaint/model/fcf.py new file mode 100644 index 0000000..a6f2d42 --- /dev/null +++ b/inpaint/model/fcf.py @@ -0,0 +1,1737 @@ +import os +import random + +import cv2 +import torch +import numpy as np +import torch.fft as fft + +from iopaint.schema import InpaintRequest + +from iopaint.helper import ( + load_model, + get_cache_path_by_url, + norm_img, + boxes_from_mask, + resize_max_size, + download_model, +) +from .base import InpaintModel +from torch import conv2d, nn +import torch.nn.functional as F + +from .utils import ( + setup_filter, + _parse_scaling, + _parse_padding, + Conv2dLayer, + FullyConnectedLayer, + MinibatchStdLayer, + activation_funcs, + conv2d_resample, + bias_act, + upsample2d, + normalize_2nd_moment, + downsample2d, +) + + +def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl="cuda"): + assert isinstance(x, torch.Tensor) + return _upfirdn2d_ref( + x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain + ) + + +def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): + """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.""" + # Validate arguments. + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + assert f.dtype == torch.float32 and not f.requires_grad + batch_size, num_channels, in_height, in_width = x.shape + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Upsample by inserting zeros. 
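+ # For example, with up=2 a row [a, b] becomes [a, 0, b, 0]: every input sample is
+ # followed by (up - 1) zeros along each spatial axis before the low-pass filter `f`
+ # is applied and the optional padding/cropping and downsampling take place.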
+ x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) + x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) + x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) + + # Pad or crop. + x = torch.nn.functional.pad( + x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)] + ) + x = x[ + :, + :, + max(-pady0, 0) : x.shape[2] - max(-pady1, 0), + max(-padx0, 0) : x.shape[3] - max(-padx1, 0), + ] + + # Setup filter. + f = f * (gain ** (f.ndim / 2)) + f = f.to(x.dtype) + if not flip_filter: + f = f.flip(list(range(f.ndim))) + + # Convolve with the filter. + f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) + if f.ndim == 4: + x = conv2d(input=x, weight=f, groups=num_channels) + else: + x = conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) + x = conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) + + # Downsample by throwing away pixels. + x = x[:, :, ::downy, ::downx] + return x + + +class EncoderEpilogue(torch.nn.Module): + def __init__( + self, + in_channels, # Number of input channels. + cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label. + z_dim, # Output Latent (Z) dimensionality. + resolution, # Resolution of this block. + img_channels, # Number of input color channels. + architecture="resnet", # Architecture: 'orig', 'skip', 'resnet'. + mbstd_group_size=4, # Group size for the minibatch standard deviation layer, None = entire minibatch. + mbstd_num_channels=1, # Number of features for the minibatch standard deviation layer, 0 = disable. + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + ): + assert architecture in ["orig", "skip", "resnet"] + super().__init__() + self.in_channels = in_channels + self.cmap_dim = cmap_dim + self.resolution = resolution + self.img_channels = img_channels + self.architecture = architecture + + if architecture == "skip": + self.fromrgb = Conv2dLayer( + self.img_channels, in_channels, kernel_size=1, activation=activation + ) + self.mbstd = ( + MinibatchStdLayer( + group_size=mbstd_group_size, num_channels=mbstd_num_channels + ) + if mbstd_num_channels > 0 + else None + ) + self.conv = Conv2dLayer( + in_channels + mbstd_num_channels, + in_channels, + kernel_size=3, + activation=activation, + conv_clamp=conv_clamp, + ) + self.fc = FullyConnectedLayer( + in_channels * (resolution**2), z_dim, activation=activation + ) + self.dropout = torch.nn.Dropout(p=0.5) + + def forward(self, x, cmap, force_fp32=False): + _ = force_fp32 # unused + dtype = torch.float32 + memory_format = torch.contiguous_format + + # FromRGB. + x = x.to(dtype=dtype, memory_format=memory_format) + + # Main layers. + if self.mbstd is not None: + x = self.mbstd(x) + const_e = self.conv(x) + x = self.fc(const_e.flatten(1)) + x = self.dropout(x) + + # Conditioning. + if self.cmap_dim > 0: + x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) + + assert x.dtype == dtype + return x, const_e + + +class EncoderBlock(torch.nn.Module): + def __init__( + self, + in_channels, # Number of input channels, 0 = first block. + tmp_channels, # Number of intermediate channels. + out_channels, # Number of output channels. + resolution, # Resolution of this block. + img_channels, # Number of input color channels. + first_layer_idx, # Index of the first layer. + architecture="skip", # Architecture: 'orig', 'skip', 'resnet'. 
+ activation="lrelu", # Activation function: 'relu', 'lrelu', etc. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16=False, # Use FP16 for this block? + fp16_channels_last=False, # Use channels-last memory format with FP16? + freeze_layers=0, # Freeze-D: Number of layers to freeze. + ): + assert in_channels in [0, tmp_channels] + assert architecture in ["orig", "skip", "resnet"] + super().__init__() + self.in_channels = in_channels + self.resolution = resolution + self.img_channels = img_channels + 1 + self.first_layer_idx = first_layer_idx + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = use_fp16 and fp16_channels_last + self.register_buffer("resample_filter", setup_filter(resample_filter)) + + self.num_layers = 0 + + def trainable_gen(): + while True: + layer_idx = self.first_layer_idx + self.num_layers + trainable = layer_idx >= freeze_layers + self.num_layers += 1 + yield trainable + + trainable_iter = trainable_gen() + + if in_channels == 0: + self.fromrgb = Conv2dLayer( + self.img_channels, + tmp_channels, + kernel_size=1, + activation=activation, + trainable=next(trainable_iter), + conv_clamp=conv_clamp, + channels_last=self.channels_last, + ) + + self.conv0 = Conv2dLayer( + tmp_channels, + tmp_channels, + kernel_size=3, + activation=activation, + trainable=next(trainable_iter), + conv_clamp=conv_clamp, + channels_last=self.channels_last, + ) + + self.conv1 = Conv2dLayer( + tmp_channels, + out_channels, + kernel_size=3, + activation=activation, + down=2, + trainable=next(trainable_iter), + resample_filter=resample_filter, + conv_clamp=conv_clamp, + channels_last=self.channels_last, + ) + + if architecture == "resnet": + self.skip = Conv2dLayer( + tmp_channels, + out_channels, + kernel_size=1, + bias=False, + down=2, + trainable=next(trainable_iter), + resample_filter=resample_filter, + channels_last=self.channels_last, + ) + + def forward(self, x, img, force_fp32=False): + # dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + dtype = torch.float32 + memory_format = ( + torch.channels_last + if self.channels_last and not force_fp32 + else torch.contiguous_format + ) + + # Input. + if x is not None: + x = x.to(dtype=dtype, memory_format=memory_format) + + # FromRGB. + if self.in_channels == 0: + img = img.to(dtype=dtype, memory_format=memory_format) + y = self.fromrgb(img) + x = x + y if x is not None else y + img = ( + downsample2d(img, self.resample_filter) + if self.architecture == "skip" + else None + ) + + # Main layers. + if self.architecture == "resnet": + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x) + feat = x.clone() + x = self.conv1(x, gain=np.sqrt(0.5)) + x = y.add_(x) + else: + x = self.conv0(x) + feat = x.clone() + x = self.conv1(x) + + assert x.dtype == dtype + return x, img, feat + + +class EncoderNetwork(torch.nn.Module): + def __init__( + self, + c_dim, # Conditioning label (C) dimensionality. + z_dim, # Input latent (Z) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture="orig", # Architecture: 'orig', 'skip', 'resnet'. + channel_base=16384, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=0, # Use FP16 for the N highest resolutions. 
+ conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for EncoderEpilogue. + ): + super().__init__() + self.c_dim = c_dim + self.z_dim = z_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [ + 2**i for i in range(self.img_resolution_log2, 2, -1) + ] + channels_dict = { + res: min(channel_base // res, channel_max) + for res in self.block_resolutions + [4] + } + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict( + img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp + ) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = res >= fp16_resolution + use_fp16 = False + block = EncoderBlock( + in_channels, + tmp_channels, + out_channels, + resolution=res, + first_layer_idx=cur_layer_idx, + use_fp16=use_fp16, + **block_kwargs, + **common_kwargs, + ) + setattr(self, f"b{res}", block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork( + z_dim=0, + c_dim=c_dim, + w_dim=cmap_dim, + num_ws=None, + w_avg_beta=None, + **mapping_kwargs, + ) + self.b4 = EncoderEpilogue( + channels_dict[4], + cmap_dim=cmap_dim, + z_dim=z_dim * 2, + resolution=4, + **epilogue_kwargs, + **common_kwargs, + ) + + def forward(self, img, c, **block_kwargs): + x = None + feats = {} + for res in self.block_resolutions: + block = getattr(self, f"b{res}") + x, img, feat = block(x, img, **block_kwargs) + feats[res] = feat + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x, const_e = self.b4(x, cmap) + feats[4] = const_e + + B, _ = x.shape + z = torch.zeros( + (B, self.z_dim), requires_grad=False, dtype=x.dtype, device=x.device + ) ## Noise for Co-Modulation + return x, z, feats + + +def fma(a, b, c): # => a * b + c + return _FusedMultiplyAdd.apply(a, b, c) + + +class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c + @staticmethod + def forward(ctx, a, b, c): # pylint: disable=arguments-differ + out = torch.addcmul(c, a, b) + ctx.save_for_backward(a, b) + ctx.c_shape = c.shape + return out + + @staticmethod + def backward(ctx, dout): # pylint: disable=arguments-differ + a, b = ctx.saved_tensors + c_shape = ctx.c_shape + da = None + db = None + dc = None + + if ctx.needs_input_grad[0]: + da = _unbroadcast(dout * b, a.shape) + + if ctx.needs_input_grad[1]: + db = _unbroadcast(dout * a, b.shape) + + if ctx.needs_input_grad[2]: + dc = _unbroadcast(dout, c_shape) + + return da, db, dc + + +def _unbroadcast(x, shape): + extra_dims = x.ndim - len(shape) + assert extra_dims >= 0 + dim = [ + i + for i in range(x.ndim) + if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1) + ] + if len(dim): + x = x.sum(dim=dim, keepdim=True) + if extra_dims: + x = x.reshape(-1, *x.shape[extra_dims + 1 :]) + assert x.shape == shape + return x + + +def modulated_conv2d( + x, # Input tensor of shape [batch_size, in_channels, in_height, in_width]. 
+ weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. + styles, # Modulation coefficients of shape [batch_size, in_channels]. + noise=None, # Optional noise tensor to add to the output activations. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + padding=0, # Padding with respect to the upsampled image. + resample_filter=None, + # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). + demodulate=True, # Apply weight demodulation? + flip_weight=True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d). + fused_modconv=True, # Perform modulation, convolution, and demodulation as a single fused operation? +): + batch_size = x.shape[0] + out_channels, in_channels, kh, kw = weight.shape + + # Pre-normalize inputs to avoid FP16 overflow. + if x.dtype == torch.float16 and demodulate: + weight = weight * ( + 1 + / np.sqrt(in_channels * kh * kw) + / weight.norm(float("inf"), dim=[1, 2, 3], keepdim=True) + ) # max_Ikk + styles = styles / styles.norm(float("inf"), dim=1, keepdim=True) # max_I + + # Calculate per-sample weights and demodulation coefficients. + w = None + dcoefs = None + if demodulate or fused_modconv: + w = weight.unsqueeze(0) # [NOIkk] + w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] + if demodulate: + dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO] + if demodulate and fused_modconv: + w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] + # Execute by scaling the activations before and after the convolution. + if not fused_modconv: + x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) + x = conv2d_resample.conv2d_resample( + x=x, + w=weight.to(x.dtype), + f=resample_filter, + up=up, + down=down, + padding=padding, + flip_weight=flip_weight, + ) + if demodulate and noise is not None: + x = fma( + x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype) + ) + elif demodulate: + x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) + elif noise is not None: + x = x.add_(noise.to(x.dtype)) + return x + + # Execute as one fused op using grouped convolution. + batch_size = int(batch_size) + x = x.reshape(1, -1, *x.shape[2:]) + w = w.reshape(-1, in_channels, kh, kw) + x = conv2d_resample( + x=x, + w=w.to(x.dtype), + f=resample_filter, + up=up, + down=down, + padding=padding, + groups=batch_size, + flip_weight=flip_weight, + ) + x = x.reshape(batch_size, -1, *x.shape[2:]) + if noise is not None: + x = x.add_(noise) + return x + + +class SynthesisLayer(torch.nn.Module): + def __init__( + self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this layer. + kernel_size=3, # Convolution kernel size. + up=1, # Integer upsampling factor. + use_noise=True, # Enable noise input? + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + channels_last=False, # Use channels_last format for the weights? 
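# A minimal sketch of the modulation/demodulation step performed by
# modulated_conv2d above, written against plain torch with hypothetical sizes
# (batch=2, 4->8 channels, 3x3 kernel).
import torch

batch, out_c, in_c, k = 2, 8, 4, 3
weight = torch.randn(out_c, in_c, k, k)
styles = torch.randn(batch, in_c)

w = weight.unsqueeze(0) * styles.reshape(batch, 1, -1, 1, 1)   # per-sample modulated weights [N, O, I, k, k]
dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()        # demodulation coefficients [N, O]
w = w * dcoefs.reshape(batch, -1, 1, 1, 1)                     # unit-norm per output channel
print(w.shape)                                                 # torch.Size([2, 8, 4, 3, 3])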
+ ): + super().__init__() + self.resolution = resolution + self.up = up + self.use_noise = use_noise + self.activation = activation + self.conv_clamp = conv_clamp + self.register_buffer("resample_filter", setup_filter(resample_filter)) + self.padding = kernel_size // 2 + self.act_gain = activation_funcs[activation].def_gain + + self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) + memory_format = ( + torch.channels_last if channels_last else torch.contiguous_format + ) + self.weight = torch.nn.Parameter( + torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to( + memory_format=memory_format + ) + ) + if use_noise: + self.register_buffer("noise_const", torch.randn([resolution, resolution])) + self.noise_strength = torch.nn.Parameter(torch.zeros([])) + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + + def forward(self, x, w, noise_mode="none", fused_modconv=True, gain=1): + assert noise_mode in ["random", "const", "none"] + in_resolution = self.resolution // self.up + styles = self.affine(w) + + noise = None + if self.use_noise and noise_mode == "random": + noise = ( + torch.randn( + [x.shape[0], 1, self.resolution, self.resolution], device=x.device + ) + * self.noise_strength + ) + if self.use_noise and noise_mode == "const": + noise = self.noise_const * self.noise_strength + + flip_weight = self.up == 1 # slightly faster + x = modulated_conv2d( + x=x, + weight=self.weight, + styles=styles, + noise=noise, + up=self.up, + padding=self.padding, + resample_filter=self.resample_filter, + flip_weight=flip_weight, + fused_modconv=fused_modconv, + ) + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + x = F.leaky_relu(x, negative_slope=0.2, inplace=False) + if act_gain != 1: + x = x * act_gain + if act_clamp is not None: + x = x.clamp(-act_clamp, act_clamp) + return x + + +class ToRGBLayer(torch.nn.Module): + def __init__( + self, + in_channels, + out_channels, + w_dim, + kernel_size=1, + conv_clamp=None, + channels_last=False, + ): + super().__init__() + self.conv_clamp = conv_clamp + self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) + memory_format = ( + torch.channels_last if channels_last else torch.contiguous_format + ) + self.weight = torch.nn.Parameter( + torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to( + memory_format=memory_format + ) + ) + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2)) + + def forward(self, x, w, fused_modconv=True): + styles = self.affine(w) * self.weight_gain + x = modulated_conv2d( + x=x, + weight=self.weight, + styles=styles, + demodulate=False, + fused_modconv=fused_modconv, + ) + x = bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp) + return x + + +class SynthesisForeword(torch.nn.Module): + def __init__( + self, + z_dim, # Output Latent (Z) dimensionality. + resolution, # Resolution of this block. + in_channels, + img_channels, # Number of input color channels. + architecture="skip", # Architecture: 'orig', 'skip', 'resnet'. + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. 
+ ): + super().__init__() + self.in_channels = in_channels + self.z_dim = z_dim + self.resolution = resolution + self.img_channels = img_channels + self.architecture = architecture + + self.fc = FullyConnectedLayer( + self.z_dim, (self.z_dim // 2) * 4 * 4, activation=activation + ) + self.conv = SynthesisLayer( + self.in_channels, self.in_channels, w_dim=(z_dim // 2) * 3, resolution=4 + ) + + if architecture == "skip": + self.torgb = ToRGBLayer( + self.in_channels, + self.img_channels, + kernel_size=1, + w_dim=(z_dim // 2) * 3, + ) + + def forward(self, x, ws, feats, img, force_fp32=False): + _ = force_fp32 # unused + dtype = torch.float32 + memory_format = torch.contiguous_format + + x_global = x.clone() + # ToRGB. + x = self.fc(x) + x = x.view(-1, self.z_dim // 2, 4, 4) + x = x.to(dtype=dtype, memory_format=memory_format) + + # Main layers. + x_skip = feats[4].clone() + x = x + x_skip + + mod_vector = [] + mod_vector.append(ws[:, 0]) + mod_vector.append(x_global.clone()) + mod_vector = torch.cat(mod_vector, dim=1) + + x = self.conv(x, mod_vector) + + mod_vector = [] + mod_vector.append(ws[:, 2 * 2 - 3]) + mod_vector.append(x_global.clone()) + mod_vector = torch.cat(mod_vector, dim=1) + + if self.architecture == "skip": + img = self.torgb(x, mod_vector) + img = img.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + assert x.dtype == dtype + return x, img + + +class SELayer(nn.Module): + def __init__(self, channel, reduction=16): + super(SELayer, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction, bias=False), + nn.ReLU(inplace=False), + nn.Linear(channel // reduction, channel, bias=False), + nn.Sigmoid(), + ) + + def forward(self, x): + b, c, _, _ = x.size() + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, c, 1, 1) + res = x * y.expand_as(x) + return res + + +class FourierUnit(nn.Module): + def __init__( + self, + in_channels, + out_channels, + groups=1, + spatial_scale_factor=None, + spatial_scale_mode="bilinear", + spectral_pos_encoding=False, + use_se=False, + se_kwargs=None, + ffc3d=False, + fft_norm="ortho", + ): + # bn_layer not used + super(FourierUnit, self).__init__() + self.groups = groups + + self.conv_layer = torch.nn.Conv2d( + in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), + out_channels=out_channels * 2, + kernel_size=1, + stride=1, + padding=0, + groups=self.groups, + bias=False, + ) + self.relu = torch.nn.ReLU(inplace=False) + + # squeeze and excitation block + self.use_se = use_se + if use_se: + if se_kwargs is None: + se_kwargs = {} + self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) + + self.spatial_scale_factor = spatial_scale_factor + self.spatial_scale_mode = spatial_scale_mode + self.spectral_pos_encoding = spectral_pos_encoding + self.ffc3d = ffc3d + self.fft_norm = fft_norm + + def forward(self, x): + batch = x.shape[0] + + if self.spatial_scale_factor is not None: + orig_size = x.shape[-2:] + x = F.interpolate( + x, + scale_factor=self.spatial_scale_factor, + mode=self.spatial_scale_mode, + align_corners=False, + ) + + r_size = x.size() + # (batch, c, h, w/2+1, 2) + fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) + ffted = fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) + ffted = torch.stack((ffted.real, ffted.imag), dim=-1) + ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) + ffted = ffted.view( + ( + batch, + -1, + ) + + ffted.size()[3:] + ) + + if self.spectral_pos_encoding: + height, width = 
ffted.shape[-2:] + coords_vert = ( + torch.linspace(0, 1, height)[None, None, :, None] + .expand(batch, 1, height, width) + .to(ffted) + ) + coords_hor = ( + torch.linspace(0, 1, width)[None, None, None, :] + .expand(batch, 1, height, width) + .to(ffted) + ) + ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) + + if self.use_se: + ffted = self.se(ffted) + + ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) + ffted = self.relu(ffted) + + ffted = ( + ffted.view( + ( + batch, + -1, + 2, + ) + + ffted.size()[2:] + ) + .permute(0, 1, 3, 4, 2) + .contiguous() + ) # (batch,c, t, h, w/2+1, 2) + ffted = torch.complex(ffted[..., 0], ffted[..., 1]) + + ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] + output = torch.fft.irfftn( + ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm + ) + + if self.spatial_scale_factor is not None: + output = F.interpolate( + output, + size=orig_size, + mode=self.spatial_scale_mode, + align_corners=False, + ) + + return output + + +class SpectralTransform(nn.Module): + def __init__( + self, + in_channels, + out_channels, + stride=1, + groups=1, + enable_lfu=True, + **fu_kwargs, + ): + # bn_layer not used + super(SpectralTransform, self).__init__() + self.enable_lfu = enable_lfu + if stride == 2: + self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) + else: + self.downsample = nn.Identity() + + self.stride = stride + self.conv1 = nn.Sequential( + nn.Conv2d( + in_channels, out_channels // 2, kernel_size=1, groups=groups, bias=False + ), + # nn.BatchNorm2d(out_channels // 2), + nn.ReLU(inplace=True), + ) + self.fu = FourierUnit(out_channels // 2, out_channels // 2, groups, **fu_kwargs) + if self.enable_lfu: + self.lfu = FourierUnit(out_channels // 2, out_channels // 2, groups) + self.conv2 = torch.nn.Conv2d( + out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False + ) + + def forward(self, x): + x = self.downsample(x) + x = self.conv1(x) + output = self.fu(x) + + if self.enable_lfu: + n, c, h, w = x.shape + split_no = 2 + split_s = h // split_no + xs = torch.cat( + torch.split(x[:, : c // 4], split_s, dim=-2), dim=1 + ).contiguous() + xs = torch.cat(torch.split(xs, split_s, dim=-1), dim=1).contiguous() + xs = self.lfu(xs) + xs = xs.repeat(1, 1, split_no, split_no).contiguous() + else: + xs = 0 + + output = self.conv2(x + output + xs) + + return output + + +class FFC(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + ratio_gin, + ratio_gout, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=False, + enable_lfu=True, + padding_type="reflect", + gated=False, + **spectral_kwargs, + ): + super(FFC, self).__init__() + + assert stride == 1 or stride == 2, "Stride should be 1 or 2." 
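# Standalone sketch of the FFT bookkeeping in FourierUnit above: a real 2-D FFT
# halves the last spatial dimension, and the real/imag parts are folded into the
# channel axis before the 1x1 conv (hypothetical 16-channel 32x32 input).
import torch
import torch.fft as fft

x = torch.randn(1, 16, 32, 32)
f = fft.rfftn(x, dim=(-2, -1), norm="ortho")            # complex, shape (1, 16, 32, 17)
f = torch.stack((f.real, f.imag), dim=-1)               # (1, 16, 32, 17, 2)
f = f.permute(0, 1, 4, 2, 3).reshape(1, -1, 32, 17)     # (1, 32, 32, 17), conv_layer input
print(f.shape)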
+ self.stride = stride + + in_cg = int(in_channels * ratio_gin) + in_cl = in_channels - in_cg + out_cg = int(out_channels * ratio_gout) + out_cl = out_channels - out_cg + # groups_g = 1 if groups == 1 else int(groups * ratio_gout) + # groups_l = 1 if groups == 1 else groups - groups_g + + self.ratio_gin = ratio_gin + self.ratio_gout = ratio_gout + self.global_in_num = in_cg + + module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d + self.convl2l = module( + in_cl, + out_cl, + kernel_size, + stride, + padding, + dilation, + groups, + bias, + padding_mode=padding_type, + ) + module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d + self.convl2g = module( + in_cl, + out_cg, + kernel_size, + stride, + padding, + dilation, + groups, + bias, + padding_mode=padding_type, + ) + module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d + self.convg2l = module( + in_cg, + out_cl, + kernel_size, + stride, + padding, + dilation, + groups, + bias, + padding_mode=padding_type, + ) + module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform + self.convg2g = module( + in_cg, + out_cg, + stride, + 1 if groups == 1 else groups // 2, + enable_lfu, + **spectral_kwargs, + ) + + self.gated = gated + module = ( + nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d + ) + self.gate = module(in_channels, 2, 1) + + def forward(self, x, fname=None): + x_l, x_g = x if type(x) is tuple else (x, 0) + out_xl, out_xg = 0, 0 + + if self.gated: + total_input_parts = [x_l] + if torch.is_tensor(x_g): + total_input_parts.append(x_g) + total_input = torch.cat(total_input_parts, dim=1) + + gates = torch.sigmoid(self.gate(total_input)) + g2l_gate, l2g_gate = gates.chunk(2, dim=1) + else: + g2l_gate, l2g_gate = 1, 1 + + spec_x = self.convg2g(x_g) + + if self.ratio_gout != 1: + out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate + if self.ratio_gout != 0: + out_xg = self.convl2g(x_l) * l2g_gate + spec_x + + return out_xl, out_xg + + +class FFC_BN_ACT(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + ratio_gin, + ratio_gout, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=False, + norm_layer=nn.SyncBatchNorm, + activation_layer=nn.Identity, + padding_type="reflect", + enable_lfu=True, + **kwargs, + ): + super(FFC_BN_ACT, self).__init__() + self.ffc = FFC( + in_channels, + out_channels, + kernel_size, + ratio_gin, + ratio_gout, + stride, + padding, + dilation, + groups, + bias, + enable_lfu, + padding_type=padding_type, + **kwargs, + ) + lnorm = nn.Identity if ratio_gout == 1 else norm_layer + gnorm = nn.Identity if ratio_gout == 0 else norm_layer + global_channels = int(out_channels * ratio_gout) + # self.bn_l = lnorm(out_channels - global_channels) + # self.bn_g = gnorm(global_channels) + + lact = nn.Identity if ratio_gout == 1 else activation_layer + gact = nn.Identity if ratio_gout == 0 else activation_layer + self.act_l = lact(inplace=True) + self.act_g = gact(inplace=True) + + def forward(self, x, fname=None): + x_l, x_g = self.ffc( + x, + fname=fname, + ) + x_l = self.act_l(x_l) + x_g = self.act_g(x_g) + return x_l, x_g + + +class FFCResnetBlock(nn.Module): + def __init__( + self, + dim, + padding_type, + norm_layer, + activation_layer=nn.ReLU, + dilation=1, + spatial_transform_kwargs=None, + inline=False, + ratio_gin=0.75, + ratio_gout=0.75, + ): + super().__init__() + self.conv1 = FFC_BN_ACT( + dim, + dim, + kernel_size=3, + padding=dilation, + dilation=dilation, + norm_layer=norm_layer, + 
activation_layer=activation_layer, + padding_type=padding_type, + ratio_gin=ratio_gin, + ratio_gout=ratio_gout, + ) + self.conv2 = FFC_BN_ACT( + dim, + dim, + kernel_size=3, + padding=dilation, + dilation=dilation, + norm_layer=norm_layer, + activation_layer=activation_layer, + padding_type=padding_type, + ratio_gin=ratio_gin, + ratio_gout=ratio_gout, + ) + self.inline = inline + + def forward(self, x, fname=None): + if self.inline: + x_l, x_g = ( + x[:, : -self.conv1.ffc.global_in_num], + x[:, -self.conv1.ffc.global_in_num :], + ) + else: + x_l, x_g = x if type(x) is tuple else (x, 0) + + id_l, id_g = x_l, x_g + + x_l, x_g = self.conv1((x_l, x_g), fname=fname) + x_l, x_g = self.conv2((x_l, x_g), fname=fname) + + x_l, x_g = id_l + x_l, id_g + x_g + out = x_l, x_g + if self.inline: + out = torch.cat(out, dim=1) + return out + + +class ConcatTupleLayer(nn.Module): + def forward(self, x): + assert isinstance(x, tuple) + x_l, x_g = x + assert torch.is_tensor(x_l) or torch.is_tensor(x_g) + if not torch.is_tensor(x_g): + return x_l + return torch.cat(x, dim=1) + + +class FFCBlock(torch.nn.Module): + def __init__( + self, + dim, # Number of output/input channels. + kernel_size, # Width and height of the convolution kernel. + padding, + ratio_gin=0.75, + ratio_gout=0.75, + activation="linear", # Activation function: 'relu', 'lrelu', etc. + ): + super().__init__() + if activation == "linear": + self.activation = nn.Identity + else: + self.activation = nn.ReLU + self.padding = padding + self.kernel_size = kernel_size + self.ffc_block = FFCResnetBlock( + dim=dim, + padding_type="reflect", + norm_layer=nn.SyncBatchNorm, + activation_layer=self.activation, + dilation=1, + ratio_gin=ratio_gin, + ratio_gout=ratio_gout, + ) + + self.concat_layer = ConcatTupleLayer() + + def forward(self, gen_ft, mask, fname=None): + x = gen_ft.float() + + x_l, x_g = ( + x[:, : -self.ffc_block.conv1.ffc.global_in_num], + x[:, -self.ffc_block.conv1.ffc.global_in_num :], + ) + id_l, id_g = x_l, x_g + + x_l, x_g = self.ffc_block((x_l, x_g), fname=fname) + x_l, x_g = id_l + x_l, id_g + x_g + x = self.concat_layer((x_l, x_g)) + + return x + gen_ft.float() + + +class FFCSkipLayer(torch.nn.Module): + def __init__( + self, + dim, # Number of input/output channels. + kernel_size=3, # Convolution kernel size. + ratio_gin=0.75, + ratio_gout=0.75, + ): + super().__init__() + self.padding = kernel_size // 2 + + self.ffc_act = FFCBlock( + dim=dim, + kernel_size=kernel_size, + activation=nn.ReLU, + padding=self.padding, + ratio_gin=ratio_gin, + ratio_gout=ratio_gout, + ) + + def forward(self, gen_ft, mask, fname=None): + x = self.ffc_act(gen_ft, mask, fname=fname) + return x + + +class SynthesisBlock(torch.nn.Module): + def __init__( + self, + in_channels, # Number of input channels, 0 = first block. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this block. + img_channels, # Number of output color channels. + is_last, # Is this the last block? + architecture="skip", # Architecture: 'orig', 'skip', 'resnet'. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16=False, # Use FP16 for this block? + fp16_channels_last=False, # Use channels-last memory format with FP16? + **layer_kwargs, # Arguments for SynthesisLayer. 
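# Plain-Python sketch of the local/global channel split that FFC above applies
# with the ratio_gin=ratio_gout=0.75 defaults used by FFCBlock/FFCSkipLayer
# (hypothetical 64-channel feature map).
in_channels = out_channels = 64
ratio_gin = ratio_gout = 0.75
in_cg = int(in_channels * ratio_gin)      # 48 channels go through the spectral (global) branch
in_cl = in_channels - in_cg               # 16 channels stay in the local conv branch
out_cg = int(out_channels * ratio_gout)   # 48
out_cl = out_channels - out_cg            # 16
print(in_cl, in_cg, out_cl, out_cg)       # 16 48 16 48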
+ ): + assert architecture in ["orig", "skip", "resnet"] + super().__init__() + self.in_channels = in_channels + self.w_dim = w_dim + self.resolution = resolution + self.img_channels = img_channels + self.is_last = is_last + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = use_fp16 and fp16_channels_last + self.register_buffer("resample_filter", setup_filter(resample_filter)) + self.num_conv = 0 + self.num_torgb = 0 + self.res_ffc = {4: 0, 8: 0, 16: 0, 32: 1, 64: 1, 128: 1, 256: 1, 512: 1} + + if in_channels != 0 and resolution >= 8: + self.ffc_skip = nn.ModuleList() + for _ in range(self.res_ffc[resolution]): + self.ffc_skip.append(FFCSkipLayer(dim=out_channels)) + + if in_channels == 0: + self.const = torch.nn.Parameter( + torch.randn([out_channels, resolution, resolution]) + ) + + if in_channels != 0: + self.conv0 = SynthesisLayer( + in_channels, + out_channels, + w_dim=w_dim * 3, + resolution=resolution, + up=2, + resample_filter=resample_filter, + conv_clamp=conv_clamp, + channels_last=self.channels_last, + **layer_kwargs, + ) + self.num_conv += 1 + + self.conv1 = SynthesisLayer( + out_channels, + out_channels, + w_dim=w_dim * 3, + resolution=resolution, + conv_clamp=conv_clamp, + channels_last=self.channels_last, + **layer_kwargs, + ) + self.num_conv += 1 + + if is_last or architecture == "skip": + self.torgb = ToRGBLayer( + out_channels, + img_channels, + w_dim=w_dim * 3, + conv_clamp=conv_clamp, + channels_last=self.channels_last, + ) + self.num_torgb += 1 + + if in_channels != 0 and architecture == "resnet": + self.skip = Conv2dLayer( + in_channels, + out_channels, + kernel_size=1, + bias=False, + up=2, + resample_filter=resample_filter, + channels_last=self.channels_last, + ) + + def forward( + self, + x, + mask, + feats, + img, + ws, + fname=None, + force_fp32=False, + fused_modconv=None, + **layer_kwargs, + ): + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + dtype = torch.float32 + memory_format = ( + torch.channels_last + if self.channels_last and not force_fp32 + else torch.contiguous_format + ) + if fused_modconv is None: + fused_modconv = (not self.training) and ( + dtype == torch.float32 or int(x.shape[0]) == 1 + ) + + x = x.to(dtype=dtype, memory_format=memory_format) + x_skip = ( + feats[self.resolution].clone().to(dtype=dtype, memory_format=memory_format) + ) + + # Main layers. + if self.in_channels == 0: + x = self.conv1(x, ws[1], fused_modconv=fused_modconv, **layer_kwargs) + elif self.architecture == "resnet": + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0( + x, ws[0].clone(), fused_modconv=fused_modconv, **layer_kwargs + ) + if len(self.ffc_skip) > 0: + mask = F.interpolate( + mask, + size=x_skip.shape[2:], + ) + z = x + x_skip + for fres in self.ffc_skip: + z = fres(z, mask) + x = x + z + else: + x = x + x_skip + x = self.conv1( + x, + ws[1].clone(), + fused_modconv=fused_modconv, + gain=np.sqrt(0.5), + **layer_kwargs, + ) + x = y.add_(x) + else: + x = self.conv0( + x, ws[0].clone(), fused_modconv=fused_modconv, **layer_kwargs + ) + if len(self.ffc_skip) > 0: + mask = F.interpolate( + mask, + size=x_skip.shape[2:], + ) + z = x + x_skip + for fres in self.ffc_skip: + z = fres(z, mask) + x = x + z + else: + x = x + x_skip + x = self.conv1( + x, ws[1].clone(), fused_modconv=fused_modconv, **layer_kwargs + ) + # ToRGB. 
+ if img is not None: + img = upsample2d(img, self.resample_filter) + if self.is_last or self.architecture == "skip": + y = self.torgb(x, ws[2].clone(), fused_modconv=fused_modconv) + y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) + img = img.add_(y) if img is not None else y + + x = x.to(dtype=dtype) + assert x.dtype == dtype + assert img is None or img.dtype == torch.float32 + return x, img + + +class SynthesisNetwork(torch.nn.Module): + def __init__( + self, + w_dim, # Intermediate latent (W) dimensionality. + z_dim, # Output Latent (Z) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base=16384, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=0, # Use FP16 for the N highest resolutions. + **block_kwargs, # Arguments for SynthesisBlock. + ): + assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 + super().__init__() + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [ + 2**i for i in range(3, self.img_resolution_log2 + 1) + ] + channels_dict = { + res: min(channel_base // res, channel_max) for res in self.block_resolutions + } + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + self.foreword = SynthesisForeword( + img_channels=img_channels, + in_channels=min(channel_base // 4, channel_max), + z_dim=z_dim * 2, + resolution=4, + ) + + self.num_ws = self.img_resolution_log2 * 2 - 2 + for res in self.block_resolutions: + if res // 2 in channels_dict.keys(): + in_channels = channels_dict[res // 2] if res > 4 else 0 + else: + in_channels = min(channel_base // (res // 2), channel_max) + out_channels = channels_dict[res] + use_fp16 = res >= fp16_resolution + use_fp16 = False + is_last = res == self.img_resolution + block = SynthesisBlock( + in_channels, + out_channels, + w_dim=w_dim, + resolution=res, + img_channels=img_channels, + is_last=is_last, + use_fp16=use_fp16, + **block_kwargs, + ) + setattr(self, f"b{res}", block) + + def forward(self, x_global, mask, feats, ws, fname=None, **block_kwargs): + img = None + + x, img = self.foreword(x_global, ws, feats, img) + + for res in self.block_resolutions: + block = getattr(self, f"b{res}") + mod_vector0 = [] + mod_vector0.append(ws[:, int(np.log2(res)) * 2 - 5]) + mod_vector0.append(x_global.clone()) + mod_vector0 = torch.cat(mod_vector0, dim=1) + + mod_vector1 = [] + mod_vector1.append(ws[:, int(np.log2(res)) * 2 - 4]) + mod_vector1.append(x_global.clone()) + mod_vector1 = torch.cat(mod_vector1, dim=1) + + mod_vector_rgb = [] + mod_vector_rgb.append(ws[:, int(np.log2(res)) * 2 - 3]) + mod_vector_rgb.append(x_global.clone()) + mod_vector_rgb = torch.cat(mod_vector_rgb, dim=1) + x, img = block( + x, + mask, + feats, + img, + (mod_vector0, mod_vector1, mod_vector_rgb), + fname=fname, + **block_kwargs, + ) + return img + + +class MappingNetwork(torch.nn.Module): + def __init__( + self, + z_dim, # Input latent (Z) dimensionality, 0 = no latent. + c_dim, # Conditioning label (C) dimensionality, 0 = no label. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output, None = do not broadcast. + num_layers=8, # Number of mapping layers. + embed_features=None, # Label embedding dimensionality, None = same as w_dim. 
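# Sketch of how SynthesisNetwork.forward above slices the broadcast latent ws:
# num_ws = 2*log2(img_resolution) - 2, and each block at resolution `res` reads
# three consecutive entries (conv0, conv1, torgb). Hypothetical img_resolution=512.
import numpy as np

img_resolution = 512
res_log2 = int(np.log2(img_resolution))
num_ws = res_log2 * 2 - 2                                  # 16
for res in [2 ** i for i in range(3, res_log2 + 1)]:
    i = int(np.log2(res))
    print(res, (2 * i - 5, 2 * i - 4, 2 * i - 3))          # (1, 2, 3) at res=8 ... (13, 14, 15) at res=512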
+ layer_features=None, # Number of intermediate features in the mapping layers, None = same as w_dim. + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta=0.995, # Decay for tracking the moving average of W during training, None = do not track. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + + if embed_features is None: + embed_features = w_dim + if c_dim == 0: + embed_features = 0 + if layer_features is None: + layer_features = w_dim + features_list = ( + [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim] + ) + + if c_dim > 0: + self.embed = FullyConnectedLayer(c_dim, embed_features) + for idx in range(num_layers): + in_features = features_list[idx] + out_features = features_list[idx + 1] + layer = FullyConnectedLayer( + in_features, + out_features, + activation=activation, + lr_multiplier=lr_multiplier, + ) + setattr(self, f"fc{idx}", layer) + + if num_ws is not None and w_avg_beta is not None: + self.register_buffer("w_avg", torch.zeros([w_dim])) + + def forward( + self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False + ): + # Embed, normalize, and concat inputs. + x = None + with torch.autograd.profiler.record_function("input"): + if self.z_dim > 0: + x = normalize_2nd_moment(z.to(torch.float32)) + if self.c_dim > 0: + y = normalize_2nd_moment(self.embed(c.to(torch.float32))) + x = torch.cat([x, y], dim=1) if x is not None else y + + # Main layers. + for idx in range(self.num_layers): + layer = getattr(self, f"fc{idx}") + x = layer(x) + + # Update moving average of W. + if self.w_avg_beta is not None and self.training and not skip_w_avg_update: + with torch.autograd.profiler.record_function("update_w_avg"): + self.w_avg.copy_( + x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta) + ) + + # Broadcast. + if self.num_ws is not None: + with torch.autograd.profiler.record_function("broadcast"): + x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + + # Apply truncation. + if truncation_psi != 1: + with torch.autograd.profiler.record_function("truncate"): + assert self.w_avg_beta is not None + if self.num_ws is None or truncation_cutoff is None: + x = self.w_avg.lerp(x, truncation_psi) + else: + x[:, :truncation_cutoff] = self.w_avg.lerp( + x[:, :truncation_cutoff], truncation_psi + ) + return x + + +class Generator(torch.nn.Module): + def __init__( + self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + encoder_kwargs={}, # Arguments for EncoderNetwork. + mapping_kwargs={}, # Arguments for MappingNetwork. + synthesis_kwargs={}, # Arguments for SynthesisNetwork. 
+ ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.encoder = EncoderNetwork( + c_dim=c_dim, + z_dim=z_dim, + img_resolution=img_resolution, + img_channels=img_channels, + **encoder_kwargs, + ) + self.synthesis = SynthesisNetwork( + z_dim=z_dim, + w_dim=w_dim, + img_resolution=img_resolution, + img_channels=img_channels, + **synthesis_kwargs, + ) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork( + z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs + ) + + def forward( + self, + img, + c, + fname=None, + truncation_psi=1, + truncation_cutoff=None, + **synthesis_kwargs, + ): + mask = img[:, -1].unsqueeze(1) + x_global, z, feats = self.encoder(img, c) + ws = self.mapping( + z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff + ) + img = self.synthesis(x_global, mask, feats, ws, fname=fname, **synthesis_kwargs) + return img + + +FCF_MODEL_URL = os.environ.get( + "FCF_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_fcf/places_512_G.pth", +) +FCF_MODEL_MD5 = os.environ.get("FCF_MODEL_MD5", "3323152bc01bf1c56fd8aba74435a211") + + +class FcF(InpaintModel): + name = "fcf" + min_size = 512 + pad_mod = 512 + pad_to_square = True + is_erase_model = True + + def init_model(self, device, **kwargs): + seed = 0 + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + kwargs = { + "channel_base": 1 * 32768, + "channel_max": 512, + "num_fp16_res": 4, + "conv_clamp": 256, + } + G = Generator( + z_dim=512, + c_dim=0, + w_dim=512, + img_resolution=512, + img_channels=3, + synthesis_kwargs=kwargs, + encoder_kwargs=kwargs, + mapping_kwargs={"num_layers": 2}, + ) + self.model = load_model(G, FCF_MODEL_URL, device, FCF_MODEL_MD5) + self.label = torch.zeros([1, self.model.c_dim], device=device) + + @staticmethod + def download(): + download_model(FCF_MODEL_URL, FCF_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + return os.path.exists(get_cache_path_by_url(FCF_MODEL_URL)) + + @torch.no_grad() + def __call__(self, image, mask, config: InpaintRequest): + """ + images: [H, W, C] RGB, not normalized + masks: [H, W] + return: BGR IMAGE + """ + if image.shape[0] == 512 and image.shape[1] == 512: + return self._pad_forward(image, mask, config) + + boxes = boxes_from_mask(mask) + crop_result = [] + config.hd_strategy_crop_margin = 128 + for box in boxes: + crop_image, crop_mask, crop_box = self._crop_box(image, mask, box, config) + origin_size = crop_image.shape[:2] + resize_image = resize_max_size(crop_image, size_limit=512) + resize_mask = resize_max_size(crop_mask, size_limit=512) + inpaint_result = self._pad_forward(resize_image, resize_mask, config) + + # only paste masked area result + inpaint_result = cv2.resize( + inpaint_result, + (origin_size[1], origin_size[0]), + interpolation=cv2.INTER_CUBIC, + ) + + original_pixel_indices = crop_mask < 127 + inpaint_result[original_pixel_indices] = crop_image[:, :, ::-1][ + original_pixel_indices + ] + + crop_result.append((inpaint_result, crop_box)) + + inpaint_result = image[:, :, ::-1].copy() + for crop_image, crop_box in crop_result: + x1, y1, x2, y2 = crop_box + inpaint_result[y1:y2, x1:x2, :] = crop_image + + return inpaint_result + + def forward(self, image, mask, config: InpaintRequest): + """Input images and output 
images have same size + images: [H, W, C] RGB + masks: [H, W] mask area == 255 + return: BGR IMAGE + """ + + image = norm_img(image) # [0, 1] + image = image * 2 - 1 # [0, 1] -> [-1, 1] + mask = (mask > 120) * 255 + mask = norm_img(mask) + + image = torch.from_numpy(image).unsqueeze(0).to(self.device) + mask = torch.from_numpy(mask).unsqueeze(0).to(self.device) + + erased_img = image * (1 - mask) + input_image = torch.cat([0.5 - mask, erased_img], dim=1) + + output = self.model( + input_image, self.label, truncation_psi=0.1, noise_mode="none" + ) + output = ( + (output.permute(0, 2, 3, 1) * 127.5 + 127.5) + .round() + .clamp(0, 255) + .to(torch.uint8) + ) + output = output[0].cpu().numpy() + cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return cur_res diff --git a/inpaint/model/helper/__init__.py b/inpaint/model/helper/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/helper/controlnet_preprocess.py b/inpaint/model/helper/controlnet_preprocess.py new file mode 100644 index 0000000..75c409f --- /dev/null +++ b/inpaint/model/helper/controlnet_preprocess.py @@ -0,0 +1,68 @@ +import torch +import PIL +import cv2 +from PIL import Image +import numpy as np + +from iopaint.helper import pad_img_to_modulo + + +def make_canny_control_image(image: np.ndarray) -> Image: + canny_image = cv2.Canny(image, 100, 200) + canny_image = canny_image[:, :, None] + canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2) + canny_image = PIL.Image.fromarray(canny_image) + control_image = canny_image + return control_image + + +def make_openpose_control_image(image: np.ndarray) -> Image: + from controlnet_aux import OpenposeDetector + + processor = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") + control_image = processor(image, hand_and_face=True) + return control_image + + +def resize_image(input_image, resolution): + H, W, C = input_image.shape + H = float(H) + W = float(W) + k = float(resolution) / min(H, W) + H *= k + W *= k + H = int(np.round(H / 64.0)) * 64 + W = int(np.round(W / 64.0)) * 64 + img = cv2.resize( + input_image, + (W, H), + interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA, + ) + return img + + +def make_depth_control_image(image: np.ndarray) -> Image: + from controlnet_aux import MidasDetector + + midas = MidasDetector.from_pretrained("lllyasviel/Annotators") + + origin_height, origin_width = image.shape[:2] + pad_image = pad_img_to_modulo(image, mod=64, square=False, min_size=512) + depth_image = midas(pad_image) + depth_image = depth_image[0:origin_height, 0:origin_width] + depth_image = depth_image[:, :, None] + depth_image = np.concatenate([depth_image, depth_image, depth_image], axis=2) + control_image = PIL.Image.fromarray(depth_image) + return control_image + + +def make_inpaint_control_image(image: np.ndarray, mask: np.ndarray) -> torch.Tensor: + """ + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + """ + image = image.astype(np.float32) / 255.0 + image[mask[:, :, -1] > 128] = -1.0 # set as masked pixel + image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return image diff --git a/inpaint/model/helper/cpu_text_encoder.py b/inpaint/model/helper/cpu_text_encoder.py new file mode 100644 index 0000000..116eb48 --- /dev/null +++ b/inpaint/model/helper/cpu_text_encoder.py @@ -0,0 +1,41 @@ +import torch +from transformers import PreTrainedModel + +from ..utils import torch_gc + + +class CPUTextEncoderWrapper(PreTrainedModel): + def __init__(self, 
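# Plain sketch of the multiple-of-64 rounding done by resize_image above
# (controlnet preprocessing), for a hypothetical 720x1280 image and resolution=512.
import numpy as np

H, W, resolution = 720.0, 1280.0, 512
k = resolution / min(H, W)                  # ~0.711
H2 = int(np.round(H * k / 64.0)) * 64       # 512
W2 = int(np.round(W * k / 64.0)) * 64       # 896
print(H2, W2)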
text_encoder, torch_dtype): + super().__init__(text_encoder.config) + self.config = text_encoder.config + self._device = text_encoder.device + # cpu not support float16 + self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True) + self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True) + self.torch_dtype = torch_dtype + del text_encoder + torch_gc() + + def __call__(self, x, **kwargs): + input_device = x.device + original_output = self.text_encoder(x.to(self.text_encoder.device), **kwargs) + for k, v in original_output.items(): + if isinstance(v, tuple): + original_output[k] = [ + v[i].to(input_device).to(self.torch_dtype) for i in range(len(v)) + ] + else: + original_output[k] = v.to(input_device).to(self.torch_dtype) + return original_output + + @property + def dtype(self): + return self.torch_dtype + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + return self._device \ No newline at end of file diff --git a/inpaint/model/helper/g_diffuser_bot.py b/inpaint/model/helper/g_diffuser_bot.py new file mode 100644 index 0000000..79b19aa --- /dev/null +++ b/inpaint/model/helper/g_diffuser_bot.py @@ -0,0 +1,62 @@ +import cv2 +import numpy as np + + +def expand_image(cv2_img, top: int, right: int, bottom: int, left: int): + assert cv2_img.shape[2] == 3 + origin_h, origin_w = cv2_img.shape[:2] + + # TODO: which is better? + # new_img = np.ones((new_height, new_width, 3), np.uint8) * 255 + new_img = cv2.copyMakeBorder( + cv2_img, top, bottom, left, right, cv2.BORDER_REPLICATE + ) + + inner_padding_left = 0 if left > 0 else 0 + inner_padding_right = 0 if right > 0 else 0 + inner_padding_top = 0 if top > 0 else 0 + inner_padding_bottom = 0 if bottom > 0 else 0 + + mask_image = np.zeros( + ( + origin_h - inner_padding_top - inner_padding_bottom, + origin_w - inner_padding_left - inner_padding_right, + ), + np.uint8, + ) + mask_image = cv2.copyMakeBorder( + mask_image, + top + inner_padding_top, + bottom + inner_padding_bottom, + left + inner_padding_left, + right + inner_padding_right, + cv2.BORDER_CONSTANT, + value=255, + ) + # k = 2*int(min(origin_h, origin_w) // 6)+1 + # k = 7 + # mask_image = cv2.GaussianBlur(mask_image, (k, k), 0) + return new_img, mask_image + + +if __name__ == "__main__": + from pathlib import Path + + current_dir = Path(__file__).parent.absolute().resolve() + image_path = "/Users/cwq/code/github/IOPaint/iopaint/tests/bunny.jpeg" + init_image = cv2.imread(str(image_path)) + init_image, mask_image = expand_image( + init_image, + top=0, + right=0, + bottom=0, + left=100, + softness=20, + space=20, + ) + print(mask_image.dtype, mask_image.min(), mask_image.max()) + print(init_image.dtype, init_image.min(), init_image.max()) + mask_image = mask_image.astype(np.uint8) + init_image = init_image.astype(np.uint8) + cv2.imwrite("expanded_image.png", init_image) + cv2.imwrite("expanded_mask.png", mask_image) diff --git a/inpaint/model/instruct_pix2pix.py b/inpaint/model/instruct_pix2pix.py new file mode 100644 index 0000000..fc8cd26 --- /dev/null +++ b/inpaint/model/instruct_pix2pix.py @@ -0,0 +1,64 @@ +import PIL.Image +import cv2 +import torch +from loguru import logger + +from iopaint.const import INSTRUCT_PIX2PIX_NAME +from .base import DiffusionInpaintModel +from iopaint.schema import InpaintRequest +from .utils import get_torch_dtype, enable_low_mem, is_local_files_only + + +class 
InstructPix2Pix(DiffusionInpaintModel): + name = INSTRUCT_PIX2PIX_NAME + pad_mod = 8 + min_size = 512 + + def init_model(self, device: torch.device, **kwargs): + from diffusers import StableDiffusionInstructPix2PixPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + model_kwargs = {"local_files_only": is_local_files_only(**kwargs)} + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained( + self.name, variant="fp16", torch_dtype=torch_dtype, **model_kwargs + ) + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + edit = pipe(prompt, image=image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7).images[0] + """ + output = self.model( + image=PIL.Image.fromarray(image), + prompt=config.prompt, + negative_prompt=config.negative_prompt, + num_inference_steps=config.sd_steps, + image_guidance_scale=config.p2p_image_guidance_scale, + guidance_scale=config.sd_guidance_scale, + output_type="np", + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/kandinsky.py b/inpaint/model/kandinsky.py new file mode 100644 index 0000000..1a0bf1c --- /dev/null +++ b/inpaint/model/kandinsky.py @@ -0,0 +1,65 @@ +import PIL.Image +import cv2 +import numpy as np +import torch + +from iopaint.const import KANDINSKY22_NAME +from .base import DiffusionInpaintModel +from iopaint.schema import InpaintRequest +from .utils import get_torch_dtype, enable_low_mem, is_local_files_only + + +class Kandinsky(DiffusionInpaintModel): + pad_mod = 64 + min_size = 512 + + def init_model(self, device: torch.device, **kwargs): + from diffusers import AutoPipelineForInpainting + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + model_kwargs = { + "torch_dtype": torch_dtype, + "local_files_only": is_local_files_only(**kwargs), + } + self.model = AutoPipelineForInpainting.from_pretrained( + self.name, **model_kwargs + ).to(device) + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + generator = torch.manual_seed(config.sd_seed) + mask = mask.astype(np.float32) / 255 + img_h, img_w = image.shape[:2] + + # kandinsky 没有 strength + output = self.model( + prompt=config.prompt, + negative_prompt=config.negative_prompt, + image=PIL.Image.fromarray(image), + mask_image=mask[:, :, 0], + height=img_h, + width=img_w, + num_inference_steps=config.sd_steps, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + 
generator=generator, + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output + + +class Kandinsky22(Kandinsky): + name = KANDINSKY22_NAME diff --git a/inpaint/model/lama.py b/inpaint/model/lama.py new file mode 100644 index 0000000..7aba242 --- /dev/null +++ b/inpaint/model/lama.py @@ -0,0 +1,57 @@ +import os + +import cv2 +import numpy as np +import torch + +from iopaint.helper import ( + norm_img, + get_cache_path_by_url, + load_jit_model, + download_model, +) +from iopaint.schema import InpaintRequest +from .base import InpaintModel + +LAMA_MODEL_URL = os.environ.get( + "LAMA_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt", +) +LAMA_MODEL_MD5 = os.environ.get("LAMA_MODEL_MD5", "e3aa4aaa15225a33ec84f9f4bc47e500") + + +class LaMa(InpaintModel): + name = "lama" + pad_mod = 8 + is_erase_model = True + + @staticmethod + def download(): + download_model(LAMA_MODEL_URL, LAMA_MODEL_MD5) + + def init_model(self, device, **kwargs): + self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval() + + @staticmethod + def is_downloaded() -> bool: + return os.path.exists(get_cache_path_by_url(LAMA_MODEL_URL)) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W] + return: BGR IMAGE + """ + image = norm_img(image) + mask = norm_img(mask) + + mask = (mask > 0) * 1 + image = torch.from_numpy(image).unsqueeze(0).to(self.device) + mask = torch.from_numpy(mask).unsqueeze(0).to(self.device) + + inpainted_image = self.model(image, mask) + + cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy() + cur_res = np.clip(cur_res * 255, 0, 255).astype("uint8") + cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) + return cur_res diff --git a/inpaint/model/ldm.py b/inpaint/model/ldm.py new file mode 100644 index 0000000..19e51a3 --- /dev/null +++ b/inpaint/model/ldm.py @@ -0,0 +1,336 @@ +import os + +import numpy as np +import torch +from loguru import logger + +from .base import InpaintModel +from .ddim_sampler import DDIMSampler +from .plms_sampler import PLMSSampler +from iopaint.schema import InpaintRequest, LDMSampler + +torch.manual_seed(42) +import torch.nn as nn +from iopaint.helper import ( + download_model, + norm_img, + get_cache_path_by_url, + load_jit_model, +) +from .utils import ( + make_beta_schedule, + timestep_embedding, +) + +LDM_ENCODE_MODEL_URL = os.environ.get( + "LDM_ENCODE_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_encode.pt", +) +LDM_ENCODE_MODEL_MD5 = os.environ.get( + "LDM_ENCODE_MODEL_MD5", "23239fc9081956a3e70de56472b3f296" +) + +LDM_DECODE_MODEL_URL = os.environ.get( + "LDM_DECODE_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_decode.pt", +) +LDM_DECODE_MODEL_MD5 = os.environ.get( + "LDM_DECODE_MODEL_MD5", "fe419cd15a750d37a4733589d0d3585c" +) + +LDM_DIFFUSION_MODEL_URL = os.environ.get( + "LDM_DIFFUSION_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_ldm/diffusion.pt", +) + +LDM_DIFFUSION_MODEL_MD5 = os.environ.get( + "LDM_DIFFUSION_MODEL_MD5", "b0afda12bf790c03aba2a7431f11d22d" +) + + +class DDPM(nn.Module): + # classic DDPM with Gaussian diffusion, in image space + def __init__( + self, + device, + timesteps=1000, + beta_schedule="linear", + linear_start=0.0015, + linear_end=0.0205, + cosine_s=0.008, + original_elbo_weight=0.0, + 
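# Pre-processing sketch matching LaMa.forward above, with norm_img's behaviour
# (channels-first, scaled to [0, 1]) re-implemented inline as an assumption,
# since that helper lives elsewhere in the patch. Hypothetical 256x256 input.
import numpy as np
import torch

image = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
mask = np.zeros((256, 256), dtype=np.uint8)
mask[64:128, 64:128] = 255                                        # area to erase

img_t = torch.from_numpy(image.transpose(2, 0, 1).astype(np.float32) / 255).unsqueeze(0)
mask_t = torch.from_numpy((mask[None] > 0).astype(np.float32)).unsqueeze(0)
print(img_t.shape, mask_t.shape)   # torch.Size([1, 3, 256, 256]) torch.Size([1, 1, 256, 256])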
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1.0, + parameterization="eps", # all assuming fixed variance schedules + use_positional_encodings=False, + ): + super().__init__() + self.device = device + self.parameterization = parameterization + self.use_positional_encodings = use_positional_encodings + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + self.register_schedule( + beta_schedule=beta_schedule, + timesteps=timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + + def register_schedule( + self, + given_betas=None, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + betas = make_beta_schedule( + self.device, + beta_schedule, + timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) + + (timesteps,) = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert ( + alphas_cumprod.shape[0] == self.num_timesteps + ), "alphas have to be defined for each timestep" + + to_torch = lambda x: torch.tensor(x, dtype=torch.float32).to(self.device) + + self.register_buffer("betas", to_torch(betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) + self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) + ) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * ( + 1.0 - alphas_cumprod_prev + ) / (1.0 - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer("posterior_variance", to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer( + "posterior_log_variance_clipped", + to_torch(np.log(np.maximum(posterior_variance, 1e-20))), + ) + self.register_buffer( + "posterior_mean_coef1", + to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), + ) + self.register_buffer( + "posterior_mean_coef2", + to_torch( + (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) + ), + ) + + if self.parameterization == "eps": + lvlb_weights = self.betas**2 / ( + 2 + * self.posterior_variance + * to_torch(alphas) + * (1 - self.alphas_cumprod) + ) + elif self.parameterization == "x0": + lvlb_weights = ( + 0.5 + * np.sqrt(torch.Tensor(alphas_cumprod)) + / (2.0 * 1 - torch.Tensor(alphas_cumprod)) + ) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + +class LatentDiffusion(DDPM): + def __init__( + self, + diffusion_model, + device, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + scale_factor=1.0, + scale_by_std=False, + *args, + **kwargs, + ): + self.num_timesteps_cond = 1 + self.scale_by_std = scale_by_std + super().__init__(device, *args, **kwargs) + self.diffusion_model = diffusion_model + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + self.num_downs = 2 + self.scale_factor = scale_factor + + def make_cond_schedule( + self, + ): + self.cond_ids = torch.full( + size=(self.num_timesteps,), + fill_value=self.num_timesteps - 1, + dtype=torch.long, + ) + ids = torch.round( + torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) + ).long() + self.cond_ids[: self.num_timesteps_cond] = ids + + def register_schedule( + self, + given_betas=None, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + super().register_schedule( + given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s + ) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def apply_model(self, x_noisy, t, cond): + # x_recon = self.model(x_noisy, t, cond['c_concat'][0]) # cond['c_concat'][0].shape 1,4,128,128 + t_emb = timestep_embedding(x_noisy.device, t, 256, repeat_only=False) + x_recon = self.diffusion_model(x_noisy, t_emb, cond) + return x_recon + + +class LDM(InpaintModel): + name = "ldm" + pad_mod = 32 + is_erase_model = True + + def __init__(self, device, fp16: bool = True, **kwargs): + self.fp16 = fp16 + super().__init__(device) + self.device = device + + def init_model(self, device, **kwargs): + self.diffusion_model = load_jit_model( + LDM_DIFFUSION_MODEL_URL, device, LDM_DIFFUSION_MODEL_MD5 + ) + self.cond_stage_model_decode = load_jit_model( + LDM_DECODE_MODEL_URL, device, LDM_DECODE_MODEL_MD5 + ) + self.cond_stage_model_encode = load_jit_model( + LDM_ENCODE_MODEL_URL, device, LDM_ENCODE_MODEL_MD5 + ) + if self.fp16 and "cuda" in str(device): + self.diffusion_model = self.diffusion_model.half() + self.cond_stage_model_decode = self.cond_stage_model_decode.half() + self.cond_stage_model_encode = self.cond_stage_model_encode.half() + + 
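# Tiny numeric sketch of the posterior-variance bookkeeping in
# DDPM.register_schedule above, assuming the usual LDM "linear" beta schedule
# (sqrt-space linspace, squared) and the default v_posterior=0; 5 steps only.
import numpy as np

betas = np.linspace(0.0015 ** 0.5, 0.0205 ** 0.5, 5) ** 2
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
print(posterior_variance.round(6))   # first entry is 0, hence the clipped log-variance buffer above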
self.model = LatentDiffusion(self.diffusion_model, device) + + @staticmethod + def download(): + download_model(LDM_DIFFUSION_MODEL_URL, LDM_DIFFUSION_MODEL_MD5) + download_model(LDM_DECODE_MODEL_URL, LDM_DECODE_MODEL_MD5) + download_model(LDM_ENCODE_MODEL_URL, LDM_ENCODE_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + model_paths = [ + get_cache_path_by_url(LDM_DIFFUSION_MODEL_URL), + get_cache_path_by_url(LDM_DECODE_MODEL_URL), + get_cache_path_by_url(LDM_ENCODE_MODEL_URL), + ] + return all([os.path.exists(it) for it in model_paths]) + + @torch.cuda.amp.autocast() + def forward(self, image, mask, config: InpaintRequest): + """ + image: [H, W, C] RGB + mask: [H, W, 1] + return: BGR IMAGE + """ + # image [1,3,512,512] float32 + # mask: [1,1,512,512] float32 + # masked_image: [1,3,512,512] float32 + if config.ldm_sampler == LDMSampler.ddim: + sampler = DDIMSampler(self.model) + elif config.ldm_sampler == LDMSampler.plms: + sampler = PLMSSampler(self.model) + else: + raise ValueError() + + steps = config.ldm_steps + image = norm_img(image) + mask = norm_img(mask) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + image = torch.from_numpy(image).unsqueeze(0).to(self.device) + mask = torch.from_numpy(mask).unsqueeze(0).to(self.device) + masked_image = (1 - mask) * image + + mask = self._norm(mask) + masked_image = self._norm(masked_image) + + c = self.cond_stage_model_encode(masked_image) + torch.cuda.empty_cache() + + cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:]) # 1,1,128,128 + c = torch.cat((c, cc), dim=1) # 1,4,128,128 + + shape = (c.shape[1] - 1,) + c.shape[2:] + samples_ddim = sampler.sample( + steps=steps, conditioning=c, batch_size=c.shape[0], shape=shape + ) + torch.cuda.empty_cache() + x_samples_ddim = self.cond_stage_model_decode( + samples_ddim + ) # samples_ddim: 1, 3, 128, 128 float32 + torch.cuda.empty_cache() + + # image = torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0) + # mask = torch.clamp((mask + 1.0) / 2.0, min=0.0, max=1.0) + inpainted_image = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) + + # inpainted = (1 - mask) * image + mask * predicted_image + inpainted_image = inpainted_image.cpu().numpy().transpose(0, 2, 3, 1)[0] * 255 + inpainted_image = inpainted_image.astype(np.uint8)[:, :, ::-1] + return inpainted_image + + def _norm(self, tensor): + return tensor * 2.0 - 1.0 diff --git a/inpaint/model/manga.py b/inpaint/model/manga.py new file mode 100644 index 0000000..1f58251 --- /dev/null +++ b/inpaint/model/manga.py @@ -0,0 +1,97 @@ +import os +import random + +import cv2 +import numpy as np +import torch +import time +from loguru import logger + +from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model +from .base import InpaintModel +from iopaint.schema import InpaintRequest + + +MANGA_INPAINTOR_MODEL_URL = os.environ.get( + "MANGA_INPAINTOR_MODEL_URL", + "https://github.com/Sanster/models/releases/download/manga/manga_inpaintor.jit", +) +MANGA_INPAINTOR_MODEL_MD5 = os.environ.get( + "MANGA_INPAINTOR_MODEL_MD5", "7d8b269c4613b6b3768af714610da86c" +) + +MANGA_LINE_MODEL_URL = os.environ.get( + "MANGA_LINE_MODEL_URL", + "https://github.com/Sanster/models/releases/download/manga/erika.jit", +) +MANGA_LINE_MODEL_MD5 = os.environ.get( + "MANGA_LINE_MODEL_MD5", "0c926d5a4af8450b0d00bc5b9a095644" +) + + +class Manga(InpaintModel): + name = "manga" + pad_mod = 16 + is_erase_model = True + + def init_model(self, device, **kwargs): + self.inpaintor_model = load_jit_model( + MANGA_INPAINTOR_MODEL_URL, 
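# Shape sketch for the conditioning tensor assembled in LDM.forward above
# (hypothetical 512x512 input; the jit encoder used here yields 128x128 latents).
import torch

c = torch.randn(1, 3, 128, 128)                       # cond_stage_model_encode output
mask = torch.rand(1, 1, 512, 512)
cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:])
cond = torch.cat((c, cc), dim=1)                      # (1, 4, 128, 128)
shape = (cond.shape[1] - 1,) + tuple(cond.shape[2:])  # (3, 128, 128) latent shape for the sampler
print(cond.shape, shape)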
device, MANGA_INPAINTOR_MODEL_MD5 + ) + self.line_model = load_jit_model( + MANGA_LINE_MODEL_URL, device, MANGA_LINE_MODEL_MD5 + ) + self.seed = 42 + + @staticmethod + def download(): + download_model(MANGA_INPAINTOR_MODEL_URL, MANGA_INPAINTOR_MODEL_MD5) + download_model(MANGA_LINE_MODEL_URL, MANGA_LINE_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + model_paths = [ + get_cache_path_by_url(MANGA_INPAINTOR_MODEL_URL), + get_cache_path_by_url(MANGA_LINE_MODEL_URL), + ] + return all([os.path.exists(it) for it in model_paths]) + + def forward(self, image, mask, config: InpaintRequest): + """ + image: [H, W, C] RGB + mask: [H, W, 1] + return: BGR IMAGE + """ + seed = self.seed + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + gray_img = torch.from_numpy( + gray_img[np.newaxis, np.newaxis, :, :].astype(np.float32) + ).to(self.device) + start = time.time() + lines = self.line_model(gray_img) + torch.cuda.empty_cache() + lines = torch.clamp(lines, 0, 255) + logger.info(f"erika_model time: {time.time() - start}") + + mask = torch.from_numpy(mask[np.newaxis, :, :, :]).to(self.device) + mask = mask.permute(0, 3, 1, 2) + mask = torch.where(mask > 0.5, 1.0, 0.0) + noise = torch.randn_like(mask) + ones = torch.ones_like(mask) + + gray_img = gray_img / 255 * 2 - 1.0 + lines = lines / 255 * 2 - 1.0 + + start = time.time() + inpainted_image = self.inpaintor_model(gray_img, lines, mask, noise, ones) + logger.info(f"image_inpaintor_model time: {time.time() - start}") + + cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy() + cur_res = (cur_res * 127.5 + 127.5).astype(np.uint8) + cur_res = cv2.cvtColor(cur_res, cv2.COLOR_GRAY2BGR) + return cur_res diff --git a/inpaint/model/mat.py b/inpaint/model/mat.py new file mode 100644 index 0000000..0c5360f --- /dev/null +++ b/inpaint/model/mat.py @@ -0,0 +1,1945 @@ +import os +import random + +import cv2 +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from iopaint.helper import ( + load_model, + get_cache_path_by_url, + norm_img, + download_model, +) +from iopaint.schema import InpaintRequest +from .base import InpaintModel +from .utils import ( + setup_filter, + Conv2dLayer, + FullyConnectedLayer, + conv2d_resample, + bias_act, + upsample2d, + activation_funcs, + MinibatchStdLayer, + to_2tuple, + normalize_2nd_moment, + set_seed, +) + + +class ModulatedConv2d(nn.Module): + def __init__( + self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + kernel_size, # Width and height of the convolution kernel. + style_dim, # dimension of the style code + demodulate=True, # perfrom demodulation + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output to +-X, None = disable clamping. 
+ ): + super().__init__() + self.demodulate = demodulate + + self.weight = torch.nn.Parameter( + torch.randn([1, out_channels, in_channels, kernel_size, kernel_size]) + ) + self.out_channels = out_channels + self.kernel_size = kernel_size + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2)) + self.padding = self.kernel_size // 2 + self.up = up + self.down = down + self.register_buffer("resample_filter", setup_filter(resample_filter)) + self.conv_clamp = conv_clamp + + self.affine = FullyConnectedLayer(style_dim, in_channels, bias_init=1) + + def forward(self, x, style): + batch, in_channels, height, width = x.shape + style = self.affine(style).view(batch, 1, in_channels, 1, 1) + weight = self.weight * self.weight_gain * style + + if self.demodulate: + decoefs = (weight.pow(2).sum(dim=[2, 3, 4]) + 1e-8).rsqrt() + weight = weight * decoefs.view(batch, self.out_channels, 1, 1, 1) + + weight = weight.view( + batch * self.out_channels, in_channels, self.kernel_size, self.kernel_size + ) + x = x.view(1, batch * in_channels, height, width) + x = conv2d_resample( + x=x, + w=weight, + f=self.resample_filter, + up=self.up, + down=self.down, + padding=self.padding, + groups=batch, + ) + out = x.view(batch, self.out_channels, *x.shape[2:]) + + return out + + +class StyleConv(torch.nn.Module): + def __init__( + self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + style_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this layer. + kernel_size=3, # Convolution kernel size. + up=1, # Integer upsampling factor. + use_noise=False, # Enable noise input? + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. 
+ demodulate=True, # perform demodulation + ): + super().__init__() + + self.conv = ModulatedConv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + style_dim=style_dim, + demodulate=demodulate, + up=up, + resample_filter=resample_filter, + conv_clamp=conv_clamp, + ) + + self.use_noise = use_noise + self.resolution = resolution + if use_noise: + self.register_buffer("noise_const", torch.randn([resolution, resolution])) + self.noise_strength = torch.nn.Parameter(torch.zeros([])) + + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + self.activation = activation + self.act_gain = activation_funcs[activation].def_gain + self.conv_clamp = conv_clamp + + def forward(self, x, style, noise_mode="random", gain=1): + x = self.conv(x, style) + + assert noise_mode in ["random", "const", "none"] + + if self.use_noise: + if noise_mode == "random": + xh, xw = x.size()[-2:] + noise = ( + torch.randn([x.shape[0], 1, xh, xw], device=x.device) + * self.noise_strength + ) + if noise_mode == "const": + noise = self.noise_const * self.noise_strength + x = x + noise + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + out = bias_act( + x, self.bias, act=self.activation, gain=act_gain, clamp=act_clamp + ) + + return out + + +class ToRGB(torch.nn.Module): + def __init__( + self, + in_channels, + out_channels, + style_dim, + kernel_size=1, + resample_filter=[1, 3, 3, 1], + conv_clamp=None, + demodulate=False, + ): + super().__init__() + + self.conv = ModulatedConv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + style_dim=style_dim, + demodulate=demodulate, + resample_filter=resample_filter, + conv_clamp=conv_clamp, + ) + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + self.register_buffer("resample_filter", setup_filter(resample_filter)) + self.conv_clamp = conv_clamp + + def forward(self, x, style, skip=None): + x = self.conv(x, style) + out = bias_act(x, self.bias, clamp=self.conv_clamp) + + if skip is not None: + if skip.shape != out.shape: + skip = upsample2d(skip, self.resample_filter) + out = out + skip + + return out + + +def get_style_code(a, b): + return torch.cat([a, b], dim=1) + + +class DecBlockFirst(nn.Module): + def __init__( + self, + in_channels, + out_channels, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ): + super().__init__() + self.fc = FullyConnectedLayer( + in_features=in_channels * 2, + out_features=in_channels * 4**2, + activation=activation, + ) + self.conv = StyleConv( + in_channels=in_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=4, + kernel_size=3, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.toRGB = ToRGB( + in_channels=out_channels, + out_channels=img_channels, + style_dim=style_dim, + kernel_size=1, + demodulate=False, + ) + + def forward(self, x, ws, gs, E_features, noise_mode="random"): + x = self.fc(x).view(x.shape[0], -1, 4, 4) + x = x + E_features[2] + style = get_style_code(ws[:, 0], gs) + x = self.conv(x, style, noise_mode=noise_mode) + style = get_style_code(ws[:, 1], gs) + img = self.toRGB(x, style, skip=None) + + return x, img + + +class DecBlockFirstV2(nn.Module): + def __init__( + self, + in_channels, + out_channels, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ): + super().__init__() + self.conv0 = Conv2dLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + 
activation=activation, + ) + self.conv1 = StyleConv( + in_channels=in_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=4, + kernel_size=3, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.toRGB = ToRGB( + in_channels=out_channels, + out_channels=img_channels, + style_dim=style_dim, + kernel_size=1, + demodulate=False, + ) + + def forward(self, x, ws, gs, E_features, noise_mode="random"): + # x = self.fc(x).view(x.shape[0], -1, 4, 4) + x = self.conv0(x) + x = x + E_features[2] + style = get_style_code(ws[:, 0], gs) + x = self.conv1(x, style, noise_mode=noise_mode) + style = get_style_code(ws[:, 1], gs) + img = self.toRGB(x, style, skip=None) + + return x, img + + +class DecBlock(nn.Module): + def __init__( + self, + res, + in_channels, + out_channels, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ): # res = 2, ..., resolution_log2 + super().__init__() + self.res = res + + self.conv0 = StyleConv( + in_channels=in_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + up=2, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.conv1 = StyleConv( + in_channels=out_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.toRGB = ToRGB( + in_channels=out_channels, + out_channels=img_channels, + style_dim=style_dim, + kernel_size=1, + demodulate=False, + ) + + def forward(self, x, img, ws, gs, E_features, noise_mode="random"): + style = get_style_code(ws[:, self.res * 2 - 5], gs) + x = self.conv0(x, style, noise_mode=noise_mode) + x = x + E_features[self.res] + style = get_style_code(ws[:, self.res * 2 - 4], gs) + x = self.conv1(x, style, noise_mode=noise_mode) + style = get_style_code(ws[:, self.res * 2 - 3], gs) + img = self.toRGB(x, style, skip=img) + + return x, img + + +class MappingNet(torch.nn.Module): + def __init__( + self, + z_dim, # Input latent (Z) dimensionality, 0 = no latent. + c_dim, # Conditioning label (C) dimensionality, 0 = no label. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output, None = do not broadcast. + num_layers=8, # Number of mapping layers. + embed_features=None, # Label embedding dimensionality, None = same as w_dim. + layer_features=None, # Number of intermediate features in the mapping layers, None = same as w_dim. + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta=0.995, # Decay for tracking the moving average of W during training, None = do not track. 
+ torch_dtype=torch.float32, + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + self.torch_dtype = torch_dtype + + if embed_features is None: + embed_features = w_dim + if c_dim == 0: + embed_features = 0 + if layer_features is None: + layer_features = w_dim + features_list = ( + [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim] + ) + + if c_dim > 0: + self.embed = FullyConnectedLayer(c_dim, embed_features) + for idx in range(num_layers): + in_features = features_list[idx] + out_features = features_list[idx + 1] + layer = FullyConnectedLayer( + in_features, + out_features, + activation=activation, + lr_multiplier=lr_multiplier, + ) + setattr(self, f"fc{idx}", layer) + + if num_ws is not None and w_avg_beta is not None: + self.register_buffer("w_avg", torch.zeros([w_dim])) + + def forward( + self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False + ): + # Embed, normalize, and concat inputs. + x = None + if self.z_dim > 0: + x = normalize_2nd_moment(z) + if self.c_dim > 0: + y = normalize_2nd_moment(self.embed(c)) + x = torch.cat([x, y], dim=1) if x is not None else y + + # Main layers. + for idx in range(self.num_layers): + layer = getattr(self, f"fc{idx}") + x = layer(x) + + # Update moving average of W. + if self.w_avg_beta is not None and self.training and not skip_w_avg_update: + self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) + + # Broadcast. + if self.num_ws is not None: + x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + + # Apply truncation. + if truncation_psi != 1: + assert self.w_avg_beta is not None + if self.num_ws is None or truncation_cutoff is None: + x = self.w_avg.lerp(x, truncation_psi) + else: + x[:, :truncation_cutoff] = self.w_avg.lerp( + x[:, :truncation_cutoff], truncation_psi + ) + + return x + + +class DisFromRGB(nn.Module): + def __init__( + self, in_channels, out_channels, activation + ): # res = 2, ..., resolution_log2 + super().__init__() + self.conv = Conv2dLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + activation=activation, + ) + + def forward(self, x): + return self.conv(x) + + +class DisBlock(nn.Module): + def __init__( + self, in_channels, out_channels, activation + ): # res = 2, ..., resolution_log2 + super().__init__() + self.conv0 = Conv2dLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + activation=activation, + ) + self.conv1 = Conv2dLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + down=2, + activation=activation, + ) + self.skip = Conv2dLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + down=2, + bias=False, + ) + + def forward(self, x): + skip = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x) + x = self.conv1(x, gain=np.sqrt(0.5)) + out = skip + x + + return out + + +class Discriminator(torch.nn.Module): + def __init__( + self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + channel_decay=1, + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + activation="lrelu", + mbstd_group_size=4, # Group size for the minibatch standard deviation layer, None = entire minibatch. 
+ mbstd_num_channels=1, # Number of features for the minibatch standard deviation layer, 0 = disable. + ): + super().__init__() + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + + resolution_log2 = int(np.log2(img_resolution)) + assert img_resolution == 2**resolution_log2 and img_resolution >= 4 + self.resolution_log2 = resolution_log2 + + def nf(stage): + return np.clip( + int(channel_base / 2 ** (stage * channel_decay)), 1, channel_max + ) + + if cmap_dim == None: + cmap_dim = nf(2) + if c_dim == 0: + cmap_dim = 0 + self.cmap_dim = cmap_dim + + if c_dim > 0: + self.mapping = MappingNet( + z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None + ) + + Dis = [DisFromRGB(img_channels + 1, nf(resolution_log2), activation)] + for res in range(resolution_log2, 2, -1): + Dis.append(DisBlock(nf(res), nf(res - 1), activation)) + + if mbstd_num_channels > 0: + Dis.append( + MinibatchStdLayer( + group_size=mbstd_group_size, num_channels=mbstd_num_channels + ) + ) + Dis.append( + Conv2dLayer( + nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation + ) + ) + self.Dis = nn.Sequential(*Dis) + + self.fc0 = FullyConnectedLayer(nf(2) * 4**2, nf(2), activation=activation) + self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim) + + def forward(self, images_in, masks_in, c): + x = torch.cat([masks_in - 0.5, images_in], dim=1) + x = self.Dis(x) + x = self.fc1(self.fc0(x.flatten(start_dim=1))) + + if self.c_dim > 0: + cmap = self.mapping(None, c) + + if self.cmap_dim > 0: + x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) + + return x + + +def nf(stage, channel_base=32768, channel_decay=1.0, channel_max=512): + NF = {512: 64, 256: 128, 128: 256, 64: 512, 32: 512, 16: 512, 8: 512, 4: 512} + return NF[2**stage] + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = FullyConnectedLayer( + in_features=in_features, out_features=hidden_features, activation="lrelu" + ) + self.fc2 = FullyConnectedLayer( + in_features=hidden_features, out_features=out_features + ) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = ( + x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + ) + return windows + + +def window_reverse(windows, window_size: int, H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + # B = windows.shape[0] / (H * W / window_size / window_size) + x = windows.view( + B, H // window_size, W // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class Conv2dLayerPartial(nn.Module): + def __init__( + self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. 
+ kernel_size, # Width and height of the convolution kernel. + bias=True, # Apply additive bias before the activation function? + activation="linear", # Activation function: 'relu', 'lrelu', etc. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output to +-X, None = disable clamping. + trainable=True, # Update the weights of this layer during training? + ): + super().__init__() + self.conv = Conv2dLayer( + in_channels, + out_channels, + kernel_size, + bias, + activation, + up, + down, + resample_filter, + conv_clamp, + trainable, + ) + + self.weight_maskUpdater = torch.ones(1, 1, kernel_size, kernel_size) + self.slide_winsize = kernel_size**2 + self.stride = down + self.padding = kernel_size // 2 if kernel_size % 2 == 1 else 0 + + def forward(self, x, mask=None): + if mask is not None: + with torch.no_grad(): + if self.weight_maskUpdater.type() != x.type(): + self.weight_maskUpdater = self.weight_maskUpdater.to(x) + update_mask = F.conv2d( + mask, + self.weight_maskUpdater, + bias=None, + stride=self.stride, + padding=self.padding, + ) + mask_ratio = self.slide_winsize / (update_mask.to(torch.float32) + 1e-8) + update_mask = torch.clamp(update_mask, 0, 1) # 0 or 1 + mask_ratio = torch.mul(mask_ratio, update_mask).to(x.dtype) + x = self.conv(x) + x = torch.mul(x, mask_ratio) + return x, update_mask + else: + x = self.conv(x) + return x, None + + +class WindowAttention(nn.Module): + r"""Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__( + self, + dim, + window_size, + num_heads, + down_ratio=1, + qkv_bias=True, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.q = FullyConnectedLayer(in_features=dim, out_features=dim) + self.k = FullyConnectedLayer(in_features=dim, out_features=dim) + self.v = FullyConnectedLayer(in_features=dim, out_features=dim) + self.proj = FullyConnectedLayer(in_features=dim, out_features=dim) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask_windows=None, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + norm_x = F.normalize(x, p=2.0, dim=-1, eps=torch.finfo(x.dtype).eps) + q = ( + self.q(norm_x) + .reshape(B_, N, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + k = ( + self.k(norm_x) + .view(B_, -1, self.num_heads, C // self.num_heads) + .permute(0, 2, 3, 1) + ) + v = ( + self.v(x) + .view(B_, -1, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + + attn = (q @ k) * self.scale + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze( + 1 + ).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + + if mask_windows is not None: + attn_mask_windows = mask_windows.squeeze(-1).unsqueeze(1).unsqueeze(1) + attn = attn + attn_mask_windows.masked_fill( + attn_mask_windows == 0, float(-100.0) + ).masked_fill(attn_mask_windows == 1, float(0.0)) + with torch.no_grad(): + mask_windows = torch.clamp( + torch.sum(mask_windows, dim=1, keepdim=True), 0, 1 + ).repeat(1, N, 1) + + attn = self.softmax(attn) + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + return x, mask_windows + + +class SwinTransformerBlock(nn.Module): + r"""Swin Transformer Block. + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer.
Default: nn.LayerNorm + """ + + def __init__( + self, + dim, + input_resolution, + num_heads, + down_ratio=1, + window_size=7, + shift_size=0, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert ( + 0 <= self.shift_size < self.window_size + ), "shift_size must in 0-window_size" + + if self.shift_size > 0: + down_ratio = 1 + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + down_ratio=down_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + + self.fuse = FullyConnectedLayer( + in_features=dim * 2, out_features=dim, activation="lrelu" + ) + + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + if self.shift_size > 0: + attn_mask = self.calculate_mask(self.input_resolution) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def calculate_mask(self, x_size): + # calculate attention mask for SW-MSA + H, W = x_size + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + w_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size + ) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( + attn_mask == 0, float(0.0) + ) + + return attn_mask + + def forward(self, x, x_size, mask=None): + # H, W = self.input_resolution + H, W = x_size + B, L, C = x.shape + # assert L == H * W, "input feature has wrong size" + + shortcut = x + x = x.view(B, H, W, C) + if mask is not None: + mask = mask.view(B, H, W, 1) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll( + x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2) + ) + if mask is not None: + shifted_mask = torch.roll( + mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2) + ) + else: + shifted_x = x + if mask is not None: + shifted_mask = mask + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size + ) # nW*B, window_size, window_size, C + x_windows = x_windows.view( + -1, self.window_size * self.window_size, C + ) # nW*B, window_size*window_size, C + if mask is not None: + mask_windows = window_partition(shifted_mask, self.window_size) + mask_windows = mask_windows.view(-1, self.window_size * self.window_size, 1) + else: + mask_windows = None + + # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size + if self.input_resolution == x_size: + attn_windows, mask_windows = self.attn( 
+ x_windows, mask_windows, mask=self.attn_mask + ) # nW*B, window_size*window_size, C + else: + attn_windows, mask_windows = self.attn( + x_windows, + mask_windows, + mask=self.calculate_mask(x_size).to(x.dtype).to(x.device), + ) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + if mask is not None: + mask_windows = mask_windows.view(-1, self.window_size, self.window_size, 1) + shifted_mask = window_reverse(mask_windows, self.window_size, H, W) + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2) + ) + if mask is not None: + mask = torch.roll( + shifted_mask, shifts=(self.shift_size, self.shift_size), dims=(1, 2) + ) + else: + x = shifted_x + if mask is not None: + mask = shifted_mask + x = x.view(B, H * W, C) + if mask is not None: + mask = mask.view(B, H * W, 1) + + # FFN + x = self.fuse(torch.cat([shortcut, x], dim=-1)) + x = self.mlp(x) + + return x, mask + + +class PatchMerging(nn.Module): + def __init__(self, in_channels, out_channels, down=2): + super().__init__() + self.conv = Conv2dLayerPartial( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + activation="lrelu", + down=down, + ) + self.down = down + + def forward(self, x, x_size, mask=None): + x = token2feature(x, x_size) + if mask is not None: + mask = token2feature(mask, x_size) + x, mask = self.conv(x, mask) + if self.down != 1: + ratio = 1 / self.down + x_size = (int(x_size[0] * ratio), int(x_size[1] * ratio)) + x = feature2token(x) + if mask is not None: + mask = feature2token(mask) + return x, x_size, mask + + +class PatchUpsampling(nn.Module): + def __init__(self, in_channels, out_channels, up=2): + super().__init__() + self.conv = Conv2dLayerPartial( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + activation="lrelu", + up=up, + ) + self.up = up + + def forward(self, x, x_size, mask=None): + x = token2feature(x, x_size) + if mask is not None: + mask = token2feature(mask, x_size) + x, mask = self.conv(x, mask) + if self.up != 1: + x_size = (int(x_size[0] * self.up), int(x_size[1] * self.up)) + x = feature2token(x) + if mask is not None: + mask = feature2token(mask) + return x, x_size, mask + + +class BasicLayer(nn.Module): + """A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
+ """ + + def __init__( + self, + dim, + input_resolution, + depth, + num_heads, + window_size, + down_ratio=1, + mlp_ratio=2.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # patch merging layer + if downsample is not None: + # self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + self.downsample = downsample + else: + self.downsample = None + + # build blocks + self.blocks = nn.ModuleList( + [ + SwinTransformerBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + down_ratio=down_ratio, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) + else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth) + ] + ) + + self.conv = Conv2dLayerPartial( + in_channels=dim, out_channels=dim, kernel_size=3, activation="lrelu" + ) + + def forward(self, x, x_size, mask=None): + if self.downsample is not None: + x, x_size, mask = self.downsample(x, x_size, mask) + identity = x + for blk in self.blocks: + if self.use_checkpoint: + x, mask = checkpoint.checkpoint(blk, x, x_size, mask) + else: + x, mask = blk(x, x_size, mask) + if mask is not None: + mask = token2feature(mask, x_size) + x, mask = self.conv(token2feature(x, x_size), mask) + x = feature2token(x) + identity + if mask is not None: + mask = feature2token(mask) + return x, x_size, mask + + +class ToToken(nn.Module): + def __init__(self, in_channels=3, dim=128, kernel_size=5, stride=1): + super().__init__() + + self.proj = Conv2dLayerPartial( + in_channels=in_channels, + out_channels=dim, + kernel_size=kernel_size, + activation="lrelu", + ) + + def forward(self, x, mask): + x, mask = self.proj(x, mask) + + return x, mask + + +class EncFromRGB(nn.Module): + def __init__( + self, in_channels, out_channels, activation + ): # res = 2, ..., resolution_log2 + super().__init__() + self.conv0 = Conv2dLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + activation=activation, + ) + self.conv1 = Conv2dLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + activation=activation, + ) + + def forward(self, x): + x = self.conv0(x) + x = self.conv1(x) + + return x + + +class ConvBlockDown(nn.Module): + def __init__( + self, in_channels, out_channels, activation + ): # res = 2, ..., resolution_log + super().__init__() + + self.conv0 = Conv2dLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + activation=activation, + down=2, + ) + self.conv1 = Conv2dLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + activation=activation, + ) + + def forward(self, x): + x = self.conv0(x) + x = self.conv1(x) + + return x + + +def token2feature(x, x_size): + B, N, C = x.shape + h, w = x_size + x = x.permute(0, 2, 1).reshape(B, C, h, w) + return x + + +def feature2token(x): + B, C, H, W = x.shape + x = x.view(B, C, -1).transpose(1, 2) + return x + + +class Encoder(nn.Module): + def __init__( + self, + res_log2, + img_channels, + activation, + patch_size=5, + channels=16, + drop_path_rate=0.1, + ): + super().__init__() + + self.resolution = [] + + for idx, i in 
enumerate(range(res_log2, 3, -1)): # from input size to 16x16 + res = 2**i + self.resolution.append(res) + if i == res_log2: + block = EncFromRGB(img_channels * 2 + 1, nf(i), activation) + else: + block = ConvBlockDown(nf(i + 1), nf(i), activation) + setattr(self, "EncConv_Block_%dx%d" % (res, res), block) + + def forward(self, x): + out = {} + for res in self.resolution: + res_log2 = int(np.log2(res)) + x = getattr(self, "EncConv_Block_%dx%d" % (res, res))(x) + out[res_log2] = x + + return out + + +class ToStyle(nn.Module): + def __init__(self, in_channels, out_channels, activation, drop_rate): + super().__init__() + self.conv = nn.Sequential( + Conv2dLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + activation=activation, + down=2, + ), + Conv2dLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + activation=activation, + down=2, + ), + Conv2dLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + activation=activation, + down=2, + ), + ) + + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = FullyConnectedLayer( + in_features=in_channels, out_features=out_channels, activation=activation + ) + # self.dropout = nn.Dropout(drop_rate) + + def forward(self, x): + x = self.conv(x) + x = self.pool(x) + x = self.fc(x.flatten(start_dim=1)) + # x = self.dropout(x) + + return x + + +class DecBlockFirstV2(nn.Module): + def __init__( + self, + res, + in_channels, + out_channels, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ): + super().__init__() + self.res = res + + self.conv0 = Conv2dLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + activation=activation, + ) + self.conv1 = StyleConv( + in_channels=in_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.toRGB = ToRGB( + in_channels=out_channels, + out_channels=img_channels, + style_dim=style_dim, + kernel_size=1, + demodulate=False, + ) + + def forward(self, x, ws, gs, E_features, noise_mode="random"): + # x = self.fc(x).view(x.shape[0], -1, 4, 4) + x = self.conv0(x) + x = x + E_features[self.res] + style = get_style_code(ws[:, 0], gs) + x = self.conv1(x, style, noise_mode=noise_mode) + style = get_style_code(ws[:, 1], gs) + img = self.toRGB(x, style, skip=None) + + return x, img + + +class DecBlock(nn.Module): + def __init__( + self, + res, + in_channels, + out_channels, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ): # res = 4, ..., resolution_log2 + super().__init__() + self.res = res + + self.conv0 = StyleConv( + in_channels=in_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + up=2, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.conv1 = StyleConv( + in_channels=out_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.toRGB = ToRGB( + in_channels=out_channels, + out_channels=img_channels, + style_dim=style_dim, + kernel_size=1, + demodulate=False, + ) + + def forward(self, x, img, ws, gs, E_features, noise_mode="random"): + style = get_style_code(ws[:, self.res * 2 - 9], gs) + x = self.conv0(x, style, noise_mode=noise_mode) + x = x + E_features[self.res] + style = get_style_code(ws[:, self.res * 2 - 8], gs) + x = self.conv1(x, style, 
noise_mode=noise_mode) + style = get_style_code(ws[:, self.res * 2 - 7], gs) + img = self.toRGB(x, style, skip=img) + + return x, img + + +class Decoder(nn.Module): + def __init__( + self, res_log2, activation, style_dim, use_noise, demodulate, img_channels + ): + super().__init__() + self.Dec_16x16 = DecBlockFirstV2( + 4, nf(4), nf(4), activation, style_dim, use_noise, demodulate, img_channels + ) + for res in range(5, res_log2 + 1): + setattr( + self, + "Dec_%dx%d" % (2**res, 2**res), + DecBlock( + res, + nf(res - 1), + nf(res), + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ), + ) + self.res_log2 = res_log2 + + def forward(self, x, ws, gs, E_features, noise_mode="random"): + x, img = self.Dec_16x16(x, ws, gs, E_features, noise_mode=noise_mode) + for res in range(5, self.res_log2 + 1): + block = getattr(self, "Dec_%dx%d" % (2**res, 2**res)) + x, img = block(x, img, ws, gs, E_features, noise_mode=noise_mode) + + return img + + +class DecStyleBlock(nn.Module): + def __init__( + self, + res, + in_channels, + out_channels, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ): + super().__init__() + self.res = res + + self.conv0 = StyleConv( + in_channels=in_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + up=2, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.conv1 = StyleConv( + in_channels=out_channels, + out_channels=out_channels, + style_dim=style_dim, + resolution=2**res, + kernel_size=3, + use_noise=use_noise, + activation=activation, + demodulate=demodulate, + ) + self.toRGB = ToRGB( + in_channels=out_channels, + out_channels=img_channels, + style_dim=style_dim, + kernel_size=1, + demodulate=False, + ) + + def forward(self, x, img, style, skip, noise_mode="random"): + x = self.conv0(x, style, noise_mode=noise_mode) + x = x + skip + x = self.conv1(x, style, noise_mode=noise_mode) + img = self.toRGB(x, style, skip=img) + + return x, img + + +class FirstStage(nn.Module): + def __init__( + self, + img_channels, + img_resolution=256, + dim=180, + w_dim=512, + use_noise=False, + demodulate=True, + activation="lrelu", + ): + super().__init__() + res = 64 + + self.conv_first = Conv2dLayerPartial( + in_channels=img_channels + 1, + out_channels=dim, + kernel_size=3, + activation=activation, + ) + self.enc_conv = nn.ModuleList() + down_time = int(np.log2(img_resolution // res)) + # Build the Swin Transformer layers according to the input image size + for i in range(down_time): # from input size to 64 + self.enc_conv.append( + Conv2dLayerPartial( + in_channels=dim, + out_channels=dim, + kernel_size=3, + down=2, + activation=activation, + ) + ) + + # from 64 -> 16 -> 64 + depths = [2, 3, 4, 3, 2] + ratios = [1, 1 / 2, 1 / 2, 2, 2] + num_heads = 6 + window_sizes = [8, 16, 16, 16, 8] + drop_path_rate = 0.1 + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + + self.tran = nn.ModuleList() + for i, depth in enumerate(depths): + res = int(res * ratios[i]) + if ratios[i] < 1: + merge = PatchMerging(dim, dim, down=int(1 / ratios[i])) + elif ratios[i] > 1: + merge = PatchUpsampling(dim, dim, up=ratios[i]) + else: + merge = None + self.tran.append( + BasicLayer( + dim=dim, + input_resolution=[res, res], + depth=depth, + num_heads=num_heads, + window_size=window_sizes[i], + drop_path=dpr[sum(depths[:i]) : sum(depths[: i + 1])], + downsample=merge, + ) + ) + + # global style + down_conv = [] + for i in range(int(np.log2(16))): + down_conv.append( + Conv2dLayer( + in_channels=dim, +
out_channels=dim, + kernel_size=3, + down=2, + activation=activation, + ) + ) + down_conv.append(nn.AdaptiveAvgPool2d((1, 1))) + self.down_conv = nn.Sequential(*down_conv) + self.to_style = FullyConnectedLayer( + in_features=dim, out_features=dim * 2, activation=activation + ) + self.ws_style = FullyConnectedLayer( + in_features=w_dim, out_features=dim, activation=activation + ) + self.to_square = FullyConnectedLayer( + in_features=dim, out_features=16 * 16, activation=activation + ) + + style_dim = dim * 3 + self.dec_conv = nn.ModuleList() + for i in range(down_time): # from 64 to input size + res = res * 2 + self.dec_conv.append( + DecStyleBlock( + res, + dim, + dim, + activation, + style_dim, + use_noise, + demodulate, + img_channels, + ) + ) + + def forward(self, images_in, masks_in, ws, noise_mode="random"): + x = torch.cat([masks_in - 0.5, images_in * masks_in], dim=1) + + skips = [] + x, mask = self.conv_first(x, masks_in) # input size + skips.append(x) + for i, block in enumerate(self.enc_conv): # input size to 64 + x, mask = block(x, mask) + if i != len(self.enc_conv) - 1: + skips.append(x) + + x_size = x.size()[-2:] + x = feature2token(x) + mask = feature2token(mask) + mid = len(self.tran) // 2 + for i, block in enumerate(self.tran): # 64 to 16 + if i < mid: + x, x_size, mask = block(x, x_size, mask) + skips.append(x) + elif i > mid: + x, x_size, mask = block(x, x_size, None) + x = x + skips[mid - i] + else: + x, x_size, mask = block(x, x_size, None) + + mul_map = torch.ones_like(x) * 0.5 + mul_map = F.dropout(mul_map, training=True) + ws = self.ws_style(ws[:, -1]) + add_n = self.to_square(ws).unsqueeze(1) + add_n = ( + F.interpolate( + add_n, size=x.size(1), mode="linear", align_corners=False + ) + .squeeze(1) + .unsqueeze(-1) + ) + x = x * mul_map + add_n * (1 - mul_map) + gs = self.to_style( + self.down_conv(token2feature(x, x_size)).flatten(start_dim=1) + ) + style = torch.cat([gs, ws], dim=1) + + x = token2feature(x, x_size).contiguous() + img = None + for i, block in enumerate(self.dec_conv): + x, img = block( + x, img, style, skips[len(self.dec_conv) - i - 1], noise_mode=noise_mode + ) + + # ensemble + img = img * (1 - masks_in) + images_in * masks_in + + return img + + +class SynthesisNet(nn.Module): + def __init__( + self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels=3, # Number of color channels. + channel_base=32768, # Overall multiplier for the number of channels. + channel_decay=1.0, + channel_max=512, # Maximum number of channels in any layer. + activation="lrelu", # Activation function: 'relu', 'lrelu', etc. 
+ drop_rate=0.5, + use_noise=False, + demodulate=True, + ): + super().__init__() + resolution_log2 = int(np.log2(img_resolution)) + assert img_resolution == 2**resolution_log2 and img_resolution >= 4 + + self.num_layers = resolution_log2 * 2 - 3 * 2 + self.img_resolution = img_resolution + self.resolution_log2 = resolution_log2 + + # first stage + self.first_stage = FirstStage( + img_channels, + img_resolution=img_resolution, + w_dim=w_dim, + use_noise=False, + demodulate=demodulate, + ) + + # second stage + self.enc = Encoder( + resolution_log2, img_channels, activation, patch_size=5, channels=16 + ) + self.to_square = FullyConnectedLayer( + in_features=w_dim, out_features=16 * 16, activation=activation + ) + self.to_style = ToStyle( + in_channels=nf(4), + out_channels=nf(2) * 2, + activation=activation, + drop_rate=drop_rate, + ) + style_dim = w_dim + nf(2) * 2 + self.dec = Decoder( + resolution_log2, activation, style_dim, use_noise, demodulate, img_channels + ) + + def forward(self, images_in, masks_in, ws, noise_mode="random", return_stg1=False): + out_stg1 = self.first_stage(images_in, masks_in, ws, noise_mode=noise_mode) + + # encoder + x = images_in * masks_in + out_stg1 * (1 - masks_in) + x = torch.cat([masks_in - 0.5, x, images_in * masks_in], dim=1) + E_features = self.enc(x) + + fea_16 = E_features[4] + mul_map = torch.ones_like(fea_16) * 0.5 + mul_map = F.dropout(mul_map, training=True) + add_n = self.to_square(ws[:, 0]).view(-1, 16, 16).unsqueeze(1) + add_n = F.interpolate( + add_n, size=fea_16.size()[-2:], mode="bilinear", align_corners=False + ) + fea_16 = fea_16 * mul_map + add_n * (1 - mul_map) + E_features[4] = fea_16 + + # style + gs = self.to_style(fea_16) + + # decoder + img = self.dec(fea_16, ws, gs, E_features, noise_mode=noise_mode) + + # ensemble + img = img * (1 - masks_in) + images_in * masks_in + + if not return_stg1: + return img + else: + return img, out_stg1 + + +class Generator(nn.Module): + def __init__( + self, + z_dim, # Input latent (Z) dimensionality, 0 = no latent. + c_dim, # Conditioning label (C) dimensionality, 0 = no label. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # resolution of generated image + img_channels, # Number of input color channels. + synthesis_kwargs={}, # Arguments for SynthesisNetwork. + mapping_kwargs={}, # Arguments for MappingNetwork. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + + self.synthesis = SynthesisNet( + w_dim=w_dim, + img_resolution=img_resolution, + img_channels=img_channels, + **synthesis_kwargs, + ) + self.mapping = MappingNet( + z_dim=z_dim, + c_dim=c_dim, + w_dim=w_dim, + num_ws=self.synthesis.num_layers, + **mapping_kwargs, + ) + + def forward( + self, + images_in, + masks_in, + z, + c, + truncation_psi=1, + truncation_cutoff=None, + skip_w_avg_update=False, + noise_mode="none", + return_stg1=False, + ): + ws = self.mapping( + z, + c, + truncation_psi=truncation_psi, + truncation_cutoff=truncation_cutoff, + skip_w_avg_update=skip_w_avg_update, + ) + img = self.synthesis(images_in, masks_in, ws, noise_mode=noise_mode) + return img + + +class Discriminator(torch.nn.Module): + def __init__( + self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. 
+ channel_decay=1, + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + activation="lrelu", + mbstd_group_size=4, # Group size for the minibatch standard deviation layer, None = entire minibatch. + mbstd_num_channels=1, # Number of features for the minibatch standard deviation layer, 0 = disable. + ): + super().__init__() + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + + resolution_log2 = int(np.log2(img_resolution)) + assert img_resolution == 2**resolution_log2 and img_resolution >= 4 + self.resolution_log2 = resolution_log2 + + if cmap_dim == None: + cmap_dim = nf(2) + if c_dim == 0: + cmap_dim = 0 + self.cmap_dim = cmap_dim + + if c_dim > 0: + self.mapping = MappingNet( + z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None + ) + + Dis = [DisFromRGB(img_channels + 1, nf(resolution_log2), activation)] + for res in range(resolution_log2, 2, -1): + Dis.append(DisBlock(nf(res), nf(res - 1), activation)) + + if mbstd_num_channels > 0: + Dis.append( + MinibatchStdLayer( + group_size=mbstd_group_size, num_channels=mbstd_num_channels + ) + ) + Dis.append( + Conv2dLayer( + nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation + ) + ) + self.Dis = nn.Sequential(*Dis) + + self.fc0 = FullyConnectedLayer(nf(2) * 4**2, nf(2), activation=activation) + self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim) + + # for 64x64 + Dis_stg1 = [DisFromRGB(img_channels + 1, nf(resolution_log2) // 2, activation)] + for res in range(resolution_log2, 2, -1): + Dis_stg1.append(DisBlock(nf(res) // 2, nf(res - 1) // 2, activation)) + + if mbstd_num_channels > 0: + Dis_stg1.append( + MinibatchStdLayer( + group_size=mbstd_group_size, num_channels=mbstd_num_channels + ) + ) + Dis_stg1.append( + Conv2dLayer( + nf(2) // 2 + mbstd_num_channels, + nf(2) // 2, + kernel_size=3, + activation=activation, + ) + ) + self.Dis_stg1 = nn.Sequential(*Dis_stg1) + + self.fc0_stg1 = FullyConnectedLayer( + nf(2) // 2 * 4**2, nf(2) // 2, activation=activation + ) + self.fc1_stg1 = FullyConnectedLayer( + nf(2) // 2, 1 if cmap_dim == 0 else cmap_dim + ) + + def forward(self, images_in, masks_in, images_stg1, c): + x = self.Dis(torch.cat([masks_in - 0.5, images_in], dim=1)) + x = self.fc1(self.fc0(x.flatten(start_dim=1))) + + x_stg1 = self.Dis_stg1(torch.cat([masks_in - 0.5, images_stg1], dim=1)) + x_stg1 = self.fc1_stg1(self.fc0_stg1(x_stg1.flatten(start_dim=1))) + + if self.c_dim > 0: + cmap = self.mapping(None, c) + + if self.cmap_dim > 0: + x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) + x_stg1 = (x_stg1 * cmap).sum(dim=1, keepdim=True) * ( + 1 / np.sqrt(self.cmap_dim) + ) + + return x, x_stg1 + + +MAT_MODEL_URL = os.environ.get( + "MAT_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_mat/Places_512_FullData_G.pth", +) + +MAT_MODEL_MD5 = os.environ.get("MAT_MODEL_MD5", "8ca927835fa3f5e21d65ffcb165377ed") + + +class MAT(InpaintModel): + name = "mat" + min_size = 512 + pad_mod = 512 + pad_to_square = True + is_erase_model = True + + def init_model(self, device, **kwargs): + seed = 240 # pick up a random number + set_seed(seed) + + fp16 = not kwargs.get("no_half", False) + use_gpu = "cuda" in str(device) and torch.cuda.is_available() + self.torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32 + + G = Generator( + z_dim=512, + c_dim=0, + w_dim=512, + img_resolution=512, + img_channels=3, + mapping_kwargs={"torch_dtype": self.torch_dtype}, + 
).to(self.torch_dtype) + # fmt: off + self.model = load_model(G, MAT_MODEL_URL, device, MAT_MODEL_MD5) + self.z = torch.from_numpy(np.random.randn(1, G.z_dim)).to(self.torch_dtype).to(device) + self.label = torch.zeros([1, self.model.c_dim], device=device).to(self.torch_dtype) + # fmt: on + + @staticmethod + def download(): + download_model(MAT_MODEL_URL, MAT_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + return os.path.exists(get_cache_path_by_url(MAT_MODEL_URL)) + + def forward(self, image, mask, config: InpaintRequest): + """Input images and output images have same size + images: [H, W, C] RGB + masks: [H, W] mask area == 255 + return: BGR IMAGE + """ + + image = norm_img(image) # [0, 1] + image = image * 2 - 1 # [0, 1] -> [-1, 1] + + mask = (mask > 127) * 255 + mask = 255 - mask + mask = norm_img(mask) + + image = ( + torch.from_numpy(image).unsqueeze(0).to(self.torch_dtype).to(self.device) + ) + mask = torch.from_numpy(mask).unsqueeze(0).to(self.torch_dtype).to(self.device) + + output = self.model( + image, mask, self.z, self.label, truncation_psi=1, noise_mode="none" + ) + output = ( + (output.permute(0, 2, 3, 1) * 127.5 + 127.5) + .round() + .clamp(0, 255) + .to(torch.uint8) + ) + output = output[0].cpu().numpy() + cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return cur_res diff --git a/inpaint/model/mi_gan.py b/inpaint/model/mi_gan.py new file mode 100644 index 0000000..f1ce25f --- /dev/null +++ b/inpaint/model/mi_gan.py @@ -0,0 +1,110 @@ +import os + +import cv2 +import torch + +from iopaint.helper import ( + load_jit_model, + download_model, + get_cache_path_by_url, + boxes_from_mask, + resize_max_size, + norm_img, +) +from .base import InpaintModel +from iopaint.schema import InpaintRequest + +MIGAN_MODEL_URL = os.environ.get( + "MIGAN_MODEL_URL", + "https://github.com/Sanster/models/releases/download/migan/migan_traced.pt", +) +MIGAN_MODEL_MD5 = os.environ.get("MIGAN_MODEL_MD5", "76eb3b1a71c400ee3290524f7a11b89c") + + +class MIGAN(InpaintModel): + name = "migan" + min_size = 512 + pad_mod = 512 + pad_to_square = True + is_erase_model = True + + def init_model(self, device, **kwargs): + self.model = load_jit_model(MIGAN_MODEL_URL, device, MIGAN_MODEL_MD5).eval() + + @staticmethod + def download(): + download_model(MIGAN_MODEL_URL, MIGAN_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + return os.path.exists(get_cache_path_by_url(MIGAN_MODEL_URL)) + + @torch.no_grad() + def __call__(self, image, mask, config: InpaintRequest): + """ + images: [H, W, C] RGB, not normalized + masks: [H, W] + return: BGR IMAGE + """ + if image.shape[0] == 512 and image.shape[1] == 512: + return self._pad_forward(image, mask, config) + + boxes = boxes_from_mask(mask) + crop_result = [] + config.hd_strategy_crop_margin = 128 + for box in boxes: + crop_image, crop_mask, crop_box = self._crop_box(image, mask, box, config) + origin_size = crop_image.shape[:2] + resize_image = resize_max_size(crop_image, size_limit=512) + resize_mask = resize_max_size(crop_mask, size_limit=512) + inpaint_result = self._pad_forward(resize_image, resize_mask, config) + + # only paste masked area result + inpaint_result = cv2.resize( + inpaint_result, + (origin_size[1], origin_size[0]), + interpolation=cv2.INTER_CUBIC, + ) + + original_pixel_indices = crop_mask < 127 + inpaint_result[original_pixel_indices] = crop_image[:, :, ::-1][ + original_pixel_indices + ] + + crop_result.append((inpaint_result, crop_box)) + + inpaint_result = image[:, :, ::-1].copy() + for crop_image, crop_box in 
crop_result: + x1, y1, x2, y2 = crop_box + inpaint_result[y1:y2, x1:x2, :] = crop_image + + return inpaint_result + + def forward(self, image, mask, config: InpaintRequest): + """Input images and output images have same size + images: [H, W, C] RGB + masks: [H, W] mask area == 255 + return: BGR IMAGE + """ + + image = norm_img(image) # [0, 1] + image = image * 2 - 1 # [0, 1] -> [-1, 1] + mask = (mask > 120) * 255 + mask = norm_img(mask) + + image = torch.from_numpy(image).unsqueeze(0).to(self.device) + mask = torch.from_numpy(mask).unsqueeze(0).to(self.device) + + erased_img = image * (1 - mask) + input_image = torch.cat([0.5 - mask, erased_img], dim=1) + + output = self.model(input_image) + output = ( + (output.permute(0, 2, 3, 1) * 127.5 + 127.5) + .round() + .clamp(0, 255) + .to(torch.uint8) + ) + output = output[0].cpu().numpy() + cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return cur_res diff --git a/inpaint/model/opencv2.py b/inpaint/model/opencv2.py new file mode 100644 index 0000000..de47209 --- /dev/null +++ b/inpaint/model/opencv2.py @@ -0,0 +1,29 @@ +import cv2 +from .base import InpaintModel +from iopaint.schema import InpaintRequest + +flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA} + + +class OpenCV2(InpaintModel): + name = "cv2" + pad_mod = 1 + is_erase_model = True + + @staticmethod + def is_downloaded() -> bool: + return True + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] + return: BGR IMAGE + """ + cur_res = cv2.inpaint( + image[:, :, ::-1], + mask, + inpaintRadius=config.cv2_radius, + flags=flag_map[config.cv2_flag], + ) + return cur_res diff --git a/inpaint/model/original_sd_configs/__init__.py b/inpaint/model/original_sd_configs/__init__.py new file mode 100644 index 0000000..23896a7 --- /dev/null +++ b/inpaint/model/original_sd_configs/__init__.py @@ -0,0 +1,19 @@ +from pathlib import Path +from typing import Dict + +CURRENT_DIR = Path(__file__).parent.absolute() + + +def get_config_files() -> Dict[str, Path]: + """ + - `v1`: Config file for Stable Diffusion v1 + - `v2`: Config file for Stable Diffusion v2 + - `xl`: Config file for Stable Diffusion XL + - `xl_refiner`: Config file for Stable Diffusion XL Refiner + """ + return { + "v1": CURRENT_DIR / "v1-inference.yaml", + "v2": CURRENT_DIR / "v2-inference-v.yaml", + "xl": CURRENT_DIR / "sd_xl_base.yaml", + "xl_refiner": CURRENT_DIR / "sd_xl_refiner.yaml", + } diff --git a/inpaint/model/original_sd_configs/sd_xl_base.yaml b/inpaint/model/original_sd_configs/sd_xl_base.yaml new file mode 100644 index 0000000..6047379 --- /dev/null +++ b/inpaint/model/original_sd_configs/sd_xl_base.yaml @@ -0,0 +1,93 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.13025 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + adm_in_channels: 2816 + num_classes: sequential + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [4, 2] + num_res_blocks: 2 + channel_mult: [1, 2, 4] + num_head_channels: 64 + use_linear_in_transformer: True + 
transformer_depth: [1, 2, 10] + context_dim: 2048 + spatial_transformer_attn_type: softmax-xformers + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenCLIPEmbedder + params: + layer: hidden + layer_idx: 11 + + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2 + params: + arch: ViT-bigG-14 + version: laion2b_s39b_b160k + freeze: True + layer: penultimate + always_return_pooled: True + legacy: False + + - is_trainable: False + input_key: original_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: crop_coords_top_left + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: target_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + attn_type: vanilla-xformers + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity diff --git a/inpaint/model/original_sd_configs/sd_xl_refiner.yaml b/inpaint/model/original_sd_configs/sd_xl_refiner.yaml new file mode 100644 index 0000000..2d5ab44 --- /dev/null +++ b/inpaint/model/original_sd_configs/sd_xl_refiner.yaml @@ -0,0 +1,86 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.13025 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + adm_in_channels: 2560 + num_classes: sequential + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: 384 + attention_resolutions: [4, 2] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_head_channels: 64 + use_linear_in_transformer: True + transformer_depth: 4 + context_dim: [1280, 1280, 1280, 1280] + spatial_transformer_attn_type: softmax-xformers + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2 + params: + arch: ViT-bigG-14 + version: laion2b_s39b_b160k + legacy: False + freeze: True + layer: penultimate + always_return_pooled: True + + - is_trainable: False + input_key: original_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: crop_coords_top_left + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: aesthetic_score + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + attn_type: vanilla-xformers + double_z: true + z_channels: 4 + resolution: 256 + 
in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity diff --git a/inpaint/model/original_sd_configs/v1-inference.yaml b/inpaint/model/original_sd_configs/v1-inference.yaml new file mode 100644 index 0000000..d4effe5 --- /dev/null +++ b/inpaint/model/original_sd_configs/v1-inference.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/inpaint/model/original_sd_configs/v2-inference-v.yaml b/inpaint/model/original_sd_configs/v2-inference-v.yaml new file mode 100644 index 0000000..8ec8dfb --- /dev/null +++ b/inpaint/model/original_sd_configs/v2-inference-v.yaml @@ -0,0 +1,68 @@ +model: + base_learning_rate: 1.0e-4 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + parameterization: "v" + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False # we set this to false because this is an inference only config + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + 
dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" diff --git a/inpaint/model/paint_by_example.py b/inpaint/model/paint_by_example.py new file mode 100644 index 0000000..bf1e5b7 --- /dev/null +++ b/inpaint/model/paint_by_example.py @@ -0,0 +1,68 @@ +import PIL +import PIL.Image +import cv2 +import torch +from loguru import logger + +from iopaint.helper import decode_base64_to_image +from .base import DiffusionInpaintModel +from iopaint.schema import InpaintRequest +from .utils import get_torch_dtype, enable_low_mem, is_local_files_only + + +class PaintByExample(DiffusionInpaintModel): + name = "Fantasy-Studio/Paint-by-Example" + pad_mod = 8 + min_size = 512 + + def init_model(self, device: torch.device, **kwargs): + from diffusers import DiffusionPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + model_kwargs = { + "local_files_only": is_local_files_only(**kwargs), + } + + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Paint By Example Model NSFW checker") + model_kwargs.update( + dict(safety_checker=None, requires_safety_checker=False) + ) + + self.model = DiffusionPipeline.from_pretrained( + self.name, torch_dtype=torch_dtype, **model_kwargs + ) + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + # TODO: gpu_id + if kwargs.get("cpu_offload", False) and use_gpu: + self.model.image_encoder = self.model.image_encoder.to(device) + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + if config.paint_by_example_example_image is None: + raise ValueError("paint_by_example_example_image is required") + example_image, _, _ = decode_base64_to_image( + config.paint_by_example_example_image + ) + output = self.model( + image=PIL.Image.fromarray(image), + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + example_image=PIL.Image.fromarray(example_image), + num_inference_steps=config.sd_steps, + guidance_scale=config.sd_guidance_scale, + negative_prompt="out of frame, lowres, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, disfigured, gross proportions, malformed limbs, watermark, signature", + output_type="np.array", + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/plms_sampler.py b/inpaint/model/plms_sampler.py new file mode 100644 index 0000000..131a8f4 --- /dev/null +++ b/inpaint/model/plms_sampler.py @@ -0,0 +1,225 @@ +# From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py +import torch +import numpy as np +from .utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like +from tqdm import tqdm + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + setattr(self, name, attr) + + def 
make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta, verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + steps, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=False, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=steps, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, ): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + time_range = list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + return img + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/inpaint/model/power_paint/__init__.py b/inpaint/model/power_paint/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/power_paint/pipeline_powerpaint.py b/inpaint/model/power_paint/pipeline_powerpaint.py new file mode 100644 index 0000000..13c1d27 --- /dev/null +++ b/inpaint/model/power_paint/pipeline_powerpaint.py @@ -0,0 +1,1243 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer +from diffusers.configuration_utils import FrozenDict +from diffusers.image_processor import VaeImageProcessor +from diffusers.loaders import ( + FromSingleFileMixin, + LoraLoaderMixin, + TextualInversionLoaderMixin, +) +from diffusers.models import ( + AsymmetricAutoencoderKL, + AutoencoderKL, + UNet2DConditionModel, +) +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + deprecate, + is_accelerate_available, + is_accelerate_version, + logging, +) +from diffusers.utils.torch_utils import randn_tensor +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def prepare_mask_and_masked_image( + image, mask, height, width, return_image: bool = False +): + """ + Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. 
+ + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. + """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch single image + if image.ndim == 3: + assert ( + image.shape[0] == 3 + ), "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert ( + image.ndim == 4 and mask.ndim == 4 + ), "Image and Mask must have 4 dimensions" + assert ( + image.shape[-2:] == mask.shape[-2:] + ), "Image and Mask must have the same spatial dimensions" + assert ( + image.shape[0] == mask.shape[0] + ), "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError( + f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not" + ) + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [ + i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image + ] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if 
isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * (mask < 0.5) + + # n.b. ensure backwards compatibility as old function does not return image + if return_image: + return mask, masked_image, image + + return mask, masked_image + + +class StableDiffusionInpaintPipeline( + DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: Union[AutoencoderKL, AsymmetricAutoencoderKL], + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if ( + hasattr(scheduler.config, "steps_offset") + and scheduler.config.steps_offset != 1 + ): + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate( + "steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False + ) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if ( + hasattr(scheduler.config, "skip_prk_steps") + and scheduler.config.skip_prk_steps is False + ): + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration" + " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" + " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" + " Hub, it would be very nice if you could open a Pull request for the" + " `scheduler/scheduler_config.json` file" + ) + deprecate( + "skip_prk_steps not set", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + new_config = dict(scheduler.config) + new_config["skip_prk_steps"] = True + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr( + unet.config, "_diffusers_version" + ) and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse( + "0.9.0.dev0" + ) + is_unet_sample_size_less_64 = ( + hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + ) + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate( + "sample_size<64", "1.0.0", deprecation_message, standard_warn=False + ) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4 + if unet.config.in_channels != 9: + logger.info( + f"You have loaded a UNet with {unet.config.in_channels} input channels which." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a + time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. + Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the + iterative execution of the `unet`. + """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError( + "`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." + ) + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + hook = None + for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: + _, hook = cpu_offload_with_hook( + cpu_offloaded_model, device, prev_module_hook=hook + ) + + if self.safety_checker is not None: + _, hook = cpu_offload_with_hook( + self.safety_checker, device, prev_module_hook=hook + ) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + promptA, + promptB, + t, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA=None, + negative_promptB=None, + t_nag=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+ prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + prompt = promptA + negative_prompt = negative_promptA + + if promptA is not None and isinstance(promptA, str): + batch_size = 1 + elif promptA is not None and isinstance(promptA, list): + batch_size = len(promptA) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + promptA = self.maybe_convert_prompt(promptA, self.tokenizer) + + text_inputsA = self.tokenizer( + promptA, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputsB = self.tokenizer( + promptB, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_idsA = text_inputsA.input_ids + text_input_idsB = text_inputsB.input_ids + untruncated_ids = self.tokenizer( + promptA, padding="longest", return_tensors="pt" + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_idsA.shape[ + -1 + ] and not torch.equal(text_input_idsA, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = text_inputsA.attention_mask.to(device) + else: + attention_mask = None + + # print("text_input_idsA: ",text_input_idsA) + # print("text_input_idsB: ",text_input_idsB) + # print('t: ',t) + + prompt_embedsA = self.text_encoder( + text_input_idsA.to(device), + attention_mask=attention_mask, + ) + prompt_embedsA = prompt_embedsA[0] + + prompt_embedsB = self.text_encoder( + text_input_idsB.to(device), + attention_mask=attention_mask, + ) + prompt_embedsB = prompt_embedsB[0] + prompt_embeds = prompt_embedsA * (t) + (1 - t) * prompt_embedsB + # print("prompt_embeds: ",prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view( + bs_embed * num_images_per_prompt, seq_len, -1 + ) + + # 
get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokensA: List[str] + uncond_tokensB: List[str] + if negative_prompt is None: + uncond_tokensA = [""] * batch_size + uncond_tokensB = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokensA = [negative_promptA] + uncond_tokensB = [negative_promptB] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokensA = negative_promptA + uncond_tokensB = negative_promptB + + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokensA = self.maybe_convert_prompt( + uncond_tokensA, self.tokenizer + ) + uncond_tokensB = self.maybe_convert_prompt( + uncond_tokensB, self.tokenizer + ) + + max_length = prompt_embeds.shape[1] + uncond_inputA = self.tokenizer( + uncond_tokensA, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_inputB = self.tokenizer( + uncond_tokensB, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = uncond_inputA.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embedsA = self.text_encoder( + uncond_inputA.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embedsB = self.text_encoder( + uncond_inputB.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = ( + negative_prompt_embedsA[0] * (t_nag) + + (1 - t_nag) * negative_prompt_embedsB[0] + ) + + # negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to( + dtype=prompt_embeds_dtype, device=device + ) + + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_images_per_prompt, 1 + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # print("prompt_embeds: ",prompt_embeds) + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess( + image, output_type="pil" + ) + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor( + feature_extractor_input, return_tensors="pt" + ).to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if strength < 0 or strength > 1: + raise ValueError( + f"The value of strength should in [0.0, 1.0] but is {strength}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError( + f"`height` and `width` have to be divisible by 8 but are {height} and {width}." + ) + + if (callback_steps is None) or ( + callback_steps is not None + and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and ( + not isinstance(prompt, str) and not isinstance(prompt, list) + ): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. then initialise the latents to noise, else initial to image + noise + latents = ( + noise + if is_strength_max + else self.scheduler.add_noise(image_latents, noise, timestep) + ) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = ( + latents * self.scheduler.init_noise_sigma + if is_strength_max + else latents + ) + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample( + generator=generator[i] + ) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.vae.encode(image).latent_dist.sample( + generator=generator + ) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, + mask, + masked_image, + batch_size, + height, + width, + dtype, + device, + generator, + do_classifier_free_guidance, + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: 
+ raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." + ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) + if do_classifier_free_guidance + else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + @torch.no_grad() + def __call__( + self, + promptA: Union[str, List[str]] = None, + promptB: Union[str, List[str]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 1.0, + tradoff: float = 1.0, + tradoff_nag: float = 1.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_promptA: Optional[Union[str, List[str]]] = None, + negative_promptB: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + task_class: Union[torch.Tensor, float, int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked + out with `mask_image` and repainted according to `prompt`). + mask_image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to mask `image`. 
White pixels in the mask are repainted + while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel + (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the + expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. 
+ callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipe = StableDiffusionInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + prompt = promptA + negative_prompt = negative_promptA + # 1. Check inputs + self.check_inputs( + prompt, + height, + width, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) + if cross_attention_kwargs is not None + else None + ) + prompt_embeds = self._encode_prompt( + promptA, + promptB, + tradoff, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA, + negative_promptB, + tradoff_nag, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. Preprocess mask and image + mask, masked_image, init_image = prepare_mask_and_masked_image( + image, mask_image, height, width, return_image=True + ) + mask_condition = mask.clone() + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if ( + num_channels_latents + num_channels_mask + num_channels_masked_image + != self.unet.config.in_channels + ): + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + + # 9. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * 2) if do_classifier_free_guidance else latents + ) + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t + ) + + if num_channels_unet == 9: + latent_model_input = torch.cat( + [latent_model_input, mask, masked_image_latents], dim=1 + ) + + # predict the noise residual + if task_class is not None: + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + task_class=task_class, + )[0] + else: + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, **extra_step_kwargs, return_dict=False + )[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents[:1] + init_mask = mask[:1] + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = ( + 1 - init_mask + ) * init_latents_proper + init_mask * latents + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(self, i, t, {}) + + if not output_type == "latent": + condition_kwargs = {} + if isinstance(self.vae, AsymmetricAutoencoderKL): + init_image = init_image.to( + device=device, dtype=masked_image_latents.dtype + ) + init_image_condition = init_image.clone() + init_image = self._encode_vae_image(init_image, generator=generator) + mask_condition = mask_condition.to( + device=device, dtype=masked_image_latents.dtype + ) + condition_kwargs = { + "image": init_image_condition, + "mask": mask_condition, + } + image = self.vae.decode( + latents / self.vae.config.scaling_factor, + return_dict=False, + **condition_kwargs, + )[0] + image, has_nsfw_concept = self.run_safety_checker( + image, device, prompt_embeds.dtype + ) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess( + image, output_type=output_type, do_denormalize=do_denormalize + ) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput( + images=image, nsfw_content_detected=has_nsfw_concept + ) diff 
--git a/inpaint/model/power_paint/power_paint.py b/inpaint/model/power_paint/power_paint.py new file mode 100644 index 0000000..f17a5a3 --- /dev/null +++ b/inpaint/model/power_paint/power_paint.py @@ -0,0 +1,101 @@ +from PIL import Image +import PIL.Image +import cv2 +import torch +from loguru import logger + +from ..base import DiffusionInpaintModel +from ..helper.cpu_text_encoder import CPUTextEncoderWrapper +from ..utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) +from iopaint.schema import InpaintRequest +from .powerpaint_tokenizer import add_task_to_prompt +from ...const import POWERPAINT_NAME + + +class PowerPaint(DiffusionInpaintModel): + name = POWERPAINT_NAME + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5" + + def init_model(self, device: torch.device, **kwargs): + from .pipeline_powerpaint import StableDiffusionInpaintPipeline + from .powerpaint_tokenizer import PowerPaintTokenizer + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + model_kwargs = {"local_files_only": is_local_files_only(**kwargs)} + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + self.model = handle_from_pretrained_exceptions( + StableDiffusionInpaintPipeline.from_pretrained, + pretrained_model_name_or_path=self.name, + variant="fp16", + torch_dtype=torch_dtype, + **model_kwargs, + ) + self.model.tokenizer = PowerPaintTokenizer(self.model.tokenizer) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + promptA, promptB, negative_promptA, negative_promptB = add_task_to_prompt( + config.prompt, config.negative_prompt, config.powerpaint_task + ) + + output = self.model( + image=PIL.Image.fromarray(image), + promptA=promptA, + promptB=promptB, + tradoff=config.fitting_degree, + tradoff_nag=config.fitting_degree, + negative_promptA=negative_promptA, + negative_promptB=negative_promptB, + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + num_inference_steps=config.sd_steps, + strength=config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + callback_steps=1, + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/power_paint/power_paint_v2.py b/inpaint/model/power_paint/power_paint_v2.py new file mode 100644 index 0000000..1a27f65 --- /dev/null +++ b/inpaint/model/power_paint/power_paint_v2.py @@ -0,0 +1,186 @@ +from itertools import chain + +import 
PIL.Image +import cv2 +import torch +from iopaint.model.original_sd_configs import get_config_files +from loguru import logger +from transformers import CLIPTextModel, CLIPTokenizer +import numpy as np + +from ..base import DiffusionInpaintModel +from ..helper.cpu_text_encoder import CPUTextEncoderWrapper +from ..utils import ( + get_torch_dtype, + enable_low_mem, + is_local_files_only, + handle_from_pretrained_exceptions, +) +from .powerpaint_tokenizer import task_to_prompt +from iopaint.schema import InpaintRequest, ModelType +from .v2.BrushNet_CA import BrushNetModel +from .v2.unet_2d_condition import UNet2DConditionModel_forward +from .v2.unet_2d_blocks import ( + CrossAttnDownBlock2D_forward, + DownBlock2D_forward, + CrossAttnUpBlock2D_forward, + UpBlock2D_forward, +) + + +class PowerPaintV2(DiffusionInpaintModel): + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5" + hf_model_id = "Sanster/PowerPaint_v2" + + def init_model(self, device: torch.device, **kwargs): + from .v2.pipeline_PowerPaint_Brushnet_CA import ( + StableDiffusionPowerPaintBrushNetPipeline, + ) + from .powerpaint_tokenizer import PowerPaintTokenizer + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + model_kwargs = {"local_files_only": is_local_files_only(**kwargs)} + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + text_encoder_brushnet = CLIPTextModel.from_pretrained( + self.hf_model_id, + subfolder="text_encoder_brushnet", + variant="fp16", + torch_dtype=torch_dtype, + local_files_only=model_kwargs["local_files_only"], + ) + + brushnet = BrushNetModel.from_pretrained( + self.hf_model_id, + subfolder="PowerPaint_Brushnet", + variant="fp16", + torch_dtype=torch_dtype, + local_files_only=model_kwargs["local_files_only"], + ) + + if self.model_info.is_single_file_diffusers: + if self.model_info.model_type == ModelType.DIFFUSERS_SD: + model_kwargs["num_in_channels"] = 4 + else: + model_kwargs["num_in_channels"] = 9 + + pipe = StableDiffusionPowerPaintBrushNetPipeline.from_single_file( + self.model_id_or_path, + torch_dtype=torch_dtype, + load_safety_checker=False, + original_config_file=get_config_files()["v1"], + brushnet=brushnet, + text_encoder_brushnet=text_encoder_brushnet, + **model_kwargs, + ) + else: + pipe = handle_from_pretrained_exceptions( + StableDiffusionPowerPaintBrushNetPipeline.from_pretrained, + pretrained_model_name_or_path=self.model_id_or_path, + torch_dtype=torch_dtype, + brushnet=brushnet, + text_encoder_brushnet=text_encoder_brushnet, + variant="fp16", + **model_kwargs, + ) + pipe.tokenizer = PowerPaintTokenizer( + CLIPTokenizer.from_pretrained(self.hf_model_id, subfolder="tokenizer") + ) + self.model = pipe + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + # Monkey patch the forward method of the UNet to use the brushnet_unet_forward method + self.model.unet.forward = 
UNet2DConditionModel_forward.__get__( + self.model.unet, self.model.unet.__class__ + ) + + # Monkey patch unet down_blocks to use CrossAttnDownBlock2D_forward + for down_block in chain( + self.model.unet.down_blocks, self.model.brushnet.down_blocks + ): + if down_block.__class__.__name__ == "CrossAttnDownBlock2D": + down_block.forward = CrossAttnDownBlock2D_forward.__get__( + down_block, down_block.__class__ + ) + else: + down_block.forward = DownBlock2D_forward.__get__( + down_block, down_block.__class__ + ) + + for up_block in chain(self.model.unet.up_blocks, self.model.brushnet.up_blocks): + if up_block.__class__.__name__ == "CrossAttnUpBlock2D": + up_block.forward = CrossAttnUpBlock2D_forward.__get__( + up_block, up_block.__class__ + ) + else: + up_block.forward = UpBlock2D_forward.__get__( + up_block, up_block.__class__ + ) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + image = image * (1 - mask / 255.0) + img_h, img_w = image.shape[:2] + + image = PIL.Image.fromarray(image.astype(np.uint8)) + mask = PIL.Image.fromarray(mask[:, :, -1], mode="L").convert("RGB") + + promptA, promptB, negative_promptA, negative_promptB = task_to_prompt( + config.powerpaint_task + ) + + output = self.model( + image=image, + mask=mask, + promptA=promptA, + promptB=promptB, + promptU=config.prompt, + tradoff=config.fitting_degree, + tradoff_nag=config.fitting_degree, + negative_promptA=negative_promptA, + negative_promptB=negative_promptB, + negative_promptU=config.negative_prompt, + num_inference_steps=config.sd_steps, + # strength=config.sd_strength, + brushnet_conditioning_scale=1.0, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/power_paint/powerpaint_tokenizer.py b/inpaint/model/power_paint/powerpaint_tokenizer.py new file mode 100644 index 0000000..53a68c9 --- /dev/null +++ b/inpaint/model/power_paint/powerpaint_tokenizer.py @@ -0,0 +1,254 @@ +import copy +import random +from typing import Any, List, Union +from transformers import CLIPTokenizer + +from iopaint.schema import PowerPaintTask + + +def add_task_to_prompt(prompt, negative_prompt, task: PowerPaintTask): + if task == PowerPaintTask.object_remove: + promptA = prompt + " P_ctxt" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + " P_obj" + negative_promptB = negative_prompt + " P_obj" + elif task == PowerPaintTask.context_aware: + promptA = prompt + " P_ctxt" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + negative_promptB = negative_prompt + elif task == PowerPaintTask.shape_guided: + promptA = prompt + " P_shape" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + negative_promptB = negative_prompt + elif task == PowerPaintTask.outpainting: + promptA = prompt + " P_ctxt" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + " P_obj" + negative_promptB = negative_prompt + " P_obj" + else: + promptA = prompt + " P_obj" + promptB = prompt + " P_obj" + negative_promptA = negative_prompt + negative_promptB = negative_prompt + + return promptA, promptB, negative_promptA, negative_promptB + + +def 
task_to_prompt(task: PowerPaintTask): + promptA, promptB, negative_promptA, negative_promptB = add_task_to_prompt( + "", "", task + ) + return ( + promptA.strip(), + promptB.strip(), + negative_promptA.strip(), + negative_promptB.strip(), + ) + + +class PowerPaintTokenizer: + def __init__(self, tokenizer: CLIPTokenizer): + self.wrapped = tokenizer + self.token_map = {} + placeholder_tokens = ["P_ctxt", "P_shape", "P_obj"] + num_vec_per_token = 10 + for placeholder_token in placeholder_tokens: + output = [] + for i in range(num_vec_per_token): + ith_token = placeholder_token + f"_{i}" + output.append(ith_token) + self.token_map[placeholder_token] = output + + def __getattr__(self, name: str) -> Any: + if name == "wrapped": + return super().__getattr__("wrapped") + + try: + return getattr(self.wrapped, name) + except AttributeError: + try: + return super().__getattr__(name) + except AttributeError: + raise AttributeError( + "'name' cannot be found in both " + f"'{self.__class__.__name__}' and " + f"'{self.__class__.__name__}.tokenizer'." + ) + + def try_adding_tokens(self, tokens: Union[str, List[str]], *args, **kwargs): + """Attempt to add tokens to the tokenizer. + + Args: + tokens (Union[str, List[str]]): The tokens to be added. + """ + num_added_tokens = self.wrapped.add_tokens(tokens, *args, **kwargs) + assert num_added_tokens != 0, ( + f"The tokenizer already contains the token {tokens}. Please pass " + "a different `placeholder_token` that is not already in the " + "tokenizer." + ) + + def get_token_info(self, token: str) -> dict: + """Get the information of a token, including its start and end index in + the current tokenizer. + + Args: + token (str): The token to be queried. + + Returns: + dict: The information of the token, including its start and end + index in current tokenizer. + """ + token_ids = self.__call__(token).input_ids + start, end = token_ids[1], token_ids[-2] + 1 + return {"name": token, "start": start, "end": end} + + def add_placeholder_token( + self, placeholder_token: str, *args, num_vec_per_token: int = 1, **kwargs + ): + """Add placeholder tokens to the tokenizer. + + Args: + placeholder_token (str): The placeholder token to be added. + num_vec_per_token (int, optional): The number of vectors of + the added placeholder token. + *args, **kwargs: The arguments for `self.wrapped.add_tokens`. + """ + output = [] + if num_vec_per_token == 1: + self.try_adding_tokens(placeholder_token, *args, **kwargs) + output.append(placeholder_token) + else: + output = [] + for i in range(num_vec_per_token): + ith_token = placeholder_token + f"_{i}" + self.try_adding_tokens(ith_token, *args, **kwargs) + output.append(ith_token) + + for token in self.token_map: + if token in placeholder_token: + raise ValueError( + f"The tokenizer already has placeholder token {token} " + f"that can get confused with {placeholder_token} " + "keep placeholder tokens independent" + ) + self.token_map[placeholder_token] = output + + def replace_placeholder_tokens_in_text( + self, + text: Union[str, List[str]], + vector_shuffle: bool = False, + prop_tokens_to_load: float = 1.0, + ) -> Union[str, List[str]]: + """Replace the keywords in text with placeholder tokens. This function + will be called in `self.__call__` and `self.encode`. + + Args: + text (Union[str, List[str]]): The text to be processed. + vector_shuffle (bool, optional): Whether to shuffle the vectors. + Defaults to False. + prop_tokens_to_load (float, optional): The proportion of tokens to + be loaded. If 1.0, all tokens will be loaded. 
Defaults to 1.0. + + Returns: + Union[str, List[str]]: The processed text. + """ + if isinstance(text, list): + output = [] + for i in range(len(text)): + output.append( + self.replace_placeholder_tokens_in_text( + text[i], vector_shuffle=vector_shuffle + ) + ) + return output + + for placeholder_token in self.token_map: + if placeholder_token in text: + tokens = self.token_map[placeholder_token] + tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)] + if vector_shuffle: + tokens = copy.copy(tokens) + random.shuffle(tokens) + text = text.replace(placeholder_token, " ".join(tokens)) + return text + + def replace_text_with_placeholder_tokens( + self, text: Union[str, List[str]] + ) -> Union[str, List[str]]: + """Replace the placeholder tokens in text with the original keywords. + This function will be called in `self.decode`. + + Args: + text (Union[str, List[str]]): The text to be processed. + + Returns: + Union[str, List[str]]: The processed text. + """ + if isinstance(text, list): + output = [] + for i in range(len(text)): + output.append(self.replace_text_with_placeholder_tokens(text[i])) + return output + + for placeholder_token, tokens in self.token_map.items(): + merged_tokens = " ".join(tokens) + if merged_tokens in text: + text = text.replace(merged_tokens, placeholder_token) + return text + + def __call__( + self, + text: Union[str, List[str]], + *args, + vector_shuffle: bool = False, + prop_tokens_to_load: float = 1.0, + **kwargs, + ): + """The call function of the wrapper. + + Args: + text (Union[str, List[str]]): The text to be tokenized. + vector_shuffle (bool, optional): Whether to shuffle the vectors. + Defaults to False. + prop_tokens_to_load (float, optional): The proportion of tokens to + be loaded. If 1.0, all tokens will be loaded. Defaults to 1.0 + *args, **kwargs: The arguments for `self.wrapped.__call__`. + """ + replaced_text = self.replace_placeholder_tokens_in_text( + text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load + ) + + return self.wrapped.__call__(replaced_text, *args, **kwargs) + + def encode(self, text: Union[str, List[str]], *args, **kwargs): + """Encode the passed text to token index. + + Args: + text (Union[str, List[str]]): The text to be encode. + *args, **kwargs: The arguments for `self.wrapped.__call__`. + """ + replaced_text = self.replace_placeholder_tokens_in_text(text) + return self.wrapped(replaced_text, *args, **kwargs) + + def decode( + self, token_ids, return_raw: bool = False, *args, **kwargs + ) -> Union[str, List[str]]: + """Decode the token index to text. + + Args: + token_ids: The token index to be decoded. + return_raw: Whether keep the placeholder token in the text. + Defaults to False. + *args, **kwargs: The arguments for `self.wrapped.decode`. + + Returns: + Union[str, List[str]]: The decoded text. 
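An illustrative aside on the placeholder handling this wrapper performs before delegating to the underlying CLIP tokenizer; the prompt string is an arbitrary example and the mapping mirrors the `token_map` built in `__init__`.

```py
# Each placeholder ("P_ctxt", "P_shape", "P_obj") expands to ten sub-tokens
# before tokenization, matching replace_placeholder_tokens_in_text above.
token_map = {p: [f"{p}_{i}" for i in range(10)] for p in ("P_ctxt", "P_shape", "P_obj")}

text = "an empty park bench P_ctxt"          # e.g. a context-aware prompt
for placeholder, tokens in token_map.items():
    if placeholder in text:
        text = text.replace(placeholder, " ".join(tokens))

print(text)   # "an empty park bench P_ctxt_0 P_ctxt_1 ... P_ctxt_9"
# This expanded string is what reaches the wrapped CLIPTokenizer.
```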
+ """ + text = self.wrapped.decode(token_ids, *args, **kwargs) + if return_raw: + return text + replaced_text = self.replace_text_with_placeholder_tokens(text) + return replaced_text diff --git a/inpaint/model/power_paint/v2/BrushNet_CA.py b/inpaint/model/power_paint/v2/BrushNet_CA.py new file mode 100644 index 0000000..b892c84 --- /dev/null +++ b/inpaint/model/power_paint/v2/BrushNet_CA.py @@ -0,0 +1,1094 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from diffusers import UNet2DConditionModel +from diffusers.models.unet_2d_blocks import ( + get_down_block, + get_mid_block, + get_up_block, + CrossAttnDownBlock2D, + DownBlock2D, +) +from torch import nn + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.utils import BaseOutput, logging +from diffusers.models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from diffusers.models.embeddings import ( + TextImageProjection, + TextImageTimeEmbedding, + TextTimeEmbedding, + TimestepEmbedding, + Timesteps, +) +from diffusers.models.modeling_utils import ModelMixin + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class BrushNetOutput(BaseOutput): + """ + The output of [`BrushNetModel`]. + + Args: + up_block_res_samples (`tuple[torch.Tensor]`): + A tuple of upsample activations at different resolutions for each upsampling block. Each tensor should + be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be + used to condition the original UNet's upsampling activations. + down_block_res_samples (`tuple[torch.Tensor]`): + A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should + be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be + used to condition the original UNet's downsampling activations. + mid_down_block_re_sample (`torch.Tensor`): + The activation of the midde block (the lowest sample resolution). Each tensor should be of shape + `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`. + Output can be used to condition the original UNet's middle block activation. + """ + + up_block_res_samples: Tuple[torch.Tensor] + down_block_res_samples: Tuple[torch.Tensor] + mid_block_res_sample: torch.Tensor + + +class BrushNetModel(ModelMixin, ConfigMixin): + """ + A BrushNet model. + + Args: + in_channels (`int`, defaults to 4): + The number of channels in the input sample. + flip_sin_to_cos (`bool`, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, defaults to 0): + The frequency shift to apply to the time embedding. + down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): + Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or + `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. 
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): + block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, defaults to 2): + The number of layers per block. + downsample_padding (`int`, defaults to 1): + The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, defaults to 1): + The scale factor to use for the mid block. + act_fn (`str`, defaults to "silu"): + The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups to use for the normalization. If None, normalization and activation layers is skipped + in post-processing. + norm_eps (`float`, defaults to 1e-5): + The epsilon to use for the normalization. + cross_attention_dim (`int`, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], + [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. + encoder_hid_dim (`int`, *optional*, defaults to None): + If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` + dimension to `cross_attention_dim`. + encoder_hid_dim_type (`str`, *optional*, defaults to `None`): + If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text + embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. + attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): + The dimension of the attention heads. + use_linear_projection (`bool`, defaults to `False`): + class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None, + `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. + addition_embed_type (`str`, *optional*, defaults to `None`): + Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or + "text". "text" will use the `TextTimeEmbedding` layer. + num_class_embeds (`int`, *optional*, defaults to 0): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing + class conditioning with `class_embed_type` equal to `None`. + upcast_attention (`bool`, defaults to `False`): + resnet_time_scale_shift (`str`, defaults to `"default"`): + Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. + projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): + The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when + `class_embed_type="projection"`. + brushnet_conditioning_channel_order (`str`, defaults to `"rgb"`): + The channel order of conditional image. Will convert to `rgb` if it's `bgr`. + conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`): + The tuple of output channel for each block in the `conditioning_embedding` layer. + global_pool_conditions (`bool`, defaults to `False`): + TODO(Patrick) - unused parameter. + addition_embed_type_num_heads (`int`, defaults to 64): + The number of heads to use for the `TextTimeEmbedding` layer. 
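One argument above that is easy to misread is `conditioning_channels`. A brief, hedged note follows; the tensors are stand-ins, and the 4 + 1 split reflects how the PowerPaint v2 pipeline appears to pack its conditioning.

```py
import torch

# conditioning_channels defaults to 5: four VAE latent channels for the masked
# image plus one channel for the (downscaled) inpainting mask. Together with
# in_channels=4 this yields the 9-channel input of conv_in_condition below.
masked_image_latents = torch.randn(1, 4, 64, 64)
mask = torch.randn(1, 1, 64, 64)
brushnet_cond = torch.cat([masked_image_latents, mask], dim=1)   # (1, 5, 64, 64)
```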
+ """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 4, + conditioning_channels: int = 5, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str, ...] = ( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", + up_block_types: Tuple[str, ...] = ( + "UpBlock2D", + "CrossAttnUpBlock2D", + "CrossAttnUpBlock2D", + "CrossAttnUpBlock2D", + ), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), + layers_per_block: int = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: int = 1280, + transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + attention_head_dim: Union[int, Tuple[int, ...]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + addition_embed_type: Optional[str] = None, + addition_time_embed_dim: Optional[int] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + projection_class_embeddings_input_dim: Optional[int] = None, + brushnet_conditioning_channel_order: str = "rgb", + conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = ( + 16, + 32, + 96, + 256, + ), + global_pool_conditions: bool = False, + addition_embed_type_num_heads: int = 64, + ): + super().__init__() + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len( + only_cross_attention + ) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len( + down_block_types + ): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
+ ) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len( + down_block_types + ) + + # input + conv_in_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in_condition = nn.Conv2d( + in_channels + conditioning_channels, + block_out_channels[0], + kernel_size=conv_in_kernel, + padding=conv_in_padding, + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + ) + + if encoder_hid_dim_type is None and encoder_hid_dim is not None: + encoder_hid_dim_type = "text_proj" + self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) + logger.info( + "encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined." + ) + + if encoder_hid_dim is None and encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." + ) + + if encoder_hid_dim_type == "text_proj": + self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) + elif encoder_hid_dim_type == "text_image_proj": + # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` + self.encoder_hid_proj = TextImageProjection( + text_embed_dim=encoder_hid_dim, + image_embed_dim=cross_attention_dim, + cross_attention_dim=cross_attention_dim, + ) + + elif encoder_hid_dim_type is not None: + raise ValueError( + f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." + ) + else: + self.encoder_hid_proj = None + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
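A minimal sketch of the point made in the comment above, that `TimestepEmbedding` is a plain MLP and can embed any fixed-size vector, not only sinusoidal timestep features; the dimensions below are arbitrary.

```py
import torch
from diffusers.models.embeddings import TimestepEmbedding

# `class_embed_type="projection"` feeds class_labels of an arbitrary dimension
# straight through this MLP to obtain a time_embed_dim-sized embedding.
proj = TimestepEmbedding(in_channels=16, time_embed_dim=1280)
class_labels = torch.randn(2, 16)        # not sinusoidal features, just a vector
emb = proj(class_labels)                 # shape: (2, 1280)
```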
+ self.class_embedding = TimestepEmbedding( + projection_class_embeddings_input_dim, time_embed_dim + ) + else: + self.class_embedding = None + + if addition_embed_type == "text": + if encoder_hid_dim is not None: + text_time_embedding_from_dim = encoder_hid_dim + else: + text_time_embedding_from_dim = cross_attention_dim + + self.add_embedding = TextTimeEmbedding( + text_time_embedding_from_dim, + time_embed_dim, + num_heads=addition_embed_type_num_heads, + ) + elif addition_embed_type == "text_image": + # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` + self.add_embedding = TextImageTimeEmbedding( + text_embed_dim=cross_attention_dim, + image_embed_dim=cross_attention_dim, + time_embed_dim=time_embed_dim, + ) + elif addition_embed_type == "text_time": + self.add_time_proj = Timesteps( + addition_time_embed_dim, flip_sin_to_cos, freq_shift + ) + self.add_embedding = TimestepEmbedding( + projection_class_embeddings_input_dim, time_embed_dim + ) + + elif addition_embed_type is not None: + raise ValueError( + f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'." + ) + + self.down_blocks = nn.ModuleList([]) + self.brushnet_down_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + + brushnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_down_blocks.append(brushnet_block) + + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[i], + attention_head_dim=attention_head_dim[i] + if attention_head_dim[i] is not None + else output_channel, + downsample_padding=downsample_padding, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + self.down_blocks.append(down_block) + + for _ in range(layers_per_block): + brushnet_block = nn.Conv2d( + output_channel, output_channel, kernel_size=1 + ) + brushnet_block = zero_module(brushnet_block) + self.brushnet_down_blocks.append(brushnet_block) + + if not is_final_block: + brushnet_block = nn.Conv2d( + output_channel, output_channel, kernel_size=1 + ) + brushnet_block = zero_module(brushnet_block) + self.brushnet_down_blocks.append(brushnet_block) + + # mid + mid_block_channel = block_out_channels[-1] + + brushnet_block = 
nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) + brushnet_block = zero_module(brushnet_block) + self.brushnet_mid_block = brushnet_block + + self.mid_block = get_mid_block( + mid_block_type, + transformer_layers_per_block=transformer_layers_per_block[-1], + in_channels=mid_block_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_transformer_layers_per_block = list( + reversed(transformer_layers_per_block) + ) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + + self.up_blocks = nn.ModuleList([]) + self.brushnet_up_blocks = nn.ModuleList([]) + + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[ + min(i + 1, len(block_out_channels) - 1) + ] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resolution_idx=i, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=reversed_num_attention_heads[i], + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_head_dim=attention_head_dim[i] + if attention_head_dim[i] is not None + else output_channel, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + for _ in range(layers_per_block + 1): + brushnet_block = nn.Conv2d( + output_channel, output_channel, kernel_size=1 + ) + brushnet_block = zero_module(brushnet_block) + self.brushnet_up_blocks.append(brushnet_block) + + if not is_final_block: + brushnet_block = nn.Conv2d( + output_channel, output_channel, kernel_size=1 + ) + brushnet_block = zero_module(brushnet_block) + self.brushnet_up_blocks.append(brushnet_block) + + @classmethod + def from_unet( + cls, + unet: UNet2DConditionModel, + brushnet_conditioning_channel_order: str = "rgb", + conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = ( + 16, + 32, + 96, + 256, + ), + load_weights_from_unet: bool = True, + conditioning_channels: int = 5, + ): + r""" + Instantiate a [`BrushNetModel`] from [`UNet2DConditionModel`]. + + Parameters: + unet (`UNet2DConditionModel`): + The UNet model weights to copy to the [`BrushNetModel`]. All configuration options are also copied + where applicable. 
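A hedged usage sketch of `from_unet`: the import path follows this patch's layout, the model id is only an example, and running it downloads the UNet weights and requires a diffusers version compatible with this file's imports.

```py
import torch
from diffusers import UNet2DConditionModel

# Path per this patch's layout; adjust if the package is installed differently.
from inpaint.model.power_paint.v2.BrushNet_CA import BrushNetModel

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
)
brushnet = BrushNetModel.from_unet(unet, load_weights_from_unet=True)

# conv_in_condition consumes latent (4) + conditioning (5) channels.
assert brushnet.conv_in_condition.weight.shape[1] == 9
```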
+ """ + transformer_layers_per_block = ( + unet.config.transformer_layers_per_block + if "transformer_layers_per_block" in unet.config + else 1 + ) + encoder_hid_dim = ( + unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None + ) + encoder_hid_dim_type = ( + unet.config.encoder_hid_dim_type + if "encoder_hid_dim_type" in unet.config + else None + ) + addition_embed_type = ( + unet.config.addition_embed_type + if "addition_embed_type" in unet.config + else None + ) + addition_time_embed_dim = ( + unet.config.addition_time_embed_dim + if "addition_time_embed_dim" in unet.config + else None + ) + + brushnet = cls( + in_channels=unet.config.in_channels, + conditioning_channels=conditioning_channels, + flip_sin_to_cos=unet.config.flip_sin_to_cos, + freq_shift=unet.config.freq_shift, + # down_block_types=['DownBlock2D','DownBlock2D','DownBlock2D','DownBlock2D'], + down_block_types=[ + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ], + # mid_block_type='MidBlock2D', + mid_block_type="UNetMidBlock2DCrossAttn", + # up_block_types=['UpBlock2D','UpBlock2D','UpBlock2D','UpBlock2D'], + up_block_types=[ + "UpBlock2D", + "CrossAttnUpBlock2D", + "CrossAttnUpBlock2D", + "CrossAttnUpBlock2D", + ], + only_cross_attention=unet.config.only_cross_attention, + block_out_channels=unet.config.block_out_channels, + layers_per_block=unet.config.layers_per_block, + downsample_padding=unet.config.downsample_padding, + mid_block_scale_factor=unet.config.mid_block_scale_factor, + act_fn=unet.config.act_fn, + norm_num_groups=unet.config.norm_num_groups, + norm_eps=unet.config.norm_eps, + cross_attention_dim=unet.config.cross_attention_dim, + transformer_layers_per_block=transformer_layers_per_block, + encoder_hid_dim=encoder_hid_dim, + encoder_hid_dim_type=encoder_hid_dim_type, + attention_head_dim=unet.config.attention_head_dim, + num_attention_heads=unet.config.num_attention_heads, + use_linear_projection=unet.config.use_linear_projection, + class_embed_type=unet.config.class_embed_type, + addition_embed_type=addition_embed_type, + addition_time_embed_dim=addition_time_embed_dim, + num_class_embeds=unet.config.num_class_embeds, + upcast_attention=unet.config.upcast_attention, + resnet_time_scale_shift=unet.config.resnet_time_scale_shift, + projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, + brushnet_conditioning_channel_order=brushnet_conditioning_channel_order, + conditioning_embedding_out_channels=conditioning_embedding_out_channels, + ) + + if load_weights_from_unet: + conv_in_condition_weight = torch.zeros_like( + brushnet.conv_in_condition.weight + ) + conv_in_condition_weight[:, :4, ...] = unet.conv_in.weight + conv_in_condition_weight[:, 4:8, ...] 
= unet.conv_in.weight + brushnet.conv_in_condition.weight = torch.nn.Parameter( + conv_in_condition_weight + ) + brushnet.conv_in_condition.bias = unet.conv_in.bias + + brushnet.time_proj.load_state_dict(unet.time_proj.state_dict()) + brushnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) + + if brushnet.class_embedding: + brushnet.class_embedding.load_state_dict( + unet.class_embedding.state_dict() + ) + + brushnet.down_blocks.load_state_dict( + unet.down_blocks.state_dict(), strict=False + ) + brushnet.mid_block.load_state_dict( + unet.mid_block.state_dict(), strict=False + ) + brushnet.up_blocks.load_state_dict( + unet.up_blocks.state_dict(), strict=False + ) + + return brushnet.to(unet.dtype) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors( + name: str, + module: torch.nn.Module, + processors: Dict[str, AttentionProcessor], + ): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor( + return_deprecated_lora=True + ) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor( + self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]] + ): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. 
+ """ + if all( + proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS + for proc in self.attn_processors.values() + ): + processor = AttnAddedKVProcessor() + elif all( + proc.__class__ in CROSS_ATTENTION_PROCESSORS + for proc in self.attn_processors.values() + ): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice + def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. + """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = ( + num_sliceable_layers * [slice_size] + if not isinstance(slice_size, list) + else slice_size + ) + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. 
+ # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice( + module: torch.nn.Module, slice_size: List[int] + ): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): + module.gradient_checkpointing = value + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + brushnet_cond: torch.FloatTensor, + conditioning_scale: float = 1.0, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guess_mode: bool = False, + return_dict: bool = True, + ) -> Union[BrushNetOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]: + """ + The [`BrushNetModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor. + timestep (`Union[torch.Tensor, float, int]`): + The number of timesteps to denoise an input. + encoder_hidden_states (`torch.Tensor`): + The encoder hidden states. + brushnet_cond (`torch.FloatTensor`): + The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. + conditioning_scale (`float`, defaults to `1.0`): + The scale factor for BrushNet outputs. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): + Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the + timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep + embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + added_cond_kwargs (`dict`): + Additional conditions for the Stable Diffusion XL UNet. + cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): + A kwargs dictionary that if specified is passed along to the `AttnProcessor`. + guess_mode (`bool`, defaults to `False`): + In this mode, the BrushNet encoder tries its best to recognize the input content of the input even if + you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. + return_dict (`bool`, defaults to `True`): + Whether or not to return a [`~models.brushnet.BrushNetOutput`] instead of a plain tuple. + + Returns: + [`~models.brushnet.BrushNetOutput`] **or** `tuple`: + If `return_dict` is `True`, a [`~models.brushnet.BrushNetOutput`] is returned, otherwise a tuple is + returned where the first element is the sample tensor. 
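To show what a caller receives from this forward method, here is a shape-only construction of the output object; the tensors are dummies and the import path follows this patch's layout.

```py
import torch
from inpaint.model.power_paint.v2.BrushNet_CA import BrushNetOutput

# With return_dict=True the forward pass returns a BrushNetOutput; with
# return_dict=False it returns the plain tuple (down, mid, up) in that order.
out = BrushNetOutput(
    down_block_res_samples=(torch.zeros(1, 320, 64, 64),),
    mid_block_res_sample=torch.zeros(1, 1280, 8, 8),
    up_block_res_samples=(torch.zeros(1, 320, 64, 64),),
)
print(out.mid_block_res_sample.shape)   # torch.Size([1, 1280, 8, 8])
```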
+ """ + # check channel order + channel_order = self.config.brushnet_conditioning_channel_order + + if channel_order == "rgb": + # in rgb order by default + ... + elif channel_order == "bgr": + brushnet_cond = torch.flip(brushnet_cond, dims=[1]) + else: + raise ValueError( + f"unknown `brushnet_conditioning_channel_order`: {channel_order}" + ) + + # prepare attention_mask + if attention_mask is not None: + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError( + "class_labels should be provided when num_class_embeds > 0" + ) + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) + emb = emb + class_emb + + if self.config.addition_embed_type is not None: + if self.config.addition_embed_type == "text": + aug_emb = self.add_embedding(encoder_hidden_states) + + elif self.config.addition_embed_type == "text_time": + if "text_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if "time_ids" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + time_embeds = self.add_time_proj(time_ids.flatten()) + time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) + + add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) + add_embeds = add_embeds.to(emb.dtype) + aug_emb = self.add_embedding(add_embeds) + + emb = emb + aug_emb if aug_emb is not None else emb + + # 2. pre-process + brushnet_cond = torch.concat([sample, brushnet_cond], 1) + sample = self.conv_in_condition(brushnet_cond) + + # 3. 
down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if ( + hasattr(downsample_block, "has_cross_attention") + and downsample_block.has_cross_attention + ): + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. PaintingNet down blocks + brushnet_down_block_res_samples = () + for down_block_res_sample, brushnet_down_block in zip( + down_block_res_samples, self.brushnet_down_blocks + ): + down_block_res_sample = brushnet_down_block(down_block_res_sample) + brushnet_down_block_res_samples = brushnet_down_block_res_samples + ( + down_block_res_sample, + ) + + # 5. mid + if self.mid_block is not None: + if ( + hasattr(self.mid_block, "has_cross_attention") + and self.mid_block.has_cross_attention + ): + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = self.mid_block(sample, emb) + + # 6. BrushNet mid blocks + brushnet_mid_block_res_sample = self.brushnet_mid_block(sample) + + # 7. up + up_block_res_samples = () + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[ + : -len(upsample_block.resnets) + ] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block: + upsample_size = down_block_res_samples[-1].shape[2:] + + if ( + hasattr(upsample_block, "has_cross_attention") + and upsample_block.has_cross_attention + ): + sample, up_res_samples = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + return_res_samples=True, + ) + else: + sample, up_res_samples = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + return_res_samples=True, + ) + + up_block_res_samples += up_res_samples + + # 8. BrushNet up blocks + brushnet_up_block_res_samples = () + for up_block_res_sample, brushnet_up_block in zip( + up_block_res_samples, self.brushnet_up_blocks + ): + up_block_res_sample = brushnet_up_block(up_block_res_sample) + brushnet_up_block_res_samples = brushnet_up_block_res_samples + ( + up_block_res_sample, + ) + + # 6. 
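Before the scaling step that follows, a toy sketch of the guess-mode weighting; the residual counts are invented and only the shape of the ramp matters.

```py
import torch

# In guess mode each residual gets its own weight, ramping log-uniformly from
# 0.1 (first down-block residual) to 1.0 (last up-block residual), and the
# whole ramp is then multiplied by conditioning_scale.
num_down, num_up = 12, 12                 # invented counts; +1 below is the mid block
conditioning_scale = 1.0
scales = torch.logspace(-1, 0, num_down + 1 + num_up) * conditioning_scale
down_scales = scales[:num_down]
mid_scale = scales[num_down]
up_scales = scales[num_down + 1:]
```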
scaling + if guess_mode and not self.config.global_pool_conditions: + scales = torch.logspace( + -1, + 0, + len(brushnet_down_block_res_samples) + + 1 + + len(brushnet_up_block_res_samples), + device=sample.device, + ) # 0.1 to 1.0 + scales = scales * conditioning_scale + + brushnet_down_block_res_samples = [ + sample * scale + for sample, scale in zip( + brushnet_down_block_res_samples, + scales[: len(brushnet_down_block_res_samples)], + ) + ] + brushnet_mid_block_res_sample = ( + brushnet_mid_block_res_sample + * scales[len(brushnet_down_block_res_samples)] + ) + brushnet_up_block_res_samples = [ + sample * scale + for sample, scale in zip( + brushnet_up_block_res_samples, + scales[len(brushnet_down_block_res_samples) + 1 :], + ) + ] + else: + brushnet_down_block_res_samples = [ + sample * conditioning_scale + for sample in brushnet_down_block_res_samples + ] + brushnet_mid_block_res_sample = ( + brushnet_mid_block_res_sample * conditioning_scale + ) + brushnet_up_block_res_samples = [ + sample * conditioning_scale for sample in brushnet_up_block_res_samples + ] + + if self.config.global_pool_conditions: + brushnet_down_block_res_samples = [ + torch.mean(sample, dim=(2, 3), keepdim=True) + for sample in brushnet_down_block_res_samples + ] + brushnet_mid_block_res_sample = torch.mean( + brushnet_mid_block_res_sample, dim=(2, 3), keepdim=True + ) + brushnet_up_block_res_samples = [ + torch.mean(sample, dim=(2, 3), keepdim=True) + for sample in brushnet_up_block_res_samples + ] + + if not return_dict: + return ( + brushnet_down_block_res_samples, + brushnet_mid_block_res_sample, + brushnet_up_block_res_samples, + ) + + return BrushNetOutput( + down_block_res_samples=brushnet_down_block_res_samples, + mid_block_res_sample=brushnet_mid_block_res_sample, + up_block_res_samples=brushnet_up_block_res_samples, + ) + + +def zero_module(module): + for p in module.parameters(): + nn.init.zeros_(p) + return module diff --git a/inpaint/model/power_paint/v2/__init__.py b/inpaint/model/power_paint/v2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/model/power_paint/v2/pipeline_PowerPaint_Brushnet_CA.py b/inpaint/model/power_paint/v2/pipeline_PowerPaint_Brushnet_CA.py new file mode 100644 index 0000000..c1892e6 --- /dev/null +++ b/inpaint/model/power_paint/v2/pipeline_PowerPaint_Brushnet_CA.py @@ -0,0 +1,1690 @@ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from diffusers import StableDiffusionMixin, UNet2DConditionModel +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.image_processor import PipelineImageInput, VaeImageProcessor +from diffusers.loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + LoraLoaderMixin, + TextualInversionLoaderMixin, +) +from diffusers.models import AutoencoderKL, ImageProjection +from diffusers.models.lora import adjust_lora_scale_text_encoder +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from diffusers.utils.torch_utils import ( + is_compiled_module, + is_torch_version, + randn_tensor, +) +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion.pipeline_output import ( + StableDiffusionPipelineOutput, +) +from 
diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) + +from .BrushNet_CA import BrushNetModel + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import StableDiffusionBrushNetPipeline, BrushNetModel, UniPCMultistepScheduler + from diffusers.utils import load_image + import torch + import cv2 + import numpy as np + from PIL import Image + + base_model_path = "runwayml/stable-diffusion-v1-5" + brushnet_path = "ckpt_path" + + brushnet = BrushNetModel.from_pretrained(brushnet_path, torch_dtype=torch.float16) + pipe = StableDiffusionBrushNetPipeline.from_pretrained( + base_model_path, brushnet=brushnet, torch_dtype=torch.float16, low_cpu_mem_usage=False + ) + + # speed up diffusion process with faster scheduler and memory optimization + pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + # remove following line if xformers is not installed or when using Torch 2.0. + # pipe.enable_xformers_memory_efficient_attention() + # memory optimization. + pipe.enable_model_cpu_offload() + + image_path="examples/brushnet/src/test_image.jpg" + mask_path="examples/brushnet/src/test_mask.jpg" + caption="A cake on the table." + + init_image = cv2.imread(image_path) + mask_image = 1.*(cv2.imread(mask_path).sum(-1)>255)[:,:,np.newaxis] + init_image = init_image * (1-mask_image) + + init_image = Image.fromarray(init_image.astype(np.uint8)).convert("RGB") + mask_image = Image.fromarray(mask_image.astype(np.uint8).repeat(3,-1)*255).convert("RGB") + + generator = torch.Generator("cuda").manual_seed(1234) + + image = pipe( + caption, + init_image, + mask_image, + num_inference_steps=50, + generator=generator, + paintingnet_conditioning_scale=1.0 + ).images[0] + image.save("output.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set( + inspect.signature(scheduler.set_timesteps).parameters.keys() + ) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionPowerPaintBrushNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with BrushNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + brushnet ([`BrushNetModel`]`): + Provides additional conditioning to the `unet` during the denoising process. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_brushnet: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + brushnet: BrushNetModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_brushnet=text_encoder_brushnet, + tokenizer=tokenizer, + unet=unet, + brushnet=brushnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + promptA, + promptB, + t, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA=None, + negative_promptB=None, + t_nag=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + prompt = promptA + negative_prompt = negative_promptA + + if promptA is not None and isinstance(promptA, str): + batch_size = 1 + elif promptA is not None and isinstance(promptA, list): + batch_size = len(promptA) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + promptA = self.maybe_convert_prompt(promptA, self.tokenizer) + + text_inputsA = self.tokenizer( + promptA, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputsB = self.tokenizer( + promptB, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_idsA = text_inputsA.input_ids + text_input_idsB = text_inputsB.input_ids + untruncated_ids = self.tokenizer( + promptA, padding="longest", return_tensors="pt" + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_idsA.shape[ + -1 + ] and not torch.equal(text_input_idsA, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if ( + hasattr(self.text_encoder_brushnet.config, "use_attention_mask") + and self.text_encoder_brushnet.config.use_attention_mask + ): + attention_mask = text_inputsA.attention_mask.to(device) + else: + attention_mask = None + + # print("text_input_idsA: ",text_input_idsA) + # print("text_input_idsB: ",text_input_idsB) + # print('t: ',t) + + prompt_embedsA = self.text_encoder_brushnet( + text_input_idsA.to(device), + attention_mask=attention_mask, + ) + prompt_embedsA = prompt_embedsA[0] + + prompt_embedsB = self.text_encoder_brushnet( + text_input_idsB.to(device), + attention_mask=attention_mask, + ) + prompt_embedsB = prompt_embedsB[0] + prompt_embeds = prompt_embedsA * (t) + (1 - t) * prompt_embedsB + # print("prompt_embeds: ",prompt_embeds) + + if self.text_encoder_brushnet is not None: + prompt_embeds_dtype = self.text_encoder_brushnet.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view( + bs_embed * num_images_per_prompt, seq_len, -1 + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokensA: List[str] + uncond_tokensB: List[str] + if negative_prompt is None: + uncond_tokensA = [""] * batch_size + uncond_tokensB = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokensA = [negative_promptA] + uncond_tokensB = [negative_promptB] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokensA = negative_promptA + uncond_tokensB = negative_promptB + + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokensA = self.maybe_convert_prompt( + uncond_tokensA, self.tokenizer + ) + uncond_tokensB = self.maybe_convert_prompt( + uncond_tokensB, self.tokenizer + ) + + max_length = prompt_embeds.shape[1] + uncond_inputA = self.tokenizer( + uncond_tokensA, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_inputB = self.tokenizer( + uncond_tokensB, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if ( + hasattr(self.text_encoder_brushnet.config, "use_attention_mask") + and self.text_encoder_brushnet.config.use_attention_mask + ): + attention_mask = uncond_inputA.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embedsA = self.text_encoder_brushnet( + uncond_inputA.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embedsB = self.text_encoder_brushnet( + uncond_inputB.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = ( + negative_prompt_embedsA[0] * (t_nag) + + (1 - t_nag) * negative_prompt_embedsB[0] + ) + + # negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to( + dtype=prompt_embeds_dtype, device=device + ) + + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_images_per_prompt, 1 + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # print("prompt_embeds: ",prompt_embeds) + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + # print('1 ',prompt,negative_prompt) + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + # print('2 ',prompt,negative_prompt) + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + # print('3 ',prompt,negative_prompt) + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + # print('4 ',prompt,negative_prompt) + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + # print('5 ',prompt,negative_prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + # print(prompt, text_input_ids) + untruncated_ids = self.tokenizer( + prompt, padding="longest", return_tensors="pt" + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[ + -1 + ] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask + ) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + output_hidden_states=True, + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. 
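                # Editorial note: the same `clip_skip` selection can be reproduced
                # outside the pipeline. In the minimal sketch below, the model name and
                # the `clip_skip` value are assumptions; the final LayerNorm is applied
                # exactly as in the code that follows.

                ```py
                # Standalone sketch of clip_skip: take an earlier hidden state of the
                # CLIP text encoder and re-apply the final LayerNorm.
                import torch
                from transformers import CLIPTextModel, CLIPTokenizer

                tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
                text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

                clip_skip = 2
                ids = tokenizer("A cake on the table.", return_tensors="pt").input_ids
                with torch.no_grad():
                    out = text_encoder(ids, output_hidden_states=True)
                    hidden = out.hidden_states[-(clip_skip + 1)]  # skip the last `clip_skip` layers
                    # intermediate hidden states have not passed through the final
                    # LayerNorm, so apply it to keep the representation consistent
                    prompt_embeds = text_encoder.text_model.final_layer_norm(hidden)
                print(prompt_embeds.shape)
                ```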
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm( + prompt_embeds + ) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view( + bs_embed * num_images_per_prompt, seq_len, -1 + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + # print("neg: ", uncond_input.input_ids) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to( + dtype=prompt_embeds_dtype, device=device + ) + + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_images_per_prompt, 1 + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image( + self, image, device, num_images_per_prompt, 
output_hidden_states=None + ): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder( + image, output_hidden_states=True + ).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = ( + uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, + ip_adapter_image, + ip_adapter_image_embeds, + device, + num_images_per_prompt, + do_classifier_free_guidance, + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len( + self.unet.encoder_hid_proj.image_projection_layers + ): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack( + [single_image_embeds] * num_images_per_prompt, dim=0 + ) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat( + [single_negative_image_embeds, single_image_embeds] + ) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = ( + single_image_embeds.chunk(2) + ) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, + *(repeat_dims * len(single_image_embeds.shape[1:])), + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, + *(repeat_dims * len(single_negative_image_embeds.shape[1:])), + ) + single_image_embeds = torch.cat( + [single_negative_image_embeds, single_image_embeds] + ) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, + *(repeat_dims * len(single_image_embeds.shape[1:])), + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess( + image, output_type="pil" + ) + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor( + feature_extractor_input, return_tensors="pt" + ).to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + mask, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + brushnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and ( + not isinstance(callback_steps, int) or callback_steps <= 0 + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs + for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and ( + not isinstance(prompt, str) and not isinstance(prompt, list) + ): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.brushnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.brushnet, BrushNetModel) + or is_compiled + and isinstance(self.brushnet._orig_mod, BrushNetModel) + ): + self.check_image(image, mask, prompt, prompt_embeds) + else: + assert False + + # Check `brushnet_conditioning_scale` + if ( + isinstance(self.brushnet, BrushNetModel) + or is_compiled + and isinstance(self.brushnet._orig_mod, BrushNetModel) + ): + if not isinstance(brushnet_conditioning_scale, float): + raise TypeError( + "For single brushnet: `brushnet_conditioning_scale` must be type `float`." 
+ ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError( + f"control guidance start: {start} can't be smaller than 0." + ) + if end > 1.0: + raise ValueError( + f"control guidance end: {end} can't be larger than 1.0." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def check_image(self, image, mask, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance( + image[0], PIL.Image.Image + ) + image_is_tensor_list = isinstance(image, list) and isinstance( + image[0], torch.Tensor + ) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + mask_is_pil = isinstance(mask, PIL.Image.Image) + mask_is_tensor = isinstance(mask, torch.Tensor) + mask_is_np = isinstance(mask, np.ndarray) + mask_is_pil_list = isinstance(mask, list) and isinstance( + mask[0], PIL.Image.Image + ) + mask_is_tensor_list = isinstance(mask, list) and isinstance( + mask[0], torch.Tensor + ) + mask_is_np_list = isinstance(mask, list) and isinstance(mask[0], np.ndarray) + + if ( + not mask_is_pil + and not mask_is_tensor + and not mask_is_np + and not mask_is_pil_list + and not mask_is_tensor_list + and not mask_is_np_list + ): + raise TypeError( + f"mask must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(mask)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if 
image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.image_processor.preprocess(image, height=height, width=width).to( + dtype=torch.float32 + ) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image.to(device=device, dtype=dtype) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + shape = ( + batch_size, + num_channels_latents, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + noise = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = noise * self.scheduler.init_noise_sigma + return latents, noise + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
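    # For reference, the combination that `guidance_scale` controls is the standard
    # classifier-free guidance update applied later in the denoising loop. A minimal
    # sketch with stand-in tensors (shapes and values are illustrative only):

    ```py
    # Classifier-free guidance combination controlled by `guidance_scale`.
    # The two predictions are random stand-ins for illustration only.
    import torch

    guidance_scale = 7.5
    noise_pred_uncond = torch.randn(1, 4, 64, 64)  # unconditional prediction
    noise_pred_text = torch.randn(1, 4, 64, 64)    # text-conditioned prediction

    # guidance_scale == 1.0 reduces this to the conditional prediction alone
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    ```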
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + promptA: Union[str, List[str]] = None, + promptB: Union[str, List[str]] = None, + promptU: Union[str, List[str]] = None, + tradoff: float = 1.0, + tradoff_nag: float = 1.0, + image: PipelineImageInput = None, + mask: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_promptA: Optional[Union[str, List[str]]] = None, + negative_promptB: Optional[Union[str, List[str]]] = None, + negative_promptU: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + brushnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The BrushNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.FloatTensor`, it is passed to BrushNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. If multiple BrushNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single BrushNet. When `prompt` is a list, and if a list of images is passed for a single BrushNet, + each will be paired with each prompt in the `prompt` list. This also applies to multiple BrushNets, + where a list of image lists can be passed to batch for each prompt and each BrushNet. + mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The BrushNet input condition to provide guidance to the `unet` for generation. 
If the type is + specified as `torch.FloatTensor`, it is passed to BrushNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. If multiple BrushNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single BrushNet. When `prompt` is a list, and if a list of images is passed for a single BrushNet, + each will be paired with each prompt in the `prompt` list. This also applies to multiple BrushNets, + where a list of image lists can be passed to batch for each prompt and each BrushNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. 
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + brushnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the BrushNet are multiplied by `brushnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple BrushNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The BrushNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the BrushNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the BrushNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. 
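
        A hedged usage sketch of this call is given below. It assumes `pipe` is an
        already-constructed `StableDiffusionPowerPaintBrushNetPipeline` and that
        `init_image` / `mask_image` are RGB PIL images prepared as in the
        module-level example; the prompt strings and the seed are placeholders.

        ```py
        # Usage sketch only: `pipe`, `init_image` and `mask_image` are assumed to exist.
        # Prompt strings and the seed are placeholders for illustration.
        import torch

        generator = torch.Generator("cuda").manual_seed(1234)
        result = pipe(
            promptA="A cake on the table.",  # prompts A/B are blended via `tradoff`
            promptB="A cake on the table.",  # set equal here purely for illustration
            promptU="A cake on the table.",  # prompt for the frozen SD text encoder
            tradoff=1.0,                     # parameter name spelled as in the signature
            tradoff_nag=1.0,
            image=init_image,
            mask=mask_image,
            num_inference_steps=50,
            guidance_scale=7.5,
            brushnet_conditioning_scale=1.0,
            generator=generator,
        )
        result.images[0].save("output.png")
        ```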
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + brushnet = ( + self.brushnet._orig_mod + if is_compiled_module(self.brushnet) + else self.brushnet + ) + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance( + control_guidance_end, list + ): + control_guidance_start = len(control_guidance_end) * [ + control_guidance_start + ] + elif not isinstance(control_guidance_end, list) and isinstance( + control_guidance_start, list + ): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance( + control_guidance_end, list + ): + control_guidance_start, control_guidance_end = ( + [control_guidance_start], + [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + prompt = promptA + negative_prompt = negative_promptA + self.check_inputs( + prompt, + image, + mask, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + brushnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + global_pool_conditions = ( + brushnet.config.global_pool_conditions + if isinstance(brushnet, BrushNetModel) + else brushnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) + if self.cross_attention_kwargs is not None + else None + ) + + prompt_embeds = self._encode_prompt( + promptA, + promptB, + tradoff, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_promptA, + negative_promptB, + tradoff_nag, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + prompt_embedsU = None + negative_prompt_embedsU = None + prompt_embedsU = self.encode_prompt( + promptU, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_promptU, + prompt_embeds=prompt_embedsU, + negative_prompt_embeds=negative_prompt_embedsU, + lora_scale=text_encoder_lora_scale, + ) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(brushnet, BrushNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=brushnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + original_mask = self.prepare_image( + image=mask, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=brushnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + original_mask = (original_mask.sum(1)[:, None, :, :] < 0).to(image.dtype) + height, width = image.shape[-2:] + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps + ) + self._num_timesteps = len(timesteps) + + # 6. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents, noise = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.1 prepare condition latents + # mask_i = transforms.ToPILImage()(image[0:1,:,:,:].squeeze(0)) + # mask_i.save('_mask.png') + # print(brushnet.dtype) + conditioning_latents = ( + self.vae.encode( + image.to(device=device, dtype=brushnet.dtype) + ).latent_dist.sample() + * self.vae.config.scaling_factor + ) + mask = torch.nn.functional.interpolate( + original_mask, + size=(conditioning_latents.shape[-2], conditioning_latents.shape[-1]), + ) + conditioning_latents = torch.concat([conditioning_latents, mask], 1) + # image = self.vae.decode(conditioning_latents[:1,:4,:,:] / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] + # from torchvision import transforms + # mask_i = transforms.ToPILImage()(image[0:1,:,:,:].squeeze(0)/2+0.5) + # mask_i.save(str(timesteps[0]) +'_C.png') + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat( + batch_size * num_images_per_prompt + ) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which brushnets to keep + brushnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + brushnet_keep.append( + keeps[0] if isinstance(brushnet, BrushNetModel) else keeps + ) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + is_unet_compiled = is_compiled_module(self.unet) + is_brushnet_compiled = is_compiled_module(self.brushnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if ( + is_unet_compiled and is_brushnet_compiled + ) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * 2) + if self.do_classifier_free_guidance + else latents + ) + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t + ) + + # brushnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer BrushNet only for the conditional batch. 
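+ # Only the conditional latents reach BrushNet here; zeroed residuals are concatenated for the unconditional batch further below, so classifier-free guidance still covers both branches.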
+ control_model_input = latents + control_model_input = self.scheduler.scale_model_input( + control_model_input, t + ) + brushnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + brushnet_prompt_embeds = prompt_embeds + + if isinstance(brushnet_keep[i], list): + cond_scale = [ + c * s + for c, s in zip(brushnet_conditioning_scale, brushnet_keep[i]) + ] + else: + brushnet_cond_scale = brushnet_conditioning_scale + if isinstance(brushnet_cond_scale, list): + brushnet_cond_scale = brushnet_cond_scale[0] + cond_scale = brushnet_cond_scale * brushnet_keep[i] + + down_block_res_samples, mid_block_res_sample, up_block_res_samples = ( + self.brushnet( + control_model_input, + t, + encoder_hidden_states=brushnet_prompt_embeds, + brushnet_cond=conditioning_latents, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + ) + + if guess_mode and self.do_classifier_free_guidance: + # Infered BrushNet only for the conditional batch. + # To apply the output of BrushNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [ + torch.cat([torch.zeros_like(d), d]) + for d in down_block_res_samples + ] + mid_block_res_sample = torch.cat( + [torch.zeros_like(mid_block_res_sample), mid_block_res_sample] + ) + up_block_res_samples = [ + torch.cat([torch.zeros_like(d), d]) + for d in up_block_res_samples + ] + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embedsU, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_add_samples=down_block_res_samples, + mid_block_add_sample=mid_block_res_sample, + up_block_add_samples=up_block_res_samples, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, **extra_step_kwargs, return_dict=False + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop( + "negative_prompt_embeds", negative_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and brushnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.brushnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode( + latents / self.vae.config.scaling_factor, + return_dict=False, + generator=generator, + )[0] + image, has_nsfw_concept = self.run_safety_checker( + image, device, prompt_embeds.dtype + ) + else: + 
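# Skip VAE decoding and the safety checker when raw latents are requested. +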
image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess( + image, output_type=output_type, do_denormalize=do_denormalize + ) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput( + images=image, nsfw_content_detected=has_nsfw_concept + ) diff --git a/inpaint/model/power_paint/v2/unet_2d_blocks.py b/inpaint/model/power_paint/v2/unet_2d_blocks.py new file mode 100644 index 0000000..000d24f --- /dev/null +++ b/inpaint/model/power_paint/v2/unet_2d_blocks.py @@ -0,0 +1,342 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict, Optional, Tuple + +import torch +from diffusers.utils import is_torch_version, logging +from diffusers.utils.torch_utils import apply_freeu + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def CrossAttnDownBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + additional_residuals: Optional[torch.FloatTensor] = None, + down_block_add_samples: Optional[torch.FloatTensor] = None, +) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + lora_scale = ( + cross_attention_kwargs.get("scale", 1.0) + if cross_attention_kwargs is not None + else 1.0 + ) + + blocks = list(zip(self.resnets, self.attentions)) + + for i, (resnet, attn) in enumerate(blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = ( + {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + ) + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb, scale=lora_scale) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + # apply additional residuals to the output of the last 
pair of resnet and attention blocks + if i == len(blocks) - 1 and additional_residuals is not None: + hidden_states = hidden_states + additional_residuals + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop(0) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, scale=lora_scale) + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop( + 0 + ) # todo: add before or after + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +def DownBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + scale: float = 1.0, + down_block_add_samples: Optional[torch.FloatTensor] = None, +) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + use_reentrant=False, + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb, scale=scale) + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop(0) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, scale=scale) + + if down_block_add_samples is not None: + hidden_states = hidden_states + down_block_add_samples.pop( + 0 + ) # todo: add before or after + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +def CrossAttnUpBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + return_res_samples: Optional[bool] = False, + up_block_add_samples: Optional[torch.FloatTensor] = None, +) -> torch.FloatTensor: + lora_scale = ( + cross_attention_kwargs.get("scale", 1.0) + if cross_attention_kwargs is not None + else 1.0 + ) + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + if return_res_samples: + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def 
create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = ( + {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + ) + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb, scale=lora_scale) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop(0) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size, scale=lora_scale) + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop(0) + + if return_res_samples: + return hidden_states, output_states + else: + return hidden_states + + +def UpBlock2D_forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + scale: float = 1.0, + return_res_samples: Optional[bool] = False, + up_block_add_samples: Optional[torch.FloatTensor] = None, +) -> torch.FloatTensor: + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + if return_res_samples: + output_states = () + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + use_reentrant=False, + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb, scale=scale) + + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop( + 0 + ) # todo: add before or after + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size, 
scale=scale) + + if return_res_samples: + output_states = output_states + (hidden_states,) + if up_block_add_samples is not None: + hidden_states = hidden_states + up_block_add_samples.pop( + 0 + ) # todo: add before or after + + if return_res_samples: + return hidden_states, output_states + else: + return hidden_states diff --git a/inpaint/model/power_paint/v2/unet_2d_condition.py b/inpaint/model/power_paint/v2/unet_2d_condition.py new file mode 100644 index 0000000..80741de --- /dev/null +++ b/inpaint/model/power_paint/v2/unet_2d_condition.py @@ -0,0 +1,402 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from diffusers.models.unet_2d_condition import UNet2DConditionOutput +from diffusers.utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + scale_lora_layers, + unscale_lora_layers, +) + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def UNet2DConditionModel_forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + down_block_add_samples: Optional[Tuple[torch.Tensor]] = None, + mid_block_add_sample: Optional[Tuple[torch.Tensor]] = None, + up_block_add_samples: Optional[Tuple[torch.Tensor]] = None, +) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`UNet2DConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. 
Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of down unet blocks. + mid_block_additional_residual: (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle unet block. + encoder_attention_mask (`torch.Tensor`): + A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If + `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + additional residuals to be added to UNet long skip connections from down blocks to up blocks for + example from ControlNet side model(s) + mid_block_additional_residual (`torch.Tensor`, *optional*): + additional residual to be added to UNet mid block output, for example from ControlNet side model + down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s) + + Returns: + [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + for dim in sample.shape[-2:]: + if dim % default_overall_up_factor != 0: + # Forward upsample size to force interpolation output size. 
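+ # (e.g. a sample whose height or width is not divisible by 2**num_upsamplers)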
+ forward_upsample_size = True + break + + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = ( + 1 - encoder_attention_mask.to(sample.dtype) + ) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1. time + t_emb = self.get_time_embed(sample=sample, timestep=timestep) + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + class_emb = self.get_class_embed(sample=sample, class_labels=class_labels) + if class_emb is not None: + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + aug_emb = self.get_aug_embed( + emb=emb, + encoder_hidden_states=encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + ) + if self.config.addition_embed_type == "image_hint": + aug_emb, hint = aug_emb + sample = torch.cat([sample, hint], dim=1) + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + encoder_hidden_states = self.process_encoder_hidden_states( + encoder_hidden_states=encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + ) + + # 2. pre-process + sample = self.conv_in(sample) + + # 2.5 GLIGEN position net + if ( + cross_attention_kwargs is not None + and cross_attention_kwargs.get("gligen", None) is not None + ): + cross_attention_kwargs = cross_attention_kwargs.copy() + gligen_args = cross_attention_kwargs.pop("gligen") + cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} + + # 3. 
down + lora_scale = ( + cross_attention_kwargs.get("scale", 1.0) + if cross_attention_kwargs is not None + else 1.0 + ) + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + + is_controlnet = ( + mid_block_additional_residual is not None + and down_block_additional_residuals is not None + ) + # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets + is_adapter = down_intrablock_additional_residuals is not None + # maintain backward compatibility for legacy usage, where + # T2I-Adapter and ControlNet both use down_block_additional_residuals arg + # but can only use one or the other + is_brushnet = ( + down_block_add_samples is not None + and mid_block_add_sample is not None + and up_block_add_samples is not None + ) + if ( + not is_adapter + and mid_block_additional_residual is None + and down_block_additional_residuals is not None + ): + deprecate( + "T2I should not use down_block_additional_residuals", + "1.3.0", + "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ + and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ + for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ", + standard_warn=False, + ) + down_intrablock_additional_residuals = down_block_additional_residuals + is_adapter = True + + down_block_res_samples = (sample,) + + if is_brushnet: + sample = sample + down_block_add_samples.pop(0) + + for downsample_block in self.down_blocks: + if ( + hasattr(downsample_block, "has_cross_attention") + and downsample_block.has_cross_attention + ): + # For t2i-adapter CrossAttnDownBlock2D + additional_residuals = {} + if is_adapter and len(down_intrablock_additional_residuals) > 0: + additional_residuals["additional_residuals"] = ( + down_intrablock_additional_residuals.pop(0) + ) + + if is_brushnet and len(down_block_add_samples) > 0: + additional_residuals["down_block_add_samples"] = [ + down_block_add_samples.pop(0) + for _ in range( + len(downsample_block.resnets) + + (downsample_block.downsamplers != None) + ) + ] + + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + additional_residuals = {} + if is_brushnet and len(down_block_add_samples) > 0: + additional_residuals["down_block_add_samples"] = [ + down_block_add_samples.pop(0) + for _ in range( + len(downsample_block.resnets) + + (downsample_block.downsamplers != None) + ) + ] + + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + scale=lora_scale, + **additional_residuals, + ) + if is_adapter and len(down_intrablock_additional_residuals) > 0: + sample += down_intrablock_additional_residuals.pop(0) + + down_block_res_samples += res_samples + + if is_controlnet: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = ( + down_block_res_sample + down_block_additional_residual + ) + new_down_block_res_samples = new_down_block_res_samples + ( + down_block_res_sample, + ) + + down_block_res_samples = new_down_block_res_samples + + # 4. 
mid + if self.mid_block is not None: + if ( + hasattr(self.mid_block, "has_cross_attention") + and self.mid_block.has_cross_attention + ): + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = self.mid_block(sample, emb) + + # To support T2I-Adapter-XL + if ( + is_adapter + and len(down_intrablock_additional_residuals) > 0 + and sample.shape == down_intrablock_additional_residuals[0].shape + ): + sample += down_intrablock_additional_residuals.pop(0) + + if is_controlnet: + sample = sample + mid_block_additional_residual + + if is_brushnet: + sample = sample + mid_block_add_sample + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if ( + hasattr(upsample_block, "has_cross_attention") + and upsample_block.has_cross_attention + ): + additional_residuals = {} + if is_brushnet and len(up_block_add_samples) > 0: + additional_residuals["up_block_add_samples"] = [ + up_block_add_samples.pop(0) + for _ in range( + len(upsample_block.resnets) + + (upsample_block.upsamplers != None) + ) + ] + + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + additional_residuals = {} + if is_brushnet and len(up_block_add_samples) > 0: + additional_residuals["up_block_add_samples"] = [ + up_block_add_samples.pop(0) + for _ in range( + len(upsample_block.resnets) + + (upsample_block.upsamplers != None) + ) + ] + + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + scale=lora_scale, + **additional_residuals, + ) + + # 6. 
post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) diff --git a/inpaint/model/sd.py b/inpaint/model/sd.py new file mode 100644 index 0000000..2f6698c --- /dev/null +++ b/inpaint/model/sd.py @@ -0,0 +1,129 @@ +import PIL.Image +import cv2 +import torch +from loguru import logger + +from .base import DiffusionInpaintModel +from .helper.cpu_text_encoder import CPUTextEncoderWrapper +from .original_sd_configs import get_config_files +from .utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) +from iopaint.schema import InpaintRequest, ModelType + + +class SD(DiffusionInpaintModel): + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5" + + def init_model(self, device: torch.device, **kwargs): + from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + disable_nsfw_checker = kwargs["disable_nsfw"] or kwargs.get( + "cpu_offload", False + ) + if disable_nsfw_checker: + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + if self.model_info.is_single_file_diffusers: + if self.model_info.model_type == ModelType.DIFFUSERS_SD: + model_kwargs["num_in_channels"] = 4 + else: + model_kwargs["num_in_channels"] = 9 + + self.model = StableDiffusionInpaintPipeline.from_single_file( + self.model_id_or_path, + torch_dtype=torch_dtype, + load_safety_checker=not disable_nsfw_checker, + original_config_file=get_config_files()['v1'], + **model_kwargs, + ) + else: + self.model = handle_from_pretrained_exceptions( + StableDiffusionInpaintPipeline.from_pretrained, + pretrained_model_name_or_path=self.model_id_or_path, + variant="fp16", + torch_dtype=torch_dtype, + **model_kwargs, + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + + output = self.model( + image=PIL.Image.fromarray(image), + prompt=config.prompt, + negative_prompt=config.negative_prompt, + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + num_inference_steps=config.sd_steps, + strength=config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 
255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output + + +class SD15(SD): + name = "runwayml/stable-diffusion-inpainting" + model_id_or_path = "runwayml/stable-diffusion-inpainting" + + +class Anything4(SD): + name = "Sanster/anything-4.0-inpainting" + model_id_or_path = "Sanster/anything-4.0-inpainting" + + +class RealisticVision14(SD): + name = "Sanster/Realistic_Vision_V1.4-inpainting" + model_id_or_path = "Sanster/Realistic_Vision_V1.4-inpainting" + + +class SD2(SD): + name = "stabilityai/stable-diffusion-2-inpainting" + model_id_or_path = "stabilityai/stable-diffusion-2-inpainting" diff --git a/inpaint/model/sdxl.py b/inpaint/model/sdxl.py new file mode 100644 index 0000000..b7099e8 --- /dev/null +++ b/inpaint/model/sdxl.py @@ -0,0 +1,110 @@ +import os + +import PIL.Image +import cv2 +import torch +from diffusers import AutoencoderKL +from loguru import logger + +from inpaint.schema import InpaintRequest, ModelType + +from .base import DiffusionInpaintModel +from .helper.cpu_text_encoder import CPUTextEncoderWrapper +from .original_sd_configs import get_config_files +from .utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) + + +class SDXL(DiffusionInpaintModel): + name = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1" + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdxl" + model_id_or_path = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1" + + def init_model(self, device: torch.device, **kwargs): + from diffusers.pipelines import StableDiffusionXLInpaintPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + if self.model_info.model_type == ModelType.DIFFUSERS_SDXL: + num_in_channels = 4 + else: + num_in_channels = 9 + + if os.path.isfile(self.model_id_or_path): + self.model = StableDiffusionXLInpaintPipeline.from_single_file( + self.model_id_or_path, + torch_dtype=torch_dtype, + num_in_channels=num_in_channels, + load_safety_checker=False, + original_config_file=get_config_files()['xl'], + ) + else: + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + if "vae" not in model_kwargs: + vae = AutoencoderKL.from_pretrained( + "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype + ) + model_kwargs["vae"] = vae + self.model = handle_from_pretrained_exceptions( + StableDiffusionXLInpaintPipeline.from_pretrained, + pretrained_model_name_or_path=self.model_id_or_path, + torch_dtype=torch_dtype, + variant="fp16", + **model_kwargs + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + self.model.text_encoder_2 = CPUTextEncoderWrapper( + self.model.text_encoder_2, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + + output = self.model( + image=PIL.Image.fromarray(image), + 
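# Prompt, steps, strength, guidance scale and seed are taken from the InpaintRequest config. +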
prompt=config.prompt, + negative_prompt=config.negative_prompt, + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + num_inference_steps=config.sd_steps, + strength=0.999 if config.sd_strength == 1.0 else config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/inpaint/model/utils.py b/inpaint/model/utils.py new file mode 100644 index 0000000..2278817 --- /dev/null +++ b/inpaint/model/utils.py @@ -0,0 +1,1033 @@ +import gc +import math +import random +import traceback +from typing import Any + +import torch +import numpy as np +import collections +from itertools import repeat + +from diffusers import ( + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + UniPCMultistepScheduler, + LCMScheduler, + DPMSolverSinglestepScheduler, + KDPM2DiscreteScheduler, + KDPM2AncestralDiscreteScheduler, + HeunDiscreteScheduler, +) +from loguru import logger + +from inpaint.schema import SDSampler +from torch import conv2d, conv_transpose2d + + +def make_beta_schedule( + device, schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3 +): + if schedule == "linear": + betas = ( + torch.linspace( + linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64 + ) + ** 2 + ) + + elif schedule == "cosine": + timesteps = ( + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s + ).to(device) + alphas = timesteps / (1 + cosine_s) * np.pi / 2 + alphas = torch.cos(alphas).pow(2).to(device) + alphas = alphas / alphas[0] + betas = 1 - alphas[1:] / alphas[:-1] + betas = np.clip(betas, a_min=0, a_max=0.999) + + elif schedule == "sqrt_linear": + betas = torch.linspace( + linear_start, linear_end, n_timestep, dtype=torch.float64 + ) + elif schedule == "sqrt": + betas = ( + torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) + ** 0.5 + ) + else: + raise ValueError(f"schedule '{schedule}' unknown.") + return betas.numpy() + + +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): + # select alphas for computing the variance schedule + alphas = alphacums[ddim_timesteps] + alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) + + # according the the formula provided in https://arxiv.org/abs/2010.02502 + sigmas = eta * np.sqrt( + (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev) + ) + if verbose: + print( + f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}" + ) + print( + f"For the chosen value of eta, which is {eta}, " + f"this results in the following sigma_t schedule for ddim sampler {sigmas}" + ) + return sigmas, alphas, alphas_prev + + +def make_ddim_timesteps( + ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True +): + if ddim_discr_method == "uniform": + c = num_ddpm_timesteps // num_ddim_timesteps + ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + elif ddim_discr_method == "quad": + ddim_timesteps = ( + (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2 + ).astype(int) + else: + raise NotImplementedError( + f'There is no ddim discretization method called "{ddim_discr_method}"' + ) + + # assert 
ddim_timesteps.shape[0] == num_ddim_timesteps + # add one to get the final alpha values right (the ones from first scale to data during sampling) + steps_out = ddim_timesteps + 1 + if verbose: + print(f"Selected timesteps for ddim sampler: {steps_out}") + return steps_out + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat( + shape[0], *((1,) * (len(shape) - 1)) + ) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() + + +def timestep_embedding(device, timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) + * torch.arange(start=0, end=half, dtype=torch.float32) + / half + ).to(device=device) + + args = timesteps[:, None].float() * freqs[None] + + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + +###### MAT and FcF ####### + + +def normalize_2nd_moment(x, dim=1): + return ( + x * (x.square().mean(dim=dim, keepdim=True) + torch.finfo(x.dtype).eps).rsqrt() + ) + + +class EasyDict(dict): + """Convenience class that behaves like a dict but allows access with the attribute syntax.""" + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name: str, value: Any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + del self[name] + + +def _bias_act_ref(x, b=None, dim=1, act="linear", alpha=None, gain=None, clamp=None): + """Slow reference implementation of `bias_act()` using standard TensorFlow ops.""" + assert isinstance(x, torch.Tensor) + assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Add bias. + if b is not None: + assert isinstance(b, torch.Tensor) and b.ndim == 1 + assert 0 <= dim < x.ndim + assert b.shape[0] == x.shape[dim] + x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) + + # Evaluate activation function. + alpha = float(alpha) + x = spec.func(x, alpha=alpha) + + # Scale by gain. + gain = float(gain) + if gain != 1: + x = x * gain + + # Clamp. + if clamp >= 0: + x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type + return x + + +def bias_act( + x, b=None, dim=1, act="linear", alpha=None, gain=None, clamp=None, impl="ref" +): + r"""Fused bias and activation function. + + Adds bias `b` to activation tensor `x`, evaluates activation function `act`, + and scales the result by `gain`. Each of the steps is optional. In most cases, + the fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports first and second order gradients, + but not third order gradients. + + Args: + x: Input activation tensor. Can be of any shape. + b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type + as `x`. 
The shape must be known, and it must match the dimension of `x` + corresponding to `dim`. + dim: The dimension in `x` corresponding to the elements of `b`. + The value of `dim` is ignored if `b` is not specified. + act: Name of the activation function to evaluate, or `"linear"` to disable. + Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. + See `activation_funcs` for a full list. `None` is not allowed. + alpha: Shape parameter for the activation function, or `None` to use the default. + gain: Scaling factor for the output tensor, or `None` to use default. + See `activation_funcs` for the default scaling of each activation function. + If unsure, consider specifying 1. + clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable + the clamping (default). + impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). + + Returns: + Tensor of the same shape and datatype as `x`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ["ref", "cuda"] + return _bias_act_ref( + x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp + ) + + +def _get_filter_size(f): + if f is None: + return 1, 1 + + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + fw = f.shape[-1] + fh = f.shape[0] + + fw = int(fw) + fh = int(fh) + assert fw >= 1 and fh >= 1 + return fw, fh + + +def _get_weight_shape(w): + shape = [int(sz) for sz in w.shape] + return shape + + +def _parse_scaling(scaling): + if isinstance(scaling, int): + scaling = [scaling, scaling] + assert isinstance(scaling, (list, tuple)) + assert all(isinstance(x, int) for x in scaling) + sx, sy = scaling + assert sx >= 1 and sy >= 1 + return sx, sy + + +def _parse_padding(padding): + if isinstance(padding, int): + padding = [padding, padding] + assert isinstance(padding, (list, tuple)) + assert all(isinstance(x, int) for x in padding) + if len(padding) == 2: + padx, pady = padding + padding = [padx, padx, pady, pady] + padx0, padx1, pady0, pady1 = padding + return padx0, padx1, pady0, pady1 + + +def setup_filter( + f, + device=torch.device("cpu"), + normalize=True, + flip_filter=False, + gain=1, + separable=None, +): + r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. + + Args: + f: Torch tensor, numpy array, or python list of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), + `[]` (impulse), or + `None` (identity). + device: Result device (default: cpu). + normalize: Normalize the filter so that it retains the magnitude + for constant input signal (DC)? (default: True). + flip_filter: Flip the filter? (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + separable: Return a separable filter? (default: select automatically). + + Returns: + Float32 tensor of the shape + `[filter_height, filter_width]` (non-separable) or + `[filter_taps]` (separable). + """ + # Validate. + if f is None: + f = 1 + f = torch.as_tensor(f, dtype=torch.float32) + assert f.ndim in [0, 1, 2] + assert f.numel() > 0 + if f.ndim == 0: + f = f[np.newaxis] + + # Separable? + if separable is None: + separable = f.ndim == 1 and f.numel() >= 8 + if f.ndim == 1 and not separable: + f = f.ger(f) + assert f.ndim == (1 if separable else 2) + + # Apply normalize, flip, gain, and device. 
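+ # Normalizing by f.sum() keeps unit DC gain; the gain**(ndim/2) factor lets a separable 1D filter applied in two passes compose to the full 2D gain.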
+ if normalize: + f /= f.sum() + if flip_filter: + f = f.flip(list(range(f.ndim))) + f = f * (gain ** (f.ndim / 2)) + f = f.to(device=device) + return f + + +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +to_2tuple = _ntuple(2) + +activation_funcs = { + "linear": EasyDict( + func=lambda x, **_: x, + def_alpha=0, + def_gain=1, + cuda_idx=1, + ref="", + has_2nd_grad=False, + ), + "relu": EasyDict( + func=lambda x, **_: torch.nn.functional.relu(x), + def_alpha=0, + def_gain=np.sqrt(2), + cuda_idx=2, + ref="y", + has_2nd_grad=False, + ), + "lrelu": EasyDict( + func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), + def_alpha=0.2, + def_gain=np.sqrt(2), + cuda_idx=3, + ref="y", + has_2nd_grad=False, + ), + "tanh": EasyDict( + func=lambda x, **_: torch.tanh(x), + def_alpha=0, + def_gain=1, + cuda_idx=4, + ref="y", + has_2nd_grad=True, + ), + "sigmoid": EasyDict( + func=lambda x, **_: torch.sigmoid(x), + def_alpha=0, + def_gain=1, + cuda_idx=5, + ref="y", + has_2nd_grad=True, + ), + "elu": EasyDict( + func=lambda x, **_: torch.nn.functional.elu(x), + def_alpha=0, + def_gain=1, + cuda_idx=6, + ref="y", + has_2nd_grad=True, + ), + "selu": EasyDict( + func=lambda x, **_: torch.nn.functional.selu(x), + def_alpha=0, + def_gain=1, + cuda_idx=7, + ref="y", + has_2nd_grad=True, + ), + "softplus": EasyDict( + func=lambda x, **_: torch.nn.functional.softplus(x), + def_alpha=0, + def_gain=1, + cuda_idx=8, + ref="y", + has_2nd_grad=True, + ), + "swish": EasyDict( + func=lambda x, **_: torch.sigmoid(x) * x, + def_alpha=0, + def_gain=np.sqrt(2), + cuda_idx=9, + ref="x", + has_2nd_grad=True, + ), +} + + +def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl="cuda"): + r"""Pad, upsample, filter, and downsample a batch of 2D images. + + Performs the following sequence of operations for each channel: + + 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). + + 2. Pad the image with the specified number of zeros on each side (`padding`). + Negative padding corresponds to cropping the image. + + 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it + so that the footprint of all output pixels lies within the input image. + + 4. Downsample the image by keeping every Nth pixel (`down`). + + This sequence of operations bears close resemblance to scipy.signal.upfirdn(). + The fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports gradients of arbitrary order. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + # assert isinstance(x, torch.Tensor) + # assert impl in ['ref', 'cuda'] + return _upfirdn2d_ref( + x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain + ) + + +def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): + """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.""" + # Validate arguments. + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + assert not f.requires_grad + batch_size, num_channels, in_height, in_width = x.shape + # upx, upy = _parse_scaling(up) + # downx, downy = _parse_scaling(down) + + upx, upy = up, up + downx, downy = down, down + + # padx0, padx1, pady0, pady1 = _parse_padding(padding) + padx0, padx1, pady0, pady1 = padding[0], padding[1], padding[2], padding[3] + + # Upsample by inserting zeros. + x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) + x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) + x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) + + # Pad or crop. + x = torch.nn.functional.pad( + x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)] + ) + x = x[ + :, + :, + max(-pady0, 0) : x.shape[2] - max(-pady1, 0), + max(-padx0, 0) : x.shape[3] - max(-padx1, 0), + ] + + # Setup filter. + f = f * (gain ** (f.ndim / 2)) + f = f.to(x.dtype) + if not flip_filter: + f = f.flip(list(range(f.ndim))) + + # Convolve with the filter. + f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) + if f.ndim == 4: + x = conv2d(input=x, weight=f, groups=num_channels) + else: + x = conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) + x = conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) + + # Downsample by throwing away pixels. + x = x[:, :, ::downy, ::downx] + return x + + +def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl="cuda"): + r"""Downsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a fraction of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the input. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + downx, downy = _parse_scaling(down) + # padx0, padx1, pady0, pady1 = _parse_padding(padding) + padx0, padx1, pady0, pady1 = padding, padding, padding, padding + + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw - downx + 1) // 2, + padx1 + (fw - downx) // 2, + pady0 + (fh - downy + 1) // 2, + pady1 + (fh - downy) // 2, + ] + return upfirdn2d( + x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl + ) + + +def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl="cuda"): + r"""Upsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a multiple of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + upx, upy = _parse_scaling(up) + # upx, upy = up, up + padx0, padx1, pady0, pady1 = _parse_padding(padding) + # padx0, padx1, pady0, pady1 = padding, padding, padding, padding + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw + upx - 1) // 2, + padx1 + (fw - upx) // 2, + pady0 + (fh + upy - 1) // 2, + pady1 + (fh - upy) // 2, + ] + return upfirdn2d( + x, + f, + up=up, + padding=p, + flip_filter=flip_filter, + gain=gain * upx * upy, + impl=impl, + ) + + +class MinibatchStdLayer(torch.nn.Module): + def __init__(self, group_size, num_channels=1): + super().__init__() + self.group_size = group_size + self.num_channels = num_channels + + def forward(self, x): + N, C, H, W = x.shape + G = ( + torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) + if self.group_size is not None + else N + ) + F = self.num_channels + c = C // F + + y = x.reshape( + G, -1, F, c, H, W + ) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c. + y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group. + y = y.square().mean(dim=0) # [nFcHW] Calc variance over group. + y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group. + y = y.mean(dim=[2, 3, 4]) # [nF] Take average over channels and pixels. + y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions. + y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels. + x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels. + return x + + +class FullyConnectedLayer(torch.nn.Module): + def __init__( + self, + in_features, # Number of input features. + out_features, # Number of output features. + bias=True, # Apply additive bias before the activation function? + activation="linear", # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=1, # Learning rate multiplier. + bias_init=0, # Initial value for the additive bias. 
+ ): + super().__init__() + self.weight = torch.nn.Parameter( + torch.randn([out_features, in_features]) / lr_multiplier + ) + self.bias = ( + torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) + if bias + else None + ) + self.activation = activation + + self.weight_gain = lr_multiplier / np.sqrt(in_features) + self.bias_gain = lr_multiplier + + def forward(self, x): + w = self.weight * self.weight_gain + b = self.bias + if b is not None and self.bias_gain != 1: + b = b * self.bias_gain + + if self.activation == "linear" and b is not None: + # out = torch.addmm(b.unsqueeze(0), x, w.t()) + x = x.matmul(w.t()) + out = x + b.reshape([-1 if i == x.ndim - 1 else 1 for i in range(x.ndim)]) + else: + x = x.matmul(w.t()) + out = bias_act(x, b, act=self.activation, dim=x.ndim - 1) + return out + + +def _conv2d_wrapper( + x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True +): + """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.""" + out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w) + + # Flip weight if requested. + if ( + not flip_weight + ): # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False). + w = w.flip([2, 3]) + + # Workaround performance pitfall in cuDNN 8.0.5, triggered when using + # 1x1 kernel + memory_format=channels_last + less than 64 channels. + if ( + kw == 1 + and kh == 1 + and stride == 1 + and padding in [0, [0, 0], (0, 0)] + and not transpose + ): + if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64: + if out_channels <= 4 and groups == 1: + in_shape = x.shape + x = w.squeeze(3).squeeze(2) @ x.reshape( + [in_shape[0], in_channels_per_group, -1] + ) + x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]]) + else: + x = x.to(memory_format=torch.contiguous_format) + w = w.to(memory_format=torch.contiguous_format) + x = conv2d(x, w, groups=groups) + return x.to(memory_format=torch.channels_last) + + # Otherwise => execute using conv2d_gradfix. + op = conv_transpose2d if transpose else conv2d + return op(x, w, stride=stride, padding=padding, groups=groups) + + +def conv2d_resample( + x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False +): + r"""2D convolution with optional up/downsampling. + + Padding is performed only once at the beginning, not between the operations. + + Args: + x: Input tensor of shape + `[batch_size, in_channels, in_height, in_width]`. + w: Weight tensor of shape + `[out_channels, in_channels//groups, kernel_height, kernel_width]`. + f: Low-pass filter for up/downsampling. Must be prepared beforehand by + calling setup_filter(). None = identity (default). + up: Integer upsampling factor (default: 1). + down: Integer downsampling factor (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + groups: Split input channels into N groups (default: 1). + flip_weight: False = convolution, True = correlation (default: True). + flip_filter: False = convolution, True = correlation (default: False). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + # Validate arguments. 
+ assert isinstance(x, torch.Tensor) and (x.ndim == 4) + assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype) + assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2]) + assert isinstance(up, int) and (up >= 1) + assert isinstance(down, int) and (down >= 1) + # assert isinstance(groups, int) and (groups >= 1), f"!!!!!! groups: {groups} isinstance(groups, int) {isinstance(groups, int)} {type(groups)}" + out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w) + fw, fh = _get_filter_size(f) + # px0, px1, py0, py1 = _parse_padding(padding) + px0, px1, py0, py1 = padding, padding, padding, padding + + # Adjust padding to account for up/downsampling. + if up > 1: + px0 += (fw + up - 1) // 2 + px1 += (fw - up) // 2 + py0 += (fh + up - 1) // 2 + py1 += (fh - up) // 2 + if down > 1: + px0 += (fw - down + 1) // 2 + px1 += (fw - down) // 2 + py0 += (fh - down + 1) // 2 + py1 += (fh - down) // 2 + + # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve. + if kw == 1 and kh == 1 and (down > 1 and up == 1): + x = upfirdn2d( + x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter + ) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample. + if kw == 1 and kh == 1 and (up > 1 and down == 1): + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + x = upfirdn2d( + x=x, + f=f, + up=up, + padding=[px0, px1, py0, py1], + gain=up**2, + flip_filter=flip_filter, + ) + return x + + # Fast path: downsampling only => use strided convolution. + if down > 1 and up == 1: + x = upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter) + x = _conv2d_wrapper( + x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight + ) + return x + + # Fast path: upsampling with optional downsampling => use transpose strided convolution. + if up > 1: + if groups == 1: + w = w.transpose(0, 1) + else: + w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw) + w = w.transpose(1, 2) + w = w.reshape( + groups * in_channels_per_group, out_channels // groups, kh, kw + ) + px0 -= kw - 1 + px1 -= kw - up + py0 -= kh - 1 + py1 -= kh - up + pxt = max(min(-px0, -px1), 0) + pyt = max(min(-py0, -py1), 0) + x = _conv2d_wrapper( + x=x, + w=w, + stride=up, + padding=[pyt, pxt], + groups=groups, + transpose=True, + flip_weight=(not flip_weight), + ) + x = upfirdn2d( + x=x, + f=f, + padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt], + gain=up**2, + flip_filter=flip_filter, + ) + if down > 1: + x = upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + + # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d. + if up == 1 and down == 1: + if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0: + return _conv2d_wrapper( + x=x, w=w, padding=[py0, px0], groups=groups, flip_weight=flip_weight + ) + + # Fallback: Generic reference implementation. + x = upfirdn2d( + x=x, + f=(f if up > 1 else None), + up=up, + padding=[px0, px1, py0, py1], + gain=up**2, + flip_filter=flip_filter, + ) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + if down > 1: + x = upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + + +class Conv2dLayer(torch.nn.Module): + def __init__( + self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. 
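# Editor's note (not part of the original patch): a worked example of the
# padding adjustment above. For a 3x3 kernel (padding=1), a 4-tap resample
# filter (fw = fh = 4) and up=2, the per-side padding becomes
#   px0 = 1 + (4 + 2 - 1) // 2 = 3,   px1 = 1 + (4 - 2) // 2 = 2,
# i.e. [3, 2, 3, 2] before the transposed-convolution fast path runs.
def _resample_padding_example(padding=1, fw=4, up=2):
    px0 = padding + (fw + up - 1) // 2
    px1 = padding + (fw - up) // 2
    return px0, px1  # (3, 2) for the defaults above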
+ kernel_size, # Width and height of the convolution kernel. + bias=True, # Apply additive bias before the activation function? + activation="linear", # Activation function: 'relu', 'lrelu', etc. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + resample_filter=[ + 1, + 3, + 3, + 1, + ], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output to +-X, None = disable clamping. + channels_last=False, # Expect the input to have memory_format=channels_last? + trainable=True, # Update the weights of this layer during training? + ): + super().__init__() + self.activation = activation + self.up = up + self.down = down + self.register_buffer("resample_filter", setup_filter(resample_filter)) + self.conv_clamp = conv_clamp + self.padding = kernel_size // 2 + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2)) + self.act_gain = activation_funcs[activation].def_gain + + memory_format = ( + torch.channels_last if channels_last else torch.contiguous_format + ) + weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to( + memory_format=memory_format + ) + bias = torch.zeros([out_channels]) if bias else None + if trainable: + self.weight = torch.nn.Parameter(weight) + self.bias = torch.nn.Parameter(bias) if bias is not None else None + else: + self.register_buffer("weight", weight) + if bias is not None: + self.register_buffer("bias", bias) + else: + self.bias = None + + def forward(self, x, gain=1): + w = self.weight * self.weight_gain + x = conv2d_resample( + x=x, + w=w, + f=self.resample_filter, + up=self.up, + down=self.down, + padding=self.padding, + ) + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + out = bias_act( + x, self.bias, act=self.activation, gain=act_gain, clamp=act_clamp + ) + return out + + +def torch_gc(): + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + gc.collect() + + +def set_seed(seed: int): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def get_scheduler(sd_sampler, scheduler_config): + # https://github.com/huggingface/diffusers/issues/4167 + keys_to_pop = ["use_karras_sigmas", "algorithm_type"] + scheduler_config = dict(scheduler_config) + for it in keys_to_pop: + scheduler_config.pop(it, None) + + # fmt: off + samplers = { + SDSampler.dpm_plus_plus_2m: [DPMSolverMultistepScheduler], + SDSampler.dpm_plus_plus_2m_karras: [DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)], + SDSampler.dpm_plus_plus_2m_sde: [DPMSolverMultistepScheduler, dict(algorithm_type="sde-dpmsolver++")], + SDSampler.dpm_plus_plus_2m_sde_karras: [DPMSolverMultistepScheduler, dict(algorithm_type="sde-dpmsolver++", use_karras_sigmas=True)], + SDSampler.dpm_plus_plus_sde: [DPMSolverSinglestepScheduler], + SDSampler.dpm_plus_plus_sde_karras: [DPMSolverSinglestepScheduler, dict(use_karras_sigmas=True)], + SDSampler.dpm2: [KDPM2DiscreteScheduler], + SDSampler.dpm2_karras: [KDPM2DiscreteScheduler, dict(use_karras_sigmas=True)], + SDSampler.dpm2_a: [KDPM2AncestralDiscreteScheduler], + SDSampler.dpm2_a_karras: [KDPM2AncestralDiscreteScheduler, dict(use_karras_sigmas=True)], + SDSampler.euler: [EulerDiscreteScheduler], + SDSampler.euler_a: [EulerAncestralDiscreteScheduler], + SDSampler.heun: [HeunDiscreteScheduler], + SDSampler.lms: [LMSDiscreteScheduler], + SDSampler.lms_karras: [LMSDiscreteScheduler, dict(use_karras_sigmas=True)], + 
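# Editor's sketch (not part of the original patch): Conv2dLayer combines the FIR
# resampling with an equalized-lr convolution, so down=2 halves the spatial
# size. It relies on setup_filter(), activation_funcs and bias_act() defined
# elsewhere in this module (not shown in this hunk).
def _conv2d_layer_shape_check():
    layer = Conv2dLayer(in_channels=3, out_channels=8, kernel_size=3,
                        activation="lrelu", down=2)
    y = layer(torch.randn(1, 3, 64, 64))
    assert y.shape == (1, 8, 32, 32)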
SDSampler.ddim: [DDIMScheduler], + SDSampler.pndm: [PNDMScheduler], + SDSampler.uni_pc: [UniPCMultistepScheduler], + SDSampler.lcm: [LCMScheduler], + } + # fmt: on + if sd_sampler in samplers: + if len(samplers[sd_sampler]) == 2: + scheduler_cls, kwargs = samplers[sd_sampler] + else: + scheduler_cls, kwargs = samplers[sd_sampler][0], {} + return scheduler_cls.from_config(scheduler_config, **kwargs) + else: + raise ValueError(sd_sampler) + + +def is_local_files_only(**kwargs) -> bool: + from huggingface_hub.constants import HF_HUB_OFFLINE + + return HF_HUB_OFFLINE or kwargs.get("local_files_only", False) + + +def handle_from_pretrained_exceptions(func, **kwargs): + try: + return func(**kwargs) + except ValueError as e: + if "You are trying to load the model files of the `variant=fp16`" in str(e): + logger.info("variant=fp16 not found, try revision=fp16") + try: + return func(**{**kwargs, "variant": None, "revision": "fp16"}) + except Exception as e: + logger.info("revision=fp16 not found, try revision=main") + return func(**{**kwargs, "variant": None, "revision": "main"}) + raise e + except OSError as e: + previous_traceback = traceback.format_exc() + if "RevisionNotFoundError: 404 Client Error." in previous_traceback: + logger.info("revision=fp16 not found, try revision=main") + return func(**{**kwargs, "variant": None, "revision": "main"}) + elif "Max retries exceeded" in previous_traceback: + logger.exception( + "Fetching model from HuggingFace failed. " + "If this is your first time downloading the model, you may need to set up proxy in terminal." + "If the model has already been downloaded, you can add --local-files-only when starting." + ) + exit(-1) + raise e + except Exception as e: + raise e + + +def get_torch_dtype(device, no_half: bool): + device = str(device) + use_fp16 = not no_half + use_gpu = device == "cuda" + # https://github.com/huggingface/diffusers/issues/4480 + # pipe.enable_attention_slicing and float16 will cause black output on mps + # if device in ["cuda", "mps"] and use_fp16: + if device in ["cuda"] and use_fp16: + return use_gpu, torch.float16 + return use_gpu, torch.float32 + + +def enable_low_mem(pipe, enable: bool): + if torch.backends.mps.is_available(): + # https://huggingface.co/docs/diffusers/v0.25.0/en/api/pipelines/stable_diffusion/image_variation#diffusers.StableDiffusionImageVariationPipeline.enable_attention_slicing + # CUDA: Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. 
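# Editor's sketch (not part of the original patch): get_scheduler() maps an
# SDSampler value onto a diffusers scheduler class plus extra kwargs. Swapping
# the sampler on an already-loaded diffusers pipeline (assumed here to be named
# `pipe`) then boils down to:
def _swap_sampler_example(pipe):
    pipe.scheduler = get_scheduler(
        SDSampler.dpm_plus_plus_2m_karras, pipe.scheduler.config
    )
    # equivalent to DPMSolverMultistepScheduler.from_config(pipe.scheduler.config,
    # use_karras_sigmas=True)
    return pipe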
+ if enable: + pipe.enable_attention_slicing("max") + else: + # https://huggingface.co/docs/diffusers/optimization/mps + # Devices with less than 64GB of memory are recommended to use enable_attention_slicing + pipe.enable_attention_slicing() + + if enable: + pipe.vae.enable_tiling() diff --git a/inpaint/model/zits.py b/inpaint/model/zits.py new file mode 100644 index 0000000..d58ac01 --- /dev/null +++ b/inpaint/model/zits.py @@ -0,0 +1,476 @@ +import os +import time + +import cv2 +import torch +import torch.nn.functional as F + +from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model +from iopaint.schema import InpaintRequest +import numpy as np + +from .base import InpaintModel + +ZITS_INPAINT_MODEL_URL = os.environ.get( + "ZITS_INPAINT_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_zits/zits-inpaint-0717.pt", +) +ZITS_INPAINT_MODEL_MD5 = os.environ.get( + "ZITS_INPAINT_MODEL_MD5", "9978cc7157dc29699e42308d675b2154" +) + +ZITS_EDGE_LINE_MODEL_URL = os.environ.get( + "ZITS_EDGE_LINE_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_zits/zits-edge-line-0717.pt", +) +ZITS_EDGE_LINE_MODEL_MD5 = os.environ.get( + "ZITS_EDGE_LINE_MODEL_MD5", "55e31af21ba96bbf0c80603c76ea8c5f" +) + +ZITS_STRUCTURE_UPSAMPLE_MODEL_URL = os.environ.get( + "ZITS_STRUCTURE_UPSAMPLE_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_zits/zits-structure-upsample-0717.pt", +) +ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 = os.environ.get( + "ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5", "3d88a07211bd41b2ec8cc0d999f29927" +) + +ZITS_WIRE_FRAME_MODEL_URL = os.environ.get( + "ZITS_WIRE_FRAME_MODEL_URL", + "https://github.com/Sanster/models/releases/download/add_zits/zits-wireframe-0717.pt", +) +ZITS_WIRE_FRAME_MODEL_MD5 = os.environ.get( + "ZITS_WIRE_FRAME_MODEL_MD5", "a9727c63a8b48b65c905d351b21ce46b" +) + + +def resize(img, height, width, center_crop=False): + imgh, imgw = img.shape[0:2] + + if center_crop and imgh != imgw: + # center crop + side = np.minimum(imgh, imgw) + j = (imgh - side) // 2 + i = (imgw - side) // 2 + img = img[j : j + side, i : i + side, ...] 
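# Editor's note (not part of the original patch): the ZITS_* URLs and MD5s
# above are read once at import time, so pointing them at a local mirror has to
# happen before this module is imported, e.g. (mirror URL is hypothetical):
#
#     import os
#     os.environ["ZITS_INPAINT_MODEL_URL"] = "https://mirror.example.com/zits-inpaint-0717.pt"
#     from inpaint.model.zits import ZITS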
+ + if imgh > height and imgw > width: + inter = cv2.INTER_AREA + else: + inter = cv2.INTER_LINEAR + img = cv2.resize(img, (height, width), interpolation=inter) + + return img + + +def to_tensor(img, scale=True, norm=False): + if img.ndim == 2: + img = img[:, :, np.newaxis] + c = img.shape[-1] + + if scale: + img_t = torch.from_numpy(img).permute(2, 0, 1).float().div(255) + else: + img_t = torch.from_numpy(img).permute(2, 0, 1).float() + + if norm: + mean = torch.tensor([0.5, 0.5, 0.5]).reshape(c, 1, 1) + std = torch.tensor([0.5, 0.5, 0.5]).reshape(c, 1, 1) + img_t = (img_t - mean) / std + return img_t + + +def load_masked_position_encoding(mask): + ones_filter = np.ones((3, 3), dtype=np.float32) + d_filter1 = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32) + d_filter2 = np.array([[0, 0, 0], [1, 1, 0], [1, 1, 0]], dtype=np.float32) + d_filter3 = np.array([[0, 1, 1], [0, 1, 1], [0, 0, 0]], dtype=np.float32) + d_filter4 = np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]], dtype=np.float32) + str_size = 256 + pos_num = 128 + + ori_mask = mask.copy() + ori_h, ori_w = ori_mask.shape[0:2] + ori_mask = ori_mask / 255 + mask = cv2.resize(mask, (str_size, str_size), interpolation=cv2.INTER_AREA) + mask[mask > 0] = 255 + h, w = mask.shape[0:2] + mask3 = mask.copy() + mask3 = 1.0 - (mask3 / 255.0) + pos = np.zeros((h, w), dtype=np.int32) + direct = np.zeros((h, w, 4), dtype=np.int32) + i = 0 + while np.sum(1 - mask3) > 0: + i += 1 + mask3_ = cv2.filter2D(mask3, -1, ones_filter) + mask3_[mask3_ > 0] = 1 + sub_mask = mask3_ - mask3 + pos[sub_mask == 1] = i + + m = cv2.filter2D(mask3, -1, d_filter1) + m[m > 0] = 1 + m = m - mask3 + direct[m == 1, 0] = 1 + + m = cv2.filter2D(mask3, -1, d_filter2) + m[m > 0] = 1 + m = m - mask3 + direct[m == 1, 1] = 1 + + m = cv2.filter2D(mask3, -1, d_filter3) + m[m > 0] = 1 + m = m - mask3 + direct[m == 1, 2] = 1 + + m = cv2.filter2D(mask3, -1, d_filter4) + m[m > 0] = 1 + m = m - mask3 + direct[m == 1, 3] = 1 + + mask3 = mask3_ + + abs_pos = pos.copy() + rel_pos = pos / (str_size / 2) # to 0~1 maybe larger than 1 + rel_pos = (rel_pos * pos_num).astype(np.int32) + rel_pos = np.clip(rel_pos, 0, pos_num - 1) + + if ori_w != w or ori_h != h: + rel_pos = cv2.resize(rel_pos, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST) + rel_pos[ori_mask == 0] = 0 + direct = cv2.resize(direct, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST) + direct[ori_mask == 0, :] = 0 + + return rel_pos, abs_pos, direct + + +def load_image(img, mask, device, sigma256=3.0): + """ + Args: + img: [H, W, C] RGB + mask: [H, W] 255 为 masks 区域 + sigma256: + + Returns: + + """ + h, w, _ = img.shape + imgh, imgw = img.shape[0:2] + img_256 = resize(img, 256, 256) + + mask = (mask > 127).astype(np.uint8) * 255 + mask_256 = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_AREA) + mask_256[mask_256 > 0] = 255 + + mask_512 = cv2.resize(mask, (512, 512), interpolation=cv2.INTER_AREA) + mask_512[mask_512 > 0] = 255 + + # original skimage implemention + # https://scikit-image.org/docs/stable/api/skimage.feature.html#skimage.feature.canny + # low_threshold: Lower bound for hysteresis thresholding (linking edges). If None, low_threshold is set to 10% of dtype’s max. + # high_threshold: Upper bound for hysteresis thresholding (linking edges). If None, high_threshold is set to 20% of dtype’s max. 
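# Editor's sketch (not part of the original patch): shapes returned by
# load_masked_position_encoding() for a 512x512 mask with a rectangular hole.
# rel_pos and direct are resized back to the original resolution, while abs_pos
# stays at the 256x256 working size.
def _masked_position_encoding_example():
    mask = np.zeros((512, 512), dtype=np.uint8)
    mask[100:200, 150:300] = 255  # 255 marks the region to inpaint
    rel_pos, abs_pos, direct = load_masked_position_encoding(mask)
    assert rel_pos.shape == (512, 512) and rel_pos.max() <= 127
    assert abs_pos.shape == (256, 256)
    assert direct.shape == (512, 512, 4)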
+ + try: + import skimage + + gray_256 = skimage.color.rgb2gray(img_256) + edge_256 = skimage.feature.canny(gray_256, sigma=3.0, mask=None).astype(float) + # cv2.imwrite("skimage_gray.jpg", (gray_256*255).astype(np.uint8)) + # cv2.imwrite("skimage_edge.jpg", (edge_256*255).astype(np.uint8)) + except: + gray_256 = cv2.cvtColor(img_256, cv2.COLOR_RGB2GRAY) + gray_256_blured = cv2.GaussianBlur( + gray_256, ksize=(7, 7), sigmaX=sigma256, sigmaY=sigma256 + ) + edge_256 = cv2.Canny( + gray_256_blured, threshold1=int(255 * 0.1), threshold2=int(255 * 0.2) + ) + + # cv2.imwrite("opencv_edge.jpg", edge_256) + + # line + img_512 = resize(img, 512, 512) + + rel_pos, abs_pos, direct = load_masked_position_encoding(mask) + + batch = dict() + batch["images"] = to_tensor(img.copy()).unsqueeze(0).to(device) + batch["img_256"] = to_tensor(img_256, norm=True).unsqueeze(0).to(device) + batch["masks"] = to_tensor(mask).unsqueeze(0).to(device) + batch["mask_256"] = to_tensor(mask_256).unsqueeze(0).to(device) + batch["mask_512"] = to_tensor(mask_512).unsqueeze(0).to(device) + batch["edge_256"] = to_tensor(edge_256, scale=False).unsqueeze(0).to(device) + batch["img_512"] = to_tensor(img_512).unsqueeze(0).to(device) + batch["rel_pos"] = torch.LongTensor(rel_pos).unsqueeze(0).to(device) + batch["abs_pos"] = torch.LongTensor(abs_pos).unsqueeze(0).to(device) + batch["direct"] = torch.LongTensor(direct).unsqueeze(0).to(device) + batch["h"] = imgh + batch["w"] = imgw + + return batch + + +def to_device(data, device): + if isinstance(data, torch.Tensor): + return data.to(device) + if isinstance(data, dict): + for key in data: + if isinstance(data[key], torch.Tensor): + data[key] = data[key].to(device) + return data + if isinstance(data, list): + return [to_device(d, device) for d in data] + + +class ZITS(InpaintModel): + name = "zits" + min_size = 256 + pad_mod = 32 + pad_to_square = True + is_erase_model = True + + def __init__(self, device, **kwargs): + """ + + Args: + device: + """ + super().__init__(device) + self.device = device + self.sample_edge_line_iterations = 1 + + def init_model(self, device, **kwargs): + self.wireframe = load_jit_model( + ZITS_WIRE_FRAME_MODEL_URL, device, ZITS_WIRE_FRAME_MODEL_MD5 + ) + self.edge_line = load_jit_model( + ZITS_EDGE_LINE_MODEL_URL, device, ZITS_EDGE_LINE_MODEL_MD5 + ) + self.structure_upsample = load_jit_model( + ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, device, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 + ) + self.inpaint = load_jit_model( + ZITS_INPAINT_MODEL_URL, device, ZITS_INPAINT_MODEL_MD5 + ) + + @staticmethod + def download(): + download_model(ZITS_WIRE_FRAME_MODEL_URL, ZITS_WIRE_FRAME_MODEL_MD5) + download_model(ZITS_EDGE_LINE_MODEL_URL, ZITS_EDGE_LINE_MODEL_MD5) + download_model( + ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 + ) + download_model(ZITS_INPAINT_MODEL_URL, ZITS_INPAINT_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + model_paths = [ + get_cache_path_by_url(ZITS_WIRE_FRAME_MODEL_URL), + get_cache_path_by_url(ZITS_EDGE_LINE_MODEL_URL), + get_cache_path_by_url(ZITS_STRUCTURE_UPSAMPLE_MODEL_URL), + get_cache_path_by_url(ZITS_INPAINT_MODEL_URL), + ] + return all([os.path.exists(it) for it in model_paths]) + + def wireframe_edge_and_line(self, items, enable: bool): + # 最终向 items 中添加 edge 和 line key + if not enable: + items["edge"] = torch.zeros_like(items["masks"]) + items["line"] = torch.zeros_like(items["masks"]) + return + + start = time.time() + try: + line_256 = self.wireframe_forward( + items["img_512"], + h=256, + w=256, + 
masks=items["mask_512"], + mask_th=0.85, + ) + except: + line_256 = torch.zeros_like(items["mask_256"]) + + print(f"wireframe_forward time: {(time.time() - start) * 1000:.2f}ms") + + # np_line = (line[0][0].numpy() * 255).astype(np.uint8) + # cv2.imwrite("line.jpg", np_line) + + start = time.time() + edge_pred, line_pred = self.sample_edge_line_logits( + context=[items["img_256"], items["edge_256"], line_256], + mask=items["mask_256"].clone(), + iterations=self.sample_edge_line_iterations, + add_v=0.05, + mul_v=4, + ) + print(f"sample_edge_line_logits time: {(time.time() - start) * 1000:.2f}ms") + + # np_edge_pred = (edge_pred[0][0].numpy() * 255).astype(np.uint8) + # cv2.imwrite("edge_pred.jpg", np_edge_pred) + # np_line_pred = (line_pred[0][0].numpy() * 255).astype(np.uint8) + # cv2.imwrite("line_pred.jpg", np_line_pred) + # exit() + + input_size = min(items["h"], items["w"]) + if input_size != 256 and input_size > 256: + while edge_pred.shape[2] < input_size: + edge_pred = self.structure_upsample(edge_pred) + edge_pred = torch.sigmoid((edge_pred + 2) * 2) + + line_pred = self.structure_upsample(line_pred) + line_pred = torch.sigmoid((line_pred + 2) * 2) + + edge_pred = F.interpolate( + edge_pred, + size=(input_size, input_size), + mode="bilinear", + align_corners=False, + ) + line_pred = F.interpolate( + line_pred, + size=(input_size, input_size), + mode="bilinear", + align_corners=False, + ) + + # np_edge_pred = (edge_pred[0][0].numpy() * 255).astype(np.uint8) + # cv2.imwrite("edge_pred_upsample.jpg", np_edge_pred) + # np_line_pred = (line_pred[0][0].numpy() * 255).astype(np.uint8) + # cv2.imwrite("line_pred_upsample.jpg", np_line_pred) + # exit() + + items["edge"] = edge_pred.detach() + items["line"] = line_pred.detach() + + @torch.no_grad() + def forward(self, image, mask, config: InpaintRequest): + """Input images and output images have same size + images: [H, W, C] RGB + masks: [H, W] + return: BGR IMAGE + """ + mask = mask[:, :, 0] + items = load_image(image, mask, device=self.device) + + self.wireframe_edge_and_line(items, config.zits_wireframe) + + inpainted_image = self.inpaint( + items["images"], + items["masks"], + items["edge"], + items["line"], + items["rel_pos"], + items["direct"], + ) + + inpainted_image = inpainted_image * 255.0 + inpainted_image = ( + inpainted_image.cpu().permute(0, 2, 3, 1)[0].numpy().astype(np.uint8) + ) + inpainted_image = inpainted_image[:, :, ::-1] + + # cv2.imwrite("inpainted.jpg", inpainted_image) + # exit() + + return inpainted_image + + def wireframe_forward(self, images, h, w, masks, mask_th=0.925): + lcnn_mean = torch.tensor([109.730, 103.832, 98.681]).reshape(1, 3, 1, 1) + lcnn_std = torch.tensor([22.275, 22.124, 23.229]).reshape(1, 3, 1, 1) + images = images * 255.0 + # the masks value of lcnn is 127.5 + masked_images = images * (1 - masks) + torch.ones_like(images) * masks * 127.5 + masked_images = (masked_images - lcnn_mean) / lcnn_std + + def to_int(x): + return tuple(map(int, x)) + + lines_tensor = [] + lmap = np.zeros((h, w)) + + output_masked = self.wireframe(masked_images) + + output_masked = to_device(output_masked, "cpu") + if output_masked["num_proposals"] == 0: + lines_masked = [] + scores_masked = [] + else: + lines_masked = output_masked["lines_pred"].numpy() + lines_masked = [ + [line[1] * h, line[0] * w, line[3] * h, line[2] * w] + for line in lines_masked + ] + scores_masked = output_masked["lines_score"].numpy() + + for line, score in zip(lines_masked, scores_masked): + if score > mask_th: + try: + import skimage + + rr, 
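# Editor's sketch (not part of the original patch): calling the forward()
# defined above directly. It assumes input sides that are already multiples of
# pad_mod (the InpaintModel base class normally handles resizing/padding when
# the instance is called through ModelManager) and that InpaintRequest provides
# defaults for every field except zits_wireframe, which forward() reads.
def _zits_forward_example():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = ZITS(device)  # downloads and loads the four JIT models
    image = (np.random.rand(512, 512, 3) * 255).astype(np.uint8)  # RGB
    mask = np.zeros((512, 512, 3), dtype=np.uint8)
    mask[200:300, 200:300] = 255  # 255 = area to inpaint
    return model.forward(image, mask, InpaintRequest(zits_wireframe=True))  # BGR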
cc, value = skimage.draw.line_aa( + *to_int(line[0:2]), *to_int(line[2:4]) + ) + lmap[rr, cc] = np.maximum(lmap[rr, cc], value) + except: + cv2.line( + lmap, + to_int(line[0:2][::-1]), + to_int(line[2:4][::-1]), + (1, 1, 1), + 1, + cv2.LINE_AA, + ) + + lmap = np.clip(lmap * 255, 0, 255).astype(np.uint8) + lines_tensor.append(to_tensor(lmap).unsqueeze(0)) + + lines_tensor = torch.cat(lines_tensor, dim=0) + return lines_tensor.detach().to(self.device) + + def sample_edge_line_logits( + self, context, mask=None, iterations=1, add_v=0, mul_v=4 + ): + [img, edge, line] = context + + img = img * (1 - mask) + edge = edge * (1 - mask) + line = line * (1 - mask) + + for i in range(iterations): + edge_logits, line_logits = self.edge_line(img, edge, line, masks=mask) + + edge_pred = torch.sigmoid(edge_logits) + line_pred = torch.sigmoid((line_logits + add_v) * mul_v) + edge = edge + edge_pred * mask + edge[edge >= 0.25] = 1 + edge[edge < 0.25] = 0 + line = line + line_pred * mask + + b, _, h, w = edge_pred.shape + edge_pred = edge_pred.reshape(b, -1, 1) + line_pred = line_pred.reshape(b, -1, 1) + mask = mask.reshape(b, -1) + + edge_probs = torch.cat([1 - edge_pred, edge_pred], dim=-1) + line_probs = torch.cat([1 - line_pred, line_pred], dim=-1) + edge_probs[:, :, 1] += 0.5 + line_probs[:, :, 1] += 0.5 + edge_max_probs = edge_probs.max(dim=-1)[0] + (1 - mask) * (-100) + line_max_probs = line_probs.max(dim=-1)[0] + (1 - mask) * (-100) + + indices = torch.sort( + edge_max_probs + line_max_probs, dim=-1, descending=True + )[1] + + for ii in range(b): + keep = int((i + 1) / iterations * torch.sum(mask[ii, ...])) + + assert torch.sum(mask[ii][indices[ii, :keep]]) == keep, "Error!!!" + mask[ii][indices[ii, :keep]] = 0 + + mask = mask.reshape(b, 1, h, w) + edge = edge * (1 - mask) + line = line * (1 - mask) + + edge, line = edge.to(torch.float32), line.to(torch.float32) + return edge, line diff --git a/inpaint/model_manager.py b/inpaint/model_manager.py new file mode 100644 index 0000000..dae37d3 --- /dev/null +++ b/inpaint/model_manager.py @@ -0,0 +1,260 @@ +from typing import List, Dict + +import torch +from loguru import logger +import numpy as np + +from inpaint.download import scan_models +from inpaint.helper import switch_mps_device +from inpaint.model import models, ControlNet, SD, SDXL +from inpaint.model.brushnet.brushnet_wrapper import BrushNetWrapper +from inpaint.model.power_paint.power_paint_v2 import PowerPaintV2 +from inpaint.model.utils import torch_gc, is_local_files_only +from inpaint.schema import InpaintRequest, ModelInfo, ModelType + + +class ModelManager: + def __init__(self, name: str, device: torch.device, **kwargs): + self.name = name + self.device = device + self.kwargs = kwargs + self.available_models: Dict[str, ModelInfo] = {} + self.scan_models() + + self.enable_controlnet = kwargs.get("enable_controlnet", False) + controlnet_method = kwargs.get("controlnet_method", None) + if ( + controlnet_method is None + and name in self.available_models + and self.available_models[name].support_controlnet + ): + controlnet_method = self.available_models[name].controlnets[0] + self.controlnet_method = controlnet_method + + self.enable_brushnet = kwargs.get("enable_brushnet", False) + self.brushnet_method = kwargs.get("brushnet_method", None) + + self.enable_powerpaint_v2 = kwargs.get("enable_powerpaint_v2", False) + + self.model = self.init_model(name, device, **kwargs) + + @property + def current_model(self) -> ModelInfo: + return self.available_models[self.name] + + def init_model(self, 
name: str, device, **kwargs): + logger.info(f"Loading model: {name}") + if name not in self.available_models: + raise NotImplementedError( + f"Unsupported model: {name}. Available models: {list(self.available_models.keys())}" + ) + + model_info = self.available_models[name] + kwargs = { + **kwargs, + "model_info": model_info, + "enable_controlnet": self.enable_controlnet, + "controlnet_method": self.controlnet_method, + "enable_brushnet": self.enable_brushnet, + "brushnet_method": self.brushnet_method, + } + + if model_info.support_controlnet and self.enable_controlnet: + return ControlNet(device, **kwargs) + + if model_info.support_brushnet and self.enable_brushnet: + return BrushNetWrapper(device, **kwargs) + + if model_info.support_powerpaint_v2 and self.enable_powerpaint_v2: + return PowerPaintV2(device, **kwargs) + + if model_info.name in models: + return models[name](device, **kwargs) + + if model_info.model_type in [ + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SD, + ]: + return SD(device, **kwargs) + + if model_info.model_type in [ + ModelType.DIFFUSERS_SDXL_INPAINT, + ModelType.DIFFUSERS_SDXL, + ]: + return SDXL(device, **kwargs) + + raise NotImplementedError(f"Unsupported model: {name}") + + @torch.inference_mode() + def __call__(self, image, mask, config: InpaintRequest): + """ + + Args: + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + config: + + Returns: + BGR image + """ + if config.enable_controlnet: + self.switch_controlnet_method(config) + if config.enable_brushnet: + self.switch_brushnet_method(config) + + self.enable_disable_powerpaint_v2(config) + self.enable_disable_lcm_lora(config) + return self.model(image, mask, config).astype(np.uint8) + + def scan_models(self) -> List[ModelInfo]: + available_models = scan_models() + self.available_models = {it.name: it for it in available_models} + return available_models + + def switch(self, new_name: str): + if new_name == self.name: + return + + old_name = self.name + old_controlnet_method = self.controlnet_method + self.name = new_name + + if ( + self.available_models[new_name].support_controlnet + and self.controlnet_method + not in self.available_models[new_name].controlnets + ): + self.controlnet_method = self.available_models[new_name].controlnets[0] + try: + # TODO: enable/disable controlnet without reload model + del self.model + torch_gc() + + self.model = self.init_model( + new_name, switch_mps_device(new_name, self.device), **self.kwargs + ) + except Exception as e: + self.name = old_name + self.controlnet_method = old_controlnet_method + logger.info(f"Switch model from {old_name} to {new_name} failed, rollback") + self.model = self.init_model( + old_name, switch_mps_device(old_name, self.device), **self.kwargs + ) + raise e + + def switch_brushnet_method(self, config): + if not self.available_models[self.name].support_brushnet: + return + + if ( + self.enable_brushnet + and config.brushnet_method + and self.brushnet_method != config.brushnet_method + ): + old_brushnet_method = self.brushnet_method + self.brushnet_method = config.brushnet_method + self.model.switch_brushnet_method(config.brushnet_method) + logger.info( + f"Switch Brushnet method from {old_brushnet_method} to {config.brushnet_method}" + ) + + elif self.enable_brushnet != config.enable_brushnet: + self.enable_brushnet = config.enable_brushnet + self.brushnet_method = config.brushnet_method + + pipe_components = { + "vae": self.model.model.vae, + "text_encoder": self.model.model.text_encoder, + "unet": 
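# Editor's sketch (not part of the original patch): typical ModelManager usage.
# It assumes the "lama" and "mat" erase models have already been downloaded so
# scan_models() can find them, and that InpaintRequest() has usable defaults.
def _model_manager_example(image, mask):
    import torch
    from inpaint.schema import InpaintRequest
    manager = ModelManager(name="lama", device=torch.device("cpu"))
    result_bgr = manager(image, mask, InpaintRequest())  # image RGB, mask 255 = repaint
    manager.switch("mat")  # hot-swap to another model, with rollback on failure
    return result_bgr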
self.model.model.unet, + } + if hasattr(self.model.model, "text_encoder_2"): + pipe_components["text_encoder_2"] = self.model.model.text_encoder_2 + + self.model = self.init_model( + self.name, + switch_mps_device(self.name, self.device), + pipe_components=pipe_components, + **self.kwargs, + ) + + if not config.enable_brushnet: + logger.info("BrushNet Disabled") + else: + logger.info("BrushNet Enabled") + + def switch_controlnet_method(self, config): + if not self.available_models[self.name].support_controlnet: + return + + if ( + self.enable_controlnet + and config.controlnet_method + and self.controlnet_method != config.controlnet_method + ): + old_controlnet_method = self.controlnet_method + self.controlnet_method = config.controlnet_method + self.model.switch_controlnet_method(config.controlnet_method) + logger.info( + f"Switch Controlnet method from {old_controlnet_method} to {config.controlnet_method}" + ) + elif self.enable_controlnet != config.enable_controlnet: + self.enable_controlnet = config.enable_controlnet + self.controlnet_method = config.controlnet_method + + pipe_components = { + "vae": self.model.model.vae, + "text_encoder": self.model.model.text_encoder, + "unet": self.model.model.unet, + } + if hasattr(self.model.model, "text_encoder_2"): + pipe_components["text_encoder_2"] = self.model.model.text_encoder_2 + + self.model = self.init_model( + self.name, + switch_mps_device(self.name, self.device), + pipe_components=pipe_components, + **self.kwargs, + ) + if not config.enable_controlnet: + logger.info("Disable controlnet") + else: + logger.info(f"Enable controlnet: {config.controlnet_method}") + + def enable_disable_powerpaint_v2(self, config: InpaintRequest): + if not self.available_models[self.name].support_powerpaint_v2: + return + + if self.enable_powerpaint_v2 != config.enable_powerpaint_v2: + self.enable_powerpaint_v2 = config.enable_powerpaint_v2 + pipe_components = {"vae": self.model.model.vae} + + self.model = self.init_model( + self.name, + switch_mps_device(self.name, self.device), + pipe_components=pipe_components, + **self.kwargs, + ) + if config.enable_powerpaint_v2: + logger.info("Enable PowerPaintV2") + else: + logger.info("Disable PowerPaintV2") + + def enable_disable_lcm_lora(self, config: InpaintRequest): + if self.available_models[self.name].support_lcm_lora: + # TODO: change this if load other lora is supported + lcm_lora_loaded = bool(self.model.model.get_list_adapters()) + if config.sd_lcm_lora: + if not lcm_lora_loaded: + logger.info("Load LCM LORA") + self.model.model.load_lora_weights( + self.model.lcm_lora_id, + weight_name="pytorch_lora_weights.safetensors", + local_files_only=is_local_files_only(), + ) + else: + logger.info("Enable LCM LORA") + self.model.model.enable_lora() + else: + if lcm_lora_loaded: + logger.info("Disable LCM LORA") + self.model.model.disable_lora() diff --git a/inpaint/plugins/__init__.py b/inpaint/plugins/__init__.py new file mode 100644 index 0000000..8128025 --- /dev/null +++ b/inpaint/plugins/__init__.py @@ -0,0 +1,74 @@ +from typing import Dict + +from loguru import logger + +from .anime_seg import AnimeSeg +from .gfpgan_plugin import GFPGANPlugin +from .interactive_seg import InteractiveSeg +from .realesrgan import RealESRGANUpscaler +from .remove_bg import RemoveBG +from .restoreformer import RestoreFormerPlugin +from ..schema import InteractiveSegModel, Device, RealESRGANModel + + +def build_plugins( + enable_interactive_seg: bool, + interactive_seg_model: InteractiveSegModel, + interactive_seg_device: Device, 
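# Editor's sketch (not part of the original patch): ControlNet, BrushNet,
# PowerPaintV2 and LCM-LoRA are all toggled per request; the field names below
# follow the checks in the methods above, while the controlnet_method value is
# an assumed Hugging Face model id. The currently loaded model must support
# ControlNet for the switch to take effect.
def _per_request_controlnet_example(manager, image, mask):
    from inpaint.schema import InpaintRequest
    req = InpaintRequest(
        enable_controlnet=True,
        controlnet_method="lllyasviel/control_v11p_sd15_inpaint",
        sd_lcm_lora=False,
    )
    return manager(image, mask, req)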
+ enable_remove_bg: bool, + remove_bg_model: str, + enable_anime_seg: bool, + enable_realesrgan: bool, + realesrgan_device: Device, + realesrgan_model: RealESRGANModel, + enable_gfpgan: bool, + gfpgan_device: Device, + enable_restoreformer: bool, + restoreformer_device: Device, + no_half: bool, +) -> Dict: + plugins = {} + if enable_interactive_seg: + logger.info(f"Initialize {InteractiveSeg.name} plugin") + plugins[InteractiveSeg.name] = InteractiveSeg( + interactive_seg_model, interactive_seg_device + ) + + if enable_remove_bg: + logger.info(f"Initialize {RemoveBG.name} plugin") + plugins[RemoveBG.name] = RemoveBG(remove_bg_model) + + if enable_anime_seg: + logger.info(f"Initialize {AnimeSeg.name} plugin") + plugins[AnimeSeg.name] = AnimeSeg() + + if enable_realesrgan: + logger.info( + f"Initialize {RealESRGANUpscaler.name} plugin: {realesrgan_model}, {realesrgan_device}" + ) + plugins[RealESRGANUpscaler.name] = RealESRGANUpscaler( + realesrgan_model, + realesrgan_device, + no_half=no_half, + ) + + if enable_gfpgan: + logger.info(f"Initialize {GFPGANPlugin.name} plugin") + if enable_realesrgan: + logger.info("Use realesrgan as GFPGAN background upscaler") + else: + logger.info( + f"GFPGAN no background upscaler, use --enable-realesrgan to enable it" + ) + plugins[GFPGANPlugin.name] = GFPGANPlugin( + gfpgan_device, + upscaler=plugins.get(RealESRGANUpscaler.name, None), + ) + + if enable_restoreformer: + logger.info(f"Initialize {RestoreFormerPlugin.name} plugin") + plugins[RestoreFormerPlugin.name] = RestoreFormerPlugin( + restoreformer_device, + upscaler=plugins.get(RealESRGANUpscaler.name, None), + ) + return plugins diff --git a/inpaint/plugins/anime_seg.py b/inpaint/plugins/anime_seg.py new file mode 100644 index 0000000..286564b --- /dev/null +++ b/inpaint/plugins/anime_seg.py @@ -0,0 +1,462 @@ +import cv2 +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from PIL import Image + +from iopaint.helper import load_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest + + +class REBNCONV(nn.Module): + def __init__(self, in_ch=3, out_ch=3, dirate=1, stride=1): + super(REBNCONV, self).__init__() + + self.conv_s1 = nn.Conv2d( + in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, stride=stride + ) + self.bn_s1 = nn.BatchNorm2d(out_ch) + self.relu_s1 = nn.ReLU(inplace=True) + + def forward(self, x): + hx = x + xout = self.relu_s1(self.bn_s1(self.conv_s1(hx))) + + return xout + + +## upsample tensor 'src' to have the same spatial size with tensor 'tar' +def _upsample_like(src, tar): + src = F.interpolate(src, size=tar.shape[2:], mode="bilinear", align_corners=False) + + return src + + +### RSU-7 ### +class RSU7(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3, img_size=512): + super(RSU7, self).__init__() + + self.in_ch = in_ch + self.mid_ch = mid_ch + self.out_ch = out_ch + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) ## 1 -> 1/2 + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool5 = nn.MaxPool2d(2, stride=2, 
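# Editor's sketch (not part of the original patch): wiring build_plugins() from
# CLI-style flags. The enum members used here (InteractiveSegModel.vit_b,
# Device.cpu, RealESRGANModel.realesr_general_x4v3) and the remove_bg model id
# are assumptions about inpaint/schema.py and may differ.
def _build_plugins_example():
    from inpaint.schema import InteractiveSegModel, Device, RealESRGANModel
    return build_plugins(
        enable_interactive_seg=True,
        interactive_seg_model=InteractiveSegModel.vit_b,
        interactive_seg_device=Device.cpu,
        enable_remove_bg=False,
        remove_bg_model="briaai/RMBG-1.4",
        enable_anime_seg=False,
        enable_realesrgan=False,
        realesrgan_device=Device.cpu,
        realesrgan_model=RealESRGANModel.realesr_general_x4v3,
        enable_gfpgan=False,
        gfpgan_device=Device.cpu,
        enable_restoreformer=False,
        restoreformer_device=Device.cpu,
        no_half=False,
    )  # -> dict keyed by plugin name, e.g. InteractiveSeg.name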
ceil_mode=True) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + b, c, h, w = x.shape + + hx = x + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + hx = self.pool5(hx5) + + hx6 = self.rebnconv6(hx) + + hx7 = self.rebnconv7(hx6) + + hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1)) + hx6dup = _upsample_like(hx6d, hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-6 ### +class RSU6(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU6, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + + hx6 = self.rebnconv6(hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-5 ### +class RSU5(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU5, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, 
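# Editor's sketch (not part of the original patch): the RSU blocks are residual
# U-blocks, so spatial size is preserved and the output carries out_ch channels
# (the input projection hxin is added back at the end).
def _rsu_shape_check():
    x = torch.randn(1, 3, 64, 64)
    assert RSU7(in_ch=3, mid_ch=12, out_ch=3)(x).shape == (1, 3, 64, 64)
    assert RSU6(in_ch=3, mid_ch=12, out_ch=3)(x).shape == (1, 3, 64, 64)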
ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + + hx5 = self.rebnconv5(hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4 ### +class RSU4(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4F ### +class RSU4F(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4F, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx2 = self.rebnconv2(hx1) + hx3 = self.rebnconv3(hx2) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1)) + hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1)) + + return hx1d + hxin + + +class ISNetDIS(nn.Module): + def __init__(self, in_ch=3, out_ch=1): + super(ISNetDIS, self).__init__() + + self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1) + 
self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage1 = RSU7(64, 32, 64) + self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage2 = RSU6(64, 32, 128) + self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage3 = RSU5(128, 64, 256) + self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage4 = RSU4(256, 128, 512) + self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(512, 256, 512) + self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(512, 256, 512) + + # decoder + self.stage5d = RSU4F(1024, 256, 512) + self.stage4d = RSU4(1024, 128, 256) + self.stage3d = RSU5(512, 64, 128) + self.stage2d = RSU6(256, 32, 64) + self.stage1d = RSU7(128, 16, 64) + + self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) + + def forward(self, x): + hx = x + + hxin = self.conv_in(hx) + hx = self.pool_in(hxin) + + # stage 1 + hx1 = self.stage1(hxin) + hx = self.pool12(hx1) + + # stage 2 + hx2 = self.stage2(hx) + hx = self.pool23(hx2) + + # stage 3 + hx3 = self.stage3(hx) + hx = self.pool34(hx3) + + # stage 4 + hx4 = self.stage4(hx) + hx = self.pool45(hx4) + + # stage 5 + hx5 = self.stage5(hx) + hx = self.pool56(hx5) + + # stage 6 + hx6 = self.stage6(hx) + hx6up = _upsample_like(hx6, hx5) + + # -------------------- decoder -------------------- + hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) + + # side output + d1 = self.side1(hx1d) + d1 = _upsample_like(d1, x) + return d1.sigmoid() + + +# 从小到大 +ANIME_SEG_MODELS = { + "url": "https://github.com/Sanster/models/releases/download/isnetis/isnetis.pth", + "md5": "5f25479076b73074730ab8de9e8f2051", +} + + +class AnimeSeg(BasePlugin): + # Model from: https://github.com/SkyTNT/anime-segmentation + name = "AnimeSeg" + support_gen_image = True + support_gen_mask = True + + def __init__(self): + super().__init__() + self.model = load_model( + ISNetDIS(), + ANIME_SEG_MODELS["url"], + "cpu", + ANIME_SEG_MODELS["md5"], + ) + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + mask = self.forward(rgb_np_img) + mask = Image.fromarray(mask, mode="L") + h0, w0 = rgb_np_img.shape[0], rgb_np_img.shape[1] + empty = Image.new("RGBA", (w0, h0), 0) + img = Image.fromarray(rgb_np_img) + cutout = Image.composite(img, empty, mask) + return np.asarray(cutout) + + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + return self.forward(rgb_np_img) + + @torch.inference_mode() + def forward(self, rgb_np_img): + s = 1024 + + h0, w0 = h, w = rgb_np_img.shape[0], rgb_np_img.shape[1] + if h > w: + h, w = s, int(s * w / h) + else: + h, w = int(s * h / w), s + ph, pw = s - h, s - w + tmpImg = np.zeros([s, s, 3], dtype=np.float32) + tmpImg[ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w] = ( + cv2.resize(rgb_np_img, (w, h)) / 255 + ) + tmpImg = tmpImg.transpose((2, 0, 1)) + tmpImg = torch.from_numpy(tmpImg).unsqueeze(0).type(torch.FloatTensor) + mask = self.model(tmpImg) + mask = mask[0, :, ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w] + mask = cv2.resize(mask.cpu().numpy().transpose((1, 2, 0)), (w0, h0)) + return (mask * 255).astype("uint8") diff --git a/inpaint/plugins/base_plugin.py 
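# Editor's sketch (not part of the original patch): ISNetDIS returns a
# [N, 1, H, W] sigmoid matte at the input resolution, and AnimeSeg wraps it
# with 1024x1024 letterbox preprocessing. Instantiating AnimeSeg downloads
# isnetis.pth on first use.
def _anime_seg_example(rgb_np_img):
    plugin = AnimeSeg()
    matte = plugin.forward(rgb_np_img)  # uint8 [H, W], higher = more foreground
    return matte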
b/inpaint/plugins/base_plugin.py new file mode 100644 index 0000000..1f8bddc --- /dev/null +++ b/inpaint/plugins/base_plugin.py @@ -0,0 +1,30 @@ +from loguru import logger +import numpy as np + +from iopaint.schema import RunPluginRequest + + +class BasePlugin: + name: str + support_gen_image: bool = False + support_gen_mask: bool = False + + def __init__(self): + err_msg = self.check_dep() + if err_msg: + logger.error(err_msg) + exit(-1) + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + # return RGBA np image or BGR np image + ... + + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + # return GRAY or BGR np image, 255 means foreground, 0 means background + ... + + def check_dep(self): + ... + + def switch_model(self, new_model_name: str): + ... diff --git a/inpaint/plugins/basicsr/LICENSE b/inpaint/plugins/basicsr/LICENSE new file mode 100644 index 0000000..1c9b5b8 --- /dev/null +++ b/inpaint/plugins/basicsr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2022 BasicSR Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/inpaint/plugins/basicsr/__init__.py b/inpaint/plugins/basicsr/__init__.py new file mode 100644 index 0000000..6bd8efd --- /dev/null +++ b/inpaint/plugins/basicsr/__init__.py @@ -0,0 +1,22 @@ +""" +Adapted from https://github.com/XPixelGroup/BasicSR +License: Apache-2.0 + +As of Feb 2024, `basicsr` appears to be unmaintained. It imports a function from `torchvision` that is removed in +`torchvision` 0.17. Here is the deprecation warning: + + UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in + 0.17**. Please don't rely on it. You probably just need to use APIs in torchvision.transforms.functional or in + torchvision.transforms.v2.functional. + +As a result, a dependency on `basicsr` means we cannot keep our `torchvision` dependency up to date. + +Because we only rely on a single class `RRDBNet` from `basicsr`, we've copied the relevant code here and removed the +dependency on `basicsr`. + +The code is almost unchanged, only a few type annotations have been added. The license is also copied. + +Copy From InvokeAI +""" + +from .rrdbnet_arch import RRDBNet diff --git a/inpaint/plugins/basicsr/arch_util.py b/inpaint/plugins/basicsr/arch_util.py new file mode 100644 index 0000000..befe76a --- /dev/null +++ b/inpaint/plugins/basicsr/arch_util.py @@ -0,0 +1,80 @@ +from typing import Type, List, Union + +import torch +from torch import nn as nn +from torch.nn import init as init +from torch.nn.modules.batchnorm import _BatchNorm + + +@torch.no_grad() +def default_init_weights( + module_list: Union[List[nn.Module], nn.Module], + scale: float = 1, + bias_fill: float = 0, + **kwargs, +) -> None: + """Initialize network weights. + + Args: + module_list (list[nn.Module] | nn.Module): Modules to be initialized. + scale (float): Scale initialized weights, especially for residual + blocks. Default: 1. + bias_fill (float): The value to fill bias. Default: 0 + kwargs (dict): Other arguments for initialization function. 
+ """ + if not isinstance(module_list, list): + module_list = [module_list] + for module in module_list: + for m in module.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, **kwargs) + m.weight.data *= scale + if m.bias is not None: + m.bias.data.fill_(bias_fill) + elif isinstance(m, nn.Linear): + init.kaiming_normal_(m.weight, **kwargs) + m.weight.data *= scale + if m.bias is not None: + m.bias.data.fill_(bias_fill) + elif isinstance(m, _BatchNorm): + init.constant_(m.weight, 1) + if m.bias is not None: + m.bias.data.fill_(bias_fill) + + +def make_layer( + basic_block: Type[nn.Module], num_basic_block: int, **kwarg +) -> nn.Sequential: + """Make layers by stacking the same blocks. + + Args: + basic_block (Type[nn.Module]): nn.Module class for basic block. + num_basic_block (int): number of blocks. + + Returns: + nn.Sequential: Stacked blocks in nn.Sequential. + """ + layers = [] + for _ in range(num_basic_block): + layers.append(basic_block(**kwarg)) + return nn.Sequential(*layers) + + +# TODO: may write a cpp file +def pixel_unshuffle(x: torch.Tensor, scale: int) -> torch.Tensor: + """Pixel unshuffle. + + Args: + x (Tensor): Input feature with shape (b, c, hh, hw). + scale (int): Downsample ratio. + + Returns: + Tensor: the pixel unshuffled feature. + """ + b, c, hh, hw = x.size() + out_channel = c * (scale**2) + assert hh % scale == 0 and hw % scale == 0 + h = hh // scale + w = hw // scale + x_view = x.view(b, c, h, scale, w, scale) + return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w) diff --git a/inpaint/plugins/basicsr/img_util.py b/inpaint/plugins/basicsr/img_util.py new file mode 100644 index 0000000..3a5f1da --- /dev/null +++ b/inpaint/plugins/basicsr/img_util.py @@ -0,0 +1,172 @@ +import cv2 +import math +import numpy as np +import os +import torch +from torchvision.utils import make_grid + + +def img2tensor(imgs, bgr2rgb=True, float32=True): + """Numpy array to tensor. + + Args: + imgs (list[ndarray] | ndarray): Input images. + bgr2rgb (bool): Whether to change bgr to rgb. + float32 (bool): Whether to change to float32. + + Returns: + list[tensor] | tensor: Tensor images. If returned results only have + one element, just return tensor. + """ + + def _totensor(img, bgr2rgb, float32): + if img.shape[2] == 3 and bgr2rgb: + if img.dtype == 'float64': + img = img.astype('float32') + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = torch.from_numpy(img.transpose(2, 0, 1)) + if float32: + img = img.float() + return img + + if isinstance(imgs, list): + return [_totensor(img, bgr2rgb, float32) for img in imgs] + else: + return _totensor(imgs, bgr2rgb, float32) + + +def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)): + """Convert torch Tensors into image numpy arrays. + + After clamping to [min, max], values will be normalized to [0, 1]. + + Args: + tensor (Tensor or list[Tensor]): Accept shapes: + 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); + 2) 3D Tensor of shape (3/1 x H x W); + 3) 2D Tensor of shape (H x W). + Tensor channel should be in RGB order. + rgb2bgr (bool): Whether to change rgb to bgr. + out_type (numpy type): output types. If ``np.uint8``, transform outputs + to uint8 type with range [0, 255]; otherwise, float type with + range [0, 1]. Default: ``np.uint8``. + min_max (tuple[int]): min and max values for clamp. + + Returns: + (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of + shape (H x W). The channel order is BGR. 
+ """ + if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') + + if torch.is_tensor(tensor): + tensor = [tensor] + result = [] + for _tensor in tensor: + _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max) + _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0]) + + n_dim = _tensor.dim() + if n_dim == 4: + img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy() + img_np = img_np.transpose(1, 2, 0) + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 3: + img_np = _tensor.numpy() + img_np = img_np.transpose(1, 2, 0) + if img_np.shape[2] == 1: # gray image + img_np = np.squeeze(img_np, axis=2) + else: + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 2: + img_np = _tensor.numpy() + else: + raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}') + if out_type == np.uint8: + # Unlike MATLAB, numpy.unit8() WILL NOT round by default. + img_np = (img_np * 255.0).round() + img_np = img_np.astype(out_type) + result.append(img_np) + if len(result) == 1: + result = result[0] + return result + + +def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)): + """This implementation is slightly faster than tensor2img. + It now only supports torch tensor with shape (1, c, h, w). + + Args: + tensor (Tensor): Now only support torch tensor with (1, c, h, w). + rgb2bgr (bool): Whether to change rgb to bgr. Default: True. + min_max (tuple[int]): min and max values for clamp. + """ + output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0) + output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255 + output = output.type(torch.uint8).cpu().numpy() + if rgb2bgr: + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output + + +def imfrombytes(content, flag='color', float32=False): + """Read an image from bytes. + + Args: + content (bytes): Image bytes got from files or other streams. + flag (str): Flags specifying the color type of a loaded image, + candidates are `color`, `grayscale` and `unchanged`. + float32 (bool): Whether to change to float32., If True, will also norm + to [0, 1]. Default: False. + + Returns: + ndarray: Loaded image array. + """ + img_np = np.frombuffer(content, np.uint8) + imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED} + img = cv2.imdecode(img_np, imread_flags[flag]) + if float32: + img = img.astype(np.float32) / 255. + return img + + +def imwrite(img, file_path, params=None, auto_mkdir=True): + """Write image to file. + + Args: + img (ndarray): Image array to be written. + file_path (str): Image file path. + params (None or list): Same as opencv's :func:`imwrite` interface. + auto_mkdir (bool): If the parent folder of `file_path` does not exist, + whether to create it automatically. + + Returns: + bool: Successful or not. + """ + if auto_mkdir: + dir_name = os.path.abspath(os.path.dirname(file_path)) + os.makedirs(dir_name, exist_ok=True) + ok = cv2.imwrite(file_path, img, params) + if not ok: + raise IOError('Failed in writing images.') + + +def crop_border(imgs, crop_border): + """Crop borders of images. + + Args: + imgs (list[ndarray] | ndarray): Images with shape (h, w, c). + crop_border (int): Crop border for each end of height and weight. + + Returns: + list[ndarray]: Cropped images. 
+ """ + if crop_border == 0: + return imgs + else: + if isinstance(imgs, list): + return [v[crop_border:-crop_border, crop_border:-crop_border, ...] for v in imgs] + else: + return imgs[crop_border:-crop_border, crop_border:-crop_border, ...] diff --git a/inpaint/plugins/basicsr/rrdbnet_arch.py b/inpaint/plugins/basicsr/rrdbnet_arch.py new file mode 100644 index 0000000..31c08eb --- /dev/null +++ b/inpaint/plugins/basicsr/rrdbnet_arch.py @@ -0,0 +1,133 @@ +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .arch_util import default_init_weights, make_layer, pixel_unshuffle + + +class ResidualDenseBlock(nn.Module): + """Residual Dense Block. + + Used in RRDB block in ESRGAN. + + Args: + num_feat (int): Channel number of intermediate features. + num_grow_ch (int): Channels for each growth. + """ + + def __init__(self, num_feat: int = 64, num_grow_ch: int = 32) -> None: + super(ResidualDenseBlock, self).__init__() + self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1) + self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1) + self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1) + self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1) + self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1) + + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + # initialization + default_init_weights( + [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1 + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x1 = self.lrelu(self.conv1(x)) + x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) + x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) + x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) + x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) + # Empirically, we use 0.2 to scale the residual for better performance + return x5 * 0.2 + x + + +class RRDB(nn.Module): + """Residual in Residual Dense Block. + + Used in RRDB-Net in ESRGAN. + + Args: + num_feat (int): Channel number of intermediate features. + num_grow_ch (int): Channels for each growth. + """ + + def __init__(self, num_feat: int, num_grow_ch: int = 32) -> None: + super(RRDB, self).__init__() + self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch) + self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch) + self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = self.rdb1(x) + out = self.rdb2(out) + out = self.rdb3(out) + # Empirically, we use 0.2 to scale the residual for better performance + return out * 0.2 + x + + +class RRDBNet(nn.Module): + """Networks consisting of Residual in Residual Dense Block, which is used + in ESRGAN. + + ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks. + + We extend ESRGAN for scale x2 and scale x1. + Note: This is one option for scale 1, scale 2 in RRDBNet. + We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size + and enlarge the channel size before feeding inputs into the main ESRGAN architecture. + + Args: + num_in_ch (int): Channel number of inputs. + num_out_ch (int): Channel number of outputs. + num_feat (int): Channel number of intermediate features. + Default: 64 + num_block (int): Block number in the trunk network. Defaults: 23 + num_grow_ch (int): Channels for each growth. Default: 32. 
+ """ + + def __init__( + self, + num_in_ch: int, + num_out_ch: int, + scale: int = 4, + num_feat: int = 64, + num_block: int = 23, + num_grow_ch: int = 32, + ) -> None: + super(RRDBNet, self).__init__() + self.scale = scale + if scale == 2: + num_in_ch = num_in_ch * 4 + elif scale == 1: + num_in_ch = num_in_ch * 16 + self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) + self.body = make_layer( + RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch + ) + self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + # upsample + self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.scale == 2: + feat = pixel_unshuffle(x, scale=2) + elif self.scale == 1: + feat = pixel_unshuffle(x, scale=4) + else: + feat = x + feat = self.conv_first(feat) + body_feat = self.conv_body(self.body(feat)) + feat = feat + body_feat + # upsample + feat = self.lrelu( + self.conv_up1(F.interpolate(feat, scale_factor=2, mode="nearest")) + ) + feat = self.lrelu( + self.conv_up2(F.interpolate(feat, scale_factor=2, mode="nearest")) + ) + out = self.conv_last(self.lrelu(self.conv_hr(feat))) + return out diff --git a/inpaint/plugins/briarmbg.py b/inpaint/plugins/briarmbg.py new file mode 100644 index 0000000..880f530 --- /dev/null +++ b/inpaint/plugins/briarmbg.py @@ -0,0 +1,512 @@ +# copy from: https://huggingface.co/spaces/briaai/BRIA-RMBG-1.4/blob/main/briarmbg.py +import cv2 +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +import numpy as np +from torchvision.transforms.functional import normalize + + +class REBNCONV(nn.Module): + def __init__(self, in_ch=3, out_ch=3, dirate=1, stride=1): + super(REBNCONV, self).__init__() + + self.conv_s1 = nn.Conv2d( + in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, stride=stride + ) + self.bn_s1 = nn.BatchNorm2d(out_ch) + self.relu_s1 = nn.ReLU(inplace=True) + + def forward(self, x): + hx = x + xout = self.relu_s1(self.bn_s1(self.conv_s1(hx))) + + return xout + + +## upsample tensor 'src' to have the same spatial size with tensor 'tar' +def _upsample_like(src, tar): + src = F.interpolate(src, size=tar.shape[2:], mode="bilinear") + + return src + + +### RSU-7 ### +class RSU7(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3, img_size=512): + super(RSU7, self).__init__() + + self.in_ch = in_ch + self.mid_ch = mid_ch + self.out_ch = out_ch + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) ## 1 -> 1/2 + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = 
REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + b, c, h, w = x.shape + + hx = x + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + hx = self.pool5(hx5) + + hx6 = self.rebnconv6(hx) + + hx7 = self.rebnconv7(hx6) + + hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1)) + hx6dup = _upsample_like(hx6d, hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-6 ### +class RSU6(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU6, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + + hx6 = self.rebnconv6(hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-5 ### +class RSU5(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU5, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, 
mid_ch, dirate=1) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + + hx5 = self.rebnconv5(hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4 ### +class RSU4(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4F ### +class RSU4F(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4F, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx2 = self.rebnconv2(hx1) + hx3 = self.rebnconv3(hx2) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1)) + hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1)) + + return hx1d + hxin + + +class myrebnconv(nn.Module): + def __init__( + self, + in_ch=3, + out_ch=1, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + groups=1, + ): + super(myrebnconv, self).__init__() + + self.conv = nn.Conv2d( + in_ch, + out_ch, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + self.bn = nn.BatchNorm2d(out_ch) + self.rl = nn.ReLU(inplace=True) + + def forward(self, x): + return 
self.rl(self.bn(self.conv(x))) + + +class BriaRMBG(nn.Module): + def __init__(self, in_ch=3, out_ch=1): + super(BriaRMBG, self).__init__() + + self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1) + self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage1 = RSU7(64, 32, 64) + self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage2 = RSU6(64, 32, 128) + self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage3 = RSU5(128, 64, 256) + self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage4 = RSU4(256, 128, 512) + self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(512, 256, 512) + self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(512, 256, 512) + + # decoder + self.stage5d = RSU4F(1024, 256, 512) + self.stage4d = RSU4(1024, 128, 256) + self.stage3d = RSU5(512, 64, 128) + self.stage2d = RSU6(256, 32, 64) + self.stage1d = RSU7(128, 16, 64) + + self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side3 = nn.Conv2d(128, out_ch, 3, padding=1) + self.side4 = nn.Conv2d(256, out_ch, 3, padding=1) + self.side5 = nn.Conv2d(512, out_ch, 3, padding=1) + self.side6 = nn.Conv2d(512, out_ch, 3, padding=1) + + # self.outconv = nn.Conv2d(6*out_ch,out_ch,1) + + def forward(self, x): + hx = x + + hxin = self.conv_in(hx) + # hx = self.pool_in(hxin) + + # stage 1 + hx1 = self.stage1(hxin) + hx = self.pool12(hx1) + + # stage 2 + hx2 = self.stage2(hx) + hx = self.pool23(hx2) + + # stage 3 + hx3 = self.stage3(hx) + hx = self.pool34(hx3) + + # stage 4 + hx4 = self.stage4(hx) + hx = self.pool45(hx4) + + # stage 5 + hx5 = self.stage5(hx) + hx = self.pool56(hx5) + + # stage 6 + hx6 = self.stage6(hx) + hx6up = _upsample_like(hx6, hx5) + + # -------------------- decoder -------------------- + hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) + + # side output + d1 = self.side1(hx1d) + d1 = _upsample_like(d1, x) + + d2 = self.side2(hx2d) + d2 = _upsample_like(d2, x) + + d3 = self.side3(hx3d) + d3 = _upsample_like(d3, x) + + d4 = self.side4(hx4d) + d4 = _upsample_like(d4, x) + + d5 = self.side5(hx5d) + d5 = _upsample_like(d5, x) + + d6 = self.side6(hx6) + d6 = _upsample_like(d6, x) + + return [ + F.sigmoid(d1), + F.sigmoid(d2), + F.sigmoid(d3), + F.sigmoid(d4), + F.sigmoid(d5), + F.sigmoid(d6), + ], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6] + + +def resize_image(image): + image = image.convert("RGB") + model_input_size = (1024, 1024) + image = image.resize(model_input_size, Image.BILINEAR) + return image + + +def create_briarmbg_session(): + from huggingface_hub import hf_hub_download + + net = BriaRMBG() + model_path = hf_hub_download("briaai/RMBG-1.4", "model.pth") + net.load_state_dict(torch.load(model_path, map_location="cpu")) + net.eval() + return net + + +def briarmbg_process(bgr_np_image, session, only_mask=False): + # prepare input + orig_bgr_image = Image.fromarray(bgr_np_image) + w, h = orig_im_size = orig_bgr_image.size + image = resize_image(orig_bgr_image) + im_np = np.array(image) + im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2, 0, 1) + im_tensor = 
torch.unsqueeze(im_tensor, 0) + im_tensor = torch.divide(im_tensor, 255.0) + im_tensor = normalize(im_tensor, [0.5, 0.5, 0.5], [1.0, 1.0, 1.0]) + # inference + result = session(im_tensor) + # post process + result = torch.squeeze(F.interpolate(result[0][0], size=(h, w), mode="bilinear"), 0) + ma = torch.max(result) + mi = torch.min(result) + result = (result - mi) / (ma - mi) + # image to pil + im_array = (result * 255).cpu().data.numpy().astype(np.uint8) + + mask = np.squeeze(im_array) + if only_mask: + return mask + + pil_im = Image.fromarray(mask) + # paste the mask on the original image + new_im = Image.new("RGBA", pil_im.size, (0, 0, 0, 0)) + new_im.paste(orig_bgr_image, mask=pil_im) + rgba_np_img = np.asarray(new_im) + return rgba_np_img diff --git a/inpaint/plugins/facexlib/.gitignore b/inpaint/plugins/facexlib/.gitignore new file mode 100644 index 0000000..9f69454 --- /dev/null +++ b/inpaint/plugins/facexlib/.gitignore @@ -0,0 +1,135 @@ +.vscode +*.pth +*.png +*.jpg +version.py + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/inpaint/plugins/facexlib/__init__.py b/inpaint/plugins/facexlib/__init__.py new file mode 100644 index 0000000..6494685 --- /dev/null +++ b/inpaint/plugins/facexlib/__init__.py @@ -0,0 +1,3 @@ +# flake8: noqa +from .detection import * +from .utils import * diff --git a/inpaint/plugins/facexlib/detection/__init__.py b/inpaint/plugins/facexlib/detection/__init__.py new file mode 100644 index 0000000..eb3c79c --- /dev/null +++ b/inpaint/plugins/facexlib/detection/__init__.py @@ -0,0 +1,31 @@ +import torch +from copy import deepcopy + +from ..utils import load_file_from_url +from .retinaface import RetinaFace + + +def init_detection_model(model_name, half=False, device='cuda', model_rootpath=None): + if model_name == 'retinaface_resnet50': + model = RetinaFace(network_name='resnet50', half=half, device=device) + model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth' + elif model_name == 'retinaface_mobile0.25': + model = RetinaFace(network_name='mobile0.25', half=half, device=device) + model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth' + else: + raise NotImplementedError(f'{model_name} is not implemented.') + + model_path = load_file_from_url( + url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath) + + # TODO: clean pretrained model + load_net = torch.load(model_path, map_location=lambda storage, loc: storage) + # remove unnecessary 'module.' + for k, v in deepcopy(load_net).items(): + if k.startswith('module.'): + load_net[k[7:]] = v + load_net.pop(k) + model.load_state_dict(load_net, strict=True) + model.eval() + model = model.to(device) + return model diff --git a/inpaint/plugins/facexlib/detection/align_trans.py b/inpaint/plugins/facexlib/detection/align_trans.py new file mode 100644 index 0000000..07f1eb3 --- /dev/null +++ b/inpaint/plugins/facexlib/detection/align_trans.py @@ -0,0 +1,219 @@ +import cv2 +import numpy as np + +from .matlab_cp2tform import get_similarity_transform_for_cv2 + +# reference facial points, a list of coordinates (x,y) +REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278], + [33.54930115, 92.3655014], [62.72990036, 92.20410156]] + +DEFAULT_CROP_SIZE = (96, 112) + + +class FaceWarpException(Exception): + + def __str__(self): + return 'In File {}:{}'.format(__file__, super.__str__(self)) + + +def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False): + """ + Function: + ---------- + get reference 5 key points according to crop settings: + 0. Set default crop_size: + if default_square: + crop_size = (112, 112) + else: + crop_size = (96, 112) + 1. Pad the crop_size by inner_padding_factor in each side; + 2. Resize crop_size into (output_size - outer_padding*2), + pad into output_size with outer_padding; + 3. 
Output reference_5point; + Parameters: + ---------- + @output_size: (w, h) or None + size of aligned face image + @inner_padding_factor: (w_factor, h_factor) + padding factor for inner (w, h) + @outer_padding: (w_pad, h_pad) + each row is a pair of coordinates (x, y) + @default_square: True or False + if True: + default crop_size = (112, 112) + else: + default crop_size = (96, 112); + !!! make sure, if output_size is not None: + (output_size - outer_padding) + = some_scale * (default crop_size * (1.0 + + inner_padding_factor)) + Returns: + ---------- + @reference_5point: 5x2 np.array + each row is a pair of transformed coordinates (x, y) + """ + + tmp_5pts = np.array(REFERENCE_FACIAL_POINTS) + tmp_crop_size = np.array(DEFAULT_CROP_SIZE) + + # 0) make the inner region a square + if default_square: + size_diff = max(tmp_crop_size) - tmp_crop_size + tmp_5pts += size_diff / 2 + tmp_crop_size += size_diff + + if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]): + + return tmp_5pts + + if (inner_padding_factor == 0 and outer_padding == (0, 0)): + if output_size is None: + return tmp_5pts + else: + raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size)) + + # check output size + if not (0 <= inner_padding_factor <= 1.0): + raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)') + + if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None): + output_size = tmp_crop_size * \ + (1 + inner_padding_factor * 2).astype(np.int32) + output_size += np.array(outer_padding) + if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]): + raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])') + + # 1) pad the inner region according inner_padding_factor + if inner_padding_factor > 0: + size_diff = tmp_crop_size * inner_padding_factor * 2 + tmp_5pts += size_diff / 2 + tmp_crop_size += np.round(size_diff).astype(np.int32) + + # 2) resize the padded inner region + size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2 + + if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]: + raise FaceWarpException('Must have (output_size - outer_padding)' + '= some_scale * (crop_size * (1.0 + inner_padding_factor)') + + scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0] + tmp_5pts = tmp_5pts * scale_factor + # size_diff = tmp_crop_size * (scale_factor - min(scale_factor)) + # tmp_5pts = tmp_5pts + size_diff / 2 + tmp_crop_size = size_bf_outer_pad + + # 3) add outer_padding to make output_size + reference_5point = tmp_5pts + np.array(outer_padding) + tmp_crop_size = output_size + + return reference_5point + + +def get_affine_transform_matrix(src_pts, dst_pts): + """ + Function: + ---------- + get affine transform matrix 'tfm' from src_pts to dst_pts + Parameters: + ---------- + @src_pts: Kx2 np.array + source points matrix, each row is a pair of coordinates (x, y) + @dst_pts: Kx2 np.array + destination points matrix, each row is a pair of coordinates (x, y) + Returns: + ---------- + @tfm: 2x3 np.array + transform matrix from src_pts to dst_pts + """ + + tfm = np.float32([[1, 0, 0], [0, 1, 0]]) + n_pts = src_pts.shape[0] + ones = np.ones((n_pts, 1), src_pts.dtype) + src_pts_ = np.hstack([src_pts, ones]) + dst_pts_ = np.hstack([dst_pts, ones]) + + A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_) + + if rank == 3: + tfm = np.float32([[A[0, 0], 
A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]]) + elif rank == 2: + tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]]) + + return tfm + + +def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'): + """ + Function: + ---------- + apply affine transform 'trans' to uv + Parameters: + ---------- + @src_img: 3x3 np.array + input image + @facial_pts: could be + 1)a list of K coordinates (x,y) + or + 2) Kx2 or 2xK np.array + each row or col is a pair of coordinates (x, y) + @reference_pts: could be + 1) a list of K coordinates (x,y) + or + 2) Kx2 or 2xK np.array + each row or col is a pair of coordinates (x, y) + or + 3) None + if None, use default reference facial points + @crop_size: (w, h) + output face image size + @align_type: transform type, could be one of + 1) 'similarity': use similarity transform + 2) 'cv2_affine': use the first 3 points to do affine transform, + by calling cv2.getAffineTransform() + 3) 'affine': use all points to do affine transform + Returns: + ---------- + @face_img: output face image with size (w, h) = @crop_size + """ + + if reference_pts is None: + if crop_size[0] == 96 and crop_size[1] == 112: + reference_pts = REFERENCE_FACIAL_POINTS + else: + default_square = False + inner_padding_factor = 0 + outer_padding = (0, 0) + output_size = crop_size + + reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding, + default_square) + + ref_pts = np.float32(reference_pts) + ref_pts_shp = ref_pts.shape + if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2: + raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2') + + if ref_pts_shp[0] == 2: + ref_pts = ref_pts.T + + src_pts = np.float32(facial_pts) + src_pts_shp = src_pts.shape + if max(src_pts_shp) < 3 or min(src_pts_shp) != 2: + raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2') + + if src_pts_shp[0] == 2: + src_pts = src_pts.T + + if src_pts.shape != ref_pts.shape: + raise FaceWarpException('facial_pts and reference_pts must have the same shape') + + if align_type == 'cv2_affine': + tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3]) + elif align_type == 'affine': + tfm = get_affine_transform_matrix(src_pts, ref_pts) + else: + tfm = get_similarity_transform_for_cv2(src_pts, ref_pts) + + face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1])) + + return face_img diff --git a/inpaint/plugins/facexlib/detection/matlab_cp2tform.py b/inpaint/plugins/facexlib/detection/matlab_cp2tform.py new file mode 100644 index 0000000..b2a8b54 --- /dev/null +++ b/inpaint/plugins/facexlib/detection/matlab_cp2tform.py @@ -0,0 +1,317 @@ +import numpy as np +from numpy.linalg import inv, lstsq +from numpy.linalg import matrix_rank as rank +from numpy.linalg import norm + + +class MatlabCp2tormException(Exception): + + def __str__(self): + return 'In File {}:{}'.format(__file__, super.__str__(self)) + + +def tformfwd(trans, uv): + """ + Function: + ---------- + apply affine transform 'trans' to uv + + Parameters: + ---------- + @trans: 3x3 np.array + transform matrix + @uv: Kx2 np.array + each row is a pair of coordinates (x, y) + + Returns: + ---------- + @xy: Kx2 np.array + each row is a pair of transformed coordinates (x, y) + """ + uv = np.hstack((uv, np.ones((uv.shape[0], 1)))) + xy = np.dot(uv, trans) + xy = xy[:, 0:-1] + return xy + + +def tforminv(trans, uv): + """ + Function: + ---------- + apply the inverse of affine transform 'trans' to uv + + Parameters: + ---------- + 
@trans: 3x3 np.array + transform matrix + @uv: Kx2 np.array + each row is a pair of coordinates (x, y) + + Returns: + ---------- + @xy: Kx2 np.array + each row is a pair of inverse-transformed coordinates (x, y) + """ + Tinv = inv(trans) + xy = tformfwd(Tinv, uv) + return xy + + +def findNonreflectiveSimilarity(uv, xy, options=None): + options = {'K': 2} + + K = options['K'] + M = xy.shape[0] + x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector + y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector + + tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) + tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) + X = np.vstack((tmp1, tmp2)) + + u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector + v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector + U = np.vstack((u, v)) + + # We know that X * r = U + if rank(X) >= 2 * K: + r, _, _, _ = lstsq(X, U, rcond=-1) + r = np.squeeze(r) + else: + raise Exception('cp2tform:twoUniquePointsReq') + sc = r[0] + ss = r[1] + tx = r[2] + ty = r[3] + + Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]]) + T = inv(Tinv) + T[:, 2] = np.array([0, 0, 1]) + + return T, Tinv + + +def findSimilarity(uv, xy, options=None): + options = {'K': 2} + + # uv = np.array(uv) + # xy = np.array(xy) + + # Solve for trans1 + trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options) + + # Solve for trans2 + + # manually reflect the xy data across the Y-axis + xyR = xy + xyR[:, 0] = -1 * xyR[:, 0] + + trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options) + + # manually reflect the tform to undo the reflection done on xyR + TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) + + trans2 = np.dot(trans2r, TreflectY) + + # Figure out if trans1 or trans2 is better + xy1 = tformfwd(trans1, uv) + norm1 = norm(xy1 - xy) + + xy2 = tformfwd(trans2, uv) + norm2 = norm(xy2 - xy) + + if norm1 <= norm2: + return trans1, trans1_inv + else: + trans2_inv = inv(trans2) + return trans2, trans2_inv + + +def get_similarity_transform(src_pts, dst_pts, reflective=True): + """ + Function: + ---------- + Find Similarity Transform Matrix 'trans': + u = src_pts[:, 0] + v = src_pts[:, 1] + x = dst_pts[:, 0] + y = dst_pts[:, 1] + [x, y, 1] = [u, v, 1] * trans + + Parameters: + ---------- + @src_pts: Kx2 np.array + source points, each row is a pair of coordinates (x, y) + @dst_pts: Kx2 np.array + destination points, each row is a pair of transformed + coordinates (x, y) + @reflective: True or False + if True: + use reflective similarity transform + else: + use non-reflective similarity transform + + Returns: + ---------- + @trans: 3x3 np.array + transform matrix from uv to xy + trans_inv: 3x3 np.array + inverse of trans, transform matrix from xy to uv + """ + + if reflective: + trans, trans_inv = findSimilarity(src_pts, dst_pts) + else: + trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) + + return trans, trans_inv + + +def cvt_tform_mat_for_cv2(trans): + """ + Function: + ---------- + Convert Transform Matrix 'trans' into 'cv2_trans' which could be + directly used by cv2.warpAffine(): + u = src_pts[:, 0] + v = src_pts[:, 1] + x = dst_pts[:, 0] + y = dst_pts[:, 1] + [x, y].T = cv_trans * [u, v, 1].T + + Parameters: + ---------- + @trans: 3x3 np.array + transform matrix from uv to xy + + Returns: + ---------- + @cv2_trans: 2x3 np.array + transform matrix from src_pts to dst_pts, could be directly used + for cv2.warpAffine() + """ + cv2_trans = trans[:, 0:2].T + + return cv2_trans + + 
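
Taken together, align_trans.py above and this cp2tform port implement the classic MATLAB-style 5-point similarity alignment that RetinaFace later uses to produce 112x112 face crops. Below is a minimal, self-contained sketch of that flow; the landmark coordinates and the dummy frame are illustrative only, and the import path assumes this patch's package layout.

import cv2
import numpy as np

# Import path assumes the layout introduced by this patch.
from inpaint.plugins.facexlib.detection.align_trans import (
    get_reference_facial_points,
    warp_and_crop_face,
)

# Dummy BGR frame and illustrative 5-point landmarks
# (left eye, right eye, nose tip, left/right mouth corner).
img = np.zeros((480, 640, 3), dtype=np.uint8)
facial5points = [[182.0, 210.0], [260.0, 205.0], [221.0, 255.0],
                 [190.0, 300.0], [255.0, 295.0]]

# Squared 112x112 reference template, built the same way RetinaFace.__init__ does.
reference = get_reference_facial_points(default_square=True)

# Similarity transform (via get_similarity_transform_for_cv2) followed by cv2.warpAffine.
aligned = warp_and_crop_face(img, facial5points, reference, crop_size=(112, 112))
print(aligned.shape)  # (112, 112, 3)

Passing the reference points explicitly matters here: with the default settings, get_reference_facial_points only accepts output_size=None or the (96, 112) template, so the squared template is requested up front, exactly as RetinaFace does.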
+def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True): + """ + Function: + ---------- + Find Similarity Transform Matrix 'cv2_trans' which could be + directly used by cv2.warpAffine(): + u = src_pts[:, 0] + v = src_pts[:, 1] + x = dst_pts[:, 0] + y = dst_pts[:, 1] + [x, y].T = cv_trans * [u, v, 1].T + + Parameters: + ---------- + @src_pts: Kx2 np.array + source points, each row is a pair of coordinates (x, y) + @dst_pts: Kx2 np.array + destination points, each row is a pair of transformed + coordinates (x, y) + reflective: True or False + if True: + use reflective similarity transform + else: + use non-reflective similarity transform + + Returns: + ---------- + @cv2_trans: 2x3 np.array + transform matrix from src_pts to dst_pts, could be directly used + for cv2.warpAffine() + """ + trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective) + cv2_trans = cvt_tform_mat_for_cv2(trans) + + return cv2_trans + + +if __name__ == '__main__': + """ + u = [0, 6, -2] + v = [0, 3, 5] + x = [-1, 0, 4] + y = [-1, -10, 4] + + # In Matlab, run: + # + # uv = [u'; v']; + # xy = [x'; y']; + # tform_sim=cp2tform(uv,xy,'similarity'); + # + # trans = tform_sim.tdata.T + # ans = + # -0.0764 -1.6190 0 + # 1.6190 -0.0764 0 + # -3.2156 0.0290 1.0000 + # trans_inv = tform_sim.tdata.Tinv + # ans = + # + # -0.0291 0.6163 0 + # -0.6163 -0.0291 0 + # -0.0756 1.9826 1.0000 + # xy_m=tformfwd(tform_sim, u,v) + # + # xy_m = + # + # -3.2156 0.0290 + # 1.1833 -9.9143 + # 5.0323 2.8853 + # uv_m=tforminv(tform_sim, x,y) + # + # uv_m = + # + # 0.5698 1.3953 + # 6.0872 2.2733 + # -2.6570 4.3314 + """ + u = [0, 6, -2] + v = [0, 3, 5] + x = [-1, 0, 4] + y = [-1, -10, 4] + + uv = np.array((u, v)).T + xy = np.array((x, y)).T + + print('\n--->uv:') + print(uv) + print('\n--->xy:') + print(xy) + + trans, trans_inv = get_similarity_transform(uv, xy) + + print('\n--->trans matrix:') + print(trans) + + print('\n--->trans_inv matrix:') + print(trans_inv) + + print('\n---> apply transform to uv') + print('\nxy_m = uv_augmented * trans') + uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1)))) + xy_m = np.dot(uv_aug, trans) + print(xy_m) + + print('\nxy_m = tformfwd(trans, uv)') + xy_m = tformfwd(trans, uv) + print(xy_m) + + print('\n---> apply inverse transform to xy') + print('\nuv_m = xy_augmented * trans_inv') + xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1)))) + uv_m = np.dot(xy_aug, trans_inv) + print(uv_m) + + print('\nuv_m = tformfwd(trans_inv, xy)') + uv_m = tformfwd(trans_inv, xy) + print(uv_m) + + uv_m = tforminv(trans, xy) + print('\nuv_m = tforminv(trans, xy)') + print(uv_m) diff --git a/inpaint/plugins/facexlib/detection/retinaface.py b/inpaint/plugins/facexlib/detection/retinaface.py new file mode 100644 index 0000000..6c4a84d --- /dev/null +++ b/inpaint/plugins/facexlib/detection/retinaface.py @@ -0,0 +1,419 @@ +import cv2 +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter + +from .align_trans import get_reference_facial_points, warp_and_crop_face +from .retinaface_net import ( + FPN, + SSH, + MobileNetV1, + make_bbox_head, + make_class_head, + make_landmark_head, +) +from .retinaface_utils import ( + PriorBox, + batched_decode, + batched_decode_landm, + decode, + decode_landm, + py_cpu_nms, +) + + +def generate_config(network_name): + cfg_mnet = { + "name": "mobilenet0.25", + "min_sizes": [[16, 32], [64, 128], [256, 512]], + "steps": [8, 16, 
32], + "variance": [0.1, 0.2], + "clip": False, + "loc_weight": 2.0, + "gpu_train": True, + "batch_size": 32, + "ngpu": 1, + "epoch": 250, + "decay1": 190, + "decay2": 220, + "image_size": 640, + "return_layers": {"stage1": 1, "stage2": 2, "stage3": 3}, + "in_channel": 32, + "out_channel": 64, + } + + cfg_re50 = { + "name": "Resnet50", + "min_sizes": [[16, 32], [64, 128], [256, 512]], + "steps": [8, 16, 32], + "variance": [0.1, 0.2], + "clip": False, + "loc_weight": 2.0, + "gpu_train": True, + "batch_size": 24, + "ngpu": 4, + "epoch": 100, + "decay1": 70, + "decay2": 90, + "image_size": 840, + "return_layers": {"layer2": 1, "layer3": 2, "layer4": 3}, + "in_channel": 256, + "out_channel": 256, + } + + if network_name == "mobile0.25": + return cfg_mnet + elif network_name == "resnet50": + return cfg_re50 + else: + raise NotImplementedError(f"network_name={network_name}") + + +class RetinaFace(nn.Module): + def __init__(self, network_name="resnet50", half=False, phase="test", device=None): + self.device = ( + torch.device("cuda" if torch.cuda.is_available() else "cpu") + if device is None + else device + ) + + super(RetinaFace, self).__init__() + self.half_inference = half + cfg = generate_config(network_name) + self.backbone = cfg["name"] + + self.model_name = f"retinaface_{network_name}" + self.cfg = cfg + self.phase = phase + self.target_size, self.max_size = 1600, 2150 + self.resize, self.scale, self.scale1 = 1.0, None, None + self.mean_tensor = torch.tensor( + [[[[104.0]], [[117.0]], [[123.0]]]], device=self.device + ) + self.reference = get_reference_facial_points(default_square=True) + # Build network. + backbone = None + if cfg["name"] == "mobilenet0.25": + backbone = MobileNetV1() + self.body = IntermediateLayerGetter(backbone, cfg["return_layers"]) + elif cfg["name"] == "Resnet50": + import torchvision.models as models + + backbone = models.resnet50(pretrained=False) + self.body = IntermediateLayerGetter(backbone, cfg["return_layers"]) + + in_channels_stage2 = cfg["in_channel"] + in_channels_list = [ + in_channels_stage2 * 2, + in_channels_stage2 * 4, + in_channels_stage2 * 8, + ] + + out_channels = cfg["out_channel"] + self.fpn = FPN(in_channels_list, out_channels) + self.ssh1 = SSH(out_channels, out_channels) + self.ssh2 = SSH(out_channels, out_channels) + self.ssh3 = SSH(out_channels, out_channels) + + self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg["out_channel"]) + self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg["out_channel"]) + self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg["out_channel"]) + + self.to(self.device) + self.eval() + if self.half_inference: + self.half() + + def forward(self, inputs): + out = self.body(inputs) + + if self.backbone == "mobilenet0.25" or self.backbone == "Resnet50": + out = list(out.values()) + # FPN + fpn = self.fpn(out) + + # SSH + feature1 = self.ssh1(fpn[0]) + feature2 = self.ssh2(fpn[1]) + feature3 = self.ssh3(fpn[2]) + features = [feature1, feature2, feature3] + + bbox_regressions = torch.cat( + [self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1 + ) + classifications = torch.cat( + [self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1 + ) + tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)] + ldm_regressions = torch.cat(tmp, dim=1) + + if self.phase == "train": + output = (bbox_regressions, classifications, ldm_regressions) + else: + output = ( + bbox_regressions, + F.softmax(classifications, dim=-1), + ldm_regressions, + ) + return output 
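
For orientation, a minimal sketch of how this detector is typically driven through init_detection_model from detection/__init__.py above; detect_faces is defined further down in this class. The pretrained weights are downloaded on first use, the filename below is only an example, and the import path assumes this patch's package layout. Each returned row holds [x1, y1, x2, y2, score] followed by five (x, y) landmark pairs; align_multi additionally returns the aligned 112x112 crops produced via warp_and_crop_face.

import cv2
import torch

# Import path assumes the layout introduced by this patch.
from inpaint.plugins.facexlib.detection import init_detection_model

det_net = init_detection_model("retinaface_resnet50", half=False, device="cpu")

img = cv2.imread("group_photo.jpg")  # any BGR image; the filename is illustrative
with torch.no_grad():
    faces = det_net.detect_faces(img, conf_threshold=0.8)

# Each row: [x1, y1, x2, y2, score] + five (x, y) landmark pairs.
for face in faces:
    x1, y1, x2, y2, score = face[:5]
    landmarks = face[5:].reshape(5, 2)
    print(f"score={score:.2f} box=({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})")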
+ + def __detect_faces(self, inputs): + # get scale + height, width = inputs.shape[2:] + self.scale = torch.tensor( + [width, height, width, height], dtype=torch.float32, device=self.device + ) + tmp = [ + width, + height, + width, + height, + width, + height, + width, + height, + width, + height, + ] + self.scale1 = torch.tensor(tmp, dtype=torch.float32, device=self.device) + + # forawrd + inputs = inputs.to(self.device) + if self.half_inference: + inputs = inputs.half() + loc, conf, landmarks = self(inputs) + + # get priorbox + priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:]) + priors = priorbox.forward().to(self.device) + + return loc, conf, landmarks, priors + + # single image detection + def transform(self, image, use_origin_size): + # convert to opencv format + if isinstance(image, Image.Image): + image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR) + image = image.astype(np.float32) + + # testing scale + im_size_min = np.min(image.shape[0:2]) + im_size_max = np.max(image.shape[0:2]) + resize = float(self.target_size) / float(im_size_min) + + # prevent bigger axis from being more than max_size + if np.round(resize * im_size_max) > self.max_size: + resize = float(self.max_size) / float(im_size_max) + resize = 1 if use_origin_size else resize + + # resize + if resize != 1: + image = cv2.resize( + image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR + ) + + # convert to torch.tensor format + # image -= (104, 117, 123) + image = image.transpose(2, 0, 1) + image = torch.from_numpy(image).unsqueeze(0) + + return image, resize + + def detect_faces( + self, + image, + conf_threshold=0.8, + nms_threshold=0.4, + use_origin_size=True, + ): + image, self.resize = self.transform(image, use_origin_size) + image = image.to(self.device) + if self.half_inference: + image = image.half() + image = image - self.mean_tensor + + loc, conf, landmarks, priors = self.__detect_faces(image) + + boxes = decode(loc.data.squeeze(0), priors.data, self.cfg["variance"]) + boxes = boxes * self.scale / self.resize + boxes = boxes.cpu().numpy() + + scores = conf.squeeze(0).data.cpu().numpy()[:, 1] + + landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg["variance"]) + landmarks = landmarks * self.scale1 / self.resize + landmarks = landmarks.cpu().numpy() + + # ignore low scores + inds = np.where(scores > conf_threshold)[0] + boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds] + + # sort + order = scores.argsort()[::-1] + boxes, landmarks, scores = boxes[order], landmarks[order], scores[order] + + # do NMS + bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype( + np.float32, copy=False + ) + keep = py_cpu_nms(bounding_boxes, nms_threshold) + bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep] + # self.t['forward_pass'].toc() + # print(self.t['forward_pass'].average_time) + # import sys + # sys.stdout.flush() + return np.concatenate((bounding_boxes, landmarks), axis=1) + + def __align_multi(self, image, boxes, landmarks, limit=None): + if len(boxes) < 1: + return [], [] + + if limit: + boxes = boxes[:limit] + landmarks = landmarks[:limit] + + faces = [] + for landmark in landmarks: + facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)] + + warped_face = warp_and_crop_face( + np.array(image), facial5points, self.reference, crop_size=(112, 112) + ) + faces.append(warped_face) + + return np.concatenate((boxes, landmarks), axis=1), faces + + def align_multi(self, img, conf_threshold=0.8, limit=None): + rlt = 
self.detect_faces(img, conf_threshold=conf_threshold) + boxes, landmarks = rlt[:, 0:5], rlt[:, 5:] + + return self.__align_multi(img, boxes, landmarks, limit) + + # batched detection + def batched_transform(self, frames, use_origin_size): + """ + Arguments: + frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c], + type=np.float32, BGR format). + use_origin_size: whether to use origin size. + """ + from_PIL = True if isinstance(frames[0], Image.Image) else False + + # convert to opencv format + if from_PIL: + frames = [ + cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames + ] + frames = np.asarray(frames, dtype=np.float32) + + # testing scale + im_size_min = np.min(frames[0].shape[0:2]) + im_size_max = np.max(frames[0].shape[0:2]) + resize = float(self.target_size) / float(im_size_min) + + # prevent bigger axis from being more than max_size + if np.round(resize * im_size_max) > self.max_size: + resize = float(self.max_size) / float(im_size_max) + resize = 1 if use_origin_size else resize + + # resize + if resize != 1: + if not from_PIL: + frames = F.interpolate(frames, scale_factor=resize) + else: + frames = [ + cv2.resize( + frame, + None, + None, + fx=resize, + fy=resize, + interpolation=cv2.INTER_LINEAR, + ) + for frame in frames + ] + + # convert to torch.tensor format + if not from_PIL: + frames = frames.transpose(1, 2).transpose(1, 3).contiguous() + else: + frames = frames.transpose((0, 3, 1, 2)) + frames = torch.from_numpy(frames) + + return frames, resize + + def batched_detect_faces( + self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True + ): + """ + Arguments: + frames: a list of PIL.Image, or np.array(shape=[n, h, w, c], + type=np.uint8, BGR format). + conf_threshold: confidence threshold. + nms_threshold: nms threshold. + use_origin_size: whether to use origin size. + Returns: + final_bounding_boxes: list of np.array ([n_boxes, 5], + type=np.float32). + final_landmarks: list of np.array ([n_boxes, 10], type=np.float32). 
+ """ + # self.t['forward_pass'].tic() + frames, self.resize = self.batched_transform(frames, use_origin_size) + frames = frames.to(self.device) + frames = frames - self.mean_tensor + + b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames) + + final_bounding_boxes, final_landmarks = [], [] + + # decode + priors = priors.unsqueeze(0) + b_loc = ( + batched_decode(b_loc, priors, self.cfg["variance"]) + * self.scale + / self.resize + ) + b_landmarks = ( + batched_decode_landm(b_landmarks, priors, self.cfg["variance"]) + * self.scale1 + / self.resize + ) + b_conf = b_conf[:, :, 1] + + # index for selection + b_indice = b_conf > conf_threshold + + # concat + b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float() + + for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice): + # ignore low scores + pred, landm = pred[inds, :], landm[inds, :] + if pred.shape[0] == 0: + final_bounding_boxes.append(np.array([], dtype=np.float32)) + final_landmarks.append(np.array([], dtype=np.float32)) + continue + + # sort + # order = score.argsort(descending=True) + # box, landm, score = box[order], landm[order], score[order] + + # to CPU + bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy() + + # NMS + keep = py_cpu_nms(bounding_boxes, nms_threshold) + bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep] + + # append + final_bounding_boxes.append(bounding_boxes) + final_landmarks.append(landmarks) + # self.t['forward_pass'].toc(average=True) + # self.batch_time += self.t['forward_pass'].diff + # self.total_frame += len(frames) + # print(self.batch_time / self.total_frame) + + return final_bounding_boxes, final_landmarks diff --git a/inpaint/plugins/facexlib/detection/retinaface_net.py b/inpaint/plugins/facexlib/detection/retinaface_net.py new file mode 100644 index 0000000..ab6aa82 --- /dev/null +++ b/inpaint/plugins/facexlib/detection/retinaface_net.py @@ -0,0 +1,196 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def conv_bn(inp, oup, stride=1, leaky=0): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup), + nn.LeakyReLU(negative_slope=leaky, inplace=True)) + + +def conv_bn_no_relu(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + ) + + +def conv_bn1X1(inp, oup, stride, leaky=0): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False), nn.BatchNorm2d(oup), + nn.LeakyReLU(negative_slope=leaky, inplace=True)) + + +def conv_dw(inp, oup, stride, leaky=0.1): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.LeakyReLU(negative_slope=leaky, inplace=True), + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.LeakyReLU(negative_slope=leaky, inplace=True), + ) + + +class SSH(nn.Module): + + def __init__(self, in_channel, out_channel): + super(SSH, self).__init__() + assert out_channel % 4 == 0 + leaky = 0 + if (out_channel <= 64): + leaky = 0.1 + self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1) + + self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky) + self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1) + + self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky) + self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1) + + def forward(self, input): + conv3X3 = self.conv3X3(input) + + conv5X5_1 = 
self.conv5X5_1(input) + conv5X5 = self.conv5X5_2(conv5X5_1) + + conv7X7_2 = self.conv7X7_2(conv5X5_1) + conv7X7 = self.conv7x7_3(conv7X7_2) + + out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1) + out = F.relu(out) + return out + + +class FPN(nn.Module): + + def __init__(self, in_channels_list, out_channels): + super(FPN, self).__init__() + leaky = 0 + if (out_channels <= 64): + leaky = 0.1 + self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky) + self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky) + self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky) + + self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky) + self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky) + + def forward(self, input): + # names = list(input.keys()) + # input = list(input.values()) + + output1 = self.output1(input[0]) + output2 = self.output2(input[1]) + output3 = self.output3(input[2]) + + up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest') + output2 = output2 + up3 + output2 = self.merge2(output2) + + up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest') + output1 = output1 + up2 + output1 = self.merge1(output1) + + out = [output1, output2, output3] + return out + + +class MobileNetV1(nn.Module): + + def __init__(self): + super(MobileNetV1, self).__init__() + self.stage1 = nn.Sequential( + conv_bn(3, 8, 2, leaky=0.1), # 3 + conv_dw(8, 16, 1), # 7 + conv_dw(16, 32, 2), # 11 + conv_dw(32, 32, 1), # 19 + conv_dw(32, 64, 2), # 27 + conv_dw(64, 64, 1), # 43 + ) + self.stage2 = nn.Sequential( + conv_dw(64, 128, 2), # 43 + 16 = 59 + conv_dw(128, 128, 1), # 59 + 32 = 91 + conv_dw(128, 128, 1), # 91 + 32 = 123 + conv_dw(128, 128, 1), # 123 + 32 = 155 + conv_dw(128, 128, 1), # 155 + 32 = 187 + conv_dw(128, 128, 1), # 187 + 32 = 219 + ) + self.stage3 = nn.Sequential( + conv_dw(128, 256, 2), # 219 +3 2 = 241 + conv_dw(256, 256, 1), # 241 + 64 = 301 + ) + self.avg = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(256, 1000) + + def forward(self, x): + x = self.stage1(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.avg(x) + # x = self.model(x) + x = x.view(-1, 256) + x = self.fc(x) + return x + + +class ClassHead(nn.Module): + + def __init__(self, inchannels=512, num_anchors=3): + super(ClassHead, self).__init__() + self.num_anchors = num_anchors + self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) + + def forward(self, x): + out = self.conv1x1(x) + out = out.permute(0, 2, 3, 1).contiguous() + + return out.view(out.shape[0], -1, 2) + + +class BboxHead(nn.Module): + + def __init__(self, inchannels=512, num_anchors=3): + super(BboxHead, self).__init__() + self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0) + + def forward(self, x): + out = self.conv1x1(x) + out = out.permute(0, 2, 3, 1).contiguous() + + return out.view(out.shape[0], -1, 4) + + +class LandmarkHead(nn.Module): + + def __init__(self, inchannels=512, num_anchors=3): + super(LandmarkHead, self).__init__() + self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0) + + def forward(self, x): + out = self.conv1x1(x) + out = out.permute(0, 2, 3, 1).contiguous() + + return out.view(out.shape[0], -1, 10) + + +def make_class_head(fpn_num=3, inchannels=64, anchor_num=2): + classhead = nn.ModuleList() + for i in range(fpn_num): + 
classhead.append(ClassHead(inchannels, anchor_num)) + return classhead + + +def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2): + bboxhead = nn.ModuleList() + for i in range(fpn_num): + bboxhead.append(BboxHead(inchannels, anchor_num)) + return bboxhead + + +def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2): + landmarkhead = nn.ModuleList() + for i in range(fpn_num): + landmarkhead.append(LandmarkHead(inchannels, anchor_num)) + return landmarkhead diff --git a/inpaint/plugins/facexlib/detection/retinaface_utils.py b/inpaint/plugins/facexlib/detection/retinaface_utils.py new file mode 100644 index 0000000..8c35775 --- /dev/null +++ b/inpaint/plugins/facexlib/detection/retinaface_utils.py @@ -0,0 +1,421 @@ +import numpy as np +import torch +import torchvision +from itertools import product as product +from math import ceil + + +class PriorBox(object): + + def __init__(self, cfg, image_size=None, phase='train'): + super(PriorBox, self).__init__() + self.min_sizes = cfg['min_sizes'] + self.steps = cfg['steps'] + self.clip = cfg['clip'] + self.image_size = image_size + self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps] + self.name = 's' + + def forward(self): + anchors = [] + for k, f in enumerate(self.feature_maps): + min_sizes = self.min_sizes[k] + for i, j in product(range(f[0]), range(f[1])): + for min_size in min_sizes: + s_kx = min_size / self.image_size[1] + s_ky = min_size / self.image_size[0] + dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]] + dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]] + for cy, cx in product(dense_cy, dense_cx): + anchors += [cx, cy, s_kx, s_ky] + + # back to torch land + output = torch.Tensor(anchors).view(-1, 4) + if self.clip: + output.clamp_(max=1, min=0) + return output + + +def py_cpu_nms(dets, thresh): + """Pure Python NMS baseline.""" + keep = torchvision.ops.nms( + boxes=torch.Tensor(dets[:, :4]), + scores=torch.Tensor(dets[:, 4]), + iou_threshold=thresh, + ) + + return list(keep) + + +def point_form(boxes): + """ Convert prior_boxes to (xmin, ymin, xmax, ymax) + representation for comparison to point form ground truth data. + Args: + boxes: (tensor) center-size default boxes from priorbox layers. + Return: + boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. + """ + return torch.cat( + ( + boxes[:, :2] - boxes[:, 2:] / 2, # xmin, ymin + boxes[:, :2] + boxes[:, 2:] / 2), + 1) # xmax, ymax + + +def center_size(boxes): + """ Convert prior_boxes to (cx, cy, w, h) + representation for comparison to center-size form ground truth data. + Args: + boxes: (tensor) point_form boxes + Return: + boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. + """ + return torch.cat( + (boxes[:, 2:] + boxes[:, :2]) / 2, # cx, cy + boxes[:, 2:] - boxes[:, :2], + 1) # w, h + + +def intersect(box_a, box_b): + """ We resize both tensors to [A,B,2] without new malloc: + [A,2] -> [A,1,2] -> [A,B,2] + [B,2] -> [1,B,2] -> [A,B,2] + Then we compute the area of intersect between box_a and box_b. + Args: + box_a: (tensor) bounding boxes, Shape: [A,4]. + box_b: (tensor) bounding boxes, Shape: [B,4]. + Return: + (tensor) intersection area, Shape: [A,B]. 
+ """ + A = box_a.size(0) + B = box_b.size(0) + max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) + min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2)) + inter = torch.clamp((max_xy - min_xy), min=0) + return inter[:, :, 0] * inter[:, :, 1] + + +def jaccard(box_a, box_b): + """Compute the jaccard overlap of two sets of boxes. The jaccard overlap + is simply the intersection over union of two boxes. Here we operate on + ground truth boxes and default boxes. + E.g.: + A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) + Args: + box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] + box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] + Return: + jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] + """ + inter = intersect(box_a, box_b) + area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B] + area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B] + union = area_a + area_b - inter + return inter / union # [A,B] + + +def matrix_iou(a, b): + """ + return iou of a and b, numpy version for data augenmentation + """ + lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) + rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) + + area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) + area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) + area_b = np.prod(b[:, 2:] - b[:, :2], axis=1) + return area_i / (area_a[:, np.newaxis] + area_b - area_i) + + +def matrix_iof(a, b): + """ + return iof of a and b, numpy version for data augenmentation + """ + lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) + rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) + + area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) + area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) + return area_i / np.maximum(area_a[:, np.newaxis], 1) + + +def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx): + """Match each prior box with the ground truth box of the highest jaccard + overlap, encode the bounding boxes, then return the matched indices + corresponding to both confidence and location preds. + Args: + threshold: (float) The overlap threshold used when matching boxes. + truths: (tensor) Ground truth boxes, Shape: [num_obj, 4]. + priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4]. + variances: (tensor) Variances corresponding to each prior coord, + Shape: [num_priors, 4]. + labels: (tensor) All the class labels for the image, Shape: [num_obj]. + landms: (tensor) Ground truth landms, Shape [num_obj, 10]. + loc_t: (tensor) Tensor to be filled w/ encoded location targets. + conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. + landm_t: (tensor) Tensor to be filled w/ encoded landm targets. + idx: (int) current batch index + Return: + The matched indices corresponding to 1)location 2)confidence + 3)landm preds. 
+ """ + # jaccard index + overlaps = jaccard(truths, point_form(priors)) + # (Bipartite Matching) + # [1,num_objects] best prior for each ground truth + best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) + + # ignore hard gt + valid_gt_idx = best_prior_overlap[:, 0] >= 0.2 + best_prior_idx_filter = best_prior_idx[valid_gt_idx, :] + if best_prior_idx_filter.shape[0] <= 0: + loc_t[idx] = 0 + conf_t[idx] = 0 + return + + # [1,num_priors] best ground truth for each prior + best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) + best_truth_idx.squeeze_(0) + best_truth_overlap.squeeze_(0) + best_prior_idx.squeeze_(1) + best_prior_idx_filter.squeeze_(1) + best_prior_overlap.squeeze_(1) + best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior + # TODO refactor: index best_prior_idx with long tensor + # ensure every gt matches with its prior of max overlap + for j in range(best_prior_idx.size(0)): # 判别此anchor是预测哪一个boxes + best_truth_idx[best_prior_idx[j]] = j + matches = truths[best_truth_idx] # Shape: [num_priors,4] 此处为每一个anchor对应的bbox取出来 + conf = labels[best_truth_idx] # Shape: [num_priors] 此处为每一个anchor对应的label取出来 + conf[best_truth_overlap < threshold] = 0 # label as background overlap<0.35的全部作为负样本 + loc = encode(matches, priors, variances) + + matches_landm = landms[best_truth_idx] + landm = encode_landm(matches_landm, priors, variances) + loc_t[idx] = loc # [num_priors,4] encoded offsets to learn + conf_t[idx] = conf # [num_priors] top class label for each prior + landm_t[idx] = landm + + +def encode(matched, priors, variances): + """Encode the variances from the priorbox layers into the ground truth boxes + we have matched (based on jaccard overlap) with the prior boxes. + Args: + matched: (tensor) Coords of ground truth for each prior in point-form + Shape: [num_priors, 4]. + priors: (tensor) Prior boxes in center-offset form + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + encoded boxes (tensor), Shape: [num_priors, 4] + """ + + # dist b/t match center and prior's center + g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2] + # encode variance + g_cxcy /= (variances[0] * priors[:, 2:]) + # match wh / prior wh + g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] + g_wh = torch.log(g_wh) / variances[1] + # return target for smooth_l1_loss + return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4] + + +def encode_landm(matched, priors, variances): + """Encode the variances from the priorbox layers into the ground truth boxes + we have matched (based on jaccard overlap) with the prior boxes. + Args: + matched: (tensor) Coords of ground truth for each prior in point-form + Shape: [num_priors, 10]. + priors: (tensor) Prior boxes in center-offset form + Shape: [num_priors,4]. 
+ variances: (list[float]) Variances of priorboxes + Return: + encoded landm (tensor), Shape: [num_priors, 10] + """ + + # dist b/t match center and prior's center + matched = torch.reshape(matched, (matched.size(0), 5, 2)) + priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2) + g_cxcy = matched[:, :, :2] - priors[:, :, :2] + # encode variance + g_cxcy /= (variances[0] * priors[:, :, 2:]) + # g_cxcy /= priors[:, :, 2:] + g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1) + # return target for smooth_l1_loss + return g_cxcy + + +# Adapted from https://github.com/Hakuyume/chainer-ssd +def decode(loc, priors, variances): + """Decode locations from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + loc (tensor): location predictions for loc layers, + Shape: [num_priors,4] + priors (tensor): Prior boxes in center-offset form. + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + decoded bounding box predictions + """ + + boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], + priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) + boxes[:, :2] -= boxes[:, 2:] / 2 + boxes[:, 2:] += boxes[:, :2] + return boxes + + +def decode_landm(pre, priors, variances): + """Decode landm from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + pre (tensor): landm predictions for loc layers, + Shape: [num_priors,10] + priors (tensor): Prior boxes in center-offset form. + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + decoded landm predictions + """ + tmp = ( + priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:], + ) + landms = torch.cat(tmp, dim=1) + return landms + + +def batched_decode(b_loc, priors, variances): + """Decode locations from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + b_loc (tensor): location predictions for loc layers, + Shape: [num_batches,num_priors,4] + priors (tensor): Prior boxes in center-offset form. + Shape: [1,num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + decoded bounding box predictions + """ + boxes = ( + priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:], + priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1]), + ) + boxes = torch.cat(boxes, dim=2) + + boxes[:, :, :2] -= boxes[:, :, 2:] / 2 + boxes[:, :, 2:] += boxes[:, :, :2] + return boxes + + +def batched_decode_landm(pre, priors, variances): + """Decode landm from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + pre (tensor): landm predictions for loc layers, + Shape: [num_batches,num_priors,10] + priors (tensor): Prior boxes in center-offset form. + Shape: [1,num_priors,4]. 
+ variances: (list[float]) Variances of priorboxes + Return: + decoded landm predictions + """ + landms = ( + priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:], + priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:], + priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:], + priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:], + priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:], + ) + landms = torch.cat(landms, dim=2) + return landms + + +def log_sum_exp(x): + """Utility function for computing log_sum_exp while determining + This will be used to determine unaveraged confidence loss across + all examples in a batch. + Args: + x (Variable(tensor)): conf_preds from conf layers + """ + x_max = x.data.max() + return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max + + +# Original author: Francisco Massa: +# https://github.com/fmassa/object-detection.torch +# Ported to PyTorch by Max deGroot (02/01/2017) +def nms(boxes, scores, overlap=0.5, top_k=200): + """Apply non-maximum suppression at test time to avoid detecting too many + overlapping bounding boxes for a given object. + Args: + boxes: (tensor) The location preds for the img, Shape: [num_priors,4]. + scores: (tensor) The class predscores for the img, Shape:[num_priors]. + overlap: (float) The overlap thresh for suppressing unnecessary boxes. + top_k: (int) The Maximum number of box preds to consider. + Return: + The indices of the kept boxes with respect to num_priors. + """ + + keep = torch.Tensor(scores.size(0)).fill_(0).long() + if boxes.numel() == 0: + return keep + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + area = torch.mul(x2 - x1, y2 - y1) + v, idx = scores.sort(0) # sort in ascending order + # I = I[v >= 0.01] + idx = idx[-top_k:] # indices of the top-k largest vals + xx1 = boxes.new() + yy1 = boxes.new() + xx2 = boxes.new() + yy2 = boxes.new() + w = boxes.new() + h = boxes.new() + + # keep = torch.Tensor() + count = 0 + while idx.numel() > 0: + i = idx[-1] # index of current largest val + # keep.append(i) + keep[count] = i + count += 1 + if idx.size(0) == 1: + break + idx = idx[:-1] # remove kept element from view + # load bboxes of next highest vals + torch.index_select(x1, 0, idx, out=xx1) + torch.index_select(y1, 0, idx, out=yy1) + torch.index_select(x2, 0, idx, out=xx2) + torch.index_select(y2, 0, idx, out=yy2) + # store element-wise max with next highest score + xx1 = torch.clamp(xx1, min=x1[i]) + yy1 = torch.clamp(yy1, min=y1[i]) + xx2 = torch.clamp(xx2, max=x2[i]) + yy2 = torch.clamp(yy2, max=y2[i]) + w.resize_as_(xx2) + h.resize_as_(yy2) + w = xx2 - xx1 + h = yy2 - yy1 + # check sizes of xx1 and xx2.. 
after each iteration + w = torch.clamp(w, min=0.0) + h = torch.clamp(h, min=0.0) + inter = w * h + # IoU = i / (area(a) + area(b) - i) + rem_areas = torch.index_select(area, 0, idx) # load remaining areas) + union = (rem_areas - inter) + area[i] + IoU = inter / union # store result in iou + # keep only elements with an IoU <= overlap + idx = idx[IoU.le(overlap)] + return keep, count diff --git a/inpaint/plugins/facexlib/parsing/__init__.py b/inpaint/plugins/facexlib/parsing/__init__.py new file mode 100644 index 0000000..322a87b --- /dev/null +++ b/inpaint/plugins/facexlib/parsing/__init__.py @@ -0,0 +1,24 @@ +import torch + +from ..utils import load_file_from_url +from .bisenet import BiSeNet +from .parsenet import ParseNet + + +def init_parsing_model(model_name='bisenet', half=False, device='cuda', model_rootpath=None): + if model_name == 'bisenet': + model = BiSeNet(num_class=19) + model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.0/parsing_bisenet.pth' + elif model_name == 'parsenet': + model = ParseNet(in_size=512, out_size=512, parsing_ch=19) + model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth' + else: + raise NotImplementedError(f'{model_name} is not implemented.') + + model_path = load_file_from_url( + url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath) + load_net = torch.load(model_path, map_location=lambda storage, loc: storage) + model.load_state_dict(load_net, strict=True) + model.eval() + model = model.to(device) + return model diff --git a/inpaint/plugins/facexlib/parsing/bisenet.py b/inpaint/plugins/facexlib/parsing/bisenet.py new file mode 100644 index 0000000..3898cab --- /dev/null +++ b/inpaint/plugins/facexlib/parsing/bisenet.py @@ -0,0 +1,140 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .resnet import ResNet18 + + +class ConvBNReLU(nn.Module): + + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_chan) + + def forward(self, x): + x = self.conv(x) + x = F.relu(self.bn(x)) + return x + + +class BiSeNetOutput(nn.Module): + + def __init__(self, in_chan, mid_chan, num_class): + super(BiSeNetOutput, self).__init__() + self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) + self.conv_out = nn.Conv2d(mid_chan, num_class, kernel_size=1, bias=False) + + def forward(self, x): + feat = self.conv(x) + out = self.conv_out(feat) + return out, feat + + +class AttentionRefinementModule(nn.Module): + + def __init__(self, in_chan, out_chan): + super(AttentionRefinementModule, self).__init__() + self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) + self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False) + self.bn_atten = nn.BatchNorm2d(out_chan) + self.sigmoid_atten = nn.Sigmoid() + + def forward(self, x): + feat = self.conv(x) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv_atten(atten) + atten = self.bn_atten(atten) + atten = self.sigmoid_atten(atten) + out = torch.mul(feat, atten) + return out + + +class ContextPath(nn.Module): + + def __init__(self): + super(ContextPath, self).__init__() + self.resnet = ResNet18() + self.arm16 = AttentionRefinementModule(256, 128) + self.arm32 = AttentionRefinementModule(512, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + 
self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0) + + def forward(self, x): + feat8, feat16, feat32 = self.resnet(x) + h8, w8 = feat8.size()[2:] + h16, w16 = feat16.size()[2:] + h32, w32 = feat32.size()[2:] + + avg = F.avg_pool2d(feat32, feat32.size()[2:]) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (h32, w32), mode='nearest') + + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (h16, w16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (h8, w8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat8, feat16_up, feat32_up # x8, x8, x16 + + +class FeatureFusionModule(nn.Module): + + def __init__(self, in_chan, out_chan): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) + self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False) + self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + +class BiSeNet(nn.Module): + + def __init__(self, num_class): + super(BiSeNet, self).__init__() + self.cp = ContextPath() + self.ffm = FeatureFusionModule(256, 256) + self.conv_out = BiSeNetOutput(256, 256, num_class) + self.conv_out16 = BiSeNetOutput(128, 64, num_class) + self.conv_out32 = BiSeNetOutput(128, 64, num_class) + + def forward(self, x, return_feat=False): + h, w = x.size()[2:] + feat_res8, feat_cp8, feat_cp16 = self.cp(x) # return res3b1 feature + feat_sp = feat_res8 # replace spatial path feature with res3b1 feature + feat_fuse = self.ffm(feat_sp, feat_cp8) + + out, feat = self.conv_out(feat_fuse) + out16, feat16 = self.conv_out16(feat_cp8) + out32, feat32 = self.conv_out32(feat_cp16) + + out = F.interpolate(out, (h, w), mode='bilinear', align_corners=True) + out16 = F.interpolate(out16, (h, w), mode='bilinear', align_corners=True) + out32 = F.interpolate(out32, (h, w), mode='bilinear', align_corners=True) + + if return_feat: + feat = F.interpolate(feat, (h, w), mode='bilinear', align_corners=True) + feat16 = F.interpolate(feat16, (h, w), mode='bilinear', align_corners=True) + feat32 = F.interpolate(feat32, (h, w), mode='bilinear', align_corners=True) + return out, out16, out32, feat, feat16, feat32 + else: + return out, out16, out32 diff --git a/inpaint/plugins/facexlib/parsing/parsenet.py b/inpaint/plugins/facexlib/parsing/parsenet.py new file mode 100644 index 0000000..e178ebe --- /dev/null +++ b/inpaint/plugins/facexlib/parsing/parsenet.py @@ -0,0 +1,194 @@ +"""Modified from https://github.com/chaofengc/PSFRGAN +""" +import numpy as np +import torch.nn as nn +from torch.nn import functional as F + + +class NormLayer(nn.Module): + """Normalization Layers. + + Args: + channels: input channels, for batch norm and instance norm. + input_size: input shape without batch size, for layer norm. 
+ """ + + def __init__(self, channels, normalize_shape=None, norm_type='bn'): + super(NormLayer, self).__init__() + norm_type = norm_type.lower() + self.norm_type = norm_type + if norm_type == 'bn': + self.norm = nn.BatchNorm2d(channels, affine=True) + elif norm_type == 'in': + self.norm = nn.InstanceNorm2d(channels, affine=False) + elif norm_type == 'gn': + self.norm = nn.GroupNorm(32, channels, affine=True) + elif norm_type == 'pixel': + self.norm = lambda x: F.normalize(x, p=2, dim=1) + elif norm_type == 'layer': + self.norm = nn.LayerNorm(normalize_shape) + elif norm_type == 'none': + self.norm = lambda x: x * 1.0 + else: + assert 1 == 0, f'Norm type {norm_type} not support.' + + def forward(self, x, ref=None): + if self.norm_type == 'spade': + return self.norm(x, ref) + else: + return self.norm(x) + + +class ReluLayer(nn.Module): + """Relu Layer. + + Args: + relu type: type of relu layer, candidates are + - ReLU + - LeakyReLU: default relu slope 0.2 + - PRelu + - SELU + - none: direct pass + """ + + def __init__(self, channels, relu_type='relu'): + super(ReluLayer, self).__init__() + relu_type = relu_type.lower() + if relu_type == 'relu': + self.func = nn.ReLU(True) + elif relu_type == 'leakyrelu': + self.func = nn.LeakyReLU(0.2, inplace=True) + elif relu_type == 'prelu': + self.func = nn.PReLU(channels) + elif relu_type == 'selu': + self.func = nn.SELU(True) + elif relu_type == 'none': + self.func = lambda x: x * 1.0 + else: + assert 1 == 0, f'Relu type {relu_type} not support.' + + def forward(self, x): + return self.func(x) + + +class ConvLayer(nn.Module): + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + scale='none', + norm_type='none', + relu_type='none', + use_pad=True, + bias=True): + super(ConvLayer, self).__init__() + self.use_pad = use_pad + self.norm_type = norm_type + if norm_type in ['bn']: + bias = False + + stride = 2 if scale == 'down' else 1 + + self.scale_func = lambda x: x + if scale == 'up': + self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest') + + self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) 
/ 2))) + self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias) + + self.relu = ReluLayer(out_channels, relu_type) + self.norm = NormLayer(out_channels, norm_type=norm_type) + + def forward(self, x): + out = self.scale_func(x) + if self.use_pad: + out = self.reflection_pad(out) + out = self.conv2d(out) + out = self.norm(out) + out = self.relu(out) + return out + + +class ResidualBlock(nn.Module): + """ + Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html + """ + + def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'): + super(ResidualBlock, self).__init__() + + if scale == 'none' and c_in == c_out: + self.shortcut_func = lambda x: x + else: + self.shortcut_func = ConvLayer(c_in, c_out, 3, scale) + + scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']} + scale_conf = scale_config_dict[scale] + + self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type) + self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none') + + def forward(self, x): + identity = self.shortcut_func(x) + + res = self.conv1(x) + res = self.conv2(res) + return identity + res + + +class ParseNet(nn.Module): + + def __init__(self, + in_size=128, + out_size=128, + min_feat_size=32, + base_ch=64, + parsing_ch=19, + res_depth=10, + relu_type='LeakyReLU', + norm_type='bn', + ch_range=[32, 256]): + super().__init__() + self.res_depth = res_depth + act_args = {'norm_type': norm_type, 'relu_type': relu_type} + min_ch, max_ch = ch_range + + ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731 + min_feat_size = min(in_size, min_feat_size) + + down_steps = int(np.log2(in_size // min_feat_size)) + up_steps = int(np.log2(out_size // min_feat_size)) + + # =============== define encoder-body-decoder ==================== + self.encoder = [] + self.encoder.append(ConvLayer(3, base_ch, 3, 1)) + head_ch = base_ch + for i in range(down_steps): + cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2) + self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args)) + head_ch = head_ch * 2 + + self.body = [] + for i in range(res_depth): + self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args)) + + self.decoder = [] + for i in range(up_steps): + cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2) + self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args)) + head_ch = head_ch // 2 + + self.encoder = nn.Sequential(*self.encoder) + self.body = nn.Sequential(*self.body) + self.decoder = nn.Sequential(*self.decoder) + self.out_img_conv = ConvLayer(ch_clip(head_ch), 3) + self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch) + + def forward(self, x): + feat = self.encoder(x) + x = feat + self.body(feat) + x = self.decoder(x) + out_img = self.out_img_conv(x) + out_mask = self.out_mask_conv(x) + return out_mask, out_img diff --git a/inpaint/plugins/facexlib/parsing/resnet.py b/inpaint/plugins/facexlib/parsing/resnet.py new file mode 100644 index 0000000..fec8e82 --- /dev/null +++ b/inpaint/plugins/facexlib/parsing/resnet.py @@ -0,0 +1,69 @@ +import torch.nn as nn +import torch.nn.functional as F + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) + + +class BasicBlock(nn.Module): + + def __init__(self, in_chan, out_chan, stride=1): + super(BasicBlock, self).__init__() + self.conv1 
= conv3x3(in_chan, out_chan, stride) + self.bn1 = nn.BatchNorm2d(out_chan) + self.conv2 = conv3x3(out_chan, out_chan) + self.bn2 = nn.BatchNorm2d(out_chan) + self.relu = nn.ReLU(inplace=True) + self.downsample = None + if in_chan != out_chan or stride != 1: + self.downsample = nn.Sequential( + nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(out_chan), + ) + + def forward(self, x): + residual = self.conv1(x) + residual = F.relu(self.bn1(residual)) + residual = self.conv2(residual) + residual = self.bn2(residual) + + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + out = shortcut + residual + out = self.relu(out) + return out + + +def create_layer_basic(in_chan, out_chan, bnum, stride=1): + layers = [BasicBlock(in_chan, out_chan, stride=stride)] + for i in range(bnum - 1): + layers.append(BasicBlock(out_chan, out_chan, stride=1)) + return nn.Sequential(*layers) + + +class ResNet18(nn.Module): + + def __init__(self): + super(ResNet18, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1) + self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2) + self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2) + self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2) + + def forward(self, x): + x = self.conv1(x) + x = F.relu(self.bn1(x)) + x = self.maxpool(x) + + x = self.layer1(x) + feat8 = self.layer2(x) # 1/8 + feat16 = self.layer3(feat8) # 1/16 + feat32 = self.layer4(feat16) # 1/32 + return feat8, feat16, feat32 diff --git a/inpaint/plugins/facexlib/utils/__init__.py b/inpaint/plugins/facexlib/utils/__init__.py new file mode 100644 index 0000000..706e077 --- /dev/null +++ b/inpaint/plugins/facexlib/utils/__init__.py @@ -0,0 +1,7 @@ +from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back +from .misc import img2tensor, load_file_from_url, scandir + +__all__ = [ + 'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 'load_file_from_url', 'paste_face_back', + 'img2tensor', 'scandir' +] diff --git a/inpaint/plugins/facexlib/utils/face_restoration_helper.py b/inpaint/plugins/facexlib/utils/face_restoration_helper.py new file mode 100644 index 0000000..a547cc8 --- /dev/null +++ b/inpaint/plugins/facexlib/utils/face_restoration_helper.py @@ -0,0 +1,473 @@ +import cv2 +import numpy as np +import os +import torch +from torchvision.transforms.functional import normalize + +from ..detection import init_detection_model +from ..parsing import init_parsing_model +from ..utils.misc import img2tensor, imwrite + + +def get_largest_face(det_faces, h, w): + def get_location(val, length): + if val < 0: + return 0 + elif val > length: + return length + else: + return val + + face_areas = [] + for det_face in det_faces: + left = get_location(det_face[0], w) + right = get_location(det_face[2], w) + top = get_location(det_face[1], h) + bottom = get_location(det_face[3], h) + face_area = (right - left) * (bottom - top) + face_areas.append(face_area) + largest_idx = face_areas.index(max(face_areas)) + return det_faces[largest_idx], largest_idx + + +def get_center_face(det_faces, h=0, w=0, center=None): + if center is not None: + center = np.array(center) + else: + center = np.array([w / 2, h / 2]) + center_dist = [] + for det_face in det_faces: + face_center = 
np.array( + [(det_face[0] + det_face[2]) / 2, (det_face[1] + det_face[3]) / 2] + ) + dist = np.linalg.norm(face_center - center) + center_dist.append(dist) + center_idx = center_dist.index(min(center_dist)) + return det_faces[center_idx], center_idx + + +class FaceRestoreHelper(object): + """Helper for the face restoration pipeline (base class).""" + + def __init__( + self, + upscale_factor, + face_size=512, + crop_ratio=(1, 1), + det_model="retinaface_resnet50", + save_ext="png", + template_3points=False, + pad_blur=False, + use_parse=False, + device=None, + model_rootpath=None, + ): + self.template_3points = template_3points # improve robustness + self.upscale_factor = upscale_factor + # the cropped face ratio based on the square face + self.crop_ratio = crop_ratio # (h, w) + assert ( + self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1 + ), "crop ration only supports >=1" + self.face_size = ( + int(face_size * self.crop_ratio[1]), + int(face_size * self.crop_ratio[0]), + ) + + if self.template_3points: + self.face_template = np.array([[192, 240], [319, 240], [257, 371]]) + else: + # standard 5 landmarks for FFHQ faces with 512 x 512 + self.face_template = np.array( + [ + [192.98138, 239.94708], + [318.90277, 240.1936], + [256.63416, 314.01935], + [201.26117, 371.41043], + [313.08905, 371.15118], + ] + ) + self.face_template = self.face_template * (face_size / 512.0) + if self.crop_ratio[0] > 1: + self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2 + if self.crop_ratio[1] > 1: + self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2 + self.save_ext = save_ext + self.pad_blur = pad_blur + if self.pad_blur is True: + self.template_3points = False + + self.all_landmarks_5 = [] + self.det_faces = [] + self.affine_matrices = [] + self.inverse_affine_matrices = [] + self.cropped_faces = [] + self.restored_faces = [] + self.pad_input_imgs = [] + + if device is None: + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + else: + self.device = device + + # init face detection model + self.face_det = init_detection_model( + det_model, half=False, device=self.device, model_rootpath=model_rootpath + ) + + # init face parsing model + self.use_parse = use_parse + self.face_parse = init_parsing_model( + model_name="parsenet", device=self.device, model_rootpath=model_rootpath + ) + + def set_upscale_factor(self, upscale_factor): + self.upscale_factor = upscale_factor + + def read_image(self, img): + """img can be image path or cv2 loaded image.""" + # self.input_img is Numpy array, (h, w, c), BGR, uint8, [0, 255] + if isinstance(img, str): + img = cv2.imread(img) + + if np.max(img) > 256: # 16-bit image + img = img / 65535 * 255 + if len(img.shape) == 2: # gray image + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + elif img.shape[2] == 4: # RGBA image with alpha channel + img = img[:, :, 0:3] + + self.input_img = img + + def get_face_landmarks_5( + self, + only_keep_largest=False, + only_center_face=False, + resize=None, + blur_ratio=0.01, + eye_dist_threshold=None, + ): + if resize is None: + scale = 1 + input_img = self.input_img + else: + h, w = self.input_img.shape[0:2] + scale = min(h, w) / resize + h, w = int(h / scale), int(w / scale) + input_img = cv2.resize( + self.input_img, (w, h), interpolation=cv2.INTER_LANCZOS4 + ) + + with torch.no_grad(): + bboxes = self.face_det.detect_faces(input_img, 0.97) * scale + for bbox in bboxes: + # remove faces with too small eye distance: side faces or too small faces + eye_dist = np.linalg.norm([bbox[5] 
- bbox[7], bbox[6] - bbox[8]]) + if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold): + continue + + if self.template_3points: + landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)]) + else: + landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)]) + self.all_landmarks_5.append(landmark) + self.det_faces.append(bbox[0:5]) + if len(self.det_faces) == 0: + return 0 + if only_keep_largest: + h, w, _ = self.input_img.shape + self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w) + self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]] + elif only_center_face: + h, w, _ = self.input_img.shape + self.det_faces, center_idx = get_center_face(self.det_faces, h, w) + self.all_landmarks_5 = [self.all_landmarks_5[center_idx]] + + # pad blurry images + if self.pad_blur: + self.pad_input_imgs = [] + for landmarks in self.all_landmarks_5: + # get landmarks + eye_left = landmarks[0, :] + eye_right = landmarks[1, :] + eye_avg = (eye_left + eye_right) * 0.5 + mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5 + eye_to_eye = eye_right - eye_left + eye_to_mouth = mouth_avg - eye_avg + + # Get the oriented crop rectangle + # x: half width of the oriented crop rectangle + x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] + # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise + # norm with the hypotenuse: get the direction + x /= np.hypot(*x) # get the hypotenuse of a right triangle + rect_scale = 1.5 + x *= max( + np.hypot(*eye_to_eye) * 2.0 * rect_scale, + np.hypot(*eye_to_mouth) * 1.8 * rect_scale, + ) + # y: half height of the oriented crop rectangle + y = np.flipud(x) * [-1, 1] + + # c: center + c = eye_avg + eye_to_mouth * 0.1 + # quad: (left_top, left_bottom, right_bottom, right_top) + quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) + # qsize: side length of the square + qsize = np.hypot(*x) * 2 + border = max(int(np.rint(qsize * 0.1)), 3) + + # get pad + # pad: (width_left, height_top, width_right, height_bottom) + pad = ( + int(np.floor(min(quad[:, 0]))), + int(np.floor(min(quad[:, 1]))), + int(np.ceil(max(quad[:, 0]))), + int(np.ceil(max(quad[:, 1]))), + ) + pad = [ + max(-pad[0] + border, 1), + max(-pad[1] + border, 1), + max(pad[2] - self.input_img.shape[0] + border, 1), + max(pad[3] - self.input_img.shape[1] + border, 1), + ] + + if max(pad) > 1: + # pad image + pad_img = np.pad( + self.input_img, + ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), + "reflect", + ) + # modify landmark coords + landmarks[:, 0] += pad[0] + landmarks[:, 1] += pad[1] + # blur pad images + h, w, _ = pad_img.shape + y, x, _ = np.ogrid[:h, :w, :1] + mask = np.maximum( + 1.0 + - np.minimum( + np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2] + ), + 1.0 + - np.minimum( + np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3] + ), + ) + blur = int(qsize * blur_ratio) + if blur % 2 == 0: + blur += 1 + blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur)) + # blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0) + + pad_img = pad_img.astype("float32") + pad_img += (blur_img - pad_img) * np.clip( + mask * 3.0 + 1.0, 0.0, 1.0 + ) + pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip( + mask, 0.0, 1.0 + ) + pad_img = np.clip(pad_img, 0, 255) # float32, [0, 255] + self.pad_input_imgs.append(pad_img) + else: + self.pad_input_imgs.append(np.copy(self.input_img)) + + return len(self.all_landmarks_5) + + def align_warp_face(self, save_cropped_path=None, border_mode="constant"): + """Align and warp faces with face template.""" + 
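+        # Typical call order for this helper (illustrative): read_image() ->
+        # get_face_landmarks_5() -> align_warp_face(); each cropped face is then
+        # restored by an external model, registered via add_restored_face(), and
+        # pasted back with get_inverse_affine() + paste_faces_to_input_image().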
if self.pad_blur: + assert ( + len(self.pad_input_imgs) == len(self.all_landmarks_5) + ), f"Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}" + for idx, landmark in enumerate(self.all_landmarks_5): + # use 5 landmarks to get affine matrix + # use cv2.LMEDS method for the equivalence to skimage transform + # ref: https://blog.csdn.net/yichxi/article/details/115827338 + affine_matrix = cv2.estimateAffinePartial2D( + landmark, self.face_template, method=cv2.LMEDS + )[0] + self.affine_matrices.append(affine_matrix) + # warp and crop faces + if border_mode == "constant": + border_mode = cv2.BORDER_CONSTANT + elif border_mode == "reflect101": + border_mode = cv2.BORDER_REFLECT101 + elif border_mode == "reflect": + border_mode = cv2.BORDER_REFLECT + if self.pad_blur: + input_img = self.pad_input_imgs[idx] + else: + input_img = self.input_img + cropped_face = cv2.warpAffine( + input_img, + affine_matrix, + self.face_size, + borderMode=border_mode, + borderValue=(135, 133, 132), + ) # gray + self.cropped_faces.append(cropped_face) + # save the cropped face + if save_cropped_path is not None: + path = os.path.splitext(save_cropped_path)[0] + save_path = f"{path}_{idx:02d}.{self.save_ext}" + imwrite(cropped_face, save_path) + + def get_inverse_affine(self, save_inverse_affine_path=None): + """Get inverse affine matrix.""" + for idx, affine_matrix in enumerate(self.affine_matrices): + inverse_affine = cv2.invertAffineTransform(affine_matrix) + inverse_affine *= self.upscale_factor + self.inverse_affine_matrices.append(inverse_affine) + # save inverse affine matrices + if save_inverse_affine_path is not None: + path, _ = os.path.splitext(save_inverse_affine_path) + save_path = f"{path}_{idx:02d}.pth" + torch.save(inverse_affine, save_path) + + def add_restored_face(self, face): + self.restored_faces.append(face) + + def paste_faces_to_input_image(self, save_path=None, upsample_img=None): + h, w, _ = self.input_img.shape + h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor) + + if upsample_img is None: + # simply resize the background + upsample_img = cv2.resize( + self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4 + ) + else: + upsample_img = cv2.resize( + upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4 + ) + + assert len(self.restored_faces) == len( + self.inverse_affine_matrices + ), "length of restored_faces and affine_matrices are different." 
+ for restored_face, inverse_affine in zip( + self.restored_faces, self.inverse_affine_matrices + ): + # Add an offset to inverse affine matrix, for more precise back alignment + if self.upscale_factor > 1: + extra_offset = 0.5 * self.upscale_factor + else: + extra_offset = 0 + inverse_affine[:, 2] += extra_offset + inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up)) + + if self.use_parse: + # inference + face_input = cv2.resize( + restored_face, (512, 512), interpolation=cv2.INTER_LINEAR + ) + face_input = img2tensor( + face_input.astype("float32") / 255.0, bgr2rgb=True, float32=True + ) + normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) + face_input = torch.unsqueeze(face_input, 0).to(self.device) + with torch.no_grad(): + out = self.face_parse(face_input)[0] + out = out.argmax(dim=1).squeeze().cpu().numpy() + + mask = np.zeros(out.shape) + MASK_COLORMAP = [ + 0, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 0, + 255, + 0, + 0, + 0, + ] + for idx, color in enumerate(MASK_COLORMAP): + mask[out == idx] = color + # blur the mask + mask = cv2.GaussianBlur(mask, (101, 101), 11) + mask = cv2.GaussianBlur(mask, (101, 101), 11) + # remove the black borders + thres = 10 + mask[:thres, :] = 0 + mask[-thres:, :] = 0 + mask[:, :thres] = 0 + mask[:, -thres:] = 0 + mask = mask / 255.0 + + mask = cv2.resize(mask, restored_face.shape[:2]) + mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up), flags=3) + inv_soft_mask = mask[:, :, None] + pasted_face = inv_restored + + else: # use square parse maps + mask = np.ones(self.face_size, dtype=np.float32) + inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up)) + # remove the black borders + inv_mask_erosion = cv2.erode( + inv_mask, + np.ones( + (int(2 * self.upscale_factor), int(2 * self.upscale_factor)), + np.uint8, + ), + ) + pasted_face = inv_mask_erosion[:, :, None] * inv_restored + total_face_area = np.sum(inv_mask_erosion) # // 3 + # compute the fusion edge based on the area of face + w_edge = int(total_face_area**0.5) // 20 + erosion_radius = w_edge * 2 + inv_mask_center = cv2.erode( + inv_mask_erosion, + np.ones((erosion_radius, erosion_radius), np.uint8), + ) + blur_size = w_edge * 2 + inv_soft_mask = cv2.GaussianBlur( + inv_mask_center, (blur_size + 1, blur_size + 1), 0 + ) + if len(upsample_img.shape) == 2: # upsample_img is gray image + upsample_img = upsample_img[:, :, None] + inv_soft_mask = inv_soft_mask[:, :, None] + + if ( + len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4 + ): # alpha channel + alpha = upsample_img[:, :, 3:] + upsample_img = ( + inv_soft_mask * pasted_face + + (1 - inv_soft_mask) * upsample_img[:, :, 0:3] + ) + upsample_img = np.concatenate((upsample_img, alpha), axis=2) + else: + upsample_img = ( + inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img + ) + + if np.max(upsample_img) > 256: # 16-bit image + upsample_img = upsample_img.astype(np.uint16) + else: + upsample_img = upsample_img.astype(np.uint8) + if save_path is not None: + path = os.path.splitext(save_path)[0] + save_path = f"{path}.{self.save_ext}" + imwrite(upsample_img, save_path) + return upsample_img + + def clean_all(self): + self.all_landmarks_5 = [] + self.restored_faces = [] + self.affine_matrices = [] + self.cropped_faces = [] + self.inverse_affine_matrices = [] + self.det_faces = [] + self.pad_input_imgs = [] diff --git a/inpaint/plugins/facexlib/utils/face_utils.py b/inpaint/plugins/facexlib/utils/face_utils.py new file mode 
100644 index 0000000..13ff043 --- /dev/null +++ b/inpaint/plugins/facexlib/utils/face_utils.py @@ -0,0 +1,208 @@
+import cv2
+import numpy as np
+import torch
+
+
+def compute_increased_bbox(bbox, increase_area, preserve_aspect=True):
+    left, top, right, bot = bbox
+    width = right - left
+    height = bot - top
+
+    if preserve_aspect:
+        width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
+        height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
+    else:
+        width_increase = height_increase = increase_area
+    left = int(left - width_increase * width)
+    top = int(top - height_increase * height)
+    right = int(right + width_increase * width)
+    bot = int(bot + height_increase * height)
+    return (left, top, right, bot)
+
+
+def get_valid_bboxes(bboxes, h, w):
+    left = max(bboxes[0], 0)
+    top = max(bboxes[1], 0)
+    right = min(bboxes[2], w)
+    bottom = min(bboxes[3], h)
+    return (left, top, right, bottom)
+
+
+def align_crop_face_landmarks(img,
+                              landmarks,
+                              output_size,
+                              transform_size=None,
+                              enable_padding=True,
+                              return_inverse_affine=False,
+                              shrink_ratio=(1, 1)):
+    """Align and crop face with landmarks.
+
+    The output_size and transform_size are based on width. The height is
+    adjusted based on the height/width entries of shrink_ratio.
+
+    Modified from:
+    https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
+
+    Args:
+        img (Numpy array): Input image.
+        landmarks (Numpy array): 5 or 68 or 98 landmarks.
+        output_size (int): Output face size.
+        transform_size (int): Transform size. Usually four times
+            output_size.
+        enable_padding (bool): Whether to pad the crop with a blurred
+            reflection border. Default: True.
+        shrink_ratio (float | tuple[float] | list[float]): Shrink the whole
+            face for height and width (crop larger area). Default: (1, 1).
+
+    Returns:
+        (Numpy array): Cropped face.
+ """ + lm_type = 'retinaface_5' # Options: dlib_5, retinaface_5 + + if isinstance(shrink_ratio, (float, int)): + shrink_ratio = (shrink_ratio, shrink_ratio) + if transform_size is None: + transform_size = output_size * 4 + + # Parse landmarks + lm = np.array(landmarks) + if lm.shape[0] == 5 and lm_type == 'retinaface_5': + eye_left = lm[0] + eye_right = lm[1] + mouth_avg = (lm[3] + lm[4]) * 0.5 + elif lm.shape[0] == 5 and lm_type == 'dlib_5': + lm_eye_left = lm[2:4] + lm_eye_right = lm[0:2] + eye_left = np.mean(lm_eye_left, axis=0) + eye_right = np.mean(lm_eye_right, axis=0) + mouth_avg = lm[4] + elif lm.shape[0] == 68: + lm_eye_left = lm[36:42] + lm_eye_right = lm[42:48] + eye_left = np.mean(lm_eye_left, axis=0) + eye_right = np.mean(lm_eye_right, axis=0) + mouth_avg = (lm[48] + lm[54]) * 0.5 + elif lm.shape[0] == 98: + lm_eye_left = lm[60:68] + lm_eye_right = lm[68:76] + eye_left = np.mean(lm_eye_left, axis=0) + eye_right = np.mean(lm_eye_right, axis=0) + mouth_avg = (lm[76] + lm[82]) * 0.5 + + eye_avg = (eye_left + eye_right) * 0.5 + eye_to_eye = eye_right - eye_left + eye_to_mouth = mouth_avg - eye_avg + + # Get the oriented crop rectangle + # x: half width of the oriented crop rectangle + x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] + # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise + # norm with the hypotenuse: get the direction + x /= np.hypot(*x) # get the hypotenuse of a right triangle + rect_scale = 1 # TODO: you can edit it to get larger rect + x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale) + # y: half height of the oriented crop rectangle + y = np.flipud(x) * [-1, 1] + + x *= shrink_ratio[1] # width + y *= shrink_ratio[0] # height + + # c: center + c = eye_avg + eye_to_mouth * 0.1 + # quad: (left_top, left_bottom, right_bottom, right_top) + quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) + # qsize: side length of the square + qsize = np.hypot(*x) * 2 + + quad_ori = np.copy(quad) + # Shrink, for large face + # TODO: do we really need shrink + shrink = int(np.floor(qsize / output_size * 0.5)) + if shrink > 1: + h, w = img.shape[0:2] + rsize = (int(np.rint(float(w) / shrink)), int(np.rint(float(h) / shrink))) + img = cv2.resize(img, rsize, interpolation=cv2.INTER_AREA) + quad /= shrink + qsize /= shrink + + # Crop + h, w = img.shape[0:2] + border = max(int(np.rint(qsize * 0.1)), 3) + crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), + int(np.ceil(max(quad[:, 1])))) + crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, w), min(crop[3] + border, h)) + if crop[2] - crop[0] < w or crop[3] - crop[1] < h: + img = img[crop[1]:crop[3], crop[0]:crop[2], :] + quad -= crop[0:2] + + # Pad + # pad: (width_left, height_top, width_right, height_bottom) + h, w = img.shape[0:2] + pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), + int(np.ceil(max(quad[:, 1])))) + pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - w + border, 0), max(pad[3] - h + border, 0)) + if enable_padding and max(pad) > border - 4: + pad = np.maximum(pad, int(np.rint(qsize * 0.3))) + img = np.pad(img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') + h, w = img.shape[0:2] + y, x, _ = np.ogrid[:h, :w, :1] + mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], + np.float32(w - 1 - x) / pad[2]), + 1.0 - np.minimum(np.float32(y) / pad[1], + np.float32(h - 1 - y) / pad[3])) + blur = 
int(qsize * 0.02) + if blur % 2 == 0: + blur += 1 + blur_img = cv2.boxFilter(img, 0, ksize=(blur, blur)) + + img = img.astype('float32') + img += (blur_img - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) + img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) + img = np.clip(img, 0, 255) # float32, [0, 255] + quad += pad[:2] + + # Transform use cv2 + h_ratio = shrink_ratio[0] / shrink_ratio[1] + dst_h, dst_w = int(transform_size * h_ratio), transform_size + template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]]) + # use cv2.LMEDS method for the equivalence to skimage transform + # ref: https://blog.csdn.net/yichxi/article/details/115827338 + affine_matrix = cv2.estimateAffinePartial2D(quad, template, method=cv2.LMEDS)[0] + cropped_face = cv2.warpAffine( + img, affine_matrix, (dst_w, dst_h), borderMode=cv2.BORDER_CONSTANT, borderValue=(135, 133, 132)) # gray + + if output_size < transform_size: + cropped_face = cv2.resize( + cropped_face, (output_size, int(output_size * h_ratio)), interpolation=cv2.INTER_LINEAR) + + if return_inverse_affine: + dst_h, dst_w = int(output_size * h_ratio), output_size + template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]]) + # use cv2.LMEDS method for the equivalence to skimage transform + # ref: https://blog.csdn.net/yichxi/article/details/115827338 + affine_matrix = cv2.estimateAffinePartial2D( + quad_ori, np.array([[0, 0], [0, output_size], [dst_w, dst_h], [dst_w, 0]]), method=cv2.LMEDS)[0] + inverse_affine = cv2.invertAffineTransform(affine_matrix) + else: + inverse_affine = None + return cropped_face, inverse_affine + + +def paste_face_back(img, face, inverse_affine): + h, w = img.shape[0:2] + face_h, face_w = face.shape[0:2] + inv_restored = cv2.warpAffine(face, inverse_affine, (w, h)) + mask = np.ones((face_h, face_w, 3), dtype=np.float32) + inv_mask = cv2.warpAffine(mask, inverse_affine, (w, h)) + # remove the black borders + inv_mask_erosion = cv2.erode(inv_mask, np.ones((2, 2), np.uint8)) + inv_restored_remove_border = inv_mask_erosion * inv_restored + total_face_area = np.sum(inv_mask_erosion) // 3 + # compute the fusion edge based on the area of face + w_edge = int(total_face_area**0.5) // 20 + erosion_radius = w_edge * 2 + inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8)) + blur_size = w_edge * 2 + inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0) + img = inv_soft_mask * inv_restored_remove_border + (1 - inv_soft_mask) * img + # float32, [0, 255] + return img diff --git a/inpaint/plugins/facexlib/utils/misc.py b/inpaint/plugins/facexlib/utils/misc.py new file mode 100644 index 0000000..b1a597c --- /dev/null +++ b/inpaint/plugins/facexlib/utils/misc.py @@ -0,0 +1,118 @@ +import cv2 +import os +import os.path as osp +import torch +from torch.hub import download_url_to_file, get_dir +from urllib.parse import urlparse + +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + + +def imwrite(img, file_path, params=None, auto_mkdir=True): + """Write image to file. + + Args: + img (ndarray): Image array to be written. + file_path (str): Image file path. + params (None or list): Same as opencv's :func:`imwrite` interface. + auto_mkdir (bool): If the parent folder of `file_path` does not exist, + whether to create it automatically. + + Returns: + bool: Successful or not. 
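+
+    Example:
+        Illustrative only; the path below is hypothetical::
+
+            ok = imwrite(img, "results/restored/face_001.png")
+            assert ok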
+ """ + if auto_mkdir: + dir_name = os.path.abspath(os.path.dirname(file_path)) + os.makedirs(dir_name, exist_ok=True) + return cv2.imwrite(file_path, img, params) + + +def img2tensor(imgs, bgr2rgb=True, float32=True): + """Numpy array to tensor. + + Args: + imgs (list[ndarray] | ndarray): Input images. + bgr2rgb (bool): Whether to change bgr to rgb. + float32 (bool): Whether to change to float32. + + Returns: + list[tensor] | tensor: Tensor images. If returned results only have + one element, just return tensor. + """ + + def _totensor(img, bgr2rgb, float32): + if img.shape[2] == 3 and bgr2rgb: + if img.dtype == 'float64': + img = img.astype('float32') + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = torch.from_numpy(img.transpose(2, 0, 1)) + if float32: + img = img.float() + return img + + if isinstance(imgs, list): + return [_totensor(img, bgr2rgb, float32) for img in imgs] + else: + return _totensor(imgs, bgr2rgb, float32) + + +def load_file_from_url(url, model_dir=None, progress=True, file_name=None, save_dir=None): + """Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py + """ + if model_dir is None: + hub_dir = get_dir() + model_dir = os.path.join(hub_dir, 'checkpoints') + + if save_dir is None: + save_dir = os.path.join(ROOT_DIR, model_dir) + os.makedirs(save_dir, exist_ok=True) + + parts = urlparse(url) + filename = os.path.basename(parts.path) + if file_name is not None: + filename = file_name + cached_file = os.path.abspath(os.path.join(save_dir, filename)) + if not os.path.exists(cached_file): + print(f'Downloading: "{url}" to {cached_file}\n') + download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) + return cached_file + + +def scandir(dir_path, suffix=None, recursive=False, full_path=False): + """Scan a directory to find the interested files. + Args: + dir_path (str): Path of the directory. + suffix (str | tuple(str), optional): File suffix that we are + interested in. Default: None. + recursive (bool, optional): If set to True, recursively scan the + directory. Default: False. + full_path (bool, optional): If set to True, include the dir_path. + Default: False. + Returns: + A generator for all the interested files with relative paths. + """ + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + + root = dir_path + + def _scandir(dir_path, suffix, recursive): + for entry in os.scandir(dir_path): + if not entry.name.startswith('.') and entry.is_file(): + if full_path: + return_path = entry.path + else: + return_path = osp.relpath(entry.path, root) + + if suffix is None: + yield return_path + elif return_path.endswith(suffix): + yield return_path + else: + if recursive: + yield from _scandir(entry.path, suffix=suffix, recursive=recursive) + else: + continue + + return _scandir(dir_path, suffix=suffix, recursive=recursive) diff --git a/inpaint/plugins/gfpgan/archs/gfpganv1_clean_arch.py b/inpaint/plugins/gfpgan/archs/gfpganv1_clean_arch.py new file mode 100644 index 0000000..0733216 --- /dev/null +++ b/inpaint/plugins/gfpgan/archs/gfpganv1_clean_arch.py @@ -0,0 +1,322 @@ +import math +import random +import torch +from torch import nn +from torch.nn import functional as F + +from .stylegan2_clean_arch import StyleGAN2GeneratorClean + + +class StyleGAN2GeneratorCSFT(StyleGAN2GeneratorClean): + """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). 
+ + It is the clean version without custom compiled CUDA extensions used in StyleGAN2. + + Args: + out_size (int): The spatial size of outputs. + num_style_feat (int): Channel number of style features. Default: 512. + num_mlp (int): Layer number of MLP style layers. Default: 8. + channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. + narrow (float): The narrow ratio for channels. Default: 1. + sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. + """ + + def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1, sft_half=False): + super(StyleGAN2GeneratorCSFT, self).__init__( + out_size, + num_style_feat=num_style_feat, + num_mlp=num_mlp, + channel_multiplier=channel_multiplier, + narrow=narrow) + self.sft_half = sft_half + + def forward(self, + styles, + conditions, + input_is_latent=False, + noise=None, + randomize_noise=True, + truncation=1, + truncation_latent=None, + inject_index=None, + return_latents=False): + """Forward function for StyleGAN2GeneratorCSFT. + + Args: + styles (list[Tensor]): Sample codes of styles. + conditions (list[Tensor]): SFT conditions to generators. + input_is_latent (bool): Whether input is latent style. Default: False. + noise (Tensor | None): Input noise or None. Default: None. + randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. + truncation (float): The truncation ratio. Default: 1. + truncation_latent (Tensor | None): The truncation latent tensor. Default: None. + inject_index (int | None): The injection index for mixing noise. Default: None. + return_latents (bool): Whether to return style latents. Default: False. + """ + # style codes -> latents with Style MLP layer + if not input_is_latent: + styles = [self.style_mlp(s) for s in styles] + # noises + if noise is None: + if randomize_noise: + noise = [None] * self.num_layers # for each style conv layer + else: # use the stored noise + noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] + # style truncation + if truncation < 1: + style_truncation = [] + for style in styles: + style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) + styles = style_truncation + # get style latents with injection + if len(styles) == 1: + inject_index = self.num_latent + + if styles[0].ndim < 3: + # repeat latent code for all the layers + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + else: # used for encoder with different latent code for each layer + latent = styles[0] + elif len(styles) == 2: # mixing noises + if inject_index is None: + inject_index = random.randint(1, self.num_latent - 1) + latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) + latent = torch.cat([latent1, latent2], 1) + + # main generation + out = self.constant_input(latent.shape[0]) + out = self.style_conv1(out, latent[:, 0], noise=noise[0]) + skip = self.to_rgb1(out, latent[:, 1]) + + i = 1 + for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], + noise[2::2], self.to_rgbs): + out = conv1(out, latent[:, i], noise=noise1) + + # the conditions may have fewer levels + if i < len(conditions): + # SFT part to combine the conditions + if self.sft_half: # only apply SFT to half of the channels + out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) + out_sft = out_sft * conditions[i - 1] + conditions[i] + 
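+                    # SFT (Spatial Feature Transform) is an affine modulation of the
+                    # feature map: out = scale * out + shift. Here conditions[i - 1] is
+                    # the per-pixel scale and conditions[i] is the per-pixel shift
+                    # produced by the U-Net part of GFPGANv1Clean. With sft_half=True
+                    # only the second half of the channels is modulated; the untouched
+                    # half is concatenated back below.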
out = torch.cat([out_same, out_sft], dim=1) + else: # apply SFT to all the channels + out = out * conditions[i - 1] + conditions[i] + + out = conv2(out, latent[:, i + 1], noise=noise2) + skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space + i += 2 + + image = skip + + if return_latents: + return image, latent + else: + return image, None + + +class ResBlock(nn.Module): + """Residual block with bilinear upsampling/downsampling. + + Args: + in_channels (int): Channel number of the input. + out_channels (int): Channel number of the output. + mode (str): Upsampling/downsampling mode. Options: down | up. Default: down. + """ + + def __init__(self, in_channels, out_channels, mode='down'): + super(ResBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1) + self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1) + self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False) + if mode == 'down': + self.scale_factor = 0.5 + elif mode == 'up': + self.scale_factor = 2 + + def forward(self, x): + out = F.leaky_relu_(self.conv1(x), negative_slope=0.2) + # upsample/downsample + out = F.interpolate(out, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) + out = F.leaky_relu_(self.conv2(out), negative_slope=0.2) + # skip + x = F.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) + skip = self.skip(x) + out = out + skip + return out + + +class GFPGANv1Clean(nn.Module): + """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT. + + It is the clean version without custom compiled CUDA extensions used in StyleGAN2. + + Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. + + Args: + out_size (int): The spatial size of outputs. + num_style_feat (int): Channel number of style features. Default: 512. + channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. + decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None. + fix_decoder (bool): Whether to fix the decoder. Default: True. + + num_mlp (int): Layer number of MLP style layers. Default: 8. + input_is_latent (bool): Whether input is latent style. Default: False. + different_w (bool): Whether to use different latent w for different layers. Default: False. + narrow (float): The narrow ratio for channels. Default: 1. + sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. 
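+
+    Example:
+        A minimal smoke test with random weights (illustrative only); the keyword
+        values mirror the configuration used by MyGFPGANer elsewhere in this patch::
+
+            import torch
+            net = GFPGANv1Clean(out_size=512, num_style_feat=512, channel_multiplier=2,
+                                decoder_load_path=None, fix_decoder=False, num_mlp=8,
+                                input_is_latent=True, different_w=True, narrow=1,
+                                sft_half=True)
+            x = torch.randn(1, 3, 512, 512)
+            image, out_rgbs = net(x)
+            # image: (1, 3, 512, 512) restored face
+            # out_rgbs: list of intermediate RGB previews from the U-Net decoder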
+ """ + + def __init__( + self, + out_size, + num_style_feat=512, + channel_multiplier=1, + decoder_load_path=None, + fix_decoder=True, + # for stylegan decoder + num_mlp=8, + input_is_latent=False, + different_w=False, + narrow=1, + sft_half=False): + + super(GFPGANv1Clean, self).__init__() + self.input_is_latent = input_is_latent + self.different_w = different_w + self.num_style_feat = num_style_feat + + unet_narrow = narrow * 0.5 # by default, use a half of input channels + channels = { + '4': int(512 * unet_narrow), + '8': int(512 * unet_narrow), + '16': int(512 * unet_narrow), + '32': int(512 * unet_narrow), + '64': int(256 * channel_multiplier * unet_narrow), + '128': int(128 * channel_multiplier * unet_narrow), + '256': int(64 * channel_multiplier * unet_narrow), + '512': int(32 * channel_multiplier * unet_narrow), + '1024': int(16 * channel_multiplier * unet_narrow) + } + + self.log_size = int(math.log(out_size, 2)) + first_out_size = 2**(int(math.log(out_size, 2))) + + self.conv_body_first = nn.Conv2d(3, channels[f'{first_out_size}'], 1) + + # downsample + in_channels = channels[f'{first_out_size}'] + self.conv_body_down = nn.ModuleList() + for i in range(self.log_size, 2, -1): + out_channels = channels[f'{2**(i - 1)}'] + self.conv_body_down.append(ResBlock(in_channels, out_channels, mode='down')) + in_channels = out_channels + + self.final_conv = nn.Conv2d(in_channels, channels['4'], 3, 1, 1) + + # upsample + in_channels = channels['4'] + self.conv_body_up = nn.ModuleList() + for i in range(3, self.log_size + 1): + out_channels = channels[f'{2**i}'] + self.conv_body_up.append(ResBlock(in_channels, out_channels, mode='up')) + in_channels = out_channels + + # to RGB + self.toRGB = nn.ModuleList() + for i in range(3, self.log_size + 1): + self.toRGB.append(nn.Conv2d(channels[f'{2**i}'], 3, 1)) + + if different_w: + linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat + else: + linear_out_channel = num_style_feat + + self.final_linear = nn.Linear(channels['4'] * 4 * 4, linear_out_channel) + + # the decoder: stylegan2 generator with SFT modulations + self.stylegan_decoder = StyleGAN2GeneratorCSFT( + out_size=out_size, + num_style_feat=num_style_feat, + num_mlp=num_mlp, + channel_multiplier=channel_multiplier, + narrow=narrow, + sft_half=sft_half) + + # load pre-trained stylegan2 model if necessary + if decoder_load_path: + self.stylegan_decoder.load_state_dict( + torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema']) + # fix decoder without updating params + if fix_decoder: + for _, param in self.stylegan_decoder.named_parameters(): + param.requires_grad = False + + # for SFT modulations (scale and shift) + self.condition_scale = nn.ModuleList() + self.condition_shift = nn.ModuleList() + for i in range(3, self.log_size + 1): + out_channels = channels[f'{2**i}'] + if sft_half: + sft_out_channels = out_channels + else: + sft_out_channels = out_channels * 2 + self.condition_scale.append( + nn.Sequential( + nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True), + nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1))) + self.condition_shift.append( + nn.Sequential( + nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True), + nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1))) + + def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True, **kwargs): + """Forward function for GFPGANv1Clean. + + Args: + x (Tensor): Input images. 
+ return_latents (bool): Whether to return style latents. Default: False. + return_rgb (bool): Whether return intermediate rgb images. Default: True. + randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. + """ + conditions = [] + unet_skips = [] + out_rgbs = [] + + # encoder + feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2) + for i in range(self.log_size - 2): + feat = self.conv_body_down[i](feat) + unet_skips.insert(0, feat) + feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2) + + # style code + style_code = self.final_linear(feat.view(feat.size(0), -1)) + if self.different_w: + style_code = style_code.view(style_code.size(0), -1, self.num_style_feat) + + # decode + for i in range(self.log_size - 2): + # add unet skip + feat = feat + unet_skips[i] + # ResUpLayer + feat = self.conv_body_up[i](feat) + # generate scale and shift for SFT layers + scale = self.condition_scale[i](feat) + conditions.append(scale.clone()) + shift = self.condition_shift[i](feat) + conditions.append(shift.clone()) + # generate rgb images + if return_rgb: + out_rgbs.append(self.toRGB[i](feat)) + + # decoder + image, _ = self.stylegan_decoder([style_code], + conditions, + return_latents=return_latents, + input_is_latent=self.input_is_latent, + randomize_noise=randomize_noise) + + return image, out_rgbs diff --git a/inpaint/plugins/gfpgan/archs/restoreformer_arch.py b/inpaint/plugins/gfpgan/archs/restoreformer_arch.py new file mode 100644 index 0000000..1485c3e --- /dev/null +++ b/inpaint/plugins/gfpgan/archs/restoreformer_arch.py @@ -0,0 +1,759 @@ +"""Modified from https://github.com/wzhouxiff/RestoreFormer""" + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class VectorQuantizer(nn.Module): + """ + see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py + ____________________________________________ + Discretization bottleneck part of the VQ-VAE. + Inputs: + - n_e : number of embeddings + - e_dim : dimension of embedding + - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 + _____________________________________________ + """ + + def __init__(self, n_e, e_dim, beta): + super(VectorQuantizer, self).__init__() + self.n_e = n_e + self.e_dim = e_dim + self.beta = beta + + self.embedding = nn.Embedding(self.n_e, self.e_dim) + self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) + + def forward(self, z): + """ + Inputs the output of the encoder network z and maps it to a discrete + one-hot vector that is the index of the closest embedding vector e_j + z (continuous) -> z_q (discrete) + z.shape = (batch, channel, height, width) + quantization pipeline: + 1. get encoder input (B,C,H,W) + 2. flatten input to (B*H*W,C) + """ + # reshape z -> (batch, height, width, channel) and flatten + z = z.permute(0, 2, 3, 1).contiguous() + z_flattened = z.view(-1, self.e_dim) + # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z + + d = ( + torch.sum(z_flattened**2, dim=1, keepdim=True) + + torch.sum(self.embedding.weight**2, dim=1) + - 2 * torch.matmul(z_flattened, self.embedding.weight.t()) + ) + + # could possible replace this here + # #\start... 
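+            # Nearest-neighbour lookup: d has shape (B*H*W, n_e); for every flattened
+            # spatial position we pick the codebook entry with the smallest squared
+            # distance, then rebuild z_q from a one-hot selection matrix. A terser,
+            # equivalent form is self.embedding(torch.argmin(d, dim=1)).view(z.shape),
+            # as sketched in the commented-out block further down.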
+ # find closest encodings + + min_value, min_encoding_indices = torch.min(d, dim=1) + + min_encoding_indices = min_encoding_indices.unsqueeze(1) + + min_encodings = torch.zeros(min_encoding_indices.shape[0], self.n_e).to(z) + min_encodings.scatter_(1, min_encoding_indices, 1) + + # dtype min encodings: torch.float32 + # min_encodings shape: torch.Size([2048, 512]) + # min_encoding_indices.shape: torch.Size([2048, 1]) + + # get quantized latent vectors + z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) + # .........\end + + # with: + # .........\start + # min_encoding_indices = torch.argmin(d, dim=1) + # z_q = self.embedding(min_encoding_indices) + # ......\end......... (TODO) + + # compute loss for embedding + loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean( + (z_q - z.detach()) ** 2 + ) + + # preserve gradients + z_q = z + (z_q - z).detach() + + # perplexity + + e_mean = torch.mean(min_encodings, dim=0) + perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10))) + + # reshape back to match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + return z_q, loss, (perplexity, min_encodings, min_encoding_indices, d) + + def get_codebook_entry(self, indices, shape): + # shape specifying (batch, height, width, channel) + # TODO: check for more easy handling with nn.Embedding + min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices) + min_encodings.scatter_(1, indices[:, None], 1) + + # get quantized latent vectors + z_q = torch.matmul(min_encodings.float(), self.embedding.weight) + + if shape is not None: + z_q = z_q.view(shape) + + # reshape back to match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + return z_q + + +# pytorch_diffusion + derived encoder decoder +def nonlinearity(x): + # swish + return x * torch.sigmoid(x) + + +def Normalize(in_channels): + return torch.nn.GroupNorm( + num_groups=32, num_channels=in_channels, eps=1e-6, affine=True + ) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=2, padding=0 + ) + + def forward(self, x): + if self.with_conv: + pad = (0, 1, 0, 1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + dropout, + temb_channels=512, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.norm2 = 
Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d( + out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + else: + self.nin_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x + h + + +class MultiHeadAttnBlock(nn.Module): + def __init__(self, in_channels, head_size=1): + super().__init__() + self.in_channels = in_channels + self.head_size = head_size + self.att_size = in_channels // head_size + assert ( + in_channels % head_size == 0 + ), "The size of head should be divided by the number of channels." + + self.norm1 = Normalize(in_channels) + self.norm2 = Normalize(in_channels) + + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.num = 0 + + def forward(self, x, y=None): + h_ = x + h_ = self.norm1(h_) + if y is None: + y = h_ + else: + y = self.norm2(y) + + q = self.q(y) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = q.reshape(b, self.head_size, self.att_size, h * w) + q = q.permute(0, 3, 1, 2) # b, hw, head, att + + k = k.reshape(b, self.head_size, self.att_size, h * w) + k = k.permute(0, 3, 1, 2) + + v = v.reshape(b, self.head_size, self.att_size, h * w) + v = v.permute(0, 3, 1, 2) + + q = q.transpose(1, 2) + v = v.transpose(1, 2) + k = k.transpose(1, 2).transpose(2, 3) + + scale = int(self.att_size) ** (-0.5) + q.mul_(scale) + w_ = torch.matmul(q, k) + w_ = F.softmax(w_, dim=3) + + w_ = w_.matmul(v) + + w_ = w_.transpose(1, 2).contiguous() # [b, h*w, head, att] + w_ = w_.view(b, h, w, -1) + w_ = w_.permute(0, 3, 1, 2) + + w_ = self.proj_out(w_) + + return x + w_ + + +class MultiHeadEncoder(nn.Module): + def __init__( + self, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks=2, + attn_resolutions=(16,), + dropout=0.0, + resamp_with_conv=True, + in_channels=3, + resolution=512, + z_channels=256, + double_z=True, + enable_mid=True, + head_size=1, + **ignore_kwargs, + ): + super().__init__() + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.enable_mid = enable_mid + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in 
range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(MultiHeadAttnBlock(block_in, head_size)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + if self.enable_mid: + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = MultiHeadAttnBlock(block_in, head_size) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, + 2 * z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1, + ) + + def forward(self, x): + hs = {} + # timestep embedding + temb = None + + # downsampling + h = self.conv_in(x) + hs["in"] = h + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](h, temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + + if i_level != self.num_resolutions - 1: + # hs.append(h) + hs["block_" + str(i_level)] = h + h = self.down[i_level].downsample(h) + + # middle + # h = hs[-1] + if self.enable_mid: + h = self.mid.block_1(h, temb) + hs["block_" + str(i_level) + "_atten"] = h + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + hs["mid_atten"] = h + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + # hs.append(h) + hs["out"] = h + + return hs + + +class MultiHeadDecoder(nn.Module): + def __init__( + self, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks=2, + attn_resolutions=(16,), + dropout=0.0, + resamp_with_conv=True, + in_channels=3, + resolution=512, + z_channels=256, + give_pre_end=False, + enable_mid=True, + head_size=1, + **ignorekwargs, + ): + super().__init__() + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.enable_mid = enable_mid + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + print( + "Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape) + ) + ) + + # z to block_in + self.conv_in = torch.nn.Conv2d( + z_channels, block_in, kernel_size=3, stride=1, padding=1 + ) + + # middle + if self.enable_mid: + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = MultiHeadAttnBlock(block_in, head_size) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): 
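+                # Note: the decoder stacks num_res_blocks + 1 ResnetBlocks per
+                # resolution, one more than the encoder; this appears to follow the
+                # VQGAN-style decoder layout that RestoreFormer is derived from.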
+ block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(MultiHeadAttnBlock(block_in, head_size)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, z): + # assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + if self.enable_mid: + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class MultiHeadDecoderTransformer(nn.Module): + def __init__( + self, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks=2, + attn_resolutions=(16,), + dropout=0.0, + resamp_with_conv=True, + in_channels=3, + resolution=512, + z_channels=256, + give_pre_end=False, + enable_mid=True, + head_size=1, + **ignorekwargs, + ): + super().__init__() + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.enable_mid = enable_mid + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + print( + "Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape) + ) + ) + + # z to block_in + self.conv_in = torch.nn.Conv2d( + z_channels, block_in, kernel_size=3, stride=1, padding=1 + ) + + # middle + if self.enable_mid: + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = MultiHeadAttnBlock(block_in, head_size) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(MultiHeadAttnBlock(block_in, head_size)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = 
torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, z, hs): + # assert z.shape[1:] == self.z_shape[1:] + # self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + if self.enable_mid: + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h, hs["mid_atten"]) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block]( + h, hs["block_" + str(i_level) + "_atten"] + ) + # hfeature = h.clone() + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class RestoreFormer(nn.Module): + def __init__( + self, + n_embed=1024, + embed_dim=256, + ch=64, + out_ch=3, + ch_mult=(1, 2, 2, 4, 4, 8), + num_res_blocks=2, + attn_resolutions=(16,), + dropout=0.0, + in_channels=3, + resolution=512, + z_channels=256, + double_z=False, + enable_mid=True, + fix_decoder=False, + fix_codebook=True, + fix_encoder=False, + head_size=8, + ): + super(RestoreFormer, self).__init__() + + self.encoder = MultiHeadEncoder( + ch=ch, + out_ch=out_ch, + ch_mult=ch_mult, + num_res_blocks=num_res_blocks, + attn_resolutions=attn_resolutions, + dropout=dropout, + in_channels=in_channels, + resolution=resolution, + z_channels=z_channels, + double_z=double_z, + enable_mid=enable_mid, + head_size=head_size, + ) + self.decoder = MultiHeadDecoderTransformer( + ch=ch, + out_ch=out_ch, + ch_mult=ch_mult, + num_res_blocks=num_res_blocks, + attn_resolutions=attn_resolutions, + dropout=dropout, + in_channels=in_channels, + resolution=resolution, + z_channels=z_channels, + enable_mid=enable_mid, + head_size=head_size, + ) + + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25) + + self.quant_conv = torch.nn.Conv2d(z_channels, embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, z_channels, 1) + + if fix_decoder: + for _, param in self.decoder.named_parameters(): + param.requires_grad = False + for _, param in self.post_quant_conv.named_parameters(): + param.requires_grad = False + for _, param in self.quantize.named_parameters(): + param.requires_grad = False + elif fix_codebook: + for _, param in self.quantize.named_parameters(): + param.requires_grad = False + + if fix_encoder: + for _, param in self.encoder.named_parameters(): + param.requires_grad = False + + def encode(self, x): + hs = self.encoder(x) + h = self.quant_conv(hs["out"]) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info, hs + + def decode(self, quant, hs): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant, hs) + + return dec + + def forward(self, input, **kwargs): + quant, diff, info, hs = self.encode(input) + dec = self.decode(quant, hs) + + return dec, None diff --git a/inpaint/plugins/gfpgan/archs/stylegan2_clean_arch.py b/inpaint/plugins/gfpgan/archs/stylegan2_clean_arch.py new file mode 100644 index 0000000..553368a --- /dev/null +++ b/inpaint/plugins/gfpgan/archs/stylegan2_clean_arch.py @@ -0,0 +1,434 @@ +import math +import random +import torch +from torch import nn +from torch.nn import functional as F + +from iopaint.plugins.basicsr.arch_util import default_init_weights + + +class NormStyleCode(nn.Module): + def forward(self, x): + """Normalize the style 
codes. + + Args: + x (Tensor): Style codes with shape (b, c). + + Returns: + Tensor: Normalized tensor. + """ + return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8) + + +class ModulatedConv2d(nn.Module): + """Modulated Conv2d used in StyleGAN2. + + There is no bias in ModulatedConv2d. + + Args: + in_channels (int): Channel number of the input. + out_channels (int): Channel number of the output. + kernel_size (int): Size of the convolving kernel. + num_style_feat (int): Channel number of style features. + demodulate (bool): Whether to demodulate in the conv layer. Default: True. + sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None. + eps (float): A value added to the denominator for numerical stability. Default: 1e-8. + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + num_style_feat, + demodulate=True, + sample_mode=None, + eps=1e-8, + ): + super(ModulatedConv2d, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.demodulate = demodulate + self.sample_mode = sample_mode + self.eps = eps + + # modulation inside each modulated conv + self.modulation = nn.Linear(num_style_feat, in_channels, bias=True) + # initialization + default_init_weights( + self.modulation, + scale=1, + bias_fill=1, + a=0, + mode="fan_in", + nonlinearity="linear", + ) + + self.weight = nn.Parameter( + torch.randn(1, out_channels, in_channels, kernel_size, kernel_size) + / math.sqrt(in_channels * kernel_size**2) + ) + self.padding = kernel_size // 2 + + def forward(self, x, style): + """Forward function. + + Args: + x (Tensor): Tensor with shape (b, c, h, w). + style (Tensor): Tensor with shape (b, num_style_feat). + + Returns: + Tensor: Modulated tensor after convolution. + """ + b, c, h, w = x.shape # c = c_in + # weight modulation + style = self.modulation(style).view(b, 1, c, 1, 1) + # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1) + weight = self.weight * style # (b, c_out, c_in, k, k) + + if self.demodulate: + demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) + weight = weight * demod.view(b, self.out_channels, 1, 1, 1) + + weight = weight.view( + b * self.out_channels, c, self.kernel_size, self.kernel_size + ) + + # upsample or downsample if necessary + if self.sample_mode == "upsample": + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False) + elif self.sample_mode == "downsample": + x = F.interpolate(x, scale_factor=0.5, mode="bilinear", align_corners=False) + + b, c, h, w = x.shape + x = x.view(1, b * c, h, w) + # weight: (b*c_out, c_in, k, k), groups=b + out = F.conv2d(x, weight, padding=self.padding, groups=b) + out = out.view(b, self.out_channels, *out.shape[2:4]) + + return out + + def __repr__(self): + return ( + f"{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, " + f"kernel_size={self.kernel_size}, demodulate={self.demodulate}, sample_mode={self.sample_mode})" + ) + + +class StyleConv(nn.Module): + """Style conv used in StyleGAN2. + + Args: + in_channels (int): Channel number of the input. + out_channels (int): Channel number of the output. + kernel_size (int): Size of the convolving kernel. + num_style_feat (int): Channel number of style features. + demodulate (bool): Whether demodulate in the conv layer. Default: True. + sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None. 
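+
+    Note:
+        The forward pass is: modulated conv scaled by sqrt(2), per-pixel noise
+        injection weighted by a learned scalar, a learned bias, then
+        LeakyReLU(0.2). If ``noise`` is None, a fresh standard-normal noise map
+        is sampled on every call.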
+ """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + num_style_feat, + demodulate=True, + sample_mode=None, + ): + super(StyleConv, self).__init__() + self.modulated_conv = ModulatedConv2d( + in_channels, + out_channels, + kernel_size, + num_style_feat, + demodulate=demodulate, + sample_mode=sample_mode, + ) + self.weight = nn.Parameter(torch.zeros(1)) # for noise injection + self.bias = nn.Parameter(torch.zeros(1, out_channels, 1, 1)) + self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + def forward(self, x, style, noise=None): + # modulate + out = self.modulated_conv(x, style) * 2**0.5 # for conversion + # noise injection + if noise is None: + b, _, h, w = out.shape + noise = out.new_empty(b, 1, h, w).normal_() + out = out + self.weight * noise + # add bias + out = out + self.bias + # activation + out = self.activate(out) + return out + + +class ToRGB(nn.Module): + """To RGB (image space) from features. + + Args: + in_channels (int): Channel number of input. + num_style_feat (int): Channel number of style features. + upsample (bool): Whether to upsample. Default: True. + """ + + def __init__(self, in_channels, num_style_feat, upsample=True): + super(ToRGB, self).__init__() + self.upsample = upsample + self.modulated_conv = ModulatedConv2d( + in_channels, + 3, + kernel_size=1, + num_style_feat=num_style_feat, + demodulate=False, + sample_mode=None, + ) + self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) + + def forward(self, x, style, skip=None): + """Forward function. + + Args: + x (Tensor): Feature tensor with shape (b, c, h, w). + style (Tensor): Tensor with shape (b, num_style_feat). + skip (Tensor): Base/skip tensor. Default: None. + + Returns: + Tensor: RGB images. + """ + out = self.modulated_conv(x, style) + out = out + self.bias + if skip is not None: + if self.upsample: + skip = F.interpolate( + skip, scale_factor=2, mode="bilinear", align_corners=False + ) + out = out + skip + return out + + +class ConstantInput(nn.Module): + """Constant input. + + Args: + num_channel (int): Channel number of constant input. + size (int): Spatial size of constant input. + """ + + def __init__(self, num_channel, size): + super(ConstantInput, self).__init__() + self.weight = nn.Parameter(torch.randn(1, num_channel, size, size)) + + def forward(self, batch): + out = self.weight.repeat(batch, 1, 1, 1) + return out + + +class StyleGAN2GeneratorClean(nn.Module): + """Clean version of StyleGAN2 Generator. + + Args: + out_size (int): The spatial size of outputs. + num_style_feat (int): Channel number of style features. Default: 512. + num_mlp (int): Layer number of MLP style layers. Default: 8. + channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. + narrow (float): Narrow ratio for channels. Default: 1.0. 
+ """ + + def __init__( + self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1 + ): + super(StyleGAN2GeneratorClean, self).__init__() + # Style MLP layers + self.num_style_feat = num_style_feat + style_mlp_layers = [NormStyleCode()] + for i in range(num_mlp): + style_mlp_layers.extend( + [ + nn.Linear(num_style_feat, num_style_feat, bias=True), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + ] + ) + self.style_mlp = nn.Sequential(*style_mlp_layers) + # initialization + default_init_weights( + self.style_mlp, + scale=1, + bias_fill=0, + a=0.2, + mode="fan_in", + nonlinearity="leaky_relu", + ) + + # channel list + channels = { + "4": int(512 * narrow), + "8": int(512 * narrow), + "16": int(512 * narrow), + "32": int(512 * narrow), + "64": int(256 * channel_multiplier * narrow), + "128": int(128 * channel_multiplier * narrow), + "256": int(64 * channel_multiplier * narrow), + "512": int(32 * channel_multiplier * narrow), + "1024": int(16 * channel_multiplier * narrow), + } + self.channels = channels + + self.constant_input = ConstantInput(channels["4"], size=4) + self.style_conv1 = StyleConv( + channels["4"], + channels["4"], + kernel_size=3, + num_style_feat=num_style_feat, + demodulate=True, + sample_mode=None, + ) + self.to_rgb1 = ToRGB(channels["4"], num_style_feat, upsample=False) + + self.log_size = int(math.log(out_size, 2)) + self.num_layers = (self.log_size - 2) * 2 + 1 + self.num_latent = self.log_size * 2 - 2 + + self.style_convs = nn.ModuleList() + self.to_rgbs = nn.ModuleList() + self.noises = nn.Module() + + in_channels = channels["4"] + # noise + for layer_idx in range(self.num_layers): + resolution = 2 ** ((layer_idx + 5) // 2) + shape = [1, 1, resolution, resolution] + self.noises.register_buffer(f"noise{layer_idx}", torch.randn(*shape)) + # style convs and to_rgbs + for i in range(3, self.log_size + 1): + out_channels = channels[f"{2 ** i}"] + self.style_convs.append( + StyleConv( + in_channels, + out_channels, + kernel_size=3, + num_style_feat=num_style_feat, + demodulate=True, + sample_mode="upsample", + ) + ) + self.style_convs.append( + StyleConv( + out_channels, + out_channels, + kernel_size=3, + num_style_feat=num_style_feat, + demodulate=True, + sample_mode=None, + ) + ) + self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True)) + in_channels = out_channels + + def make_noise(self): + """Make noise for noise injection.""" + device = self.constant_input.weight.device + noises = [torch.randn(1, 1, 4, 4, device=device)] + + for i in range(3, self.log_size + 1): + for _ in range(2): + noises.append(torch.randn(1, 1, 2**i, 2**i, device=device)) + + return noises + + def get_latent(self, x): + return self.style_mlp(x) + + def mean_latent(self, num_latent): + latent_in = torch.randn( + num_latent, self.num_style_feat, device=self.constant_input.weight.device + ) + latent = self.style_mlp(latent_in).mean(0, keepdim=True) + return latent + + def forward( + self, + styles, + input_is_latent=False, + noise=None, + randomize_noise=True, + truncation=1, + truncation_latent=None, + inject_index=None, + return_latents=False, + ): + """Forward function for StyleGAN2GeneratorClean. + + Args: + styles (list[Tensor]): Sample codes of styles. + input_is_latent (bool): Whether input is latent style. Default: False. + noise (Tensor | None): Input noise or None. Default: None. + randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. + truncation (float): The truncation ratio. Default: 1. 
+ truncation_latent (Tensor | None): The truncation latent tensor. Default: None. + inject_index (int | None): The injection index for mixing noise. Default: None. + return_latents (bool): Whether to return style latents. Default: False. + """ + # style codes -> latents with Style MLP layer + if not input_is_latent: + styles = [self.style_mlp(s) for s in styles] + # noises + if noise is None: + if randomize_noise: + noise = [None] * self.num_layers # for each style conv layer + else: # use the stored noise + noise = [ + getattr(self.noises, f"noise{i}") for i in range(self.num_layers) + ] + # style truncation + if truncation < 1: + style_truncation = [] + for style in styles: + style_truncation.append( + truncation_latent + truncation * (style - truncation_latent) + ) + styles = style_truncation + # get style latents with injection + if len(styles) == 1: + inject_index = self.num_latent + + if styles[0].ndim < 3: + # repeat latent code for all the layers + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + else: # used for encoder with different latent code for each layer + latent = styles[0] + elif len(styles) == 2: # mixing noises + if inject_index is None: + inject_index = random.randint(1, self.num_latent - 1) + latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + latent2 = ( + styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) + ) + latent = torch.cat([latent1, latent2], 1) + + # main generation + out = self.constant_input(latent.shape[0]) + out = self.style_conv1(out, latent[:, 0], noise=noise[0]) + skip = self.to_rgb1(out, latent[:, 1]) + + i = 1 + for conv1, conv2, noise1, noise2, to_rgb in zip( + self.style_convs[::2], + self.style_convs[1::2], + noise[1::2], + noise[2::2], + self.to_rgbs, + ): + out = conv1(out, latent[:, i], noise=noise1) + out = conv2(out, latent[:, i + 1], noise=noise2) + skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space + i += 2 + + image = skip + + if return_latents: + return image, latent + else: + return image, None diff --git a/inpaint/plugins/gfpgan_plugin.py b/inpaint/plugins/gfpgan_plugin.py new file mode 100644 index 0000000..760f525 --- /dev/null +++ b/inpaint/plugins/gfpgan_plugin.py @@ -0,0 +1,61 @@ +import cv2 +import numpy as np +from loguru import logger + +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest + + +class GFPGANPlugin(BasePlugin): + name = "GFPGAN" + support_gen_image = True + + def __init__(self, device, upscaler=None): + super().__init__() + from .gfpganer import MyGFPGANer + + url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" + model_md5 = "94d735072630ab734561130a47bc44f8" + model_path = download_model(url, model_md5) + logger.info(f"GFPGAN model path: {model_path}") + + # Use GFPGAN for face enhancement + self.face_enhancer = MyGFPGANer( + model_path=model_path, + upscale=1, + arch="clean", + channel_multiplier=2, + device=device, + bg_upsampler=upscaler.model if upscaler is not None else None, + ) + self.face_enhancer.face_helper.face_det.mean_tensor.to(device) + self.face_enhancer.face_helper.face_det = ( + self.face_enhancer.face_helper.face_det.to(device) + ) + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + weight = 0.5 + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + logger.info(f"GFPGAN input shape: {bgr_np_img.shape}") + _, _, bgr_output = self.face_enhancer.enhance( + bgr_np_img, + has_aligned=False, 
+ only_center_face=False, + paste_back=True, + weight=weight, + ) + logger.info(f"GFPGAN output shape: {bgr_output.shape}") + + # try: + # if scale != 2: + # interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4 + # h, w = img.shape[0:2] + # output = cv2.resize( + # output, + # (int(w * scale / 2), int(h * scale / 2)), + # interpolation=interpolation, + # ) + # except Exception as error: + # print("wrong scale input.", error) + return bgr_output diff --git a/inpaint/plugins/gfpganer.py b/inpaint/plugins/gfpganer.py new file mode 100644 index 0000000..26cdb71 --- /dev/null +++ b/inpaint/plugins/gfpganer.py @@ -0,0 +1,156 @@ +import os + +import cv2 +import torch +from torchvision.transforms.functional import normalize +from torch.hub import get_dir + +from .facexlib.utils.face_restoration_helper import FaceRestoreHelper +from .gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean +from .basicsr.img_util import img2tensor, tensor2img + + +class MyGFPGANer: + """Helper for restoration with GFPGAN. + + It will detect and crop faces, and then resize the faces to 512x512. + GFPGAN is used to restored the resized faces. + The background is upsampled with the bg_upsampler. + Finally, the faces will be pasted back to the upsample background image. + + Args: + model_path (str): The path to the GFPGAN model. It can be urls (will first download it automatically). + upscale (float): The upscale of the final output. Default: 2. + arch (str): The GFPGAN architecture. Option: clean | original. Default: clean. + channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. + bg_upsampler (nn.Module): The upsampler for the background. Default: None. + """ + + def __init__( + self, + model_path, + upscale=2, + arch="clean", + channel_multiplier=2, + bg_upsampler=None, + device=None, + ): + self.upscale = upscale + self.bg_upsampler = bg_upsampler + + # initialize model + self.device = ( + torch.device("cuda" if torch.cuda.is_available() else "cpu") + if device is None + else device + ) + # initialize the GFP-GAN + if arch == "clean": + self.gfpgan = GFPGANv1Clean( + out_size=512, + num_style_feat=512, + channel_multiplier=channel_multiplier, + decoder_load_path=None, + fix_decoder=False, + num_mlp=8, + input_is_latent=True, + different_w=True, + narrow=1, + sft_half=True, + ) + elif arch == "RestoreFormer": + from .gfpgan.archs.restoreformer_arch import RestoreFormer + + self.gfpgan = RestoreFormer() + + hub_dir = get_dir() + model_dir = os.path.join(hub_dir, "checkpoints") + + # initialize face helper + self.face_helper = FaceRestoreHelper( + upscale, + face_size=512, + crop_ratio=(1, 1), + det_model="retinaface_resnet50", + save_ext="png", + use_parse=True, + device=self.device, + model_rootpath=model_dir, + ) + + loadnet = torch.load(model_path) + if "params_ema" in loadnet: + keyname = "params_ema" + else: + keyname = "params" + self.gfpgan.load_state_dict(loadnet[keyname], strict=True) + self.gfpgan.eval() + self.gfpgan = self.gfpgan.to(self.device) + + @torch.no_grad() + def enhance( + self, + img, + has_aligned=False, + only_center_face=False, + paste_back=True, + weight=0.5, + ): + self.face_helper.clean_all() + + if has_aligned: # the inputs are already aligned + img = cv2.resize(img, (512, 512)) + self.face_helper.cropped_faces = [img] + else: + self.face_helper.read_image(img) + # get face landmarks for each face + self.face_helper.get_face_landmarks_5( + only_center_face=only_center_face, eye_dist_threshold=5 + ) + # eye_dist_threshold=5: skip faces whose 
eye distance is smaller than 5 pixels + # TODO: even with eye_dist_threshold, it will still introduce wrong detections and restorations. + # align and warp each face + self.face_helper.align_warp_face() + + # face restoration + for cropped_face in self.face_helper.cropped_faces: + # prepare data + cropped_face_t = img2tensor( + cropped_face / 255.0, bgr2rgb=True, float32=True + ) + normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) + cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device) + + try: + output = self.gfpgan(cropped_face_t, return_rgb=False, weight=weight)[0] + # convert to image + restored_face = tensor2img( + output.squeeze(0), rgb2bgr=True, min_max=(-1, 1) + ) + except RuntimeError as error: + print(f"\tFailed inference for GFPGAN: {error}.") + restored_face = cropped_face + + restored_face = restored_face.astype("uint8") + self.face_helper.add_restored_face(restored_face) + + if not has_aligned and paste_back: + # upsample the background + if self.bg_upsampler is not None: + # Now only support RealESRGAN for upsampling background + bg_img = self.bg_upsampler.enhance(img, outscale=self.upscale)[0] + else: + bg_img = None + + self.face_helper.get_inverse_affine(None) + # paste each restored face to the input image + restored_img = self.face_helper.paste_faces_to_input_image( + upsample_img=bg_img + ) + return ( + self.face_helper.cropped_faces, + self.face_helper.restored_faces, + restored_img, + ) + else: + return self.face_helper.cropped_faces, self.face_helper.restored_faces, None diff --git a/inpaint/plugins/interactive_seg.py b/inpaint/plugins/interactive_seg.py new file mode 100644 index 0000000..27859fa --- /dev/null +++ b/inpaint/plugins/interactive_seg.py @@ -0,0 +1,130 @@ +import hashlib +from typing import List + +import numpy as np +import torch +from loguru import logger + +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.plugins.segment_anything import SamPredictor, sam_model_registry +from iopaint.plugins.segment_anything.predictor_hq import SamHQPredictor +from iopaint.plugins.segment_anything2.build_sam import build_sam2 +from iopaint.plugins.segment_anything2.sam2_image_predictor import SAM2ImagePredictor +from iopaint.schema import RunPluginRequest + +# 从小到大 +SEGMENT_ANYTHING_MODELS = { + "vit_b": { + "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", + "md5": "01ec64d29a2fca3f0661936605ae66f8", + }, + "vit_l": { + "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth", + "md5": "0b3195507c641ddb6910d2bb5adee89c", + }, + "vit_h": { + "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", + "md5": "4b8939a88964f0f4ff5f5b2642c598a6", + }, + "mobile_sam": { + "url": "https://github.com/Sanster/models/releases/download/MobileSAM/mobile_sam.pt", + "md5": "f3c0d8cda613564d499310dab6c812cd", + }, + "sam_hq_vit_b": { + "url": "https://huggingface.co/lkeab/hq-sam/resolve/main/sam_hq_vit_b.pth", + "md5": "c6b8953247bcfdc8bb8ef91e36a6cacc", + }, + "sam_hq_vit_l": { + "url": "https://huggingface.co/lkeab/hq-sam/resolve/main/sam_hq_vit_l.pth", + "md5": "08947267966e4264fb39523eccc33f86", + }, + "sam_hq_vit_h": { + "url": "https://huggingface.co/lkeab/hq-sam/resolve/main/sam_hq_vit_h.pth", + "md5": "3560f6b6a5a6edacd814a1325c39640a", + }, + "sam2_tiny": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_tiny.pt", + "md5": "99eacccce4ada0b35153d4fd7af05297", + }, + "sam2_small": { + 
"url": "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_small.pt", + "md5": "7f320dbeb497330a2472da5a16c7324d", + }, + "sam2_base": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_base_plus.pt", + "md5": "09dc5a3d7719f64aaea1d37341ef26f2", + }, + "sam2_large": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_large.pt", + "md5": "08083462423be3260cd6a5eef94dc01c", + }, +} + + +class InteractiveSeg(BasePlugin): + name = "InteractiveSeg" + support_gen_mask = True + + def __init__(self, model_name, device): + super().__init__() + self.model_name = model_name + self.device = device + self._init_session(model_name) + + def _init_session(self, model_name: str): + model_path = download_model( + SEGMENT_ANYTHING_MODELS[model_name]["url"], + SEGMENT_ANYTHING_MODELS[model_name]["md5"], + ) + logger.info(f"SegmentAnything model path: {model_path}") + if "sam_hq" in model_name: + self.predictor = SamHQPredictor( + sam_model_registry[model_name](checkpoint=model_path).to(self.device) + ) + elif model_name.startswith("sam2"): + sam2_model = build_sam2( + model_name, ckpt_path=model_path, device=self.device + ) + self.predictor = SAM2ImagePredictor(sam2_model) + else: + self.predictor = SamPredictor( + sam_model_registry[model_name](checkpoint=model_path).to(self.device) + ) + self.prev_img_md5 = None + + def switch_model(self, new_model_name): + if self.model_name == new_model_name: + return + + logger.info( + f"Switching InteractiveSeg model from {self.model_name} to {new_model_name}" + ) + self._init_session(new_model_name) + self.model_name = new_model_name + + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + img_md5 = hashlib.md5(req.image.encode("utf-8")).hexdigest() + return self.forward(rgb_np_img, req.clicks, img_md5) + + @torch.inference_mode() + def forward(self, rgb_np_img, clicks: List[List], img_md5: str): + input_point = [] + input_label = [] + for click in clicks: + x = click[0] + y = click[1] + input_point.append([x, y]) + input_label.append(click[2]) + + if img_md5 and img_md5 != self.prev_img_md5: + self.prev_img_md5 = img_md5 + self.predictor.set_image(rgb_np_img) + + masks, _, _ = self.predictor.predict( + point_coords=np.array(input_point), + point_labels=np.array(input_label), + multimask_output=False, + ) + mask = masks[0].astype(np.uint8) * 255 + return mask diff --git a/inpaint/plugins/realesrgan.py b/inpaint/plugins/realesrgan.py new file mode 100644 index 0000000..21e0a8f --- /dev/null +++ b/inpaint/plugins/realesrgan.py @@ -0,0 +1,468 @@ +import math + +import cv2 +import numpy as np +import torch +from torch import nn +import torch.nn.functional as F +from loguru import logger + +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest, RealESRGANModel + + +class RealESRGANer: + """A helper class for upsampling images with RealESRGAN. + + Args: + scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4. + model_path (str): The path to the pretrained model. It can be urls (will first download it automatically). + model (nn.Module): The defined network. Default: None. + tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop + input images into tiles, and then process each of them. Finally, they will be merged into one image. + 0 denotes for do not use tile. Default: 0. 
+ tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10. + pre_pad (int): Pad the input images to avoid border artifacts. Default: 10. + half (bool): Whether to use half precision during inference. Default: False. + """ + + def __init__( + self, + scale, + model_path, + dni_weight=None, + model=None, + tile=0, + tile_pad=10, + pre_pad=10, + half=False, + device=None, + gpu_id=None, + ): + self.scale = scale + self.tile_size = tile + self.tile_pad = tile_pad + self.pre_pad = pre_pad + self.mod_scale = None + self.half = half + + # initialize model + if gpu_id: + self.device = ( + torch.device(f"cuda:{gpu_id}" if torch.cuda.is_available() else "cpu") + if device is None + else device + ) + else: + self.device = ( + torch.device("cuda" if torch.cuda.is_available() else "cpu") + if device is None + else device + ) + + if isinstance(model_path, list): + # dni + assert len(model_path) == len( + dni_weight + ), "model_path and dni_weight should have the same length." + loadnet = self.dni(model_path[0], model_path[1], dni_weight) + else: + # if the model_path starts with https, it will first download models to the folder: weights + loadnet = torch.load(model_path, map_location=torch.device("cpu")) + + # prefer to use params_ema + if "params_ema" in loadnet: + keyname = "params_ema" + else: + keyname = "params" + model.load_state_dict(loadnet[keyname], strict=True) + + model.eval() + self.model = model.to(self.device) + if self.half: + self.model = self.model.half() + + def dni(self, net_a, net_b, dni_weight, key="params", loc="cpu"): + """Deep network interpolation. + + ``Paper: Deep Network Interpolation for Continuous Imagery Effect Transition`` + """ + net_a = torch.load(net_a, map_location=torch.device(loc)) + net_b = torch.load(net_b, map_location=torch.device(loc)) + for k, v_a in net_a[key].items(): + net_a[key][k] = dni_weight[0] * v_a + dni_weight[1] * net_b[key][k] + return net_a + + def pre_process(self, img): + """Pre-process, such as pre-pad and mod pad, so that the image size is divisible by the required factor""" + img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float() + self.img = img.unsqueeze(0).to(self.device) + if self.half: + self.img = self.img.half() + + # pre_pad + if self.pre_pad != 0: + self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), "reflect") + # mod pad for divisible borders + if self.scale == 2: + self.mod_scale = 2 + elif self.scale == 1: + self.mod_scale = 4 + if self.mod_scale is not None: + self.mod_pad_h, self.mod_pad_w = 0, 0 + _, _, h, w = self.img.size() + if h % self.mod_scale != 0: + self.mod_pad_h = self.mod_scale - h % self.mod_scale + if w % self.mod_scale != 0: + self.mod_pad_w = self.mod_scale - w % self.mod_scale + self.img = F.pad( + self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), "reflect" + ) + + def process(self): + # model inference + self.output = self.model(self.img) + + def tile_process(self): + """It will first crop input images to tiles, and then process each tile. + Finally, all the processed tiles are merged into one image.
+ + Modified from: https://github.com/ata4/esrgan-launcher + """ + batch, channel, height, width = self.img.shape + output_height = height * self.scale + output_width = width * self.scale + output_shape = (batch, channel, output_height, output_width) + + # start with black image + self.output = self.img.new_zeros(output_shape) + tiles_x = math.ceil(width / self.tile_size) + tiles_y = math.ceil(height / self.tile_size) + + # loop over all tiles + for y in range(tiles_y): + for x in range(tiles_x): + # extract tile from input image + ofs_x = x * self.tile_size + ofs_y = y * self.tile_size + # input tile area on total image + input_start_x = ofs_x + input_end_x = min(ofs_x + self.tile_size, width) + input_start_y = ofs_y + input_end_y = min(ofs_y + self.tile_size, height) + + # input tile area on total image with padding + input_start_x_pad = max(input_start_x - self.tile_pad, 0) + input_end_x_pad = min(input_end_x + self.tile_pad, width) + input_start_y_pad = max(input_start_y - self.tile_pad, 0) + input_end_y_pad = min(input_end_y + self.tile_pad, height) + + # input tile dimensions + input_tile_width = input_end_x - input_start_x + input_tile_height = input_end_y - input_start_y + tile_idx = y * tiles_x + x + 1 + input_tile = self.img[ + :, + :, + input_start_y_pad:input_end_y_pad, + input_start_x_pad:input_end_x_pad, + ] + + # upscale tile + try: + with torch.no_grad(): + output_tile = self.model(input_tile) + except RuntimeError as error: + print("Error", error) + print(f"\tTile {tile_idx}/{tiles_x * tiles_y}") + + # output tile area on total image + output_start_x = input_start_x * self.scale + output_end_x = input_end_x * self.scale + output_start_y = input_start_y * self.scale + output_end_y = input_end_y * self.scale + + # output tile area without padding + output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale + output_end_x_tile = output_start_x_tile + input_tile_width * self.scale + output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale + output_end_y_tile = output_start_y_tile + input_tile_height * self.scale + + # put tile into output image + self.output[ + :, :, output_start_y:output_end_y, output_start_x:output_end_x + ] = output_tile[ + :, + :, + output_start_y_tile:output_end_y_tile, + output_start_x_tile:output_end_x_tile, + ] + + def post_process(self): + # remove extra pad + if self.mod_scale is not None: + _, _, h, w = self.output.size() + self.output = self.output[ + :, + :, + 0 : h - self.mod_pad_h * self.scale, + 0 : w - self.mod_pad_w * self.scale, + ] + # remove prepad + if self.pre_pad != 0: + _, _, h, w = self.output.size() + self.output = self.output[ + :, + :, + 0 : h - self.pre_pad * self.scale, + 0 : w - self.pre_pad * self.scale, + ] + return self.output + + @torch.no_grad() + def enhance(self, img, outscale=None, alpha_upsampler="realesrgan"): + h_input, w_input = img.shape[0:2] + # img: numpy + img = img.astype(np.float32) + if np.max(img) > 256: # 16-bit image + max_range = 65535 + print("\tInput is a 16-bit image") + else: + max_range = 255 + img = img / max_range + if len(img.shape) == 2: # gray image + img_mode = "L" + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + elif img.shape[2] == 4: # RGBA image with alpha channel + img_mode = "RGBA" + alpha = img[:, :, 3] + img = img[:, :, 0:3] + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + if alpha_upsampler == "realesrgan": + alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB) + else: + img_mode = "RGB" + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + # ------------------- process 
image (without the alpha channel) ------------------- # + self.pre_process(img) + if self.tile_size > 0: + self.tile_process() + else: + self.process() + output_img = self.post_process() + output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy() + output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0)) + if img_mode == "L": + output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY) + + # ------------------- process the alpha channel if necessary ------------------- # + if img_mode == "RGBA": + if alpha_upsampler == "realesrgan": + self.pre_process(alpha) + if self.tile_size > 0: + self.tile_process() + else: + self.process() + output_alpha = self.post_process() + output_alpha = ( + output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy() + ) + output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0)) + output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY) + else: # use the cv2 resize for alpha channel + h, w = alpha.shape[0:2] + output_alpha = cv2.resize( + alpha, + (w * self.scale, h * self.scale), + interpolation=cv2.INTER_LINEAR, + ) + + # merge the alpha channel + output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA) + output_img[:, :, 3] = output_alpha + + # ------------------------------ return ------------------------------ # + if max_range == 65535: # 16-bit image + output = (output_img * 65535.0).round().astype(np.uint16) + else: + output = (output_img * 255.0).round().astype(np.uint8) + + if outscale is not None and outscale != float(self.scale): + output = cv2.resize( + output, + ( + int(w_input * outscale), + int(h_input * outscale), + ), + interpolation=cv2.INTER_LANCZOS4, + ) + + return output, img_mode + + +class SRVGGNetCompact(nn.Module): + """A compact VGG-style network structure for super-resolution. + + It is a compact network structure, which performs upsampling in the last layer and no convolution is + conducted on the HR feature space. + + Args: + num_in_ch (int): Channel number of inputs. Default: 3. + num_out_ch (int): Channel number of outputs. Default: 3. + num_feat (int): Channel number of intermediate features. Default: 64. + num_conv (int): Number of convolution layers in the body network. Default: 16. + upscale (int): Upsampling factor. Default: 4. + act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu. 
+ """ + + def __init__( + self, + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_conv=16, + upscale=4, + act_type="prelu", + ): + super(SRVGGNetCompact, self).__init__() + self.num_in_ch = num_in_ch + self.num_out_ch = num_out_ch + self.num_feat = num_feat + self.num_conv = num_conv + self.upscale = upscale + self.act_type = act_type + + self.body = nn.ModuleList() + # the first conv + self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)) + # the first activation + if act_type == "relu": + activation = nn.ReLU(inplace=True) + elif act_type == "prelu": + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == "leakyrelu": + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the body structure + for _ in range(num_conv): + self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1)) + # activation + if act_type == "relu": + activation = nn.ReLU(inplace=True) + elif act_type == "prelu": + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == "leakyrelu": + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the last conv + self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1)) + # upsample + self.upsampler = nn.PixelShuffle(upscale) + + def forward(self, x): + out = x + for i in range(0, len(self.body)): + out = self.body[i](out) + + out = self.upsampler(out) + # add the nearest upsampled image, so that the network learns the residual + base = F.interpolate(x, scale_factor=self.upscale, mode="nearest") + out += base + return out + + +class RealESRGANUpscaler(BasePlugin): + name = "RealESRGAN" + support_gen_image = True + + def __init__(self, name, device, no_half=False): + super().__init__() + self.model_name = name + self.device = device + self.no_half = no_half + self._init_model(name) + + def _init_model(self, name): + from .basicsr import RRDBNet + + REAL_ESRGAN_MODELS = { + RealESRGANModel.realesr_general_x4v3: { + "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth", + "scale": 4, + "model": lambda: SRVGGNetCompact( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_conv=32, + upscale=4, + act_type="prelu", + ), + "model_md5": "91a7644643c884ee00737db24e478156", + }, + RealESRGANModel.RealESRGAN_x4plus: { + "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth", + "scale": 4, + "model": lambda: RRDBNet( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_block=23, + num_grow_ch=32, + scale=4, + ), + "model_md5": "99ec365d4afad750833258a1a24f44ca", + }, + RealESRGANModel.RealESRGAN_x4plus_anime_6B: { + "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth", + "scale": 4, + "model": lambda: RRDBNet( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_block=6, + num_grow_ch=32, + scale=4, + ), + "model_md5": "d58ce384064ec1591c2ea7b79dbf47ba", + }, + } + if name not in REAL_ESRGAN_MODELS: + raise ValueError(f"Unknown RealESRGAN model name: {name}") + model_info = REAL_ESRGAN_MODELS[name] + + model_path = download_model(model_info["url"], model_info["model_md5"]) + logger.info(f"RealESRGAN model path: {model_path}") + + self.model = RealESRGANer( + scale=model_info["scale"], + model_path=model_path, + model=model_info["model"](), + half=True if "cuda" in str(self.device) and not self.no_half else False, + tile=512, + tile_pad=10, + pre_pad=10, + device=self.device, + ) + + def switch_model(self, new_model_name: str): 
+ if self.model_name == new_model_name: + return + self._init_model(new_model_name) + self.model_name = new_model_name + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + logger.info(f"RealESRGAN input shape: {bgr_np_img.shape}, scale: {req.scale}") + result = self.forward(bgr_np_img, req.scale) + logger.info(f"RealESRGAN output shape: {result.shape}") + return result + + @torch.inference_mode() + def forward(self, bgr_np_img, scale: float): + # the output is BGR + upsampled = self.model.enhance(bgr_np_img, outscale=scale)[0] + return upsampled diff --git a/inpaint/plugins/remove_bg.py b/inpaint/plugins/remove_bg.py new file mode 100644 index 0000000..64bf785 --- /dev/null +++ b/inpaint/plugins/remove_bg.py @@ -0,0 +1,71 @@ +import os +import cv2 +import numpy as np +from loguru import logger +from torch.hub import get_dir + +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest, RemoveBGModel + + +class RemoveBG(BasePlugin): + name = "RemoveBG" + support_gen_mask = True + support_gen_image = True + + def __init__(self, model_name): + super().__init__() + self.model_name = model_name + + hub_dir = get_dir() + model_dir = os.path.join(hub_dir, "checkpoints") + os.environ["U2NET_HOME"] = model_dir + + self._init_session(model_name) + + def _init_session(self, model_name: str): + if model_name == RemoveBGModel.briaai_rmbg_1_4: + from iopaint.plugins.briarmbg import ( + create_briarmbg_session, + briarmbg_process, + ) + + self.session = create_briarmbg_session() + self.remove = briarmbg_process + else: + from rembg import new_session, remove + + self.session = new_session(model_name=model_name) + self.remove = remove + + def switch_model(self, new_model_name): + if self.model_name == new_model_name: + return + + logger.info( + f"Switching removebg model from {self.model_name} to {new_model_name}" + ) + self._init_session(new_model_name) + self.model_name = new_model_name + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + + # return BGRA image + output = self.remove(bgr_np_img, session=self.session) + return cv2.cvtColor(output, cv2.COLOR_BGRA2RGBA) + + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + + # return a mask image, 255 means foreground, 0 means background + output = self.remove(bgr_np_img, session=self.session, only_mask=True) + return output + + def check_dep(self): + try: + import rembg + except ImportError: + return ( + "RemoveBG is not installed, please install it first.
pip install rembg" + ) diff --git a/inpaint/plugins/restoreformer.py b/inpaint/plugins/restoreformer.py new file mode 100644 index 0000000..9bc3f07 --- /dev/null +++ b/inpaint/plugins/restoreformer.py @@ -0,0 +1,44 @@ +import cv2 +import numpy as np +from loguru import logger + +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest + + +class RestoreFormerPlugin(BasePlugin): + name = "RestoreFormer" + support_gen_image = True + + def __init__(self, device, upscaler=None): + super().__init__() + from .gfpganer import MyGFPGANer + + url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth" + model_md5 = "eaeeff6c4a1caa1673977cb374e6f699" + model_path = download_model(url, model_md5) + logger.info(f"RestoreFormer model path: {model_path}") + + self.face_enhancer = MyGFPGANer( + model_path=model_path, + upscale=1, + arch="RestoreFormer", + channel_multiplier=2, + device=device, + bg_upsampler=upscaler.model if upscaler is not None else None, + ) + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + weight = 0.5 + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + logger.info(f"RestoreFormer input shape: {bgr_np_img.shape}") + _, _, bgr_output = self.face_enhancer.enhance( + bgr_np_img, + has_aligned=False, + only_center_face=False, + paste_back=True, + weight=weight, + ) + logger.info(f"RestoreFormer output shape: {bgr_output.shape}") + return bgr_output diff --git a/inpaint/plugins/segment_anything/__init__.py b/inpaint/plugins/segment_anything/__init__.py new file mode 100644 index 0000000..420f04b --- /dev/null +++ b/inpaint/plugins/segment_anything/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .build_sam import ( + build_sam_vit_h, + build_sam_vit_l, + build_sam_vit_b, + build_sam_vit_h_hq, + build_sam_vit_l_hq, + build_sam_vit_b_hq, + sam_model_registry, +) +from .predictor import SamPredictor diff --git a/inpaint/plugins/segment_anything/build_sam.py b/inpaint/plugins/segment_anything/build_sam.py new file mode 100644 index 0000000..9b905ef --- /dev/null +++ b/inpaint/plugins/segment_anything/build_sam.py @@ -0,0 +1,269 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
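# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the files in this patch): the builders
# defined below are exposed through `sam_model_registry` and are normally
# paired with `SamPredictor`, exactly as InteractiveSeg does above. The
# checkpoint file name and the test image are assumptions for the example only.
#
#     import cv2
#     import numpy as np
#     from iopaint.plugins.segment_anything import sam_model_registry, SamPredictor
#
#     sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")
#     predictor = SamPredictor(sam.to("cpu"))
#     rgb = cv2.cvtColor(cv2.imread("photo.jpg"), cv2.COLOR_BGR2RGB)
#     predictor.set_image(rgb)
#     masks, scores, low_res_logits = predictor.predict(
#         point_coords=np.array([[100, 200]]),  # one (x, y) click
#         point_labels=np.array([1]),           # 1 = foreground, 0 = background
#         multimask_output=False,
#     )
#     mask = masks[0].astype(np.uint8) * 255    # same post-processing as InteractiveSeg
# ---------------------------------------------------------------------------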
+ +import torch + +from functools import partial + +from iopaint.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT + +from .modeling import ( + ImageEncoderViT, + MaskDecoder, + PromptEncoder, + Sam, + TwoWayTransformer, +) +from .modeling.image_encoder_hq import ImageEncoderViTHQ +from .modeling.mask_decoder import MaskDecoderHQ +from .modeling.sam_hq import SamHQ + + +def build_sam_vit_h(checkpoint=None): + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +def build_sam_vit_l(checkpoint=None): + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_vit_b(checkpoint=None): + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +def build_sam_vit_t(checkpoint=None): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + mobile_sam = Sam( + image_encoder=TinyViT( + img_size=1024, + in_chans=3, + num_classes=1000, + embed_dims=[64, 128, 160, 320], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 5, 10], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4.0, + drop_rate=0.0, + drop_path_rate=0.0, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=0.8, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + + mobile_sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + mobile_sam.load_state_dict(state_dict) + return mobile_sam + + +def build_sam_vit_h_hq(checkpoint=None): + return _build_sam_hq( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +def build_sam_vit_l_hq(checkpoint=None): + return _build_sam_hq( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_vit_b_hq(checkpoint=None): + return _build_sam_hq( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +sam_model_registry = { + "default": build_sam_vit_h, + "vit_h": build_sam_vit_h, + "vit_l": build_sam_vit_l, + "vit_b": build_sam_vit_b, + "sam_hq_vit_h": build_sam_vit_h_hq, + "sam_hq_vit_l": build_sam_vit_l_hq, + "sam_hq_vit_b": build_sam_vit_b_hq, + "mobile_sam": build_sam_vit_t, +} + + +def _build_sam( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, +): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + sam = Sam( + image_encoder=ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + 
img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + sam.load_state_dict(state_dict) + return sam + + +def _build_sam_hq( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, +): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + sam = SamHQ( + image_encoder=ImageEncoderViTHQ( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoderHQ( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + vit_dim=encoder_embed_dim, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + device = "cuda" if torch.cuda.is_available() else "cpu" + state_dict = torch.load(f, map_location=device) + info = sam.load_state_dict(state_dict, strict=False) + print(info) + for n, p in sam.named_parameters(): + if ( + "hf_token" not in n + and "hf_mlp" not in n + and "compress_vit_feat" not in n + and "embedding_encoder" not in n + and "embedding_maskfeature" not in n + ): + p.requires_grad = False + + return sam diff --git a/inpaint/plugins/segment_anything/modeling/__init__.py b/inpaint/plugins/segment_anything/modeling/__init__.py new file mode 100644 index 0000000..38e9062 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
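# ---------------------------------------------------------------------------
# A minimal sketch of the fixed geometry assumed by _build_sam / _build_sam_hq
# above; every value is taken from that code, the arithmetic is only spelled
# out here for clarity.
#
#     image_size = 1024                 # inputs are padded/resized to 1024x1024
#     vit_patch_size = 16               # ViT patchify stride
#     image_embedding_size = image_size // vit_patch_size   # 64x64 token grid
#     prompt_embed_dim = 256            # channel width shared by the encoder neck,
#                                       # the prompt encoder and the mask decoder
#     assert image_embedding_size == 64
#
# pixel_mean / pixel_std are the standard ImageNet RGB statistics scaled to the
# 0-255 range, which is why the same values are passed to every Sam variant.
# ---------------------------------------------------------------------------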
+ +from .sam import Sam +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder +from .transformer import TwoWayTransformer diff --git a/inpaint/plugins/segment_anything/modeling/common.py b/inpaint/plugins/segment_anything/modeling/common.py new file mode 100644 index 0000000..2bf1523 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/common.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn + +from typing import Type + + +class MLPBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.lin2(self.act(self.lin1(x))) + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/inpaint/plugins/segment_anything/modeling/image_encoder.py b/inpaint/plugins/segment_anything/modeling/image_encoder.py new file mode 100644 index 0000000..a6ad9ad --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/image_encoder.py @@ -0,0 +1,395 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing import Optional, Tuple, Type + +from .common import LayerNorm2d, MLPBlock + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViT(nn.Module): + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + ) -> None: + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. + """ + super().__init__() + self.img_size = img_size + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = nn.Parameter( + torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim) + ) + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + ) + self.blocks.append(block) + + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + out_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_chans), + nn.Conv2d( + out_chans, + out_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_chans), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + self.pos_embed + + for blk in self.blocks: + x = blk(x) + + x = self.neck(x.permute(0, 3, 1, 2)) + + return x + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. 
+ """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + self.norm2 = norm_layer(dim) + self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer) + + self.window_size = window_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + x + x = x + self.mlp(self.norm2(x)) + + return x + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool: If True, add a learnable bias to query, key, value. + rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert ( + input_size is not None + ), "Input size must be provided if using relative positional encoding." + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
+ (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] +) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. 
+ """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = ( + attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + ).view(B, q_h * q_w, k_h * k_w) + + return attn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + ) -> None: + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. + """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/inpaint/plugins/segment_anything/modeling/image_encoder_hq.py b/inpaint/plugins/segment_anything/modeling/image_encoder_hq.py new file mode 100644 index 0000000..f12803b --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/image_encoder_hq.py @@ -0,0 +1,422 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing import Optional, Tuple, Type + +from .common import LayerNorm2d, MLPBlock + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViTHQ(nn.Module): + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + ) -> None: + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. 
+ """ + super().__init__() + self.img_size = img_size + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = nn.Parameter( + torch.zeros( + 1, img_size // patch_size, img_size // patch_size, embed_dim + ) + ) + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + ) + self.blocks.append(block) + + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + out_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_chans), + nn.Conv2d( + out_chans, + out_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_chans), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + self.pos_embed + + interm_embeddings = [] + for blk in self.blocks: + x = blk(x) + if blk.window_size == 0: + interm_embeddings.append(x) + + x = self.neck(x.permute(0, 3, 1, 2)) + + return x, interm_embeddings + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (tuple(int, int) or None): Input resolution for calculating the relative + positional parameter size. 
+ """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + self.norm2 = norm_layer(dim) + self.mlp = MLPBlock( + embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer + ) + + self.window_size = window_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + x + x = x + self.mlp(self.norm2(x)) + + return x + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (tuple(int, int) or None): Input resolution for calculating the relative + positional parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert ( + input_size is not None + ), "Input size must be provided if using relative positional encoding." + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = ( + self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + ) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos( + attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W) + ) + + attn = attn.softmax(dim=-1) + x = ( + (attn @ v) + .view(B, self.num_heads, H, W, -1) + .permute(0, 2, 3, 1, 4) + .reshape(B, H, W, -1) + ) + x = self.proj(x) + + return x + + +def window_partition( + x: torch.Tensor, window_size: int +) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
+ (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = ( + x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + ) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: torch.Tensor, + window_size: int, + pad_hw: Tuple[int, int], + hw: Tuple[int, int], +) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view( + B, Hp // window_size, Wp // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. 
+ """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = ( + attn.view(B, q_h, q_w, k_h, k_w) + + rel_h[:, :, :, :, None] + + rel_w[:, :, :, None, :] + ).view(B, q_h * q_w, k_h * k_w) + + return attn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + ) -> None: + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/inpaint/plugins/segment_anything/modeling/mask_decoder.py b/inpaint/plugins/segment_anything/modeling/mask_decoder.py new file mode 100644 index 0000000..67e0f77 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/mask_decoder.py @@ -0,0 +1,410 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import nn +from torch.nn import functional as F + +from typing import List, Tuple, Type + +from .common import LayerNorm2d + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + tranformer architecture. 
+ + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth + ) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. + + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + masks, iou_pred = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + ) + + # Select the correct mask or masks for outptu + if multimask_output: + mask_slice = slice(1, None) + else: + mask_slice = slice(0, 1) + masks = masks[:, mask_slice, :, :] + iou_pred = iou_pred[:, mask_slice] + + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details.""" + # Concatenate output tokens + output_tokens = torch.cat( + [self.iou_token.weight, self.mask_tokens.weight], dim=0 + ) + output_tokens = output_tokens.unsqueeze(0).expand( + sparse_prompt_embeddings.size(0), -1, -1 + ) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + src = src + dense_prompt_embeddings + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, 0, :] + mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + upscaled_embedding = self.output_upscaling(src) + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append( + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) + ) + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + + return masks, iou_pred + +# https://github.com/SysCV/sam-hq/blob/main/segment_anything/modeling/mask_decoder_hq.py#L17 +class MaskDecoderHQ(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + vit_dim: int = 1024, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. 
+ + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth + ) + + # HQ-SAM parameters + self.hf_token = nn.Embedding(1, transformer_dim) # HQ-Output-Token + self.hf_mlp = MLP( + transformer_dim, transformer_dim, transformer_dim // 8, 3 + ) # corresponding new MLP layer for HQ-Output-Token + self.num_mask_tokens = self.num_mask_tokens + 1 + + # three conv fusion layers for obtaining HQ-Feature + self.compress_vit_feat = nn.Sequential( + nn.ConvTranspose2d(vit_dim, transformer_dim, kernel_size=2, stride=2), + LayerNorm2d(transformer_dim), + nn.GELU(), + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 8, kernel_size=2, stride=2 + ), + ) + + self.embedding_encoder = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + nn.GELU(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + ) + self.embedding_maskfeature = nn.Sequential( + nn.Conv2d(transformer_dim // 8, transformer_dim // 4, 3, 1, 1), + LayerNorm2d(transformer_dim // 4), + nn.GELU(), + nn.Conv2d(transformer_dim // 4, transformer_dim // 8, 3, 1, 1), + ) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + hq_token_only: bool, + interm_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the ViT image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. 
+ + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + vit_features = interm_embeddings[0].permute( + 0, 3, 1, 2 + ) # early-layer ViT feature, after 1st global attention block in ViT + hq_features = self.embedding_encoder(image_embeddings) + self.compress_vit_feat( + vit_features + ) + + masks, iou_pred = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + hq_features=hq_features, + ) + + # Select the correct mask or masks for output + if multimask_output: + # mask with highest score + mask_slice = slice(1, self.num_mask_tokens - 1) + iou_pred = iou_pred[:, mask_slice] + iou_pred, max_iou_idx = torch.max(iou_pred, dim=1) + iou_pred = iou_pred.unsqueeze(1) + masks_multi = masks[:, mask_slice, :, :] + masks_sam = masks_multi[ + torch.arange(masks_multi.size(0)), max_iou_idx + ].unsqueeze(1) + else: + # single mask output, default + mask_slice = slice(0, 1) + iou_pred = iou_pred[:, mask_slice] + masks_sam = masks[:, mask_slice] + + masks_hq = masks[:, slice(self.num_mask_tokens - 1, self.num_mask_tokens)] + if hq_token_only: + masks = masks_hq + else: + masks = masks_sam + masks_hq + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + hq_features: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. See 'forward' for more details.""" + # Concatenate output tokens + output_tokens = torch.cat( + [self.iou_token.weight, self.mask_tokens.weight, self.hf_token.weight], + dim=0, + ) + output_tokens = output_tokens.unsqueeze(0).expand( + sparse_prompt_embeddings.size(0), -1, -1 + ) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + src = src + dense_prompt_embeddings + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, 0, :] + mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + + upscaled_embedding_sam = self.output_upscaling(src) + upscaled_embedding_hq = self.embedding_maskfeature( + upscaled_embedding_sam + ) + hq_features.repeat(b, 1, 1, 1) + + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + if i < self.num_mask_tokens - 1: + hyper_in_list.append( + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) + ) + else: + hyper_in_list.append(self.hf_mlp(mask_tokens_out[:, i, :])) + + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding_sam.shape + + masks_sam = ( + hyper_in[:, : self.num_mask_tokens - 1] + @ upscaled_embedding_sam.view(b, c, h * w) + ).view(b, -1, h, w) + masks_sam_hq = ( + hyper_in[:, self.num_mask_tokens - 1 :] + @ upscaled_embedding_hq.view(b, c, h * w) + ).view(b, -1, h, w) + masks = torch.cat([masks_sam, masks_sam_hq], dim=1) + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + + return masks, iou_pred + + +# Lightly adapted from +# 
https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.sigmoid_output = sigmoid_output + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x diff --git a/inpaint/plugins/segment_anything/modeling/prompt_encoder.py b/inpaint/plugins/segment_anything/modeling/prompt_encoder.py new file mode 100644 index 0000000..c3143f4 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/prompt_encoder.py @@ -0,0 +1,214 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch import nn + +from typing import Any, Optional, Tuple, Type + +from .common import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. + """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. 
+ + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. + """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. 
+ torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C diff --git a/inpaint/plugins/segment_anything/modeling/sam.py b/inpaint/plugins/segment_anything/modeling/sam.py new file mode 100644 index 0000000..303bc2f --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/sam.py @@ -0,0 +1,174 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
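Before the Sam module below, here is a minimal usage sketch of the PromptEncoder defined above. It is illustrative only: the constructor values mirror common SAM-ViT settings and are assumptions, not values taken from this patch, and the import assumes the package layout introduced by these files is importable.

import torch
from inpaint.plugins.segment_anything.modeling.prompt_encoder import PromptEncoder

# Illustrative SAM-style sizes (assumed): 1024x1024 padded input, 64x64 embedding grid.
prompt_encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[500.0, 375.0], [200.0, 300.0]]])  # 1x2x2 point coordinates in pixels
labels = torch.tensor([[1, 0]])                            # 1 = foreground, 0 = background
sparse, dense = prompt_encoder(points=(coords, labels), boxes=None, masks=None)
# sparse: 1x3x256 -- two points plus one padding point, since no box prompt was given
# dense:  1x256x64x64 -- the learned no_mask_embed broadcast over the embedding grid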
+ +import torch +from torch import nn +from torch.nn import functional as F + +from typing import Any, Dict, List, Tuple + +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder + + +class Sam(nn.Module): + mask_threshold: float = 0.0 + image_format: str = "RGB" + + def __init__( + self, + image_encoder: ImageEncoderViT, + prompt_encoder: PromptEncoder, + mask_decoder: MaskDecoder, + pixel_mean: List[float] = [123.675, 116.28, 103.53], + pixel_std: List[float] = [58.395, 57.12, 57.375], + ) -> None: + """ + SAM predicts object masks from an image and input prompts. + + Arguments: + image_encoder (ImageEncoderViT): The backbone used to encode the + image into image embeddings that allow for efficient mask prediction. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts masks from the image embeddings + and encoded prompts. + pixel_mean (list(float)): Mean values for normalizing pixels in the input image. + pixel_std (list(float)): Std values for normalizing pixels in the input image. + """ + super().__init__() + self.image_encoder = image_encoder + self.prompt_encoder = prompt_encoder + self.mask_decoder = mask_decoder + self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self) -> Any: + return self.pixel_mean.device + + @torch.no_grad() + def forward( + self, + batched_input: List[Dict[str, Any]], + multimask_output: bool, + ) -> List[Dict[str, torch.Tensor]]: + """ + Predicts masks end-to-end from provided images and prompts. + If prompts are not known in advance, using SamPredictor is + recommended over calling the model directly. + + Arguments: + batched_input (list(dict)): A list over input images, each a + dictionary with the following keys. A prompt key can be + excluded if it is not present. + 'image': The image as a torch tensor in 3xHxW format, + already transformed for input to the model. + 'original_size': (tuple(int, int)) The original size of + the image before transformation, as (H, W). + 'point_coords': (torch.Tensor) Batched point prompts for + this image, with shape BxNx2. Already transformed to the + input frame of the model. + 'point_labels': (torch.Tensor) Batched labels for point prompts, + with shape BxN. + 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4. + Already transformed to the input frame of the model. + 'mask_inputs': (torch.Tensor) Batched mask inputs to the model, + in the form Bx1xHxW. + multimask_output (bool): Whether the model should predict multiple + disambiguating masks, or return a single mask. + + Returns: + (list(dict)): A list over input images, where each element is + a dictionary with the following keys. + 'masks': (torch.Tensor) Batched binary mask predictions, + with shape BxCxHxW, where B is the number of input prompts, + C is determined by multimask_output, and (H, W) is the + original size of the image. + 'iou_predictions': (torch.Tensor) The model's predictions + of mask quality, in shape BxC. + 'low_res_logits': (torch.Tensor) Low resolution logits with + shape BxCxHxW, where H=W=256. Can be passed as mask input + to subsequent iterations of prediction. 
+ """ + input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0) + image_embeddings = self.image_encoder(input_images) + + outputs = [] + for image_record, curr_embedding in zip(batched_input, image_embeddings): + if "point_coords" in image_record: + points = (image_record["point_coords"], image_record["point_labels"]) + else: + points = None + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=image_record.get("boxes", None), + masks=image_record.get("mask_inputs", None), + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=curr_embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=image_record["image"].shape[-2:], + original_size=image_record["original_size"], + ) + masks = masks > self.mask_threshold + outputs.append( + { + "masks": masks, + "iou_predictions": iou_predictions, + "low_res_logits": low_res_masks, + } + ) + return outputs + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. + """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x diff --git a/inpaint/plugins/segment_anything/modeling/sam_hq.py b/inpaint/plugins/segment_anything/modeling/sam_hq.py new file mode 100644 index 0000000..d2ae3a3 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/sam_hq.py @@ -0,0 +1,177 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import torch +from torch import nn +from torch.nn import functional as F + +from typing import Any, Dict, List, Tuple + +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder + + +class SamHQ(nn.Module): + mask_threshold: float = 0.0 + image_format: str = "RGB" + + def __init__( + self, + image_encoder: ImageEncoderViT, + prompt_encoder: PromptEncoder, + mask_decoder: MaskDecoder, + pixel_mean: List[float] = [123.675, 116.28, 103.53], + pixel_std: List[float] = [58.395, 57.12, 57.375], + ) -> None: + """ + SAM predicts object masks from an image and input prompts. + + Arguments: + image_encoder (ImageEncoderViT): The backbone used to encode the + image into image embeddings that allow for efficient mask prediction. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts masks from the image embeddings + and encoded prompts. + pixel_mean (list(float)): Mean values for normalizing pixels in the input image. + pixel_std (list(float)): Std values for normalizing pixels in the input image. + """ + super().__init__() + self.image_encoder = image_encoder + self.prompt_encoder = prompt_encoder + self.mask_decoder = mask_decoder + self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self) -> Any: + return self.pixel_mean.device + + def forward( + self, + batched_input: List[Dict[str, Any]], + multimask_output: bool, + hq_token_only: bool = False, + ) -> List[Dict[str, torch.Tensor]]: + """ + Predicts masks end-to-end from provided images and prompts. + If prompts are not known in advance, using SamPredictor is + recommended over calling the model directly. + + Arguments: + batched_input (list(dict)): A list over input images, each a + dictionary with the following keys. A prompt key can be + excluded if it is not present. + 'image': The image as a torch tensor in 3xHxW format, + already transformed for input to the model. + 'original_size': (tuple(int, int)) The original size of + the image before transformation, as (H, W). + 'point_coords': (torch.Tensor) Batched point prompts for + this image, with shape BxNx2. Already transformed to the + input frame of the model. + 'point_labels': (torch.Tensor) Batched labels for point prompts, + with shape BxN. + 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4. + Already transformed to the input frame of the model. + 'mask_inputs': (torch.Tensor) Batched mask inputs to the model, + in the form Bx1xHxW. + multimask_output (bool): Whether the model should predict multiple + disambiguating masks, or return a single mask. + + Returns: + (list(dict)): A list over input images, where each element is + a dictionary with the following keys. + 'masks': (torch.Tensor) Batched binary mask predictions, + with shape BxCxHxW, where B is the number of input prompts, + C is determined by multimask_output, and (H, W) is the + original size of the image. + 'iou_predictions': (torch.Tensor) The model's predictions + of mask quality, in shape BxC. + 'low_res_logits': (torch.Tensor) Low resolution logits with + shape BxCxHxW, where H=W=256. Can be passed as mask input + to subsequent iterations of prediction. 
+ """ + input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0) + image_embeddings, interm_embeddings = self.image_encoder(input_images) + interm_embeddings = interm_embeddings[0] # early layer + + outputs = [] + for image_record, curr_embedding, curr_interm in zip(batched_input, image_embeddings, interm_embeddings): + if "point_coords" in image_record: + points = (image_record["point_coords"], image_record["point_labels"]) + else: + points = None + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=image_record.get("boxes", None), + masks=image_record.get("mask_inputs", None), + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=curr_embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + hq_token_only=hq_token_only, + interm_embeddings=curr_interm.unsqueeze(0).unsqueeze(0), + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=image_record["image"].shape[-2:], + original_size=image_record["original_size"], + ) + masks = masks > self.mask_threshold + outputs.append( + { + "masks": masks, + "iou_predictions": iou_predictions, + "low_res_logits": low_res_masks, + } + ) + return outputs + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. 
+ """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x \ No newline at end of file diff --git a/inpaint/plugins/segment_anything/modeling/tiny_vit_sam.py b/inpaint/plugins/segment_anything/modeling/tiny_vit_sam.py new file mode 100644 index 0000000..a5127c7 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/tiny_vit_sam.py @@ -0,0 +1,822 @@ +# -------------------------------------------------------- +# TinyViT Model Architecture +# Copyright (c) 2022 Microsoft +# Adapted from LeViT and Swin Transformer +# LeViT: (https://github.com/facebookresearch/levit) +# Swin: (https://github.com/microsoft/swin-transformer) +# Build the TinyViT Model +# -------------------------------------------------------- + +import collections +import itertools +import math +import warnings +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from typing import Tuple + + +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return x + return tuple(itertools.repeat(x, n)) + + return parse + + +to_2tuple = _ntuple(2) + + +def _trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2, + ) + + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. 
+ + NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are + applied while sampling the normal with mean/std applied, therefore a, b args + should be adjusted to match the range of mean, std args. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + with torch.no_grad(): + return _trunc_normal_(tensor, mean, std, a, b) + + +def drop_path( + x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True +): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * ( + x.ndim - 1 + ) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class TimmDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True): + super(TimmDropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f"drop_prob={round(self.drop_prob,3):0.3f}" + + +class Conv2d_BN(torch.nn.Sequential): + def __init__( + self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1 + ): + super().__init__() + self.add_module( + "c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False) + ) + bn = torch.nn.BatchNorm2d(b) + torch.nn.init.constant_(bn.weight, bn_weight_init) + torch.nn.init.constant_(bn.bias, 0) + self.add_module("bn", bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = torch.nn.Conv2d( + w.size(1) * self.c.groups, + w.size(0), + w.shape[2:], + stride=self.c.stride, + padding=self.c.padding, + dilation=self.c.dilation, + groups=self.c.groups, + ) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class DropPath(TimmDropPath): + def __init__(self, drop_prob=None): + super().__init__(drop_prob=drop_prob) + self.drop_prob = drop_prob + + def __repr__(self): + msg = super().__repr__() + msg += f"(drop_prob={self.drop_prob})" + return msg + + +class PatchEmbed(nn.Module): + def __init__(self, in_chans, embed_dim, resolution, activation): + super().__init__() + img_size: Tuple[int, int] = to_2tuple(resolution) + self.patches_resolution = (img_size[0] // 4, img_size[1] // 4) + self.num_patches = self.patches_resolution[0] * self.patches_resolution[1] + self.in_chans = 
in_chans + self.embed_dim = embed_dim + n = embed_dim + self.seq = nn.Sequential( + Conv2d_BN(in_chans, n // 2, 3, 2, 1), + activation(), + Conv2d_BN(n // 2, n, 3, 2, 1), + ) + + def forward(self, x): + return self.seq(x) + + +class MBConv(nn.Module): + def __init__(self, in_chans, out_chans, expand_ratio, activation, drop_path): + super().__init__() + self.in_chans = in_chans + self.hidden_chans = int(in_chans * expand_ratio) + self.out_chans = out_chans + + self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1) + self.act1 = activation() + + self.conv2 = Conv2d_BN( + self.hidden_chans, + self.hidden_chans, + ks=3, + stride=1, + pad=1, + groups=self.hidden_chans, + ) + self.act2 = activation() + + self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0) + self.act3 = activation() + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.act2(x) + + x = self.conv3(x) + + x = self.drop_path(x) + + x += shortcut + x = self.act3(x) + + return x + + +class PatchMerging(nn.Module): + def __init__(self, input_resolution, dim, out_dim, activation): + super().__init__() + + self.input_resolution = input_resolution + self.dim = dim + self.out_dim = out_dim + self.act = activation() + self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0) + stride_c = 2 + if out_dim == 320 or out_dim == 448 or out_dim == 576: + stride_c = 1 + self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim) + self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0) + + def forward(self, x): + if x.ndim == 3: + H, W = self.input_resolution + B = len(x) + # (B, C, H, W) + x = x.view(B, H, W, -1).permute(0, 3, 1, 2) + + x = self.conv1(x) + x = self.act(x) + + x = self.conv2(x) + x = self.act(x) + x = self.conv3(x) + x = x.flatten(2).transpose(1, 2) + return x + + +class ConvLayer(nn.Module): + def __init__( + self, + dim, + input_resolution, + depth, + activation, + drop_path=0.0, + downsample=None, + use_checkpoint=False, + out_dim=None, + conv_expand_ratio=4.0, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + MBConv( + dim, + dim, + conv_expand_ratio, + activation, + drop_path[i] if isinstance(drop_path, list) else drop_path, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, out_dim=out_dim, activation=activation + ) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.norm = nn.LayerNorm(in_features) + self.fc1 = nn.Linear(in_features, hidden_features) + self.fc2 = nn.Linear(hidden_features, out_features) + self.act = act_layer() + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.norm(x) + + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(torch.nn.Module): + def __init__( + 
self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=(14, 14), + ): + super().__init__() + # (h, w) + assert isinstance(resolution, tuple) and len(resolution) == 2 + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + h = self.dh + nh_kd * 2 + + self.norm = nn.LayerNorm(dim) + self.qkv = nn.Linear(dim, h) + self.proj = nn.Linear(self.dh, dim) + + points = list(itertools.product(range(resolution[0]), range(resolution[1]))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter( + torch.zeros(num_heads, len(attention_offsets)) + ) + self.register_buffer( + "attention_bias_idxs", torch.LongTensor(idxs).view(N, N), persistent=False + ) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and hasattr(self, "ab"): + del self.ab + else: + self.register_buffer( + "ab", + self.attention_biases[:, self.attention_bias_idxs], + persistent=False, + ) + + def forward(self, x): # x (B,N,C) + B, N, _ = x.shape + + # Normalization + x = self.norm(x) + + qkv = self.qkv(x) + # (B, N, num_heads, d) + q, k, v = qkv.view(B, N, self.num_heads, -1).split( + [self.key_dim, self.key_dim, self.d], dim=3 + ) + # (B, num_heads, N, d) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + ( + self.attention_biases[:, self.attention_bias_idxs] + if self.training + else self.ab + ) + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class TinyViTBlock(nn.Module): + r"""TinyViT Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int, int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + local_conv_size (int): the kernel size of the convolution between + Attention and MLP. Default: 3 + activation: the activation function. 
Default: nn.GELU + """ + + def __init__( + self, + dim, + input_resolution, + num_heads, + window_size=7, + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, + local_conv_size=3, + activation=nn.GELU, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + assert window_size > 0, "window_size must be greater than 0" + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + assert dim % num_heads == 0, "dim must be divisible by num_heads" + head_dim = dim // num_heads + + window_resolution = (window_size, window_size) + self.attn = Attention( + dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution + ) + + mlp_hidden_dim = int(dim * mlp_ratio) + mlp_activation = activation + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=mlp_activation, + drop=drop, + ) + + pad = local_conv_size // 2 + self.local_conv = Conv2d_BN( + dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim + ) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + res_x = x + if H == self.window_size and W == self.window_size: + x = self.attn(x) + else: + x = x.view(B, H, W, C) + pad_b = (self.window_size - H % self.window_size) % self.window_size + pad_r = (self.window_size - W % self.window_size) % self.window_size + padding = pad_b > 0 or pad_r > 0 + + if padding: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_size + nW = pW // self.window_size + # window partition + x = ( + x.view(B, nH, self.window_size, nW, self.window_size, C) + .transpose(2, 3) + .reshape(B * nH * nW, self.window_size * self.window_size, C) + ) + x = self.attn(x) + # window reverse + x = ( + x.view(B, nH, nW, self.window_size, self.window_size, C) + .transpose(2, 3) + .reshape(B, pH, pW, C) + ) + + if padding: + x = x[:, :H, :W].contiguous() + + x = x.view(B, L, C) + + x = res_x + self.drop_path(x) + + x = x.transpose(1, 2).reshape(B, C, H, W) + x = self.local_conv(x) + x = x.view(B, C, L).transpose(1, 2) + + x = x + self.drop_path(self.mlp(x)) + return x + + def extra_repr(self) -> str: + return ( + f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " + f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}" + ) + + +class BasicLayer(nn.Module): + """A basic TinyViT layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3 + activation: the activation function. Default: nn.GELU + out_dim: the output dimension of the layer. 
Default: dim + """ + + def __init__( + self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, + downsample=None, + use_checkpoint=False, + local_conv_size=3, + activation=nn.GELU, + out_dim=None, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + TinyViTBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) + else drop_path, + local_conv_size=local_conv_size, + activation=activation, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, out_dim=out_dim, activation=activation + ) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class TinyViT(nn.Module): + def __init__( + self, + img_size=224, + in_chans=3, + num_classes=1000, + embed_dims=[96, 192, 384, 768], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4.0, + drop_rate=0.0, + drop_path_rate=0.1, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=1.0, + ): + super().__init__() + self.img_size = img_size + self.num_classes = num_classes + self.depths = depths + self.num_layers = len(depths) + self.mlp_ratio = mlp_ratio + + activation = nn.GELU + + self.patch_embed = PatchEmbed( + in_chans=in_chans, + embed_dim=embed_dims[0], + resolution=img_size, + activation=activation, + ) + + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # stochastic depth + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + kwargs = dict( + dim=embed_dims[i_layer], + input_resolution=( + patches_resolution[0] + // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + patches_resolution[1] + // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + ), + # input_resolution=(patches_resolution[0] // (2 ** i_layer), + # patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)], + activation=activation, + ) + if i_layer == 0: + layer = ConvLayer( + conv_expand_ratio=mbconv_expand_ratio, + **kwargs, + ) + 
else: + layer = BasicLayer( + num_heads=num_heads[i_layer], + window_size=window_sizes[i_layer], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + local_conv_size=local_conv_size, + **kwargs, + ) + self.layers.append(layer) + + # Classifier head + self.norm_head = nn.LayerNorm(embed_dims[-1]) + self.head = ( + nn.Linear(embed_dims[-1], num_classes) + if num_classes > 0 + else torch.nn.Identity() + ) + + # init weights + self.apply(self._init_weights) + self.set_layer_lr_decay(layer_lr_decay) + self.neck = nn.Sequential( + nn.Conv2d( + embed_dims[-1], + 256, + kernel_size=1, + bias=False, + ), + LayerNorm2d(256), + nn.Conv2d( + 256, + 256, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(256), + ) + + def set_layer_lr_decay(self, layer_lr_decay): + decay_rate = layer_lr_decay + + # layers -> blocks (depth) + depth = sum(self.depths) + lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)] + # print("LR SCALES:", lr_scales) + + def _set_lr_scale(m, scale): + for p in m.parameters(): + p.lr_scale = scale + + self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0])) + i = 0 + for layer in self.layers: + for block in layer.blocks: + block.apply(lambda x: _set_lr_scale(x, lr_scales[i])) + i += 1 + if layer.downsample is not None: + layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1])) + assert i == depth + for m in [self.norm_head, self.head]: + m.apply(lambda x: _set_lr_scale(x, lr_scales[-1])) + + for k, p in self.named_parameters(): + p.param_name = k + + def _check_lr_scale(m): + for p in m.parameters(): + assert hasattr(p, "lr_scale"), p.param_name + + self.apply(_check_lr_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {"attention_biases"} + + def forward_features(self, x): + # x: (N, C, H, W) + x = self.patch_embed(x) + + x = self.layers[0](x) + start_i = 1 + + for i in range(start_i, len(self.layers)): + layer = self.layers[i] + x = layer(x) + B, _, C = x.size() + x = x.view(B, 64, 64, C) + x = x.permute(0, 3, 1, 2) + x = self.neck(x) + return x + + def forward(self, x): + x = self.forward_features(x) + # x = self.norm_head(x) + # x = self.head(x) + return x diff --git a/inpaint/plugins/segment_anything/modeling/transformer.py b/inpaint/plugins/segment_anything/modeling/transformer.py new file mode 100644 index 0000000..f1a2812 --- /dev/null +++ b/inpaint/plugins/segment_anything/modeling/transformer.py @@ -0,0 +1,240 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import Tensor, nn + +import math +from typing import Tuple, Type + +from .common import MLPBlock + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. 
+ + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
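        # Illustrative numbers (assumed, not taken from this patch): with embedding_dim=256,
        # downsample_rate=2 and num_heads=8, the projections below map 256 -> internal_dim=128,
        # each head then attends over c_per_head = 128 / 8 = 16 channels, and out_proj maps the
        # recombined 128 channels back to 256, so downsampling only narrows the attention width.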
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(embedding_dim, self.internal_dim) + self.v_proj = nn.Linear(embedding_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Attention + _, _, _, c_per_head = q.shape + attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ v + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/inpaint/plugins/segment_anything/predictor.py b/inpaint/plugins/segment_anything/predictor.py new file mode 100644 index 0000000..23d0649 --- /dev/null +++ b/inpaint/plugins/segment_anything/predictor.py @@ -0,0 +1,285 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +from .modeling import Sam + +from typing import Optional, Tuple + + +class SamPredictor: + def __init__( + self, + sam_model: Sam, + ) -> None: + """ + Uses SAM to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam): The model to use for mask prediction. + """ + super().__init__() + self.model = sam_model + from .utils.transforms import ResizeLongestSide + + self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) + self.reset_image() + + def set_image( + self, + image: np.ndarray, + image_format: str = "RGB", + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray): The image for calculating masks. Expects an + image in HWC uint8 format, with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + assert image_format in [ + "RGB", + "BGR", + ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." + if image_format != self.model.image_format: + image = image[..., ::-1] + + # Transform the image to the form expected by the model + input_image = self.transform.apply_image(image) + input_image_torch = torch.as_tensor(input_image, device=self.device) + input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[ + None, :, :, : + ] + + self.set_torch_image(input_image_torch, image.shape[:2]) + + @torch.no_grad() + def set_torch_image( + self, + transformed_image: torch.Tensor, + original_image_size: Tuple[int, ...], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. 
Expects the input + image to be already transformed to the format expected by the model. + + Arguments: + transformed_image (torch.Tensor): The input image, with shape + 1x3xHxW, which has been transformed with ResizeLongestSide. + original_image_size (tuple(int, int)): The size of the image + before transformation, in (H, W) format. + """ + assert ( + len(transformed_image.shape) == 4 + and transformed_image.shape[1] == 3 + and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size + ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." + self.reset_image() + + self.original_size = original_image_size + self.input_size = tuple(transformed_image.shape[-2:]) + input_image = self.model.preprocess(transformed_image) + self.features = self.model.image_encoder(input_image) + self.is_image_set = True + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + # Transform input prompts + coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." 
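Editor's note: a minimal usage sketch of the predictor API documented above. It assumes a Sam instance `sam` built elsewhere in the package (outside this hunk) and an HWC uint8 RGB image `image`; the click coordinates are illustrative only.

import numpy as np

predictor = SamPredictor(sam)                 # `sam` is an already-built Sam model (assumption)
predictor.set_image(image)                    # compute the image embedding once
masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[320, 240]]),      # one foreground click, (X, Y) in pixels
    point_labels=np.array([1]),
    multimask_output=True,                    # return three candidate masks with quality scores
)
best_mask = masks[np.argmax(scores)]          # CxHxW output -> pick the highest-scoring mask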
+ point_coords = self.transform.apply_coords(point_coords, self.original_size) + coords_torch = torch.as_tensor( + point_coords, dtype=torch.float, device=self.device + ) + labels_torch = torch.as_tensor( + point_labels, dtype=torch.int, device=self.device + ) + coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] + if box is not None: + box = self.transform.apply_boxes(box, self.original_size) + box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) + box_torch = box_torch[None, :] + if mask_input is not None: + mask_input_torch = torch.as_tensor( + mask_input, dtype=torch.float, device=self.device + ) + mask_input_torch = mask_input_torch[None, :, :, :] + + masks, iou_predictions, low_res_masks = self.predict_torch( + coords_torch, + labels_torch, + box_torch, + mask_input_torch, + multimask_output, + return_logits=return_logits, + ) + + masks = masks[0].detach().cpu().numpy() + iou_predictions = iou_predictions[0].detach().cpu().numpy() + low_res_masks = low_res_masks[0].detach().cpu().numpy() + return masks, iou_predictions, low_res_masks + + @torch.no_grad() + def predict_torch( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using ResizeLongestSide. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." 
+ ) + + if point_coords is not None: + points = (point_coords, point_labels) + else: + points = None + + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=boxes, + masks=mask_input, + ) + + # Predict masks + low_res_masks, iou_predictions = self.model.mask_decoder( + image_embeddings=self.features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + + # Upscale the masks to the original image resolution + masks = self.model.postprocess_masks( + low_res_masks, self.input_size, self.original_size + ) + + if not return_logits: + masks = masks > self.model.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert ( + self.features is not None + ), "Features must exist if an image has been set." + return self.features + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_image(self) -> None: + """Resets the currently set image.""" + self.is_image_set = False + self.features = None + self.orig_h = None + self.orig_w = None + self.input_h = None + self.input_w = None diff --git a/inpaint/plugins/segment_anything/predictor_hq.py b/inpaint/plugins/segment_anything/predictor_hq.py new file mode 100644 index 0000000..d8fd50f --- /dev/null +++ b/inpaint/plugins/segment_anything/predictor_hq.py @@ -0,0 +1,292 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +from .modeling import Sam + +from typing import Optional, Tuple + +from .utils.transforms import ResizeLongestSide + + +class SamHQPredictor: + def __init__( + self, + sam_model: Sam, + ) -> None: + """ + Uses SAM to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam): The model to use for mask prediction. + """ + super().__init__() + self.model = sam_model + self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) + self.reset_image() + + def set_image( + self, + image: np.ndarray, + image_format: str = "RGB", + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray): The image for calculating masks. Expects an + image in HWC uint8 format, with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + assert image_format in [ + "RGB", + "BGR", + ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." 
+ # import pdb;pdb.set_trace() + if image_format != self.model.image_format: + image = image[..., ::-1] + + # Transform the image to the form expected by the model + # import pdb;pdb.set_trace() + input_image = self.transform.apply_image(image) + input_image_torch = torch.as_tensor(input_image, device=self.device) + input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[ + None, :, :, : + ] + + self.set_torch_image(input_image_torch, image.shape[:2]) + + @torch.no_grad() + def set_torch_image( + self, + transformed_image: torch.Tensor, + original_image_size: Tuple[int, ...], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. Expects the input + image to be already transformed to the format expected by the model. + + Arguments: + transformed_image (torch.Tensor): The input image, with shape + 1x3xHxW, which has been transformed with ResizeLongestSide. + original_image_size (tuple(int, int)): The size of the image + before transformation, in (H, W) format. + """ + assert ( + len(transformed_image.shape) == 4 + and transformed_image.shape[1] == 3 + and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size + ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." + self.reset_image() + + self.original_size = original_image_size + self.input_size = tuple(transformed_image.shape[-2:]) + input_image = self.model.preprocess(transformed_image) + self.features, self.interm_features = self.model.image_encoder(input_image) + self.is_image_set = True + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + hq_token_only: bool = False, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. 
+ """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + # Transform input prompts + coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = self.transform.apply_coords(point_coords, self.original_size) + coords_torch = torch.as_tensor( + point_coords, dtype=torch.float, device=self.device + ) + labels_torch = torch.as_tensor( + point_labels, dtype=torch.int, device=self.device + ) + coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] + if box is not None: + box = self.transform.apply_boxes(box, self.original_size) + box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) + box_torch = box_torch[None, :] + if mask_input is not None: + mask_input_torch = torch.as_tensor( + mask_input, dtype=torch.float, device=self.device + ) + mask_input_torch = mask_input_torch[None, :, :, :] + + masks, iou_predictions, low_res_masks = self.predict_torch( + coords_torch, + labels_torch, + box_torch, + mask_input_torch, + multimask_output, + return_logits=return_logits, + hq_token_only=hq_token_only, + ) + + masks_np = masks[0].detach().cpu().numpy() + iou_predictions_np = iou_predictions[0].detach().cpu().numpy() + low_res_masks_np = low_res_masks[0].detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + @torch.no_grad() + def predict_torch( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + hq_token_only: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using ResizeLongestSide. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. 
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + if point_coords is not None: + points = (point_coords, point_labels) + else: + points = None + + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=boxes, + masks=mask_input, + ) + + # Predict masks + low_res_masks, iou_predictions = self.model.mask_decoder( + image_embeddings=self.features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + hq_token_only=hq_token_only, + interm_embeddings=self.interm_features, + ) + + # Upscale the masks to the original image resolution + masks = self.model.postprocess_masks( + low_res_masks, self.input_size, self.original_size + ) + + if not return_logits: + masks = masks > self.model.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert ( + self.features is not None + ), "Features must exist if an image has been set." + return self.features + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_image(self) -> None: + """Resets the currently set image.""" + self.is_image_set = False + self.features = None + self.orig_h = None + self.orig_w = None + self.input_h = None + self.input_w = None diff --git a/inpaint/plugins/segment_anything/utils/__init__.py b/inpaint/plugins/segment_anything/utils/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/inpaint/plugins/segment_anything/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/inpaint/plugins/segment_anything/utils/transforms.py b/inpaint/plugins/segment_anything/utils/transforms.py new file mode 100644 index 0000000..90f50ed --- /dev/null +++ b/inpaint/plugins/segment_anything/utils/transforms.py @@ -0,0 +1,112 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch.nn import functional as F +from torchvision.transforms.functional import resize, to_pil_image # type: ignore + +from copy import deepcopy +from typing import Tuple + + +class ResizeLongestSide: + """ + Resizes images to longest side 'target_length', as well as provides + methods for resizing coordinates and boxes. Provides methods for + transforming both numpy array and batched torch tensors. + """ + + def __init__(self, target_length: int) -> None: + self.target_length = target_length + + def apply_image(self, image: np.ndarray) -> np.ndarray: + """ + Expects a numpy array with shape HxWxC in uint8 format. 
+ """ + target_size = self.get_preprocess_shape( + image.shape[0], image.shape[1], self.target_length + ) + return np.array(resize(to_pil_image(image), target_size)) + + def apply_coords( + self, coords: np.ndarray, original_size: Tuple[int, ...] + ) -> np.ndarray: + """ + Expects a numpy array of length 2 in the final dimension. Requires the + original image size in (H, W) format. + """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).astype(float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes( + self, boxes: np.ndarray, original_size: Tuple[int, ...] + ) -> np.ndarray: + """ + Expects a numpy array shape Bx4. Requires the original image size + in (H, W) format. + """ + boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor: + """ + Expects batched images with shape BxCxHxW and float format. This + transformation may not exactly match apply_image. apply_image is + the transformation expected by the model. + """ + # Expects an image in BCHW format. May not exactly match apply_image. + target_size = self.get_preprocess_shape( + image.shape[0], image.shape[1], self.target_length + ) + return F.interpolate( + image, target_size, mode="bilinear", align_corners=False, antialias=True + ) + + def apply_coords_torch( + self, coords: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. Requires the + original image size in (H, W) format. + """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).to(torch.float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes_torch( + self, boxes: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with shape Bx4. Requires the original image + size in (H, W) format. + """ + boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + @staticmethod + def get_preprocess_shape( + oldh: int, oldw: int, long_side_length: int + ) -> Tuple[int, int]: + """ + Compute the output size given input size and target long side length. + """ + scale = long_side_length * 1.0 / max(oldh, oldw) + newh, neww = oldh * scale, oldw * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) diff --git a/inpaint/plugins/segment_anything2/__init__.py b/inpaint/plugins/segment_anything2/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/inpaint/plugins/segment_anything2/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/inpaint/plugins/segment_anything2/build_sam.py b/inpaint/plugins/segment_anything2/build_sam.py new file mode 100644 index 0000000..5100f70 --- /dev/null +++ b/inpaint/plugins/segment_anything2/build_sam.py @@ -0,0 +1,262 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
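Editor's note: to make the resize and coordinate bookkeeping in ResizeLongestSide above concrete, here is the arithmetic with illustrative numbers (a sketch, not part of the patch):

oldh, oldw, target = 600, 800, 1024
scale = target / max(oldh, oldw)                               # 1.28
newh, neww = int(oldh * scale + 0.5), int(oldw * scale + 0.5)  # get_preprocess_shape -> (768, 1024)
# apply_coords rescales prompt coordinates by the same per-axis factors:
x, y = 400, 300
x_new, y_new = x * (neww / oldw), y * (newh / oldh)            # (512.0, 384.0)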
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging + +import torch +from pathlib import Path + +from .modeling.backbones.hieradet import Hiera +from .modeling.backbones.image_encoder import ImageEncoder, FpnNeck +from .modeling.memory_attention import MemoryAttention, MemoryAttentionLayer +from .modeling.memory_encoder import MemoryEncoder, MaskDownSampler, Fuser, CXBlock +from .modeling.position_encoding import PositionEmbeddingSine +from .modeling.sam.transformer import RoPEAttention +from .modeling.sam2_base import SAM2Base + +CURRENT_DIR = Path(__file__).parent +CONFIG_DIR = CURRENT_DIR / "sam2_configs" + +common_kwargs = dict( + num_maskmem=7, + image_size=1024, + sigmoid_scale_for_mem_enc=20.0, + sigmoid_bias_for_mem_enc=-10.0, + use_mask_input_as_output_without_sam=True, + directly_add_no_mem_embed=True, + use_high_res_features_in_sam=True, + multimask_output_in_sam=True, + iou_prediction_use_sigmoid=True, + use_obj_ptrs_in_encoder=True, + add_tpos_enc_to_obj_ptrs=False, + only_obj_ptrs_in_the_past_for_eval=True, + pred_obj_scores=True, + pred_obj_scores_mlp=True, + fixed_no_obj_ptr=True, + multimask_output_for_tracking=True, + use_multimask_token_for_obj_ptr=True, + multimask_min_pt_num=0, + multimask_max_pt_num=1, + use_mlp_for_obj_ptr_proj=True, + compile_image_encoder=False, +) + + +def build_memory_attention(): + return MemoryAttention( + d_model=256, + pos_enc_at_input=True, + layer=MemoryAttentionLayer( + activation="relu", + dim_feedforward=2048, + dropout=0.1, + pos_enc_at_attn=False, + self_attention=RoPEAttention( + rope_theta=10000.0, + feat_sizes=[32, 32], + embedding_dim=256, + num_heads=1, + downsample_rate=1, + dropout=0.1, + ), + d_model=256, + pos_enc_at_cross_attn_keys=True, + pos_enc_at_cross_attn_queries=False, + cross_attention=RoPEAttention( + rope_theta=10000.0, + feat_sizes=[32, 32], + embedding_dim=256, + num_heads=1, + downsample_rate=1, + dropout=0.1, + kv_in_dim=64, + ), + ), + num_layers=4, + ) + + +def build_memory_encoder(): + return MemoryEncoder( + out_dim=64, + position_encoding=PositionEmbeddingSine( + num_pos_feats=64, normalize=True, scale=None, temperature=10000 + ), + mask_downsampler=MaskDownSampler( + kernel_size=3, + stride=2, + padding=1, + ), + fuser=Fuser( + layer=CXBlock( + dim=256, + kernel_size=7, + padding=3, + layer_scale_init_value=1e-6, + use_dwconv=True, + ), + num_layers=2, + ), + ) + + +def build_sam2_tiny(): + return SAM2Base( + **common_kwargs, + image_encoder=ImageEncoder( + scalp=1, + trunk=Hiera( + embed_dim=96, + num_heads=1, + stages=(1, 2, 7, 2), + global_att_blocks=(5, 7, 9), + window_pos_embed_bkg_spatial_size=(7, 7), + window_spec=(8, 4, 14, 7), + ), + neck=FpnNeck( + position_encoding=PositionEmbeddingSine( + num_pos_feats=256, + normalize=True, + scale=None, + temperature=10000, + ), + d_model=256, + backbone_channel_list=[768, 384, 192, 96], + fpn_top_down_levels=[2, 3], + fpn_interp_model="nearest", + ), + ), + memory_attention=build_memory_attention(), + memory_encoder=build_memory_encoder(), + ) + + +def build_sam2_small(): + return SAM2Base( + **common_kwargs, + image_encoder=ImageEncoder( + scalp=1, + trunk=Hiera( + embed_dim=96, + num_heads=1, + stages=(1, 2, 11, 2), + global_att_blocks=(7, 10, 13), + window_pos_embed_bkg_spatial_size=(7, 7), + window_spec=(8, 4, 14, 7), + ), + neck=FpnNeck( + position_encoding=PositionEmbeddingSine( + num_pos_feats=256, + normalize=True, + scale=None, + temperature=10000, + ), + 
d_model=256, + backbone_channel_list=[768, 384, 192, 96], + fpn_top_down_levels=[2, 3], + fpn_interp_model="nearest", + ), + ), + memory_attention=build_memory_attention(), + memory_encoder=build_memory_encoder(), + ) + + +def build_sam2_base(): + return SAM2Base( + **common_kwargs, + image_encoder=ImageEncoder( + scalp=1, + trunk=Hiera( + embed_dim=112, + num_heads=2, + stages=(2, 3, 16, 3), + global_att_blocks=(12, 16, 20), + window_pos_embed_bkg_spatial_size=(14, 14), + window_spec=(8, 4, 14, 7), + ), + neck=FpnNeck( + position_encoding=PositionEmbeddingSine( + num_pos_feats=256, + normalize=True, + scale=None, + temperature=10000, + ), + d_model=256, + backbone_channel_list=[896, 448, 224, 112], + fpn_top_down_levels=[2, 3], + fpn_interp_model="nearest", + ), + ), + memory_attention=build_memory_attention(), + memory_encoder=build_memory_encoder(), + ) + + +def build_sam2_large(): + return SAM2Base( + **common_kwargs, + image_encoder=ImageEncoder( + scalp=1, + trunk=Hiera( + embed_dim=144, + num_heads=2, + stages=(2, 6, 36, 4), + global_att_blocks=(23, 33, 43), + window_pos_embed_bkg_spatial_size=(7, 7), + window_spec=(8, 4, 16, 8), + ), + neck=FpnNeck( + position_encoding=PositionEmbeddingSine( + num_pos_feats=256, + normalize=True, + scale=None, + temperature=10000, + ), + d_model=256, + backbone_channel_list=[1152, 576, 288, 144], + fpn_top_down_levels=[2, 3], + fpn_interp_model="nearest", + ), + ), + memory_attention=build_memory_attention(), + memory_encoder=build_memory_encoder(), + ) + + +sam2_model_registry = { + "sam2_tiny": build_sam2_tiny, + "sam2_small": build_sam2_small, + "sam2_base": build_sam2_base, + "sam2_large": build_sam2_large, +} + + +def build_sam2( + name, + ckpt_path=None, + device="cuda", + mode="eval", +): + model = sam2_model_registry[name]() + _load_checkpoint(model, ckpt_path) + model = model.to(device) + if mode == "eval": + model.eval() + return model + + +def _load_checkpoint(model, ckpt_path): + if ckpt_path is not None: + sd = torch.load(ckpt_path, map_location="cpu")["model"] + missing_keys, unexpected_keys = model.load_state_dict(sd) + if missing_keys: + logging.error(missing_keys) + raise RuntimeError() + if unexpected_keys: + logging.error(unexpected_keys) + raise RuntimeError() + logging.info("Loaded checkpoint sucessfully") diff --git a/inpaint/plugins/segment_anything2/modeling/__init__.py b/inpaint/plugins/segment_anything2/modeling/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/inpaint/plugins/segment_anything2/modeling/backbones/__init__.py b/inpaint/plugins/segment_anything2/modeling/backbones/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/backbones/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
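Editor's note: a hedged usage sketch for the SAM 2 builders above. The registry keys come straight from this file; the checkpoint handling is an assumption, since checkpoints are downloaded elsewhere in the project.

import torch
from inpaint.plugins.segment_anything2.build_sam import build_sam2, sam2_model_registry

print(sorted(sam2_model_registry))      # ['sam2_base', 'sam2_large', 'sam2_small', 'sam2_tiny']
device = "cuda" if torch.cuda.is_available() else "cpu"
# ckpt_path=None skips _load_checkpoint; pass a real SAM 2 checkpoint path in practice (assumed).
model = build_sam2("sam2_tiny", ckpt_path=None, device=device)  # mode="eval" is the default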
diff --git a/inpaint/plugins/segment_anything2/modeling/backbones/hieradet.py b/inpaint/plugins/segment_anything2/modeling/backbones/hieradet.py new file mode 100644 index 0000000..9375b6a --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/backbones/hieradet.py @@ -0,0 +1,295 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from functools import partial +from typing import List, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..backbones.utils import ( + PatchEmbed, + window_partition, + window_unpartition, +) + +from ..sam2_utils import DropPath, MLP + + +def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor: + if pool is None: + return x + # (B, H, W, C) -> (B, C, H, W) + x = x.permute(0, 3, 1, 2) + x = pool(x) + # (B, C, H', W') -> (B, H', W', C) + x = x.permute(0, 2, 3, 1) + if norm: + x = norm(x) + + return x + + +class MultiScaleAttention(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + q_pool: nn.Module = None, + ): + super().__init__() + + self.dim = dim + self.dim_out = dim_out + + self.num_heads = num_heads + head_dim = dim_out // num_heads + self.scale = head_dim**-0.5 + + self.q_pool = q_pool + self.qkv = nn.Linear(dim, dim_out * 3) + self.proj = nn.Linear(dim_out, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (B, H * W, 3, nHead, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) + # q, k, v with shape (B, H * W, nheads, C) + q, k, v = torch.unbind(qkv, 2) + + # Q pooling (for downsample at stage changes) + if self.q_pool: + q = do_pool(q.reshape(B, H, W, -1), self.q_pool) + H, W = q.shape[1:3] # downsampled shape + q = q.reshape(B, H * W, self.num_heads, -1) + + # Torch's SDPA expects [B, nheads, H*W, C] so we transpose + x = F.scaled_dot_product_attention( + q.transpose(1, 2), + k.transpose(1, 2), + v.transpose(1, 2), + ) + # Transpose back + x = x.transpose(1, 2) + x = x.reshape(B, H, W, -1) + + x = self.proj(x) + + return x + + +class MultiScaleBlock(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + mlp_ratio: float = 4.0, + drop_path: float = 0.0, + norm_layer: Union[nn.Module, str] = "LayerNorm", + q_stride: Tuple[int, int] = None, + act_layer: nn.Module = nn.GELU, + window_size: int = 0, + ): + super().__init__() + + if isinstance(norm_layer, str): + norm_layer = partial(getattr(nn, norm_layer), eps=1e-6) + + self.dim = dim + self.dim_out = dim_out + self.norm1 = norm_layer(dim) + + self.window_size = window_size + + self.pool, self.q_stride = None, q_stride + if self.q_stride: + self.pool = nn.MaxPool2d( + kernel_size=q_stride, stride=q_stride, ceil_mode=False + ) + + self.attn = MultiScaleAttention( + dim, + dim_out, + num_heads=num_heads, + q_pool=self.pool, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim_out) + self.mlp = MLP( + dim_out, + int(dim_out * mlp_ratio), + dim_out, + num_layers=2, + activation=act_layer, + ) + + if dim != dim_out: + self.proj = nn.Linear(dim, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x # B, H, W, C + x = self.norm1(x) + + # Skip connection + if self.dim != self.dim_out: + shortcut = do_pool(self.proj(x), self.pool) + + # Window partition + window_size = 
self.window_size + if window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, window_size) + + # Window Attention + Q Pooling (if stage change) + x = self.attn(x) + if self.q_stride: + # Shapes have changed due to Q pooling + window_size = self.window_size // self.q_stride[0] + H, W = shortcut.shape[1:3] + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + pad_hw = (H + pad_h, W + pad_w) + + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, window_size, pad_hw, (H, W)) + + x = shortcut + self.drop_path(x) + # MLP + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Hiera(nn.Module): + """ + Reference: https://arxiv.org/abs/2306.00989 + """ + + def __init__( + self, + embed_dim: int = 96, # initial embed dim + num_heads: int = 1, # initial number of heads + drop_path_rate: float = 0.0, # stochastic depth + q_pool: int = 3, # number of q_pool stages + q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages + stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage + dim_mul: float = 2.0, # dim_mul factor at stage shift + head_mul: float = 2.0, # head_mul factor at stage shift + window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14), + # window size per stage, when not using global att. + window_spec: Tuple[int, ...] = ( + 8, + 4, + 14, + 7, + ), + # global attn in these blocks + global_att_blocks: Tuple[int, ...] = ( + 12, + 16, + 20, + ), + return_interm_layers=True, # return feats from every stage + ): + super().__init__() + + assert len(stages) == len(window_spec) + self.window_spec = window_spec + + depth = sum(stages) + self.q_stride = q_stride + self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] + assert 0 <= q_pool <= len(self.stage_ends[:-1]) + self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] + self.return_interm_layers = return_interm_layers + + self.patch_embed = PatchEmbed( + embed_dim=embed_dim, + ) + # Which blocks have global att? 
+ self.global_att_blocks = global_att_blocks + + # Windowed positional embedding (https://arxiv.org/abs/2311.05613) + self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size + self.pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size) + ) + self.pos_embed_window = nn.Parameter( + torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]) + ) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + + cur_stage = 1 + self.blocks = nn.ModuleList() + + for i in range(depth): + dim_out = embed_dim + # lags by a block, so first block of + # next stage uses an initial window size + # of previous stage and final window size of current stage + window_size = self.window_spec[cur_stage - 1] + + if self.global_att_blocks is not None: + window_size = 0 if i in self.global_att_blocks else window_size + + if i - 1 in self.stage_ends: + dim_out = int(embed_dim * dim_mul) + num_heads = int(num_heads * head_mul) + cur_stage += 1 + + block = MultiScaleBlock( + dim=embed_dim, + dim_out=dim_out, + num_heads=num_heads, + drop_path=dpr[i], + q_stride=self.q_stride if i in self.q_pool_blocks else None, + window_size=window_size, + ) + + embed_dim = dim_out + self.blocks.append(block) + + self.channel_list = ( + [self.blocks[i].dim_out for i in self.stage_ends[::-1]] + if return_interm_layers + else [self.blocks[-1].dim_out] + ) + + def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor: + h, w = hw + window_embed = self.pos_embed_window + pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") + pos_embed = pos_embed + window_embed.tile( + [x // y for x, y in zip(pos_embed.shape, window_embed.shape)] + ) + pos_embed = pos_embed.permute(0, 2, 3, 1) + return pos_embed + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + x = self.patch_embed(x) + # x: (B, H, W, C) + + # Add pos embed + x = x + self._get_pos_embed(x.shape[1:3]) + + outputs = [] + for i, blk in enumerate(self.blocks): + x = blk(x) + if (i == self.stage_ends[-1]) or ( + i in self.stage_ends and self.return_interm_layers + ): + feats = x.permute(0, 3, 1, 2) + outputs.append(feats) + + return outputs diff --git a/inpaint/plugins/segment_anything2/modeling/backbones/image_encoder.py b/inpaint/plugins/segment_anything2/modeling/backbones/image_encoder.py new file mode 100644 index 0000000..5f92baf --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/backbones/image_encoder.py @@ -0,0 +1,133 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ImageEncoder(nn.Module): + def __init__( + self, + trunk: nn.Module, + neck: nn.Module, + scalp: int = 0, + ): + super().__init__() + self.trunk = trunk + self.neck = neck + self.scalp = scalp + assert ( + self.trunk.channel_list == self.neck.backbone_channel_list + ), f"Channel dims of trunk and neck do not match. 
Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}" + + def forward(self, sample: torch.Tensor): + # Forward through backbone + features, pos = self.neck(self.trunk(sample)) + if self.scalp > 0: + # Discard the lowest resolution features + features, pos = features[: -self.scalp], pos[: -self.scalp] + + src = features[-1] + output = { + "vision_features": src, + "vision_pos_enc": pos, + "backbone_fpn": features, + } + return output + + +class FpnNeck(nn.Module): + """ + A modified variant of Feature Pyramid Network (FPN) neck + (we remove output conv and also do bicubic interpolation similar to ViT + pos embed interpolation) + """ + + def __init__( + self, + position_encoding: nn.Module, + d_model: int, + backbone_channel_list: List[int], + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + fpn_interp_model: str = "bilinear", + fuse_type: str = "sum", + fpn_top_down_levels: Optional[List[int]] = None, + ): + """Initialize the neck + :param trunk: the backbone + :param position_encoding: the positional encoding to use + :param d_model: the dimension of the model + :param neck_norm: the normalization to use + """ + super().__init__() + self.position_encoding = position_encoding + self.convs = nn.ModuleList() + self.backbone_channel_list = backbone_channel_list + for dim in backbone_channel_list: + current = nn.Sequential() + current.add_module( + "conv", + nn.Conv2d( + in_channels=dim, + out_channels=d_model, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ), + ) + + self.convs.append(current) + self.fpn_interp_model = fpn_interp_model + assert fuse_type in ["sum", "avg"] + self.fuse_type = fuse_type + + # levels to have top-down features in its outputs + # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3 + # have top-down propagation, while outputs of level 0 and level 1 have only + # lateral features from the same backbone level. + if fpn_top_down_levels is None: + # default is to have top-down features on all levels + fpn_top_down_levels = range(len(self.convs)) + self.fpn_top_down_levels = list(fpn_top_down_levels) + + def forward(self, xs: List[torch.Tensor]): + + out = [None] * len(self.convs) + pos = [None] * len(self.convs) + assert len(xs) == len(self.convs) + # fpn forward pass + # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py + prev_features = None + # forward in top-down order (from low to high resolution) + n = len(self.convs) - 1 + for i in range(n, -1, -1): + x = xs[i] + lateral_features = self.convs[n - i](x) + if i in self.fpn_top_down_levels and prev_features is not None: + top_down_features = F.interpolate( + prev_features.to(dtype=torch.float32), + scale_factor=2.0, + mode=self.fpn_interp_model, + align_corners=( + None if self.fpn_interp_model == "nearest" else False + ), + antialias=False, + ) + prev_features = lateral_features + top_down_features + if self.fuse_type == "avg": + prev_features /= 2 + else: + prev_features = lateral_features + x_out = prev_features + out[i] = x_out + pos[i] = self.position_encoding(x_out).to(x_out.dtype) + + return out, pos diff --git a/inpaint/plugins/segment_anything2/modeling/backbones/utils.py b/inpaint/plugins/segment_anything2/modeling/backbones/utils.py new file mode 100644 index 0000000..32d55c7 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/backbones/utils.py @@ -0,0 +1,95 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
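Editor's note: the index arithmetic in FpnNeck.forward above (lateral conv self.convs[n - i] applied to trunk output xs[i]) is easy to misread, because convs are built in backbone_channel_list order (lowest resolution first) while the trunk returns features from highest to lowest resolution. A small standalone check with the sam2_tiny channel lists (editor's sketch):

backbone_channel_list = [768, 384, 192, 96]  # conv order: lowest resolution first
trunk_channels = [96, 192, 384, 768]         # Hiera outputs: highest resolution first
n = len(backbone_channel_list) - 1
for i in range(n, -1, -1):                   # top-down pass, low resolution to high resolution
    assert backbone_channel_list[n - i] == trunk_channels[i]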
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +"""Some utilities for backbones, in particular for windowing""" + +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def window_partition(x, window_size): + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = ( + x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + ) + return windows, (Hp, Wp) + + +def window_unpartition(windows, window_size, pad_hw, hw): + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view( + B, Hp // window_size, Wp // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, ...] = (7, 7), + stride: Tuple[int, ...] = (4, 4), + padding: Tuple[int, ...] = (3, 3), + in_chans: int = 3, + embed_dim: int = 768, + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. + """ + super().__init__() + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/inpaint/plugins/segment_anything2/modeling/memory_attention.py b/inpaint/plugins/segment_anything2/modeling/memory_attention.py new file mode 100644 index 0000000..8a14327 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/memory_attention.py @@ -0,0 +1,169 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
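Editor's note: a round-trip sketch for the windowing helpers in backbones/utils.py above; it assumes the package is importable under the path shown in the diff header and uses illustrative sizes.

import torch
from inpaint.plugins.segment_anything2.modeling.backbones.utils import (
    window_partition,
    window_unpartition,
)

x = torch.randn(2, 28, 28, 96)               # (B, H, W, C); 28 is not a multiple of the window size
windows, pad_hw = window_partition(x, 8)     # zero-pads to 32x32 before splitting into 8x8 windows
assert windows.shape == (2 * 4 * 4, 8, 8, 96) and pad_hw == (32, 32)
x_back = window_unpartition(windows, 8, pad_hw, (28, 28))
assert torch.equal(x_back, x)                # padding is cropped away on the way back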
+ +from typing import Optional + +import torch +from torch import nn, Tensor + +from .sam.transformer import RoPEAttention + +from .sam2_utils import get_activation_fn, get_clones + + +class MemoryAttentionLayer(nn.Module): + + def __init__( + self, + activation: str, + cross_attention: nn.Module, + d_model: int, + dim_feedforward: int, + dropout: float, + pos_enc_at_attn: bool, + pos_enc_at_cross_attn_keys: bool, + pos_enc_at_cross_attn_queries: bool, + self_attention: nn.Module, + ): + super().__init__() + self.d_model = d_model + self.dim_feedforward = dim_feedforward + self.dropout_value = dropout + self.self_attn = self_attention + self.cross_attn_image = cross_attention + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation_str = activation + self.activation = get_activation_fn(activation) + + # Where to add pos enc + self.pos_enc_at_attn = pos_enc_at_attn + self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries + self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys + + def _forward_sa(self, tgt, query_pos): + # Self-Attention + tgt2 = self.norm1(tgt) + q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 + tgt2 = self.self_attn(q, k, v=tgt2) + tgt = tgt + self.dropout1(tgt2) + return tgt + + def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0): + kwds = {} + if num_k_exclude_rope > 0: + assert isinstance(self.cross_attn_image, RoPEAttention) + kwds = {"num_k_exclude_rope": num_k_exclude_rope} + + # Cross-Attention + tgt2 = self.norm2(tgt) + tgt2 = self.cross_attn_image( + q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, + k=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + v=memory, + **kwds, + ) + tgt = tgt + self.dropout2(tgt2) + return tgt + + def forward( + self, + tgt, + memory, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + num_k_exclude_rope: int = 0, + ) -> torch.Tensor: + + # Self-Attn, Cross-Attn + tgt = self._forward_sa(tgt, query_pos) + tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) + # MLP + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + +class MemoryAttention(nn.Module): + def __init__( + self, + d_model: int, + pos_enc_at_input: bool, + layer: nn.Module, + num_layers: int, + batch_first: bool = True, # Do layers expect batch first input? 
+ ): + super().__init__() + self.d_model = d_model + self.layers = get_clones(layer, num_layers) + self.num_layers = num_layers + self.norm = nn.LayerNorm(d_model) + self.pos_enc_at_input = pos_enc_at_input + self.batch_first = batch_first + + def forward( + self, + curr: torch.Tensor, # self-attention inputs + memory: torch.Tensor, # cross-attention inputs + curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs + memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs + num_obj_ptr_tokens: int = 0, # number of object pointer *tokens* + ): + if isinstance(curr, list): + assert isinstance(curr_pos, list) + assert len(curr) == len(curr_pos) == 1 + curr, curr_pos = ( + curr[0], + curr_pos[0], + ) + + assert ( + curr.shape[1] == memory.shape[1] + ), "Batch size must be the same for curr and memory" + + output = curr + if self.pos_enc_at_input and curr_pos is not None: + output = output + 0.1 * curr_pos + + if self.batch_first: + # Convert to batch first + output = output.transpose(0, 1) + curr_pos = curr_pos.transpose(0, 1) + memory = memory.transpose(0, 1) + memory_pos = memory_pos.transpose(0, 1) + + for layer in self.layers: + kwds = {} + if isinstance(layer.cross_attn_image, RoPEAttention): + kwds = {"num_k_exclude_rope": num_obj_ptr_tokens} + + output = layer( + tgt=output, + memory=memory, + pos=memory_pos, + query_pos=curr_pos, + **kwds, + ) + normed_output = self.norm(output) + + if self.batch_first: + # Convert back to seq first + normed_output = normed_output.transpose(0, 1) + curr_pos = curr_pos.transpose(0, 1) + + return normed_output diff --git a/inpaint/plugins/segment_anything2/modeling/memory_encoder.py b/inpaint/plugins/segment_anything2/modeling/memory_encoder.py new file mode 100644 index 0000000..14cb6e7 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/memory_encoder.py @@ -0,0 +1,181 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .sam2_utils import DropPath, get_clones, LayerNorm2d + + +class MaskDownSampler(nn.Module): + """ + Progressively downsample a mask by total_stride, each time by stride. + Note that LayerNorm is applied per *token*, like in ViT. + + With each downsample (by a factor stride**2), channel capacity increases by the same factor. + In the end, we linearly project to embed_dim channels. + """ + + def __init__( + self, + embed_dim=256, + kernel_size=4, + stride=4, + padding=0, + total_stride=16, + activation=nn.GELU, + ): + super().__init__() + num_layers = int(math.log2(total_stride) // math.log2(stride)) + assert stride**num_layers == total_stride + self.encoder = nn.Sequential() + mask_in_chans, mask_out_chans = 1, 1 + for _ in range(num_layers): + mask_out_chans = mask_in_chans * (stride**2) + self.encoder.append( + nn.Conv2d( + mask_in_chans, + mask_out_chans, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + ) + self.encoder.append(LayerNorm2d(mask_out_chans)) + self.encoder.append(activation()) + mask_in_chans = mask_out_chans + + self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1)) + + def forward(self, x): + return self.encoder(x) + + +# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt) +class CXBlock(nn.Module): + r"""ConvNeXt Block. 
There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__( + self, + dim, + kernel_size=7, + padding=3, + drop_path=0.0, + layer_scale_init_value=1e-6, + use_dwconv=True, + ): + super().__init__() + self.dwconv = nn.Conv2d( + dim, + dim, + kernel_size=kernel_size, + padding=padding, + groups=dim if use_dwconv else 1, + ) # depthwise conv + self.norm = LayerNorm2d(dim, eps=1e-6) + self.pwconv1 = nn.Linear( + dim, 4 * dim + ) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + self.gamma = ( + nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True) + if layer_scale_init_value > 0 + else None + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + input = x + x = self.dwconv(x) + x = self.norm(x) + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +class Fuser(nn.Module): + def __init__(self, layer, num_layers, dim=None, input_projection=False): + super().__init__() + self.proj = nn.Identity() + self.layers = get_clones(layer, num_layers) + + if input_projection: + assert dim is not None + self.proj = nn.Conv2d(dim, dim, kernel_size=1) + + def forward(self, x): + # normally x: (N, C, H, W) + x = self.proj(x) + for layer in self.layers: + x = layer(x) + return x + + +class MemoryEncoder(nn.Module): + def __init__( + self, + out_dim, + mask_downsampler, + fuser, + position_encoding, + in_dim=256, # in_dim of pix_feats + ): + super().__init__() + + self.mask_downsampler = mask_downsampler + + self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1) + self.fuser = fuser + self.position_encoding = position_encoding + self.out_proj = nn.Identity() + if out_dim != in_dim: + self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1) + + def forward( + self, + pix_feat: torch.Tensor, + masks: torch.Tensor, + skip_mask_sigmoid: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + ## Process masks + # sigmoid, so that less domain shift from gt masks which are bool + if not skip_mask_sigmoid: + masks = F.sigmoid(masks) + masks = self.mask_downsampler(masks) + + ## Fuse pix_feats and downsampled masks + # in case the visual features are on CPU, cast them to CUDA + pix_feat = pix_feat.to(masks.device) + + x = self.pix_feat_proj(pix_feat) + x = x + masks + x = self.fuser(x) + x = self.out_proj(x) + + pos = self.position_encoding(x).to(x.dtype) + + return {"vision_features": x, "vision_pos_enc": [pos]} diff --git a/inpaint/plugins/segment_anything2/modeling/position_encoding.py b/inpaint/plugins/segment_anything2/modeling/position_encoding.py new file mode 100644 index 0000000..f4b57ae --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/position_encoding.py @@ -0,0 +1,216 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
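Editor's note: the MaskDownSampler docstring above describes its stride/channel arithmetic; with the values used in build_memory_encoder (stride=2, default total_stride=16 and embed_dim=256), it works out as follows (editor's sketch, not part of the patch):

import math

stride, total_stride, embed_dim = 2, 16, 256
num_layers = int(math.log2(total_stride) // math.log2(stride))   # 4 strided conv stages
chans = [1]
for _ in range(num_layers):
    chans.append(chans[-1] * stride**2)       # 1 -> 4 -> 16 -> 64 -> 256, then a 1x1 conv to embed_dim
print(num_layers, chans)                      # 4 [1, 4, 16, 64, 256]
# spatially, a 1024x1024 input mask ends up at 64x64, matching the 16x-strided image features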
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Any, Optional, Tuple + +import numpy as np + +import torch +from torch import nn + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__( + self, + num_pos_feats, + temperature: int = 10000, + normalize: bool = True, + scale: Optional[float] = None, + ): + super().__init__() + assert num_pos_feats % 2 == 0, "Expecting even model width" + self.num_pos_feats = num_pos_feats // 2 + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + self.cache = {} + + def _encode_xy(self, x, y): + # The positions are expected to be normalized + assert len(x) == len(y) and x.ndim == y.ndim == 1 + x_embed = x * self.scale + y_embed = y * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, None] / dim_t + pos_y = y_embed[:, None] / dim_t + pos_x = torch.stack( + (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2 + ).flatten(1) + pos_y = torch.stack( + (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2 + ).flatten(1) + return pos_x, pos_y + + @torch.no_grad() + def encode_boxes(self, x, y, w, h): + pos_x, pos_y = self._encode_xy(x, y) + pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1) + return pos + + encode = encode_boxes # Backwards compatibility + + @torch.no_grad() + def encode_points(self, x, y, labels): + (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape + assert bx == by and nx == ny and bx == bl and nx == nl + pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten()) + pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1) + pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2) + return pos + + @torch.no_grad() + def forward(self, x: torch.Tensor): + cache_key = (x.shape[-2], x.shape[-1]) + if cache_key in self.cache: + return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1) + y_embed = ( + torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device) + .view(1, -1, 1) + .repeat(x.shape[0], 1, x.shape[-1]) + ) + x_embed = ( + torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device) + .view(1, 1, -1) + .repeat(x.shape[0], x.shape[-2], 1) + ) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + self.cache[cache_key] = pos[0] + return pos + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. 
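# Editor's note: a standalone recap (not part of the SAM2 sources) of the output
# produced by PositionEmbeddingSine defined earlier in this file: for a feature map
# of shape (B, C, H, W) it returns a (B, num_pos_feats, H, W) encoding, the first
# half of the channels encoding y and the second half x, via interleaved sin/cos at
# geometrically spaced frequencies. The small grid below is illustrative only.
import math
import torch

num_pos_feats, temperature = 8, 10000            # per-axis width is num_pos_feats // 2 = 4
B, H, W = 1, 3, 5
y = torch.arange(1, H + 1, dtype=torch.float32).view(1, -1, 1).repeat(B, 1, W)
x = torch.arange(1, W + 1, dtype=torch.float32).view(1, 1, -1).repeat(B, H, 1)
scale = 2 * math.pi
y = y / (y[:, -1:, :] + 1e-6) * scale
x = x / (x[:, :, -1:] + 1e-6) * scale

dim_t = torch.arange(num_pos_feats // 2, dtype=torch.float32)
dim_t = temperature ** (2 * (dim_t // 2) / (num_pos_feats // 2))
pos_x, pos_y = x[..., None] / dim_t, y[..., None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
print(pos.shape)                                 # torch.Size([1, 8, 3, 5])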
+ """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C + + +# Rotary Positional Encoding, adapted from: +# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py +# 2. https://github.com/naver-ai/rope-vit +# 3. https://github.com/lucidrains/rotary-embedding-torch + + +def init_t_xy(end_x: int, end_y: int): + t = torch.arange(end_x * end_y, dtype=torch.float32) + t_x = (t % end_x).float() + t_y = torch.div(t, end_x, rounding_mode="floor").float() + return t_x, t_y + + +def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0): + freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + + t_x, t_y = init_t_xy(end_x, end_y) + freqs_x = torch.outer(t_x, freqs_x) + freqs_y = torch.outer(t_y, freqs_y) + freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x) + freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y) + return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1) + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[-2], x.shape[-1]) + shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_enc( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, + repeat_freqs_k: bool = False, +): + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = ( + torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + if xk.shape[-2] != 0 + else None + ) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + if xk_ is None: + # no keys to rotate, due to dropout + return xq_out.type_as(xq).to(xq.device), xk + # repeat freqs along seq_len dim to match k seq_len + if repeat_freqs_k: + r = xk_.shape[-2] // xq_.shape[-2] + freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1) + 
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) diff --git a/inpaint/plugins/segment_anything2/modeling/sam/__init__.py b/inpaint/plugins/segment_anything2/modeling/sam/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/sam/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/inpaint/plugins/segment_anything2/modeling/sam/mask_decoder.py b/inpaint/plugins/segment_anything2/modeling/sam/mask_decoder.py new file mode 100644 index 0000000..fb8bb05 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/sam/mask_decoder.py @@ -0,0 +1,295 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Optional, Tuple, Type + +import torch +from torch import nn + +from ..sam2_utils import LayerNorm2d, MLP + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + use_high_res_features: bool = False, + iou_prediction_use_sigmoid=False, + dynamic_multimask_via_stability=False, + dynamic_multimask_stability_delta=0.05, + dynamic_multimask_stability_thresh=0.98, + pred_obj_scores: bool = False, + pred_obj_scores_mlp: bool = False, + use_multimask_token_for_obj_ptr: bool = False, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. 
+ + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.pred_obj_scores = pred_obj_scores + if self.pred_obj_scores: + self.obj_score_token = nn.Embedding(1, transformer_dim) + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + activation(), + ) + self.use_high_res_features = use_high_res_features + if use_high_res_features: + self.conv_s0 = nn.Conv2d( + transformer_dim, transformer_dim // 8, kernel_size=1, stride=1 + ) + self.conv_s1 = nn.Conv2d( + transformer_dim, transformer_dim // 4, kernel_size=1, stride=1 + ) + + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, + iou_head_hidden_dim, + self.num_mask_tokens, + iou_head_depth, + sigmoid_output=iou_prediction_use_sigmoid, + ) + if self.pred_obj_scores: + self.pred_obj_score_head = nn.Linear(transformer_dim, 1) + if pred_obj_scores_mlp: + self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3) + + # When outputting a single mask, optionally we can dynamically fall back to the best + # multimask output token if the single mask output token gives low stability scores. + self.dynamic_multimask_via_stability = dynamic_multimask_via_stability + self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta + self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. 
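# Editor's note: a small standalone sketch (not part of the SAM2 sources) of how
# forward() below slices the output of predict_masks(). Token 0 is the single-mask
# output and tokens 1..3 are the multimask outputs, so multimask_output=True keeps
# channels 1: while multimask_output=False keeps channel 0:1. Shapes are illustrative.
import torch

B, H, W = 2, 64, 64
masks = torch.randn(B, 4, H, W)                   # 1 single-mask token + 3 multimask tokens
iou_pred = torch.rand(B, 4)

multi = (masks[:, 1:, :, :], iou_pred[:, 1:])     # multimask_output=True  -> (B, 3, H, W), (B, 3)
single = (masks[:, 0:1, :, :], iou_pred[:, 0:1])  # multimask_output=False -> (B, 1, H, W), (B, 1)
print(multi[0].shape, single[0].shape)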
+ + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + torch.Tensor: batched SAM token for mask output + """ + masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + repeat_image=repeat_image, + high_res_features=high_res_features, + ) + + # Select the correct mask or masks for output + if multimask_output: + masks = masks[:, 1:, :, :] + iou_pred = iou_pred[:, 1:] + elif self.dynamic_multimask_via_stability and not self.training: + masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) + else: + masks = masks[:, 0:1, :, :] + iou_pred = iou_pred[:, 0:1] + + if multimask_output and self.use_multimask_token_for_obj_ptr: + sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape + else: + # Take the mask output token. Here we *always* use the token for single mask output. + # At test time, even if we track after 1-click (and using multimask_output=True), + # we still take the single mask token here. The rationale is that we always track + # after multiple clicks during training, so the past tokens seen during training + # are always the single mask token (and we'll let it be the object-memory token). + sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape + + # Prepare output + return masks, iou_pred, sam_tokens_out, object_score_logits + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details.""" + # Concatenate output tokens + s = 0 + if self.pred_obj_scores: + output_tokens = torch.cat( + [ + self.obj_score_token.weight, + self.iou_token.weight, + self.mask_tokens.weight, + ], + dim=0, + ) + s = 1 + else: + output_tokens = torch.cat( + [self.iou_token.weight, self.mask_tokens.weight], dim=0 + ) + output_tokens = output_tokens.unsqueeze(0).expand( + sparse_prompt_embeddings.size(0), -1, -1 + ) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + if repeat_image: + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + else: + assert image_embeddings.shape[0] == tokens.shape[0] + src = image_embeddings + src = src + dense_prompt_embeddings + assert ( + image_pe.size(0) == 1 + ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)" + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, s, :] + mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + if not self.use_high_res_features: + upscaled_embedding = self.output_upscaling(src) + else: + dc1, ln1, act1, dc2, act2 = self.output_upscaling + feat_s0, feat_s1 = high_res_features + upscaled_embedding = act1(ln1(dc1(src) + feat_s1)) + upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0) + + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append( + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) + ) + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + if self.pred_obj_scores: + assert s == 1 + object_score_logits = self.pred_obj_score_head(hs[:, 0, :]) + else: + # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1 + object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1) + + return masks, iou_pred, mask_tokens_out, object_score_logits + + def _get_stability_scores(self, mask_logits): + """ + Compute stability scores of the mask logits based on the IoU between upper and + lower thresholds, similar to https://github.com/fairinternal/onevision/pull/568. + """ + mask_logits = mask_logits.flatten(-2) + stability_delta = self.dynamic_multimask_stability_delta + area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() + area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() + stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) + return stability_scores + + def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): + """ + When outputting a single mask, if the stability score from the current single-mask + output (based on output token 0) falls below a threshold, we instead select from + multi-mask outputs (based on output token 1~3) the mask with the highest predicted + IoU score. This is intended to ensure a valid mask for both clicking and tracking. 
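# Editor's note: a worked standalone example (not part of the SAM2 sources) of the
# stability score computed by _get_stability_scores above: the IoU between the mask
# thresholded at +delta and at -delta. A mask whose logits hover near zero gets a
# low score and triggers the multimask fallback implemented just below.
import torch

delta = 0.05                                             # dynamic_multimask_stability_delta
logits = torch.tensor([[[[3.0, 2.0], [0.02, -4.0]]]])    # (B=1, 1, 2, 2) mask logits
flat = logits.flatten(-2)
area_i = torch.sum(flat > delta, dim=-1).float()         # pixels above +delta: 2
area_u = torch.sum(flat > -delta, dim=-1).float()        # pixels above -delta: 3
stability = torch.where(area_u > 0, area_i / area_u, 1.0)
print(stability)                                         # tensor([[0.6667]]) -> below 0.98, so fall back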
+ """ + # The best mask from multimask output tokens (1~3) + multimask_logits = all_mask_logits[:, 1:, :, :] + multimask_iou_scores = all_iou_scores[:, 1:] + best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) + batch_inds = torch.arange( + multimask_iou_scores.size(0), device=all_iou_scores.device + ) + best_multimask_logits = multimask_logits[batch_inds, best_scores_inds] + best_multimask_logits = best_multimask_logits.unsqueeze(1) + best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds] + best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1) + + # The mask from singlemask output token 0 and its stability score + singlemask_logits = all_mask_logits[:, 0:1, :, :] + singlemask_iou_scores = all_iou_scores[:, 0:1] + stability_scores = self._get_stability_scores(singlemask_logits) + is_stable = stability_scores >= self.dynamic_multimask_stability_thresh + + # Dynamically fall back to best multimask output upon low stability scores. + mask_logits_out = torch.where( + is_stable[..., None, None].expand_as(singlemask_logits), + singlemask_logits, + best_multimask_logits, + ) + iou_scores_out = torch.where( + is_stable.expand_as(singlemask_iou_scores), + singlemask_iou_scores, + best_multimask_iou_scores, + ) + return mask_logits_out, iou_scores_out diff --git a/inpaint/plugins/segment_anything2/modeling/sam/prompt_encoder.py b/inpaint/plugins/segment_anything2/modeling/sam/prompt_encoder.py new file mode 100644 index 0000000..0f6d46e --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/sam/prompt_encoder.py @@ -0,0 +1,182 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Tuple, Type + +import torch +from torch import nn + +from ..position_encoding import PositionEmbeddingRandom + +from ..sam2_utils import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. 
+ """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [ + nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings) + ] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = ( + 4 * image_embedding_size[0], + 4 * image_embedding_size[1], + ) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords( + points, self.input_image_size + ) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + point_embedding[labels == 2] += self.point_embeddings[2].weight + point_embedding[labels == 3] += self.point_embeddings[3].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords( + coords, self.input_image_size + ) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings diff --git a/inpaint/plugins/segment_anything2/modeling/sam/transformer.py b/inpaint/plugins/segment_anything2/modeling/sam/transformer.py new file mode 100644 index 0000000..2dedccb --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/sam/transformer.py @@ -0,0 +1,327 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import warnings +from functools import partial +from typing import Tuple, Type + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from ..position_encoding import apply_rotary_enc, compute_axial_cis + +from ..sam2_utils import MLP +from ...utils.misc import get_sdpa_settings + +warnings.simplefilter(action="ignore", category=FutureWarning) +OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings() + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. 
Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
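# Editor's note: a standalone sketch (not part of the SAM2 sources) of the update
# order implemented by TwoWayAttentionBlock below, using plain single-head
# scaled-dot-product attention in place of the Attention class and omitting the
# LayerNorms for brevity. Queries are the sparse prompt tokens; keys are the
# flattened image tokens.
import torch
import torch.nn.functional as F

def attn(q, k, v):
    return F.scaled_dot_product_attention(q, k, v)

B, n_q, n_k, d = 1, 6, 64, 32
queries, keys = torch.randn(B, n_q, d), torch.randn(B, n_k, d)
query_pe, key_pe = torch.randn(B, n_q, d), torch.randn(B, n_k, d)

queries = queries + attn(queries + query_pe, queries + query_pe, queries)  # (1) self-attn over prompt tokens
queries = queries + attn(queries + query_pe, keys + key_pe, keys)          # (2) tokens attend to image
queries = queries + torch.relu(queries)                                    # (3) stand-in for the MLP block
keys = keys + attn(keys + key_pe, queries + query_pe, queries)             # (4) image attends to tokens
print(queries.shape, keys.shape)    # torch.Size([1, 6, 32]) torch.Size([1, 64, 32])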
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLP( + embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation + ) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + dropout: float = 0.0, + kv_in_dim: int = None, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert ( + self.internal_dim % num_heads == 0 + ), "num_heads must divide embedding_dim." 
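# Editor's note: a quick standalone check (not part of the SAM2 sources) of the
# dimensions this Attention class uses when downsample_rate > 1: q/k/v are
# projected from embedding_dim (or kv_in_dim) down to internal_dim, which is then
# split evenly across the heads.
embedding_dim, num_heads, downsample_rate = 256, 8, 2
internal_dim = embedding_dim // downsample_rate          # 128
assert internal_dim % num_heads == 0
print(internal_dim, internal_dim // num_heads)           # 128 channels total, 16 per head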
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + self.dropout_p = dropout + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + with torch.backends.cuda.sdp_kernel( + enable_flash=USE_FLASH_ATTN, + # if Flash attention kernel is off, then math kernel needs to be enabled + enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON, + enable_mem_efficient=OLD_GPU, + ): + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out + + +class RoPEAttention(Attention): + """Attention with rotary position encoding.""" + + def __init__( + self, + *args, + rope_theta=10000.0, + # whether to repeat q rope to match k length + # this is needed for cross-attention to memories + rope_k_repeat=False, + feat_sizes=(32, 32), # [w, h] for stride 16 feats at 512 resolution + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.compute_cis = partial( + compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta + ) + freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) + self.freqs_cis = freqs_cis + self.rope_k_repeat = rope_k_repeat + + def forward( + self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0 + ) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Apply rotary position encoding + w = h = math.sqrt(q.shape[-2]) + self.freqs_cis = self.freqs_cis.to(q.device) + if self.freqs_cis.shape[0] != q.shape[-2]: + self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) + if q.shape[-2] != k.shape[-2]: + assert self.rope_k_repeat + + num_k_rope = k.size(-2) - num_k_exclude_rope + q, k[:, :, :num_k_rope] = apply_rotary_enc( + q, + k[:, :, :num_k_rope], + freqs_cis=self.freqs_cis, + repeat_freqs_k=self.rope_k_repeat, + ) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + with torch.backends.cuda.sdp_kernel( + enable_flash=USE_FLASH_ATTN, + # if Flash attention kernel is off, then math kernel needs to be enabled + enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON, + enable_mem_efficient=OLD_GPU, + ): + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/inpaint/plugins/segment_anything2/modeling/sam2_base.py b/inpaint/plugins/segment_anything2/modeling/sam2_base.py new file mode 100644 index 
0000000..7896060 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/sam2_base.py @@ -0,0 +1,832 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.distributed +import torch.nn.functional as F + +from torch.nn.init import trunc_normal_ + +from .sam.mask_decoder import MaskDecoder +from .sam.prompt_encoder import PromptEncoder +from .sam.transformer import TwoWayTransformer +from .sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames + +# a large negative value as a placeholder score for missing objects +NO_OBJ_SCORE = -1024.0 + + +class SAM2Base(torch.nn.Module): + def __init__( + self, + image_encoder, + memory_attention, + memory_encoder, + num_maskmem=7, # default 1 input frame + 6 previous frames + image_size=512, + backbone_stride=16, # stride of the image backbone output + sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob + sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob + # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks + binarize_mask_from_pts_for_mem_enc=False, + use_mask_input_as_output_without_sam=False, + # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder + # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit, + # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model + # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM. + max_cond_frames_in_attn=-1, + # on the first frame, whether to directly add the no-memory embedding to the image feature + # (instead of using the transformer encoder) + directly_add_no_mem_embed=False, + # whether to use high-resolution feature maps in the SAM mask decoder + use_high_res_features_in_sam=False, + # whether to output multiple (3) masks for the first click on initial conditioning frames + multimask_output_in_sam=False, + # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`; + # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points) + multimask_min_pt_num=1, + multimask_max_pt_num=1, + # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`) + multimask_output_for_tracking=False, + # Whether to use multimask tokens for obj ptr; Only relevant when both + # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True + use_multimask_token_for_obj_ptr: bool = False, + # whether to use sigmoid to restrict ious prediction to [0-1] + iou_prediction_use_sigmoid=False, + # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5). + # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of + # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame. 
+ memory_temporal_stride_for_eval=1, + # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click + # if `add_all_frames_to_correct_as_cond` is False, we conditioning frame list to only use those initial conditioning frames + add_all_frames_to_correct_as_cond=False, + # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks) + non_overlap_masks_for_mem_enc=False, + # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder=False, + # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`) + max_obj_ptrs_in_encoder=16, + # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`) + add_tpos_enc_to_obj_ptrs=True, + # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference + # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`) + proj_tpos_enc_in_obj_ptrs=False, + # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation + # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking) + only_obj_ptrs_in_the_past_for_eval=False, + # Whether to predict if there is an object in the frame + pred_obj_scores: bool = False, + # Whether to use an MLP to predict object scores + pred_obj_scores_mlp: bool = False, + # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True; + # Whether to have a fixed no obj pointer when there is no object present + # or to use it as an additive embedding with obj_ptr produced by decoder + fixed_no_obj_ptr: bool = False, + # Soft no object, i.e. mix in no_obj_ptr softly, + # hope to make recovery easier if there is a mistake and mitigate accumulation of errors + soft_no_obj_ptr: bool = False, + use_mlp_for_obj_ptr_proj: bool = False, + # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class. + sam_mask_decoder_extra_args=None, + compile_image_encoder: bool = False, + ): + super().__init__() + + # Part 1: the image backbone + self.image_encoder = image_encoder + # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting + self.use_high_res_features_in_sam = use_high_res_features_in_sam + self.num_feature_levels = 3 if use_high_res_features_in_sam else 1 + self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder + self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder + if use_obj_ptrs_in_encoder: + # A conv layer to downsample the mask prompt to stride 4 (the same stride as + # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale, + # so that it can be fed into the SAM mask decoder to generate a pointer. 
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) + self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs + if proj_tpos_enc_in_obj_ptrs: + assert add_tpos_enc_to_obj_ptrs # these options need to be used together + self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs + self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval + + # Part 2: memory attention to condition current frame's visual features + # with memories (and obj ptrs) from past frames + self.memory_attention = memory_attention + self.hidden_dim = memory_attention.d_model + + # Part 3: memory encoder for the previous frame's outputs + self.memory_encoder = memory_encoder + self.mem_dim = self.hidden_dim + if hasattr(self.memory_encoder, "out_proj") and hasattr( + self.memory_encoder.out_proj, "weight" + ): + # if there is compression of memories along channel dim + self.mem_dim = self.memory_encoder.out_proj.weight.shape[0] + self.num_maskmem = num_maskmem # Number of memories accessible + # Temporal encoding of the memories + self.maskmem_tpos_enc = torch.nn.Parameter( + torch.zeros(num_maskmem, 1, 1, self.mem_dim) + ) + trunc_normal_(self.maskmem_tpos_enc, std=0.02) + # a single token to indicate no memory embedding from previous frames + self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + trunc_normal_(self.no_mem_embed, std=0.02) + trunc_normal_(self.no_mem_pos_enc, std=0.02) + self.directly_add_no_mem_embed = directly_add_no_mem_embed + # Apply sigmoid to the output raw mask logits (to turn them from + # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder + self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc + self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc + self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc + self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc + self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval + # On frames with mask input, whether to directly output the input mask without + # using a SAM prompt encoder + mask decoder + self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam + self.multimask_output_in_sam = multimask_output_in_sam + self.multimask_min_pt_num = multimask_min_pt_num + self.multimask_max_pt_num = multimask_max_pt_num + self.multimask_output_for_tracking = multimask_output_for_tracking + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid + + # Part 4: SAM-style prompt encoder (for both mask and point inputs) + # and SAM-style mask decoder for the final mask output + self.image_size = image_size + self.backbone_stride = backbone_stride + self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args + self.pred_obj_scores = pred_obj_scores + self.pred_obj_scores_mlp = pred_obj_scores_mlp + self.fixed_no_obj_ptr = fixed_no_obj_ptr + self.soft_no_obj_ptr = soft_no_obj_ptr + if self.fixed_no_obj_ptr: + assert self.pred_obj_scores + assert self.use_obj_ptrs_in_encoder + if self.pred_obj_scores and self.use_obj_ptrs_in_encoder: + self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) + trunc_normal_(self.no_obj_ptr, std=0.02) + self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj + + self._build_sam_heads() + self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond + self.max_cond_frames_in_attn = 
max_cond_frames_in_attn + + # Model compilation + if compile_image_encoder: + # Compile the forward function (not the full module) to allow loading checkpoints. + print( + "Image encoder compilation is enabled. First forward pass will be slow." + ) + self.image_encoder.forward = torch.compile( + self.image_encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + + @property + def device(self): + return next(self.parameters()).device + + def forward(self, *args, **kwargs): + raise NotImplementedError( + "Please use the corresponding methods in SAM2VideoPredictor for inference." + "See notebooks/video_predictor_example.ipynb for an example." + ) + + def _build_sam_heads(self): + """Build SAM-style prompt encoder and mask decoder.""" + self.sam_prompt_embed_dim = self.hidden_dim + self.sam_image_embedding_size = self.image_size // self.backbone_stride + + # build PromptEncoder and MaskDecoder from SAM + # (their hyperparameters like `mask_in_chans=16` are from SAM code) + self.sam_prompt_encoder = PromptEncoder( + embed_dim=self.sam_prompt_embed_dim, + image_embedding_size=( + self.sam_image_embedding_size, + self.sam_image_embedding_size, + ), + input_image_size=(self.image_size, self.image_size), + mask_in_chans=16, + ) + self.sam_mask_decoder = MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=self.sam_prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=self.sam_prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + use_high_res_features=self.use_high_res_features_in_sam, + iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid, + pred_obj_scores=self.pred_obj_scores, + pred_obj_scores_mlp=self.pred_obj_scores_mlp, + use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr, + **(self.sam_mask_decoder_extra_args or {}), + ) + if self.use_obj_ptrs_in_encoder: + # a linear projection on SAM output tokens to turn them into object pointers + self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim) + if self.use_mlp_for_obj_ptr_proj: + self.obj_ptr_proj = MLP( + self.hidden_dim, self.hidden_dim, self.hidden_dim, 3 + ) + else: + self.obj_ptr_proj = torch.nn.Identity() + if self.proj_tpos_enc_in_obj_ptrs: + # a linear projection on temporal positional encoding in object pointers to + # avoid potential interference with spatial positional encoding + self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim) + else: + self.obj_ptr_tpos_proj = torch.nn.Identity() + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + ): + """ + Forward SAM prompt encoders and mask heads. + + Inputs: + - backbone_features: image features of [B, C, H, W] shape + - point_inputs: a dictionary with "point_coords" and "point_labels", where + 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the + absolute pixel-unit coordinate in (x, y) format of the P input points + 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means + positive clicks, 0 means negative clicks, and -1 means padding + - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the + same spatial size as the image. + - high_res_features: either 1) None or 2) or a list of length 2 containing + two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively, + which will be used as high-resolution feature maps for SAM decoder. 
+ - multimask_output: if it's True, we output 3 candidate masks and their 3 + corresponding IoU estimates, and if it's False, we output only 1 mask and + its corresponding IoU estimate. + + Outputs: + - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if + `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM + output mask logits (before sigmoid) for the low-resolution masks, with 4x + the resolution (1/4 stride) of the input backbone_features. + - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3 + if `multimask_output=True` and M = 1 if `multimask_output=False`), + upsampled from the low-resolution masks, with shape size as the image + (stride is 1 pixel). + - ious, [B, M] shape, where (where M = 3 if `multimask_output=True` and M = 1 + if `multimask_output=False`), the estimated IoU of each output mask. + - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`. + If `multimask_output=True`, it's the mask with the highest IoU estimate. + If `multimask_output=False`, it's the same as `low_res_multimasks`. + - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`. + If `multimask_output=True`, it's the mask with the highest IoU estimate. + If `multimask_output=False`, it's the same as `high_res_multimasks`. + - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted + based on the output token from the SAM mask decoder. + """ + B = backbone_features.size(0) + device = backbone_features.device + assert backbone_features.size(1) == self.sam_prompt_embed_dim + assert backbone_features.size(2) == self.sam_image_embedding_size + assert backbone_features.size(3) == self.sam_image_embedding_size + + # a) Handle point prompts + if point_inputs is not None: + sam_point_coords = point_inputs["point_coords"] + sam_point_labels = point_inputs["point_labels"] + assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B + else: + # If no points are provide, pad with an empty point (with label -1) + sam_point_coords = torch.zeros(B, 1, 2, device=device) + sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device) + + # b) Handle mask prompts + if mask_inputs is not None: + # If mask_inputs is provided, downsize it into low-res mask input if needed + # and feed it as a dense mask prompt into the SAM mask encoder + assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1) + if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size: + sam_mask_prompt = F.interpolate( + mask_inputs.float(), + size=self.sam_prompt_encoder.mask_input_size, + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + else: + sam_mask_prompt = mask_inputs + else: + # Otherwise, simply feed None (and SAM's prompt encoder will add + # a learned `no_mask_embed` to indicate no mask input in this case). 
+ sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=self.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + if self.pred_obj_scores: + is_obj_appearing = object_score_logits > 0 + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + low_res_multimasks = torch.where( + is_obj_appearing[:, None, None], + low_res_multimasks, + NO_OBJ_SCORE, + ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks, + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + + sam_output_token = sam_output_tokens[:, 0] + if multimask_output: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: + # Allow *soft* no obj ptr, unlike for masks + if self.soft_no_obj_ptr: + # Only hard possible with gt + assert not self.teacher_force_obj_scores_for_mem + lambda_is_obj_appearing = object_score_logits.sigmoid() + else: + lambda_is_obj_appearing = is_obj_appearing.float() + + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): + """ + Directly turn binary `mask_inputs` into a output mask logits without using SAM. + (same input and output shapes as in _forward_sam_heads above). + """ + # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid). 
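# Editor's note: a tiny standalone check (not part of the SAM2 sources) of the
# mapping used just below: a binary input mask is scaled by 20 and shifted by -10,
# so 0 -> -10 and 1 -> +10, which sigmoid maps to roughly 4.5e-05 and 0.99995.
import torch

mask = torch.tensor([[0.0, 1.0]])
logits = mask * 20.0 + (-10.0)
print(logits)                   # tensor([[-10.,  10.]])
print(torch.sigmoid(logits))    # ~tensor([[4.5398e-05, 9.9995e-01]])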
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05 + mask_inputs_float = mask_inputs.float() + high_res_masks = mask_inputs_float * out_scale + out_bias + low_res_masks = F.interpolate( + high_res_masks, + size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + # a dummy IoU prediction of all 1's under mask input + ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float() + if not self.use_obj_ptrs_in_encoder: + # all zeros as a dummy object pointer (of shape [B, C]) + obj_ptr = torch.zeros( + mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device + ) + else: + # produce an object pointer using the SAM decoder from the mask input + _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads( + backbone_features=backbone_features, + mask_inputs=self.mask_downsample(mask_inputs_float), + high_res_features=high_res_features, + ) + # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; + # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying + # on the object_scores from the SAM decoder. + is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) + is_obj_appearing = is_obj_appearing[..., None] + lambda_is_obj_appearing = is_obj_appearing.float() + object_score_logits = out_scale * lambda_is_obj_appearing + out_bias + if self.pred_obj_scores: + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_masks, + high_res_masks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def forward_image(self, img_batch: torch.Tensor): + """Get the image feature on the input batch.""" + backbone_out = self.image_encoder(img_batch) + if self.use_high_res_features_in_sam: + # precompute projected level 0 and level 1 features in SAM decoder + # to avoid running it again on every SAM click + backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0( + backbone_out["backbone_fpn"][0] + ) + backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( + backbone_out["backbone_fpn"][1] + ) + return backbone_out + + def _prepare_backbone_features(self, backbone_out): + """Prepare and flatten visual features.""" + backbone_out = backbone_out.copy() + assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"]) + assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels + + feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :] + vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :] + + feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds] + # flatten NxCxHxW to HWxNxC + vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps] + vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds] + + return backbone_out, vision_feats, vision_pos_embeds, feat_sizes + + def _prepare_memory_conditioned_features( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + ): + """Fuse the current frame's visual feature map with previous memory.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level 
(lowest-resolution) feature size + device = current_vision_feats[-1].device + # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images. + # In this case, we skip the fusion with any memory. + if self.num_maskmem == 0: # Disable memory and skip fusion + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + return pix_feat + + num_obj_ptr_tokens = 0 + # Step 1: condition the visual features of the current frame on previous memories + if not is_init_cond_frame: + # Retrieve the memories encoded with the maskmem backbone + to_cat_memory, to_cat_memory_pos_embed = [], [] + # Add conditioning frames's output first (all cond frames have t_pos=0 for + # when getting temporal positional embedding below) + assert len(output_dict["cond_frame_outputs"]) > 0 + # Select a maximum number of temporally closest cond frames for cross attention + cond_outputs = output_dict["cond_frame_outputs"] + selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames( + frame_idx, cond_outputs, self.max_cond_frames_in_attn + ) + t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()] + # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory + # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1 + # We also allow taking the memory frame non-consecutively (with r>1), in which case + # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame. + r = self.memory_temporal_stride_for_eval + for t_pos in range(1, self.num_maskmem): + t_rel = self.num_maskmem - t_pos # how many frames before current frame + if t_rel == 1: + # for t_rel == 1, we take the last frame (regardless of r) + if not track_in_reverse: + # the frame immediately before this frame (i.e. frame_idx - 1) + prev_frame_idx = frame_idx - t_rel + else: + # the frame immediately after this frame (i.e. frame_idx + 1) + prev_frame_idx = frame_idx + t_rel + else: + # for t_rel >= 2, we take the memory frame from every r-th frames + if not track_in_reverse: + # first find the nearest frame among every r-th frames before this frame + # for r=1, this would be (frame_idx - 2) + prev_frame_idx = ((frame_idx - 2) // r) * r + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx - (t_rel - 2) * r + else: + # first find the nearest frame among every r-th frames after this frame + # for r=1, this would be (frame_idx + 2) + prev_frame_idx = -(-(frame_idx + 2) // r) * r + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx + (t_rel - 2) * r + out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None) + if out is None: + # If an unselected conditioning frame is among the last (self.num_maskmem - 1) + # frames, we still attend to it as if it's a non-conditioning frame. + out = unselected_cond_outputs.get(prev_frame_idx, None) + t_pos_and_prevs.append((t_pos, out)) + + for t_pos, prev in t_pos_and_prevs: + if prev is None: + continue # skip padding frames + # "maskmem_features" might have been offloaded to CPU in demo use cases, + # so we load it back to GPU (it's a no-op if it's already on GPU). 
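+ # Each selected `prev` entry holds the memory of one past frame. For example, with
+ # forward tracking, frame_idx=10, r=2 and num_maskmem=7, the loop above picks frame 9
+ # (t_rel=1) plus frames 8, 6, 4, 2, 0 from the stride-r grid, via
+ # ((frame_idx - 2) // r) * r - (t_rel - 2) * r for t_rel=2..6. Below, each memory is
+ # flattened from [B, C_mem, H, W] to [H*W, B, C_mem] tokens and tagged with its
+ # spatial and temporal positional encodings before being concatenated for attention.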
+ feats = prev["maskmem_features"].cuda(non_blocking=True) + to_cat_memory.append(feats.flatten(2).permute(2, 0, 1)) + # Spatial positional encoding (it might have been offloaded to CPU in eval) + maskmem_enc = prev["maskmem_pos_enc"][-1].cuda() + maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1) + # Temporal positional encoding + maskmem_enc = ( + maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1] + ) + to_cat_memory_pos_embed.append(maskmem_enc) + + # Construct the list of past object pointers + if self.use_obj_ptrs_in_encoder: + max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder) + # First add those object pointers from selected conditioning frames + # (optionally, only include object pointers in the past during evaluation) + if not self.training and self.only_obj_ptrs_in_the_past_for_eval: + ptr_cond_outputs = { + t: out + for t, out in selected_cond_outputs.items() + if (t >= frame_idx if track_in_reverse else t <= frame_idx) + } + else: + ptr_cond_outputs = selected_cond_outputs + pos_and_ptrs = [ + # Temporal pos encoding contains how far away each pointer is from current frame + (abs(frame_idx - t), out["obj_ptr"]) + for t, out in ptr_cond_outputs.items() + ] + # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame + for t_diff in range(1, max_obj_ptrs_in_encoder): + t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff + if t < 0 or (num_frames is not None and t >= num_frames): + break + out = output_dict["non_cond_frame_outputs"].get( + t, unselected_cond_outputs.get(t, None) + ) + if out is not None: + pos_and_ptrs.append((t_diff, out["obj_ptr"])) + # If we have at least one object pointer, add them to the across attention + if len(pos_and_ptrs) > 0: + pos_list, ptrs_list = zip(*pos_and_ptrs) + # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape + obj_ptrs = torch.stack(ptrs_list, dim=0) + # a temporal positional embedding based on how far each object pointer is from + # the current frame (sine embedding normalized by the max pointer num). 
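+ # At this point obj_ptrs has shape [P, B, C] for P collected pointers. When
+ # self.mem_dim is smaller than C (e.g. C=256 with mem_dim=64), the block below
+ # splits each pointer into C // self.mem_dim tokens of width mem_dim and
+ # repeat-interleaves its temporal position, so num_obj_ptr_tokens becomes
+ # P * (C // self.mem_dim).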
+ if self.add_tpos_enc_to_obj_ptrs: + t_diff_max = max_obj_ptrs_in_encoder - 1 + tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim + obj_pos = torch.tensor(pos_list, device=device) + obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim) + obj_pos = self.obj_ptr_tpos_proj(obj_pos) + obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim) + else: + obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim) + if self.mem_dim < C: + # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C + obj_ptrs = obj_ptrs.reshape( + -1, B, C // self.mem_dim, self.mem_dim + ) + obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) + obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0) + to_cat_memory.append(obj_ptrs) + to_cat_memory_pos_embed.append(obj_pos) + num_obj_ptr_tokens = obj_ptrs.shape[0] + else: + num_obj_ptr_tokens = 0 + else: + # for initial conditioning frames, encode them without using any previous memory + if self.directly_add_no_mem_embed: + # directly add no-mem embedding (instead of using the transformer encoder) + pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + # Use a dummy token on the first frame (to avoid emtpy memory input to tranformer encoder) + to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)] + to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)] + + # Step 2: Concatenate the memories and forward through the transformer encoder + memory = torch.cat(to_cat_memory, dim=0) + memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) + + pix_feat_with_mem = self.memory_attention( + curr=current_vision_feats, + curr_pos=current_vision_pos_embeds, + memory=memory, + memory_pos=memory_pos_embed, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + # reshape the output (HW)BC => BCHW + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + def _encode_new_memory( + self, + current_vision_feats, + feat_sizes, + pred_masks_high_res, + is_mask_from_pts, + ): + """Encode the current image and its prediction into a memory feature.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + # top-level feature, (HW)BC => BCHW + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + if self.non_overlap_masks_for_mem_enc and not self.training: + # optionally, apply non-overlapping constraints to the masks (it's applied + # in the batch dimension and should only be used during eval, where all + # the objects come from the same video under batch size 1). 
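+ # The call below keeps, at every pixel, only the object (batch slice) whose mask
+ # logit is highest and clamps all other objects' logits to at most -10.0
+ # (sigmoid(-10) ~= 4.5e-05), so the memories encoded from these masks never
+ # contain overlapping foreground regions.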
+ pred_masks_high_res = self._apply_non_overlapping_constraints( + pred_masks_high_res + ) + # scale the raw mask logits with a temperature before applying sigmoid + binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts + if binarize and not self.training: + mask_for_mem = (pred_masks_high_res > 0).float() + else: + # apply sigmoid on the raw mask logits to turn them into range (0, 1) + mask_for_mem = torch.sigmoid(pred_masks_high_res) + # apply scale and bias terms to the sigmoid probabilities + if self.sigmoid_scale_for_mem_enc != 1.0: + mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc + if self.sigmoid_bias_for_mem_enc != 0.0: + mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc + maskmem_out = self.memory_encoder( + pix_feat, + mask_for_mem, + skip_mask_sigmoid=True, # sigmoid already applied + ) + maskmem_features = maskmem_out["vision_features"] + maskmem_pos_enc = maskmem_out["vision_pos_enc"] + + return maskmem_features, maskmem_pos_enc + + def track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + # Whether to run the memory encoder on the predicted masks. Sometimes we might want + # to skip the memory encoder with `run_mem_encoder=False`. For example, + # in demo we might call `track_step` multiple times for each user click, + # and only encode the memory when the user finalizes their clicks. And in ablation + # settings like SAM training on static images, we don't need the memory encoder. + run_mem_encoder=True, + # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). + prev_sam_mask_logits=None, + ): + current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} + # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW + if len(current_vision_feats) > 1: + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1]) + ] + else: + high_res_features = None + if mask_inputs is not None and self.use_mask_input_as_output_without_sam: + # When use_mask_input_as_output_without_sam=True, we directly output the mask input + # (see it as a GT mask) without using a SAM prompt encoder + mask decoder. + pix_feat = current_vision_feats[-1].permute(1, 2, 0) + pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) + sam_outputs = self._use_mask_as_output( + pix_feat, high_res_features, mask_inputs + ) + else: + # fused the visual feature with previous memory features in the memory bank + pix_feat_with_mem = self._prepare_memory_conditioned_features( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats[-1:], + current_vision_pos_embeds=current_vision_pos_embeds[-1:], + feat_sizes=feat_sizes[-1:], + output_dict=output_dict, + num_frames=num_frames, + track_in_reverse=track_in_reverse, + ) + # apply SAM-style segmentation head + # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, + # e.g. 
in demo where such logits come from earlier interaction instead of correction sampling + # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) + if prev_sam_mask_logits is not None: + assert point_inputs is not None and mask_inputs is None + mask_inputs = prev_sam_mask_logits + multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) + sam_outputs = self._forward_sam_heads( + backbone_features=pix_feat_with_mem, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + high_res_features=high_res_features, + multimask_output=multimask_output, + ) + ( + _, + _, + _, + low_res_masks, + high_res_masks, + obj_ptr, + _, + ) = sam_outputs + + current_out["pred_masks"] = low_res_masks + current_out["pred_masks_high_res"] = high_res_masks + current_out["obj_ptr"] = obj_ptr + + # Finally run the memory encoder on the predicted mask to encode + # it into a new memory feature (that can be used in future frames) + if run_mem_encoder and self.num_maskmem > 0: + high_res_masks_for_mem_enc = high_res_masks + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks_for_mem_enc, + is_mask_from_pts=(point_inputs is not None), + ) + current_out["maskmem_features"] = maskmem_features + current_out["maskmem_pos_enc"] = maskmem_pos_enc + else: + current_out["maskmem_features"] = None + current_out["maskmem_pos_enc"] = None + + return current_out + + def _use_multimask(self, is_init_cond_frame, point_inputs): + """Whether to use multimask output in the SAM head.""" + num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1) + multimask_output = ( + self.multimask_output_in_sam + and (is_init_cond_frame or self.multimask_output_for_tracking) + and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num) + ) + return multimask_output + + def _apply_non_overlapping_constraints(self, pred_masks): + """ + Apply non-overlapping constraints to the object scores in pred_masks. Here we + keep only the highest scoring object at each spatial location in pred_masks. + """ + batch_size = pred_masks.size(0) + if batch_size == 1: + return pred_masks + + device = pred_masks.device + # "max_obj_inds": object index of the object with the highest score at each location + max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True) + # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks` + batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None] + keep = max_obj_inds == batch_obj_inds + # suppress overlapping regions' scores below -10.0 so that the foreground regions + # don't overlap (here sigmoid(-10.0)=4.5398e-05) + pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0)) + return pred_masks diff --git a/inpaint/plugins/segment_anything2/modeling/sam2_utils.py b/inpaint/plugins/segment_anything2/modeling/sam2_utils.py new file mode 100644 index 0000000..6d97059 --- /dev/null +++ b/inpaint/plugins/segment_anything2/modeling/sam2_utils.py @@ -0,0 +1,149 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
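To make the conditioning-frame selection rule implemented in this file concrete, here is a minimal, illustrative sketch of `select_closest_cond_frames` (defined just below). It assumes the package is importable under the `inpaint.plugins.segment_anything2` path introduced by this patch, and it uses plain strings in place of the per-frame output dicts the tracker actually stores:

from inpaint.plugins.segment_anything2.modeling.sam2_utils import (
    select_closest_cond_frames,
)

# Conditioning outputs exist for frames 0, 5, 20 and 40 (values are placeholders).
cond_outputs = {0: "out0", 5: "out5", 20: "out20", 40: "out40"}

# With a budget of 2, frame 18 attends to its closest neighbours on either side.
selected, unselected = select_closest_cond_frames(
    frame_idx=18, cond_frame_outputs=cond_outputs, max_cond_frame_num=2
)
print(sorted(selected))    # [5, 20]
print(sorted(unselected))  # [0, 40]

# With a budget of 3, the next-closest frame by |t - 18| (frame 0) is also kept.
selected, _ = select_closest_cond_frames(18, cond_outputs, max_cond_frame_num=3)
print(sorted(selected))    # [0, 5, 20]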
+ + +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num): + """ + Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs` + that are temporally closest to the current frame at `frame_idx`. Here, we take + - a) the closest conditioning frame before `frame_idx` (if any); + - b) the closest conditioning frame after `frame_idx` (if any); + - c) any other temporally closest conditioning frames until reaching a total + of `max_cond_frame_num` conditioning frames. + + Outputs: + - selected_outputs: selected items (keys & values) from `cond_frame_outputs`. + - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`. + """ + if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num: + selected_outputs = cond_frame_outputs + unselected_outputs = {} + else: + assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames" + selected_outputs = {} + + # the closest conditioning frame before `frame_idx` (if any) + idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None) + if idx_before is not None: + selected_outputs[idx_before] = cond_frame_outputs[idx_before] + + # the closest conditioning frame after `frame_idx` (if any) + idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None) + if idx_after is not None: + selected_outputs[idx_after] = cond_frame_outputs[idx_after] + + # add other temporally closest conditioning frames until reaching a total + # of `max_cond_frame_num` conditioning frames. + num_remain = max_cond_frame_num - len(selected_outputs) + inds_remain = sorted( + (t for t in cond_frame_outputs if t not in selected_outputs), + key=lambda x: abs(x - frame_idx), + )[:num_remain] + selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain) + unselected_outputs = { + t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs + } + + return selected_outputs, unselected_outputs + + +def get_1d_sine_pe(pos_inds, dim, temperature=10000): + """ + Get 1D sine positional embedding as in the original Transformer paper. 
+ """ + pe_dim = dim // 2 + dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device) + dim_t = temperature ** (2 * (dim_t // 2) / pe_dim) + + pos_embed = pos_inds.unsqueeze(-1) / dim_t + pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1) + return pos_embed + + +def get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +class DropPath(nn.Module): + # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py + def __init__(self, drop_prob=0.0, scale_by_keep=True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + if self.drop_prob == 0.0 or not self.training: + return x + keep_prob = 1 - self.drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and self.scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +# Lightly adapted from +# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + activation: nn.Module = nn.ReLU, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.sigmoid_output = sigmoid_output + self.act = activation() + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/inpaint/plugins/segment_anything2/sam2_image_predictor.py b/inpaint/plugins/segment_anything2/sam2_image_predictor.py new file mode 100644 index 0000000..99ac570 --- /dev/null +++ b/inpaint/plugins/segment_anything2/sam2_image_predictor.py @@ -0,0 +1,445 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
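As an orientation for the predictor class defined in this file, here is a minimal, illustrative usage sketch. It assumes `sam_model` is an already-constructed `SAM2Base` instance (how the model is built and its weights are loaded is outside this file), and it feeds a dummy image:

import numpy as np

from inpaint.plugins.segment_anything2.sam2_image_predictor import SAM2ImagePredictor

predictor = SAM2ImagePredictor(sam_model)        # sam_model: a SAM2Base, assumed given
image = np.zeros((480, 640, 3), dtype=np.uint8)  # any HxWx3 RGB uint8 image
predictor.set_image(image)                       # image embeddings are computed once

point_coords = np.array([[320.0, 240.0]])        # one foreground click, (x, y) in pixels
point_labels = np.array([1])
masks, scores, low_res_logits = predictor.predict(
    point_coords=point_coords,
    point_labels=point_labels,
    multimask_output=True,  # return three candidate masks for an ambiguous prompt
)
# Per the docstrings below: masks is (3, 480, 640), scores is (3,), and
# low_res_logits is (3, 256, 256) and can be fed back as mask_input later.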
+ +import logging + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL.Image import Image + +from .modeling.sam2_base import SAM2Base + +from .utils.transforms import SAM2Transforms + + +class SAM2ImagePredictor: + def __init__( + self, + sam_model: SAM2Base, + mask_threshold=0.0, + max_hole_area=0.0, + max_sprinkle_area=0.0, + ) -> None: + """ + Uses SAM-2 to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam-2): The model to use for mask prediction. + mask_threshold (float): The threshold to use when converting mask logits + to binary masks. Masks are thresholded at 0 by default. + fill_hole_area (int): If fill_hole_area > 0, we fill small holes in up to + the maximum area of fill_hole_area in low_res_masks. + """ + super().__init__() + self.model = sam_model + self._transforms = SAM2Transforms( + resolution=self.model.image_size, + mask_threshold=mask_threshold, + max_hole_area=max_hole_area, + max_sprinkle_area=max_sprinkle_area, + ) + + # Predictor state + self._is_image_set = False + self._features = None + self._orig_hw = None + # Whether the predictor is set for single image or a batch of images + self._is_batch = False + + # Predictor config + self.mask_threshold = mask_threshold + + # Spatial dim for backbone feature maps + self._bb_feat_sizes = [ + (256, 256), + (128, 128), + (64, 64), + ] + + @torch.no_grad() + def set_image( + self, + image: Union[np.ndarray, Image], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image + with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + self.reset_predictor() + # Transform the image to the form expected by the model + if isinstance(image, np.ndarray): + logging.info("For numpy array image, we assume (HxWxC) format") + self._orig_hw = [image.shape[:2]] + elif isinstance(image, Image): + w, h = image.size + self._orig_hw = [(h, w)] + else: + raise NotImplementedError("Image format not supported") + + input_image = self._transforms(image) + input_image = input_image[None, ...].to(self.device) + + assert ( + len(input_image.shape) == 4 and input_image.shape[1] == 3 + ), f"input_image must be of size 1x3xHxW, got {input_image.shape}" + logging.info("Computing image embeddings for the provided image...") + backbone_out = self.model.forward_image(input_image) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + logging.info("Image embeddings computed.") + + @torch.no_grad() + def set_image_batch( + self, + image_list: List[Union[np.ndarray]], + ) -> None: + """ + Calculates the image embeddings for the provided image batch, allowing + masks to be predicted with the 'predict_batch' method. 
+ + Arguments: + image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray + with pixel values in [0, 255]. + """ + self.reset_predictor() + assert isinstance(image_list, list) + self._orig_hw = [] + for image in image_list: + assert isinstance( + image, np.ndarray + ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC" + self._orig_hw.append(image.shape[:2]) + # Transform the image to the form expected by the model + img_batch = self._transforms.forward_batch(image_list) + img_batch = img_batch.to(self.device) + batch_size = img_batch.shape[0] + assert ( + len(img_batch.shape) == 4 and img_batch.shape[1] == 3 + ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}" + logging.info("Computing image embeddings for the provided images...") + backbone_out = self.model.forward_image(img_batch) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + self._is_batch = True + logging.info("Image embeddings computed.") + + def predict_batch( + self, + point_coords_batch: List[np.ndarray] = None, + point_labels_batch: List[np.ndarray] = None, + box_batch: List[np.ndarray] = None, + mask_input_batch: List[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images. + It returns a tupele of lists of masks, ious, and low_res_masks_logits. + """ + assert self._is_batch, "This function should only be used when in batched mode" + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image_batch(...) before mask prediction." 
+ ) + num_images = len(self._features["image_embed"]) + all_masks = [] + all_ious = [] + all_low_res_masks = [] + for img_idx in range(num_images): + # Transform input prompts + point_coords = ( + point_coords_batch[img_idx] if point_coords_batch is not None else None + ) + point_labels = ( + point_labels_batch[img_idx] if point_labels_batch is not None else None + ) + box = box_batch[img_idx] if box_batch is not None else None + mask_input = ( + mask_input_batch[img_idx] if mask_input_batch is not None else None + ) + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, + point_labels, + box, + mask_input, + normalize_coords, + img_idx=img_idx, + ) + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + img_idx=img_idx, + ) + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = ( + iou_predictions.squeeze(0).float().detach().cpu().numpy() + ) + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + all_masks.append(masks_np) + all_ious.append(iou_predictions_np) + all_low_res_masks.append(low_res_masks_np) + + return all_masks, all_ious, all_low_res_masks + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." 
+ ) + + # Transform input prompts + + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, point_labels, box, mask_input, normalize_coords + ) + + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + ) + + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy() + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + def _prep_prompts( + self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1 + ): + unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = torch.as_tensor( + point_coords, dtype=torch.float, device=self.device + ) + unnorm_coords = self._transforms.transform_coords( + point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) + labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + if len(unnorm_coords.shape) == 2: + unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...] + if box is not None: + box = torch.as_tensor(box, dtype=torch.float, device=self.device) + unnorm_box = self._transforms.transform_boxes( + box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) # Bx2x2 + if mask_logits is not None: + mask_input = torch.as_tensor( + mask_logits, dtype=torch.float, device=self.device + ) + if len(mask_input.shape) == 3: + mask_input = mask_input[None, :, :, :] + return mask_input, unnorm_coords, labels, unnorm_box + + @torch.no_grad() + def _predict( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + img_idx: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using SAM2Transforms. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. 
+ + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + if point_coords is not None: + concat_points = (point_coords, point_labels) + else: + concat_points = None + + # Embed prompts + if boxes is not None: + box_coords = boxes.reshape(-1, 2, 2) + box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device) + box_labels = box_labels.repeat(boxes.size(0), 1) + # we merge "boxes" and "points" into a single "concat_points" input (where + # boxes are added at the beginning) to sam_prompt_encoder + if concat_points is not None: + concat_coords = torch.cat([box_coords, concat_points[0]], dim=1) + concat_labels = torch.cat([box_labels, concat_points[1]], dim=1) + concat_points = (concat_coords, concat_labels) + else: + concat_points = (box_coords, box_labels) + + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( + points=concat_points, + boxes=None, + masks=mask_input, + ) + + # Predict masks + batched_mode = ( + concat_points is not None and concat_points[0].shape[0] > 1 + ) # multi object prediction + high_res_features = [ + feat_level[img_idx].unsqueeze(0) + for feat_level in self._features["high_res_feats"] + ] + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( + image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), + image_pe=self.model.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=batched_mode, + high_res_features=high_res_features, + ) + + # Upscale the masks to the original image resolution + masks = self._transforms.postprocess_masks( + low_res_masks, self._orig_hw[img_idx] + ) + low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0) + if not return_logits: + masks = masks > self.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert ( + self._features is not None + ), "Features must exist if an image has been set." + return self._features["image_embed"] + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_predictor(self) -> None: + """ + Resets the image embeddings and other state variables. + """ + self._is_image_set = False + self._features = None + self._orig_hw = None + self._is_batch = False diff --git a/inpaint/plugins/segment_anything2/utils/__init__.py b/inpaint/plugins/segment_anything2/utils/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/inpaint/plugins/segment_anything2/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/inpaint/plugins/segment_anything2/utils/misc.py b/inpaint/plugins/segment_anything2/utils/misc.py new file mode 100644 index 0000000..296ecc1 --- /dev/null +++ b/inpaint/plugins/segment_anything2/utils/misc.py @@ -0,0 +1,90 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import warnings + +import numpy as np +import torch +from PIL import Image + + +def get_sdpa_settings(): + if torch.cuda.is_available(): + old_gpu = torch.cuda.get_device_properties(0).major < 7 + # only use Flash Attention on Ampere (8.0) or newer GPUs + use_flash_attn = torch.cuda.get_device_properties(0).major >= 8 + if not use_flash_attn: + warnings.warn( + "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.", + category=UserWarning, + stacklevel=2, + ) + # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only + # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases) + pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2]) + if pytorch_version < (2, 2): + warnings.warn( + f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. " + "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).", + category=UserWarning, + stacklevel=2, + ) + math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn + else: + old_gpu = True + use_flash_attn = False + math_kernel_on = True + + return old_gpu, use_flash_attn, math_kernel_on + + +def mask_to_box(masks: torch.Tensor): + """ + compute bounding box given an input mask + + Inputs: + - masks: [B, 1, H, W] boxes, dtype=torch.Tensor + + Returns: + - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor + """ + B, _, h, w = masks.shape + device = masks.device + xs = torch.arange(w, device=device, dtype=torch.int32) + ys = torch.arange(h, device=device, dtype=torch.int32) + grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy") + grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w) + grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w) + min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1) + max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1) + min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1) + max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1) + bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1) + + return bbox_coords + + +def _load_img_as_tensor(img_path, image_size): + img_pil = Image.open(img_path) + img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size))) + if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images + img_np = img_np / 255.0 + else: + raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}") + img = torch.from_numpy(img_np).permute(2, 0, 1) + video_width, video_height = img_pil.size # the original video size + return img, video_height, video_width + + +def concat_points(old_point_inputs, new_points, new_labels): + """Add new points and labels to previous point inputs (add at the end).""" + if old_point_inputs is None: + points, labels = new_points, new_labels + else: + points = 
torch.cat([old_point_inputs["point_coords"], new_points], dim=1) + labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1) + + return {"point_coords": points, "point_labels": labels} diff --git a/inpaint/plugins/segment_anything2/utils/transforms.py b/inpaint/plugins/segment_anything2/utils/transforms.py new file mode 100644 index 0000000..fe552e0 --- /dev/null +++ b/inpaint/plugins/segment_anything2/utils/transforms.py @@ -0,0 +1,77 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from torchvision.transforms import Normalize, Resize, ToTensor + + +class SAM2Transforms(nn.Module): + def __init__( + self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0 + ): + """ + Transforms for SAM2. + """ + super().__init__() + self.resolution = resolution + self.mask_threshold = mask_threshold + self.max_hole_area = max_hole_area + self.max_sprinkle_area = max_sprinkle_area + self.mean = [0.485, 0.456, 0.406] + self.std = [0.229, 0.224, 0.225] + self.to_tensor = ToTensor() + self.transforms = torch.jit.script( + nn.Sequential( + Resize((self.resolution, self.resolution)), + Normalize(self.mean, self.std), + ) + ) + + def __call__(self, x): + x = self.to_tensor(x) + return self.transforms(x) + + def forward_batch(self, img_list): + img_batch = [self.transforms(self.to_tensor(img)) for img in img_list] + img_batch = torch.stack(img_batch, dim=0) + return img_batch + + def transform_coords( + self, coords: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates, + If the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + + Returns + Un-normalized coordinates in the range of [0, 1] which is expected by the SAM2 model. + """ + if normalize: + assert orig_hw is not None + h, w = orig_hw + coords = coords.clone() + coords[..., 0] = coords[..., 0] / w + coords[..., 1] = coords[..., 1] / h + + coords = coords * self.resolution # unnormalize coords + return coords + + def transform_boxes( + self, boxes: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates, + if the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + """ + boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw) + return boxes + + def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor: + """ + Perform PostProcessing on output masks. 
+ """ + return masks diff --git a/inpaint/runtime.py b/inpaint/runtime.py new file mode 100644 index 0000000..e109528 --- /dev/null +++ b/inpaint/runtime.py @@ -0,0 +1,86 @@ +# https://github.com/huggingface/huggingface_hub/blob/5a12851f54bf614be39614034ed3a9031922d297/src/huggingface_hub/utils/_runtime.py +import os +import platform +import sys +from pathlib import Path + +import packaging.version +from inpaint.schema import Device +from loguru import logger +from rich import print +from typing import Dict, Any + + +_PY_VERSION: str = sys.version.split()[0].rstrip("+") + +if packaging.version.Version(_PY_VERSION) < packaging.version.Version("3.8.0"): + import importlib_metadata # type: ignore +else: + import importlib.metadata as importlib_metadata # type: ignore + +_package_versions = {} + +_CANDIDATES = [ + "torch", + "torchvision", + "Pillow", + "diffusers", + "transformers", + "opencv-python", + "accelerate", + "iopaint", + "rembg", +] +# Check once at runtime +for name in _CANDIDATES: + _package_versions[name] = "N/A" + try: + _package_versions[name] = importlib_metadata.version(name) + except importlib_metadata.PackageNotFoundError: + pass + + +def dump_environment_info() -> Dict[str, str]: + """Dump information about the machine to help debugging issues.""" + + # Generic machine info + info: Dict[str, Any] = { + "Platform": platform.platform(), + "Python version": platform.python_version(), + } + info.update(_package_versions) + print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n") + return info + + +def check_device(device: Device) -> Device: + if device == Device.cuda: + import platform + + if platform.system() == "Darwin": + logger.warning("MacOS does not support cuda, use cpu instead") + return Device.cpu + else: + import torch + + if not torch.cuda.is_available(): + logger.warning("CUDA is not available, use cpu instead") + return Device.cpu + elif device == Device.mps: + import torch + + if not torch.backends.mps.is_available(): + logger.warning("mps is not available, use cpu instead") + return Device.cpu + return device + + +def setup_model_dir(model_dir: Path): + model_dir = model_dir.expanduser().absolute() + logger.info(f"Model directory: {model_dir}") + os.environ["U2NET_HOME"] = str(model_dir) + os.environ["XDG_CACHE_HOME"] = str(model_dir) + if not model_dir.exists(): + logger.info(f"Create model directory: {model_dir}") + model_dir.mkdir(exist_ok=True, parents=True) + return model_dir diff --git a/inpaint/schema.py b/inpaint/schema.py new file mode 100644 index 0000000..659e341 --- /dev/null +++ b/inpaint/schema.py @@ -0,0 +1,491 @@ +import random +from enum import Enum +from pathlib import Path +from typing import Optional, Literal, List + +from loguru import logger + +from inpaint.const import ( + INSTRUCT_PIX2PIX_NAME, + KANDINSKY22_NAME, + POWERPAINT_NAME, + ANYTEXT_NAME, + SDXL_CONTROLNET_CHOICES, + SD2_CONTROLNET_CHOICES, + SD_CONTROLNET_CHOICES, + SD_BRUSHNET_CHOICES, +) +from pydantic import BaseModel, Field, computed_field, model_validator + + +class ModelType(str, Enum): + INPAINT = "inpaint" # LaMa, MAT... 
+ DIFFUSERS_SD = "diffusers_sd" + DIFFUSERS_SD_INPAINT = "diffusers_sd_inpaint" + DIFFUSERS_SDXL = "diffusers_sdxl" + DIFFUSERS_SDXL_INPAINT = "diffusers_sdxl_inpaint" + DIFFUSERS_OTHER = "diffusers_other" + + +class ModelInfo(BaseModel): + name: str + path: str + model_type: ModelType + is_single_file_diffusers: bool = False + + @computed_field + @property + def need_prompt(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [ + INSTRUCT_PIX2PIX_NAME, + KANDINSKY22_NAME, + POWERPAINT_NAME, + ANYTEXT_NAME, + ] + + @computed_field + @property + def controlnets(self) -> List[str]: + if self.model_type in [ + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SDXL_INPAINT, + ]: + return SDXL_CONTROLNET_CHOICES + if self.model_type in [ModelType.DIFFUSERS_SD, ModelType.DIFFUSERS_SD_INPAINT]: + if "sd2" in self.name.lower(): + return SD2_CONTROLNET_CHOICES + else: + return SD_CONTROLNET_CHOICES + if self.name == POWERPAINT_NAME: + return SD_CONTROLNET_CHOICES + return [] + + @computed_field + @property + def brushnets(self) -> List[str]: + if self.model_type in [ModelType.DIFFUSERS_SD]: + return SD_BRUSHNET_CHOICES + return [] + + @computed_field + @property + def support_strength(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [POWERPAINT_NAME, ANYTEXT_NAME] + + @computed_field + @property + def support_outpainting(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [KANDINSKY22_NAME, POWERPAINT_NAME] + + @computed_field + @property + def support_lcm_lora(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] + + @computed_field + @property + def support_controlnet(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] + + @computed_field + @property + def support_brushnet(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ] + + @computed_field + @property + def support_powerpaint_v2(self) -> bool: + return ( + self.model_type + in [ + ModelType.DIFFUSERS_SD, + ] + and self.name != POWERPAINT_NAME + ) + + +class Choices(str, Enum): + @classmethod + def values(cls): + return [member.value for member in cls] + + +class RealESRGANModel(Choices): + realesr_general_x4v3 = "realesr-general-x4v3" + RealESRGAN_x4plus = "RealESRGAN_x4plus" + RealESRGAN_x4plus_anime_6B = "RealESRGAN_x4plus_anime_6B" + + +class RemoveBGModel(Choices): + u2net = "u2net" + u2netp = "u2netp" + u2net_human_seg = "u2net_human_seg" + u2net_cloth_seg = "u2net_cloth_seg" + silueta = "silueta" + isnet_general_use = "isnet-general-use" + briaai_rmbg_1_4 = "briaai/RMBG-1.4" + + +class Device(Choices): + cpu = "cpu" + cuda = "cuda" + mps = "mps" + + +class InteractiveSegModel(Choices): + vit_b = "vit_b" + vit_l = "vit_l" + vit_h = "vit_h" + sam_hq_vit_b = "sam_hq_vit_b" + sam_hq_vit_l = "sam_hq_vit_l" + sam_hq_vit_h = "sam_hq_vit_h" + mobile_sam = "mobile_sam" + sam2_tiny = "sam2_tiny" + sam2_small = "sam2_small" + sam2_base = "sam2_base" + sam2_large = "sam2_large" + + 
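The computed fields on `ModelInfo` above are the per-model capability flags derived from the model type and name; a brief, illustrative sketch of how they resolve (names and paths are placeholders):

from inpaint.schema import ModelInfo, ModelType

# A diffusers SD 1.5 inpainting checkpoint (placeholder name/path):
sd_inpaint = ModelInfo(
    name="sd15-inpaint-example",
    path="/models/sd15-inpaint-example.safetensors",
    model_type=ModelType.DIFFUSERS_SD_INPAINT,
)
print(sd_inpaint.need_prompt)         # True  -> diffusion model, prompt is used
print(sd_inpaint.support_controlnet)  # True
print(sd_inpaint.controlnets)         # SD_CONTROLNET_CHOICES ("sd2" not in the name)

# An erase-style model such as LaMa:
lama = ModelInfo(name="lama", path="lama", model_type=ModelType.INPAINT)
print(lama.need_prompt)               # False -> no prompt needed
print(lama.controlnets)               # []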
+class PluginInfo(BaseModel): + name: str + support_gen_image: bool = False + support_gen_mask: bool = False + + +class CV2Flag(str, Enum): + INPAINT_NS = "INPAINT_NS" + INPAINT_TELEA = "INPAINT_TELEA" + + +class HDStrategy(str, Enum): + # Use original image size + ORIGINAL = "Original" + # Resize the longer side of the image to a specific size(hd_strategy_resize_limit), + # then do inpainting on the resized image. Finally, resize the inpainting result to the original size. + # The area outside the mask will not lose quality. + RESIZE = "Resize" + # Crop masking area(with a margin controlled by hd_strategy_crop_margin) from the original image to do inpainting + CROP = "Crop" + + +class LDMSampler(str, Enum): + ddim = "ddim" + plms = "plms" + + +class SDSampler(str, Enum): + dpm_plus_plus_2m = "DPM++ 2M" + dpm_plus_plus_2m_karras = "DPM++ 2M Karras" + dpm_plus_plus_2m_sde = "DPM++ 2M SDE" + dpm_plus_plus_2m_sde_karras = "DPM++ 2M SDE Karras" + dpm_plus_plus_sde = "DPM++ SDE" + dpm_plus_plus_sde_karras = "DPM++ SDE Karras" + dpm2 = "DPM2" + dpm2_karras = "DPM2 Karras" + dpm2_a = "DPM2 a" + dpm2_a_karras = "DPM2 a Karras" + euler = "Euler" + euler_a = "Euler a" + heun = "Heun" + lms = "LMS" + lms_karras = "LMS Karras" + + ddim = "DDIM" + pndm = "PNDM" + uni_pc = "UniPC" + lcm = "LCM" + + +class PowerPaintTask(Choices): + text_guided = "text-guided" + context_aware = "context-aware" + shape_guided = "shape-guided" + object_remove = "object-remove" + outpainting = "outpainting" + + +class ApiConfig(BaseModel): + host: str + port: int + inbrowser: bool + model: str + no_half: bool + low_mem: bool + cpu_offload: bool + disable_nsfw_checker: bool + local_files_only: bool + cpu_textencoder: bool + device: Device + input: Optional[Path] + mask_dir: Optional[Path] + output_dir: Optional[Path] + quality: int + enable_interactive_seg: bool + interactive_seg_model: InteractiveSegModel + interactive_seg_device: Device + enable_remove_bg: bool + remove_bg_model: str + enable_anime_seg: bool + enable_realesrgan: bool + realesrgan_device: Device + realesrgan_model: RealESRGANModel + enable_gfpgan: bool + gfpgan_device: Device + enable_restoreformer: bool + restoreformer_device: Device + + +class InpaintRequest(BaseModel): + image: Optional[str] = Field(None, description="base64 encoded image") + mask: Optional[str] = Field(None, description="base64 encoded mask") + + ldm_steps: int = Field(20, description="Steps for ldm model.") + ldm_sampler: str = Field(LDMSampler.plms, discription="Sampler for ldm model.") + zits_wireframe: bool = Field(True, description="Enable wireframe for zits model.") + + hd_strategy: str = Field( + HDStrategy.CROP, + description="Different way to preprocess image, only used by erase models(e.g. lama/mat)", + ) + hd_strategy_crop_trigger_size: int = Field( + 800, + description="Crop trigger size for hd_strategy=CROP, if the longer side of the image is larger than this value, use crop strategy", + ) + hd_strategy_crop_margin: int = Field( + 128, description="Crop margin for hd_strategy=CROP" + ) + hd_strategy_resize_limit: int = Field( + 1280, description="Resize limit for hd_strategy=RESIZE" + ) + + prompt: str = Field("", description="Prompt for diffusion models.") + negative_prompt: str = Field( + "", description="Negative prompt for diffusion models." 
+ ) + use_croper: bool = Field( + False, description="Crop image before doing diffusion inpainting" + ) + croper_x: int = Field(0, description="Crop x for croper") + croper_y: int = Field(0, description="Crop y for croper") + croper_height: int = Field(512, description="Crop height for croper") + croper_width: int = Field(512, description="Crop width for croper") + + use_extender: bool = Field( + False, description="Extend image before doing sd outpainting" + ) + extender_x: int = Field(0, description="Extend x for extender") + extender_y: int = Field(0, description="Extend y for extender") + extender_height: int = Field(640, description="Extend height for extender") + extender_width: int = Field(640, description="Extend width for extender") + + sd_scale: float = Field( + 1.0, + description="Resize the image before doing sd inpainting, the area outside the mask will not lose quality.", + gt=0.0, + le=1.0, + ) + sd_mask_blur: int = Field( + 11, + description="Blur the edge of mask area. The higher the number the smoother blend with the original image", + ) + sd_strength: float = Field( + 1.0, + description="Strength is a measure of how much noise is added to the base image, which influences how similar the output is to the base image. Higher value means more noise and more different from the base image", + le=1.0, + ) + sd_steps: int = Field( + 50, + description="The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.", + ) + sd_guidance_scale: float = Field( + 7.5, + help="Higher guidance scale encourages to generate images that are closely linked to the text prompt, usually at the expense of lower image quality.", + ) + sd_sampler: str = Field( + SDSampler.uni_pc, description="Sampler for diffusion model." + ) + sd_seed: int = Field( + 42, + description="Seed for diffusion model. -1 mean random seed", + validate_default=True, + ) + sd_match_histograms: bool = Field( + False, + description="Match histograms between inpainting area and original image.", + ) + + sd_outpainting_softness: float = Field(20.0) + sd_outpainting_space: float = Field(20.0) + + sd_lcm_lora: bool = Field( + False, + description="Enable lcm-lora mode. 
https://huggingface.co/docs/diffusers/main/en/using-diffusers/inference_with_lcm#texttoimage", + ) + + sd_keep_unmasked_area: bool = Field( + True, description="Keep unmasked area unchanged" + ) + + cv2_flag: CV2Flag = Field( + CV2Flag.INPAINT_NS, + description="Flag for opencv inpainting: https://docs.opencv.org/4.6.0/d7/d8b/group__photo__inpaint.html#gga8002a65f5a3328fbf15df81b842d3c3ca05e763003a805e6c11c673a9f4ba7d07", + ) + cv2_radius: int = Field( + 4, + description="Radius of a circular neighborhood of each point inpainted that is considered by the algorithm", + ) + + # Paint by Example + paint_by_example_example_image: Optional[str] = Field( + None, description="Base64 encoded example image for paint by example model" + ) + + # InstructPix2Pix + p2p_image_guidance_scale: float = Field(1.5, description="Image guidance scale") + + # ControlNet + enable_controlnet: bool = Field(False, description="Enable controlnet") + controlnet_conditioning_scale: float = Field( + 0.4, description="Conditioning scale", ge=0.0, le=1.0 + ) + controlnet_method: str = Field( + "lllyasviel/control_v11p_sd15_canny", description="Controlnet method" + ) + + # BrushNet + enable_brushnet: bool = Field(False, description="Enable brushnet") + brushnet_method: str = Field(SD_BRUSHNET_CHOICES[0], description="Brushnet method") + brushnet_conditioning_scale: float = Field( + 1.0, description="brushnet conditioning scale", ge=0.0, le=1.0 + ) + + # PowerPaint + enable_powerpaint_v2: bool = Field(False, description="Enable PowerPaint v2") + powerpaint_task: PowerPaintTask = Field( + PowerPaintTask.text_guided, description="PowerPaint task" + ) + fitting_degree: float = Field( + 1.0, + description="Control the fitting degree of the generated objects to the mask shape.", + gt=0.0, + le=1.0, + ) + + @model_validator(mode="after") + def validate_field(cls, values: "InpaintRequest"): + if values.sd_seed == -1: + values.sd_seed = random.randint(1, 99999999) + logger.info(f"Generate random seed: {values.sd_seed}") + + if values.use_extender and values.enable_controlnet: + logger.info("Extender is enabled, set controlnet_conditioning_scale=0") + values.controlnet_conditioning_scale = 0 + + if values.use_extender: + logger.info("Extender is enabled, set sd_strength=1") + values.sd_strength = 1.0 + + if values.enable_brushnet: + logger.info("BrushNet is enabled, set enable_controlnet=False") + if values.enable_controlnet: + values.enable_controlnet = False + if values.sd_lcm_lora: + logger.info("BrushNet is enabled, set sd_lcm_lora=False") + values.sd_lcm_lora = False + + if values.enable_controlnet: + logger.info("ControlNet is enabled, set enable_brushnet=False") + if values.enable_brushnet: + values.enable_brushnet = False + + return values + + +class RunPluginRequest(BaseModel): + name: str + image: str = Field(..., description="base64 encoded image") + clicks: List[List[int]] = Field( + [], description="Clicks for interactive seg, [[x,y,0/1], [x2,y2,0/1]]" + ) + scale: float = Field(2.0, description="Scale for upscaling") + + +MediaTab = Literal["input", "output", "mask"] + + +class MediasResponse(BaseModel): + name: str + height: int + width: int + ctime: float + mtime: float + + +class GenInfoResponse(BaseModel): + prompt: str = "" + negative_prompt: str = "" + + +class ServerConfigResponse(BaseModel): + plugins: List[PluginInfo] + modelInfos: List[ModelInfo] + removeBGModel: RemoveBGModel + removeBGModels: List[RemoveBGModel] + realesrganModel: RealESRGANModel + realesrganModels: List[RealESRGANModel] + 
interactiveSegModel: InteractiveSegModel + interactiveSegModels: List[InteractiveSegModel] + enableFileManager: bool + enableAutoSaving: bool + enableControlnet: bool + controlnetMethod: Optional[str] + disableModelSwitch: bool + isDesktop: bool + samplers: List[str] + + +class SwitchModelRequest(BaseModel): + name: str + + +class SwitchPluginModelRequest(BaseModel): + plugin_name: str + model_name: str + + +AdjustMaskOperate = Literal["expand", "shrink", "reverse"] + + +class AdjustMaskRequest(BaseModel): + mask: str = Field( + ..., description="base64 encoded mask. 255 means area to do inpaint" + ) + operate: AdjustMaskOperate = Field(..., description="expand/shrink/reverse") + kernel_size: int = Field(5, description="Kernel size for expanding mask") diff --git a/inpaint/tests/.gitignore b/inpaint/tests/.gitignore new file mode 100644 index 0000000..89b7717 --- /dev/null +++ b/inpaint/tests/.gitignore @@ -0,0 +1,2 @@ +*_result.png +result/ \ No newline at end of file diff --git a/inpaint/tests/__init__.py b/inpaint/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/inpaint/tests/anime_test.png b/inpaint/tests/anime_test.png new file mode 100644 index 0000000000000000000000000000000000000000..6b86838593077bef0e819d63955f47ed089941b5 GIT binary patch literal 491736 zcmZU(1ymi)vNpUo?(QDk*|@uI+}+(RxVyUtcX!tS0YcE=uE8xhBTeC4RbnlaN*}dYsx~+|)NO~BH=x=v@)c>l` zAecvIT}rBM60gIx1CKUQ!?mYR`Ma4G>l(#s^MGQydcvwL|HM4*PfVxU1ij5KY196- zoipTIcFooXB!RvxRyDQKO?5%M5j>1Ls#3IJkOm+%>yH(+eopi!)ChNN?T`dU`|O0I zA!9Oe0TfJvh#i{4=McDSz4xysn-iJA$=ro)>W>QsJLa*bTT6XgkcDQLJYUUY%3k!B z)UI)G#6>elXvsGCr|pDy)?uotM!i}174MTCwt+{7G$Gy=Aa`oN_m7;%SZISS6%+vU zA3PiY3P=Ee{@{QgRR~D3?dgoAR{9aa5cB&QO5m6w;71;oa}#>Vtv!SvbN(apq*$?-GAzn%P#9|?=kX0A5Q zZZ=MiWdHazF?Dix6C@}9C(!?ve?O;%m(Bl0a{Th}I1Ye|>*I1^$up zDcg8i*y~8xIDF*nBZd$ch(q9C_WwW4|3&;Sr1t+IIoR0#8~Ill0I-Cpx)<=gZ#mO^)ctqYe9rhydrb}U8U(Uj5a^EV zSOkN-j3K4it<3i_JJS1$jva|q_bu#Htxjz5u>>RpnP?J}2+y}3n6>BMH`EONhc91= zy~i!vfugbt$8J`6lV;z}qgIUUG5Jk9gkHim1u`0a`YfY6zl;#zBzDx!((3i8G?~xY z+e$B?oRhY_!qU_Qf^4eX(ygim^ttWY8JA@BYVw;(?{#ff&+npb_#fUv(kE=TlypKQ z=DxI!{9>(Hu+^;7s8gqR=^s8*=r53^Vp`X0x;B7cZ?o%1eztJ5{ra#nN zB`P<3~B}PimTH2fyM` zk8`}nHtlAUXty=nsW5|X{kF$#qh`;o?6%dvFgE4!F|G}!bvy_1=9P9K ze4{*Pm=yv7rpR_&vKdocb;?nzJIZV29>}Y|1xuDP4}xuHv)f|IOqm$WRhLr7wE2<` z{~Gpqd5@G^(R;ky^}fC>?s+^-VjpjjH}Gie8>luco@&;pHAu`qoEJ2TcbwmEe{NwA zNNu+5vRI_Q_|88hPSJVl(?Tt;2Cuers;kh2OKt=BGg5bbi2LWnbWV z47L`7W*rt??CN0XkjMM$^Le`-jlDUKRhRxoN?MC{?P{5vT&vE+w2^k>k)c83qZ0Gl z#$7LGk=|sX8oN!C_PFkhlyqEbb~K@%%^dR`_8Q;!cpeUiIj3xeV=2Pw#F^MTkk#qt z8_BIxZy==fneES%jyDJ4I~l_ZeIc z**m6}#OXK&FX>PvCzBr@iWe<|!K{XIpf}TuICiOq{(0}J?&V6kBH_}BCJG}A(4a0M zGF}(}fqJ8wUoCo2IomH$Z&pg`rXAII?ejcZoS=iCpydfxXqgJ;c8ewxir1Y^aeCXA z3Au;g>lQg*6u+70WXP5Qse!U_e%YFlpXwmwAu4R!#w$!~YHC35&}XEM5ac}$kH14T z8_Lh;x+L|oYajrEXxM--^lZ#bn39cqiJ^>axgZ04knZjmKEQgTs>@aeZvBjM&Hi?) 
z0DzSztu2pV8c-=jSb_oxkYQP#`x}>Dt-$}ZRoi#_keer@(Vnh>=a?cJB5<#{Y8oVl zgw{wKC<1@u%mj+Z>d>9SDA16r$l>B!vg7|@cd1?w|3FihBnMD+397Qt84v=;j+%jk z_y!|=6t&LdJASSE-hS zbnTYxt;uX2{meW3pUiw6Ors!{ENRUaRa+g{R-I}N;_|wbn>x1ptZ5X11MD;Py3I%y zTYWX#2^#`~-hjelcJT>ydPLB7d+#!Em+r#@ztu&gu&{~MG;0&B22t%EKvm7yVH{En zWUU*doO%-_6K$T`4Z&_o0#K0)Kj;rlTWegpdg3YB2FP!XO70W#$h1GtTEgj(!i6dg zxGi>c^x_z&>e_b&+!vtJIs~5DdP^(!z19gIF>wE&XwY2*$ZD1BY8bsMJ5~$bU7^^7 zk7*wV#9>~tz03xZJQfgdVSwOgUQ%bEKzwTh3%`E?k2&6g7@N%hxB!i_*Wl#!m2NJj zP=B_lr8{yI5&8*vjB0(AZ;1R$J~mwC2RT-y%cO?^FQuAbnX*X0ervhv^E}^ zNou`eR}!RwQ%+i;#gWxSquZPML)WFtlqRw4{96|#yK9yqHEhfDocWG`w@o`^1Y)eK zDMF#8Wx0d*2}B{MRbO8yntKDb4-B}*vW7vq8>sRnJr>wKET)?c<^aT{YmX5ym6l>0 zK+(=Szxue3jE_K!(ECyg#kGT};=dLnE!;6`3U$#QG480t|21^$|H-^qwG*tTD4U7QsGyo9{2nhBFzHf54{Ys24QdniFPoi%B|&(^%Ael zTlppzdL-c5lhN>{SE6D9s3F^YHpbgZnQ$?=dXzgx4k9!P%d9D=S!}=@xp{i66PZ@P ztLk_KdF`^f=5(r3-K~bjLdGwdXH*jwgdWey;2@j+i_O8X0h|F%O7T*@LjXYgd-W9K zz&g2-h(JWEj4xvE&0oA$6OV}Ds!th(!jvgGTe0GaoppdPCE?rh*W;s#^BcE$8g>X~ zuzPmZ2;d73d#@jA3RA1LMX_>K?UTLI1V6l(OWK-l(|hNU!=3A@3)7RFD(~O&xs3}?$&l3YYG%f&nTu7ah)3EYOcG9+vC`AXDly1K2 z{9V6F#z8icVmo|<9AH?A3{$8z%WRO_@=F%_a9iYbf?RM3&{KS|&2C=cvr|aCDh!9V z#F&l-hcHZ=2_RG4EOZFvxb1>?1O${4g0rK(&cu8LL|L<;$6D3tgGEAHr&!W1W($O! zgr7%v)$_8Ap=`7H~o*6iO7bdaI-f^d|PVfcB(7>1Dp{)Pa$WNV-iA4t*2199(ttoP$ zY#Orc3{>ZaU`clvE?BWBb^ufI`yzgQ#j8Of4%>ctAt@f^UYkqdph<-@xqG^RmEYZT z5eHE2#s>+qfDo5v8#{2Fys;61`y%qKn)SILM=xkBV^<1oc^$`Bz3bFA-;I8>1x=$* zCvK6D4QYpZU_l}qY}#W3fN@v%IdLNj+WBj_NI$uzmC+ckT=Di5!nQgu%@b|RW-LMq z-LVOAL`ARcR?x$u+tR6D&~7w=sfWHnS9%Htw3v*8S=o(rC_j#ZrK%9!WgA0|ot}vV zFW%RXz;V3Hv2HW18|bu&LQK@rbD7mV#C^Z!9DE}-3S(iQ!@Pc%UOdHQ%wk7YMs?#S zgA8s&nKzU)$dENj9X|wIMWh2y-MUAa4Yga8iC zjr=_2WY|(55U??w&p?4bR-e9i!C^BiY#PMwQKjeDH-ba-1n$k! zyk5|6!AfMHRB7)ogwqYv)q4GWSp72kFqleYJ()75QYY|=0xYIKsx1P7q7Ibt439`= zeHgGlTZ9js<9Fd|-(SSQ{Or}g_NJRTvEL+ESLgKHa~9ZH=ec%S7ypzryp+-Pm5jBh>ok6wQA!ysxyOGl+Aq`%%7j2?X zY9H?NbKoE8G-W0|k!EetO^w<*UFgNcNjhYaa+n{b$d=I#Wzc&rk+7CaE}kjYg?!&U z*pu3>uZ0Mu+m<8gW!ftabXrF6!B7DFjcp{4N>M>zmVn{ia64npI? 
zPc#DKQVt;d2~5zUBg!;=q%5+RYB?4PQp))ZB5yzu1N$V&Mrtc`A)s2}T-#ecwWplg zVLKn4kT|PNj0HoA2tGK&o-KzZBgNy*54s{qGRLSCrLwzosGo0#QhEB;#Y|NOcij8Q zN6H#u$R1e8G^M}b&O?)$f)lfnMevZ!@A+wLBP<3vseW3|TW5}FN07!hK8qDYEmwe6 zjY1`iYaH>OiVVXtD-G&O!L)?|JICW!A3nc}8%`CL*zDgFL2#)a8LaYpbj#@L<0sBd z0sC-2cxQTYY(R66KMox&BtVmXhLwpf1~?M8LCn|NBCdB#AkKtumZ2d<9=q!{H~gKO zU#%I}h}Y_;fbZOvNVh(d)!1jar@|Wv-3eTRc2B1U!MiUML03^siUpAmRAd`BK*8X`9CZI-vUH9Q)OCb!v;&RSneIqbdhv76VY8}(VeRo>j60eSBBzZW++3td|ozzXrloX&yeG>N7iZQ>6E%%n5s7?plr3ydH>t1 zaE!Yi`p;T~Nl$2FdWugAP3#NGXX=C+6JXQiZkY087zdtU%ve=9jIq)K=#<9YK@N|K zTx67TV<8Y`XO#$5y2PAFIL*BQwk7SibUB4m{45#H(BU*T4lf?@sB47YEcBs;k$CU$ zM-FZjO6=4tqYyRR9B4(anQJ}L6Yss1tO5MFfg;Gpl#<}}{3Jjm%+<7b@d~SEd2X75 zMhS`gjjo~1uD|98e*+Ay2?ZVGWM!wV>QNB_gGpvGEAJKs_XqMF(njJi`9v805b!oUk#8whKo zbE7$pm_vg{fQKIYH5Xo-CnRK-$x^G0es81phT*2WS`YK&qfD&0x+NIu#lz(QapFAc zh1wcqm~yvf_QVN z$INAK*&asOZhzSqIFdlp7gXnjo?dpYF7x>W!sgNVG7N?w7DXtV%%(fURLiED0B!9V zik_fRU|r}1!Tr?DZ>13J0G+kz*ssm6-h)NQ{bMceVo}KTGFCA8Q#VI`A` zN3WjySSo4lPj$WLneU)MZZbJZ5Bv&8fLmpxGs=$aJ%{mpGSBW5!}@@Jy4Nqpz6CP@{JdShO$(9e3~2#XWUYm#?sP`Q0{OI12Wvc zOC$-tVv!jA$_k$9yz>y7cOBgvmc9fcyIxfU6rVL*1~7g~r8rnMIa8L{FHy zyy{5l)!{rN?_OLJbmag>p8&y(N-A7=qr@QSU^W7m1Vf($7IyV-c93ig-+`+P1(rAp zJd?WO0?xf=DBEEztZ@n~{sywAeby6%h`QWog#@^bBym7>o3?;gBRp+ij|#vYCAd52 z_T}$Hdsw`E8^kRYY}K9la^f-&T=faqp_zIw2C}W=BpA_n4Ssgo?I?x2fl{wYZ-zpx zsMy#tSfsOL;3V{pj(|Yb(i2;zr01>sRK3gDK@Xd<=$#{e+Ue|*z-bKFI)J9sr8HcK zl6Ks(F?!j!04&goV28GYKA#MD8OZ}$eZQX>G2zZk;GSe+BB*kV?oLpzJX z8*6mbMLI^DeR9%Z6qrwUan_47IV)P$_wTuvdhYP4WUrcWwo^LMb)&o6yJvGdP_}b= zKgtxwhTeCZxxPBI^~_E{twg-3fAif&I`J~Pxa-Y7NlAao{8J%tQ`)8No8W@!Q#y{G zE*bsuv8*C@&9`_c-h=a;JSf~WLEG5gl2D~$qbOy|7cF)2efQnk& z>9oMnTk5(=SY?00SVRDUXw+vS*)g>!f4MxB+uA z6qblG47c+xJ+7Y;pj7Tga>vYBFmy<$q4rd2I_c$<7c#P~Q(=%QQT9?VOdK6yqRS@9 zcZ9T#7Ly@T)8uwaTL@HanZ5I=06AMi=WMkMF_1#=zqNqF81z)VaE+-4S!F-khg!T| zn_x1VV}KEsF?NJN{C5LfCq(hP?h=AGV;aM zy}CD{#h?>Oq0A?$0~gkxsu?)8+PPm~J`9_eA84&A<1n{NTn z>z>KehL+d2L=k$f{;NBt+aJn^;SyxPFNUuV>9l!zmWidE`l@B-=^u(8F%1opYVG zWTv`cl)C!D;99|)q?5?f>VA*WV5^yEn#cjEmZW8v$8m2*@MvB)NI*dB`sgAlm!wyg z)kUC18Z%vV!Z%B4+)SSncMKulgybg(&?%-Qs~Y<~mnLn<=9R-sV=iICMkQ4_tj`ug zd&DxiGP#k$R3v5xly%ASM?TC68ms`I1V`gAJpZx?CLYKl^N}JU)Cx4hfW=rn^3ndY zZr7K_Fr7`AE{zc(0{#{s1Sa_m;ng*YC{c~2&6pGmvk}T96Fb#ArO&wb1rt}BS!1L= zqv@50oJfc#CQK?Ujx9D(r?w~R`7vCMDN}?Ei!yXi{-Nq~W-Ym18@p=ORpwT`e zxfJqg@g7T+C+Th_e>2qMD6azqkKe*x!MtkMK+?vbRqlX#)<)uUjDo{kVNt|>QV3WyKxK9$I%6+BhF=8t?>YZebC;2>^nBQ>}5NX~nzg^VN zaZpy}bJm~es3Nyqi0yXl1|=mr?*s^oA6U4^s~>=RZGg>M8b2D*;N?#Xw%hGeEK}~k z^m`R2W*6t<%T^o3z=NjEOPk=IL!4Kc+vv)5gNFPBeO4JP164 zSia6oiBwp>tVb?5b@%BLLD!X7Fz-N|76vQ;s#QqoOD@yL!4TLps&j+WfB8` zO=C=q5MsHZ8J>>Yj9XHDSds@&U}O%iNbyL<*Y^`Xt+Qsg>0?ytAs|OETkb@P^4%Fzs$A5#*wTme5!bmY1(4k1FS&Qo_W#MGqzacqV`xB@>pFG z7g)w)0dh})M5;3WjNeCa)CWl_b#ek*$+CO*?Pu0oY#%rJCWLNxh&^_86!leGDorxa z&rezaR#Ok>_)-Xyv+XlkUr1cpFjCiKvmlWxl~NOrnUpaM4!)zX`gMhfu`%JwRo*tf zQ+-cfnR{4h*mkKpIbZb-lb~i#v6PghA|L15(>$A5vL&5fN3B1NfK2^u_orb#(6Lni(M`26=Kc0RUS6}f|8+eos(*K2#2xfo6UONa z^8LnL?i>o|w%gxoVVTBt{y}r+kG1n{`;KJ3#HYjo+T0#Fp6Y2S!qg6{9Lw3~PbEo~ zX(xR6V&y`gepzzZ!2gtdTWPm@-0+6jF(>@+sRC)F_5a zz01KWhQ_HKq(g145Dte7zn}??<8b&Sn{ioVJUZ5yDtZ}Yc?k0Eoyx9kvo&qs&xL|A z|B-1HhF7pOw;-$2B^p^%sjdNk*JhtF1iH6jy#+R{XleJkDhr> z<8rQ>t`1fejIRHpoAj_yC?h#s6RWlj;7NlXa0d)htxiCvrWwedaxw{|9~p5x!;iHE z!?)LjSI08C_4!!<-3`&#iB7reh|%IywDO&Ox=K%4e9A<+k^(SpQugpAt!T;eP>@YZ z2o=|dL7(EV%E%Mxl@G86u6s*)0<|FZIfdz@vzr21YDx6X7n9tjok0mdm5#bVOd(*oon)(E$42 zpeH^GK_{ixG~+gBdDX)!m~rv1w{~oO{r2nV)&m;kVDenN%X6zrxWCA>@SE~i;Su!q zjr2uDpujiaE4d>&v%R;^kW$&^$xfeQ!>J2vs$*o#=E*UPypLa9ZJuH8pxvM)`kKdC zjcL8 
zY7p&efO#6Xx(b!$SSAaHvb4sAs?>Ngq;GJ%SFMDChwK;w$i@@KW~omqV6gik+iFUivb55JR+#4Y&V*TwPUSRtQYU9H4c$)&-pTXoiz+ z$}){WA?}4zyhJ#d$Db*dF7wu>!$*6`dG%7Gf)iY?l&4<K633r-IVZ zyAq-8t9*Mtp9e=;%YL{+!+7i}kl)8IBD}?EwV@yYwQZH$LUL^kEn%#J(NGmzElYwo z2VgZp@^S}!{xuW{=PQPBVi7989~&QcrN+QmSyz~?r(cV)DE6rs#@;LCUR@^a`%{Uo z#fXn~R1PdH_Q{Y#?u^_4Y)BI)osjYDv?1*}LxZB3UN%(>ob8;&c;cQUB&TbZxrmtp zffqw<80zHGVz{!$8Tv6(wr}RP8;-F%fA^+}@La|GtU~v3Mn(m?O9c}=J*ubIs_Uz+ zaL)lY>o3JiX{o@=qcj@R^4Uwp%P6?bo0zTbr~D&|h}%!Z2yh#1to7-z;fdB5Pf>@0 zn>08ouCpydaYZ6`{N4T6L|~nw=#sYzKk+S}C7W}jZ#kSqnM7H}BFS1?q!9A>>L;6P zbo-sJP95Nd2O8lGh9)P!=l3wn$>J}4Fgv30kg5Al0P2l+-_H|(sA9c&Z8z$==fP&k zPgE{f`T8}$((m;2-Q>SVEqHjza7MXOWp>lUZzW6$BXk>}L7Yj2u0e>Y);NB2)CE81 zpb+-yPsI30y~EPA=6S?hmCrcg4y%uMAxP}G0D3SDd_bevd(jg=H20Q^EI(a_wkQg* zF~R*t!oiU{l~ak@G6W>J=A!WhxnFFal<2c9BLtXtIKi6P!p-6q*AjAUdo0QI3-Ydo z&YTITGC}z6`9x^4D`S>Nns7h(P(Oc{B#0i|Tq??>6r_`YGDVvWjP=+uNe;_825%3@aT!7xk~I3i)1H?DWH= zq@yruPe#FyZJ|h> zo17q}N z*6UBO)^L&rrWh8I8mxS6EEOmp^8#{9a7`_1%duhPvcQGnvPOhsq~?VuAFbRYt7Y9hCIKP3L2*me0yMi|G@*ru&k4Qxv3jlWDd!JDl-aJ$wdGRo%ReoI z(^?Fr5V#HVsZSm7r{2=gSHF(~W$0z3&jz5B_N#-ymmX>d1`4K6Ev~rlEZc9w%758w ztUwbyiR6i71(UX4R;f*X{3VIIfy5pb@m^%zi#PL%kU(BgQXQjGMN?HFx)QR}K+rV& zw&5ykOUterg1XNI_Cyh9uFHOHc>fPI#jDzAu^PeAm< zXZeP}?)ryUQbS4A_!9xa;jEZ!YLCY^U<^l*IKnUnrdF5(P%ZoN+dg)XG@`&6l%_*5 zM(W!bWcRN(lBHMcB3g{CI|#et3;7;;G@Qg;;=m}Tf68(5k?P>yp8-33Kk zX`8K1cvf(~;Md5L$KrW>D`i|N45_~67psz`h(a#p^sXljOXg7h`Lt#hpvs_<3Bxgl z(bSiT7fhEJx*SA8H!@LzMy1bSHdC|3xH8e8wEh_Ir?>C=kV^kh#hp5;p4s^ZDJ#la z5C@=Z#XI;0IRuzT_dPD6BP8opLzPJ_Z_2g6Q{EGc>QVgtc5-^G}0Vq8k}Yvx-O4 zcioB51?%@_QSXyimizDfc|l#D3?f_@f!La-h*=mbh^~Tk>n%fB@k5g-{#SR6(-O1r z%GpJ#i1K_eQNw3_mA7n~xrsgWurw^>DHHGz?pw4{x4ognnsA3Ope!3;qq zpg5vnEdxaDKJ6}|d~!Ga`H4FH;rUAy%mXz}s^Y}=pj2oS-!B6BVKVY5%_(^Bw>(5h zcc?{1#1t9ZXobTeI>q#ipZXY$&yBt-^dZ58`cGcDB#*)umiMIqLMYoykvx(GxbnUl zUPz`598wH~Z4PdjM7GCysfb!=4b9gD6H5&2+qpssxkW)Gx3>Y*QP5&w>e~dVmhcwa z3?j?NK|8cFC6-DMj*{+L6&imR|LUp&O+!ya%Q`~+8y_wS8wL62xuW({MfTU)5Jm8!b4auRd`x zg1;~(XJw&L`dn-rLpcv!Yz+=68Ve4HAP-RXprs!Yufn>tT_&y`eeGQT8obcz z#N8NFTBVI8xd@6;q81^xcl;Lj~nX-BF7#@C|6k^W{d zT?Q_TD#^K?j6zJK;kR82e57@kh@W;PuDpZ8;;WWQjE$n-^&9Y8--21uSRuD2McIuDsPA z|8g1*#$-p<6Ct)l$?<5*ri7cS-kTDt!B*4Mm*|to=2f>J+NIyU4!6D2j#>sR;?-}3p}>@%lX3QtYY z9e{H1$5u7^5c7OY7)`V#1PE4ehHk=S0ulvfR^2V!lfx^*JN+FIbo2GQ~0{h%w3c2l<$J#X~C3GTBm zP-(QZz>lW2bL#hA%C{)#M+`d$TOn3TMK)55orJYn%CvRekyZ1>`KC2U<#t24JbIub zzoFU1-pC^cU$K#&?%{2ZQwhI}-20(6>mJec@UpoD@ae~& zqv;hSbI|Z_#1k3(>7dyqM-ma!gkB?oFbn28;e6PtE#RM83uz^1z10St)c60%mXj}X zElQad7=nH?J-3XHN4gfLcopiYL)T(SC^96`*3cg|VEt~o%43%x>vhDm=Tlq9vTxG< zepnlSAyJlQWkj}MV)~tFV-har+iLOY!}Ca}W|2G(qK9 zgb(h|>EgMbkTZ%0mPAcOOxCl@Pr6+KJ*A1rEvQx7aOyEo=^_?XHsxX){9q7bP+D;o z?&^dLv5iHzm-{W_By<|07g<6)YNOUXa}K3xx4n)65yPD8kHT7b{XJ}Z&1q!L(l|Ua zCPmt|{9gEQb;rmO%sSNuZ-I&FpS3IGDoErxX?#6+jSDu(6;pf{C5qIGwwpf61&Rsf z=UYA0=SxFFJfdh!#PB!wTFk3XfkT%56<8-?9*kvc0GX5F%I%csfa73-geN)-zZOn8 zi3rx`iH1NZEw_B9dMFTXU*qK@Gh?@46FSQZYia6b;5(=9;Yn%`0iw?J4rbkEYKnt| z`8@a_syFEDy?zt?{LpdP{xi@c^T4zw4OcHePE)2tfjo>|`bx4b#^CXVaN^&G(%d`Tn(f8?3 zF%!lg&1noFqI+ptasDCIQcQzA;XyixCucAcm8tQT646L6ynp+|1aYFZRL|e?tdtwI z!_p#*=XYlmMX0m5i2~g{G5Uh~D;$67Y#XPak*zN~t5Htv4f;RnAa)!+4$Hb4O*Fjf z;g(Hntx`Oi$FqQ2RCy82$d%`wXFsJoOGF;8wN3ks2-@=tbVgry*pxTZJFP0a^YoQ? 
zN>cxh&42(@4Y#EyoEC^e&TFR@-ZPW3I34o~3_G3l-PaQpL6BC_zi9I}&=Tf}$AE5a_I_0q@3(N;FM+I?R=8K5+DC<;F*7F{S;;c2au2!124rXI7dgc)sONF2h9 z_yg6@OO))pRDeqL_V~o@u})$1JyhH0ZDvOgk>1p1@MuRQ!Mg30u~Fh6EdGZBdmIky z0bZO8eK}9W34&dAt4iU++N{{Rf8;zD8~lxxB^Vr^_#3VPw7enW%u8WiP}E|@yzWaA zmF$7TiJ7n2=LPvYm%uXT9(^Nkl|9;lI7Yzp;8)z#;5jshV`>sY9Lxk7u3`&2aEji} zxK&a@+?F-d+D!@O0j)u4*>3WOyY=G+w&`XqR`Qy%2Hkcj_hnPnrdRUr6m_PX{ZlK= zPUyZGb@e0pMl<evJ#R5(@2$S;T#eB-pMVKHO% zbgiW3fMb3wfd4ERE(`iOr{m)$r=uqk{w>@A6lI52Kwz>pV#UQq7xuW4zHlirQ-FrO zNY&M^N2s)hpT?w)s5)!eJxh9ABLCV4qpGfDkiGGm4iLmiOmLPB}wlf8vRK8zp z?*QL6ZN4_)uW2n8?CqjRC&7q4$ts;l9B~5FpWEj>4rK$)rS?>t$1C()d1mZ+d`4Sh z&_o{l8Ux&)3q01=osmmDQ{Cpq)Eyutd~CfR(!;b#nk(C zw;^!z$_g{czPrA+-k)dcE^A!YyVZU$QQ5ek=oAm{FCS=@)O#U#EHnQ$!@ev|tAS}- z5WeDLy9I{A-wY1tE`o)QlMt%_*>?o8x972D9LdGM{ZV`^uH zEE)P~tOHkLf!3C`_a+N|N?*5?7s7u$KbiVl2-n6$lbD7g(b06CIGd>xoxlympGr&> z?t;x2m+R|#x|v@IrwV(^QB7J_j{vBuw)guo&4)yS)ncb93CF0Gw)Dx&XUSEt_K8=-4$9h;PX83$swF)X zqAzaFBN>mc#KfDF^juD}_MEj7^WXzmfHkrh;!aS)EX3;@xPG3it^ajnD2fR{bb^Xt*A zas^r%3^feSD$6-wRwL{oqC66gO=61wL(B8|`gSiNKPDkU{iL>1_KT!hdhnB7sa@aV zxoyGgv6b{i!b?;wGPFP&Te4rlPo#`^uOc+Z!8t~IhV1N&3x&low88nKb3Bf-~P!l&IOpO)VO z5pnmrtQWI{S-c*7O@A6=N>48!pYSR|UTf z`15aANtzkC>IUzw+EizO=UQ9|tS(PEJ z2!+GkGtH}d=d5>!b`{R9#-ubsis(e_S-kfdQs zo0H>NPm@Ad2UvS&M;K8jD0Xwb%pBw`TmK9Iq+$*U{ zs9EarE1^j;WSLVV^Rq|>_6Y!T05XF%mFYA7hLHWFZz3Cr%Y->tKFSxjEDH z>O^M>3pw=6pfW3YsEUWXypWJ`?wTL&UdFJxgehAOsd4u$TH_+D?>)T$fF3k!t|dtMSd~1 zm^sAVbO6e!^xpy>j-Ma)3VI_Tq3ESc3n*=8sj+bZw)R$Vghqo~>EArukBH44xVK*_ z{a@Y;pvFk60<&E z3Co5G)gT6WDiep_L_1!@%qDhj3l%%*KL3E6JwUcvteSThBncjh7+p6SG8Y&2@*Fx# zIcQav{9CL@fGF43)?uR=qUv%EyNoc38NGn+rX*e0oC+3O~|YcW_DG{gU$%1V>G9PMlO<6 zQHUEq^2mf)k$vBCP0%sCd*TR@?;!H@pqJ^I!I>hQ5DuiG{f$_e)L?ZN&!Y zirNGL-xFBf%Etm~8X7CzgTJ7QSa&8`eW7BR{D~3&gq=ww0HPg~9%5|Wv<5L*nY)on zx+yOJffR*M#&gFj-H0n97hu?9CK)H5JQJ|Wt?Ew=To!?%N;&Q@AlPn8q z?0RUfVD|fh-;hrE>={5e1bMaN=;aLN1Z-+Ufs|+X1&%-p2tDrgUY=x%N&N%L*8FRC04xfvoMB*0Gmt%__ zIJnMu_)9)-2ZSq5zPZlI2@8Q>>HX<3B(8Gt2VA4RA!?e|e20XWeI`0Cv1i_^UvHb| zO-I+&&(rU>&3OXfp@Q(dNM1vaR$js^>sN;B4SOCjpH(g{+Vg?M8;~zl#lmUh=XbFN z%IgV}EzvtCYOxP*v$0>@M8qP_b*Q5aTp&(ZQZknCGx1O+V_bcA^thk+%kmC`f2Wx6 zv1^s>VL&Q52~w^~#SMOm76uHy_OG$-?;)l6@;wU~+_|$79MnB>1S<<8{k3Pzq4}Yf zl0kk*P;YSC02%dk_FxvzwSiz^$1dnqeB<&0cUrQ)rucbo}-5n=vpVA;}jA-^ZB;T?k#29fOHP4P8}AO=BFMeT~#?Z zAho^*x!2k-+XH7@`~Cz)p46Opo^#?|+K0?zzox+KL>bP1mKXJ$8=+wcsWh07_Yj7V zT)8F}>d6@=iLgmvelBJw2&*-W$wCuR{zwjV8o=*58g{W3S)omef+=AuW85-(J`9#M zTJkeQnvAcLkR7o4Hl16w%6~c8k`Bec)6866+847AghRM%5wfgxio7q2sB53BZvoZf zA$t3+;yIa^ECswjA0;{d;$ge%q!jVI9W0Sj3v?ZeMk#ccJ%zf0B49^CbBB|}m?5Zf zS$f^@I5J$9-kArsS02-}DzVW$dRe~X?fuNpvBc?*@?mi-sb%5o^c{Y>5}Vv}uMDn! 
zEi!d;oO(JyQXb0rjr5Gu_w+#JyLW7%-x4dMe!d^GK-o_F+72TPppp!dX>uG}fo>la zf&pEuubvSia$Wfe1T*nqP7pfGDRvYfj65G#FnVZl}uQ`07}y z*Af~D*ZS0@cm1$##UDLf6pz&AE`v;HLcwL6`1&i$|MFRe%RajkMIJ&Yi{iv08FVm4 zt6|J#U?LZ<@_fksyUq71EZ4-edM%y!%Ju|KQEwOjzmOe9*&jA1lV1+*4v{L!gWd}G+^NeOw0@>*kuvGB4p zjbAn1zh})MMPoP$66unoj61**2!7`z=VOP7RsqB1!04g|mu0wC#?jj+Wr<}6^RmH* zdK9}?h?~{UCx=;4j*RgWq4#m;-y6)Om@-!rz2yi{%&2qTZw@idmgfY6Unux`9|YtL z7I8b1-PT|;?G^CUtNGS|Z4MO_L4P0ki+c06SKr(^e-#c7>tD*UEt;#Y!MmV4?22pT zKGX;A^yI4%Lai}0&s@q}Ug{3pcvX!d``O2V z*oXXof1x>ojNpAYuhhI+lxue`3wvBFB<2xFfE)gGD`;_T1usyxE|KLAlcuD_B< z%+uF^a8Z>`(S+zlV&h%B93wnV9gvbsIfibSi}L7!;SBcx7MWqz=@LK|TT&BAB9FRn zneE|UdHFfF^JotyIR&VJm5(~1K*?t$bO1Tgd7WuU2PSn?m4}Y6psl_ANjr38%5J`O z5~DmSM7Mna%xp49mZllD~Zc{a{}JCaE7>`V>=>^jZh>6L#pPg57qg-|jrvi)SSE9=Y>3 zdvSel+50C-?DRSZ5Fckly|XkAObuYt_8;g|mZp2Y&jgb9U?~YsVVH%w|#35@2>< zjK$3C-DCj2B(oHB)5JnC9tIeJBNI6*^uuTgMI}$J8uVPn$)-|{+hqX$#IXfCeUhaF zgfN<)MS zF=L4BeP_daa{u=uWEH+r_9T-PN%t2nTwvmLjtN;3n{BV{yJzWv_ zW;ojqXNJ*pn{HDtw_j|!=s#SINZ+71d;&xBGuFPRE+7y>9Yi^i)g}dg`FG1WJm8Lrv}2@%P|^%MMIlC&q6zv2?cp z06+jqL_t(|Nd$vmdOn_EJ3FOF8MKrK4cGYiF6-|fVQnbUV(_A^G09MfwJ6{AOMmi{ zx3Nm<{Vrs$QU#i>7bkY>BVVFl&@uQ3l{X!#w+@^er0$XfKv3b-ks$J@)_MV_e%8z& z{%i2^{3`2L&Yn7F^H-Qnrv_}t&H>xKZ_uXp66|vviJ}i6X)z6Eh8`G!fJ2N_?`t-s z@53zm90HsYcJ$pD)OyqGa$AD1EW#y9K>>!ILv52r3Q=#Ux44IzakaFJD(^+4GSp4% zO;p1M=H51{wmQC=#d{AkSAK;~B<2k`aV508!VckmhU#!^2Y_>Uf(Y6>Y|p+?7$hcp zHrcD<^q`%1=VlgdmF*yQAwBM8Z>1ayaj};93{v0_SNuV_rr}y%Y4q6T`6Lq4JQ2N5 z*<0^kL4Ac=H$iMEFj-?GV&G{bz5-4Tlao2z#K-N(L8MdmwMw%vwx#uEL}*xEN!#oq zP8FA{_T-bV*zcZs9Y)W3FJcJ_NZhy%!_#h-4e}3bYLHGefyNNrVha*Uy+;mB+FyQ= zFhK{f=?o!ZVzW8RcI`j>*O%;>XWm6p)eAwjqs}95$0Up*jX_ya+fdZhT1$7*(x;Jn2mZOJl(0o>OICy-!!6zO5n zrJ*wF%3OvrL6-4+-9}Ln_Vm*(EjB)cPTF;s`2Z3|0kvHU$A}i|+Zwo&m+Z`W96^x! za;%@4nVGd;Jn=Y_5C%U3`JQ|3$HjfGe-9HC$U%P-FlETg`vf(<{v%AsIz8V_{ij9g z+DhiN*Ir{1^)mf7;|%ekhrZyna^@nYqyDNcO5$QdNf|J1;Lv;80X6jL?JtTk=o4%g zVvOX;5Gd2{mv935#bb{`Vp z67iAkhw7l`{ePSGdPd3Wke~ka2Y4%^7AHB`%N3AX?+PuMyV6wZ z4=TStI{YRyK=Df(O-=1*91g%sSksu&uHdwn9$9-{-~aT-R$%geNZG}mqkAeK{JfDdLCnxV*Hph1U{S|P_`)LyOo86D=_18ATqVurfF!qQ6@E)hNm z^pE!uAFsz%VZ-CNfA7g-Z(_0Tfb^!YP|}lxAY-*0BjNy)J5!kK4}HE@(0drVPhm@0 zAriJC8su0&nkDoNa~yW)fbsJVtDzX+nnf=$-~~*05^7D2J6q6+Hdeabj+za5NAZp% zsVNM6*+#UDdIfjr44Vkb`QfJhJM5NQ_h2vUL&ZmMQ@TZwXcDut^VTevY#-9n7`7#3 zG|Deq0a!#&sG>rXi+Uoi@H}bkOLO+y-<`5oUcZFY182qAAjga*$t+EF9xXD}BHs}l z0dBr|2#IDt;6bM{%XGOvQz4pc_d;004zMXjk zpz{u|fy6fflLoPDe)6zPGg*;srcQMCzP^?nJUC!~|M#ErHB-bcM^Y+=NFKah#`RoL z;+GbX7S5ND#(1B|9V39w{v!p*zX~u`Z2Drt&Yx*oaiPEjBxbt^06a0p)*T}-o&*~O zBB5zfwFYtuP_N^eSu3+$$7(O$nk_rQa)a|rlpE)gGiT1)Pk;Pl`WTP{bHD%oPuc$c zhpCB}2Q?A^_&1rK#W=*RuJ;gA55U{$4Y%)gxRFfb)B@d~efC-QmwMJ!j+2v9NSj}0 zA|RC*FF7!22CW3tW~?QXF7pNtG3ri-ODFB|?)En(q(1iP6Dy4cB&J3C<>Qa}c-Ew5 z_pW_}lKQHvgtfV(08jJ`>uUrc6q(=ayrZ2fkjI?Et`@uhM~F+a;zP;}A71eK=@k)ECG{#MOU_ASU0} zH)J3C_-C+_PI8aBF!2n%-ix^YNAIY-{&Kh+<*%_G9IVzKkG@3{6^g+T=Q3cs=k9)wL6=9Rp+&lKqR>K~(qqula;D&t)rl6pwGM)osD7lCWK`pf4m$f}- z@KU?5NQ}EPc7jkv7cVZ6uWT&@VwH_YgNdgl5W9Ok7kt)>I<6P#X5ao%CJp0W-jk;; zLvNxsus2prI}H0k60HI-X#+(8lln^%X32yPphE)2{k)g(LSO#!o%Z#we-x((?1iOy zRFYao!2U{0gsNHwP?rcZw8SE8HX=;nZa$0@I6T`(MZH~Ny%OLGsE!lo zZwgoR$*D9OR%)}!B7KrITXIx^LDUElm0(MhbbQ2?=Hhnjbk$xzP5p3&IDP80J@)9& zpfAzX0n~dw`f;EAO9~7Q5jxYy)gNRN6vMjiLxW3`8G+pPqZ=rs7zcUOw9h{Cj6L_< zvkvqV6FcqUFMo~dP!jKlFh&(c%)WgNww>+ye2^<%XVu#qH;i);LF(sKJS&SOfC))c z6HWD#UAy+$7r*dTzB4&slB$n7)Ot8B6*AC4X+~0*$`+*c?WI)v1@&aR9OSQd&|hcb z{BU4GB9i1*VgguYvG|iu|CGM7g!&Ul9qeElYm(6Pp2oP|RsMQczca4Nqit1RuDfGp3=PSoSW zb?q>|YYJe>NUn#8VUJ8+Uvik z?4$+Q`6_nFy_C~|Nduol3@Ci`TLX$u#1JT}7OEz350|Y;QkbG1$ob*HFYUAYK5>9W 
zp%da6+{2>{J@6==iW7XY6VwU%fqL@fD+6{dXpvyXo@S#(MR#9V zsMuM;H61@OZEwDHhVu+j!cl7xA}9t*%cWW~K{1mIj9PB?b&RWZT(z@#?0>krYby;& zf(^U~<4gchazKM1Nr=uRyc{K^1WzZ?Ft`=Bq{9bTlEmuM?zZ3h)+g-~c-Q4{XvpQD zCv9IY^{@^pZ40!k7XNDEq#(SCqu4h@%t9stDL}7Q9A^Qo*@^cWEJ_wVh+R0?WGx$B zio;F3LgN6S)GtU)v`<~$hS=BAOoCPxS}f2e=qvAPm)nykPXSDiGHGE#1Nh%}?P|9X!rN7@3qtPl=X+UF4kuWy|rXd{^D^MHZC#nhN-DN4oqrq zClmch<1ds9Ia38CEhQHmOY}W_qQf7G)Yak1Vl9&lT_31C# z-rWaWGmvGB@N`t9wX>6nbOJw#_)63X7-J3?Ymcvgi5}sK04+MGCFspK$ur&nAqCD} zfv0@`2j4*}f{h72rt8-WOelS6vleZ(58g{a1o*G^XbZr!o^O=@pZe@3ALyrJQ@hh1 z8%q~E@X*5^k|^54Mz{5XY)TTNlNcr%*l^s<+Yy_=C}SrdrVBKk7?jj@5+PyafRhHL zwink z`AZM2QNiF*8N*+?4z+>JghiF8!(M;mWd|hzX;TJcFEelWD33ZCGbLq4J^~qN%IfB`eRzLe9efslz?4Aeq!c2#iDdoW>oK93AWKkarRC~0j7FEXr_OKP) z!|7$nTdc)#RRm5E6i4H9b(I}|iJ^Cym~T?Soj<$4_XU88U4Gflf(EgnLf-!Se|ylr z`qd+N-5sYLm{RWhLJ$zcjGMrV-LI5)&Q#}V6#$e0e}nk+MDO>&9~3mx$_Dp z3)r}tLGK`E5usd?j|`&PcMH)3T;>dU7_PJ}&JL(ar7ni9(w7;7XYT`DI35vHep}*{8xAy+6zpWB>7oX7k%l&@<_d*gLHyKx4mW?K|yIM z4?l1KDb?p}mHo1Q^~)z{E3F5sBmD+oa`llWa4I81F_bZk1Ac`h+5{C3(c#{F{%!Lg znZM-dq(gtBBIvhkYIt8#hD$v15_Di%w4eX-y8s7$p|8(@N#2i=%Ea)*9~_u?cTHk) zckuvB0w>?`SYw|`hefuSAkb_GP(BSX9X@!QOHk_5p=ADL{SO6=+B_KG|Js?;e& zQ0D%;q;(X5L3yv#^E9vktxQMJchy@RIfH?`1>{KRJ7tkObVgu zYKxE(6M-J0H)#Eiw)W7XT|-59rGY94ClGeAR9_Jcb(b6P$4g zuPq=|HyC87?*MCUO)|{hOhs)bh=auV1ZuX7V(=m5Frci)JEbAw;nnawOxeuhE^=O? zPAkw*lOEi70Xoc@zMaS50DDW(4jFkf0nW2a*z%66N2QTYPub6Y_EYYMU6Sv+?^8%j z2SR((=IT9b{egD5(ZiXXPkgTb&6uLtfeeLA#*|D_M{4jBks-0AVCTVdPgMd>*x@BEMc#v=OSI{u(n{-nG!C*ciN`5$wj`w={rJ1M?MrnHpARIE!lD2ixR}-SohhSyP4@NgPIljEChZB0_2V zak;k|0XyLs-tEGBZR*p&(nEW6a==XuE>T_#SROF)tWnf}xR?gNwUkmHvhae*+eqv{ zAF&8^g&ZDK*$hqrxcsJw-Daq%nlN$q-RBuSE2$~0)K+C=h*uaJT(}o6*tNbI;v;w+{v=N@a%DW>AClb2e^cnSi%0`e|+9P`DtR?u?8%gQAi-94;ez$ zLE~Zup^hFTHez8StC--n)c)Vt3#)-hiT8P-#U>B`}uI zGTv<*L?snS%+kQtnjlS7)ShzgkkBmkpd06*cZxCrx3+K3sD1P!hwQ-Kozz2<6WP1U zcw`c3er(+Be{=I!=o3rs4AY7)(ofWQ7XO+Zpig4E}dv~>fu-lZ8$Dxm3w(?qcNp7+HBBJjNc)80LYXd~IZkj~o!L(D~h z01FUBuk%oOHD*M&8#rJx1=hVigLm0;zxy?kewIxkCvblFCjCgWYI?YHH_}x2kfB-ze-h+lsPn3VZ;92M=#|J z1#Mrtj*}*p-RQ0M5R=e^S^Kmp0)ix_DvmEt{Q3u|c<0^4UX$V!6D!@9Te`-vOK7}a z#V#cPQ9n@40ujX!AtZg-YYWfws31r>B~;iSzyBe-{m9(_ld=~#>P)GKf%=)G1Du9t z^PB{NdRPD34Hs#oL>fI49b&oOwDhA|o%oe~dAuU$h`#azCZzCPjddQ9v=<)qWOa!{ z_w?U0;U3bOQ*k2eK5uB=>U@?L^gB&uvq%6rJ_(HPEnJudOt2pA2WBD~`UHYAZ}5Tm0;Z*N zgd}>2-?%Wtuv)0EfJx;*N7d{~AW?KR;1y_a5(urVlERa?3*xT;hP5icT zo4{?mZxF_hjVFUQ6-97H85lM+aqdix3=XNJa7gQ#XcwdfIiC0= z0q_k2LQEBqX)`pr%E$S@%9(L7;3_ejo@uaPdD@^e5$hkE13W=p^tC*jAa6i%7E`!eEe9PB?BW4xB^|>)-#%M zx~sZ)3}y5_C))xIEP&DOx7x1d+{KgjuRr-0bu9XE%I=dYZ}(2W zd*X@j;YmqE4*u<$I&2Sp<`IY_yNlpN<%{?M<)b|UqrCpyXinF9bb7;qNjbEEN$Zuw zXDT?@JpR-VQF$-0&W(Nq&k4Y!u@odGXO@7P0~2%%UV*Ba=qS;Hb`~fC1Tse?@$Qs% zpiIP?2k!lh-F54IK8Ay>s7?EAg^aEZiV|d>s$^o&HH1Qbj`Q z?FX1vX66VD=YDiC;7SWly`Pk(9W?)j}lO|F)z@!O&wl4saK-){&9_t$l zqw)osZ@74kugbHVy80TgUf1o|;Rn8kx_=!o!hS(i*4q%0F(7idO>67%Z3lIfx=hX! 
zBD|VN6?9;N$w_=opfeRP>28f@<)beo5^>eZgF?l52j2k(fGL9wBm>GzJta_TB3f%E zAY)~ck2;^$P`FCM)fFM0-Qv;c&FhK9iT`(Tw>ASqKq3m&G5(o zdj##WkALbe+{34A5PL?wxk%ZIfC-`p3us%i$3QPd?pEjUGik}bHaoq{Y>TqtLD;Ob z$zZydZ9EvjL`P6VI)Fnb0iXcC1OwH!9J1+TSewzMV`duH?4ovrb8Yx!5 zBsX{cqksF$RGoTIQ>Vmr|8Z$)`#`_i`&6YXFey5}+Q7X7kqTWRQ@K5zT)Gm@CJwntRj3{2`CH+tw5fm5eL{Z6gA6_`qmC42l=-}l`B zWMh&(>NK-o`v5V2U&W%$V7AyKEo5>l1eOogahj zQn!#H4tnE-?X2 zT9-)|B{51(M~&#>XT|^p^o-Q=!%gvA*_ax{!rbf9QXa2gNlbcNOKa9kv|bX`DH0ae zuU_k$=+G&(YrS`U_ha{c?(ZoxrG@~$>ezc3gw|QqChr7HP&2r#hl-_Hsu-@T^8`+s z*6KP;%QwHnwK#@4r^Z05AgM@=*2HVhi7kTjtao<4=zCI31@T8AWR!+B&TB9Okej$X z$>icGUANRW(nV{wp=Yufl$Td7vmo~s%Aj=%B&OL2oWRpTZKg$`QiI9Hr8P=o%D(Oa z0&6YS?RVZx47q#k{*T>`Q9cVZzr-TeQ@P$`n4clc z5N_5zS!Vq#{Jg~Otqr`ehEW3!5gkGs4*F_t>YXRPUWt%9@&>A~A)bI&_LuYLuh?&& zJi+HI3qp^uQQ%J0Us>C^lNg1=>ZMYz$j&0kDbP_=^!CB_qPTG_R6^8}Mo^ ze&tHVe)sGNd*;{deRUR30isdJ2I`CicVI#SP7{*ITMiPa z)dWsH!Qd>l-pJ95hr+c`1mlNODN9O}gs>CiZf1$9~o%H3?yqCHe=dJeCm1P%dC{+IzoL|5V}C27FW$J;N8-Gv#G??!qY~CV&Yk zMXJ=*41Pz&lJm9NHJXb(E^^=I+{pA-#O zu$e}{w5rGh&}C}pL3`-IN3IG?H(EM@l9Rk^S+@hzYJJfjds2XjJ(ndLG9!p*Nlm6c z5EikjM+Ezn%9Nw8s-LL8Y8tG*uKR1i#6yx&SBd0u$F29;eRqG7dNPp}Fh#WITXblo zN&V+(sI&S1aP~Sy4_@zCJWAzi15*Lp@lp|q>Cq1gOxU#Hmym;UO1r_E?Gi0$?ybr>f*aOG+fn2Gj!@IkUUbInd< zhtiBiYjEWF5Y1jRGftsCNumQ5bFAW+uu7*dYauC8lXnc5oeyj=K6McONfMG?S?i)S zGm$}2wi|63?!W{nY0n{nle>>oq!zATd7KQEmuKz0qp#7a1f-JkG@}oSCV7F?k!q4i zDhkp%MwpvC+s5}4_2MPJZ;^awO(pijJV4H|Io{g?baVgdd0r9X~ zVasZ|M;*W+dxZlNDh||;^1h3+p!ed1n*IEzZ`-p^9V5OREsbM|tb4Ay&~)Ahuc{=w z5*5>ggnVcgY!1Q3Lb)?Y;MMcKYlj@4yK35in6Aet3UXDTDOn zmvq?=u_3AQ&Prlx15{?M$!p6&z2Acv0x6$|Ej{_g8f;wXf=? zLrnFL$poUJ3n+^8O##zmkNpe%5~q{v0H#Rm5v6lI=c_UCz<;&_Q?a_h0^9Gpny!Cf zz!%*Hb-el`J+%!`%3rEH@s9vZAw_x(v{YZ>H~m-KTGFW|Q$@C#K62mmA2yya8tLj^0tgOu2(Rbbe%p^_F1*OhHn(|IY z2dJ|RMivknC4|r&AA#vl4zSasyjDfy8>DAi=6l z7(ufCQ7)6qH}3VaSt)j&;^rxiU@XmILmWDM{Sc}$1yOzS@wWlaleSQlHHtQJ_fwr| zEI$NBv@3O&>QljM>*)F=>GfbQ>SdAW;1FS$aE2(eqwoTIPAx6e@Dhws&ck>&eUviq zVWL6_RZOJ3CQL7-s2!+wwY__xZ;_ByH9K-AO9rwEzk(A4%-PuD{qngg3z0Ld&6?t6 zMknQRa1!^V&4Xqu^hthlO>M!6e{$o`K(lifgT$oQ1x#`mcU2yYXWd?@UfeFDf8)n> z9iC|zE7$dWct{dcPZwa43OMMusGn4M)FWbabmkk#<%2z{8kNv$RG2(H_H%#=h?YoV@#-D@JXtPbmfnMrzc#7N)WetzY?&^J= zcXZuf*O`yZ8y$~GVv@(LtMcUhFl#@3^m}MmJ~%M#6fm)dNwM;>Sz?^2Fk&(tD)q+% zn2vS6j*GEx{q6tNlC&gFE3YTlHF}3>OH4GJ$I0t3XjRvsy5oh7os1ijbTk);!-ND( zRXTi;&^2Xd1uN)IwcWO6@l9qfQX4AYxX;2h%gOKon4C|C&Leti-Hi5qLQdnm&Hx`b zvNUQB?&4CDojG-ayd}LULIpN0)?#R1CSk3d9JF9>*Y0uq#Dj+%m=u*QJJJ}#T7ZO9c&dD0(FEOT4c9LW|%3VimX;7sSF~KjM!`J zg8S&BFWW0`okztgm8trG)Tc;V)QAdr{S}@`v{5)C*%Jqc`t7c}hlwB9XM6WfxSPIQ z)-Rn~Mm@M3(^WA5jYc6V@pqOP85kZ5XC@7#Pv$LX|bh`WA zeA1oH{e13!aPRHz&+X38Nlc2Jfg%JJgAB7>_DY%&BJLJ&|JcR(;2;PgLJD<#?QdSO;Et zPdrgNKeP}zWqG>(x?5>ifhqSzxRu(-`#jEj^_f;Htys0ve>nst8o^d>c`Y> z6pja(W9sc8vI&G)x%_mtHn|a&P+*r-wombHn3q$t6K|GsTWYnYS}DxPJm=iCb4UV_ z&P7N24q(drPV}{|IdjpN;1>lxnK?jqCvMB@^8rkpGuECm%J*e$X%d(uu6@^Ofj)vy zuU~sky7Gz-!57FE_`{mcOnDF<=Y#(^BJj4 z!YfZx9?lOB`~dnB8Y+le$j3}dp7G&e4Bk@|F}J+e==UtUShj2pgGaUq0+`wWCTR&Q z=fXze|JVLgck6Bc$h;h=r_vN5Kxvca6$oYo^)M^h0jBp}eI|=$TVifW0Mb)V0jxkaVnk>&RJ(-;741aR zZlgS{1R7=F9ix7GmHCA!0O{stdxPw1dj!ql@IJlM=HfPfEdBVgEjVQ0rVVf`UNR@G zV4tV+E?P-Y*tx`iqb?1Yx0o-|(Fa3YfNZdw1a~dfobLHW$DI`txcC-z9g+?m)~);-v&C{*ibW| zJenSTcyrpheFShi9jA?RS@7Es;gOn{pPRs>7#gpj5beswf(-}Lnstq7<*GK;H*p?^ zjG?|dK<;RIb6ZLWyIIT}+MEJNo{~xV_*HbySQio+lVS#{P*-CoZr}Ar0Fz#duFrb+ zyRV$#E-qjB6ok)tl5KTTkVM;XG6X`^#SjLT<0O)}(@y-z#~*(n{p`s{;47k9FIbVT z|LEKxUgPQWZu2YU0lMnd3@gZ$nHKysnWJ#GI;5_mM@bcdPUU>r3 zhpxVX>AjNAW){x81wB)=f7LCF@ZqEygyo(nC(q_U0r@WH=i}{wi92U!ck#B=ee6K` 
zK2AHm?3Ta{yomd0f7>+P>URlDo~k78ksfvQrDc4UM&yTf)>HQPvg8CA$%D~ul}9)z zX(wi)IFS3=!CW#f>O`JQ3#28iiu|~&)`CC ztS>?%4fo+iw{?JRI0lJmVf5$uM4b>M@eHE;1VRjrHwG9Gw!XoDtj0ZK>kxwDE?&q^ z!YmOOA~MEa)`9KaKZ@xWFFglF0ZO^eZJ|8>5|!oHZ-toL%Rkz&iFHofcM4433itT{ zrpvBO>(rQ_f%=jL#o&3_l#jX0Ofx(3x|@r72lKVNs$y99ybPmnpZ$kJ|OL{@E+fImv(er~B|S z1(;x{WlI32>j0(!xPyM^=Ab1o#T*a!%=WG**UTc&MzhhIS}P2#3-z#R?T4=U+jHQ} zw;7liXsR-UlDdlrFn!<##siv-Ewogeno4bK>e>9uFky6>T9-KOz`MAQ%%zvFP-6-w zE(U5^H#)ljrk-B`nCOjqunJ0olR;eD@XRw>Mhn6~HkqE{RILl#I9t1#$ETbuBr_i=bI|lbVvy8_k4GJ2}Y7;zesqv?r2+Vod!N?Z@CjgBikD{b792m98eSBAV4*P{H14%Aw3$=2wbnwt4V41 z9va~&z&6en@U7{ROU_8=UwC>jpnFB_-$!&<(kNbGPU&a@uubxZnJlz3%WRA=M_`W1 zq=DDx%rUf$b1k01iGsBh5u_ANWnhT;5i%TNi-%p?2%_3gAlgG?>DlKG5L}i0sR(Rq zdl~ex1t6_M!&$v~W12TSz4FI(ESwpXk9v*%GvD#QSoD^<)fV5(kVTWU=5Wz^Yx zW=UlyEx+^P$y}aU9+`2v)R-PCG^Y0BXiOEaJlc0QjcKwhmLv0Nz>+_MC^L#s2*sHf zJu^jP+Mj{xxEhlt1o`kJh0)1qO!h&wqTO#ZFgb@8b5yWU)|k#tAO66{7@x{-wUeOf zpfysEumojz5MY{3PUc0d=tnVaQdl~aEfB)F_~{zc{SSQSIHo+_S%CMHMy4M)y`dw| zqR^N!FzGVh-#@_MQQ1bJ6=tLoSmIZ4E-qC4nN4G=y5(&&FwqGlg_XwnmjV&C^Q8sK@mvgeu-sSY;yOV+Pmifi&S?2)Aelc zz$E|Kt9cipfrgF%oZJYb?cUC=vY6qz1DFn{=U?2zYIWw5U>HY!b_TcP(8;2WE!&r^ zWDELpma{1#4jC)l*9lqxBCh-C1jJgbJ5@DOfqSj=0T8`L+~8g6K>b8V9>PR9e*qCu zEg9g{3=LZVaD&?pF}DZBp__Jl7WKx6mmsM%gB z4|W*Mob&_$k%gryXc2 z7jze2zwVrL?FT;&e;{b0g=d!AwihUa9|UBN3VKd3#o+Y9Y{@}z2~28%qa`pom}F3{ z#?--l_R*e$Val7``GI#H510Vjg$tLneU8AyMuC{}A^>w0Fa^I9eB!ZnsFh|r@4d5K zdrSVS@uKRyqHF?_4dka1~|N7B;&J_7_4dtBqB|ZSuh^jj8 zx9b^_C~rO{FsYhw9)a)L7w1YEiOkT#F0w|%ZXqgp1Dese+7FGcsY;FM@FAiKuq*4u z?^=_ty!u?`FXmE)Y;$&EP%oAJ#H2gg&F5$jz{bR2Xc}q^P(kTN3gPHM+0ikd9oTy$ zJ^$RRY!Y~c{fE}Fhs!y*o*N4gO$Pz1i~NxTL^e1yn)dGwAE3qjE;eGc9kdF=H@U!ALETS12e5OyHmMiQ zY0ti+=^(D^qa&`3f~G_&Z$pDzym$c)6ALguw%~Et%-o$cB6Jdi0awlU5cT}cgA?hQ zU+hRfeSBluy%jI4iB9Uk`X)3+trGRj`MKeu%%UkzN0^B^HyUh87hk-V;G<`xOD{Pk zEu1&P^=<%EU_1p4v4b^bXil$-3RLd0Wzb-@hk>fg00$F25?R6PTEX%aKH+ zsgLpwHLoa}xmTww@a;J94`CF?_7B=40JHk`w>Z}MAS%%{roPrqXQjt%tEp8zK3nD*}p(GVI_Y06`c$pJ*{Fu?s4 zV49wpzim4I)JUEcoN6V3$gu*XGbe^T1pndHtGf49*pO7v= zBisIVfGIixCyltB;e;E?n|R%*z!aVS#NQ2Vg7E}9L5cpD`#)LmlSZ^58GUe{;}5G} zPa`!mS_!)~_rzo-oxr)05%wcO-=Tx*amAiCod#GX@^~B1;STM*Q!~4P4iMn67aMIG zQ4-EPcTqa$yv596$Vg1QQ(=Lr4gda8V#e)fD~bb{_6Bfwc1@ImrgQ}l5tMv`x~oybt<1;ts9X6(Z0UaHavUC(HLy!C!V}P&z@spYH({de z!1U=-T@7pmE$!}0o8QO@4yA`5d@jB8>`p)}d-V-zPBcy;0(*$-N zr*~h<{K)lZ<5;j5?=tphWy^&6y7^3$FHCQ4X-hA^ILO9;m>8KaYYt#i=;luhh0ucB znb>t;f`^u&0XIc-n-t?L;$0ZMvoz-wEI5V5Rmc#_rL=YC$Z?PuChOQV8WC-w}UyR2MS;+ z=9sRZHpf%~6ZN5h(7(38)A55l;-1Pk_nYMRS}EM6sl)UHyQiZtuS*OQezsg0Mj`D(@o5C>R45~+|;N7OhHHL zr+iRQOt47)cp8)UO?Q5X$SHuyq{bvL?GLZKUkaE|$(>`G1f~THZnG&*gHAOtmDVBa zG+R5(cHSnP?b>VuBA=NpRp%9;dDET$5wbYrKco{gr>hn`;6m)oSh#$k%dZ( zFe{D6bHSW>#2PsmA%V{B6%P33pPZl1ND2=5t0~8S9J}JW9*;EVH8NafTdos_YA-gb z^Hhz*Rcir|G=nv>sa|^*V1@S7#$1fxRY!zt&#hZO7vavD6&7=9t~8=VBl947dFch% z+cSm+G=N8%9!9!r>lx$v2g-ubvds1?%|_u$ubR$%2lunR#`ABat#9nX8DM^T=ey2M zYfnD~-~n_neGL)8`!G=;-gtd)dip1Oh#t;rdbExpCV!=Yyeyb1L=EksARcADZSm3t z>4JAGO;^5W6}#9jN_b~u%401R3vdl}@7_Y1J;yQc)gdH~vX{*3Y!0~j^`kKJ#J2ANlnS-daltWDa>ipZu)q|Z>1|&mfG|7QzOj8n`sp+OB*%p5-s*WR z%1)nf;9y((ys`DT`2CZ2PQIt^W1sooSmVlw%#Wi^zC#$uY20VSO$ba}6D$OV{9W7- z6t-7;z4&;{#I|nG!39Ca>U+2~GPfXA`5^RNOB2uetP4j?c2?X0a{J4C#K>Y^r=^vx z*cr8V&KX04VkFPxzhQ0K*tcgfQ4&_2%J$r>`*8D)zP2=lPobpEW81{&$ggtJpbGqvAA^gn&oLdb4~LW;BJkX z(H$fY9yp4S-=1E3=@2`s(s41}h2{l)bp(10UIKlR$Vr#o%gK9uhL{-(6?mAz;mi3!QewlD71Lt4?R{nWboXY|ckib+=G|^NnsIkiSZx*VhQcgoef8-jTADs~{Q_YU zgF85{5F{ly1psk}Wm3q-sGZ-QcMh|TxjK8arJR`*kRcvPCTXPWnAe z(1SzlQ|2IDnOmBg;a4G68Dc4>Ce4Cw6EcFOWobkh*YTl^2;e$HO0b0$Qg? 
zeoc~cYL>smFN*j}Bp;f5u}aB{M8l*Op^gdB4I=$WTqBiQOTu3_#VqW1iSb1*NWks5UUNPbLiJpt`|S4#zm>`V1WvWD;FEo6v%mFKbU%pfP>q+KbZqQ`wt}T}6js)@C}2 z8}+$dL$Od#bPPPpQ#WfAZR2y2002M$NklS~Wa0G+QbeeeUsKKp>7mxwin$*YY~)nI=^+PL9Rdh*HF(Mx0>KM;KUBI>rZb@cYgNbblJPtr*=$dhjw+QP0urrwGm)qYmYH# ze=Mtzn~trYE4>jLL|XRRc#xJhZ0iF?$#0+`->DLwW4 zBLNoSd0GWbKI8q;$w8otZw01^W-l;tZR$S>puA&%f~KR!v~t;6fQg09$UB{cBND_HJ*kyI~icIOlA3M6#b-6y)D3W`@j8Sc%>S}dxB3L^BDDf+#5!4%3vWzOL#e5 zaPnK?mIStu3lIZD;ux)?qVYsiTEEN;@$G#x&ikyLvUoN?(AUt>1V>(%&3OQq(GOVH zquccmG5DM_F`nJ7J+GrZji&Q1UXiZ2a(!C91~0O97TmE*q{MfNcK8cn>?C0m8q-kP z!_v$(>gidfh;vRMcV4s%`Lu_i zu-$+X`w(Fc8zs+C&7Lr^k=utc0UYaZNsl~sFx_|WQvf1!G`d+sGo1<)B7z^?jroGs z^x+SmnXbKV9q|C&HURf^=9(HCI+!fYCuHa<6i#>75X>`FXNIHHqD z3V_*|yd25S%n#ziMut$@3NU^5zI({Tc5lYL{c3>zY}$j(ClMgp++FBv#1UG*Y?c{W zmRD>eZ_2;v6X8}YAD?&{$NMUP>5+%h6FkaKg2^(0t--~7ksFNC4dR}ZGRuYH00XI3NZ1IayQ7SrX|%pS5Pm0tKkVu z?DTvR8dIe#MOMG+*KB?E`1x9ws6^aW_vzoe17O0EJpv%JVj=A6UHv!MI0_DKC19P6T*_Ruy4xGH7(7 zsr2?8A&kun4C;IEa62Vkd&7n4tamIVXeQdxh+2`FgTfI|f(dYbO@~;->0(YJ^6I`l zj*!=!mnXIXLnafQ^mi2IZjytHMkdBErwzBIty>PWx!)$tdV9GC!H$2Qz@8+AYOz(T|sg!vS0D#8CW_yND8E5hB znDY_X`s-DL&>n|a(EIey`_ec5?lGJ*4)cD%4&&btF-~1YfH$=@lAmS)JLm_3|G1kn7Rvd-nVZ?-&TnLPhi zz*Ih4K4-sBum&*EPe$OA2Os=C&Ov)JFs-NpCbVAH>y2S{QkxT)fCQemBr{W4uIX3e z-Mq0@vagkcHSe=NYD}Bnd@(&a3o!YNH3*FfFpX$0JlECfzD){rujV>NU1hE~U1&@_ zYD|-+Jj8=NBHk_iEr-#|P0}yQ&u`C#Oo9OJ$Qu@gxNGX4xId~(8Tj@$f8n`oDsKdk zoY&IPs}Bd1+Q7s!mYe4UOTY8SbN;67e_4$wo;RPjCq*$&Dy!QHv4d$N8wHMx9ZG9X>q;N__{HhMcP+!)4L9jg zyp7;C9U!!k$0XrkW7@XmXxhGos0ZXfpBQFXDMAA?l6elQDihjqc^7kD1Fj!JThKLo z&#vC|?9*?gS2yftvK3PxF3~M*>~lnYhH%TK|59@5#z;%ELI?z^DO1`x!YigQNB9n1 zxFg~OV6u2YSGxZC_u)`*4vSBZ61`#&^9Iqkg9-vGAQEOZ@ zatGPDmVt(*I6D)0>uo%NJL-~CngFJA(hVQSVPh?ec$pJ3db}OX33fLEq_F&^1sPGA zM?zo@v(5csT`gDdw2yb}2dEBU1|^2s5hk0B=8VVRVm3y+{q_&zt#=+4zK-_d@+L#S>63MYpHP=)>S_;?dbLFu+FuU#jo7-Mb-w9wsgJSL4d$0IlI%EAg zn5wB~026{tU_xseQNWZLRDD;!CkvC@e%=x>{AMj-RVG!3W4tSXNmJevlfZNnro0HV zWnV-fhSNjz3jhF?4^LIDl;x@Z4t}zoNnw6sPR&Vswf$PDGNBI-me0mF@9P(s(3l>2 z@cy(X^`awtyRDTLopbmFF?5b_L*-hX*T1E@2*L4_%1a3NsTF6AOxnD z(v#2C)|kpUwrs*=T_JN!4*F(lO!m%PT8aQ(Rh#nGq??%QL!S=oQ2>*)|f_xjm^gT5uEGVHdQC?+UVb@j^ zPXd#?55NkMhI}iBZWDUF$B>l0Yr@()x_i=RW57Kx1=%g9iZg3 zOs+T`$2OrK&vU&p9Oax&4e=>7A5C`wWZs7$j8s0SM9rFu9afWGp7e@ZGMI@Hv3tq+L}Ik)5Yo1D^{h2OI*-O;8zxQ^ zHsd4Qj$U^20R=~1GN7FzmyLzIW9*{I)(y^4jXqPapi?0>vY3ox;y6DO4>JPDhbbak|*7m%iqm`?+pw9+*o)LWQ9bu>T3eAC$I95kqYLK$r% zLiyJ8{IfgKzP&EuZ2*|&r4N7T%5?ra&c>6lC2iS!C{3{7wiQ#Sr97Dw95BlxrC^?s z9}nr7r73UAw%5{k?p0$Fm|D<2KbTHG4PfG56K#Cx zg{qjZr#*C>pBb1sLSuTO0H!5sOaPNRHpd{5HZ8R!d5@yf=2_rb7J5<%*phvYSuY6AlOrD#^q6BpV_Fp&Q#R#IF!*%cp#9%}`F@|XFKfNYbt~P!{T;wm zw9WK-mF43>%8=_72WXsbp6VR18k43x7uMce0j4TYqTDueE|IN2o#_9ffa%%^7qmL@ zIxY}lBAOs@=qSL1zW~AvkC6$OF*F&8VIxUcItPua=8?O|)LtEegBx`WkdPTnY*-h` zDg^YJ!8FzEx+J4`1AsJ+;NyJED4lAUd7mLZ!-S|&C>k-_DA*Iuf$5`$7mUm~!i241 zj0RAFU)lIH+RN5AFiEaef%iiA6w0-L#qI7Rf+uuAw znG6=`_6?Y}z#F42V9-P8A!KweiBx{)UztUVOF=6@meJw}^Fjt({J_;$r3=nKg9+Th zbn0nMfF|B~V+T?vb5KTL7y%&1F*UjtV+;-InP2QpU;o+@0F%K{7juqvUG9MdD769H z698+PILc;-r=>eT^PY76d2>@&Qy(4E`8wNFmvH8>aJ@JDlVPF+wmd&pmfhck9%4<) zG2HF<;MMs2vwH~Mx{3Gh39rYCE_!FW^quc!kDGBk9*5KX&Q-K?9uZpG%s;u4O&%9? 
zsQL=7Q4sSl{>L#l@Eu_8!!wd*xiJFfE=te6@N;&sy_Yr{LE99Vu1V_wrbc)fkrRW4 z254jSiE;R-7BJcO6*^_A2E<%;i2Tf=n{d=5{K9$t^J~Pnhyl|7pnR zZx+vg2zkjGS`jkm(7prgx_n{!=!b8hyD(1|GNv|d?bGJr`?Eunw!OF)`N#wCW_B4T_sBZS)8M&yV|Aw9z??IKN7pee18Ch^ z-Tj!HC(kw~o`+{*&<%ZpPowOX{_RPDpC_GFre672-KYQawG+YI0F7ol7jgO-!;I%d zzfKX2BLI^cl@+k}dT#&|LboO``4aE*{$K+RM1C|SoE6-?w4E(N+`4=opUycLFpVUC4p*lJL2yyq6m0|R|&!^UUuW;v8v z+J|v~SQ=W>`4=xkJtp!3-g~iV6vvLyfqBG-JDmRh?w_QWpWnedbr-iIXr90{NHE2A z;`8Z(zJNulix|SZo`zfFa6Eeo=h*lbeNnMGS<$(5qB2!xg**ATvHX~oAzLok@C--*Oi`o z@fkY!Kg2v+D|3Ip^;;iGXP$l@z@&NFT4>740H8MK>!v4eg9J*5=~vN6A%ex2k$A=k zAo&UMWpGv=Bh?88JcpJ2_okoz^fB5mu}9RY>CQVo$6%29!9xK|3M929KF&e5u#Z@+ z-AG#5KLyW`E z4rKGZGfV(D4NC{!mW7?NVKPtKy;icX%?7FpdriYl0Y2F|$l%6LKliir#M2K~fGM;* z8lcR__sUDQOUyCRc%7YUOq7#0p;W~;^;UXXPWq4zJZ{}`-Z>YhYp?leYNrnmvPhj? zP^;+FJu(X(3>rrJM(#yB&wR}LY9%?t^2bK2XaVh=?KmI37S1Qbw1a&&z{&FRT5Dm; zW((HzJiXyKz*K;-<@XbVX1*=M+nL@LVEWV_TvwDzVk11Tuev@XCjIgLngAf``w6+@ zyc5KY$Ov@o7}-IDXuKF|2Gw%?C|L$D37`-$PJu4oYD=SO5sN$t&`pGGG?4`jn0XY2 z2K3|}w5ibE)Q$kkG0Nad=0VoQ^fN*~a%hC8$D@S&!5)rMqPfvgT?gK9{X~6uWy7;* zD+Fp~@1zxL+R`;2IWJvx%{wqt5Ye2NdhTr6)-pGBA0Wc>KW$DAJ@9PWvjeSx?I0Xg zg!dl8Sy>5GFfznQ#u_O#rll(uvu_Za3BpMGiBi5}`@UF6YZ+qaSm@H+!U9C>#M+0A z;}`P=6<{hEp!!?7N&87)GOYf*x%1MsjNVqSTm={(Vq>%sHZQD8D^?Snb^$6s^&1{^ z<2~Zxx#*TT!OgE7VUFpk^z5?-&?4!OL|f>`8*xIHXzJrcbpr!~Ho`Ssz($GJ15O{g zW*PHteUu?PoVd_Wp`)98%(0bi8{9_QP|hau0Y1s!&WnQu1DV5w9ooOQDgE?`oy05b z#bq7$_g=KzF#^yw&(D0|S}>{}9U({{pE%f-k7lO)O5RDD1g3mn2m~LlkrX=Zovmy# zxHY}>$_vr$n$Va|JN2Ac%<3GIvp6xoL@lf@W1+ZjdUnCpGBIhoaLJ?Pswax_sF5sM zMBKunu9fG!4luRhz4^1BJ;et#w1`WNaaDw)(~sms@_!` z8d1OYXk3k`;H_zosfFaSOaJ__u)b~BHsamSKle1@x(-L%ES$eEo%fE5u%0N)G^YzH zwjw-3`9|QCNnpx?F2D|fJzLw#@`~45sRe6*J2~qzZaz(Z12!8szmi_rOvDw;@evw} zdI(P0+;02GOFAo60h6XY;!9dW1-#z@N_@*xqpTxza5vaHkd~dYJe_+M0~dZ{QTL7+ z#k0)5FWR)AUC@v_rC*SI##7P5Yn(-TLQDIw3)n{p71qXF`Qam2VqW?M?{Hq5Tq6P?{DT`P6O9#h zx3dGBB^H$wcRgc_+*=7Qm>#=P$JwNBUVZwl zt4>SryYkeuZe1&2eS}Vg3oq{8189*up~*epe1b1`mFRHe)lp$31v-o40rn$OsX=li)v$$v6?aTx6GE*)P(9Yidk@kFiLJ? 
zqG!?Y$Q&Z8V;2z^WmSWn?m7la59lC|KqGhNDtLa5H` zZC|khq;N>EP{ldE;@E2jB92tvAJs*alUK)_E_k(@`mh=dK{|v(jr4I4Xul!nC{q@7 znP#)mNCE96|IFloZsys1Cet#?Tht|v%?94pwA9Z$hvsol=@F;KIg0hdz8H}yXm`sW zL3pL6?Nn8cEK}@5mVag{!Explv&#KX&pd(2{^sbun?2Sv2@71Fi+7a3RM0mxrs$j5 zl&4&9=js3>$}9Yqz!dpop>S*Uo+E_Pg4a9SyXaRs8tGhAi>_Iqm*ut$CclC&YCpmq zwJ+$(t~wAPi8ruyGdAck>Z+z(mb(H@(Ov>j?vs^lqZph=OP&pwoI*C0?Nol}vv1#D zarER94^=%__o@H<33!ANR6$@>^}H&$7!}}3-q6n?PbRY6OI{5|la-s%Nrdc~R9y^9 z#Yd9_4W8-|jOi$*wL!uIHHA~ciq%~ZoW)~=4svCCn2{Jc>ea=IU`C8A0wQI}>ZWEr zC#lxEY>?4aXUAMdc*oevoJBL_q1Mz+1c(bRS)H!`_+@GB zX>4l8LQomA56$b9=epB_5B!1!vik$@oU?L7rRSa?6hfKKpis(DAME9H_L5GH^0{ub z?_u4otul9HU~=_!cGFg9RM9)rCQiV{k(Uh1hlfjG${YH%g%)_M`v~eYC>ZfPv8FbbHriWMucKIHC}>!0Zh=CNmV`5j3PgT z(CJ`{jnhw`pMK{n?;=Y2B21O6?;7s~xHaw3&X{OI;Dw-6U@FW)EKyK2YDRd(c0i8e z7Eim?v9@b$9D&={mUitPPfz`PM|$Mp&FL3E-v($B=^Ssa+?mLVdKYAjGu51z|=I0sm!v*=->8@ay#lK zaoohnT^cEn@;r@4`MyfYe8#i!9}^Y4(nQ3LF+%#dO}#>Bkg|;enr`_aC~3Jc*8nB~ zfTL0?sN`E5lx3TK-rug%sg+`mp`c?falR|45v*uvu0aMAtpFy`=>bm56k3FNQYWYy zT9b|=L1YMR8~Mp}B4_S1vPrFEgofF)DeGi%?cIpWC%D(o(WCGi`A^Z9ct^<_zAu0Y zurX$)=k>}PWuRv#lq=#JzVMgS(G%8fV0zh^Kux*}Cmn$)HvOwfzVf6rs7{#=s?Ss( zSJS!ryzgwLUqVy>g$3`>yNS(7-C3c3(DO_nAMujrPo)Plj-`l1D7u#&5Nz7QCwJ*r9Zo^)0j`7X%FE zWYDgrYD{J3HoBb_fT?#>eTcP0LuuXVo$1sw<|4?B(a~^zaDgoUB(+cET}>Tj@jE&d zBik@PY0roMF-?uJ?fB6{EoskoLh)?vCu%ofL;2eo=^Q!KjX7%*jf(&nX7@+d)vR38 zm9GBqMVPKmWi(=d(1EmL+ky0xN8d~@GNK>qR~u`GDU6MUfJLKFpp_$6>zL;l5EGm( zojDBoVr{hz8(&Wb@^>>YF)o`yP!;BS!zuhG6L&?+<&JCJgma@Vssoe-4@QImt-?o!wln0i z*0Fh?9V#^em(g>CBd`r%w>h{W^LL6O*_vl7%bacI);Ky)zfOiqHdfw(h#WX5+%@(~o z-fIE4KTO%Sh2`gecCC-nMPJF+y~c;~#&EVpn6*+cUiK zrT~-t!^706R*LmI1wE%;=c!sM;MAL@{<6pnxE#cUCWPiW9(+evB0540L}Q?q&5VLl z9fKIH8h(@;wpo@ZnZIK}{M5I^uK-l?s+G#PfNC@fYr#ehw6}-BEOl~0c%FaCZBfcA z>FeB8nDUJLKv_Cj$21!-nXf$1On*H{=#(Z*czQ@0S8fm#J|Xgzd`U&}{k7{f+u)0^ z%=k7ipAVf4j@!y$z~kPIX!lys-Tsr90}HA z7D@nI`ri8bwfH+B0ec ze_~+Bt7r%WGV?P5Okvi@T9fgwpB}h>L;BG}h9v?x zFj@978h1N;J?-r9Ii^@4N-Xgi1X78H@=&YLLHcFc;AHUrP#uQ7t$ipmSI}8 z(3S#kKewY8=XvJ^>v4xapg9D2Y*d@z$Y-5(Xp-b!6W-)j>&d&2itScx*#nluw^~G zRi{wYhA()j9hNVu&d>MK7h+`Z?f&z*II<`T~$Jy*lP>T_{`JF@eKK-rfkc;4GQi;fAAQJ$~YOUBeM_fT7W{4<8)D1V>nY0;2lqJBEY7pp)x)q~hZ7%4+hkXu+T}pyVX)_)vXXG<$_y^3xBq7opzmIU#T+|u(s(!_EtJAt-<@Rg(wq7G;d^nFl!lh9@>F2 z_-SgzvCmAzCi#2Qj@NPh?;)Tjgi>HAEJD6T zogjkkKRpQ?+K*}_ZqH9h6K^QKIo5%rfvxJd?%ta2{fF_{Yvq3M;1APJo_qp+ zX(3G1%5>W&Z%a*B3L5EC22_UGAPJ^p2SxPB(B!Qp=i*ajm?IC=DxVto^Z`*>iu_#- zrsc80dJPWzci;X-I&kDLrpFPyK~GJe{MgND7|*pvEEx?U6e**G=aXr~ag9_8yvW~n zFYfW~auR;}W0u?H5Q0+qynyo%n=3x`(&KnsV!`3P0b0roi^0EGBo5vDRRVS^14Y~3 zH^*RGxmNshaA^Q*2Vea?eQE8=)6#9Xe2O@a?P-KHTczo_x_rJj1}fDji{o-IQsYvT zHwK+*V!`;5j#5)E0g$4o3WVpXPCMVY=WF~@YlC88#o;>u%G{|Vd@>Vcto!W1)EW*n zWeb?ki6rko*=J59-{ap?cm9X2nb6MdM5o_|-LY*2sSv_Qkb0s)G_i?UYhx!7s#)8^ zv1+f4Iu`3>AZm`2!MU>*(wpa0yKL%^xg*?Dw5ZzMYP;Lv_t?Z=p} z8%`bb4l#F!%PvCvz~R>P{ri5w_7$(-&_SCu&1cRBuPp$k38#iSp0)Er0#XL130&)2 zTSn5k?4JA4>n}|2yJBrxx|sd0TA5>Lao|Abh(QM(NP(o##C&72!|O(1cgX8m@GP<| z8Nl?T9SsdFW?rBJ4Hfh0>x4HtHjn$bmyTJ2lD{=!&P;Y!<_uzfd8l+4_wB!!JmBejxS zSX;{KRQ__rUn|j4Y2M;F>4ArSh?B@;VftMGFx`Ih?En+Q0P@Vh1ih)uxD`Smh34&{ z|KeOe4v?==Zm$ZqwUT+{nj)g3L2N&c$-hq%7y|6;cmFM$X&#KI@@vULNSv?&uEZ)a&oqEbP^s(wB-~=$qs71aNV0!rpEFW`u zr}Mj6(=Fd2AC=%BFge)Mf)T(34db7KojA_)i@_fC=|9$&R
ZvEsPsjZ21RJ}|y z1OQi;%x6etU8i26e?pgn4AU;f&(NA2cosB|dwqr!bBL6y0mrLdd$y;$zx8znlC%@& z{#63gdG9CkgX<(1B?T}+Sj`7^S|b41xD7ZuG!sXyg*gis+WKi>M5x>F2!RG00W(c- zMhF~k9m_u^&J3Rs&wNHe{9-xQ`PES2YJ$lY%VCqI=`bD^ek1|Ix%*4O#uG^(`vFGVA zr=Xc#W18$dGE5~w66ZTgbKZ$+Oqqvk30`?vV&^whYfQt8*5*R(9HTtWLbvDZ4=T7<_WJQ8JL&@e!F zCxvAb^A52(-b&cv`8cW>#njt3Ko}#oRPXEQN?W%Mr0uV>`Fi*K5KsddzYP_*XPw!O~C^KalyG#lo`uDR+X>1?*ohN1ze z$Ts+uwqfw%yMpcG{gZ!eXD8ujCcONNC}1797y{m#IFWzXZgW-%r8x_`()~YtAU*a( z2BwwER;D{|xs!$L06mWhPI@{9FhNiIW6b9i0x^5xdA_K7$wbwK{Y_BkzxunsPJ0jTr;fyJT(`agOa{MgVt~ZkeK}@Zo}UpIZ(6l{O}gc#+cBflcNng@<+_U^Rykn6m+sxX7|cFo+&S9QtNM&u-1iN z>$$r3-|%JTI}tiecuFV*dDyk#wlFL4ZQWzFH%H?-2_WR#*>tWGbBWGOIu*3G>1IlY zcj6RgX$te8l^O+X*!zf&>zSNRaBByJkzQhkyNN}ph>8(B{`v>A*Idt0-K4R-V%8Fj z41xv)Gv?Th-&7q8ZBG zP@XKljT!ix0H#sq8WwaeNniNclsXt34C0PXH)mg^ z)^zq+OVj(`e@43KqElE{%tBu_T*Tux1|h&Zg8&}J>(MnphH}Z{X9gnCLHtAhM9~-+ z&{<|76V@GR>zjRP+Z)*NkK(^irwY0k9Vxd#JX}zD@=FiGI)J!Va*|u&R0_z*H3T;q z)8@^c?999M)%5kh`>PN@MqR${gV(2X)}NP};3s3{!Gh3Ho~JJ66lI$FHid`+uvV&b zCA9#;bD>o#XlCJ^^7#=qvYg9;?eG2I{`BL=f6U;l1%bFK-Ffq!SRyD_VJZ`pCa9m^ zR!e&4y7J1XXa07u#D$t^wz6bTSMs*JO0@W&kBv+*4E_H6>t9WK_wNf}I_|<0BrsWk(3q-#>E@em!!m@cI3bB@0h2s8 zn+9FsN9#8&mG$?uyrLYjd?mz%MTe#on0D>mp1yg{*RejJ^+~7craXZu`cdRoyh&nO z=GoUV{nEfROFI=W{*V!=71m6OP z8I;QT4Jn!D24+&+oTv^No0buVT-XaemfU{YzWxvH6ggL8+I8k3=ASeFIU8^qNeesy@5)78Mv zxl7ae=Pya8t?y(}FM<=##)XU2zyx@-FHErYn7>?bIl^}HZtY>%{$>Uoqcq|KaR`U> z;vAfZ=G&Csc&$6_+JUDcb50>#ZM*m=(5PD2_2RJV4;oVlfqYNZg*Yrn%H|h>i7a#8 z)*Vw2VWnQ(x;cIIum6Gzj3cS*bsxAsoqgK5%#o^nLRV!vDvH{O<|$>sjKs60;G2F; zkayUo&?yH#A?(Q~hu50^1pE!C3rzPt@cs1YV~;{J=(}Q7`qZs=rdEW0ImaX*F`^1n zo`(^kLYfuzEbAO}@MMh?Bl8;PE5Kww@_zwP@r?z;=nsGTSAU*%vmcWh)9LHaNVj2X z9A>xVRsg4p4Lbl~wUR^3=uMceJwLJq8c)F57Don1WxBA%aui(Y~xPed6{o z<<&6{nPtSZ0v7a|b~b(<511^A<@95nvTW5VCDpVV@IF|*ftZ!0DNhq~O<=OU|E13{ z-S(;f6FNZ}84Vx`>Oki31mkxsI-S5=$3ob~20h>g)4VwiY3-W%Y4s{L#_Po6i^ZSr zC|k#M13M#~7!k0h0o95XMDY0L*kbm3Q^*k#S3{C-M!Jn*0-8 zCGwv3nCc0s5U@?$m3Ls|Ae#f;Lr9n&w3_yG zz3d|(uH7R?um9D>?Z;Tdo{V2mF6y;OIi>Fb*wiVVcpyC zUs%HPsKjmcCB;rOOlQNBVjPcLIB zq^FYwy%rV{U98(*XA-HCiIi};$H3Usibm||==5uBH z391>K=t`8psSnHM9MjI-XiS*$)Rg|!0@E$G{Q><;CY}D_RKYpjeFs7#YHb~73kG&7 zJ#9fc^;GtfXM%ia=paH8jmn4(j6MYdKXvR_8v8lf@(@_(2zofzG{3Po<jM7*i8r zYxeEwCob0^wj*a=XwYyYU9gIg0s>_wh{m|#F@5~f;6`ghZ)tF;aBjBdW91~6(YI%n6v9olr zZekE1G^9$b{&(Ib-akFJ;&&lbLaoOpuFL)~z|_UAvUh#yw^Ij;|J0ybnB!A3iI7k{ zTHg#DGhn#q@Bca-IJ6Vs!E;X9GU|4LGB8Zt8}W=|8-r0|38GQziaLTR*0pDKnBg3J zxOPmFnNnE+2y{Vv+Kwy0nC?Qe4TxT zo@YVhP+Gm3U2zvKV&u6Njd~@KD0L*N#|ID?X1qa< z^Z3lc(xvI$8>c^0d3ctCOt)Ds{@PC0-Fn;aGdQD?QET60g-ZhGZ?{@jBW`XUV!_%l zK_}bOn$_)8*swdSQv#G4h=V5~9hIo{)GLIHR9^h{9CDCf_B4ejf)ta_*l081H%8Ne z1IN;Vy>;o}9{2uXQkC#Sh!+}DYRWx~(wQrXWAm^FF@bMc$0?ty==<4z{6%6+>6PW~ zk5hq(TPaQSi(rPbj-H=P(QI+kY==Ob4b;faxoD{Z0gw zbbX7CCnzb9Tj7nU>=6+{kXa8#io8k3aql zk*N1$Hfbg9+v0RSYhKPf_aYWA>xRoah`c_G$Jsb#vt-qvByF>B@;p}ydib4zDW9kS zPjQ1yln=}2ypLnrSZA~r=9o4COn>s#KZQS?qijkyT>J5KZp<+OR8(AGLS+oT4Ic;# za}ufIpa31SAcoru-pLLMGc%izmc(AX=eyrdKl#~{8JL!=NT0g{V8Y3zbbbh6g7+a? 
z(4o|rrsr4+yK4GWA5XtOepANa^Jy17Hx4}lnEv8_rk(rLn8wnXXPgyYgA)k63NSGs zs|T3e$4clZ=RC5|m7k;u^bnZhW%+kL%oKcF*T+=m4h21g4)H zADChe6kuXtXg_=_U=ptkQ8kRmVF1&e0F!e}uGtDXF^x$4Ea^M*%Is%yJE<|{`U<{& z^mKJVDKN>4Zv44(_qGC`Bw-`x83%?156Ml_wq@~mW=`->*?5f(Y3S;08E2~ z>90>KRx}Y(UK1MXC!MWJf%IG{05sz@}3_Ux#!bjfBz zJ$&l9*ZGYR7AsE>(y61PA)UH*F+pZoGQfI|quqUIyZczsdyMCRL)!F`i!Mp;z3fUl zIIHe)Ua4n*qQG-k+58H%rQAtF=*SbK@CN1evZAH1GiQMGhyU|Muazl;L~$)Tzug>Q zYVODy)1Q9zPeOp|JaglG!+aekQc+>c4{&>=*Dz?e#<@uZTj+(LN;luCd)UM}>N zwyu`+&F_ATMU(&&USlf(CN-uG=5Dg{L$0U57a9{knfj@jN;6}wJGWRTKi2}}iF zaV`Up{V@YmfD`YG^Ing_=4a*A(3nDCc+WU2t*U1Q4Dk=e^P<4SahGXEJ@f2}9#ZoBRG z3Si0vWaW}{7^8C69W~%^5zEdIpjlgD z^j!iInuDf1_hTx(@&qQGE;Q4~)R9W&k1E=TGgC0HoHIRfO2-3*zL#e;<-Hvm6C=~k z4uI)P{}!(~R<5I%hAA(AiT5kt8CeZ+;qJfxIvUgNFy+DJXaUZlq4x7nkaEK`%^U7< zqjQ3;xC(+iN1!=|%m5y{KsgrSDo^VUK!@>F+%pC(M=_5!;{aiVa_6Iv>bwi@EbbEs z?HFUU^(E}b^xk*B50eOGcf&Q}5IT`t4^w0fI7-LeJ_66!`23gzCa+Kmu2~X=LB7>* zf5*?_Tmf*!edb}6nmd}(1~sO?_~W31!HGY1-6zudXI>D%q-hg~F6q8<;HdY$j5#)k@aS64+LG+Qj;x1;IC( z)wLpJQyy}&(3o6cD^tz}OcOyo>r}ik-XX6rD4)fYm$mM=u~so>Ip_T+`^+p^oy2K? z>G$FPvUB_7^nx&#!Gw5YGb4&c4Y4oKBEk#Jn>Pw5rSRg?!9mR?$B%>1WYK7}0hnB1 zQfnIK6B-lOY=}N=x`Rah4Xp`qlCj5z=VQuiOB-L_o%ZhHo$M*pP~QQw2?Q{TpoE86 zc8E}Ga*?h5q(6XZaG*E6{K|9TF76!D5OLx{=muwSBj5auXC_Ai=*G1 z#w0K;0GNI^b+!;KVQ?aVNzZA8-?(#jnDPes_s#DXz=VrDT%Lgm+q^=`@(vJ?QX$gb zriQ~jPnhy(0R&z+nsASRWQ{fD#fM`%Uue>FAl-u}$YUIVWx&{a=lWbjoi5IX*@?D7V7I4;TEOfAgKSd4Eq6 zy2bAdT6V~M506l9;-8^0z4FF}^v8cb2~0PA^pgP7g&CNr_pb^}d4Vma7zK>H<%eTa zDTL!k)gMiH0@F8{W6GvHfob_PU~;|zLGM7%dQvy%&1$966D;Te=|C30;Jw_GX)=2m z%Ih=oKRyS$^(r8DAbrjz><;tQ))>iKtYbs)_qUV z#uLTyC}qvSR9=fKnQq+Xk`h6!1UBFEqsJedRb#T^Mp_SNAAY#DQJ3*9fvFsq%HQy9 z091Y*FP9UmG^Q}+9dC|llD=kfqRIM8_Y&0n9kgSVDL7S3c~}*8?!lCI_t$8D&7^N- z%F8sRR20R}TWCxtTHwlk=3l;}a`M-HPtZYkb z*R?PiZW#Ptf{YS;hJ~AXO_H(r_@<>MIIPfXk~N^ug;?H5$+hZuSF155+Vos8>k z#sXdi+yr|F^>jBA^z%U4wGDwv@Ys6duDPpj1(af*$p&D!8(wNQa6e85uALE>p2cj1 zt1`eeNUW(aQALNKPJ9Q6kbzkQ_|t-B*uOfCGgPgpedglz@Ld zox<_+1j3=24)2&?6z@A1)HY%Ma^2e)9ZD_O1;C6y(m)^EW}y9Ea`8LU``&W}z{ETw z7n*s8ldl0Xbe_^wDMjzbPki*|bm3VSG2mg&i8@OI&CgaYhtUDxb+yBs%S7=PmG3n0U5WrNJ@)&5=p^|EMO0%58+h-kIJ@nXv>FJk#TGNzgy_?`YO?cKm z=9qAPaBFonqj)uZk*x#fNb+X@Q}rCv7x2o%ISY$UEmNMEXFfKA6HhMikHH?iXggWg zFy}FeXg@N@LCD(DE6-M?+ zK|zTKzHSW;F!^!L(nXjUg6IfpXP5ir^560SK&VJY!xO3tG-0e0iK zOS_KAMZB+UcpevA=e7pJBQ0x8s_)!h)j?zu&s9>~Kz5#p9u!CUcX5-?*zq(yX(m&6 zxGz)o%A=z)3h&UAM~5(6iXrTCugD#>@~+AO@5rx-e4X%1dc*9b*t zQ5&KVx&0}aqsoHL(-1j z%qy>NNdNuM{s_LL)XXv6eElZ@CNw7MATWhp-_aAi8B~aNw;)qfA;zYjlrkak{N@e? 
zO$ktOJcYX|&rSeLZLG2S=C{9<9)I#D3@BRDN=$iQ_|#|d%F~o*aimWkENIHJoSG+1 zQQv%+=>;8fy`@big!g!8d1|Fufr)2~s@=ynD}SQ%L#Z*HerEdgZJ$bws5o;77EG6e zPctyNZ&&%A=wAgbB&ELtm?(tD@L~Oqs-y znHipNl1CgMRR&bni4V9--N=S4HPO3{Y3H8p=^JWH$~Fh|%1}Oz(LzI(Hvlv1lRsrK zJV!qSRQ1YRvScN5!t+uGj#SE)Ml*RPG8_v*N2zT0CCgm(yFuB;EyN(bGz6Eoi#2EED{`Zkxi1|( zX!HU!2Gw1>!(ucaZJ~D;S`^~MQrD+rjM`y#B=_dmH*&AGzcC_Q6Dgawhxv<6n!jZT z7=TGeiWW)2JD9S~@}NplZP|0Dk89q;=sGkO9(0bz-ai6o8OSx>OiOd|uM2!MG2)4zwQ*ntz*)g~4hk1`=^2OCGDG436~)mW(c9kj&f z)6(V30Ak(a8G(_H9l$b%^8=mRV3uLZ!^B}n5ABa~Ilp3(2G+-pXY!t)L7Y=CVt!@A zIKfSGUI+_m3(*9+I(nBLWv+9%03_(Zz!%yXIX5~z45 zAA{e@^oJX6k>Oufdtlq8g~6r6s?N19B%x$?`1T5a<>0+=2p@@;v2o@PmtCgd>v1oXWJb-u}q%_SU$>-wGX}6V)TlEJ;+eMqQP4 zg9yXJeaBds{8#J%yc6DL53u!Tq%Yk0*@#RLuJ++EW2_K>#|a!iMSj*l(}c`8)Qs<#M}m($wpPw*(ys&^dD{C0o$vqXzV!1~)R+XOsq2``S9kHs91}br zrn~}}f)dfWc>~`;zc`dsU^)JoQUm?@1BtQXSX z0o9H@JJR2N^RJ3)naMiX)4%?MapPinhu@r2BlO+mH5Hg#Zy~e4 zoxn5~VEPXX1l;JX04DeALD;y7UmfdV7{UTf-)3uvYG8s6(0B$g;r*30rUIDgDAq+_ zlGpuX%_WAvrI+939ENS!M~tXu1iYp_>7k(^1PGxJ{7T7Z)m^M| zdmIEX;n8^ackW4#JoaN~128RHneO`B7gHBrj?Uvu1tvk;BGPX>AZ0p4ZFzqb(34Uw z?v)ZjrxxKF3fn8$t7q0vVW; z858guL?TQ3m|p^BE}z@UNWc7nui%kP&wSl-ScmAJ9NSh?fXQI2JTK^c|NjG+UOpZ$ z%`(RXO(99V$+phN`KO@dRwKHLt1*4{Q(wfJRI@pKDC*#IzhdIu_MciwI%#&)gzB7P zGnOBL>2GEQCNiU~rIVDK{KN_VU7P9?5y>@;GFmbsH~wC%LLW%0mNusg z&s&(zxtOp(t!%A;u%BSj>bM^!NnLMXEH+JZ?)cco#&%6@Zfk*%0!&0`fk|7w(UB!$ z9s$6}4j^a<%}J{3LyQ`RS(JO^Xk&VF^LW~_nTQaFiD1B5Crx(aI5@cXPlMa9I|l+l z0c!w=W|;h*5z4oH$D53@`YDrhOoSt+9mA!SmULm7^DZttwPEQa)S>7oFi7t3KH{-= zSmwAh@(C^DHwH}e(r{$J1Oln;^L->rqD`CN!pMV45Q^#rO;ATOqY_`i~&PP>xcQI3H%rI4m@#;zgNm zwwEWM%W;8eW?kj<0p=Eu^r|rdOuGOk`tn%?Fc~sQk3j{w+~F`WW`Lj_M2b>mdinYo zG9XX9W?zF>c)=t+Eqgo?^p%tHKdMOEIGD^D6FXpYqZ-pcs4+#XyjmJll_`&G%Q>c4 z#{};M8%Y=Yb`L#c-~ z00WHX-SlUIT~bGwUvl58akM6dpPiX4lofg zRObilXs1_W3Nul3RGE=$0)6$WNs3&a08#ICFz-m>JcW9{%y8 zSOUQCenc~qU` zP*YMrX7IykOn><`O?d(n?&1K`U7x{?8o_4yKqFfsK*6cu`G!vM7_pDxdrN$J*vrl_Bi845dX{7J> z_w=W^%*O~!^Sb6SRpFpF-}ZL=4PLF0k-VC{mf0?b0yTxk^uHD`MO)P<``da7VwXH* zzIAur_Pc14ZD>mn2qvSy(-&CnUw1UETh#(EElDd^>ZPT|BxgDDMabAE`Dh#MM{pkL zVMo~`BZ+NI006c@*6WUmZ-cMu;nl{X*EuXK3~*{kyCQa*#|D0l;G&Ee`Ug7G_HA`( z(}r#q+2Ue8?A}a7DreGb&fGfgXXQCTM&~ovHK(0jYXK+ZO>*Mj5%eG-`@4@F4ME)7 z%N!NpG{lC01C*twXMo@RfE~VUc$f7YL+>C6X@HeZ1!#E2@{10qpyADVl9Z$9;Lrvd zl|ty(3QWezYnxvHQ)$YZ1STg$oh6?@dkkQD`MGpT6)=Uygy1QGNrwu<|A)qeySS1g zz&<)Z6(vPq<(d5LEmAY#=e;qiJCIdq33KMSuP!`(B=Afly ztV#qt0hlo5eH&nU?9r?-EuRidKETtVF$qu{GQhKrafK84Fuyxa`b0s4cs8V}C^D@8 z-e=xLUgfiXt5x*k@N%TD7hpnT+Pyox;m*SO;jYhqF|`}UiouaKc=1;t8)u*-h-4bk zZ-QQ2kS^ruF1lXJG~#q|94{;SneP_-$vK{xZpsUdiH@$u^z=(cey9aZ4p8#~TdP)a ze$ZWV8Z<@b;t{7Ugc(;au*1u$WLqg#$LI&N)s4uNnw>}x*5zT4Qf=(vs2yMh-NX>nHeO>9bSI5%Ejr)ltfT4g0*#?Gf>6ic< zJBiPh(gHwf;oN4{Q#9a?tZ9q;Flo6Llmb`=Z{bBo)$T`=6HDFYb_6q6ADia&^%8WH z{f>?t90sgL(!oPL?6TU&&bC7UDN%`G;z7LPCSVvrDw@6EPqnksZ1bA{rr-I>OpQr_ ztm}yLOXJM>cn0j5U)CY>Kv15983{FhQ!p)pnFm`aUl3NU551wF{wcb6$U zk>p>J%lYCgyvjSuYeD^~C;27N^kOkN!dB60Ogne)3ZW@5eOX}AB7z3w7V8leJAfTv zqK{{Ou{?grsQ6asmS8kRV}e(r==`w;wo!cZJ8MjE4)vd+F#$|Z%>qo)W4gu^A&CU0 zQe*NR0+W1X%W$Z-rY5c)wpQ<8&EV%hb5~l-{7eR>865Z%ER35m$ni*XZw_a&#KQ&-|y zRmU6;45ytl*cua@a~%TH1OZ0-2OHDoH`ov96~faT)s;DGVD>E+JZdp%0L&VAt_kn9 zxoA%d=Xc^U!UlvmE(l-){jF)=zQ(k7Z+|+%MuEmxGm3Ug+Zfv~bP(0NE-jqX9G^L? 
zpK4~#$?X?Ze#%OZYyOL2aAm_;JsQ!*1u9(Xv-Nj{^YQ~AjD}SPc z@GC})l-JQ`>6Mp(39|xV*-ZSdY|2A3WQ|q}+U7W#?RW0GJ8jtbLYlvL;r~zEd%$US zT?L-^tDI9;=Ond)t(>i33)^J;8-fEiwsu(KBu5Mkco^7WVF%3E14EeMw>x+ij1`=5 zzzN&3Yzf)QId-enId|op_y0fly|1dPtAxdB*<1B}?|m=aaPGM$-E+^S9%{DQ=OK#x z#lVh`M1;Xb_u29M^7jDw6r=*hbDXoS_5l8pQkukb1$@{KLlbl3(2q87V zs0(-Em}9#7<&=l59^zfc98(2Coo6NhlT?#Wl*yM&a_P0Q?7%ah%8$qMzG`yaN6rQ! zQ(P?XQ7FW}9aG-Z>7#$~r%Z3*;YAs5dEGCjt1rGB?=pkB<}!81wiQ^|a(#04OTM#4 zpgB~Rcs0$^<+pd^rnLXip}1}g!e(5*C5Fa$1Kp$BspTp8&cJm#*Y>1aK7DKY$8UcN z+-`{lwi5o1#yrOqjETm?92oF+3}qI$q*~QnGj0ikMK}l~uZyes{?a#BJI{=X&|o7& z>Ekzj0>(tFJaGG>^Dap5efy26f^Zjg1J`9TSn7kA@+->842X*q8O({R4htE3X=#Wf zzd@&9+#bat|B74?3N!91Xu7q#9c_!g@x8C5yB_==V~n|MoNzKzF8d$G#PSTsop5lW zZOND*Es?U;RI*dlhE#oZ}Dw zO_f$mv@~EOQ&MAsOQwB?_TrWIcLr9WjnD*Xj=~>##QY+e2`eGD_=yVvSEnj^de(&l zL|bbo!4TGj<_gwFd-9V%4%h#rJw&7fmu+dSV0}lGnprL~yOl~Z;V1Uuo${?Lg4?l7+9VS{h zOIy<{Yh5JfbwIQZLRbfbQr-_U9x7{R8|7i*tAIJPFcf#Not||q6FW+x)o3-SQ5$0S z+n$~}HYQBifooG2AJ)^d=oH4J1O%a!pf{1{^z>w`V{*>PJy}F*r6O&~dQtIM5M(^u zA+)Y6hg`D7#_-5IHuYiRz76Bq*O&I~>xV&(FyEwPj#rtTN_(^In>;B~iV=<*mHu)&g*+$3BYAN7CX#pO;qGX;g zOwgyjM;lgXD*Z`FW=wf~Lxbt>Zn=r|aYwKoOc9j#!t{Z6y_c=kv8z}O#}FBW^8@h9 z+EUKZVX&w%aZX)I4_cXkP7T3Ocuqmrw}PQ@X4Z}a>*1WB%s%FO=>yX@3F3O^{kI1$ zyI+$@Mwi>4-;tTwe*4Su-+{&i>gM(qf>b#m#br&6I&;in;i+M6{B6JduC$?d1C7J~ z7kn?5UZ{;mJ*i)SI7 z%6wz;xNv@GQDcG|N31;Bp?9K_)|yaPN)lv~16?8^0Wi!<$(YRF!4!;1k(4rP802b9 zS6_A&jOlv%L5)eqM90Bh?agw4dMwYwEJ|7Vnx*D@d;>erg@18BZd-o{HrFE$@5TGx zq+Zljjp>n{Po|Il`G2iyOgFs#jc81lF^OL=CSe6EguM|LR$@#taLWj(l~VNdG``(| zg9p;rZvT$dFumfsSETdLJCEIZMW?&83!>MkA~m>HAf?D-f|&b@$-fDmrcee}x;sU)z=Z^;lw7~g#(gY5k4!tKnM z)R^A?&i4?r5lv9034Hkv&#>7*ZkZDoXsCxg#-c}x-#qU-jFHfod?*NLu0b#(PN6P> zDvnjg1pbwH@=app-Fg3aLi=x&AdoEA`)27JlMKn<3QbizE%-!X39FEo|o>Gbz<-Al&gjy3xZ?SnCWJjVLz z>zIHmbrDuMc`VO?UBLB4WK5@KU#-6I@Uv1{4Zl`;wt6klWw9NVPkr^*7y2=0?b_Ry z_8vrQnr(#`_TZJ5ApT?NOthsdE?&oiPdkTU%o?VJxyfKm22DgTWIl&MUC%x(wH;~y z!2!I)_NM*J+ZgSxWK0q?rCjG}njw%4NP^AG1vNNV00P8Xh{_ns&jj-U9jMPR3l@D$ zK@6wRg7hx)*^ZW0%z#ZW&3YJ-a|f(h!8U9VSv~(o*#FZd|hypYI zq?0lx2QEYj6PJz;(*zuyn{ywSX0~w{XG@1;FsQ>gWb~!MAr=(F#K7Yi3{}P?Q^hl^PSYnY4%y{xXF}7)L$)f-$Kv(b=|Jckw_< z8IymCW0a}8xaO#9E`M2i<+ZOY7!$a`CVkpbjZz+_2>ZYev7Dlw@uT=%-cx7u&lHby zTD+DS6Xi3k{fg^8>AS+bvAr!l{EUq0Pa&Y1EbCIqm>PjmsWC}dgE1j3uQb_heqr(y zPwC$Yg5q`{eC*!Cb{`KufVH4D4fOY?OD?*Y4fW2!<+zj08vC#WV7?Tr5ac)tBYw{^ z5!bV6ZD=H4{l?dsUu|P9cXN8zue>9*v3^EV8P;iAVkhwdezY^|ZC*b;Wta%l+G^RUXOfAp=JxpzH)|f0=89~Oh zXv%YeZAV8}u9Lab5%CutqL8cwMJtf8@>o)VRpQ3Cy$8z-QSmYD#hN{6l!A`5LOigo z`4~n1&|j;EF;eIuS7_rb0^S02Y~Mjld1_2_|7whBHFt60uf+IbX-q4P2^$UQfxq9U zr|9p~FF*g4qHoHFDxdtuTNmc=wmEQgB0bF(*h2$&%Qf`iSkS;{47GM4U3STO7M~>; z0I$uDLDWYX=sGjFX9`jzTN_l^0=)^dlV-ukp4gq9!6Y_1hkGdiS0eV~9yk&vhB!>7 zj={@rm`07D^OZ2`>V^o>B4cUiL-1D2Y`S^-IeN68@HUjsK21(!x3{*WuI?sORU#T- zYHNZiwXwi=baa6GhDuU`l5v4}&TY}jb#%IBn5YM9qtpf*T2g}rJ_tu3w#aI`+Sz?| z0mgJBJ@~-0X_s4;&tRfKq|xgwOkE(huAYWn@wlSK1g2zDM+x%9qeQpt+fx~nUU``E zZv0@?l;<3iiV0Cfcd@)#d1Z~MH09w41N`krrMx`U1zgsc+E}}E-4$1-SHJw#^nscvZ6q3m@s#R7z7m-| zEcT7R#EBE>tKa)hss+(TM@GX9e+Nxu* zpz~8kl&E0`-U^ON6DB{Xg#v&KSEO+e zjh|x5Bdk$jhA5d$5S2oLtYQTx4kYVVZf~F#;sDiDSUNw%xDH7N;vi$9pIl&j-Idp- zAHDw7z@IpO5-3b(6&O=}*ve!qWE}s+d6bvGg0*ljG%McCf8MiB`Qy3TcmC}~8D4SU zo4~TJwK+ZX)Z>{k@w(e6l#D5x@{m+y;yfXkd9iL2$J=a})4UFk;Zu)ro%GWbv+O~O1 zdJoP8M9K?|Nv)~6z!rFiDLP0t4OubK2$FCOI0Ue~@H~u(wk%J8I}Na1%nb!T!0%MAL##6wHdK#E`0&m?8)(*b! 
z#t$&=@h(NcI*en>@e{{G5L>ryJ%=&kfthLzxo$UC5=&JWHpOG{qQOGPi~cORc`*qYZvPA z#2cR?6vn~n^vsh(XhdzPg?SeYX{n=iHl2TV8!oNxPRh{1ZG3uaAP9|<;I%bwX;6nG zNe9!J682~J?n{rLef5uJO(}aaDfwaAgWx%MWhza~Gqobcx8k(W43lbUo`R8#(Gm2H zA_SCA(^PmT&Ee9{_T0n6wdo{G>gZwCIvwu|Ce@6FSC2P~+RvdwdzhFW6g45F6Clz& z`PQ*_RCDUV3$VMpoh>Dr(^@73-GaRtM+&|E>YMm9VrOQ}RCuhRH96Nc*f*D+WUlG4 z$M&UN`-Tabg#B9NmoY)u+{(kb9S5D)PBZ0+R1&9El7z%`8e?KVruW{6^8*WPom*;D zV}dZaJ-Vj6W_D=Biq6*R*_7AW(-p-mYD`wITwv=)fmzB_lC(?*et1iTRDF8=!t}s% zGA6y9WSWbNDQiqIQg}vbr(*jNBHV@bx~r~DKlUR(iWUoF0zMTTMXHPmFa`CLh~~yu zj{<^gL0~!6#8UFi{EiIB`{f){`F_0S+XX|*jOoEAA4?zkvp;d1hO~Rb>)x2ITB0!p ztjNpZ;M~qiW(&*H3@szF{b~?R2vgnN-Rb_Po=P|2_PuWHx^w~aFwWmS_2d(*gXvDM z`}tppes_IWEI?I@m3V`vy_?pjzx~9=(?5Lut6|F9wq-@D z7tTXVh)*>hF@1(Wskm=akDfCo7+SmG>5p;@V0{+_ZXFl0##H z=%O(($Mi`U6P6yCQcMSMR9KT2C^Tk~!}gS)oO|#vGF`LW`GFT~=L<{flo>`Vy|-GM zqRv+91*D9vi!W6^@wH!Ca0;Ngxd(IERC@ZUfpp~XWNPhL7esbqd^nxiJ(JEQ3dET^ zda$pfyw42*4<^C^(D;M|on0ac3&a@=ZkDx7ed&?s`qGiX?A4;_$8ER6Z5u*`{LCB{ z*HBxVFbQ_xw9vbz9oKOP7@Ikaje?NiquVLCeX~U0IRah3eS3kqr^)e!^xSiY(lgKW zrJ<3|=&VLO{Pym7F4o()xuAqX^UDJB8}8>s6+(k*!a1Tz(<77e&3IHasa4@j(ZKfM zbq%_wV{U=aZ{4~L?VufEvp?N;?^9{--T@E;w{74zK{N?;1lS+u9p+vPdn99OV{St? zRf(8uZ8EYh0(@y-g``gX99RxKr9GLR9zt&O{D#-QF}5im=^ut@q2W1T(LATz(HAXH zpO2eAeN%e)v4_%zEt|q!+yXT->P(_bt#)=L&sGMQ&JQ|em`>4{mR4%Xc@Q@G%62%v zWIp9Orscydh&uIi75$I@=uf2UuDLDIa zm&7OTb1$1MVH#@8h2$m-=P5_V)zaCKzJLF{>EpNF6pOAUFu(NLH>4|JOy-$2GMN$| z-fM>WT2d~}YA%**?`hJMFqyDNB+YxE)SFn$$Qf|?e6Xy94<-?~Sh9{o^=^$@<*BXtYBmvJ&DDxTVydL3k7 z%nCA^L%RN39Hpc2kND4%8xl&=#V74|HMG*!L9nLE3SbyHK*>@HAyEHap_gB`Vn3xxNZMQ7+pK# zj5^yua@BN3wsCH_Cd+vtX%c6XIyO1gBszqfephb~fv9(;zxmt0hOTBU-TthUjENFt zOkhA!OhB6Rs03GWeFt_MHf+U-ZB3Zd*R3PK^Y|22Dqx%+TeXUT_XU3Vknmk8Md>`Z zQYvwGrF*Mg6NVX%`H9M>zV*M-#z~N%Jq-?|^z5!-2ry9|*r2Zs$nUl~!y0MQ3V*&|ye;f})PzO9EIt7eLW{??^ zNY4XHA-<$%Vt@jh7+^e8GNx(Fp`D#Q=}m8VbGni!3gZJvX|&IHbgtLZ^?QzX3?Vdp z?#o0Mf9kQ+!MsOVjD%Vyq8E{72N|?$KlCP3l5{}HY%)QjF)Jl;j;fm^dbTI85o1me zV0qiV%!$Dw6|w>xue;{u={zRP?GH7rU>?38lo(XDH|B^qdhQ=5k!Kt^H`Q@8V5liS z7biXvb&Gtcn@7tI2NVWyBY0I0PCddL)0e*Xm2~vj(X^&#P5RlNdJP)|o|m;diMtt7 zN423jwtA3onU)SLpONs(5W=LwTOqlkMIDBi?b&|_6SAhg8WwDiA?RQ=fcd+r;xWQw z?b*E(e5(+#q&00J0{r$f&P6L1G7>iF%m zv0e~5A-*YCX^pTy3^8`^!#_y(Kl%gUowa}K;JxS%@{loU#Su1+ecPyJJ5686khCP1 zjLAAp!x+ak%@TrW`}Q4}V|!9JT2l+_?V#2gXb~X}t z6ikBOVyeGhNYZ>)0|OE^GSFIZ_nBQ!r>}keE1~^mjfuWr#+Wj2Ey?oAkH4Ww(2orp z3&w9TL2Z(pyF?CadqE8JY|639}Te|@C#`P<(Po{iE@ z{_OxkZGacx7CXnmAK|?vmRO1#F3osTx zgToW)seNPV;0ZdGcy&rWE>hEcr*Nx>auWolnQP6w+e~DD7NqLw@!E9ah#q)sg@GpK z9Mb}^yL1=V)YjA6M12~gyaskhtwZY>o2X@ymNTqlY?&7wL#P;*7F!e0`picYo~h9;Cj0C=&2(z1koW*jv)RSEuB4v z7?=}qCpD%?%vU2W#uE_aof(tckmm_gJ1Ha1={>S4V}j|`1CQ&jy*{0N#@T6ta4EIG z!Z`~WlYKHUGC)i-7BsVlsc+yUCLtIP?J_wqWk&7-8rmCh35^K}R8xmZQ%RTzMCz6C zi~A*P3VXi;9|0qFv5mpttG>(!#0o}*3AT3)?zX%eb7sIw2xuPlF_BqD5K_D6z$|M# zo?Ekg^f}AdD=@C+o7KTkYA^Ae1EKivR+xyVke2-nkB@>Q z4+of3XFHb(9ENNkc<&xe65cW9{~~AVp~*?O1UwukIz4D5pf3&XY3Q$jA756(F=but z4_T~4SO=Uuf0OH}qt8ky?%EfTp=IWeKLcm*JXGqoYy~{~vAqqe2xVqKVf0KkKtM$2Pc!rJj>Ba=R7EO1}+70Z^&Dtes z*aV@=#t2>J=6zZfjvYIkjvPG@&Q;S0FM0vbvh;!M0_QU^6$aoqT2(pwlPmYbd}mf> zyTg$LB$=d~q2a9Y>D(dQUkt`n?|x{x6IQKQ6&K1U?DG{8AP>A*;a2YRm2N!GwQT+k z1c|3ApS$hdG<`TaB2`)wLK5>Shtr|M!#+X>BV67lhD7d;Qub_GoH^Iq98+ zroeh9X|fIg3z(Z~A>1RF;&#H|4xOAuiX4r5dO+#YP4B}fkdPqi?(I^``k~64!~(P6 zP(6Dv(eZk}fv;2yf(C(aVD71HGWDXhwNq})TVOJ(uVLG97RS2gX$%hZIh=+D z`=~=dWlb@^E5``~lpwRX z$+CTE;>QF(gV8yrDPs0H;6!ZQIx|^2y;oLaOvYc6Up5&r z0T@h=PU69c>Cw2U}2^8le9Sz{_qc~ildoG@0J_n|zQ zUua9gQBJR4FeWbB4%dXam{>yC$ZvZx&`7Go8P$rip-=oGyc95T$6N_Tlt~{{=zkGk z#;J5HK@rG{OvX&|GoA(W@+5NfV9xm%?-Z~NA|Yik@neRyMoOafdTgRO$(S@H>-6B( 
[base85-encoded binary patch data omitted; not human-readable]
zm&J?}y=ZOy_@A)!rPGc-I$iYYbJCod(^3VX1hIi)5h1}fd&8U-VgIz%0CmuZsadp) zvdIYt(2mTsK?D%t*qv#~vX%Hq9#6~gCp}N*iq=*p6{Eq~UXCp@KA0f2AG+vadV}CQ z3EC{v>Ot8;n}L1zQ@?&X#*lg$t|lw#TGMcEt})ST0;rY`K}qnW=c50D&(i09tBr&y zmLH}g(p+eCp1!3?m&$!&ucQR%98q_+5~4PaH`<~Ob4jX_E7p)N=8MmRDI0 z>kHWXF4{@6Ax(_6rY^l~{`_>roFfoGj*FsJ@_Ygtd7Y&Zi1<5H5r1UAEm7g5ds!ti zj+p&^5r0FgJj8q8Xkp5;rT^=KA6|f|0e?-_%1Q&0=7Zs59x9UZxN^L$V)K&80Za8v>XJ)rsV|X44iN)s^96>g^*Z z7g2dHg(d{u^O*Sf>T}PA(OPSrQ?lGyvr`@sW4Xb77LIMq@JT!;mSD_rDt-(lGbJu;lQ%YcnCIdJWJHrud^u;l4V$4RUcWK>{rD@6HCrGEY3xHjn)*`4?HZ-SJQYV=a9afY5lW|fW7g01l z&LAK?mT4T*nMkh2t+wBZxqa;x(h*TN2pU?~RHr(?GoTkO6#|p}pt(pUsK!JIL%R_~ zV%1!JXB%cRG)LRbJ~Ysv26L%?!H9O`SPT8BugPGTzrO0qG>v4hdzpx@BAwG{&JUvP z^GA>JZNCN}Stpvw`U@ISZrW$1OaL`;Fow6;?fCjZ#3|eA|uw zE#9PifAc5X;dxQY;)`CyK6O3B+x}ZK>;imDXiRl5l+2We#su-o$PE{Fxo^!iHv&u_{K%!@V{*``F=>8?dU3rP zQwB_=Bsv6Ol5`zPIWX-a4UPA^60j0pZpw3<$I7x6=2l96Tx1&Jcz`_&bJzCm>E8S9$5i#R^!u-SZMq2a zoj2?zitK&wdp`nCUpnf@xlANw@-1gf)6|Jc*$A|;lg47}*U5|KONewGe!^87Hl=4b zY$RVIbA>3ofZv9npbkx=95B+iFs_~gan6rr9F&pKcI9Yqzjk{chVG2Q2BMiF& zUj>Yt`Z!6miu#uG>@0u-0RmfWBRJK&eM>rG!kBdNnWv|Yt((%arCp!sNzbiZP0FC|G-bm0GWyJeN0XCscn$~6ClUE5g7`&1Tfh~wv|s%>!F{`jVDRj_p%N(Yt5#b zx}AVO0f7Ac=dMaqiP-Dw09?68O)N7fYnqecX6?`I2W_77!G3`?alN2pDg3QrlrPTb zzu5BRt3_CCjF zMyS@?NpKsOYl1A2Hj!PthsE&P3L4X0REAy2;0L4f z^pEMRF0X4%t2Pt-KxCdNhmIu!z}K$(N<`%ujh98`W!gnV*zpx1omilA;@6DoJsU2&}VkHl-ei-Aeti$SfuoW7E&yV=)jOjUfw zqN$v9+9~ORbI*=QFSU_D$`RS5p_Lrur2)$jpNx322Ne~KR+GJy&0Ap@q1(}@h$dfG`7ms*4qaRpB`gP?20Wa|z?C{R4~&)A?2(wks&*PVBz zn{WMjdhHuupU!#Z`Kd=uinh7_y04{MZ@x91dFm;t3xQ}4VBQLF)Y0zCpIONqq_Jr0 zMB^dx8aP4M!(eLD@-^$ydbB1ak1(D2zHu;Ya{*`uoI0)wrywn)-AWE-H0hwnDBOL=EHuZCKPi3=}-fcG!gFaCzA;CswQGqpFK@r zvaik9f$w(HR;GW@7Cgt4O^`3;Mtj7Jz8g?8xI!OOfd$Zxm`Q`)4f=4*74&SoRlwqy z{w0A)FbbiBcJLlIQx)|hOe{R+=l~{7dHZ_Mm>?S4oW})iP(a45{LsS2twVn}5{CkS zypHqIdaS)99dka;ac_3agN`wd11ctIR;!aP^)Y?^+ONctDVZs+7hocZD{VCvi;t$f z3f5M#Km7=-VIt)P=E<}6FC$mpi6@+tjydX>2+%3R6xh_nIfXCpqZ~uyheB9Bw>W0~ zvLBQ&Ka(B$fB(1i^VFEISwvJGt-=bnd3NzAXq3h>A1FMB9Mp`{D0Eiv?7aIiU`v&j zh8%67z*3icOy?<_f<0bl`J+(;BF;%Ag=1L&83+iVJ&0jP%z*v)6#EfMC)bA6>9soGm+oj9hi$FxAZ zSs%xNyms5J$S&SOVoie*b})m#3A376WSAJjGa)2YIwup>0w!gs;r#%U30}=wQ3aDK zuWJclS_Lo}mACNd`RV#EUlVg*jLNb4W-5>5tT4KL5OWpM?RUTbgY*f~*O+ES$y^Z1 zfQiT*fe8Ti*u;q@m}Q|r{2bObPhCU_N7-xz2|Wvb*vW)VHKxeQjVVY{R!@qypI;s zJeodp*%gFl*HJz+rnkMB4ARG8%3%^DQD3I|q3A(h17uVX=;06?fI(vZkmht8??ZsW za7Ny??sUTsZb(a?TpHo<$G`gAv|-N<62neNzx?T~=__CSa+*Qb?@7!b8H5Sy+f$p| zPTLv!JdO#SbpT%(TCJv+sog({b^xX6SPa{y8FENXJ8>)bXv*c~3A& zL!(_xu9xo)OupkLPDu|v@pRg@hwKPrNK6dSfw)RhfF88WGYw-l^k3=Rkn%zNQYJ<{ z`?!Uv1#{ZMDPvL-&u?Z3R97>Vf}V2TkCB)g@S|?zW6(hjrq4z~kIgbN4J}8|v;Fok z3AmCl?sd$``sw|Tq&2J>-35J?1CDMLw1I$R85F8yq$VQmXJyvOZOv)kjH#F`SuP$R zm9m}(^t#Xh^_vY+{yu4%eyt+%Z#VUf1sI`yLB{k+vW*>>tesS?A5>qM?713^(rnvR z1eoN2AytKWj@dREm&f;4uD*uI!8Yo}Tq@8qYqtSthH*<5=_{;MYm_PZ zKfW^&M%sfD#c^RHJLMZ;x)gxRwIA>=10?SaGN)~-=oeFq)u8F#^T7Rti(eB{2k^Wh zK)Zr%g05Oa@T)+8sK&IXA|bz0*i_rhv^(lWyUm<2E1iDI=|mb56vZ5}ePkpzBNWXU z^&T?gQHH34rHsPFPK-^<9hKqlA#H^=DRoc&u94>k;1ddo1_i<9MgkKjEDM{OQ0($Y zHC!GmLy7-|i~iGq$sWvl-JNK-AuY>>q?7Xq87rzZ?o!G7`bj-9j!BFV{hh!3Ra(Yk zVqGw1*gg{)wMjEK1YmMqmTp31O94~faCWqvE_GSY=rn-|rsZm~yD`lKF!cdUTX!X5 z&zMUxE(XdDCR4s3Fu^7JSvMyKCJDXbAYfVmFkL+in96y-(x-z_V5)|Q1DHPbX_m>z zfJx_c027QwCvOf+I(c;-Yo9l=Ff=RbG1_JqQ}vKGZSS2(|7u)@+zv7@qKjZ)K^4$P z4oqWU4riT#DX#!b`w$8S&{zjyvcaIdqlkX^kJRKHd-A>UYV3>GA+3 z9Ns+XlD7g(#~+6ixEdPB!jWaH)^lGsb79bKB6tMpHB2Z7Ep_*k@te}Z;}@k4Oe$5FhPSTUkUsfWpNK$(sRTdh7pkIOYA0K^Z4JLk+c@Tt0hXHP zoP^&6!_*J68$f&#b65|iD*ML>tR~ucEZ_u-Ws)ssys^zN7J-R0O8}B7oO&>kbplKi zCQnU|EPW(cj9z6nno4bD|XR-eV8SC 
zYMWB!xXI~`2Odm!-FIKS*IbYKP^3Lm^F)*$b?fQajV4Go`k7PF;&?Ka4u#A$Oqu9F z12jlH1Ex%ym3enKnVnz}+rWAmC{js(wi}p&1}u2ei2+PiGCSI}0zi|F>}RS%oivF< zv(UeRB+j2QZIxsHBOm$@=GoT}D1{(K1h2xJ*TW3x8-x{UUEWxDp!c}%x50R>NkFx59AEWpUsAaj0dzZP#XHfyi4 zCf;ZJIwf_O##G9bXAuP@!70O~0+aTq56UYs@=Y+`L80#uwjqrvZsSj(a?$y=D8Jnw z224EPqV*C*(}%=0ZUT;580Rm4aeG?v^m3LD=nQGunf6XTY{JoL+K?U=XzRu{9vTzR zj-Upg4Txx6=_V(p)mnWZ+i<94vRG=>SkY47yU9uJIZvc30E^KcO~^ILyOvi zY3sZH^8L^n{`mcWoW|jU=s^RNIhNz=_|PA{KWOc!BbXaR?fPNXM*j74&6;)V5fIAL z)G6ew<2}42GEZRKCD;M~By_Zj=(S$nEu++&V?xh1+OYwE9FL!_g&>k%qUaPAH90vC zs@tpy0MqzMXiS7;Z(`0+0~(m7Gv~L(q-Km^Y7qKHHCu1P^nCcV!_zzQwVn6!Gt%aj zPp9vF^K0q!In&bIi7ly{bV^kOV^9&QpApd@;fvNCL z8&c4S2Bi|gR48{Rp{KpfW}h(0MX?3~m{u-dk-8x|C-0fcr3y(Ikf;aHV$nn5bwO8!{w$`Sq2q;f(xQXoP&UmeeT+T< z^f7!)mw!rAUa`is2yF>dp4tlx6P`h-7|Jko+ol#xY7apSru3=DB)6Na0N?%2_sEI3 zJza@o9A{-n*KqvQM0Ni08-Jhv{@eeEU+Lt?1YZvWHprnFAJ-1j47GQ5pz*ZE{1k-& z%}WYagD~k1Olr@giETyu?g6l3;wlobnI6WYDUH#jhbgZP^P zb!&Rnar4snY9`6T5e8UccL0q_TFHY5e3awC^f{Va1apsBUh~iG?P7)GiPKUyp!72W zB2GQ;)oBrCq+aU#>`K;5`u5+a`(d_q^n-NhI$d3~%ODnx$!#rZE*ewXZiafpz_@$Md$qbH@%e%emJXk@ay;MfC@`YlaY)BjpYbi)X&0i--9 zXT~|rsR_y4)ta0?Wcc_7Rvdo!JKml?an&aw3|1qc-@V4RFr!R8{A0k0`Y?tZkk|=E z9Drlnu*qMu!?YLWP-BWQLwOtp`l{UGaxO5M*Py0aOrm!`a37|;t7A@EDPSsBW1?>r zcyeI!TOl&0l2Chr>GV@i$HadCn5t``F-*yh=^k9jhk3-%Hbvu;% z1a;dlK#BW@fJvL9rUbVk22t48KA)FQNhALe9bBkD+*tfavzI#AA#5+CG3Adb)|R4k zDVSw%?<6t~JN2Z=nA5P8{Q4Jnq?OB_N$op#Lti9E#LS^t#-)oLSbhBhQ^Ci?{hB)5 zBpy3P@N+P_S@#I?5SSnUHKsjCgt?Dt4N*$J3&12XDsdk$MO24x9TUam0}oAL%>lrFtc8TJp`@__J@jZBZ(&i*NU@^abuhp^>7CV}bkhaV~U zm;ff~5I&~dlm{@CVfv94Vn86Y7*^|il5!Yo8O(pnGtZ=(e*DugpI$`%zbSL)MxM2L zZ2gr)y8Y~1|D3*X<>%8Y2=*{yvKOfxQi|F8Jut6r+qOlP@ktZL;j=*-pbW8uHs&Q~ z^_v8EIg7$|qRkl0U{-=E9?%4EPQcOK2GecCoD}p3XS3amNOO{?(QBQo2X@$unduRH zOq*C>vL1~Eb_G+!#6z3Og!GBl0d%iC>+JNdx4$E#e)INCCz#=uAALVP`Ov-T9p|2w z8V7q}xMXBd=c6qmhyVs=m&ii>M5YXAUjUE+Dz*tsJDFfUdGcYYqNOeU@Xq_w$!EVR z%{%5ed{130jo!?1EL{i*52u@Mxj8L<`f0|i!5QRHM3@}gjD~gO4AvvkZ(~Lc40!m2 z)@|7lTGM{&Y%q4zU7=@nE3cbc0%Pb^@_eY$5e@bM}IUEeAw6}}4>X3s!iVT|+%Oqg5meh?qiHCLmpqjmFdq>o8ps>JlE zsaThWTDJVkK9gmcLvP06xsPc+*Gb#>%xVh2q(A|H2QQDZ77ERN#?!O!7>^S4A#Nei zj{;6ZzO50@bPO5*5gHTz)?vUDwp1E7isE-+f279bGou38j(-o{{xg8d_M<~{|5DcR zbTp0E0WfLGn=)+zgk6z-!+etEB)Z&9q>u#FhV))nQy;)2aRe|83w{8Ur6@Md7Mk&j z!$V~b5rsyBn-p?46AFucOx*-+{CC!vCS!hb;2Hc7z{FsU{?4j=@Lw4c?NLf&>Q`g( zWC>t8SYu+aXi7n2Vvol4e@0snQGwQceZKDnL4k4Y!3BHaBgMe9_~A#>r!Tv_q{gJb zhlWt3#yrKKP$*+mV{8gZbsLiY?PPRNGkGm@dnV1EL-ZY!E(xMhySnSnd(uCA;W`+4 z8OwD~hW1!agkn2+v>Jw@8q1~F)Or)Ocbs3J| zh|h1v^r`8{*|X7{$UMP4n%1UI9H0K}JO7ZLU$!K@_jTu_!43j67(sZcW<>bl69m+CBk;gpGIX=}oUV z_0;r%_rE`V`PysKFYmr9tRRE5T?YY8Ms}I`pn~=?ho;(-^MgX`p^s&Z(ykxdQwE*! 
zJzheEJ-)jg-xtw}U;M)7$P95z>PEZMG;IIbdGhMW_RqE4P`Hs(vByb{sfEYs!nb@n zKD;CV%G+YkFX)aj62L^+b01Rx6ZBLPm}r$Ok7Wi-xyIzA+X`G3GY*@XPCfZFGN#Yv zcU1sW71_lN8+T0m$y_(X+anu#h4D$zBIwMb9>V4%@QgnkeA5taj5wuMt5z9}q!8Br z?869+SwS*Wo_BDSfBoWObBkq1;_$FPdARtngZ{ud7b$Dl9a+fH3KtraAvT3hi=HjI z-{<%dknD);H8UVvWVeOW_Dcxn)IJl~zzz;^&ny#UhcP1{o^G&_cHaiziSB;(W!Ve8sL3r_cvM8?$=LRh*2geU<>2rQ!B}J- zO6}jl_i@>0KAVo5Gdr>c=o^rcC`CEww4W=%j7LJ&#;-&efh~o;9j!024p)oL2)7;S zUV2dDbk@F13998XIVb#GphJ?Iz{Jcmku+a<&RMA$=7^Y``jAq~I9HK}s@leG(p|Qf z{mp;-qBxO%7hhD^_nH!0_$259%{-55%KOZf_%WE!6dzpI!!a492ReXT9R( z=@+-(9{#uqW12~4)QTUbn-8DLuENBOSKjSRygW<%l3C?fO27KpSB`#lI_f zxH#U14Qr_H&1ub=m8q3w;9YmjD7i6X$ERIH_HD(Sx5s%#&|p7fdOrZzi^ep6)~xip z^Uq87-Fq)-rydIv;~va_jcBqaKHVpOKxg6>#;UD{JYFcP6c_xCXFBe)`eZ4OjPi5p z374de+crlwkB|K2htlEGr=vCN74$I>JG7)GYM(MXf_Dgf&Lt9j+#DV`ILV-7y7H;) zc=qk3-tYJ{!Lz{+E@gAq{r9E6zUE7r##8_%Vl5F?#za(Jwt_8f8m2rWbu&RRf*Jyt zs^AB62^C)$ro5mX{GH{M89+xmuIN+!OqRtyk5b3CW8OBBC;Ay}^S)A>QNVGIXJqZ| zP|u!O>Uxjal3}@^Eo`i^j#W7@l`aJ3J_wkK3p856HRBDyh+-R2{?XLAa(xy7Ox#eY zbpEdVyBL&mjR_tdz|_v9xH9&LiQzfO#+4wOPD>~eF02*PM#%uatAZ{+en(|px(wpW=gx25S0J~ zS7iAOObn>&uHBR& z>0kaeU4F&q(wtc{g8@glIOWdgF=?W(?IOE4(r#papm`lg@hnZVr8cS__4gdtJ4w$_ z^;kZn08Gxsk?Zd-Z%7K zcH9^w!#4ZGE&tAb4gEFk(Oy1A5I%+}@5;}94kk>TEQ3X0I{IZqDPbzBk0mH*Q$S-s z<=%@_U622$7oX1^x89yQ0pbf^|C-b?ZBlB7iI!83hT6uoea)71*+)NRrqMJ5P-rKI zp!1=W=*_{nWckyObbZF9+ft{>9u7J|xN#>+O$<=fC!%$;XOj6MQBdfWT(zcr_GPd_b9V{AC_Tl41H z?c36?9{MeQJIwauCNOu7{BVGyfZ_yuh&{#ug@L3PJ2Z%H0m(>Op(?=#w8lt>eb9vC z-3YzE{NM+J3frzzhQPyV)ULXcyL)@45%PqMEeVB$DG65`Xi<&B4P*Q=Y(7 zjUVpL`|b^3GWcOX0-YLDEoI8X#T{q*m`pQdKNJI#z2}^-`2;=8!@o3t-pfL$iR}61 zaI1pGlx4}M@`X*n&G>0H4pmZuAIA7Qa9w^qFAl+E=y%bMQw^-1ciGOFAS^HyT{L1h z2c|6eftEV}Ox{?UN$CidFiUNt0h23b<{Hz;`-T|ah!eggo5#cndp5pA^X%hq3Bcb& z0Od9O)5o-j@YA{TXDi63Rm+yK9?tzpSVZMf_WgvPB6lKFv1{kV&~{pC&bdNt0eR%-=P5&O7s5G>?tx1JTc0*Rw<2v>`+O=Vw~^Uy95WT7 zjto}@&c^_N;1Gqj_q5!miviaMV{HdLCeAoA)s96#BWGVfNaP}(v}|Z=W@pjVDli#B zzG>t7bi+@7$eKv&(%9BI0!He>)L72F`$6nooxN#0b?5;A_EP7_{@{Ew!1Cf(o}Ye? 
z$rx0Yp{3?VvSFjEM2qGzr>0r1S6V3palkyzO z7ZwNhcaV|YsK$?c=!5BvZ@xI~-ns=dAxm_G_69HD8IjDEe$i|j`TS%)g-6Lne55S; zr3R+(F(G8Q2~2W9&8mW$$ju6&&P8L2ZMj|9N$&?N!f+cg&I=^Owij9Fmc;xnv4-HKGrhkmC3(H7ApU( z(@u?PsAB(4!Mrcv&lUe{9se>I{O6Vf(lAh>aXf1Sb#&qL0hs2K_sfO+1g1wGd?@uX zxE)}9nqn9zE}rGwhYBXoWf~K}q{hS+=uSNYq!@(OF*>Kvux84(4V<}d%Kkk}uuGX4}cck^rW zVyQ_h5#sL(}U?ryPG0=bF(r+5s*lXj=(32}r3$ zxn#EsDH@*r^b?Pz#;O6r$j7J6t5&3TmVg)pp!YFe28fC?vMmzPGSK?@ZEs}##5l=* zq)x_);M9&oX#TM$h2~UFS^5|w;hzRn6s**R)PD9ku4va<3U=o`cccd&xgX1b3s6EM zoF5=#ze#_)XzLwl>bvm;_fppIb>TPa-nlDXc-m>{HKe}UO3g^QoU9e(48)Y!%UEro&J3NPr;LwDNLf8JNh+MmtRo`?V_hpsC%#4JJ9e;klWBr3 z`^3l7xo4jl^-!}@U{M=2noo6fkUn&UVJ#+Z0@8mbFcH#0nTvs`Fh2=mHo|Y-k@GR* z0ZwR5n({JDP`{L#FTkWJuK>;gO!PJIObb@UNLOC$mQ=<)fJKh1p1r#>BZs=M&yu{{IJhi4!<2Wx-S;wLKO5Edom@-TN zZaFB~0B-tLI7;II8xZ;sKygf!yNgsM?MQ(0<^xQOh80ggl@=rU?q_hL93jzQ>!$WC z*rD|+nd*Np0F`M>1z@6#p*0oNGyiS51tvQ$md1wBwPOeGp}bE&x1O2qB*A=%OnFAm zow#UW`tldQkjBD5wbPsR0s0R!%5X_+hlRlNT)2=lu(rrKgMLKfcytL@VlaTnC2}2-(8vLEVyer*u z)6Gn@#V<%FxP*n74D_Rz<7?NR-RX&EmPP1tD~|q&6NwDvniwn4r=X<7uEw#SF|h&v z#qq}-livSFe-P#n{b5fnU6wxfsVmcrsZ%54wo3r`K z58-E8w>rK2#D&cJAwlY{%`6qOJGJlFl{$Ce$70-65+x--l>v^0BKQ@+%6`#{j83%2xed`VNb{GJ2tId|7`lfP5;KjSe+N2o{J4) z$l!WRte7u%0svdc>fqW^CfqgO-+r>LoP6Yw>D?E+D%AjfCXzKhQyH4&bo%dmx80F$ z_~o6ci3#uBj&%Ug@tc1rMr`_E^|`u8pgx{4@CJ`{qWzRXKVppixD}y8lk8Woy@vIT z76!oY12D5>v|giTL7BBx{+|FQ+CpG5SjMIdjfroAnzG=Bssq4u#C+%wV6q>|@$;f( zd7Y*_>C-KOyf_~k5d7A5vK{1oIsc6dNBr@br8e)+x6$_V^ZB@LujVT+Y z`DMeu1twqRODwE5TG7^jB5|p0Ka*UkYTP3zSAOo*WA$;FG)Q;VOp*J37dZw^dE=k+4pxIT$~1%b)XS08TO zRB=PiwPX}0YH36BUtsFzU3;-}Ihxb9 z4QtaqKmQ4SxR!MKIcJlF!`v;TEE=p%cisA{bnA^jBb2%?%xh*CanZGX(03oUDd+s$ zi4(pRQ{F_hq-IQ1&GebfLJe0)P-_sF*v0n(qpzz>|NBqhm!{93m)bXO3UImnv!74T zJhK`z-VDeWlP!$90f}E=@)`Pvs9FqO7ZTh7c({~_jPUSjhoNEB5@J3+?Ivt|-MWqG z-IrXF%9;qkL1R)-Gg`2gIZ`^n?il;+Kz29Pknh;h`r zr!)1iu2L_beMH9%QVs@8CNMf4VM-a?kXV(f?dsi^rp}z3+NRD*T?0&)MNl&pj;WbK zBBcHGJP1sC>AOL$p8!ZMU-@ME*Bif&@3MxxkVeum7HL|z}(#=gmZ)ttiAk5=K*G6QI8`b;|2oEmDwynf743m_Z%9?i9vzR=$! zvv;|XU=;CuNY0+K@A-a7O?mJwW?xa(%xq{GeTP8}2Y_iF>4?yp_+-HkYMh3Y%i}cV zWngXT6!t?~3Vv#VGhoX6&`|*|8?lXcoV!OEn?856%|yzPPZN3$aO!yW5u%_+zabtZ z)8?UG7adF|1~Bn^G}P1{F6mlGug$^i1>YqwIew$g0#Zgm4(I?T3S$IIAUr44s$x!s zV1d-zLz)8fTn#Qu@N@t_)jzAH=UYS#Z)HW|Wl9Lc^B-F@hhiO0#FpA`(!BC;?)} zXTa$wkKhN|U@Sfi>t%gy&wnrYAtMR;jxLG1GAG48F!*6V?GS3x6n9wwOLum@~v8d}rO{_UppADBTIH*4gCRxrh6W{-~y?|=~06+jqL_t*X&Ea>LJ7+d#9eh48s0pMW z>cY2aLtFgNSRDMcU&p?Vbmn=dr;91ergiJn7?`t5MqK~3Z>DjWnd|7&h)z}8aMVGw z_Ni+MsWCXJ+R-A6$}{(#IUtt=KSS z)BFfb(u2&z1hP9w6?EK5r(*UbfP{?u-T0~+8(8;D6FQ)%^IfUf?CIuDY-Ud3AOhY! 
z58Q>b|1Lnig}F>D;s<^9_5kY6rD8tQZZxFliC*jwm;kPB#!U^{QOCwD={@J4mtM(2 zc$iAyLll?x+5@dkntfz?c;$2H?|=9sa!}Hrk`80raU=hccI8_cSY?4ik!Y5_G9RS# z^>*XuZA73^ql!69(6@7}8Y$y9zIGkUuuov#7$(+Uf`T}1DkSj%{EdxQ$jT%EHi0SI z^_YkJ?G`J&QWlq#=|dP97W{xl+m8Sg;o_9r;D<42_~scb=eWS6&|+|k!y-~WLAz>5 z^NySspd>IEq?F~xBzT54Q1A}zsxY>+MtMH7MnSV2&o!ouzZVLd6}j+NEa0Ujx8cWa zi$X(;I&P(Fqgti;0H&@Li{myQ6%3fF2AIUN^B36KnTnSkE?mRW(aM|jMq$$9F}}wN+KAxKU=~mn(k_!4>zdOFqVkq+*pB(3 zH=X&4)6(a!x)Q0GGErDFlaA&4LyqJBlA&f+C(4##bh4a zckGC^Gb&HBj|oW~3=*dxAIvYFdhnl&4zj*B1GBlpTnC5BgvDZzL7+|$1g(a^$TVB@ z*=L+excHfvO~(@T27{yCn!f0`gJ0qiM^&kf!!%oz-)jK4Ku5oV2E4Se&#SY|`-=vN zgRBCEz3lNN1V4NMNt`z4;)~yKQ99+s<7pQ{xd|$<4>ml%D*Y6n#M|HbUNR@(cfkfP zOL0}&zrFr%(qznlQ<%8A4~;?kt3+zmF-BvL^%`SWM5Kv`fJ!gY%h0AVOz-dax6X5h0R|J_ z`@iq!f8d#?oVL&2Yp>sGd#yYc+!k9FOAtDf(Lfbulb|0lDvg6PE3PHekOCDHD3Q`O z#jg6<%AE7ygO9Vspup=yNxSmLf@by3SD&acXylEISp!n>X6iZ>lU*XW zS1&6mWX%ohe%i8jN$d5-jy{G2zyk@jF0@+cEEV8WdHXu(Hg`aOaWB`pVxTKkMOJV` zEoSglE0+^B8pU&v8HMqc#;FLjQi2PV=Z}eEpp6w5Qoe(fLCUvRjD`Z|w599wtZVn) zguM4=E}>yS*03$FU!_Iu8ZXg_7>`2bC3q2zKvLXNGI|fNf4uv)E%)E z9yq{mnR=t;4(@OJ3Uer_NLXVNO$HikBPezOt!lt+;tJmOdMB>_`b99>VJwcbrR1$%byMU-X_ z+B(!rpr{l}v`l%hHYHM!#v~pfUy|rjd{CP!4m4eTqb;aWCMrC13y(Q?kF>Vt&)$50 z`25X((`VP16qCTMd=8YZ{;nzTNfXbTL!;EL@Wvi$$Vom`!hJfj*)a#-=!d>bU2fVU z&#Zg7Bok35-SR<$oau3J9NgTQAQM5q37~X*9S3R+ijrK`lK-x2m$raWIicdSOvzLm zV5Vw!9X9YvoRbNJwbpX3d^Vc)(k$e264CuDVA6_uMfu42S%V~}tkL^m1~TP2Fkv>( z+KuMGq)F~6d>Hp8VDcRQl%!UlB7L?(!f{~A1DJN?yIuT*6UNwmci%zD4iZyBl+s8X znCK^gs0cZV$@1FU|Fqu%OauG(_kwilSA`WT=bI#{e1J*Xsx&5r9SfME={FH27jj_I zIW27(lYmf(yaJe{Z6(VmL%Usgz(Fq)g^v-ysr41@Sbrm6I(6I$0Wb-LX{SquJVT2c z_i1tCXDQK6hdRbz9NFwMx8MIga*+n4NiBq3O$Fn-?|KkmQt`ZKJ7@A4b|OU#qr?r6 zysM#?=`*HVN7ktvF>W&EI%t8laC2sSX0N>Xl6AsVmdwPBfr;_Jz6PfeX0RKkn1J@N zk6ekn2v$genY07=RbE9?vKk;#@P#B=F=MAuv>ut@jz9Srn|RIy=n{#Saal$iUwZL1 z`mWKl!~5ez=7b(c&!=S+(TbO5%o_o|wcECnS-hGI-<`t6-Zi(xPXYjT>hs^ispbwJ z5q}Gt&&w8m2WAIxU?0eAj<|*B+Y9dvHNN)^|sa z@gw~7fBTPsDIB2iJv=$g>Oeoc6cA`k(rm;W=tLT+eQK+}XoiZQ)c{NlbbL(` z!Fh9LVsFp)%+Pum#vB5d#>9FHF=Ra$&m>8vxyB@hq6u{9rU7KKYCnumr6~5m^ouc7 zEMLFb!^P7N3rqqPq$;!rMgA}^+0d9An2?gNO$(UTqAji2QQ$y1e%$eP&z-kn?!&C4 z@!U0%+d+=erI}{rmO{!eVu02vwp{s4;0G{@(zTTB&o@8`78pUTCyi;`aR3txBMs@5ne3s%inwaBbLhg&wwC)lIG_o5xKDVToA)9! 
zM^cqOI>FL6TB4wdDvfd#b&BqZ0F$2Lyvb*h*z!o$f-(RI2kTZXg)uI((}3kt)Kpb9Q?h#X`z2znE|5O`7Cp!#Doboq$ODZvCuHR!1vC1KdL`CDFRdud7a#gds6>B}3KR7DE)+B=Q^!0a7I(m4LnQ z+eM^WLc8Ykt^%}t=vUfajDUm*vu@*N+r@gVdYGmY5kL&96Q$yBJ#f8*y6n3T0mvO%aZ z#R!-p8j}l;T=2&LliU?5KAGFA4-134+aPEo6^&nOt{TWkC=JN@I^*M0D40B9Gb8%~ z3Jh?8b~~Lk9TNEvV8g+`2Hzoig!mt`)%&_v+!XYUYfP2jFKYrOzYq=e25aKzKVMYl zzFTw}t3f;j_WjYMCl0~*&E`>g* z<{lei!ib}G3j)KdXsVyQ_YuCQ4C?~JS24gkLX4G1O_fxoX{zRzN^B@3eRp9#OrpPC zdewzE|7$FrAcXG;LI3Mh&q1TqvBFHH@Nmt6=Hnr9muV!r5v>PMC_xL^NH%iS{Zgju z4n*KxdiHrXf9^ax<@n=m*6caZXpEh6_GH_%dpkw`Iyf+;%hX6)m3k?LqJRqg(a<`h zVl)+~u`_R*&7D2ldUfq=!)R+I(T0dWTo0xUoomy2BKbk8nS1NEZnsP{jE+6~LEM;l z(I87PyQ!RfIa*~gv={jI1ONzg0Erm103tya5a{Wn>7Us58`q=dQ)Zp=9tvsCXWlAB zlY}V&Qxd+@E!%dmUT~My0ZdV7<|?vTR#ZrIUN5_l5-W-Hp)}hXg|rJeVenC`BYW_L zzuQK#QN%(IzLtk^NoI|h%yaS=%Suv>$xi-XmA>ellVit?9&KwkQkej)Z0)-Bp5;Zp zY58t6K=M^71=ZH=+w7_rhpI9%tiWFc$l99tnwS?`;HwFK_wSM41xjKA(a@Dl zd7;MifHWom6S40{0TW%zqo-J(UVTZG)y4Yv8OWM7nP7%S^Tx-@^}SOfl+kp%TEQ)!hk5@}b5QFRFwXiIK5J0TZAkU{d=go;=>} zyzLhDAaRR<08H*(Wl~7WgKCj}6YaeZKXzd1o!blPROU|km;fe@Ytoniv&_uQLri%B zCJ#z!;aP)u6OOrn=|GK%-a{zTVohmg>R|cN0$@6M?6EcxQ->m@1Wa;D3qstS*TTKr zT-d_zfeaFvNKT8QNea;)JjR}`oU1zvFeIX3e6QP2)P z8ib0!KlifD`h2cyOIo|3%;U{Be?ESY=OP9_WEsqT6Q;uw7{8c}fGL#-w3~i@o%J9x zZ_)g(sI2=jdE~CK`~Uoy#ZZR(nrq}c%Opg+twk{&qBjv#CG;y(j0#!(Lz@l)BB^F@OS zp4zr!yWR4~o9ybVuEJ6lq+fD@Ipk}>L}~&iF5d)N`m6?N`nbj6cTIT#Fr|7*qITp??AasNO?g@j+{j{T;ik`dpF@GE-mN0~&P$`?W37+c968l% zhi-b#mNs9@br_8)#EpZ`Yk*IxOj47^wELScp&B(dY*5gm{f$(|G1y|{2Q*s81=`HP zH~ClJWzTFLT_k$6z~q^aF(ZMPn2C5!q&zv28{_u_lX7lNB2CR5w@h`Wz#I&K39W<%x>T+( zW%*H5;`>*>yTKjdVlHaBd`x;kMJyIdW8(7<0n?#R>hpzmM}Q03C?m>DPGg1CidbDs za#Nmwi7P~aDLXBhOxDK%tS4d0!!N-T)Iy{JCVH-=TSX?sp}PXc+)TpL<-e%Z@PlYG6I8^ZRMiH z_T2M-w-QQ6421dY-BU!_aB?a_8MHhUwGkFishA)w3X8D zv`;=KZJKEzDCxVvKMohFErGYHgZjd?BwGt-#iBkj~NW9|7DUSM%6l+RL9T&fiBUoUKqeNa^hSgG_qW?_zsde^>r@+i-0_qsA88Lf{HT5N-7?QjC_zlu zNTL6wWl6KT{gzwp^pnRih9U3L`+mNrarBLQu*dnEzH6){Tnm^~lS>-Y>@Nc!lK@m} zzzzT=4}M^w>|uONutlsIsg{etke-%>2G|v0I63ewMd4F~#AKS4Syu1j^KuVJG11lp zO8l;AMeF!ctvqlOCLJD_xK?ulZg%S7fTj5v}47U^?YwyLIZ#2)C>U zQCkrXT+8F5q7s#QN8#chO`DF!^n2^ptqT$~&T$>eKaOx1D8#p+O~8~0vsO5+=u8U- zB^)rQ{@)L3{zn(`J_;mECD&9WVrv}IKH+q11ekOgMGY$WA)7^=(wI)gyrR5>T7W8G z3S?D4M(6GS!xtvyL7IXN(I$x%QG~(|L2lT+&pfs4he31es`5Ee9c8a4=jzD$0 zNKj*=uzC4ecth@}u_X)VVv5^kM;}cTBebH2QpS`@04kM;!pxj*aJXdF6%mrowGMZpkO`1fWbwCZwD5q5=Q$BZk?8kt5xds9+V(w82=AZ`Ud@{oVAp z*Stco%EF5oS0hBox155BP+E_)Zx0ve9twugZ;w25x8LWP=UyNa$Pr}e_=!FE$YVDB z^O-y&8aeY^ZBt&lodj9sbnIyN+gl>$C(POrQNu89bLqu({o9wiLu zS3x_mnBiujvC5RE>j;?Yp;4Lh!>GJQ+9zO2z;`JDNIq5ZFMXC7Ede2>4<*&xq-Fr_ z-5DQzWSk#Ib`#O2ggUjy-}M<@5(;`v+7xZ)ziZTyS?13X{*hoBK3mAY!+k%jF@;AR z_%8Qc6pT*1Npm4;_c!w*J|7Mftk*y)IrQ0U0c)d+V9^5M!~*X06-s|3G&W(G5G_@y_!LaA<`<5nl9bZm@0N{vpJtn z5Ax)3f*84giK8F{NePnRC5fwZ|9xQcjNW4Cm=2X4xd2;u3DVIzQGvoiWpXefHUOCt5d0r0}*JPwq z)YCx^YXtUhq5u)EOsERjDB@YAHOW`yf`tyH4bt2sJjm`ZQ(hK!{qbXuu@eD+wrx8g z1OOteNAX}2nIlCzPFospU8JSE>vuLNl*^0k{5+i-`QmNUq}JJb_v^A>ti6^omkD&u zvgP*J1CPKAnZPiJvrnHu1>3=-!TG{^_3nxPrL8^j^s}t&-(%g-?vPx3VXI1omt&qP z28eTd_Ox>@xyU;9?rmSrnMY{%qnK6VFr6WxYh2Mhd?G=M>Q^xmz0237HfeoEJ_5}3 zzx{>vVsS4ad$xVUdXlw1i1{cH-xN%PHdhmhty(`}Z|GeSiF%J>oK^081I${X;weOn zsb9Nx?qX-2d@Akw(k_{NHW8lnwtU%A=#tXpWhK_RQ#(-O?PY7KxfG{)C89Nt5tKnYEgF3XOc-mXx*(8bydRSmv(WDyKzkU05r|Y%rDSyEN z=QQ#RmO^K{0CUwzN(0>Lupo%W_5#4`wi9&-P2}XXvjR+cWzc0$GFkk`nP*rV?vVgZ z*U(O-5rP|AE#>QPfAwAZjp#`NSj6mA_o|d&4)GRQNyMuZ*dt+0n-<(^-oEqJ>$Yjj z7JKf6m+YboF0fNionY^N@S(l%5`HVf*QJ?AsPc*en0ou?_OyrZyT^Lv_O^YDxqwUB zh~}%9xbEi>tm0cdkn=l;Mnojq9y02tA7>@%I$43TM 
z+D|zWRvkVjrQGP6+r>(DY_YjBXE4Fa-Rb@R3YZcAr4+!)fr&v%5-@pDJ^D|RCgNif zFf|~R9uAmfwwMSo-F4^f%x?i3kjmq$UJacD6TqCuWR1qG^1(-+*e`zhYwOjk2h!&c z15*-y7nxZ^o4#m}iJ*a0U$?aB7!g|lOlXHN3@z%^Z#t)u2_T?%U~*Gljb%}JbrRaa zsVAMtnv!gYmkAlonl2cef6u2#nuyTvlg?gN^(mYRw|qw ztPj$e$aje*&=f^jWa zy4(eV+@861?xZtp!Mp|59pE{K&nuTNM@S(;j+#OnzhCY7`>y@jwHDvKyJz~YlXeC( z*1U;_scHebgrf!mJzjnJCHv;9FGy}nwsJB-^yt;oa{CVeP)A|9YqX72^If-W5vjfw z0A?!R5oKHU?6nDJoM)F^^94a`25{og;nbC4y&YPbYEH@YXq213^Ue2JYq%NHC)Irc3FW|3wOFlflD6l-ffk}p z7cEJUD=5sjNvEHNiEgTW_W2BZ<@ML?y6b*nW5=Fgi=eCLUwqjV~xaRAayLM9df6O5k20%?PHIz(MYOak_N zm1Ur!XAeST(U=r2UcdqhhPg1q0E}e&@S|xCO#S-w2AGu5 z`B05XrU@;ub&ZKhCug(+lQgD-hq~EwOL}SoCi&BNt}IM~8WY$s3<6-1AxIYim>Owo zEp@RjIA^j)z$K$qAeu921cbVg22Y?IfK8~Gt+JCc+)nV8XJzEfY*3*HQwuB0gi#KUj2 zwU+i501$op=h`oS{c~^A-W|K_fqNcu2|WX*yJt^H9EY}4 zNFKbd07W;(`Noe?H4b4I5@pKJ&DlH)XP&b;YGN`-Nw1 z`S;)3FMe|!D@I}PXlT(e=Q4ak3Mh!?1`U*jQDWLxpL@X``_rH7s>$PRSZ*g<`pqJH zXZBat4#4~4t#{FgM7!_N$1H!_dONybH#>ItAP0(R^S`n0cURio_dR06PNc68#8?$> zc?CN;w;g?p_5y?T1_Ms@+jrW1_dH-%{q$OEMZKvF8`jydAp`81pZ|=ZBw;NJm}QE~ z!)&Ppsq&*~pame6)%9d>-|Z&FF5SDh88&b0R!U)%+VJk(?Uxr^XpLlns6!i4i4_TA zahN+FdVjjjU%CvTk?Pf4%@>J7Hxjf&y3%aK+la2@^M~vEhF?V{EI5RhL(zitcRqaIrcRy8T+2dxKu`)zc`ay6Zpu^JnGfR4 z(o~|Em&1k(b6^@ibOfN3gqfdgA85i(uR4zBR=);d#5?q=-&|ut2oMJ75M`bSs8Pr7 z-vnF&e&^3Tir)P*hiGd0RM>y`zx*Uq-T}c6)f-kgFa-~#X79g2cr{O%&p>-qX>E9{ z_UrhTw&1Z^+5<0E%Om#*9)PQ8puDqump@qeaC%?`1n7W6fJx^77FA?@s3#AZ3I&$t z=Mg?T3sWA&?a&H-3@~XOQ`o^=`GDm?tdwS>xhYS- z9}bvg9y@v5c$d)Hw5Fa93{LsZltDt$o%c8+DU;u{=`-y1+wUX_rx!jBsndrCrWQ3O z2(U$P(r{=W3^&0|c_j5@BFmJg6GROP7l)L=8U{hnNXTy*jAiu5;Wh%ZmF)1Pgc-Bm z-2E>)EA%010_a)vl7!mQCX-Q|G*0dG$t2dw&_%$?Lf%3Kwb+;TVt}wLrPZ!TXx4B5`bn>9s`pZ z%)0=_Klh6-yd*LggIB+=kI-F zZ@u;p>&117$ef@ef?YdzqGZK3pI7Qi?@pdgz@%3UV_hs@%I#$tR3d)s zicLQ4bnD%{r#<`3v(~?NZyfaBkp=sB{AC^O;XCfIapT5Xw|;#syHBozQz?L|Vsxrx z9L2hyR0{mfrhNP}Pd!cc1PoZ^1$M&F-qx>6maSO7$!2}Kie#{8dN54+Vso>SZSt5S z$l6e5^S)bcc_r1hvmA{yy`5ck%~f{n0D}h1l!*Xo1yyNN$V2$} zo%h%=_#nUC@V(8LHPi08get(7E}5ZPGT766k1E2J^8Sd+=1 z;D=~ZH7grL5H5bGk4fRUl9mSa@8=HKDom2mNRzsbY~+&c(p7o^)8O{Jo%Z?Xv#Gt( z3-gLJoECryjX?=NlK~ia^1?*jOeYG0u<8CoH-6N3ODk2Id#{YHl;ickxx%eVgI&;MdNgB*zXxDa6k!q|vGTLo_P59XVr772TcQIC^S4>o>|{H#PY2A9 z<+gb3dfP!w7d(h$9!RqhUE5hF{7i-UCHBpl?Sy3$;E<4R>-H2{Izbm#kev4D)5cjP zYYO8yt_CxE68$@G+GjTRgHP=@S6pR_=FTT&P^xt&Bz@?Sk>31A($#twXG zS|ga=rnPJO$|NBHLUg~I5c2}SKArVvQS?_i&!dICb$iS0yc15ev1ken@*e@f$pB0| zX~L#$*kW&f^fAB&@IW)tV)Rnjw*oq(ExCq^fTYi&iv}V@B;-_Ml1v3Gv)g8(!L@>> z0SU%`A9GwUswzCQB@hOhyu~Z>}*Ne-#hvq+hE?yHg>nruY`KgjW#0We8p zl9@yUADHq0jl%$w?yn)%U1MT#Df*FW({Lh(@%$Vs z)1*R&nL|F-8e^GEgYf*ofy+e?*$}$PzMyvSN1Ko>@>vidP|`kqRw#QlVT}QO7?==-f3bMfV82l*Q-JW>)HP8HU z*4bylc!v16YGLcHojdI;f&gN9_Iq!+#fDQnZ{)}kwq)Z58%@}FLg&t&_D0M(9?AN< z+4Jp{r=PdITS>n|Bv-7MBcY`A)kQYE2W7l7NG!^n$}1%bl{uvH8@U81)KKz#)tWpK zof7=gDjOijY>f$fW8rSg=+fS<`ptjYpyQ6R3M61v%#CO66>ZsWE9Wn?6OTUDwl7;{ zi#D#aGcLWrVhEO*JMCk8;E{)|9ok7Y*Wa*l6Jhz}0EA}SQa36LZPps4y^K{M>%-b) z6NE#5l`+@$5#d*%k{|sB*p-vcw0a5zHlh(~!FfCa-Z!-O#kb$KVj@emzEA18VlgkK z0({lbY8gN#tx3x(Jep24OEJG62MlU}l2G|YH5-+b{sv!o8%X-!Z|F?1neF_Y(3Tco-vDlY`4 z*3x`vv%*YN#L$6>xgJ^_LSRx^^$dKxGUX{Hk^FTP<;C1ye2+%xe`GExqEZ549c&iPL0i5t<(9}bdaAC{&C^3LB#uGHe^$oG{q+*ibmCKTMC_Of(JJXfPiVC{~V-E zj^=PvW5NdMJ(Pg`ybxeGSW=D_v9-8`55(Z=eD@V`FSm0tDB1#W5)kR$L@hB$B7iV# zJOC%LBwl1!XaFecF`aazf^FfZ^|oN{9IHiRLWl6lT?bJ&;$2ket7@(A*YBeNm5D-8 zEm|)T`k2HJwC#U#zA%6^0M(k~Ij-Q@<&?NdV(Qg(O5CN7qyL>KX@cK1A#j5Naas z#OALk908N|Nutv&lxD|GX-p2)D`G~%5Mp64=}cl4r*dz-l?RVkx_D8ey`cB-Y?r{p69L#xfu?e<<1*aJ}&}DLzHCfZtjfiRx{T=B* z-a}yGAU!CcSpoy?)HTO`b>sE^+qan6{`RM*EfY{^K+Cz{j7iSy-g#ORclwE1q~f>gte7fiGLqbo@4SU6D$hg 
zn|kAocJ-tQ4#3L6mQJuhn;u=Pk#%PBV=N(H;;;8UW_zi=+$}wj>JzlCDTc(Qb^Gkx zQ;)VD-7;A0x`%X3>2=eBbX$oZWQkBW%X+M_KSHXGD!KR^E`zVNvc zK`H}bpcUq^qNrB($V-2>O^mq;2x5BahPb=OXonZvM2}$MY9r^wcuAvL-hRJ zx^}_xkmj0~1T|?ynuqJxZ(wmh_d;;d{M>-{D>J+Ncomqkj~sQRSGZNQpXT5`3Ysbt zy)-4yTL?&s2P*JJ0Zj^o&|0om91Dd9J|_C6EkK$kU_xMpv-sMpVEq-xKo8O=6;UeF z=kQ@i;8RPpp+k>wfYLl+)*dj^LorI`K>5zK)29{p;y)W8AtCHQE~p*s7j61M`W9R_ zavrQ;Kc9B~ExxV$>vw(jk1km1zHar98-n(^w#28Xy6s`+2Q_AYBLWbJTr6^b->;ho z*Y)Fr8|e`_R{tY+jYXYgnnjL0=#5G1n;Hp2@oUg=Qeh=w1$TvM>Ng$8pDJZ{c1=m= z_)orH4}(G3nPNQz)L}qxq_YjSaL!y7^&uglP06uVkAs&*7p&u;BZ!!h<5$n2^$e$b7`r7Nh7L3RAf<- ztNr!vQ&_)YgXLhyZjDB!6iWgot#gugxGNuE;@%ES1ORwlAHE0IQXe;ez@{f`WaK<>0%d?@~;#qmE<^%vG0LZCR;(^yzf_=pXM}d-|ds zVa(^8da9Km+`KjOGb+GBrz8Y*^Ahcqw6Aqh+KMSR(Z5%3>(;HCmuCNV&1#$T-4eU< z+W)Xq0JjvFeJM4QzFV-^l3~I}_8($zzw(N0S-spX1`-luSYMWgHuIBfue6QpHrhk? zK5QkMcG}RcJ*|iUjA+b%H86+bvRyn$rJXhLc>I=aiG14$v%^dZSjIzN)s@ks}HB7za z(}PwNR1|{=Tzo)Lda6R})d+YhIrjx7GWDvM;PGgze|h51&_=nfTD6*x?#Jwk%P+H& zPCAL`!o6;_(Sqvlzu#cLzwu@Oc9;9Ij$)13(@*`Ck!-*exs!-JfDgZs%CE?bD;oAO zLU@y=K5yaI_T0-a;-X+qagLUXNMjN(rSm+p0yJWl6R4^jik?FOW%7|V0n_jil;vpc zz?6xfP0L~q1sUgX@C5mLp)~|5ZsVKY^$N}jerx{M_rW8y1-P+)uX2itafS*{JVlq%CP*XNQd2@nM^)zS$HmpvRXX^jInbU@-wAN^VNEM4(r2 zq!_(xTH#>$?|jj_LRrO$5lV z0b%kBZPj-%6M?ZZi=ThqSvKVO(N?@;hpBefvW4GSXVwLc9yr*B_3dxFFz>xN<1?#= z5TXbx7XYfLWhz3B)&Z&SiZCS}jWb(L^a{o@52pLcmtWeMq&S)|`AinkW+Q>8*oM{X z?He5Z=S@7r8lcU_eMMe|JeBn}o40SZJ(WbSvA*oS2OqYDfLM?8ww8kSR!sj?VZy7z zb{gH9=(qX`8-4Up>)pE}-An2smSGg4rEJ6GS6+%gq+FrV#V|wG=hVhEiU)vXp=h<0jpJDes zaGz&~Xoq&XgUCf$FLn|*&=&t-8s^X#nW=ep^=~1jJ70LqoKP-3WoDQ^Z@zuKaDkmW z{$x92>~SP=E+t2ze3)c+Ai(44ciyuv*L-jBZK$gSP%F|=Svmwvs`4v3@`bnXnL4yQ zMWZDE)&fEWL~Y0K)sD1I_uO@-jX~&203e=z<{6uV0DQ~To2`HU{!Wi23gAGHS-NDo z-F5f4*O=nrS27`szsYYXQ=Uxd!+#K%G=H70D~TKVpo zxeuc;37Guekv=~_V}eo06eb(5G|J+=rB+l{g?VKIg#tnU!)Q!(92-J3Q44W=TDhp=iuw`$T=TV)x2TxsBB>^Y%-HMU9r!pOkfk)oSryMzX%#ckG_dc zq(`PQWHJa-T@Onrp7U z%-VEphc=OC&piJUYl}8nE~dB1W5>C3eh&=)ow>8EL81OK z;Cm^wE6=~!Rxe#jSpgE!k`?<6BEHf9s||&PKketY{*h-(w71Co9h(%5RyoY3-ZGUEBLuDBvypR6nH1); zUMM>|ji4R;s{~Pq{zV1VjID%32XQPZIQ)6 zEdb=b`3p!1m5s&(cj7+vn77({{3&yK5OgNt+!uY_e-x}EJ04B5YM9W^_cYTzUN+BfCi)hq8qNi-p)MZbij%7 z?P$<46RLcUGzRB6@DL@Lmd5nZbxlhnM`@I9SD>JPO$*)zXB%6z$cWy z@kd&bFD^bvW71p;f;J*^j??yUfho`VKD%8c^f|z#>09xB36^oBLxzyHd7 z()D!x$gzGa9l-THL^=FjQmSZ?6I?BX!RsLvIxcYXNIe;HkRIxpz{O|~eUOB!u;I?0 zHqCS8wZc>;Q(hgWyr?StJpv|7T9V$?jDt;ifyU%d)1)=21#%ecrJw<3vLbUM002M$ zNklfGO0Nl-fw(Ru8y|4QsW3XvzyUrpqrlA7DM*Yv#z&ENh{Z zn_8sNmNrG_g_}Si1S!!2Gb7x^S0@@S#yh(_yFvtz;5DLj(h)UUDGvKK^!tt_%j~I# zA0x#OWwUwyee}(!F~?9{_e9KOQMP2}4BNALqjkbT{n6r8merw?O+NiJ+F4_dVtSH! zDxC$C^7%B;YNQEzYw9=dr3vMA%jgwah9FgIT{?HLOD{SXJ`v?Le*X5-D=_q3cI3bT zHg4omF!^eC$bT?@zBRJ;r<&u+NN!rrTBJ;gm7sY(9ReJiz0d z)vLU4p$ZkI5<#{epi1i0)+SwhiN*KqV5P9_I!HVo4K6ta-x33-NXFd+*i;e-0xP6n z5-e})9^12v@)Sq{eNxkH`LyX)w`G&{M7xY4Sf?E3R0R{-N>E0hUb%#|Pqgm%oEC1` zXd@<1^dgSR(hx&lu7dm>_R@TVOXCug93gk!Bo#F^S2nj;sT-?cP`}! 
z-9=Yn^BL!VB6BggMAHrYsdPV`pz|Up(m=np-udqhlStF92hZYS9)7C`-_9Sbxv>Fq16H<*3yONGA<2D``woNOz$r4?Bxm ze+V%BkdH}ay9G=NR#2M?nG^*j6(rVNZC^q*og{C~&jAPN51WZfU z?_^jS?UeDy+g-Qa#vn0qV9o*ny+A_0CiQ_f^?fXpRC(TR_`@G;-dA5)pWeAhOh|hz z0F$Ef+C)rwhXPZCfc(nM{|J~cX+$(8EjE-%A`2grNA$G>Oh~V(m`Mdpr=N0)1E|_r z1Cv(f=~hhMmNqdMrzalIpD`p1CT$Zi>9Yt+%@xY&LG%;9dvFe4B|tT@uaWdru|#Xl z!bCUw)6bBaVSZ?y$C6z3Y}&@`x9tnQc0XiBN`fsUIcHYxK)d+zE8q@|_Beiog8Txa z#4nY}lBT^cXTry*+oX#bDa02IPxrciFyjYfCwc6e8u6D7a>CnO%PBMD8ACkCQbb z1z$q}*$w&+9b~_odXr`K>+2;qHW2`@des`+%DdJy9ZiUO9P}AmTxCz(|FCV}x(()s z28^Ji_6vY^5`2(7gmp_3kF&igE_+*6Ho^x2h1RdhRJM~~gd(EhVgVbO=oxx4VB`lC zY`IX+6toZ$8E>9F+xh`03CzV}=&+Q1b!ZCf0K|!>oN7P4=PrUgs%&)y$!MY3pGXK` z?cw|{XWNU!;1_obIvWSm04lA>J+0%5$Ma`$T| zqTPCxT6|0brdrHO(rC2aON>`R3KC)k=BnFt%E>ad-;AHKovkFOW$LZBk+$f3J8ArQ zNLHqLt_%Ouu@db5u z!RG`1`u>3S1=s>5@rz)31oK%wBxzi-@&s$8q($jCX;KhBoBB%pL_ANwGl_Vf5SYSN z=txm)@JAnf^Gk3{q%CT8z!dyF@&r1Pn}#=(D%bRM!DX8M1`Rm$p?S>6b-7f~Zmy?? z7yk)DsCBN)dRnid&yw6#L9&)X-m`0`4H%9|i@~1Bx`i?%l0<;1o~AV5{KI%|bx0XX zYWY6_rYHtLO)ZA0?1pJ9S&ha7lZwNXmxaWHh}{(YAYhUOE7X`&C6iHd=|E9=t}#V` zY1Ni|d`v{;!E7f@7;kqVY02TNIzR4X;=F3icv?rH$iiwQ)0+qjo{PUh84NVhLST~I zA*Q~INF)a)n3*Iqw;4MD!Mt4?;kXvJ=D?KHq%q0I)C5crvYX*(m$b-GW4h>^bG#mt zA`+94piw4WI`uJVPHU0-y2cd#uJ`7jp)moD07#D{Ka$AKzw;i9zb2dLgm-)cajX;D zLloGX&;8v?FKAx@5qW1%Im1Tde`v@rLYw#u&AOBYz-89CdpA4oyh~9Nl5O$lv+UjX z-e+85JPNEFjRmFK2~xIvCrrJwMH5o2wM z_KYc(hwtSX7IXIRlWSWG3-OnvQt9*o>qP3LFBdGZ#osKk_C&{xBqHmsd+xC-ufEL2 z3?5;dW_?AL^?L}6h_%)*cSW3~;=d?|`K~D1X}yjgVWTcR)7F#{HjYWLEfH^(2pMo} zf(Me_&sK?cC$o!6u2i7W7469$S8T|^Y&AFL}(ISp`9>5V;#ddp0+u3bBN zuz-@5%34sR$Xe*G4Di$9YH!bdo!ln8RN#;u>e&9gwqayM?bPX zJo7D2KSPiS*;4?fWTN#ntg7P6)OBlu@>oJXSPg4zU&TJBH;uFC${qCZ8_gxrt4d{@ zIcXC9rK#So4?q0S9(n9>d-UOlF<*|tFIeDnL!cx8D(Cp0{ru#p$?0L zG&eoaFlZo|sJtOVNKKSY@B^A(6mv&YOH)a6A~2=#j>hD`!F58gQs*2XB&s7?*wiMVzvF=EsD=to2WS^VIiV1wQ$GgKk+oq9Gd_3 zFFq@xs)@n;i({L1(=fLqYyR^iZS)TUCq0}~3ZDf?l3rx@Rg@1L!t1~6gP8jc8vrAx zJodCtsn@cb-~l8EI7h_#8oYjRQJFb67Vr88E9N8 zEA2P!wk7NGAe2}%j&Yu+OX1?O*s87K$sV-@@M$rgOnSe&=_dPT2@DCzO_{A_;?Vu7 zdEO|lQwW1pG@YXIM0_IT#CS<)0obW;TiVq3vX_e)=wY{S&vT*;O?hI%GNpy5ORLag zW(7Y?Cir3U=@Y#_74aC${XFY7*9jyro`>6ppLKt4#Q}h%9~n2NJ@ztr1aRTIBzpCs z)_6&IQf@M7m5r=3iDhBv=kI=GpU;@%IK;W#^womggR5Kq{J7XfM3_rgbBL;dJg-gCD@=^4;UDp|I7S{lb(eiF%A7cREN%T_pW?8Dr+8(_Wo@{4Wsi6_{%n5x=Q8%hg4 zV-ez(lRe<;WsB{)3oo@KGS~DVBaM#fL^K-1nla?Mi%G$6BIMz5CiQB0-a&fqMwHP_V)!XHB-ugcQ$g zp;GX*0MX-by=4oDNKAnFt9*x+Y$%I|9PbkLRNqR~q-BXI*jor~i%ACaLt`5-Va(}V zUyNPzLuE0p`OmA#RoROuy~k}fCeuF=t=BQ9gNL^3y3)EdZ|2SW!v1jcRNG7%r5mrm zf%E>8Xg`*OF#mN=B|mM2Ha~s#OiF@0;XzOWCNFydQ*VdWLs5AuyCYMcstJf51x)hj zS7Qd2052a;Z~CNNhaBsJSyTlBJ@S?D77a7C171KM%0eV9%0Yp-Cc2hJr5PDaNB^Q9 z4%oyWToB`69p~07@gMl8(+EF0*ntBQWW%{0DlP|Hj4$eGUFiP-Fa>NI0bTmBuqEDUm%xzrRipN=-^QAY%{#0DM@mb;asOHr7>}8y947NSOnFK}(}77ZY2=51 zNs~!ttt2LkN?OO!uEPP7_l|MYl z0MepXfAI_gs5;hi2%Lg-92yfQola+BZ2n@d{R1cRwdXkD5z2xzH;4laQ)OOm+g+$osNSc3yFl>s8J01m_Z^|9lI54TpxHdSaItx03` z>EcDU;QO_f-6hBN5Y(WcnsmbM75SxPrX2|_KbZxSUwpNYNX#M!ty2760;UTuzR*S; ze=L>&bTQ~xCNM1l=-a;!KAP#aX~_y(O+LE@G9z?Da#ry|^_ys+3Wg|CUR<}fHsFlo zt%M*-xWAi@qcPzr`ED%^TI4U+ph^_!hsLBnt|MrIA@LeY*)i$1d(jH31eiu=Wm+Zz zQwhfvGf(oNb*&wYMd({Y07Opj-u8$4?zd>d*JDx>SdgAX-onrAsmK0gZ4kkVNn2D6 z5V}8<^-JmP(l{3%4}PFZa_vRMkR1U_gJ?iCo*L-G(S7>a(0+YMVU%po|MP9z#(gfp zoR<#JC1b{G1;dYK@#=FQe`4Inu`;plMZ*@8)hpyh zqE~4I;u*=%MxYVI*m1`mYv-MN9wjEa+4VQv;AAZSa66VGXkC-mK}mCxRx$07UV4*Hg%O^dY zR`}~BjY)n50n{hKew2qcW$ULq4})GnZ0In z-VXqiQUtZEF~R)~3rrIuz{Jde0dQaFN%z)dP;^`(6Qc>3(3l*Um@q8?lNKkZkxoVv z(KRNVs)5FYc~p@Jer4Vc+B7MH{s=TC0aJE1i6v>z|M$Se&AF~`;XC(J;XgI+Kwx6J 
z(zaLt!2>aPJ~vC!H??TV|M=VUn4d~)#K@8M_J<##VPufOVV@l}xSz!#$n0Q^OuxZ{ zZSd$Zq^a)C^P_S5b=Gied&TGcFOp%emsYIq}L07f6Y!fYNYk;+QnMo?~F#PZJ&{CE4OX2R|(eX z)UzAQ4_IS`=}WbO)ZYaFO;-XT`chkG`Kq2olfVZLLZmSaexQx_|weP1IcyDZWJ9XHG>Q)8_s z(zVxH!X%j1wmsU3z^;r;3?)|N!XLGj0MC7BfU4G;hN(6qHqG`dA*h1z{NvC+aTWiUu_v zVhZv%X>F5|!U_xpIK6x4+7*{x$)eycb_tP^CyhJNuD|{_mXB~8WH^!e5A!2dl_#Ef z(w=_yIU9Mz2z%mBlu_Z?WU8-0(2YSWl_~GuhaO~E3qs*xG$#3&l>bi8uwP>$XyXTf z2~QdH%5yOI(rSVi55|A-RMCX{pm^68y}6_n`Q%?Ce**ptfN()w&wWe+CgzH|Nv}}& z4Bx~1G`~*xSx+HpL(d~{0+_;Z@$isC--UL~8T6q?wD5}_mV5n}%^RgB*2Y`#@cN>Q z(NRsnBmo(_zB6LpnP?SEV43oIW6EnpI-Z5bv^SzLHA-W`CnZx}pfO>dIw1IAf98iD z3K!R+Rc}}`jcJ2Sc>oioX%3??L2LphCcwd_JOw?ps4;Q;X#mr0XiUw4Nv1qG6E#^9 zWXgjPD)`}B0Ta&D2ArRV1t!b@%{3+glbc5mJ4liaCH(}B+&mo7`rHm5nh5yE1Cz{Q zBL0Bfn}}7z%jQ2v_61+`BWLiuT6ir~Rdl7lg0DWYw5Ycp?Lj4}Yw&q|J?|^~YR;FK z5+_i-vx7Z|iKr3{rXwk3`jGiRf=n_%b=(JyaUNX4fD8F`8vX{y@xp?!29s;pf#j(*HmN`a&x zlo-IN5}@3Pk8H|0XWLmU^xRdj#|jBUmNqa5V9KWo?Zag9-bt>wEKF<)mf4G$vl#O( zaxz9SFbVTvX6N?Sd(1G40-*Zz?dOq!jnH6n1_2uQ!FHmhY@pa=x?v2rnAVBbp<4&i9$~;^%2rk*l{Y3@HKEecn$t2% zl6hr3+GJazE)^89hm=w8eKylhJNYDQpG3A3QaMFo%4;M|(A;(F?7_dk%3|VV61k?h zU?X#$qz}yvfkT2cEX;f|{R+4hA}+s@+ON4M|B}AT$0UEA)&`1CiDo(h>MuU`qOD%D z%5J#ncleR6x3Q$0DrVi8`@=n&xRFKp?d{jUz24sc;6wZMbw9J8{`4y3oG9CqU%F8C4BYlaC3Bcm39#KL|`Uv@u#C zoCo=sLV`9Q6{UlY+6!v#Hl(BHs@E-6tGqku_jzt(YhAJAVv%Etr)a1}j5piRIOp?N3Y zka(NDHv)zNCIM3DUvfg>{+cpQ7(8 zJ*dAs(-YAgk2r%2*d7KhNw}4L_R(~E@0Hgqw^K*&?@+^$b6vuZ7P|W8q`4oMO4(Ng zgF5=?qwLrd$69u$4i?|8Z9v1~05U(za)MU152o|nv(J-he4O9I~o(&r*`jM_u=bg#T-nd9N}mPYn2A{rR+BMtR@{(*PM=)o}F&# zJv&)Bq1!tMu2H(CA^gk6H{9zRUl1zC5$1)Zk7vKx3#_!W9yNi{r+hTiW z%(5}iSywCxdtubkm@)U!0n$8_6HXC+h4@=`^E&eMNp{W+zeixJv=3f?&F5(Le!bxBYY;d4X)Qo*+WAnV$$18cGXUO?+Z z{5(Thklcwvc}W0M*Q~Zy!?Uc0Kiv1?-w7Nk!iPm|Cef7!$fXXE7u8?ms9mJAMGB}Q zFhUD#1w@(yfmR`bO-_F0s*Cqxh6*JYJ?|r+r7h;tCn$OG@wAWZi+Qst_Ia~)>(R}2 zV_r<*o)u_}nHlYD)#|nO>)-sA^Xl#HyKhB%8fja%Zf9Pn+A@Ic54YYzIwj?MZ`sEb zLx4{b{yjZ`1U?5Q`c+I;@8ttH*L_SfOm@?1WZgSlKJ|xPumxB=GxK2hFX_i zT`hNLf9pIbm&Mtvu_3#}wr`hvMvQs{oxK#=+_bO2){yA4kyJUc+`A65Un%=a0hwCS z73MFgbgg)=B6wpr!6Fry`MS36Vb$x&2tNIDJDxW7;#m2>lu~Ffek?^PD$9YQR2mS{ zikP=mR5!l#_cxMM_gkBR88eAFRZ8lj_DLjGMKdb`WMc6@b?w*9Y5^uzsN=`e!tO+t ze87mA)AfL8Oi7uI?bXN97~3Lzd2wWlSh8+C;n+Et7?Z4#1@Ik+PKyFyD=|C%?Y$3d z-m>LHthEl9Izy}Ep`vGLEr|eAG64v+0EW!65?lpL!As8~#tWT517h3?DpEEVuT~2^ z<>wb#Cj^|UF;#x_$p@@wO0hfdB}~4c&}o5T1DtBGaCEj;5QhHt{8M)HQAgO#fB2)D z`6|)w7B5@s>zHr?Yo@gr{NTVu@B_{F`50Q^`266|5SXIKUFsUsfv0dUO^-vbrNacRnlDYj)R4E|lqaTl z=q5~-(yt5BtKyJ)Z~Dwf-*hgW;>YQ;ZlrVc(9O@&cYf9$fAHWu{Rnvk$8v~o-f8UD zcRC^1g6~1cD2fmD9^xs%Jduy&IA8?k0ZL%Zg4m15R*)(!h|W+BEG2R!kjl##iwMPC zLXAlY6vHI0>R{hZix{TJeR<=fd>Yc2g4lhtb@-T;z+4pk5RVkv7L7vLfE$=tipo>7 zosf}ylaX-b1g(@$1jeNm^>Lt09I9_jQiiSEyvM`Ebb*@~sn%Y9#m+)J4ne6i_ogqoR08LUFQ+h@QXR?z663_3eXN&EhXqK{(pcNJ&89utCt%WeaZRopM&+d=nVv;Np4Krb^2vb`$zJJS+$iAo z0?yIc>$x~z|Fntz^mDL}59(_O--%qG0ik+cssp8kl;wp)Z(-sohp~UN_&eKx_S$FA zAc6tLu!e;?LEkL2ugR4+xJNgy6r0pB$GVe=fX+~SFh0q>_8R`EO{B(YhXz#z7-^Ep zUnPJNHG4C?d9+7#p@yoH*%~-@Jv4Uql^5A~w3PY_*4*zJtBdWQkd1Y5Q?3G zR4t&Wr=3xNVG*XXVl<5L6Hm6Zt{q_H(Nv%94C5~Jko4T1y`5g70332Kfv5!({M%?n zNb&o~#Jw2b)h@I&!X~ULIo2{dwkK-^hDMDa9Z(^2Dfg;^rph>8ey)1V;`>OIRD;Hp z5}jzt_90byp7xJI7&NoL{rhC z+}7EdtW&D+wO*=7Tu4~Eq9Pk*PUTs?CaQ5Ez*IuuL=|&Vej+6)E-7VM0LLiVaBIwb zj4orze1l#U*rJ5I%3v?1uDRh(e$fi_C}C5<5UK#6-#2dF?Ea%82y*#!#tgfWIjYaf zzo_|CLVL2|6ACE0<&QU51*xO1x%OI{bk2FKTia(}f479fiw`h;wf+Y`82lubxt2w! 
zeR?Vd9?|3wc|77+SrTH{Cz@55J52=OlBhP&$7@FQCNLzYb0-W$(wGoN5jOM;LC}Nd zF^}qVn77D0)V$O*)L*w!MZWn@&HhM$S6?`Q@q5TGq95&*i!#9!e+ zR6`_*D_3{MaLCm-tg%scR3?&$8c70`A@Uw!Oqz^hICPhrh9u%55i(PQ<8%(rx+YN5 zq5iNueu&7NPh{^yE;NxCT*yQu*JUr zd!D6X@*pP10Z3umwOm-HGH{-2uyz2V)0&MCn7jnC7!duTE;<01#88lk8yJ1vTfVoo zTX)#vmFt*nNL$#k+X4`EET~k{$wVYEwN=m2$RtR`{HBb<<(NbiMyhP+Og~HqbO?+` z>zGz-%J*o)NvE8|I;L9!h7WOidi)3>$Y&*nT@Juq_uJoL)BYY{qKt#rq!KV;?x4Sv z#I-=_VgQy57RIVAs?wT@B&#P2oR;d>L${FFG|`m#U@s1Dou8GJMM$kAMzmE|*F^&` zU%snqZ#t9u+|y6D3FA+K78rlpAfObpiRC=Mt~x{m%wsDg7Z%_Ra9|Q`L}-yN_2373 zoMZU{GpoY~P{(^bv{+3h>x70uC#GypNWP6Q>~#dU~u(}kEB>{JAJMa9E3)YE=Wmf>6d!wuUaPPQa}sZ$VkCl zx{>kxt!uS0v^kzOC87aFQBHhRVjC|#v3%1OOhFk)^Rx*~Of`wB>B9mFWo80OgU5`p z(We|wG#j}Alkq#yo>uW_2ZT;n5_-OZd~%gVI|v*o!uL~Q8;JT_yndU-!`Rz&>TJnv z)6nQLxi?ZZ|Kc!fl>x?<;k)K{>Ghcbb zmdsjUIb_h~S8KWhdMqrnG-5H55vZaN zp2}NOz68y;0c}uOEmZEJF*V-H%qO-&z@tW0Y;?R$nK*?xl;}YlqLT`xMM|N1Epx1x z3c`udy97IB^w>vG%x=JH59YjL%$2H5wYwnSX0Z5GLX_xJVfyqe?gAu#z@Y}_UyPr! z3XqjR=!C#u2`V!G$sd`91}6S^_LNEXCDD@$zW&Ar4jk&?`>WTiwfEoofWG7zC=HN$rnLtp>e>MJZlB!&XnXd>)AaQ@WXf5r?+(?5C^fGm#ieA#sm-KS^w4Mrni<3 zV;%^Ye(WZ{R*Rd@J?+!n;`Q$V6BEv-qL>gMANPZfQg9)A!{n~qwnGaAH#F> z9yy0e>r*~#Oaz?7q_kMpPvf%QA7TXBegK#h*iZ-~+D>YdVFD)160@dHcVKD*A^mRw zlU6N!7?^)Tytn5KZKuC%}^^S9b(2qEeCj@ot2r6Au?w3EuQD_7c%g9MhRd-_@960~iXrLC-JJ*4)(uc3NbN zT^Q0KfKv=iapkt{cJ42)wy{)ht)*RBfU3Em5NXMPS{SN2N6L0m%X7%jqWv+9PXqm0 zMo@%md!?eyv?BQ7ZGs@4{PSO}C&4WB-wboWs^kaXu=+XvjADe9H2qbap6M(6dXWIE2*?BgxO3q#TpoQX$m3StU;^AWF>QG37SKFys{|(5YWNaO*`zA6Hc-zr%&-k z$r?W`X5Gs(w&I*508Rd5nIJ{$5{#5{GLA9OI1uGvt?}u-@y*aM`(nmPAN%F%T7@;H}V zc!^zl>7`asl#dw^-yLhTR3vc6`qg&#ZBuOzO9YM?JJxQw>mGZ5+GqCQBab;S$wH&x z2LV$CA@=F%024qT2R}?AOSx22WsuR*53Sd#fiRMj+gOioxxS_{w@)AI+_kgwRR<=3 zHeB@j|2JR?maasAsnJb&0UH8eTG(I+u4b__1eT_EF5;(35*8rI_z?jnh+Y6DEhq%A zP{j*C?*DLi9sqV-)wy2Nd((`1@6D3jd&4%^7;G>tfa!!{n+~Bozy+I@MqWY+Ef64> z7DBTDcX02vx>ZLSX;h|2)BDW(zJ2D(vXMhbMDmhjU)?*moU_l~Yp;Lxl>?{jcTj-% zmKNPXc-r)kUwFuI`BaABlg~+O@ikc1+ClZ>{Z9kaUANv6txe5QgS39sfe8uN!`!M( zn2#P_w~44cO6VU!W76c8*cu>I1Jf3mY&V{w|B#}8UtsdO&(@d(rYV@%)R;10dX~ln zQ<*S<2rDMeB-9L;GKT9WJJOMsl@HXIGzE4Xt}y`?nz|0xmsukp?*kPl!!O->-h}JL1fXF9dkdWLW?k5RSnBG)XuwMM~bxSCYTTdqSs*LcoY9 z2Q((M4$O9j5qAOrnuPx9FaJ6oBj08b;8cK6Q3zNpL<_E|tmaqRenF+Q$~qi*4Bu8kHB!|g;+yX%fS5F{odRP@HijhmAx^{kVRkClt(FfIVW;;7KW zJ}OEOTLEK6n&xeFx^gt%F{}-F2vco<>pkn&#S=Sr#H87C()@R}cQN+v=)=ddd4v^rLHpW`spu-vinJ#Fm-m+;ESpN^owBwJ!lBsOk_7jmr8X%V1U#5h1&!4S6=aU}zLmyKm>sE!pw};C0pZ(Mg#FB75n)0=K4K%y=I z+Mj*y^RWvaXer&@~ON8ObK1G0UqhRX*;Hodkd^^4s6nwJs}JIY+%YGL^O|C;&NqUoi zT&k6jU`QJe_vpd z7~Cjh%(p%!O?e5JH09}IVq8jt&(xSS<jdgB1tMF{4R;fYaOmJ`M0LLVgNbk zV^Z*tkdi5n4P!s1R}gV^(Rt^y-WsyF_y3?N&%804{Ut^Mvls)AG!1HIEy7;D`>8c? 
z+s~;ey=rONY>P3|;ed#TA9ygTnD;t<(~a-(i7lJs;l^!o_Qe;)MVGxSHm-h(cH&=- zIn~ttLu<*CHb9_Uo*Glgs5zJ~=g6FHHtLgxS0^=o4yNv1Uf zLd-?@nnnfjHCwg;qEu~wv39{_&%NXYamj069;MSs=EUUFO+;z~$)vVM*++jOO~;im zS$#6;fDUn5pp6kYpecwzm=S<`A11v~La%33P*MxumlQ4_3`4U!IOoGVcSI}KE5|p| z;~4j}Qf;N1^B`Qcb;Z)D6RBc;K~$HaY2hdP>R)~%R^RnVoK`m}>H(oj$~QUYUe3{t zxqb+3sUHcygXpqRs-G0XfE%bCHVG}%nu>y97vbU+L@;&%XqwOr8tO;;vfgAjiM2iW zwchrow*U+X7MOC20dIvxg{`dGfVhhY+uD6^p`J}A2VuYi6+P&ePD2B(VH8l`aVkHvm)&1#bKS#Nx+3}N~ z-4tI(3sw_7_uO;iV;}tpOAS9lgEpG4?*Ltm|NQf@eaFtIBGi8Ff(3EvS?AF9;!l-8 zlfX5E4>6}P)nm>ppE;07m?>ol|VZZN@OoK=GkwF{TuFC-v)y3KQ<6k}Ug_4OaeZ$XrBw+Go z?sta)6U@N($MQ*BLuF5^%U5%k$o(Swt%J$F?-$dM#=rOsP~uz9Wy(uI44gw1dFS3; zas0^{Fx`E}ZP5ZSRk6573z)Q(TQqT0U}Bs`wQVB!L15}6xZ=5iiM!;twd25~ zlKvk9CdQzTsTa@MQ|4o$5g!`MEATO0gvO+{Boo%>Vwmyo>to6R6Y3I!WfOAGj#;W1 z>1m4}e)}6SqrL)7qAcc;v|35X@U~4`!;&^DvDp{m4_S*z?*Yt9P1Gd25c~g1!0T&& z`;XDH_drZ3!~6o{8bo6mHFDGGIBW|zH3^uwZX$c`**VC4rI&l#gDLDSXiO{4ID;Sa zJ(#QQ2MUm&M>NAY*z{wk+OZ4j1iT0kM3auupQsn6ic$rD)RDy66%E`+c8GF-}A%*o%+EPOJk6CnubFa=iAPZ>DAXWD;2WsFF>a41;a}6ooY_QzYN7 zYM(db6Az1%f`&1Gc{UHNtAj9g6K3^W2tMxtvkjB^ZU`!GI@wQVi2jrpsXN9z#I z+lIfcZErh$t_asgOPq%PYlQuG#p^DLSySrLIQFh>jK93*FJdCONe%eZc$W(Uf(*lm zH%+JduG;ahfmx~e??eJ0Dw)4g<`GY8^wn2J%>t5+6@CD2bM%JMMg{)OTertC%Z`c5 zE`13AkqHCtDM2B%aNs&AuZBe(9)<@n&SK8189D_+0ZO!!BAT{8t^*hzU%Q$Nf~sVi zODP)c?Q+AwS$l!VKm;RzdzoL$Y`On)Wq6EMRcoq7;Ik5mn%3E8ogE)||A#1*RZb1V z$5I`=ZH?RGkKg*%xcqgO1DJ#CEBr&lz44>(emic)96f_1;X<_3CC99YAKml|0#D!( z0HP+v0)*{)OnD7f%z&N*rZRxZ+K>g95WVLie3m(2nl*DSKwBRR7XeJHTb{W^GYXntoPWz3z#s;rQtL5+|E4&O5V=+1~}|lU(u*2?M@g! zH1wq-x3kkZp8otl3`{IwTl@alv8M@O!pFoS-hJoo(XylI4+cz$LBgOnlVJNWMJJyN zm@GEy9qO>G#Z+Kl7Jwv&eQ)Ua~;1)raUA*4@(j~ConO-iN>>-v^SJPdfMh=2jAEZ;$8W>_3Q~=oBg{80c~L z{SU`kXPh1ln!O%fjW)SC9^bq<`eDW|zx-9Pxv?oWJi3l5)Ǟs-eyPGOkMEji}M z+k17GIbiaA0mUQt+z<2CA^-NP-X3#KMq3lvDkz?_E}$EeW){3!Mb8sPW(KJ0q?t6%*xzK{Q75x}!*#W4hUbjDBazb`I%$&2I7 zSN<`99B4RbJ_9hQ68tK|O4ppXr?UqMe42wz^Q;X^p`;g0YRjh0(L~S&4jATPC@NTi zGUl`tL1egdm?Y{cB-c{&Zsye3!{^_>_nz2@(|+PHOK9HLkxYYq_=6C@V3E<3_LJk}@P1|~eu{XW$b+KyYs-$72ir(xsruGnM z$g}d%z>10quE12QPftNa5!g2N#rS&m9Bhq`f96xMi_Ws;gvd+V1~?hsZ$*Chnn0sa zWN?nAJi(ufBw#X^QUSHZ=wFCrI_VUCb`U}C`s+Rw^A{{kX7nBKqHAcCv1Qw~xbE8P z;=&icFj~m7tR>>`j$iyV{^?uaN?MFfDfh8wdl}~tG;bbX4wwX`iR5+(Os=tIn)2Xv znUBdaQ+WZJuAocr%o%f{7GO#mQvxP#!F(`)No`5MNkW$N!FZ+hBOU*xz$8Q9zLJVE zwr4e~lx?`@8l51M`HIGeei$faj7?y2=Z$~P=JS-2Kz1ZjfFQHKErafnF##tg`<8^1 zJGGysx5tO}Z)!|1t^>4l-glrS7A~2WB!~y@xjS~kl&WBgWk^wWi0nf`(`w8YSWHWj zz%*dmeGse>IjL42-}g9DR9@<|%8faCeJ_>;+DT;6E%!VWBY=VVQPW6R%tIQsd#=7o zuc2w66mvu!z*NBb`^eK!`YHstk7UZz{P2x|pafFI6>-c8bBb+lmKswX%g9|@NVx$*74|6-i7YB7w2 zszNhJjOF{k#%Hu_8O19X&yTkCTVvhBt5c=w-7U1@W_~6T2Hk}jbp%tD4|-R#I$V%> zF!?Yg7;+xL$p5mTFb-%(I?*X^MFxXGq}73*j@0`5^;8S*hoQFYYp0|QDFNio*jl4< z{%nvOfU2SWIOp`9`qez>y<}j>@DQL3!4JduTJeXO&|60+csE*69cIQnD|^$P`>(J5 zNG#4bRJxe{N&92oilyO+#o=rL_$yk=D@ny}bX(_HD7PyEF19VfMsa+{>7U0rgS* zLA@;<(Y3b?+QwYYebo=O$IH(-mDV$>qGjFI$OCL~cE#7eM5_)M=0sYt*P<0!MyLnM zX$RPQsTV07R-q}#a#{2k0brj%(42bwvM89+kn+W}b zyZxAP@jbGC?Q*TwlqzgEzIl^3~ zNg@7-;p}f4WD=x+An7s0SPzAd8wi0(FJqz32Q6(d#FG@UGle_+CFz0b~(8GKz3?%(9g!S8MM4`LUEe z(bd-#RW+J(xw%B&K0%WlxxJVE`0x5Bners$Y*wi*{(AsZLXGTZa^uf#O3jFO5}J79 zti97Vela(>Z>Dfu&Y^iBeKnng?>kxk%bPwt4t<{e38erieh+4qzRt7EF`4p^qS_C% z64~-JFg=Krw`1d`|Czvqba&}R0@F1aFySPJbwYrN1F=E$GZj;45@32?224{X|NDW- z;D;2YB%?~RedK@KZ~|}+n3Nm^rgF>{DJqYRf$N&rYz#KL!4I?WgB&eja%`iRPzrE- zn**W+%1E=v?)()E@g9%!&NvC?L!=dcl>L;<(cCk4@dAD{7@P4U?QU$Q_4WNJ|ECvI zSaAtPU6_^3r@&G*IQvJ~#A!ZhHpv?al#(`_Hm~6Aw*hJ9LaCUWP13Z$UP+>-J;0W+ z`SoOW5Lq=yRqkT)ND3I!K};>n&pka>o_$7)0V+i>$FVM&>*?sgj2M&}!mN-|7J5kv 
zAEaH2;BI((Aq=LpVnS?qWKI0!n_rFq>i@hHYgDY%n;62D$ zt%11yi7l9(24Xtl!qsR%)*CEh?OgR@=9MhsY%ftwV#kIJn4^evBWbmVz1Bf=o7!0y zcB@`T;Oa!Sy7`-gD{0+gvA7DLt4ZIqXSU_P2?34*{F;pukC)ex%&O7_GxR*q5{fUo0 z_+U(8o!Ss4Ha6~vLIR0~`OTo`XH6Tb%cB8s!s?uasUrasiDHaZU|JF7SPkYa_@4(% zqDN-RgBTKnO6Zd4{2BkG5N?qo0nK3_@m>Z@H1BdlK%5z($gXT0uDda@0iFdTHxbWl zM!uN7I*u~gNjb;in%OhJ38F~fWvpqij+w{Pz(j5WYI#m$%796YiROI&(=;Z4$vh^3 zsaTB(U{dFNw#IZ2lVAo+J19v+4ZG*mn6OXdpGY=wqo0rj4MUZw%V>{u_^#TOo6h5) ziMlW*i?~)0L6&Ju888tG&6=VyF~*|_Oi6Qs`Dn^ZCQQs0W8E!;RDYHB5p^*GEnpA^ zTtawk7iNqFOBUm-AnI}Z=6Gt;w#4+m_uZedp=)Vvz7Hlzf+E_BeWFlGxi=(CHnOxy ziO@vD87T2LdqMD47*O(-$x5|DXrF4HGRJC^>$c`Sgqv63BOzA?Y#V}eTOf3R-yT4n zJLi>`#-it+2f33Fytg%0-*Z=#lKeYm*4(I{F*k~*&cy6RO*tgzJ|Ydvkcf-;ZaFpK z3aO^|`Ro2Ho_hSgc=1VdsGNUHEI;#9!>-6!2b zk>K|;!P($}68u$3zThgnwg3P?07*naRN>>!W~$-?J_3Rb1|M0oj__@Q5}*z9`KmG7 zPN0=WL0xt9K`VXCgZp_*t?3{Y#G6}_##8{ z;*$bg3#3OtY65LjYz}$qsVB!5C{biGw{0ypZP^x|xaQhaWOy=K)Q8@86*V*g>Akz- z*F+Mx@tY}FT^?D#AzIPGO9>`|yK>$kA|9(_3gDC)t+F0kTb`{kQSYAd z$Vk&mM#bxN4v4W0P-m?rmXIT7#K+t-wL zkfNSfp)ob?5SXZ8_d9`!HGw%9l_xOi1kKLoj6d^m6Y8Kjz=RZO9+RC_T?Ffi zsV(_116^r9`qGmGH zrMWhBilX_+he^2Zp*r@|_?s{OBbo&U!LGhoJZDA<4Cscb^un};sJ>7DO`5!kyB{!u zo|%_qgVz+q!$Ue0q|)3AP`nnb_EOD$9zhoK30BC%nJ?*DIbfLYnEx|O1eF@ogrxu` zlJxCA{za_2``0m9zY~#dHicWbWO*z)eq|KTm>dU(&?5j2YuZh&ogBCP?3Vb*hpvvZ zj$ITVd&f&-_OVnoff-`hO;yBGg>3euLQCqh%YMv2iD$+z1}H%&kynoyj|e!KDYHp? z7|gqv-dbDsC7@lfU~%lE-NaYE`@?8wi`Gn?L{!&of-n$vptByX>BGpyqC!m8oPG#k z(u~)K&q*dQNPxr=dU;$(i;)^YaO#v9Xjjbfh&ea_4|DvJAKV&GJhV2J)lY&Oa1ZIh z&-q+Dg{P-lmaXjH9yEjE>3~HYEz+U4gH(mMZ}r;f;5(na`Wl3zVyb?SfXcmV<}SlO zlz@WyK%t{bL@(Dz+XKW$dNDcfC%A(-uPQ1>^Cj|+sJm9uHNJS`x8ujR-$s4KiD*Y< zDGaJ*z=jv(j3avPB<27%~Rw0DzVK(d^M>xwJq1+-Ir$ za6$s3DtvOExca)-Z9W>pOn+ZTypmS)%V$lCn|}1e*tlkOOq)3)+VOopx}`D7h_D=z z7SSxHQ*LPrf|sUPiyI#anDQ}|4x^DSg|5mm3(s4yAeOIK1~Ac>veJFTrnh(}9tpIC4l)ou?OiI!YHuaM^!XU@L$|ubJp~K&3K2S>nl=KGVk(x*%>GPsw>RblI%n$&w`R?1I z8VR@zpt|mcFTm8=V@}P4n1Z&{P4cDrD=8leP%0W$%7y6^0RC`C0o(i44|+&aX5KPd zL^yy+IdA$UAD~9!oBsH%an9N2GMNNucu8U5if(y=$VLHfm!F22r`;NC>8G9Rd#n-?1*;ug*r@B%g zQhhJZ@iLNZQ}qYOn*evDfZo z4XY6%s)@8@E5z2VJ7Ygdysd!yLB{YIdL6Bxf_nh~-hGe?A7~l-+gf8!3xX-WC#xv5 zus4dJ(^|~dU;N9@#r~$=U} z1P#FnGa~}7!kQ*MiNQ)17L65rA(S+?RhVY$FrW4Hq4`p|qJnwcx_ukIrW4~UU;0w& zc&)GOmsCH$;nSaEf8P*a`RdOy;(M+%xyEd2&YrCX>F00H!62mO(0&1YazOV^%IldnAIAa#O4%;0CPD|r|4L0Bzb`PgAO*Hkb87Lj`E1m&cm!bD&caqgSpU;BrW`O?lE~l( zTPwQ?nJMpSU}|Ucy&H{bM^n@91x(NIG5ro;axG+h0#hH3*a?)wu|2m0^jpBhdvkEu z9zBf-8biBCkyuEpdm1;zEkFELTyX3hI746T#x%C_w6h79UW9q8ff|K6>e+c(obUo4C|}$mn7Q$)u*nB3okwg zMX-r|gpR=mDWcG6FE!Q5ErWyK41+8C4`70Unb4chToF+bdtr(Ps3$i{bG={=3^%v@ zRe1qI1eL_iXX?Qe_%FBJ9pCuDFX*3CNaWo-sykFA$={#`8`v2WXFsJPG;OUa1LR$G zwUB?+MhXF1Ss9XlK0dNpCH-;!aZBR`XPiPEJxbj2St*jfg?2m0d8%ocmcaCD-@Y;W zckGW^%B5J#G654BrjdHX+|T3t_r%hZjw4*Xnm~tLv9ob^)KHJFj3~3MlpFfQ``=Fx zKv~>#*B#NacTdVy+uKSI0pRrd*Ssb!CXa3pzNiu++)5F2zxcUNMeFW8m|jbX45UCW z=GH+3iM=E_Z-#GJ@~E5jRj#q!y}&dGeQD0KH&YqrJS#>BOy(a+7tA>A*L(1LVQ6E2 zn8;g>05uT-sGt4nF0Dty=_D9pF98}~`G>!cV`+2lHA)alE`HIaaoI~>5le_V{M4sD z93j zVK(_3jrnHB(rGiIg@UUBNH69UCGpV_v<3vEV$8}TXiTV1351K7Hn-n#>0I+cHEYm{)KV!la>d`XSLEDu&S- z9{bJxv5t`TeFjYc0&9`{|N2W`q(wt9mBK%igp%+7%Z-@Jc2XU|qQE4IT7S}dMEFWr z0OpJ9GG}Afn&aArqAT@#7)6?VF6`kVm@{P+MmiBkV6|cQ4 z7SEj(14OI#8jwPOg?WT7F>oEph9={xbGHwIe3Oq$?O-A@(=(mxkD@yOHk8 z3Fg^Bp4CC-W&%Jwkr3zu(D7#SzAhuwdfSf1c#ySLqp2e_U!Rz3bor}a9cP_=PVC|u z+%)qwOiPWQvG#lALM#uyGS*HX;O_3uc!K%JV?Xq>AC1^F3eU1c0=U3GdDm5b(a_(9*CR>(BXLS^&t;Q#`annT0*BH-$ z(04PXpU=UOn%TVU6UPY&QC;F#k!!GRO3SCm?~> z*^vXLWbgMoj7bxuyi%hNcKmb@=vb9EkQ!wyc; z8$KP)Kib31moXx7h_OiE<}sDv=r#XgxTrF=G#^YwT>IF}m%Q-2`1EzxB_<=7I3EHN 
zmri3UK(gFVQsfoyes}EJy&GVfK`9ec*vamkxrLtp`bm=1p6 zL~tZ{=XXlBDa@a9CZb^u)MWTTPKFlL7fm~wW6hfN1Z!+b;l$T}@?-eUHpG|z@gL** zt3MLUmM)~W9KcoD6D1T=F69BS@O$pTM-GSZT{8FHwZ&#@uC22rv%?b zAiUjk$8AXLMX`dA^QX3LqmuQLG&Y=1Lg<-*Gi^Yq&-dW!bqPGImT!QCHV_%1l{cI} z*Rm5I-nK2 zW#4GV)cL2M`wU%`Phlb|So5uM-SwZ3t3UB!{Bc!r9oCU1A`xo|m|3x4RuXQBa*qR; zMAtR#BWjS`BqAeYpaTsKK?e?+fQh>*RG?zbv#4hRZ%Bh!wsZxi+Hx8{E{PQ@k0D5C zfQU>6=|Z^oq*M1 zYJg$HAd>=O90w+WFkRnqa7fIIBL_se*~$HVBw*?#>~+p^G$y2j)ek%nJJ}py7DSi< zQvr;-2Z?Wxb0=WpzGS|Jb|zqwDY0o%6c(#-bb!f*Z^T#eC`EI~fXU`*nY42xV6v>q zWvKQyBw%7Az+BY43|+Pwx@_Fa$t~1byYfBniCxXj0Mm3VNti=8Z$8&Dh{R2{x0M`v zes61a%~6u1w*L&6*r2SBrm7Ue&pkV@ZoCu*%ef?8f8UAps+MQ5)G{3Ou8ro51Wf#! z&u99WCS$vQ*^5Z9ectn!KY)vKrgq+tfytxp1zA~m__W9m7 zZ>06f-Zb|efKsX)09j?gl+m}FS<)`VB<+|4rlJH)FeGTBh+aHl|PSAE}`b2ECV#yGyabGx)Vc_x9|g3WzdUG{ zQ>Hm^TzNc-F)i+EZjZ12-8Z8WkeypqPa8EG*%ebM9ZiHq@xhXSy9joGW*6t$gjJ-1 zYfd0Kbor{4apEbbP(8dFASTQnACW#V^M1Mknbh};_xq?+Ax#w{kX7Kj5pXSarJu$= zSl7`lRt=>VYrsK-+fvMgMyyKr0+ZEVN&y^$AUxa~3Um7GdJ$>`CTZQk5WC;Xy9@}b z#Qau8y+47Y2Tk-8BJ@7`(T`!FK{!RYxZ=w9#cN)DSu9&JEA^!6#jN+v118DD%_t4!jqG}8Ea%Wm&fdhjBnO!V8;Xtl zG$6~zXePKhXIotV-!G1VYQ4` zAcY?t0;a3p`(7e^nqwNkG@O7537PA;65fv`{T)M2V-lFGHI@UWB>geBf)gLX>@r}| zRO#9YSgGGt0;Wu3GLOkMHds6dOy<$JZRbpx7B9W{;4ZXX30hZOFejtAH`sg+OPL zFXlish!Vkx^-A^FVC*9>-rwAP7g2H32<%uyi;5OB9LAV@xzZ(R80<++bUqZK^!=5i zv6N!&3Cv(asfa0Bm3xIfv3cG4c;KD~VDJS*5zdSa%wIF6xEH_p#j*VP7Z5kzi?+5U zZoT6!5~ELv`G8Xi^JY7EYv?JsDU=9Ed-stD%=)idbxc+T1SXGD1;9lsq5fV!wF&bz z8!>m9FJ-XOMj8vVE!%OS2gtRloH#AIwl>E{Kl%AMe&$?~Uujjrn(x38kUAH$Am&?X zTD6=|YSYd=>PMQlbP0@`duL8N$#LUq?APovR#izRN9LsujZNPV7f$P6q_u!~=taOY zU}f5@S@DIx``h^DcfS)2B!_qNefv831NVme#OtT359oK6XtsPGl&rHBXJA0+=qX*bMq;QW7#}DMV6kp%jlAQx2FcjG0V%Nn>)- z1g8Dzy;{s2CL=x@n4C{RKL`Aowq$+@umV%{`x!0UA)q4xnN~9*$^CnY@=aZpNz}B8Mx?gLAs^1uQO`Gy=^>Z9CRc z7w>Cv>GMvE1_0=ZwHspkf@Lx1_>+k+BPo^N6eF=WVUiidSv(*6`i=Ajk{)!D@7S?3 zF%vbWl;a{OC1A?vm4E4zBm-hzVM53_~o-tqqT#hEa5 z%}L2B;4P%X`N|G87o(?Aeiw{T+8$A3h2BP~P+(IxO|w=N*W5P*6EDKYux-N{=3q2t z&Ln>*!%o7TDf)}nz{HVoiC=^ zW@UUezaGHH000|a+G0XrOuiv#3$$gI1MobGSxZgYzQ^pPd13ll2UeOF{ebEK0U`Ab z(_{6oAB^vO@8>a-b|j-{2zG`RwAD~j8*u59bF}f}8me=@^(}8k@GXhmnDx4-^=Hzs z8k}I{HHyu%;}>lRBkgVNF+r`AdoSjDCSz+#G-$y*t12|>9_Iea_aorpwEFk#?^7*`Deyf{7~l@>v?|%<{rVhmPi3Cw7fuf;5K>&zC=1h{8 z3x=^F2wF!Qm~w=hk?)~@S;n+>hu-)Dy>Pw8sU&-~cf*&Nb|$^J7O9I|7=Qv6k{Fd6 zL=&H>aKodWFw_nlrbOF~w!?7DUtq%&@wuaC!?2yxgy}XOB`9JR3Q+umBV042j26C1xju`M=K`s?MA!`+NL-KTFr<{R1T5^&`>ES~{Od zq3(DDUxQLe3DQLhL5Ig=ro6*6Ca#`Lc@VcYXnj=zlTr^uIVu{Hj9x9Q9A=?Oz?=A8 zz@!9x2_0-def_5#MB2>)Ycer%24}>8f_<1wu6WnGqG|UoqVhE5jRO?c97Y)6A5YX-szFP0oI_0=KCDc_J%6 zG_bK>`LfI6#V@=Fb1qUnpQ*9tV@53{2*C5@z{hKP9;2VK*Zj>gr;#Lj;tND6tiJEo z*tY(Wc-~3NSg|~OBTY0^J0&U?EQuka(}sxDDkoX8dHp(~qH3dY``)uhHUJFi1KqTUBFFk%9y7$)r`leVsds@sV z{J2{u`aXJ3!H5?wSsDcsCNVbyl=13Cd*J>lfi!#OEG7Vd$hy^O-paV|14Q5T5dB#N zzza^LXlBl32{h27c98;lFmVz&<~^!)df%E`H7Uw&QV2c^?0;(H2FrlR)Ov(J;HaNK z4%$sW_<7uW-(#tsUIjvS# zLQG}-p)Cc0|0O=AtQMks2#DJL|EcSf8A0c<34W;= z7~@n*v-QOu!cecc>dM%;D+8vn{--sjL%`I6#)NdJ#w24*+CehqF$Zikj|5CxobT5nNk5OQ{8g(~(6$?=J<(c)_`~uD zJTWZ$NWfH-d~aUM&7R1K*L30p&>=d?wLBOnES*8$BptiM@#Kcam@;n(jEPp-_yu}t z9Z`-DHH^8@1_!_S&BGE-tlscMv{0I>s(dt_cifDafELD%#4mvmf&{)FQ)Dk&whtTR!QPRmnJ}5X zTAtoF8g1!<8HC;AtAfd!W?|sL06A41XihDdL=O;#U5QhF#j+(h*AZZuyp-n3yrzTu z6hwv*Hk_AX=v9A>`&Ug_fRK(977NIw(zjL4{f)r`zIWr#<0)z!POYd*v;xMabB%aD z=Hejqy5t2fh!?-;g|P=qKoLO_(%=w567r2q%OX&LI)zXp^hXm#YR*}>H3?;`RVe`h z`kG8owbYOPrb58=<2QUN?tb9DsHfys4On85uJTw}6rJM;I3H@-QJJ@%y7j*0H}TYeFXh+?e7+<>~H`gF;|J=g+xh>p%EDfk#G2ttLK3Pr` zc`{<7rivl{(!vQuJk?_&sbix7fTMKh#Eh2(S)>zWz~swuI`^eV0-@{$9|>0J&FnSA 
z*}=vf!PY%>(VVof_uWp8L-TIHfQ>KjVE^X9xVw>t43VuwMNUz9Fm3CJY07i+{9C5H z+}=|fa&ze<-9fFeTYmE(+5wxCO+1Cf&T3TmgGLWQlxljK1+*C_CpgnS1feHi5oK4Z zuqTh=B+ny+bju!k2W`e_%=tCtEFq%C24wv>%3UlqFJ2@&cohy{?c4V~aDU2`m`oV1 z%-fDC<<<&){Fg$b8z^4Pg{OYGjV zJ6dSHZus;8+NJOBZpD;2E#CUZ3n`OC@l}|ieo8hk%wM61J&Dhy3!mSn#(kKtI$-SP z>d>~kZbCHf-br0In*Y&$gNaLROy*LSXlld6n1??`gx$lO>c_Oz2*k}vHRCz2%mAO8 zj=`n4gY0j$B>hw}cK3pnyZxCwW|TcMh!j;`Q60@_x9{`vzkE6k*ydmx}(KF}c%bZ-gW*po@;z(i(S0;ZJ5l$Lp1V>-e;OWZQU!vV^#Wdwp_w(TI{(v)&p zI6FqV55~Q>-A*&JJrELO0Tjg9JbW-+5PBb*s0zEYV|#{=2_pHof*%}$$Kk-#NdCty z*z5B!9~2;APNK!R+2#Th^q9a-!22y=f{;frchpn@5;%_Y%j;q@;o^^PZbmwkk5KXi zLYJ8-?Fl5B7@b}uvyJ1Wk|3Mbq&Z7Y&IE#8;N@npxW4AO0YJI^IR^KA+4TM zvxE|$x0UAA4L3f=&b1~B9|@R{BK@9GSZblp{Ncc)w&WZaFiC0(UQ5s_Bbvx@AVbpc zho3P3H%Ia_IT#*l51EDzNqQcDxfD}TIZ}5ifh=!$`FU~L@e5#*tTNLD=eKAJZHO_sHeT5Eb`w`3DLXG}%2wL0 zVlLd#RwA%x&j1BKtqxIYm^}i>t%OytYu*!A{>l5|)o*!2N{7hjym}Knk7JfOxlZ~r z#sFnQsS7m^b51pflph15GeOlTt$HeF_rZ*wc;eBNXjx52cbVkFOhn zWdg!g71~n;G*(Qt0lpylpe<7xhy-6>`#T#VwxfB~n+u1)Vv7!|Nw|(i*4Y+g;}e^x zlJM=6w^TtD1GNxWtN?m=WdFXHb;eoo<~P5dXhuwmFj|9~`l+;F>ku_VgD3LzzaR+l z51<^eZ}(mVD?k9c&ovX&4`sis5$g(xk#ag&dyJ_HPURharHOcCj zNSVhL0+yH*g;7Op9!4e7Z8$8Q~Z9QpmOi@?Of zvH=9`R)GnIBrpjw&a|5?0Ugqx=S~|-P#R}kN`xMQgqoe-)$ipR)!@TPTBv7d97$v1 z6a7mjcjmDMWt?BLr6Idc`yf)gUV}pZkb$ci+#{a~Lw9-S!Q(8mNzC!t^A<*B&lu%; znxS3h57SQ@e&BZU3a_~GG6E@Bhf1OXnR}!J*49U+0S{o>vd&%=)vGBZ!CqvH2yG*r zrx1oOlUFFR?w`zfl=r&<80X~ib(gf#)Ql6+6Eej1+?Z+136R3t! zMHPhVx_TPcH6(EAAZ%NZDu)pd$#kKsJR%XnMk5y33-ARByso+TvG_hdr(#?JrIcV& z`YqskHxper@wgM>BOm?%n!sG>1e!x*7{UzNjz&93zLE_AO9(|5m~1Mjd%!e`ZQHhE zGK2@glU$=(l7|H*uRUqzjClViJ{G@z@IFk#yw4gKjM9h3ZN+h;P}P8v;02+oIZ3D9 zi$I@VbDkcwLbdWDL7(pn=7=q1*Eysy9R^HNux&{e9|lY-k6nQP zVvq>bBt`);oJZ5x74%~ILSkNwSLW;#)={Oa!h&E<#Vx{ z6D^0-F*MwXgKkwcxtSJx4Dgl!|DNe|_lFIxX_oh^u=8k`yCz+;<(&r%^%@C!zG&QrQ#sDF z_a^Dy$f=|;A)0l`WRaSyh(gLtd8ptJRPr$?Ez6(;0Pn$T@tj@Dzq7#|Ese?C6M;#M zNmJgY-2&5Ym~URpnmr>HEnGIsWG*s`#4-<5}3?s66lh2&)m9RNy`}rs2tTAPH!fAGUajhT1Cw}yE}KLHYL z`@T4^d2O70!YnqnRjT`9SIYp2$OAEX4&lPgL3i)|SUwxgV}DcJ{Jo#WJm#Sg?Ys@f zw-0;%K_ul723n{T%k*gbFXwl#V)(xn0Nw*UG%{+&D`sFavJr~1=A%{jQUvvhUClB7 z*j2IOw9}%FB*!Jx9vc9JdI%`!CU|MISfq8d$o5{(RP3o&obO!$KZQ;AF?5Ca8^{e_tl z^WYFwD%N9aoH}({oXOmkn3o1U4*~opF6T2}1~8aUG{oO_9WZjUuy%6X@!g-qukN@X ze-=4QfZS%*U>(|4(VV$)^(Q|TC!b8oqEVQ)796hKiFvVuKn=|eXYbL=kdN@HT~%Dg=%KXmt?UpD=xDeDIo2#N7`)5Y+@i3^88{Et+5D8`4ZAdvJ)q z%K;@#hVlfLMq$oZ?z}!N1zg$d`J2qN=Dcev7@&eV^(rAk6XPf1%ewN)tK!CQ-5B$x z&5W14@WS|qzx^AMx(A{fQ%gC*k_oo9M_Jdjm-{FnS8*bH%yr@3o(q@=FiINJ!lkKJ z-hzczeprz-rs7iQNj@uGLFb3GCNA?_N3v`@*M~>(eMjXZBY0+U-I_O$4nUcp!bwO+ z(~**BW{0XeLh|guFt$xJrm;L6dk}dBOo9kErJBrX1S4-Yos;*+?>PaMWkRf=TZ%?j zK!*OAk2Q(mSq0ar<4&Jt`hHCtdxnhCe^hXy8_yroc5q?rtu+T$@a zCh#?0aKU-;p$~o#ej{2@Af|B6WtC@mBlS~PbM$AqEB)*c?M2)Es z3AGH`c^RS9uYScV00=gx=Lg7&aBiE5k=7w0S+1z4z19 za3PgKCIe!a{M=T2fobR9k7Pb`o_z}tEOP(=t4`y3?t(NjPepNp)qlD2F{2;g0K6}b_}pz78~XYgYPA1z{oW7 z-E1$;@DojVPdD9mFJ|Lf%K2fEht_7#oJ#FMl1$0dNv=HFdhILb2S9=T7HSxn6e-aPTSF0H2ysPy9 zE!OeX)lVcz^@9}7Jr!S77v;?$DvJXEq;0$Q5SdHQC4(urFUS_bi{8A@b1#6{xP2#p zVTV`t6WXw$kSuqc- z?Q@^GAy&>KVi|r=$U5~i{(M@xtlzyqML{-9z>JCz;J&g?(~&eLJNDXMU5&{I#Q6xf zD+omD#|K+%eMF`@(HI*%3A^5e{M_dw9BNF`?H~ML3{t=R#|!4IUGsXn3|qsF2)R+J zF`(IKYN5>}dxLEGILH?XbdYLch-yfJQKlWGQ=}V9H^-uT+X9$nPjpNcvAGNKbOs7c zloA=NMr*>HN8m$@j001emJF1h3z(9GGENLRX(f9P$p($X=4$q_vDI`By-n`B?RG+4 z_a{ev(wMwo7*j>X7<0ucZfbP>xSY^J5$J_})Q;*lsF6)Z?6XGbZOV-?a>oz+~`)_mP?M)Wz`M zaDReL@>wx0+40-d=cdyqlQbqxd3?p-hfHJAhm`wmX38r7QOg0Q%U}M=c>N#!5uH0J z{K~yqiJ}ZTC_$3UwuRTq2vAwm+I_O+qu`aezqUr$5$TB-aE~fT(Oj6bO z0_zBT800fG9PRa*6=B(_E}r*bw4IS#ZE%JS6pEd^cxk--10SH;1OgD>pGeIzx5!O5 
zYZmgU1#dF^AIW#eQW32x5d^!#%D|*BZl9+e(JdrQYd+V$C{dDl7J?gvJ6~?7g6cwK zrDRZUTCOlop*Dhe%phH=cCrnD#1iNRhU#F_0%4LM&Z8=_QgMeXhuK%rYHoG2VzJ?J+2`CG z5pf=dL4xFV@jd$=yDdIv(o$iZt{ZkBp0k8u_R`=eT{0sBF4zR-XPF=}i76vWFhlH% zW%@D0eX8&#S&KOsx*7<=I4;%)2Xs_fsvTW*7SDoH2VZ>=~u?BBC5@W*R!y%BI>{}<34ede*p(oARrZa|47V+d*VCS8$^kehCUp{gO z54Gfj{Ocl2PLKQedV=Hw{T`jLBcI^N@H+$EJc{o5&2Mb1>*CBr8^cq9>$UFI4ODw& z{ovF!u&YDI0EvX8nxFZlFc$;`=||!;w@`+Av<3CRgg8%B0mz+AvkAsJ9X!yLcJxx( zqJnI{WDqZ5Q$qV_BO^=cw0_31Cggb{+S4E=Rqc{ z8t_7&u}z_Z&f_8X_~#x>pMUCs^vYX5hxNr7qm8%&2vaHQybXw}gefE@ylyJV(qOSZ z2~!pA)U#*}a412P_-^lRPPs0HcNZxLK9Ux5XL8&N z7pBrnubfRUzjB&XOBiu+9@$|2byuGzoy{!7sjH(i_3zk~AwwDzsfq|ij7mz{K_pD7 z!vx4GkvpR6KmRBHj4bN=3Eo58M?Fyj5Hq&%gFU`nP}ZZz+a`0T}yS z8HBBjEFW_`w-I&BzyAGynNFNKn!fl;kF%z;VcQUy&WfDa#1iH6=Psp*p^3D6XMd>E zRHLn<(mQqhRB-;rTZm~C?8B}TsO@LsLHOtyC;&P7K8ZnsPxX9K9d@0vkT&9rUl^)${+LQDoI14 z7}@50noCY^I+hzGCC}{QWryqT*SH246XyNy#tXFrYf%lM)t;4SoO*fr5=KSJKF=63 z?Trh16R}nzrXo1w4gxs0vBpA>1X@BX@tVQ~+hoQKcvp6{;f4;2D-#l%o%xK-D+1-& zN?}S(bcf%48g27jyP3T+=Sne!FT(nw+PY*^d;LB-c`W_;xBpWR zB?;5+TlS}?zw%{>BOZo?jx;s2ftQ5MCDOUIrZ?VxCB5*{Pr-Spt|35UqvPN>yo+&a zqU)hODp=eL>&>D^UvKtDIH&;+FoQR{w z72G`brV`X~C__+*TCzV>002M$NklegaJywJC4DvngINniC=%IH{?s5LHKDd@z+GC22^11Q z=hQmk%80S>Y;Gs;8bG=toC@MY2SgMQf;D!BqPH7rej0>D9=;jgFI}2U=gv>0i+JUY zk|=f&XA@(Zz_TF@iNGrvYYnj#(`3L9iL2&vF(aa$c@IYc`Xw9+7@n&|LLIp}lK#(s z{=YK@;)+n~p^QSkhprq#Q3TR`=J_8mAFb(;FMSTJ@C^Fn zbmrU#Y4^Tu7@bMkf-7qYYE6r^K?||D;QBr=nu?K-6ro=ce;RLY&aj1fc zuun=2M&%onXk11M$hJv!BnVnH*F4wGkTFA>iiTocfL+g!Oq_ENNglYc@2;`Wo`c|8 zuW0}7y^&Jr+{rWP^4asDPTEV%h-rq(fujasY7yx}iC6|kcXpW&rOO~1{ANm_f}KkV z1z{O@Py)^ffbba^E$CIiKFGQzS-EpObCT!jP8~#negt%Ly+}=DG>clihf_lfh#VWqGz8yw@3}BA#2UCJ%Mhg|)HZmFY4-$U$_~lw2=R0;h@XVXP!7NQ zyNEJud9>viMMPP$VlIfo+4Z$~J>$DkVq)?m0J#{6aS>dQ120^hm?36~ECqNLDzy$w zjL^bb+DDzKa*%r`Oi?Elr`cAY@Sra@AxyF77?|%$g(Bx*24Wg;RDtmw#ch0)e1c^- zwJfv$7MYI~u4TqDg2Y%%xEtSLo=yB(1`{+1;UC63Rl*c%RJ?7AP{}&KGA;N5<8F>) zk8`^@q7sS$|E`*!UPZNoCoGFPr%Arxeaa1I=k zU^MF5ThbT4^u<(y{b*)-k-ZAusR5566!dIwOYa^%lD_l3Z>9Ej640_=3<2TEFj3f} zVG>;Db!`}8CZEeuc#khP`4bXr z=IX=mhkxzYNqGnIVp4rs!lI^%q^ltFG7#9>>O4AUL|vw@j4TbMsQpxdtPxO~=S3xQ z0$d5swG2Bs@5&7zMLIyY!lqqHElumxLuyWU9c)NlNH4`JvvAH%vkQu}0}_$LF&MH0 zCx%5NuM@|I($L^s>h9=GJ9hNqgh2tnanv+;o6StaS(1&x3=7Mok`bvd`WmSuipmEfOAu0s%B2+$CRAFX#$kc0=t~`{y+#QW5|cKIU`sY< zC^&vMG?Trq6NW$vu!s>%tF(l%Ki=Wm)*(oF$`vdF=w?Eev!5ATNHgPPW|&3QL#FhF z1>47|1MWxkr7F|Sr|o1Of}*>46@;mK`>v#`X$LN*?Qmb+c>EZ3J_RxGs6`c2h7=J1 z0Jr_@x1LSCbreJF?xQ~|c>QF7e)A-wtbjxR2^?5`2e!W_9!f>j&zez(4R^Q9e3~)Z z%o^W*=FiEz@qQ2{Mu5Q^rUc~bhBI-dF;){e6qGEMQ2=ijF13rOTSvoltfRSwcWO!v zkAOH?8~PM>6W&++j@lxR0a^mvjB|rF7h}V8p$>%4NF>7H2F!e`p0sR%F1*mFbouH~ zdgQ@J(i4w9P715Z^rL5fK$~-^k0|?ITyvWsfX9y>rIr|;hXhm4B7KOQMYh%kGACXU z$f%m<2f@*x#M*D5?oxMF&W0~$9rzZ=MtAdwJpvviO!hJ4WxvN&ATF4+3OMdLB&AVe z#8j_128pU_VDAxrbhDPzuP5fIkHp0dAQf#SI`ycw)cyGqjF)!Q6^PvSo_1`rq%R`v zkKUwVh*#X>xpam3g^`1}7bGU_Ofo5fB$=-uzFcleZh(!KM|QuTf^Y%HTM?!*)QS?& zS^JI?$+^)rO%%nq*)UIX=-re{MpULjC8`1!2k< zJc(51^r4Q0wZWtA60kUJCJpRFmcLl$nu4_Cdt6VSRF@eqsEv#SH>m6K=`tBsn%F~4 zsPaG{Tx543a1th@CM724I$LX{CNLmGNrvSbQOZ6^6rB6WCc&PX0?u_A@9XMKcM^=- z0#SeS-FMRPp;&O0HXf%K(lWk^Qw!dwHg!1-X6 z*E&((r2xq?mdE8BY8TFLC{NQlU9l@l7t)>`wdtPQI?^pW>xugS2=a)qIwJ5qrz*s5 zITRcyW-c$L3m3;}FQt9^_NHBXd)X)$pK+l!CC|h}4tq-Wnj~Aq968u#L12q!XGV*p zNIX70k~(@C($0O|si_@q4jSO17njplSYx^6aC`vRbP2CV>Z?UE@ZHLydJn1vGrNUc zY^MH!)5oTe>6QU{5QFtn=4Z*_MY^K#k?F_~k%uTLc8ffe3{>T<<5*&1*mbG_=P3!# z9s+a_{Xtj~CF@}2NcOctNd4YDx1>iN{TvPk`&kE#=tvIORS9 z0%WZnY7t^p$q55lE15UF6xWcLH89u6Y$#Cyx;j#r1;TO`Y7vz3I19W2JAyWIF9GEj z)~Eu)v>YWCmO+L}NY1@uv(oc296HmB_MjfA15Z9auS3XIK`?Uub21~?*i_G%!btDe 
zUE4Zg4w1z8PS-b|&2!m<=-oq=n-7URL!O+8bB;L(6gN4Xh&i;svDphK)_2C?yh5y} z$c!K?Dd7tDW%R?W1GBuBg0!z7nViJo#q2O^Tr)xex?MPCG)c@s;Jb-?t0A~uRsW5L z$m>R=b?q8^B?FR4CV@~lKW9=i^F+V=EP)EoFXmSldw2+njkcsKsC5Tmb#D@*ta78T44Zm%E{m9xD`oTGImg~CJ;k<2ic=h%76y5s8 zuOp$usj@ju`%(;tRsuoNBP!~Ppr|+UT<$Z545#HHW(hdy4fh5TVlxN`VFMB2QVye} zl;oD&Usp~5T4!~-=k^W=6gjAtud+ccyA0I_*{|WTl?N$rOpu!B>^$){;{aDm+mM!e za6PYVgiw@`VHd(8_cJwCgp2ZGI(M41KU0fol6Gcre$c4_Jb`=f90ty^w4=WRuNM#) zMGMO?a#WGi3)o@9V=zaG5XqRoOwXcqjOcZ6@b##il$cCnDbg|{vB?~b$}YjCAdxVA zykN|L$zg_DG5Nc+t>rPo$~ab{eyXa1yW&}D`9!$uNOhJNn4hCcCTgrGO#pj9gul(^ znHXO{!a*V&rQjI^_pZYarYE2LGC_9jxQQcaAr<6E2~&p3p&n#hP8)(|gee$WaW`>NhiS+jKKTSuTe?IL8DIV(WA>el~oj7-axVi0V_pZIfR>90bG7y>)cgNvy-)+FD;tq)D;K0>%7DQpZnqdPnF6Jad*_g_ZWV(>T zW|1~^^>th#NC}$i!XhxE>Sz(3QCri%&Ll(|vc{bGJ4g`(>ipIto%iF0-v+!{ie0!~ zWsF6r0U>{BU~D>!@&?zQzI-v=b6^)qkQ+mN6P66FH@%W@ZIxM?CSEOzBaD5>^Z6+| zfYV^Qph^*c2yzf#h)A!!$EP5my0T9JixR~wD?Q%0z`5%+2NPHc!)ae%yLdT`K%^^( zy_p^uNe}PnPYn>)Vwl+N*hDH_NY##?;`k_G61mDyh}a5*iSH5=1|D`xh`qQ+wcH|% z(Kz09i%O5oz5K9QD+<{ixGm;Jk{NMu5Rur6yzAhhXWlHTwlY=8z#l-)+UbnNjkGC2 z5cd7U$AJIn7hi%fL8yKAoWEPhPzMn>w6v{%XR1Xr(1`XU))8cV^3-Ya3y!2-&i(CQ z`+92Z?xaF73|S8hCGfTa+% zayWtJh7t_sHRRE&N&9)#mBanxn#@{Ijyj(fXOJ3(SJLEJ^2MQdT>_weuVo5Dz2j`|s)8z|`>CCB_bQLw(B+r;zoP+p?7*PR1m?}ZI-Q=6=?Q5ZMA&3*g zq$H@@?HZzd6+{eyfH)kGJA`q08v6xAQsh&`s~VL{X*F*2*w^y3zY)e=#>(K$j$Q@< z*LB`hJY{k*NQ$ds9on=khNUHfN--y@2+V8&EJ2)zi-9oe20k`64hM>zg;MdWE9jdk zKR<(=u9O6}ZLQl9`DGB3VQ_d*o866$8-# z&_G=bn2)qEgBPT-L7DxR2a>C)xPNKmU0 zsa}nmPjA5B#u&SSh+S0>Bj!zs&J;!3!Xo}nZ#J&&-L@@t_jHGSX=H3PUA}xJ%?w^h zi=zVo2^c1qS;M`il4vnDjRe? zJwg0cPgfi2L;8J^x3oNo#RKz`__@RVbd&So|jQmdY^&BI;M!? z>Oh88wq3=A8qmdNiS~fk2rlXtrBl`FeCSqYl3s?%vaiC6lA$&@ok1kVOWMm=j}j!<nsFx3|pO*4y^q9F~Y<(x{ZUy zHi;AS<+Xf=jv(z4+=?6d26#o)b{Px@s2+kn7oh`6z#%ztt!ilPr{Sk&a0D)&+P2$OAkK&*;I}cT~&h}iDIkOz(@;K z3!Z)Un`DT21u0Z>ie9V4QX=IE2NGz5S4Ax)>?84PR0N2=dmW4*eeu|eXy`x|u)zHS zOM$qARO;M*f>_lO<4@%$A9;iO)&h}$$*1wLgT8UTb}-EMwkH1=qTf2?gO!k zT#a!t+k?EJEQGr9Rt#qpv2;vH^6ULoAN;l3zE(b`I#rnVw-x3!T9 z1S26*O#{aN4Y-CS2-DIic8L*QxK=N_f~eOj>WJy7$4)?8QfU<&35aDW2d8;;2`{Xv zbZKNR5juy?o=K;#7`V7!qbTinHH6gE1A!_<Gm48hmfLa50Mp46Zsva5%Zf>)3B)7G75f}O8kgQB}$%A9u>>EDGjUZ|8 zQX1906A?!W1yN)sK>!j*Q}=899LEkdKg*c0-AqrxHDN{?x;#t#!UfXQ)TW-!9*}Hz zy6x7(>G0v(LEKCp2*_N0%e;ySq^uwiIX6N)5u4x&(ts+pC5`R?<1{*NIu)fff*Zyg zsDtz=W{NO#_yUr`RU{B&r4AANr`O+^bLYbID`LLfkZ{rlxrWCa?*3(Pk=jSh;idD1 zo`JeY@9giVEC8jhuOgA0IUDUL-PD#YQ$~CZqR@h*gj9lPX(kI3Q+MtmrbZI>g9`*I9)05t2-`^NBECldK*#sh&p#QqgVUF;lH79!RpU~+ z{gzwOiHj%M?(zParK#!+Gn*E^{}G%lp;Q(RAm& zU1=!u*DQyia&FtNEDt*9Zg zP(VcV#=DF}y+;2Hz?Cprp2PFvIVB|RMFkB{R77Bp+meMsWh_+t%^^9>qqbIJavill zswGjKXtodcgx9aPWI?+WT_}Yb^dA)rbq6+FFpI?ALH6y z8xmSuZ*RKqGmjGd3qC=*Z*6W%y{JU%n9o1_C(=JWofWj5umjRBC2v znE+~L(N~=ijIna>+Bf^*k&Iz1RPh5N~zZZa8FZ zaguJ`^iTtHvqMM+<--hn)ECFJQVUjoVF zi3dLk$BuOifZ3?P7DWb!r8EtP%4S7Pwo1Iy=-5g+du}pa9GW4Ghy~9GTvVOlW`sCZ z;=s|?S)aD=Y=?VqqK{4p_A?inJ~!gYN1f%An?-_|q*4N_A#b#3CF-fZurkM*2h#e zhLcGj9G%jT_{@6+rpUYwFogXC&QcqTpY=ML#k+2n@&dko{_~&1GzC32X=L zwKrKjlAuf^DHB*~TavJ8kR6|#OQ$amz+tZAyt0FNCmX_^127Mjskf00%jS6TxgV$Z z-+UdSJxPis2nHLf88zYpHX8G#8DqAyXM1|`v!6--_n-g!^pz){fJhz0lW!J}&Ve9A z+MB-e_@n92zCGy={`Ak%Zlse#d-ta8s0Q)xArSdxKxaLD{y{BGxO|U-Y6nyr&;8RC3%!KUo4`L3^HdP2KLH z#N@p%PA72a7$);)F|HE-HtFNY39-0KrkGjX<~{KXHJUEg_YaQ)*$3KD>JV zV!DXh@19+|ao#9S)wsZSG>`?O33U?f>HKGzq`B?Un;%TCkx6OvTjE%a?YIU!sDxyO ze*^3zGY)g$e7b%fnIz|!={kR89Ip{7)xDk*=Y!`9H)Xee`1fXnDVQPA*j7it*k&hg zIDd1(RJcBCv}n&4ekquV%s0P4m;hY4E-5K%5$V$7J_nt>gk}fj!i1-q#7WVgmjm5k zqXOySjZ}mhqPT)dts+Z71$L#?sni0}?<4I_UsGG^s%s(>d2Oo3L7*ZS0rm9Qcvy(F 
z65YI!qKb*Dfe@^tl3Pd0n!zjY;^j$@!E72u6=)H#JSv=0)OC8e)MHF+YN-YhH;}=C z1wkKf@UVobn9bd=0WebPa%O_Kh6(h%*m;&HmN;)oYXGkpVp>!V@DtP31OaX7sE4Sk zx`J3y>#368%gK&m5km!{2!=&^(vbjOGhPyNiPeVQG^qMO87z~wJSSf<9BBsx;hr8J z$)UCyOJmrI%JZbrDeZ+z*Bu#e9lXEhAikp`Q|S~^ROE7lW0m+Q*O#KMGiJsBJhL2| zbW+Zb#={2WA{r;7GlHASbVd@5s4s)@B2+k>i$t9uP&b-{Nv=H{G+am8Qw2H4dCN06 z-zJw_f-vdA?g%BnIczQFcdG-TnIXYAC5c#F%_^$g7Rm+e2qf)hswXMs5s2<3T;1

g5BKBl&@aAkS?*;zy zs8Y3y){quUn~e?(6A;=fNN-O*a6dMlmDGc6?eM;RczKTE9oUdgojps>sf7hmy)rPE z9=ZJvh-qv3Aq2L$r6GL|LN^0}dGnpOv29MIHi&68hQk#Qx)}ltb(jFzEHD@C*s>1p z?86PY2{(9DWUP^y%k76=e$FL;1^kw$t4NC%2}YiS6E9+ZVIk6!w;h5(!*+uj&6uZk zI3ai^wh`Pq2+F-B*Q~QlNKB~JGA}XK+U4e2a1Bf*tAUz%4F)Y_DfX!l$ByN1Y~B?1Rk&Br;abbM0A2JIw{iM$2`7mY*zx-D5^ZGdN{I*RBRhy{$0)-fynTw} zlMp6Vt8@+mx-|ctev_*CrObX2uIBkZ=pE~h=`tgsS!BjDZvdGdI#we2a zkuVmCu!P0pq1D(=D`1c-+4t`CkZ>W$O1v^05lkLy;h5uptbf>YQ2v;|=KN)A1&G8m@G6!27@pBNgjKL||^@T6#fEJvdpbIVw;AVUPuf&OyAUzkx zBZyIS)-FqwvX^3^VrJ-J`CNpG$!=|Z^1=i8uQV3CXJ(+%+$<{zsT(8OF>b_%VPnJ_qw=4MJb-W0R@LCjM z!?rQC5+&7nOf1_4>dY8*oz(Emc-P92oV1$_rI`V|x|XSt#q~j)Smfl2TLg)I%7n_-yr4iIQq6z3wV1Nx?(Vw)Q9qVGYt* zDTv_D{_+{@982km&wehQK6e%AWP94y+mEX{1s5SqrL?t#zFu|M7asi#Txkb6@J6C4 ztg6-LfA(S&I&7$I#AY{%^wX6N9Oz5$zVl9c5idQFrgot(-2Xrt$CZ70oHFN(!!$zDK()4fw-|( z{3rOa&TjtL`+Sy})E+#CgN-3Ek^P*0S#4TxQuFiaUcJDxmw?YT`g!!~V2r7iwOeOR zcERPhLzwhZ3^=F#PkMY(!lX?_LSQ_R<6$~LPn9^I;3&jAtIEr9jf6?U$}F)zMRrVr zz4VRG!n)su->9z5+C))6&S|?EfiMlD0+$gpN1$-K&KibiALp~`3NisQtKb3{mX5CN zsi(USEdmZ11j^3>8?K#+dBT&F1jj9N&-P6A<{hNRD z`|0F~qqv#3z|`c>ELDZ$PCIdJb(j}q<{~bTwasvcW8pJ%5Y8cdkRUj=m{(75r7-AG zus_8Y{$#&q-(p${C$4)J2i)w)=J4ZZBN2FDhmW7TnPe7BlfW5FJKy#`{Ux+caB?hT z*iS-4RQeH_V+)a^!#hV~p_?K$1A$&5$YM5%8mpKYhAE|vbS|hP(58n=qnw@25lR$g zcs?1zR#Sz#4CLK_x~&<`T5q@xR5shlgVYKVD=jfykWuq`+Q1#5k|?PKxpMFP29X93 zIn(ct!YvO_-eCsg_aX>JSZi*rkH9pO&sK0<8Pb|Ho79@i#mG?>({AN7E~evn^HF$= z*ivjJJP&vGJkAO3UN>|x(nuaj%Q>zgc0)<1Sg8x8dMTVtJvqW=L2gG+oJ{xLe>fey zbq9sOtVRlvTm@kjVe4Y-+{kz!N(HqZM9KL^Awl0jJOUK4P0Og_E?yW*!{^BAut1Vl zoB(PtlI!t@HU}pQq$gBXSv6dm00Bw#LoLTVnX%1D2=5k9;Q~<8VsJqsnb13PUSh<>G^_0b!UVEiJ!wDfNXr-dt5hl$zRMP`h7>}^QKs4QEWTT>Z2`FYxuIG#Ow8VPAC z_3!9Q58QVTYL7KG@u_t5z2oW5!*?LLA*C|b6%5X~++6zXgAb-rVuPN2=2_yKj*(;( zc%WzX5X^QYM(tp#*EH02QtS`D1ILlY^tC6S1d-rE&7w6>l0c$V1nf?OSish4``!cT z%414Mwt%b(zZ;=lZSz0s-4OpDmn(JMs(jg zU#kCP$Yj{`niaX4Ra<68}J1nHNVF{ zu33%q5D-p^(8NP%fysIf<4T(gNbsf}(sNIuWSI*$arP{`nmsfA@_Ykm9kY%-!n;?` zL9KNg&uuzK%=2zs9h-oZTs0A}t5>G>$2-XM@bH5VrJtj2zi{~~F`Ea0v4-^ek=ICa zJsGerp@eHr_uT(5@k_gKs=_V=Q$9a6n9h+9K&#gh%$FHxdI+pt&QT-~4UFEsCc@U1 zYw*2gFyth1Fh$N_%Qf)i9&WwwsbcoGS{^ojTHW{-p=1vhh>hg zl{)(96@rGIFS~Mkj9Wqmf%ijF+PuD_*RwTa~n~oG&Yi$ z5#(urp|Kotk}fe32A|pcp@Mg|4iITyx>=6G4H>B&YgVQydy~}2m zP-2{BZYt1CUmY1sKYQubw4=W#-F462blYv)C=S-frXcxdejF}`4dFwTn3PR@p<>af zv5o_Uc%P>+hp6B3rRfUUAx17PlZ3R5TxkT}VI+qq13BsahuRH;wEA%S$2Nw$^RI+W z(O;s33WHZ6&c!|H=2Zd`A?b*rgeJ#qzeXrw5$h%q0kO$JhV;wze%|U>hr)}r0$BjK zA<~N^=v1~bgpq4^aw+58d~N>h{n<4Ii*k8%MxSr6Nqac*+^W&l@$y9;NL4G_xDp8IL~?svaSaBX>d z?B4s+J%`iM^{7M2GH_S~s_IQ!h9ZoFc!fE=NWkjJ zp&``4Booy$5!KB7dv>LIh)E@rQ11Hi{AJ;cwQ?1zXvek;%9SYTnBaHSZ+a){+P*+P zYw3%gjs_UlgV+piUIA9Cm=hg`W>DSf;NkKJ>jqps&nas6gDFH7-n1E22Ff&`w zANGLtjxJ&|sV8^i0@B2tJGN2Uh6JP_aKj?DI&Mao3Y>U>ix;N#4iuKcT^xXM>xmb= zw$P4Dl#G=gZXpQ(w@`~9!`vkDIw*U<9Rd=v#jw9FgAnwx~Hcnq_UmBiwSP^{Cn)7$Eh6fCJf?65T>)| z&H%I6zi96&PBFjym9M6|9(W{OA!ykEdIRZC9(ygl^3t=|U8hnTUV4owdI4Du3-gt7+A`SJ{ z>e)++;NBoirKshqD_8MGA$vJ)!}V|jt#GZKaGlK{y~;9TT<|h0D+0j~9cvQLCG0Wd z3zL*e9!(QesLhdeq6lKDN7^Fp#&W-I(keER83lmac*!NGR#9c;26sGVq*f}M#11fl zqy?87scZOMn@L_ndfWqG^HWt={O@}YK7_2fpgg0B+9>Y9ikwg=BZ#kGPP{KbiM88`TaG1Yju zvU{JI-?3Af*RC`e0f`K2@5}Y)T^>nKxol&zv6u^yw9}wo+>I@P;^A?xci6@v8^Cw| z`%8T0inv~ckTvhw?1)dSrG`lfQ6LRT&&t`dkLGHfD>} zj8X;?MAdl11b3pKBmfmP2*}l(f0jI-M0#n!$)gR|?*6_V5VGO)*k?YI+Htg)f=L-5 z*5=sp57L*PdMZ8gz=P@H`3vdw*Iti`y9f5}1@R#{fJ{_NTClBwc0YLWxpV=S`3;b@ z@N(;}UFmMTtDbxPwRDj(4`2J@Q)w@mIR3@I`u(Uubno7MsfR%1BGlDNX^0&mvC0g| zBxqaG_9e@)&LWxI6OJTJfjA9Voz~Nj%{GCdd=F|wrH@M5so*|?T=NhmUn>x7@02jf zY^(%PLTV63nLC*c=W_+Sl(vW*HYz;{jYrA@&&_b|1^@#-fv61tl}U-x?Rb&(!4Oul 
zf0bOVyt+XDEM2bjt;eT~kB$bbt`n;DqgOA}4rdwn1G{#n9jNsdP-WkYFck<;0T!;k zXL}*k#QYfBRieX&li&!eTM;JCW<+U|FbPxcY$ePk?i;{XxWF7D{%poKLBRQ4*!B>{ z3etgh2H^4@>SzeED%Z?*i%k%M0t?A_!2`gggh^@4K;!en!*H%;=^<3aQ%Gv6e)kg; zfBzl#q?ccMF&%mL?I`l7ciuP-I88VmJ@JL7)4oG@1e0Hl>RE@KV@F?3ufFm_Dg#gq z5ZtGCqa}FOaR=80-;flmFl$!|!US>B93g)Fq(@=gpY*;@etqohPk!5VcNE?GtAA@l zcj`6>Qw^zmEKyN`9i;+qGbcdFLoU^&T_)YmG&$c!anT+drL_0F83^PLKvcX`8E6y8 zN>sBMfi~tA;edAOYZ=6(tYis-gdK>i<=cAd_*{q2KE^ya77e_`s7J6!@D8^}iT8y$ zL}=W}%MoXnku-2xOKYS{nlC1Q8-1;WLzI|U-PMR=oj5F2VP*z{cLhY(P>D!_1Kvm_ z5T>GWl7ix*E@zlYQt0kc%dNm+t2%L0NCd!P97s$cLNNsU6F`NC093^k`1|1OrS$zD z{e*Etyl{W-r6l^}j~+<(+})S@wo!7SnVU$5#N^O%I4~R3_QY6L*9n}|!pZZBi2GVu zE=`v%jHh$QC(`(3q(X?FWvz8Klt7grHK|*7lLavm&u_lsH+PYZ#ee>69(r7x z;VF=>9$OaQ3soR1qhtyp0^#bAn9>oFvkIUEogk#2P7-M?o~ zJZsvdqo`!ZuqEAd=bh_YuZ@kzN>Q)w~Ua*zs) zE#u78-_Dt2UE3_OdJqz5N*W^H-*Kct^9YKRt$B6#?w#0LvAL1Gq#VRlDuU*|Rp2-e zVJc-3m7=sO zV-#_8Qg1kvf^K#cU`k+?7-d8$V{*14Ov04o3kFZZ#7n{~VN#1>koLK;v2aQ$L9&>K zkT6vNOYtnBm}hy1N34ryY2}_=w-^t7^^skv|j;J`}N=W4OES&-@tR*dUg^f(L$`;a(d&) zPe?)Z5;%)~vF;KkF{(K{Wpdn3L72dEwU`sCkx+v$p>g@7hkNQj-y>TSW<0uB^yKgS z%0}-t91j|ieyH|}DV)gw;X|05kP2*01_&B>s^qc;f}EX2dYQy4X_gf6#QrScd@zq^ zTn?v#RfvHY+?4eAi9;X&Ah}bhLKSY-)!3JI?W{{X`|5EWt_87Jx*0K^O}EBIFXq`L z*kg)F-IF65_xKc(zDTZ5eEw zNoOTc#G&%afLWqfG(%NR2;Lf;iIM;EwhO331f~F7fYXhd+0`UW7#|_f+E&nsz(GOW z$bRtt$#dzu&;AIaj;#zyvk{N5LkBw2K9bjNV|;!4ncFVne*oNoGLVA>4wabl##^Z$ zpHTAwFw<6s{TWUBU!vLPdl`3g>ObUl%WyQ3~n{0FLb@lK}LY^fKGz zaP4pg89=(=y!^r|-e^RQE{I2*&VCoENjy}u1l`-*%yL}5^iGQVH-X4onR0&P*_*%d zQ+z5Art7~*>}!@HZ=xBGEDy)iTmC3AF1phNqmty&@Fr z-7I2ntf_25^)?#F0l1}2{ z6t@S@T|l}#8JXYp($k(~iS#@M{3YPiGVnS^RG}SM0~Ti>yzg8%&loq-R|$6Rpm(cl z+IP1mKr%gB9s-T<#G4-rgh{v+#)Lp`70yB`=R;h_tjOTG-z881_s$grsvOPBC0xgc z7$=k_RBhVTvZwG@c;{dCpF5d1+m{fPGe_Y;3LN=7Jxyb8vVM!;FSARm^SK-~sJ73( zo&7k*RHh%i^imqfi;u>Fxwiay4wa%3x%THOvOoORZ~t~$VV!4j&X{NY8VRcHXl#or zxj%UBFVlM`UJr+%4er5e&y2#zixR+1YE(>9RSjcaiwnC_Q#pIK_)|zsF@aky#$WS~ zZtze1Fd@Oids(8d|D*qMqqR-ncy!wmrbxa@LLrU&c!ktzctZt;*(r=~r5F?eb;Nt) z9LQ}qPmD&67yvxmCQQOp7ykiXWujlVa*j+ zj3Jd~py|Z8BM8^#&aH40>IH8SRgp z3+e&YVO~>j&@DEf4>K4;bc;|kzI*&Mgy|=t>T4!jL`wtQD9K}+@IX^t*}c0yb)(kX zwyh^}rKuk-hA0&i@SE9~*w`Sg<;7Ceq-%J44W)sz=%uIN(D1&JV52C7z%n+7yvac` zkz5kMW+{k)N>oJT;_AxXJp#pZyCLwWZX>FmlR3j;uX4A;%IO8d% z!f1O0)vf^xmPj9&z!pd_^}qNZ{%JbAy+0lK)-&m+-+3m@4i2TmtXnTJU=8UcacclTYm?DwY^UV9^QJwE+~r_%7Lv+4D>-U(IsLytV1 zIvMjDFZ_(WZYL>O)0ytajlG&g&ENX=4^nX*NoyfiuOEFs{n~@~r|l3d>q|*Ow>k>8 zp&QR-WG2W&KxEjBz_K6NX!iv}GxS%Wg8U(PdqciQ=y(fM9`Jbi3*f96)Kajrn z+;c&k1_?pX*`$qX)DkQq28Ir9qfTYgMhp#N5{gVPZGljNsom(bs*?=KQ)J{>DDajQIMbhu9%7Mk@O4 z|M7P>YVml(Y7(lp67tSf;?2{*SM;1@592$wY~O6rrTs z$Gv$GKp~c;hUA8&>*c~dM(WXY?(#rdM|$hnO$`m=h?a4+tj4Ij0&z4}#srtEIBBeK zKN=PY5u6n{_|^#qS;Z!nCj+_&&2w$_?QyMBVZl8oWEM_%1>JvvFo|SV0d(0a2~#;0 zGnXK=kSgp;crn2_y?^oo>203FofDe}8KFDLZ?zYxu7Wg7t4k#3tikPbTNN(MyVAD4 z&ft6u{myj7(!7ca6Z1 z4u}LV{>f({pEf)R zxt#5ViJ|nsowuc@?!1fo+B2!2n6NI8ZY}nwxyj))eDY*!LP}UdlF)Y33KBHN$i%Ho zaLk``z{7&zuV@6qkwD332|_drp)@naAkST7oMtpa1U;O<4&)Itb z)>++m|3B@$_p~g_dk?&@@xT}i1`?nofoz&VX7j!o&FV<=Pt!I{lV&u7{V;nMZ1?X#czqTPukIzKf7+owoP$N$!Y19OsS3>>PSU`uYUOR@7l#H zXMr=~kvN`C&^RHV&QnvxO5wSH^9X|96~lySfKL=W*!Y#il?!g6>0L%m&i{Pnt=;~{ z{hXTd)-E1>wq_^f9bb5-KPg#3Tu^7c-E626<0tu?GU6~U$D>ycw`ezr7+QL zOfIS{)CsP}6NZ0ii1kAhQEj^f4&p2%DM)HnO`rx0+wx)xV+F?%{HTho;!2>I-W-9z zbgoo+;pC=kCoZTBWUn|SZj5^YIrCJfgL9IJLy}9anm8v(7!tvimR$hs>|+j@(+A#B zKpX?IlBy|+cbDqGkA8jU;nQ~D%`*_1c!GwhJWH(8jJ!15lj#SdcwaQ$f=PZ$N774@ za|s9H5@g7Xavp?zq}}O2E!t8~)VWd3cw8+lrbw`i{&JHKaHaBy(m5gwx|`zJ^eO3BOp)Lr65W0{aH`Djy(0mt 
zVr2A&io@yeXtI{NT6<{4Vw*j0I%?(|3qh6K$0AE8Lh5R2v)#`=PjXrIL(QWA)<6uj+^F5q;cSHX`V!Q*n0_tJQ-|p`Md^$bZ(-*kG9usJmv(ute?=g?KZJ zM8T(m&N+#h0gLbL$8tJHFsnopM%CMImI(^orlEik|rhDz;o_+Sl z(W5rCWQwgMm{=JCwn3a^GnHCTocw(o?xfCFhrL4Zu-wNB3-U=KOXhG0*If_ZZ>7gi z*v{=cUFs!CbQ_SKVn7Bzc>XswskqSYx@)Z+Ir^p*rDxdIKm6IIQgVGErD?iQ<@s57 z8Ux4eyP0Obd9OjnPcyIY*EP?Kk&ksf%-R>OlszY)u5$y(_=#9aDtFfk4%wt37zPK) z$jkhFuF>k6a>ku!Pyp?r#tnvz_&XXO%3dc!H(v^cv##HuJF`as@s%HyooTMuT z8L3=EN`Wmd2%WwvF>e>rOc+vV-sEE9kCYf1Z&@)(;E=|e8eG25TRWM`50;f%QPE_Z zLrSe!+`(0jqZuO7f_ky2xY%V~*|GCg%g@ar0d4^q{SWajc;C^#c!*sx1S%Fdj-X$D zaFHBZQt{SHA}m3r)&*~Vo+*h+c#8icpZd;t={@;3wz)mrcl?pDXWsa&)uA!d*pF@6 zHPKr4!dHS^H1;N=fAcV1p=at#$b+AH*q=$t9hJH8LzJQjfK4T{K~5HlL@59k4`GTz ztsujk(-#APiYgD264Vlbonq7H zrQ=}*!lopK!!|5lTpygerbjVLbWEiOJOTwEfrvzaTH27tFnEhA(gO9vT?;^$lFW`Hlt@1oAWV^#1!0<*myYKafP%C&6w6>>UP>TAV-!-7GDWB+ zlydfIRZ4HK?BxB}>iWA;QDL}`pgcwb>ZoAN14cJ}BLL~0CBihsdj)L{fW2$R9v3iZ z`%9bd68uiy9VP-+_l&o?Fh~aw1|4w{(h_=epWH~uU;eALhyVaU07*naR4%&u6k*k; zkc_x)&6!Ggh~t$@yn8uL5M0VW*H1=?UN+?{6oFGctzPVxF{qL6oj1#JpoR$rR7J&p z)C^;lkeO;zR?W9OlC7TIz0VTyVD&>Rqp?%Mi>X%*;X3%!bJQ2+VgSN4$b86qvzzC2 zP)n?b&#TxM2MEdNz%y?q879_DpUSgAG_q|X;gW$7f&_5|8P?&xy=3`POPw;^2F{k* zANTFXnI_5ZeaA!g#_PMR0Vj<|Hr|aVXO-0S({0(>HFk#cb+7feEk)IyXs;hU zNYz)8q+)AcwRR<%lmz?H&z`m{?j4RjZw;!f2>P$sE6E?ufUmVE1j@f5sE7N;JB^q_ zIZw?~lQCUxO=%r#ZLuWWT9e^Z#7S#fW2(QxKa$7;=L!i6H;6DrV{hvRk%vO;dwBL) zn2c5ky*xMZ&2STQo%n&EwdQqS?GqXkUuO5PEW5ld72K7in7uz~)2@VjmQS*httm5NI1`ixZux%cB^C)Ax7n4Ub z(in-HF>yqLUsa7jtbIIlp2VgsAYt4u1qm!xCKY1EVzI@CW9#%Wh=Mga$mj*M-t-P` zob;)enV@+qW`ctgtZLa8Cu+h)!J5X;X%B)OoM4~kLR;XDA?x3_*x4<;%wt1X;L_K3!g01>JnjL( z$SX!ETT}tp(L@l0GuqQ3amXT01c6vz8yS@AsGCG?JwJ9FdHhAdIj8Y%p2T$Pgei&+ z$`I;9$G_t+gef2~F;-b*o0yrOZb>7ADTJJRezKxRAfGEWlV3rmyp&>xWMEZap>jx9 zY)E;$7`6o*QSl zW5K(3Ik&#mHq+oOq0s)JG zG!LPW^a7EP%}2qs-AEB7NZ=35o@sMIMw2!y1$OY*dwH+%(r<3e(a^8iygl;BW~&0}@7cZI zI;e*FLju<#s0Ru5em5E9Y43V#@ts*tiaZ;&OI6SKC8qsjMLrD0_->K~N ziKwX~=%<)k1v1BQTxp5I#K;OUf+A#Rj;2riqUmM$pdWfKy^|YYcWm?@)&ef{-Zi85 zb2`_!>u%oden~3ij^moH5#bKn)R&+KCf=#@1u23LwUFE1wh?#nY;wwBL&4K6oNNWL zBu-5w$!c~+5_)-ZuZ5$hrJAiK%ab1TA|O&ar$s0N<$jM4EbkbI6)!Q_clvPoR#^*W zGVUM?M=s|I0aEvPVJ;>k-z*S9D;;qDW0qHOk-p|B$;lR+z*3YDe z0Kgch(ESoe0{8;1Bg9G916X8JKuWfbdJ>IZgF|WKW{CAf2$6g;#c@NLMZK3)dH<8d z6o+f5+`UDZq;B!yHh!Hjk%}iBRbI9uOc^AJrGIceVG_UPwZ(fX|DJS|T2gXKZ~y_~ z4VB}9T*0-(@FMioLSVHZ%^^Sb>smE^x_J=z$PVT9-u%Y{R`2|qj{pq8caJ^orr)a2 zdaKR#>lU8{gfSv4K(5pEPDLP?W50~raxl=Q%+E}s=bZ<$BBbqXR=|fVjwX8z42}X* z<+axWS1Z@vXgJ*5oE$PcXIUSyN?q8I-h~TvGHUEAwbj;%q@RN%QGC}b+w+?jZ3qv* zv^4bmJZl(Oj;9|=c_gGMBq`R-AznHxSN}eoa5^AL&8QRGAxd4qjt>~K5la!lfZ7;WSt~OU7l^Yb1m8QuiA?k+wWez zg1S;uZRfTfc(1luD#T_5_fzh>a}{MI#3kUD_g=POzU7er@7mF0_U4Jxcz>bNW)qTE za#Ce#Q8G|DZ`4Dei|5U>`D89Qap+B}Dz8F~m}+xoOy?cCZRg2j6y8j+%}>0W7bXMy z@BTnQc(pA?@{5%UoJsO#M{C#{3o$Wn+8v;|ZyF_}L(XAws~@r{$|F#ckbX-IDI}|= zPX}%KWYau~I}ve0=?!$qm7b?^k}}~B^FE3<@o;YSZcJCm6Cqdju$e>Kfs^<(7hqRXn zF$7~knEI_KFAGf;f&mhh4`y}<*J5R(Q8pQ6-gJGYzmibYcfouBFSwT){fD8K_bQi- zYXKZG?f^Ty(HVb4qqhe+fp(aI4gAe2GV!cdx_P}E{W9T2oeI94(}4B!9}f;XX@9~Atn5&Yi} zrXdDjglYSs({}LqSvVrnDPcFtCXe2%{0u~Lh&Z}n8A%10%6typK$6sq0b~$DH;$49 zM~IUoCr?5E&B?z#NONN?)Xeh(K!LKqM!ra3mE9!79+(dB^c7fuB7?Js`G9x5)fOp^7g+HC|`?`N)AW%W1#?fcy; zHk=7)(m^4ry zD-TI&&4PIr#(9mzFSRh=n?C;uiYd0)>u>C{b@$$5r;i-BeLMEsQjlG3X_cjC7JRmWtfM){lpcG2&5cx<{{g-hlk%wL;o^ZlgPD0|wQqv1*309@@ zRTJ(KK~fB+kbDfAJRbWwCmg4zBpgXvna5s!FJHCVUfsT(;*aq*CpXXDJbD<1sy;gn zgPBbou{EpLB8kInLpGHzsT<;!iey<^Uu(a9;aR)ZbPd9k1XI=HVyAp!VCjztQxc9? 
zF%TblXHLc=G6@x4ER2iBsvj>fx&T~2qrd0~st=+kf}|8hlI#TgBC6a*qvqae>>sD{ z8@uy=_&pI5s-jCxuMD{SLn>Ri@%imYj_74Ha85uFeVA zoHQuOiP0E>862djNRVoz1`Xp+h^RL!FbeU|(+vxZ06hjrX>Vj7kbY9uC+w9h8W~_I zs@B+Rm##D-<|2-wCdtW3B*hG=^T{Bt`bH{G0oZbXbk^u@^(og#xl}6!Qa^;LnX0wz zot%q7I}!;6mjMUpLJcBNnixlUIX4QaDT=7vKI$B`)o~_umn21H#ARb#)G+-o2@}^1 z^L$B62Ovxd7&@KA1Yw$8pbX3q83Zb_n(gTp0x9IfU zFHTZl$8Sy zE^*!v3`wX;EA-NLzhgt^&e<%LX2)RNNU-jz_daGPAQq=~9Kvlp)mCp@4uex?myR5< zM8XYXP(^{}Xs!dyqwAP+S63q1Hc4n8xNi3MgZ%n|?`G6YvkMDs&EmzRrYW@jM~`rP z$nJV*6Bz}rS^~9sIL)4Y?sxWy_q`hxZ;UnmahDxDcNVYKbX)%5eb#%q%=VJaB8qp& zl5056PJZ~7rP04(_co$_ha0m+mF2@nsqLglkC4$Ez*Mbz~hVL{JNO?(G zTfm~U7X7*>_S|l`cokagLb585#z8LmxYW;`Mlnkqr~*yXtbpABVG^??qT<1)_X(?j zrh}Z4>=hytt#FP@ytfdp#&n29>s(yrX4{hD750$luV(hhkhj||b3&-=ma3$6a zRqRL75Rkljo|mBFfN*8$NYH6$tdf#q84nULbG`;icvTuDC*i?> zsr-g=Sn9WD8xPFHs81$kH0~I6#9KGqZ*0>%dP0c4jcQUrxD5M(jHHj;?@6-37RmEY$@? z(h&ikY(d&wrf~*JzWK}>-bV3o2r*Vu*JqcnkZqTkC+Xi40qDFuqz$CCINYdXu`?<8 zYNV2{LeWB8WIdBBbqyFw>0R2}$^2X2Dk^BnndAv0&5>WZ`Gn9}(Zf7&n1${`s){6Y zKzt}l7qTk!G!0nmH7droP*FD$E(%qn20>FIZ#=mkD=^L>c>yn*zIz+7Pn6qs-zEM5 z456q9UfzAw4xc>l^vJ2`f~CrvgUwDHr1Zqj&6`P=Jxi!n#7U@3M%+jixfi+4x!6KP zl=NF8DSk3old40V_CO`jH}6a>0yVlk(nij5AJ=yV4!Te0{(sY+8<&azIb=2hvjM=? z!q7MbYg!oe>P8>k_~}gEe$*xIoKZK>a~N)#H}3i-!X)WgO@}ewHti3@ zZYdR0Hwk?eeA)wt8=sU$Dww;iU~&nB3aJIox&_sjn5S%1c9Ztj6p_)_6)T%Y!exx5IJ>XP}Y`m%^Y4sqyYzsM2Jk zP%v#f(uQ1%r3L~N;pRiEm6>fJozJ?(i)_<|JF$3E?%?DJ)|hDMxHkkmj*B}GM6hbpYD4qFcEPA=;TXqP%mVFePjhMCt4 zyk>hDmu~PzvC*Wi##aVMpm|ZIGva;%aJc50R*N!9CJ;kXx6zjw}eb<7}2_8WT zd>3k$+!(C6an_}}pZ<+>R%ZvU(7x;Y`7RZSyKc-4bsMQMuU{DCxF%5mqt)vHJ&1?7 z4cvdsCD(rjj=RO+Ufy;1`ti|U0*gsRSKsAXr%y-79oo>drP`snez2$=vi15TgSp_{ zU8@s#lybX?2o1xvs?>nE-N;x{U=bi0g`+?$h$7KX!P!|K{lL5JDmAChojgM>KI&$1 zkHKb$9YN+k*1#ahXaFJti;Q9TUmBYr3nxRs=#+<<+I zS27L?Vt5_H1dKXq3CTyMh*v7*nyQg#$N_ZoJ#hpE_fQDXo$P z1JFh+R~hpaKWi><$o#M+kfc>v^2Mw~kqAIi&r?tTl&QnCfx4y_LZ65hC=N#_rL>aN z6#;B09g!Ssl7PWfh>|2GW!7+njBAw;TjGHn6O?jFM9HmVjl5fHS|~R+p&WX3%MHvl z@ikhML7IF0Ap5mzBEqC&x?A9BA@d%%&)-W2x#>3AFz+p1r2vs*J2B}F^9CIkrmxH91pREdyxWclC+VBQaR_U(J z9wa7;46-PqknDN}dr&u!_LCY+p*Tg*F;OcEB@0Cq`SHT=(i><;SKCf3OM~>dUN&J# zFH*JjvEUTNA3&-atkEr&_yrZ~>Zhzi1V|{1xP{-nw$F}3nB;kr$TMZ1m|K*EF&cvs z#7=?5kcJVp0ezuc8hky&aR}^ie1Zss+w@yRo!@WSM0~va=rj=k9>u+NcW-~-Jp!NZ z!010C*Sm!|dOzQVBdR=K4S%brTyr*b#!dxUhWDytf^&}yH4kN*XuVFibWiST|la|hK?Rvch}vvYW-b=D4;S1 z8NpEj7o?0KqjNiD3Hp&XQklP0Zn^(g&)V_5uiMJW1r|pcjs}oQC-#~g@|)qwPj1Et zSKu{3tLw!O54SfnsZlPU>V>g0Z%z^RCn93JF0J_u1#U8aj9pe?~k;Zy(e zHQWE&-`a69E`0E@N9@X#a+^7|gviN$d-Ko4A90T~$}4mrwTS4b9#SL-aR`-71FE?O z!X`R#oXAYivS}qVEQw^S>8JgyzxxJlOt`6Mi6CoU>*u9P%J7IGd&$n4~X4!Y2`hVbxZY##orK`QumD*_|CNcw0 zIl;1nDb}H~cqogA3Th@XMxAJ0CeNFV140svc?QG`h8BsY17nIuuX7bzeLugI^DjU!#@;hfdVB1CU9W3| z@W(b!tvddI_PERsd~t-0i#_Sb>#I8F?e`?28-&O^f7~(N!@Usqy-t|)5VjDwUwsCQ za7oieHFW=N=Oc{Hhg`ZmIQp=4I&C9x$hfWUbQHsmON=zht2> zNC`YM8N$?qo$Dg1w01nRRPDA6X2*vrCp$0Slv!Zf)LE84Wg0{%35hP+*{WX07(a9V zT#D9ZYk(Z=MFIm|NOK4FA3!yf?4$x^Y@edRLwA7YmP zv2CGbgK7Z{Fjny-o6W{sOvrecb@=nAp0b|ZyX<}{_YUD=K15vDtjUwDku~yWRfP>w zO?vIdH5QNb_S^6M1Q+XkTe#wO+xy~HyHZ(4JYuxXgLurEH_sZ%D(#hRJ5U>T*xT>j zWJUAl+Mi!~*~+g}AytHv8fm7@T1D+G)M+p6+=WEigY7$-;*N`L^6a_xpWpfp33=z) z@;g@BzTJDMJe_8^8{HmN0=ZG5MU7|*@U_{ z^0vnx#g4}K6JR}vDt7?l(2M<7-y6tc9|_Lv<@_Ot&E`*k(lQq=u$t*N z%gZaQy6TF(vh`&$*jzz7#rY7o6nSCN|3nBsOAUeso`Nxtm5m7rR_Dp4r(B+32$lkN zH3*X`*9tp0`;Xv_z6S`D)N5J`!CC<62Rk;=KH)8v$+)H`NV^oQIUE-30Q)A!L*51I zqgu#{_iy&MP}aMjGUK6SxR#nmNdi;xmXoJmejY{CqA7^g+JMV%c4|yJnBB8 zgb-E9-vr@ELhTnv82}kx(Vo!(<-8ICf-nI9R9mAAy5G)R?69+?I4|@>6W5{2xJVNm zHkD0Bcg?&rL0pevG=xA&WlD=fC#$Rq@gM~RITE}Q@6kmkw)$O`dGG=WQf%osMMXw^J1rAUcv2 
z)Ax>cQhOm`4MP;dAtpsrrx4^>L`DH5cNyA=kx`l>KiZBk)Ot~*y(z7~YWq(dw?`j) z2M#_+Ogz_l#A%QKk@9oroeC%hDX_k=(TacF&Y-B+G(qSEw!qWZc%&Xs>?z-z|zv6qymRHZ>#L((?--lt_chMdo^E*lA>=(cOjomhPj?G?v8z4O72+`D%>2`Y0A*&_X z?qXctV|llI2liPe1bV?8%isW$ZO03La?TT*9(mCA?%rq3*P5*6a-}U_z1(K3TxoD+bAljYQ3udKHv~W_uRwx0Hbqzy#N+;VK5V5( zc?b6$woDjNmof`HM7CUrHgg_M0Yz+oK5S<5?%rs*^XA|{mWsp<;YO^NHPk|d^ngLxxa3(E2?CV;Na@g|adAtB`P16g^F_b{#K^^8 z$?<`HJ5{78TmT7h*FY1(mtE~vo3i~+)U-x}HSG<4gg)qHf;4x&4)MiJ$GRsU@Hf^` zmY4*h{viy67-*E1BazfO+1bf>%~6La6i%pz47x1hL{wBMAYJkrLr7SJ5P_ID^r;Z5 z)J%%>q56wUBEv!igb4}8&!C5*Lk$IJ>skmxyWDNZPE+))E7ID#A!77I(pH$uwvFp3 z{+{9#!jYQ7AWRVus8|3zmf!Jk!m)5GVIX{Mk-TImfZ7i+h)#;O++r35me>JeuKHA# zRx1(Uh*0d3(vpxoOtT9zoa=Zf<0ZqZJcxu{9mgUjV`LT_^40T!7^NB!L5HDp zDv>CC3DO+QjR2mAPe3wqs(_J8bU3hI#KhyQ#@|ofm?#!O>M_reVbs068>0t@t90Nq zzJsph$;fW?;X3fTYjr?<4&2lGE-6dr3C<;9>Dd}1*=1A*N6ovB?x$aRy51+S=>+e) zVnU)6FNR&SnuM;I`9)SQ__>{O)KE~C*yTjURq{TNJ}Hbzw!VoCZ`(Y-wD$3;qI+G zlu_8U7A+!!cWyqe;yCLh#5tGmdQ?_YJI4W!KJ2KGIC_YH$AKJ=UN~!)uh!b5?|vto zHyRB3j$D9t0YVK2Uk7sTMsiY~K_$-3!jK;av!!}Y;TZ1~Oe!4E*VCa z%+9uxcrP|FU!h1@Yq>T=Dn+E-t2jn%+4+Voo-^BKmrP>}&eO+in>KTX{op4*BQITw zt(-sKe)7z7jyQ&*Vhh8gFBVC~<&oqqMoUa84w-`5uNoLBL4BB;m}=Ypw3S?mVK#O0 zB+D(zwfzTraUGjp6hcT$6 zQjvzz$N&*fsqiq>4q|i?AX@RF&~R8GL@o;n4C0}ubh)v(n`cwoDHHWy23~g2xSNOr zl80VIG@NQjxRqA+QP-v(!W2a*3oqB58*zA_K`^tATy~7FQ|pDou|&iDN>WlufCN-| zar{=uS|W=i5u~BA7ace}bfCh^L~6<{&VW-T=N_FY3R3g0e+r z_$JQE+pp_&uEX;;-A_ln=f9OPG5~mi`{`bKk|Rvs5AC?Q={Y0MzkZ$0aGmt}rG*pN zv|FBZ9^qj`xLyQNw^2=^>x9XTm)^yDU)@lIe&ojb<-SUC^pm@-6GTiH5sV9IVNfGC zv~<}bjN^|#{vM|aQLZ}0o`^6Nfo#G+zC$1wWv@ww!5F?$X6K&zg`GtOw6drWBn4t7 zSW_v1E>~Q#xy1!GZOwZ7&5M7=d#uOSkVI9^3foZYQ9qePuSoT%j8y%!2aM9m3$?Qr zX2R)DCx#^ndyxWMrJ<<6eY_kbI7Ie|qJ{Hp-kQ}o4Nx~JF3C!YrV*eHQ3)5UW4l1* zb`|MlkoQYM)i=y%CInGI#^D%&PmrC#2SQtO-$tAcP>(Y#Qg`*y#~9APqU9L}L6yWVWzjHFXT`mPsi3tOCBXi-u<9z2HgM24NXc;1r98#!n0OiP~^vspc+_H5m5^QTTDVd^s$j#j3axTbz2+c=)%ss}jOWRgZCCK0BG zKKi7c!qMVrb-6vf;Vx%K>_gI82>~iV4Z9PmZxDxwR`5zFV>o})-PDPTX5Oe`4ACzv zoP>Qo)2`xd^7U{0f7Z}&4PvC)bKxXvjd#hhMVK%mz*GoFdTWALb@1 zii@%#L8S;nGm9dvQ5?i6np}J7sP~fNkx7&v4yi`5CGFi208q5;-g^aiP|_9Q*yPgs zBQ_60gq%kldOZPXBpj-YsjNVJ`T^KzI2`&Ag@lHH1W}46mmkSeAryFh<1s@S0ti?F zlAhFu9f;SEPY4`H0b(=+mQp<b03!j3QU=Q?fKWtUNYimOa5fUvJuey%JOo}G zkwkEDJF2PU#Cd=%g7Na208i7#1|}9V^N_fXj`Y;A8z6M4ZM1uAV`tF)I&XplA{^@1 z=oUCtI&LS0x$Aj5-obt4k5r=LoA-`4b8?Q9xKs7cffk6&*Kfttz*mfFfgV6`=%1R- z7u-nd5|QOE2oDB>7n5~GOaZdD+j=1sPdxDi2Jv}F4z>2$_U*QA%^DERBp4zPALHMK zG}4PxL_GH#L=xKetKV2UP6p|y{roUTQNZ`5<{Ddi$3g;EOYD!or}`*JQWC`=_Ma%$ zPB(`3A=xpo&m=>vm+Ed!$b(wmaYcpk*&ZMa3)1 zZMv+Yf-s7F?5Q{v@2Ge;GDi%gHiJWd7$)*c;`8J#{IveUOCpFcY zQGv!m2ttvRGAHFgBv7~GC`E@Ug*;$8e)*hTK5)q1zvd2$nl{B+w!BQrqf+K&7$*i? 
z@NpK>*bKY%t-5YqjxZ(Qy(d?9Rmv5Sl7mqPgjC*^F))7#5KMSJ5I7s!6)`035TZVC zkdUJShm~3?8}GOw=24!EDjX_%@VYzK7`i~K-G9)Yec?sYlJ(oNsZ*VbPsKwuHd-}e zI^|>B*hyB7_yl|3yPvSbXHVJDs!Mj?raRFhh1$~)-x*oC*s6&sBKwTe4K>o=Z0uU; z3+K3yfO_n4h530_1Yt^tU|l?Q(*EUJ|K?Q3vK1vDDW<~QsC)&G3t^FGatS1RaRUP*g~W+)M*MODm4}t=oD-{s?8LKG?ECRQs!GfDz*?D z4&+fAj#^JdN;aWn!A&R)lcH@vIPDQVUlZC%{qG z3&*KssUi|md5P;#3V8*%YJm!wstT&Or{Ik~^1N`wZjUfQ{7_AHBjNpN_g)O_bvo!= zQI&jFMoQ(vlmCLU0DJp9m72I8FNKqy1nOko6@4zia$FHRRE9nVB%$LsHt+nKI3h=o zJiyUkdGiDXBRx}>2Whe~1#et4!9^a|=3V7Z(f#?NrzmD4So4Ti@EZnHdAcq@boHs& zE-#zK_&WyfOWnhV9jp0cn^|9oOGrL6m}l$F{@Tlvf&F~uzh>>!3f|%Id8s$ zfzB43d$1*Sfw)=O@Ix>&*qSaMJYtO$(kn#o9|MyVOCHH)k{z$UdpY%*dhH+q)?g4c zhhdgZW`;aePKnqeQ*nyei5FV+wR-2a-il|P(gfXs7n)RRF}O45fFSec%(ZYh>v~jk zx%^hDE7_;2AWYMWry$kA0OINmRdO~}m78g}TxMk#ORW&~;{ZXgQ6P0c*QvaCFR-t` zb!8g4c;PgvcwD~=3RtJ|IYdeU&ZQl2Dl8DQ2)i;KD0_fnVlE?bRa8->wha$gataUaaIaK}Jlxsj)gj9IX?#GQRQEHAnwvGpsR56 zB^0Yt5pgg{inUS#)F|em9S4#Gmpvf@+h9A%QJ*Jm1B5OY+gM4-R2TYjp|lEF1_q(A7Io@un>TZ+{q)B_v%*oq}ftgQ5+egB6)z^j<}EEro=piP7jD^s`#HbG&) zA&Bl4ngYV5RS{?_*3i>FXbpNqeJn#6xOE@!7p%FpqWg_&T07c;w}}X#Z2N(A9Q?}h zdxbw_9}cvToTaN^QpCjzLFwhhJ;}Jn#(rI(qIaPHU@T4$>DY2oaK}!;xS6JM&>&ww zDmGCHdBaJ1Qq)*1izFUNDGK7G4GM#*4p)YuXtItETP=&arWQv8)N4cJhZ9i}Sr7tI zrCCLdE51TiVe@lSEj?MKol)h9!Q&A=h?gs!E|f$%F>c$iF{ zLGDaErZ5l!Gz$9FVsJ$Uxn6F(y%@u#((@*XcM38eq7_Pef~>X!KU*9XX&@M zPy0pS6#vD|_4DTDuDN-a_X`^iT@KcCmx+MO%WfdT;q@WVcXcNq$&9_{4YkAABR4TK0IGA08GnfzL+$q7cI2H$K3pH@i}zDw@Ab2vxk*LJ{?Zq}Xup2$zis`xb#@nO z8mZ#S&Yea|XOSbNsXkOyO*ImA7{>B+3n7d2v2CwecxO9VL?n$3*xEa8vxJ-i+>eP- zNgzNM#M=#H*VfSuw@xBtVx}6&ApYl9w_8a;p<`(Fz4@kPPbtKS}1R z4nXGEg^RX^zDV8Hh0{Y>De6LyoKn?<;h{GS5+0^sB2qmNyPdCXM_rbSijCS|AZJNa zKGxjvv#0HRX(|c3R@4# zKf7|pPVd-mEf+6X3^6*r#D>Ka_My@el7vKzx*#&LhYdo!hmcHC>3`m&0<PInj5vcCLP-Nui{t&A^2xTJU6QVT?W6^?>g93mRoUF7x z$~C9uDz$$iQc+EH4MDj`R~f_?fd>>D){LZ!F~ZiaTT52(xj1K)VT#BaS?e&ysNm55 z+LoXE9Q7u|82ePG)^%FC-SzlmmNI9iRT19?$%N=6*_XfcCEK=jD+NOf!ISY;36qhA zl&q9)@~VqM`VQl}#$^D`nFM75ioGyBFf;>* z<1v;;^3^m{cFDNMB|@ME@wjnSS>Xl*0CGQ;3%GzoyOhy33}BLX+7PalgTvT$!jq{q z+Cv;er?s}=y#&WKz+`h3$Xw}+SPpc`hk8$L>%~Qxe4;Cl971A(TM-3=(1gRKDej;j z3N+-$Awiyi5Gf6ea+QTp*;a(f2VsgtGK=LMBbd|>7DyC;CihsmhKhsiQy`-V6MYs! 
zcK^n}MFhk(K$!d@Odzp8y|LFSSU71IZUrn#og%4B`f!bhBs)n=?w_8`<^{=W$#YQq zG?98Hn|Ar=lL`^u+|(Ja;J62dfzElK&b9*5bT`l`e^Jx5H-KUEC5`}&0R8Cw`tEWJ z(hrYFYL6HieY)51zKr`TKG=1B+>v0!Ot+KM|FElBFzymJRvuwgKXlydgUI$xMqWph zKxKStEh|}Q4#A@*PMx8e>uvT`kO+`&gAg4KI!`#2AooF(il!D@5p{^f=v;j1&-UDp zpRz*WCI^Wm4&aW%NkL_|BSEm&q?W?2BnO$=Yfa7wzl4~apa1$u#@L5;Fz(-1!OkjNp=Auu?T4{0tAcSv4ujg`a#B8 zrP}ttK?w|ON13RvQb2mjcA^p9y8Tr+{$z6NU9yF!k|d!w;!UgMudR)oM3uUiA|a|-Co^ACEGKn>_Z>?0FxVP&%O93du{g~ zn93Skzh2wjf*7r8AHUSkQK0+beeY%o*&_sJ21Gk&Ib1s;ekGBI7B9r)0<> z@rs8^v#h*ai^YFv zvne=BNF7KXD7#iwV`nLXSbnL@+4~OG0imh}t;RZmU^)R?=9=P9TJ@|mNk~DY_ z4NNrex8;?s_Jz-V9_~`bODT>jH6_GW*&3vMioiA=3Q>}ikFu_SLRjw(fCa=iwE%qI zw38nq%wpUF%gJLPg+Ferg|XL&FsV<$n%2xjZ6dYY#8=QiKvr6#TWP9qhi>WmB_Xpm z-S2m0szIKE1LD;Idn6@EOuW8BK45nAzCPzVUOb7!t4L6B(F3H+5V2r#<*mcyYO*!h z5>b`v%}G5>jwqpqWC-p@!G>XxxS4hgSyOW#-YS$xZ$tM@Irct$rU^-Ph)*x>2hqlo zrYJ8PqcG2n!$qBpUc`|gk@Q1^SP-(;3=yQO`liF!q9nZq=0JqYM_#<44(^JC6%S!j zsu2}-lbS%BrlP!EIwGsQET(Ioe3-~R7oJb`Q3FG1QibD6$(`1(Fm!Duw8ufYhObp26AK$q(18g`A2|ErsISV zla-3?ve!9C^pT;U@$`8+_1Z2=W}d122;#vr704#PhqXfR<}X}~G_>AcdzDh#WH`8I z-8%dEbI;n`=_Q0H;W_rkE-PNU!an@3Un52^7kKkoWn~r2Rk&4wP>YHtTW)Ti)z)5x zFkP@!D{lwBao+}pqHrJAiX4Q2ist-M2v;Mn)KiNmlT%G@Yp5fN-cYgR2+kxA(3LuJ^c<|o4?ZHhOkkHDUlhGukkXY9Ku5DYbvh1?mvwDTg zYLSKaVI=DV{kbE?&Gk56V55mZstki@MM0G0Q5c3)(*ykMBiMGy;w8Li6op1>kc4my zLA}|_m?l6>C4H-MwSs=@v3nIW1{no;B%Eap;|x+)RgHbDl@vwEr284P;|JcbD;F=4 zj;zt;_>(j&X(bai=e;;fWX_*!FZ}XZZ~__HnZsZG;ScukdmnyUszex;CqMQv-2Y#3HZ`ei<59ClgBQh+MuQ_| zR~Bv&j&wLiNJU$arYg(9nh2As5qQlbNP+KKv%v_Omfl~m{Kh?Qteq=xf>zyV+i+mN zK4s$Z6R=mI*IDf8}AzDS*6ssf2NegWVj|E9e%`Ftz z!Yi<;k!rJ)KX0cpsF$!6iGlM%S^UaEF>Pu#Dn-hzqpcEQ@u3pH-3nyGcOeLfk~r)k zkePJR$Sz1QkTKFNb>oqRdlYWx*a-jvJh~C<={KUiCQ^EO<;hce8Z~kDoZ`OeSKynp zol0XP4v|&m*_a?qR|xja0-;Eir`RfS*iaP~lCG3r2u>{_6^btom9dR7J*ZVFP zm9IQ`cN4y4T-P>2o|{ z<`$!R#yBd8sBC$%qn3h5RrW*%^Y=ggxIM7xUaF8*+naA5w^USj2}l}K=FTS9TrxJI zt5$aT5<#8UZ1bkgHgxo){l6dl$l`$UBMrOJY&{O%f?N)^m4gf zMB-V0*9N3`%4Bqq;Ut;bJgC^@{yhxBYR5(TT5T<^$Jwa9u|qK&vCQS+LkI1pKmEzY zUp+i^284iOnRp*|5PK&w8b+D!7~HLgAOO9n!Q?V6Pq-e`TlMv1;?By%!6CzrlwPr| z6b$^}6YsaHc$8Ihth5q`kpstw$HGkj`2EMPe9@L;GmGH*ScrT(F->UR?aZ;GcBSGn zQr7!W+1FY;cAhAReKhKBFs3Z>&=30oXvk&S0$IF3x6^yBc6W48MC+j*~H zmxUq}oD{Dy`rt?2B_wCB z|Ku5~yHshJWDSu@v=T4ECSZBN#tqho^w-aO&s(x&*1?Z#Ur{-AdFV&_eEf;vs$^>2NQVe(u3QtmKK(lgltF!7(%hCj*da zF3DD>P=Pi(JrwmGiBOaMc%LM(V7#DMAvl$@R{pMj41_ANu7YWv1SS=p5H1K5M+fE8 z^MOOwwA%mxKmbWZK~&&7I(&8w!qh;u+Xf`3Itu&sk^3ctfLzcV1QO#QQcy-39)Tp1 z&7>Yq5_;os=^sImA|@e{lpsjb)ebQz&Q1jpq9iVvkSQ+M*y`MC#YwC1RwSd4hWmOf zgh~0Plm);cbrC||Sj0l=AbQI$Hus%@gB~Tq^(jM!A5V`Sr^?%B6{tZ{;WkB>L^LH8 zg#mP8YWxstpb(}=`h1MuI0xuczx7<#e|oNo(PT?%((~j9;d;XT9LnYd;2SPmlkWa= zoW^%u=#aT?FhSF49)wv#F(x*BJECQQ~#P34wCW%>gjFn55+800j zS$qD!|67A-cW=7K<(wM^sf5D8*CCaNFeSiXEGU{{@eriTCr;Vk?K{m^S7DQ}vxK zcCI(6JG6Z1ZI*#llE}LzA$i3>^ks{?h__>Bb*<$xR)gF-nl+V{m*bMpo;Y*X&DGMS z%Lw%CwklGOX`N>y(G7QClZFZGfS|^qmP1Eo<#kA!GfI${a?zO(R>Imk^X5q_JAcvU zluUET4p!G>8BPW#Jok%dAl`>*teOb?jiVoa*6xZ~^T~5rV$c8nMe+e4Q9<}Z z!Gj4no^Wfc$EClA`!B-NFJ<~PyT(|xLo{;=ifjr@z~19W>~mlGqLXA(A)Et*7-T?{ zRhUs~y;!6tdFr}#0$~!eaUPSxkwKa?Il-DxgkagNd0__!-rhajIt#LJwFD#>1Z~_P zu1>I8@X+_%MW|Uh> zL8rolp&^J!Acz#)#!IH8lFXH~B*fUE-nN2VbjNZT#qH5~@PT;yk!HF`{nHC^>g!_^ zS=8Zp*T5`+I0!mrbLBDQjYnFdUIIP3yZjKPVLaSA$ty)13Kn7CFrG*R`w8(xL@0kB z&KD_BI6@HUN+xf4;Yc$HeA0xHAcj)^FwpRZAaOjXNCa7n4M+(SBSn;8Ep<%{L?s>- zhNLDDAT3bI4l)obUdt6LWWOU1jFB@Q8kBJB;~H8>cGL3GanyHr&}1V0N4W2;no zs*A%bJ0dCKF5E9^Qpi*ilWbcWD;K)~w=2=trLqTjB|V5!jDiYEjMi@m zkQz*y2Sd!A0KpL%xBC|U=!Q1{V)TU`xf#93`!2QP1ec6IdDNbO96D;(TYh!*cW{ge 
z*4#pJEAr-}%KO$g%E+XC>ODl9KL1;rv}lFB?{EIr-Af@CGJ+4ng-2kE^$o)y!En`+O`;SP zUkz%ifBp7%TnP`_wKe+$B*0Q6-zA} zX~~ZyCO3M)l7>r&OP`KgS3mjZ7%;&PO{#4ls@kE@Xc#>DB#g#<%jS6+8_PdG{k#pK zZHOg!^ow8kJoX+m0I21lXh?y{xVe|vF^JNVg$uwJ$&6zfjt%kT<-_R$sUehcs;R59 zW4m{eg}(rT3B$^}OJ$hM82|32mr&bJw#5)-KlrDUn36Umr3~ymp%8*WJSCMrOwz8h zl*WK@r2CkMdUfO}BKg0l@$sZoP@wy|o6^IpP$!D&(=heGr={+zy<rmNrMIFyWzUP}~EPOYNjdNJ%rNr{j@BW{7OLYm)v47h1V1$73`USJ#ORsY_}! zf?Fjt$e}``sd$kP927)}6eYEe>AtHz58@dSfbc6;DD{(5oO4w;QVH1Qq?=|uON z=pxp$Q*{J8E;oNSFk=pi`Be6X(apUf@9A7?IndbH#F&wh8`p5%=O4fNWjw|hXPByI zo`2p7@uGY3Lm$EJl5IY`?%Hu?P-f==VE0^EsV!K%&@xE!>Z`tBufOyM+e;8`DBOJk zhV@hsqjEV8f;96XO4Fv#AbzRYb{suU{=6Pr^^SMhrjLHoIR*`3|A{3o5YEiGg)3yj ztMc3nOw~B);?tkSkPb%_#(kwE2f<&Av9v@hLzx$!IX6D3U?wA)x)&@J*us1 zJ%g4rsmR`j;a_>(!qAY&JzjA`SE{Z!m|ePLv9mYIxk%;C3#SxYG?Ehwm~n5l*YR4* zMLNttij;jJ9cK{567AZ($2M-f+ew8N&Yic@Cr(>s`4zYRv{v(TGHuC%g)kOmtAH4G zFb;)N$+UoEHq2bg{uBv=edh2nYplMCh9%XasgMv2zR7?v)m*$_-}=_STN-_xHKoXA z7EiW0)21Qa-~~u*V^wJxW#ji*5l$>!IKd=jq*-Ze6Vg$i%~`hCQt@g`&d(#3iy+y| z90+TSOAmGO%sKn~*Z-dK9F2Gq$2dkd5ot3W$+;JGVv-|Fta+Rrach;|3gx)f=E7doksvJ?(bQ>U z&kp(UKfKMan590ax|0fu1psE3%A*SKQBNOAkqVawBWn7mp669Y0O3R zFZeY&PlV~#n#b+m+7c8g6EO+e=s>U@x>o_&j&0gA1hH^|xXgDqY8L7C6>R!m>;emy z+-CprUq7;a$Btop>9w^hZ?~tv{XM`g%xX`bmsZ%?@IbBUY_$a|mY{mdw8$aSe0H?h zsl5m6==RsGzVx!iz?9`-Pmx!uq_j3P54}h`5}}pIH4D$XhyU)YNF2%LQ+*{m)=NG- zd6+3TS~}{t)2N|#?%897MU(AczVlrOA^;6?@4>h)+fFnHPZE>D9@?@0Bw<_n%=;g+ zIr-$UYpf#^H_{%`lWO`1u9Q$zL`ld=o~JI56`~Y{eMGgHLSgLap@X3T*072T4pQYA z(=Fb1c$Cq}aI_2c9liE{hOmM>gnPk!(Th)xEvLoGHH+gu8&z-`-i*!CSe<>E`B&RB>~ zf_>oe$E~TinKewFP2M?<0r|;yziV~4wm)>|I=uL*NwA9R`-~Yjt)R$Go<562U2n0N z6Y_A1*>m~4RpJW1{=s{#XznaKezBB1oWrPT-|3j&SD5#UG7#$Vy#Cr6Cv zh_o{fH}R=`3LF>s-C7fF3Vwy&Iam`$ankw7!H|#r0}>Nlyc`ZhpoBnltuhSsz(q)s zLgN8|5nT=d4Nwl)Pj08=qzJr!5^d?S5{$a!s7u5Bvaj6{C$tz46wiLt3Sk?>BTn}Y zhwIXdi>ndh^*%LmlW)p>j18m=tlAuLK* z4!1zlP%DI^4hTn0Cp(t7KUF7H|5ZS307M@~aBMV$DTWO}gdjT%{}3igEs7Np0g7eu zYv~Mu0Hi>aElN_9^uwAgRnyIY%i|76Oys)4yYSWhhpeosmf~#~p>HBg(rlwj;oc%B z-h}fk-N$W`20&f_rrt|x93P^+Tet*W!LVWQCf166APr~Tvz$(I75Cb#6Ynul3bgbTFAnk%lL=bt^M>{IzirpAhI4j0+pUAjCE4i&}1)f-DbbW zby+UeJxIhua9mo}vSkU$DVw~4Ut8;H?f<;z9kzMp66>g`v|Iw;tE;Y1gcC^t?pQ zn;Dyj@4e4H{?{Ldu*O*K{A1X#aDu`< zq}-M1NLD+Z`K6scaM1qdeebvP%-QeBU>*bEc4~cqE+2xFoSb3D%F66~OT9huiI3WC z4?TeNb^?{s1~IO(mY%4_yZch8@Az_k83SyojJU89anjHN~+ykZO%?U0pXme ze~@RcxUtqlKK|7QP~|}sK;)5#(xP`y)_h038p1E-0VoPYoX1>rgtt0NjLRLA_oC2H}?Ani7gKPwnX4sTVm zLL`!gU!SoMuNZ(P3J%ZNlK^_z5F#`&s5BH=EtQ_8#kqLXKV`3wXHKZx(4~V%a(V?D z(`5)#CN?G!rf^A2OtO40oFfMJ4NSbae*uUl$4wB&csoashEX;0Xw;<BxxwZwUt)#ROWgA4!TzB#3iZ`B z5C|A3R5s^Nog&$AD4DHygJ`djIJk%8pQz6uR8hc2?#A`D=F=Z{SqWEReHugOiH5+ zc&b%ZlmoxeDDj^{>ZtIcsFBWO16EC>wvu5#JCI?1X4hY}-{tvAHVx_FyRY)>h zk%E-5KnPOIscgu-jOD}kZnisDud$M;)2tq;>B-N3&aUy9z#I;7ug`w+Z=Bt$i);*^ z{^UondABhpB%ww%clu@C;hA5Nw=ly7uwO;;?jhP(fRZ4G!3h`Jx~&XJ_Cp{4h%LMC zZmVl;rkjjEI4}flMcL&td@}H~T(Zc1_{(3|3wU2D$Dc~^B;uHnhP_Qpt_qwAuZY`s z3=#xI%gVWmi?|?FPe5b1I(Tz%>?qv$;~ul`7WKG;!S~XI06yZ1z}mjP_9wsr<8D4` zhiqxWWkK5n_tS3S@jwgt^jF@F^8*Wq1);_6)wf}=ILCF8eOT*J%@IE&@g8ZRrwf9C z3S)S<8@;j*TT?nnG?{GVv6e&%qG4sr^(CSv34yb3aOyzWLBf<~i0K0Fb8p$0xF5(~ zo?((229bt(aBFR9BhxaOA!TJ5}Cw!h|6lqNs6nQWNjYEg?j_FM}Xy$fe;B zr^*(Mkv0WSpxnYpikzib;Ur29U{BN7xKu>yi7$!>{c794=NwtBn?b5_<{%{v^WvAI z$c?$xAh_lMTaW`>cO5>m7;8V0P-+CkA(L6 zwvaSCS6PPoF5BjktTh}dL~7l>RyjMEUBgpMaWyqH)pqv6IlFz?67uM!;23d|0(kqZ z12tDDTv{(K%sJ$-oBt@K6fT!Lb#oGk^fHbPmr*ToD~A}B+NOy$8x`W5lcHMi*8R61{hFZR7j4a&JDshk zt+BykmkNX*6fKZRrY`X z=F>2Wc-RSWVM^Pu$5pcb^<8`HEGo^Dcrw!+y`!nK)U_d`#*qfXoibL`_&6C z*h>&F#Y2g(524EY`rrS9RbISolXLUz5o}?@1nVncUtujx=g!zOKlrivkp?p%%t?%A 
zANIZ&f@B+!VpYZ>cg0Fua`)YqkEf&`2`>WaQWBjz4q$Cz4p6T zkh<_@RDMt8%8Nl_Qp{Nj<1$E$q;QC%z>W&>T>y%TUitb$G|sowfMne+NOQ{~$bQj& zKlnBne(+BBZaDV7Uf57zH3hkGkX^EwPt=cz7|IB)k-9a|LO%B8hy2PJr{Gf8bWC>@ z6&*P%T=5ee2A?v(plqZoC0+ITlz6olZ#gEH`c0~Kn~_O;0bG&Xy?aOu+koM?wyGXS z3dAP}N)(mTL*usq;+w)!B+P7|rr z{93Hx@*KjELA{qpmaIC!t?RqM8HUoL*c&@Sn6lDXK&Uvfqe5LmT1i7a7^R0mG!iBi z94jeCW2E0+^F9^EtIWZ1(UiVm;S$PjFJkzv!I+)`_ch3UX?WN2l$Le#S?isXkh1#M*lR3t0|i3wx+ zj#GC2{8f;f><4&4G6s@(bzk*dGo|m0@W^jI^nUK0^gen)^=;(44v%W?jmJXi$ng+t zf>L9QZ=Ml-{C=1Nx-5F*0!Dj;Y&%(7-u?PY1Wm!Pw?FRCbE3njnuIw^Fo%Ps!C~tx4@D%ow;}&+oH$utgGl~6Ih6YWUQ$ou zWy(&6&3Ha=_8R3L-u=N(;&r#!zW(LEqvjJyl(`~G2{c*D$^lnXU1dM|&iAoBMcRXR ztg)|Kw(!#PWQ<6r*`T!C7L{mQbgZ0Z|$uAG}b@#IIyAc26W zdvxN2fSS~Y7U!2g{HZmRS6U+YOzOs_&UTzSIxUY(A5-VeCcoYq3&Df14}1{9n$IpM z@ZvMk(sZDf?Nzo1n1n;p4GjJHc;3z9Th3j&r{tdo_iUw1OS?@%iGf?nn@mEf2opDAlMUyF zok%1B0ssYwhAWcT9tR-G$S1C%wGr;Go(#pU=;Glx!g*2Y(Fc(jG_(R{pEw~ny8#H2 zAE`_W#t|A$kjtu64vL4;=7T693-L%TOwI>Tq99T(i?rhcxgRgDEmV>Q$Tyk=p;}2v zB%#ehosf-3Uv7-wGLvv$<-BfeXTu-~H!O%Wf#Fg2T+=}Eg*SI<$fWpGXe>kk0`oKtvv4;-NezFitnJ zGz17y0I2z#XjK3Y0SItoFzS!WlXysN)K2%6F`t0?NHX+L;@E5Gs-W~%+erW1Trr2< zr(j-iqVc|!hCL9tQcQ{9U=Wp+Qs77uf6u$#WgG9l$G-6&-?n3C&sYp9)%zcMoBcHi zF9puLsp5(Y=#2;NuhrM1hQ4a|-FF|w?keyut#$WUx8@EDK{9H%blFbsJ!p~GS30px zMS?&vykeuQrdC-C?$VU+h68EGF+-}es;gIB%#PTtA%c1Jk{T%&r{_jfNSeN*Ld+%@ z^eZ2F%x0@tW(W1ASX~`dWG=3Ln|G+gICxS_K>IN}35kna+(bn_b~ zPM*0Ybp+?L4)MIJ!47oy=7$Mf@3$x3@tEbtr=db4>p1q%yC1m6O6E}79nZiXT`qcxA22>#rZ8*-g5kr(fA_#YiOc7cX+DiZt%s1ketUbXoS{7q`4( zhe;~y!_&8mSVl=B5jZ8tR`}V^e#SOz*kIrJ&;PV7*y7~aqL2%}h!>3S2OfXS)~&k3 zW-MF8YeE!xBOhv2X>8yE?AJK9oTFSuH!)Zu;>sg9XXzr#n=!@8NZ-^!%+#h$o2(m# zp%R8+;gY2=&jeK~?GkIPk6_+mnCT;^T7UE0b9VYPxfTgrPDN6VMWPn2bgC)dTX5t_ z_-@E=`r~Hd7Qx?(6AFHjb1n#aNb5Q1f$M8W9Sf-S0!z|QNKbIWq_aI3(znqq1iTA! zyS~eT7Qgg#*?09B=uN;&Pn93E1zO0XpSXvN{2(+2Q0~c2?ZbC)y!V|mwb+H+bVo@n zik|N$C6Lr}N@=5gB3ODEIZtR4vFXR@K>57}aiH))j8q#*03^>HskmI{fOo0jl*)$x z01he^asD*z)1b&UC1(a{6?~}h!ce1$z7C+H*8Sv$EtSMDh{Xp`D(}+&&)R#y>3Nk0 zzUP}hqbV9`)SGR|k~=oVxZ&6sY&H-lgpdtcvUhJ1NbW)?;U>S_n`D=egoM2*+=SF6 zu(=@>5+`0d#@HBl8(byJmSpu_ruVY{|8w4V=9|$sHsW%mZ{B|DbDn;l6C45rp^?0a z;|uXZACH^eayYh?9nHkQB4Rm{Ne`MKBpne_BqgSW{T&n4=DXMzTR?j^^Hqrn!gSX? 
zL6|x*-j0iG$fSjGrquD^aFN&In6txS#%88opLJF=Xkv0GtzO%auDxM%dev*!vryM} zF_QCr1CwzA)smrR8esdHi7_}izBD~>KQ67`+KDSXsuB7@&O?Hv>xN^MIozTwu47Cb zzev`@#F4&n3>GC$`Nai)We(!&1@~>);)KbMvQhM`&%UP{(K_HXlJM$Daalbn@(VBh zR~SR^!EOUgq1JOJ2v~{Lq+v3s;Q6s?r)_ZyXls6oV7hN zbgKVOvsm*=Hl$NU{VcA|Mi+4D#FZ>|U4rED+!Id{SoKg^g}2xtBqGS{gKhy0$a9i58n)OLMR(-SS!t>MXZ@oFStwLQ0Y(IPVJ?RMu@zvY5 zv12T4VD2brE0eY2YW?KCL+ObfJ3>NSk3`ar3w9Ttu1h<**@FCN`isweChga@Lt6&G z7s9bNT>DK4ReSFth_{Yfq4l93+mLQ_(z^8~0`%Ve`g9?6I&z#4MR*Ci6lC+d4QyrJ zlK$~;{yy#8{#@F`4!v)@`Q~&9O8|yPSR;<_>G&lq(@jE z_TZzBh8;^ssRR4>rTgx^H&nPCI68E)*t%V?Ws03j5CrE9?tZQ%ERd7)8ej@iT_E-3Z3OX@D2F zfk2TB>{hI;0jU}}SAwL2fwml@g}V~H8LlZKMZD2Ot02hmn4+QeP-yqNpMj{1BQ7fv zYfNheXmvXu8Bt;bu~*RZqvF%<ufT|u~~Lb83FOm+^zBvN5;Pj zaH<|aFqK+sr&teB^AY7N0?E0SH9so){_JO^TWm~3n_AW=&du?qa-E-FCQQhlsCkS^ zUr*mnp;8(^rKj3?48&pp)PA^R04DVV!|Pbj^2tB=gR~w_Vjrq25zk5NBUfE@B?N5{ z&(i%MY}MY=Y4iE#fiPRsx4w0M8fNkAMH|nf!frvq=A6K;Ywl!=p4+vDZPUAuo=&B% z<;&9(+n-IJ{>vmgTKBOmFlGs%@r41KwWt6 z;KB6#p53vXdkbnj!;Edgjl7?_8;tiK?zkhJ#{OhXQ1cr?J!^Nm(7BD!Lv`SqE*RhW z5)%wIW4ze9qf*6e?|-*RW>*KSS}BNO%?A<^aZrtU0Xzr`3sbzwSe1CB`nk^*o9jB0 zmHEYczFXYT1~`k07+Yb{lvSGLS6sj8U2kxgaNw5CqnCY8<~oRy73K$x;FwiIy46mM zV+0)Khii9#FG>ULK&aqcpsOT+f&s%T>N*z1s=jH35E%-{?b2QAq{Y~^M6gvMIjN%# zM+hn+afB&FI`D_*9LTwqdrLw5Mv^#k#2q||m&G>eDv#^2^FXhdJ6US-I2|G(ZDAs6 z+h{~TT~C=!96JE=WvzATij{33l}q7r?NN}gizvBCOg#5jlV1;wy(NSUrg0O z7?qg5cJDmG)JlQPV2VlFIi)&o8dq+jTS$mAaWXf~xuyq+^t?P>eeKqC`IW0cCfMv| z&d?h<+CvK+A%bjSF>gbq1GVUMdicS;>EVZ8Ob3XN(SYaF6m4~FiXK4~B* zXWx~60UOkd`}T&NQYLEa)~zsYqv^nb12AI1DEV#x!RhM%zyl8e3sY>oiF6IJ3tKJY ztF6?%iavYizp>+POS&HW#unD*{L2?VpFZ{nzn507WhsJyTtK2Nb?Q5YGK>G~Dv13&en>7q5u+13SQzkV$U7uW-6RlTc!XmE1TcfrwkDVFKvVKOk-V{_p-#`sn-KpDtRqCQYMuoCfJPgOv4L974($aj##wGIb+8=tVn* z^rv%yQ65IHnO5lQde$0=++;56xvoQ!83&%1EbB>s`kB8=|MbDtYdPlRjHLQs>CD#T`Zj|adK^)3~wH@`ME9@`N6RbycT$2 z{x^9H=H?7tN%9X+2BVVgS@XZiS99M~KV3*vXDsMMN}6Jc{Sa=p%uyyI=oCWbGhM@{P~~Y(sRv2u zaiMyvjmQhF7;!yUO(kJ+F{wfSR3kQE1FEoqw-LL_vaaT|X&sRzR(3*6baY^oF^J&{ zyN;(vAK#sx-+clP9EeyO>mnN2QwGUFg5<8J9+lkWCNp7D(MK5&3WF+k13*`-Xh|EE zcOr?gb0$b4wpeEJ@?n+~X!rbXchZhh4^?>(CM~KVF-e&4;DReOBn?fRB}|j359?>x zeUs2iB{9lJ_+z$yXD*~yy=F6nX$!`3xY0&9^aj?6spf>!Zp0p^2XA|8H-_7W^z2hd z(<2YQm|oo57aosI5ar;udB4?3i|%`2qSmvw@&w4&-k0mnHmNy)C|{oYs=QNmL?s|$ zs>(XRSi<0DbJeNLIa{~NtLisof@%{i>ma<=OjXiXi2!`hCaG4+gt_LIXWbOqMUHyE zxeIVJl9Qf#ZolH9R-=HA!MM2HcRxQ7&G}x%ty9$FevNc130iyW;>Cq|LjwrN~(|E z^p-c{N30|EY7f#wRRh?F@mabvZbO(~q1d-RfS?nsD^`dUwePBP^T`VE*_Ox`>iu6jPT-#`{*b_)o zeGuA1cpG-(rDU+qW|D;yahMiQriUUl;Lttq}_iWG(}*EG4Nq7kC7w3AH+aXje2gNGu?i}fTp zJy>QF1fu~jw26oyf!zgF+hyC^qabe($sCv2VW!utEeN660(@ z7=(!dn?!0;s*^C$14tTJqd-)&FeD~pdo6`9UAnv}ZDO&XI%&67cjAO64TCCYQPoFf zU(qM1^5~;*ekcjkUEe_Uk_poki;tnLL3k&Sn6MyGF01r$MV=!}N_wmnsT+e^Tb8c7 z;i7cKRqL2^NLZ<#a%mKkt&vG;u-NHwV*0f#Pp8=N_ujjA;3|F$6_Zs$ieklmcxlnc z_TMlB)?Uq(&us`6gK=~0RGBguyyH1P-*&A0W?|xe0jO3Ar^5vg%pnk2algDSUsEDs z-q%du3t@7s$}~Gc7;9P$SLTyN3M)T4>sTOp+T4sya{v!636w~spXdz}+}A!ch%_W@ zsLnY_aNzap*QY=G#2*94 zinU4FHh?ckDziqqfGi+f>nX2jvqgje(n9gnr^@Py0o>E&__ruD~NS>9Q#Z&Zu3**;X1LU zNFA;bbHS`(km~V_8$xPPEvnnTwxMpk`k2`CCvN;q=vD~!7|sLl{NRVuQSxnNEz~pv zsxw3@_ebG^HJ~f~Xp6Srg*BxzRj@;-Us*(wKKkqbg)KL(PS0=OK`cT7L_<`1kW|}Y z8g#gLdiM+IJI`-V*Amb0HP>Iq8bnlu*cJyFQ>9u}Hv5heMFUmeliRna_k8e!*aF$E z9Rj3=#a*(esRM=s7-x6VB{*{vG4X;hW#@+=Ow=m~ll@*ZIg)d*T=SKMHv=~?7&Vg2 zRdmQ%&rqR)<7y@6PdNuYkDo8nv8pF)){plVQ%@S@M-5x`4D{T2<8I*+&Ip2l_CW_W zWiq=UFICDltX%(-x2w$wJC2Yj;teAuz{Q*XCqpJJzs%TrkIXVTi6nM%@Q^v zqyvyH8UaGr4(&fOm>zs=4}l{0Vf+V4)ZUFPgO@l zsPYJ(kMz||JuYFpj!i4sCK(Q^m(9|QPbUW!oh=pTn=(ZQ+Z}YIcD$De8`lY54EWE8;a@orRjeTD=!jVK>d2xY1Y4LXce8DLK3 
zOQ|v5;cm1+4CJ6loH(b;DPxgI;kYyS)>}1%!7&ZrW9WQ8^XL7rUvZv^dVEwoR{{zC ziLEy zeEx)Sj~3%Q{)~R2)Xe8_x1za>P>eu6tV0Cfy9I@klisGGE2qB=-}5RA=Wn|8*7S)_ z{0WT1GwEOd`CroOZ+<-sRDoxyB$L#Q`@VkH*HHy^r7wN`Yw6A}e1TYPThp(w5LCit zIi0u}A2@O(J+*U3+IHEcY(RM^{l%yL3c>`V0!);IDfgLw)ez+N8kuf3`s;R7H&2u;2CrH>Y2E z{|BfW0j2S<+`DTJ8klv2R%5e8xZ+XT^UWt7OGDxz+SLgWZ2`x)NZvL!;Ba#I`0`2is8Ts>A%N_lz z0~vGJz9k8p_Ss^DBxZx(@}3SMCULUmNSGY9vy-(E7Oj7V|6ERtT+H9OZ>(RwiglL7 z19OJ#hOZ8QXFmma13nwPh-CtltV^(R>OB9I@?|ThIz1RnV~_ znsVYs4T_BG1xTl+)Dy{vd8?{9fW%2|Cz`R98OJd~uRUEpfht66wj0I(*@bN@tbV>)O?X!tZ7wAzVlu5{9;;2?)jJmn!us-ok#bw z{|-(X<2X~$=4ptpsu{pO{xZE#`7jQ}Sg5>|DA~b+yNG~}qzoKTFeHnS?EfY?M}vJ3%*#FKbdqpI@QSa;Eu3G#}6M(hYlQM zBfjJ5Z$I}>Y0sWLFd3)QRTp2H-uH7qpH{6{mG&QEe3ahUMriTc~|8_hX|3e3D2HI2^tEoo`QXyZu(Aa5nVBO+4F-n6n`INth$K z)N<@3I^~!}-_p)#D{x^FHhjnJOx!j@B1gvQAOG7YQWx5i*KXL%Ep++7zQoM3$h;VP zVF3KV*z37iDG8G>$NW)6+5%A$VQOOPoQ~yd6e9Qbz<_C$gm?I%d=FpDDfOzVXR>b@>ssl$z1QBnr40mLC;Y6S^5 zAu+Ww1H0gax*?3K@yv4{8-Y;oE2B;8{m128=X)MKk^dOHb(;2dqxYUkjS}{QCKeHMY^2832)vaDy2vlq)$mOKvpMCo}?dhGzQf?t3j?G zc<*~rS!0wZpGk<9`z@^_I>>{MKaxKFXMc)476%tfQL4Y<;!DyE*Ib{rY}rCoiqmPw z3(uz~cRZW6bDRKaxkhdnwUOjIqLD;XymFGG)&jod((7?98XN9U+X!X#hadZ|7~)Zx zqvBNs=UTUV>M)LaP-K#&LYhQ-Bd4N!94*@!+kM-IJeQ-FfJaCm+TMsd?1_K!5FW5HFC5M5h_TM+E|efiAu8BfpuR z!Ck(Mc85ZSXKH4&PdN5bRjZaqls>SIAVdh0XjIR;Gk8BTSm`=E@BY*6Z%;=L9!lq} z+Yo9#$6AKx?uYJY;q1Ef!ohv%lYjqrX=&lupxUtkuh=L{xB+;z`8t{07Dv}`tN{=v zh$cUo?*`_?5^$lOwBk~kxnPFqKVi!8F~tmsF3xko7L_by3A-h6ajw-$;l+$-NH(PW zw}kWDJGH(-dWEsJ)9g3LH6O*9S&P6eAI#qODwYCqKjRYS9DN`xSe~>Y?#HoeK7xM% zmRvL7AhIxn7Y|A*np_bnZQV7f~c9fCE@zX7D~8Jv!o4V-VBjg0%A0%r7qARdctL~WfIs7 zWJUniX$FK2?Zq+E20 zp^qYDKLcyHluq*Rgp=ta|85BrzeYGO!-d$vF0i$|5yLM~=v=o&r`b~^qfCgLml4Wd z5Ac4r-F|Z4u5=oA)C;z4z|dMx_Z`Xf4;ervwv)33XNE?kB+lr?CF$_~lWEV+!%V6^76LLBV~xc9%uyf|jU`I1m+}W# zU}F-odd9nqmw&b2xn;r>(&nhnDX13f>+uxS)jl{B`kF5mCc)FOcXFxX^yjr+P`bH` zr}cz#0_qkmF;Hz2o6eCLXeNF@LI9{@e;_Vzm;|zTV(Fa6f9sd)5=W_85Q+F=!5`mU za4Rk$9>AL%U=S1TmFfoD;yueV|FSI0R{bZz@m{D$xF=!KD{qkXOjbLJE7(2Hkvmae zNxZQ+@K1!NQQoBo?xZSVk|2pFSFjhLkj0x|!!QLSDCwG9o2#-EJ z{Q58=W=8`&y+DFSG;yCMLuJhpCd!kf=-sATu^Gm%j&T_zRM-qrSw8XmA7M?^RtE{V zV=ggXO-NmG@FKiWv5`Y2OpJsLiXSbdpJtdZail+dM`p*og#Q-SVf8-yLb~JA|Cnyr zd@&A0PT~wJL6G>~{5=;gIEk@W;tG|X*JuS;fe8E%bp~W-WEUMBj^IG@-v9C&cS1^+S3G_OPCBd)QEGJ+6IptXyOc- zA@`dg_~kcze?bzqDTmGg2UEoG6Oi-#Gm**3;Q z8&kYgjZ=&1L_6#&+9ll350y*a@Mfwb07*?z`%qzlz(@kxU9y*l2p_=1psISHV`(#l z2?+?vNu&x*<9jP-rg92!5_U2`Jlac;%u`4t#F(n1!i`iwFb-g4MCx#aXk^uT1HVR) zdLyx?^wOKcyGNpw^$ZL?+>(tInYwvkY?Yz+AHdy*M_@nki-uiOMH}k5ughXR<+lT@ zt8n4$!RetJ!n72LrUOEy1k(;6Dk%-)VP(GwNWmFaL4+OWaCu7`lSI|sM=KEIk;+)w z+j}%kvkTdV&8v~dD1+E`OK^>^Ac;&sFj|n(8lBXvJJ5}O7+v`Us*kqzdKTkBBzVuj zt&I?iwpJo4qq=)~`{DEm&K*Y&olXb$o=9T@d_RSw$q4mhD9lb`9sR^qihgopAelDl z{fIQXq!aZU9CLFp53Om4b!UeN`g-Cs&OI;y06+jqL_t&pvEW9iUj?TOh@h+RZR5h^ zzEG;LM4$qS2v?xf_}orA8$etlvl98l_VK*eTBtjK4A7?i=07K<+j*$6aBB|$mtPPJ zpwB!^F)g^x(}Qn7#^W#$Why7LIl6YGye^;TBZwHcd2L2vGn62bzY{TU(5SR2rzZi= zC7B=KThy=M{OJ@4lRp^>mlFiz#wU?Ca5V*@H588PYE(_?-6>)a`Pm|WZb0dM`_JN9 zP-}PN!D+urkfMKiQ)KBoE??+|@uAYuN80v48Nql&496f&*4x~XB!G8&s1amrgb<@H z3=Q_CxBtkS(trN>cjAoDn(KfnyAkP0by_f+G3%ZAvr{Y&fWTm9@C!Q=&jGYd2qaAO zMGH|I*ls#~`s06*u3U2-ng>E1F}`7cA;*lU^SFzo1q7w7k}!qZh@}pO(~?kC7^`u| z2l&13(Z|zA|K!ivI-MN@fTeLJy^?dN1Ib%r?zk}pXm{?H>;Qxskojz%3RimRxwT0X zxZp!S^Kl{J92_nw(d>`!& zcCXrreqfwRX=Tqh@~Dq$4;wM&<$^Ovtl9T+;N z2xX!URzv9o#IBdf2PX(Td5WmcV=PKV5XyioFas!h;K*&qWMW9|&`A)FHYBCIgxU*7 zQXCkD@uVW=oix-&(rU9>h6-vOZlSAC-*q9OwIV_2Ri@3#38wAG^(wZ{0TOZMw%C1i zX3?n}xbLFkA?yJ-I)L;gNM~jSQrn7FJjdZ~QAHBC96dFY5KhVkw%XO|Kx)Rht0#<+ 
zYruSFY{5E$k&VOM$_b7Rft&~@xDVUe!DC}-_4zDZ#PMO2^;+ZX?n{(BCJ96pFQ)Js zlrX7YMY5dcS3$ZQA0Se46U@UBoG>~-Ani;l>Np57Jl#7+46|XjmEN14dh$p*eF|?T zh|@UZmC;i)clJ+69Ptd{w@~M8zHkeUDI1Ugq>M5xNM#Uz)f~}3jEzVv2o^bka^gNc zN4=QLOnUp+38>A%qt_-UjG*h0zy1j>Ux_3p4o}N>Jqp0w@T2*O0UJYd%y0-igItVN zrX9!$WfGSBc^!F^k6V!F{NTiPQu(HYITKU*0Pb4?BN5fs(2B2rMNdfVhOVQLu#6x% zNk~Ri{h23udDJscWNDeI)>OTD?L1K0Qqqb!A>#ux=+AoFKcy5e9M&uSQs=yYksI6; zt+~OTQt=3|DFZp9JrzFlQ-A(30s*hba~0QYm?0&v2nI{*?77UNGTmsbV-}63zCNMg zIK(~52{I1y)h^VGltbL4^f!O}S7~!cPr6_gTUnzbw5>|f%u3GroL@vi7e~p+kf(Mc zrJD*{*^Ex0F5_H+q}Ss#F!322NACL8g9LH)8EG5ykPyNbjvL_vpX@H21dL<=h|R|o%$kSj}GDLzJ&WD^uNSO z0yY86GfjigG>XrZmKaARCM6|pYW2L(2wbqWpO7j&xmmqdW*};vtnPm0C6}i+UUy@<_`J>31SkbSC((uPJG?(VyL)@uvFG`8 z_#^==*?6t38MOq984U*6hG=mh6M0 zs0SKY$Va6+0G{=P9omc+U^n$@rJZ%O)6p@seKTqnk*k|wWx#M?vPvR)hd#CyU z4zQ=vf&G2y#ohhs^vUV8`-PK8hn?vcf9Y4!#g}Z0`h_%#+EV*jUQ=cpC@DEfM&51YN+*8@8Ku`$=1s+0~WzP2OHDF59~{KeD2ZooyQN*Zh%X-U5PnNL-2+o z9eFr7V89JTLs^V#2b}jWf9ON$);GS9Wn;9*asWnEcZQlWjE^cGy&UNRa_}#ckjX+0 z)E)IvIL>4wECLUMupBkt#gF%9kDSU8hAoV&Byo8@o3GC#EO9#j56GaIeX^VsP8N~8 z1VfvSlRfVHZA^kpn9R83K0P|=%sb|V?IRx%+yr@8x)M<{#Hl$DY9=sxELt7oPP)I6 z&(%1@o?Asd{njY>Gf)E0gBX&x#7ButXer35MC2+hOKT0%DezQr!RE%Ae!@1Fk zj7_OZ3!_*1`agdGefDs=xQA`$nd@0%B43N(-S`#x1W}@L!4>nM0-PBKa~RlZM2$I3 zpPq!EKe=m=W{4o|4bbcbau%kkVBuZiPV9azOw=YYEM?}oWsPu-@ z+I4LALAW5d5YcUZ6mH+~oJ2F!)3ZF(cM_yJaE;C<`Csl69V_@K$~QkUWqc$E)Q1ji zC>Us?TL3K4g%U=8nL)~ojbmAk8>e(8KZsjMlhJA1D5V1S61ic@vOZ0J@0^iz@gE}6CfZL%}%n_ME~(oq84Ltoo>bM14pd1 z1Tt*}$iwjgRh9<)Cb;KDxbzC1bCX0GuTx53LKtunsKLHchnwbxbo2GEOIKg{%G3!E zPT>)=ci-;xou?j8+n;|HXNJ?L^BM@MT9?+b8*V!*$SuoB7F&>q8czfvRRtIU64y(` zHTU{Z7w!kCKfS9z9Xd{PiIPyqELu)ow&H}aZ8?Eu={UDjM|n=QP)U(@3u`|_J`$#I zk*CT6MFt2*fS>Ki!%jcb*$rc9`RY1UcZ42U29lu{%j9_HC*_^uX!GNX1^2m}qCcmQyyxXZ8D;+E7uI3!9WI2uT4^Ej zqJ2SpA#(rtH=jwjy%9-)z{QR1LpC_n3)2Jg0`?_}WsJOtE>8A&#ePu^%#&jmo|K#m z<2qtA08*;snJ(*tBvnq8n4|zKn8RwG3SB5qsRzN9T_n7x$Q|^Ia82b-A zF%F@b!QNCyjJ#cVDV~4DD^knG4U`8gN?|Eaq5yIU$$XJsh@P=4LQ;wHkk`0_~ z;8#_tYGesUFh+9kNUb$fAaOFRnaHz7ieSr8Vf(Ur{OE7mU~XlOC5$P>x?h$!ryR;Vx@8b9bE;91@8hj-Ksv|#TnAQB062$)h{H4kFk$|InpAn~`+sU?0&&fIlK{W# zfhO3OBVXV<*~rh*2FJSF^g*kYeDpAy)NBOE8AG>C)OV1HG_zYWBw|5gB=)5(AFlk9^J5jfxhc_C9 zQ6V(eI$%l-d1RgPh1eU7sY`t_A}t7fHFdpk84K!g-ENkf(!~el zjwX5_ws-83@PJAtu{iB!c}ACSvR{9#%EDG)BX5s6d!x zi}M;CRF|V|Y3H--^|PG_)YzKN3=Xgt9QL`?^^lUG$16UNXoV`j%XwUvZdCfjqp)xSQfxRo0@KnuLjW7pot9HvfR>s(j4X>xE^P*Vaug zzE%oVQSoJo(0p$iN#nDh`Ak}mJM;}Vyq5Wm4Qv9 znlO2;q#;pqA!AuXL_4IJwjZ^w(7_0C`Z8JG#96kyE4-Z8G)30LL5)8nZZ;xvIVz76qL z=K~G9N-=@(g6lI90$w`0kMmQt7u*TNNnegOzA^goGU8L+^1AEN6&GzKj@5CZW8cpz z@uR7){|x%?=1i0}vCj=Q)Xs(>BzXuS6G7X69z_C>>aRcp44}_qLUOJ;V2Xver$Fl8 z*?A`2|LDPV{K!yhLx;Tq0(Su`vDaastcQaXxk6#FWnp(ft<;KoN+bU;_Ja`rDM5*> zHjGd@eWr?T{rOAM`P;fu%Thdlric(tN2+O*gU+0xgh{m~6B9yGahnVJOZ~sGr-Umd zrYUxxot$h-Pd|A$J@mkCY(VI}PmQMa>o%p2eE7rZ{4HznI>bPZ^eCZ#1>jS4w9I}8 z)6CQ{R2>IU;Xt4!PJ=k`4r3y!UJwDrCX70#a4%FMlkV8Nl<3O@#q6aX)J>!L$uko~ z#;&BV-?Ke^=_`*AwgpEAClV8;NmT<3LL-g;lRx~u^t!9BfzaaZh`Kd^!A?Q_6c`EO zPoWus2+VSwUXyeuj>r>nwcyVpfG-E1Gh6WfOI!*ghy%6Xpsw~}t&}$sjG>u*98K^8 z+gPMoJQvqdTKQ<+u|$CLZgQbksGxJRkIXa5@=j4yb(dH-)o8U+sd_BuLrI+KDCGXH z-9^m4ed%p%3toqWQV;A+VH=dOsFs+5$q#Oc!j*O?JXEmEvr=M`cBwvV0(nfM)@;O{ za(dU^bmu?cnQnqGt;RcWl;l3kfg<=DoZVw^pXceoDKTQNuS*hj^S3^=cu{JOf7nlGJw zbFu8sesk{5_fQO-`rTD;UVQ{`3v;xn<$lJQoP-SV_d0-^n^T3|8=Q(!OBn;5Sgh|8E2nZv)bU+3<7Cd*MP*=ufHelE!^R7Pq0;w0$o+$=$DxoKf( zUR(3(5F#| z!2`Qvn!texPnofh*K_yIs0LyNoGc+#mNf%#z zH4z_Bu{*DsBYZ*Bd>BzFx}7C%dAU?gpd35$VN&pwol}0F&#MVlb+(~~tiDloz8JhZ z$MbLQ-_QFnH!?KU zY5rAyGJDUu(Po*87$4?v!d!~zyAwc`|F#DiwmH8kL1r)Qqg5IVWOZ0p9f^}Lk`4H5_> 
[binary patch data omitted: this span of the patch is GIT base85-encoded literal data for a binary asset added by the commit and is not representable as readable text]
z@{){6>y4KafPuj*{lz!G7E6=N(9SygGlegIeE3@Wm0$XD`r=PKo)#xq(H8N+h0|vU z+-l%mGGwhmT7s5CV*>Y=CRtJgDvQs*hUn+N{^9r2@BZE&BYc$8LytWi%Odr(*OAK# zQzrzx5v|DTE<`8L&rYh?n3u$Az%?*e(qKxRXn+LRfvKr^0iWLjOA#$lNSZ`rL+iyG zRMAxW{r~-!D{sC34z}0sbo*TcY279U1!-b@s)p4rN7D~~_+dJG6^V-S<<@_q{sZb@ zH;idp+O?H(IZbkKCkIYEI<$ffv`poKt1}JhgJa{Uedm!-KvaNNfq|)}Rzy}1Z*=4e z5qX!)KEB$Q!r1&kmq1%uGBb(-F42;ahQ^R6HNu-yJBML zujg&!)S#k=gy^ z!)YUS|0M{88lKe^xRvLq-SFXAPAmyrNS=KAKDDRNIDq*|4US31EfX)!`k#B9)KD(2 zu5LcKx|JzmHN7xNvCiXEfF2`RaxiTi=!ZE@lj#i;hS2HYx8l47XRRzRkqXlt&xmLH z9WToSHQvuV{g;^mq}_WMo3*x2GUY{ys^Ap2-WBgy)sNLzgx0o-X|6se;k2p67VY*2 zLR4nzRi?zD>x>A382cOxEvoum&xDTZ%EFkoIOc`cWWU`!E)&hsYS|ZR+C)Gsn7dcy z`*}hf4_;FpXZHaj+E0}+dAaZ6Q@}Z!b>jrR=Dqnp0X#X}A9RfN!!dB`6YK!7m}ibG z;@4fm9A1M2-hf}Yi2})^`k{EKK@Z`k2UcZpQ2c3xse}ebyInZYfF-MkyXYm7ApO^9 zNPmHg0VDc<-B&-|y zD=ZmH_`++cI8edWuuN5q#x^ws;L1Fxhj^blJ(6B{@h}S=o=T0K>$pA#K@@XGNDD(@ zYoXC47$~wP76TX#bm0EmZclgY*q*i_Fm_RN;KJoo>By-MqCjaYJ{Sv>HuJ7pq`{5k zGi~i33L#lIwGaq}@`THQKL}PWv?oWhA~`}&jl^4G*?SpO85q@Iy=s6d)se@9{f-ZF zl|fr+WE|?2)3FPu(|`Wz-{2Ow0;44GVxGkwFHXWhdeblev;T_plWnQQI&m$U8KGTu z^a+Xq8bE(|zgm{mVwuo%&3IQ4yiY(+zx%?g>DgD_!f3^Md$;eU!u+PRzK@bi>sY;^ z5pBDc2tDYxN=!TDiCXZHHxg=IMwnizhq<;vU#%Vi4Q!rmO$`7d$V*ONt50vin2w#eK*Sk{2XC&x*dpSBi8BY1UK$=@EqWLS zi0GnH5oN?IZUbS=!tU?y0%4GNP+x;F@e1Gvp#zoSu<_0H+JReOP6pCU6P9UyiAk;o z92j!$i02C6*|b3Oa_uOd6(Pz<`7{j1gt-smI!T>C3%dq$f`QN@`i3gbdz(=Ao5{~` zAwWwDSw~IN-~1dkqsA`foJgv%?5-F1`vFdBPomUsOe819GxZbD=OrHgJ7xGARg%rk*+o%_yk>hu?jZ zVx7m)dasaE)s-H9@BmemS-qD9-D(+#5+4bSM4c}lg=Q*~k~q>f|5jhbxOtog`X0_p zC>>|v9?P0=t-^v*u+Q|qGK4?En1h*#d6=f?9C8u&W(I8ABLOzPeLU|K7~Jpk-Td;C zeVFlP59fnln zh{&A%Zd}hbGGDcNp{3tPm|cHY>~LMoH-6A}`<QJZ zIZebY@PPm~;GYrgw76*YmfkoLYJgU-k!k9JSR9*SSshkG*u)x5cW#A_Hd46yDl`Uq zgb>=qbsaFAYZP^!N85bl;Qgrwf4G%!nu&fE@Uyrj^zXPjfr6!}Csu#0ggXOG1L4y- z;0n^NlT=HRe$3;*Ln8#3XZYckpnd=G&;K$V`tkb_O1{YTq&B5dXzpjxbbkF`{N#y~gsJ`gi!Y~lkDo}#xc5BtdX{!tt)?Eo_O4x< z(&mj_FsC|lDyi(E)&#CvlC6VUt33$t%_t))Xu2yz@68**jr&06+Qj05jdN6ln4urI zfGVtHI}1Kl7@N{>{MN5mTo~*k z`>yrrvrpc}(%pEENSJEi#g=e)+3hlUOmAPVO@~iRrQvgzLq*lp=cnGxQ95;T_#&pC zOCdcuCuC;9pddb$9vWc%B};`g5Z$KpuuQ}C1=%han#v#Ua{_XHfp<<`6M>P?8jPD~ zutCVJn=l6wJ3l)+fp3zM>Jl!{%1Yg8QVekna8b8zflzjPLq+MOGi#M2YhqOvLjh}4eMu!S6>-8(zz3xuUztqBAZPEb)bC}=f^ zmwjbMBHA0+5`RGoGNG4ApqrkpPak|Vm45Wx86+@}wUO{koQn%HXevl5|Ktmgrh|L7 zrEyjSwMJaYEN$Riix3cxqqj&D`D5)gVJN_ZlruBE&i#h)h z07*naRFS_mb78}t5t*nn`1O}wOy7O>?+HW2zTOE_o*YRJ-?ck!N04a2mM>!x(B(j2 zV9xVv&_rv1Z9Fh2H?NQ~Kv0(&ihB!bIVB_!#L?1Fwq@4UqA;t?-Z)nHbl}EMziK)q{sxa5#Q5 zPbB0@oc=a&VXe)kn&Jf{u zh4g?am}d<>de~X&AM8%+hgbrnX*q3&*7gv5p()le?;@{iGfcXRIDuM(3Rmndli$^b z>tPAaur;CaEujgs0)qh-iTH+jXxs1ve)=!}+Ama2ojMi8MIBu86wLjxP@s~m>UAv+ zMaBf;i#d<>)j< zZw=1BNqkD9tWo4zEKyln=?Zv?pawLUizJimgU~g#avva&NyGrrehEz`I2_?dz<@2g z25kxEhA)h16efpmD$3q>?>?N9O_&VNWQKrNp+Z782!W9Oyn^ z+XCV-2^JPf>V-MEiBLR__R)VECX=8KGV%)f0Ou~0)3?8OG#xs8nL~X%YdsTSfhC*g z)5CY~Opo73_$eA$nWz}ojH%^5mqM201zss2h#1kNZg~O$exY8X>&Ic5Gub?wCi7$ zp|Ivftni*XxNsestC3~VC>z1Xkke<{&OY>(HUOXMk_