The modules/processing module in stable diffusion webui

modules/processing.py->process_images()

p.scripts.before_process(p)

sd_models.reload_model_weights()
sd_vae.reload_vae_weights()

res = process_images_inner(p)
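
Putting the trace above together, a rough sketch of what the process_images wrapper does (condensed; the real function also snapshots the overridden options and restores them in a finally block):

from modules import sd_models, sd_vae
from modules.shared import opts

def process_images(p):
    if p.scripts is not None:
        p.scripts.before_process(p)

    # apply per-job override_settings; swapping the checkpoint or VAE here is what
    # triggers the reload calls shown above
    for k, v in p.override_settings.items():
        setattr(opts, k, v)
        if k == 'sd_model_checkpoint':
            sd_models.reload_model_weights()
        if k == 'sd_vae':
            sd_vae.reload_vae_weights()

    return process_images_inner(p)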

modules/processing.py->process_images_inner()

process_images->process_images_inner()->p:StableDiffusionProcessing

seed = get_fixed_seed(p.seed)
model_hijack.embedding_db.load_textual_inversion_embeddings()

p.scripts.process(p) ->

with torch.no_grad(),p.sd_model.ema_scope():
    with devices.autocast():
        p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
        sd_vae_approx.model()
        sd_unet.apply_unet()
    
    for n in range(p.n_iter):
        p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
        p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]

        p.scripts.before_process_batch(p,batch_number,prompts,seeds)  ->       
        p.parse_extra_network_prompts()
        p.scripts.process_batch(p,batch_number,p.prompts,p.seeds,...)

        p.setup_conds()
        
        with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
            samples_ddim = p.sample(conditioning,unconditional_conditioning,prompt...)
        x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1]) for i in range(samples_ddim.size(0))]
        
        p.scripts.postprocess_batch(p,x_samples_ddim,batch_number)

        for i, x_sample in enumerate(x_samples_ddim):
            x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)

            if p.restore_faces:
                x_sample = modules.face_restoration.restore_faces(x_sample)
        
            image = Image.fromarray(x_sample)
            pp = scripts.PostprocessImageArgs(image)
            p.scripts.postprocess_image(p,pp)
            image = pp.image        
        
p.scripts.postprocess(p,...)

Together with the scripts system, this function forms the main txt2img and img2img pipeline of the webui; StableDiffusionProcessingTxt2Img and StableDiffusionProcessingImg2Img, which processing operates on, are the two core image-generation interfaces.
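
For orientation, a minimal sketch of driving the txt2img interface directly; the constructor arguments below are the common ones (the rest have defaults), and the built-in output saving is disabled to keep the sketch self-contained:

from modules import processing, shared

p = processing.StableDiffusionProcessingTxt2Img(
    sd_model=shared.sd_model,
    prompt="a photo of a cat",
    negative_prompt="lowres, blurry",
    sampler_name="Euler a",
    steps=20,
    cfg_scale=7.0,
    width=512,
    height=512,
    seed=-1,                    # -1 lets get_fixed_seed() pick a random seed
    do_not_save_samples=True,   # skip the built-in save path for this sketch
    do_not_save_grid=True,
)

processed = processing.process_images(p)   # runs the pipeline traced above
processed.images[0].save("txt2img_sample.png")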

1. Where are scripts initialized?

Scripts are managed by scripts.py.

ScriptRunner->

initialize_scripts is invoked when create_ui builds the UI

run->
script_index = args[0]
script_args = args[script.args_from:script.args_to]
processed = script.run(p,*script_args)

before_process()
process()
before_process_batch()
process_batch()
postprocess_batch()
postprocess_image()
postprocess()
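
These hooks come from subclassing scripts.Script; below is a minimal, purely illustrative skeleton (the callback names and signatures mirror modules/scripts.py, the body is made up):

import gradio as gr
import modules.scripts as scripts

class ExampleScript(scripts.Script):
    def title(self):
        return "Example hook script"

    def show(self, is_img2img):
        # AlwaysVisible scripts run on every generation instead of via the script dropdown
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        enabled = gr.Checkbox(label="Enable example script", value=False)
        return [enabled]   # returned components become this script's args_from:args_to slice

    def process(self, p, enabled):
        # reached through p.scripts.process(p) before sampling starts
        if enabled:
            p.extra_generation_params["Example script"] = "on"

    def postprocess_image(self, p, pp, enabled):
        # reached per image with a PostprocessImageArgs wrapping the PIL image
        if enabled:
            pp.image = pp.image.convert("RGB")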

2. Third-party extensions basically work by attaching to these script hook points in the txt2img/img2img main pipeline. Take adetailer as an example:

The core class AfterDetailerScript in scripts/adetailer inherits from scripts.Script and implements the hook points listed above (before_process, process, and so on); the actual detail fixing is mainly done by calling the img2img interface.

p.scripts.postprocess_image(p,pp) ->

is_processed |= self._postprocess_image_inner(p,pp...)->
- i2i = self.get_i2i_p(p,args,pp.image) ->
-- i2i = StableDiffusionProcessingImg2Img()->
- ad_prompts,ad_negative = self.get_prompt(p,args) ->
- ad_model = self.get_ad_model(args.sd_model) ->
- pred = predictor(ad_model,pp.image,...) ->
- masks = self.pred_preprocessing(pred,args) ->
- p2 = copy(i2i) ->
- for j in range(len(masks)):
     p2.image_mask = masks[j]
     self.i2i_prompts_replace(p2,ad_prompts,ad_negatives,j)
     
     processed = process_images(p2)
--   p2.scripts.before_process(p2)
--   res = process_images_inner(p2)
     pp.image = processed.images[0]
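
Condensing the trace, the img2img job that get_i2i_p() builds is essentially an inpainting pass seeded from the original p. The helper below is hypothetical (adetailer copies many more fields and uses its own option names), but it shows the shape of the idea:

from copy import copy
from modules.processing import StableDiffusionProcessingImg2Img, process_images

def run_detail_passes(p, image, masks, ad_prompt, ad_negative):
    i2i = StableDiffusionProcessingImg2Img(
        init_images=[image],
        prompt=ad_prompt,
        negative_prompt=ad_negative,
        sampler_name=p.sampler_name,
        steps=p.steps,
        cfg_scale=p.cfg_scale,
        width=p.width,
        height=p.height,
        denoising_strength=0.4,    # only partially re-noise the masked region
        inpaint_full_res=True,     # crop to the mask, repaint at full resolution, paste back
        inpainting_fill=1,         # fill the masked area with the original content
        mask_blur=4,
        do_not_save_samples=True,
    )

    for mask in masks:
        p2 = copy(i2i)
        p2.image_mask = mask                # limit the change to the detected region
        processed = process_images(p2)      # re-enters the main pipeline traced at the top
        image = processed.images[0]
        i2i.init_images = [image]           # feed the repaired image into the next mask
    return image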
