diff --git a/clip.hpp b/clip.hpp
index 46e52ada4..cfc4cb38c 100644
--- a/clip.hpp
+++ b/clip.hpp
@@ -545,9 +545,12 @@ class CLIPEmbeddings : public GGMLBlock {
     int64_t vocab_size;
     int64_t num_positions;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, wtype, embed_dim, vocab_size);
-        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type token_wtype = (tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32;
+        enum ggml_type position_wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
+
+        params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, token_wtype, embed_dim, vocab_size);
+        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
     }
 
 public:
@@ -591,11 +594,14 @@ class CLIPVisionEmbeddings : public GGMLBlock {
     int64_t image_size;
     int64_t num_patches;
     int64_t num_positions;
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type patch_wtype = GGML_TYPE_F16;     // tensor_types.find(prefix + "patch_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "patch_embedding.weight"] : GGML_TYPE_F16;
+        enum ggml_type class_wtype = GGML_TYPE_F32;     // tensor_types.find(prefix + "class_embedding") != tensor_types.end() ? tensor_types[prefix + "class_embedding"] : GGML_TYPE_F32;
+        enum ggml_type position_wtype = GGML_TYPE_F32;  // tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["patch_embedding.weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, patch_size, patch_size, num_channels, embed_dim);
-        params["class_embedding"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, embed_dim);
-        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
+        params["patch_embedding.weight"] = ggml_new_tensor_4d(ctx, patch_wtype, patch_size, patch_size, num_channels, embed_dim);
+        params["class_embedding"] = ggml_new_tensor_1d(ctx, class_wtype, embed_dim);
+        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
     }
 
 public:
@@ -651,9 +657,10 @@ enum CLIPVersion {
 
 class CLIPTextModel : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (version == OPEN_CLIP_VIT_BIGG_14) {
-            params["text_projection"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, projection_dim, hidden_size);
+            enum ggml_type wtype = GGML_TYPE_F32;  // tensor_types.find(prefix + "text_projection") != tensor_types.end() ? tensor_types[prefix + "text_projection"] : GGML_TYPE_F32;
+            params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
         }
     }
@@ -798,9 +805,9 @@ class CLIPProjection : public UnaryBlock {
     int64_t out_features;
     bool transpose_weight;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = tensor_types.find(prefix + "weight") != tensor_types.end() ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
         if (transpose_weight) {
-            LOG_ERROR("transpose_weight");
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
         } else {
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);
@@ -861,12 +868,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
     CLIPTextModel model;
 
     CLIPTextModelRunner(ggml_backend_t backend,
-                        ggml_type wtype,
+                        std::map<std::string, enum ggml_type>& tensor_types,
+                        const std::string prefix,
                         CLIPVersion version = OPENAI_CLIP_VIT_L_14,
                         int clip_skip_value = 1,
                         bool with_final_ln = true)
-        : GGMLRunner(backend, wtype), model(version, clip_skip_value, with_final_ln) {
-        model.init(params_ctx, wtype);
+        : GGMLRunner(backend), model(version, clip_skip_value, with_final_ln) {
+        model.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
@@ -908,13 +916,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
         struct ggml_tensor* embeddings = NULL;
 
         if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) {
-            auto custom_embeddings = ggml_new_tensor_2d(compute_ctx,
-                                                        wtype,
-                                                        model.hidden_size,
-                                                        num_custom_embeddings);
+            auto token_embed_weight = model.get_token_embed_weight();
+            auto custom_embeddings = ggml_new_tensor_2d(compute_ctx,
+                                                        token_embed_weight->type,
+                                                        model.hidden_size,
+                                                        num_custom_embeddings);
             set_backend_tensor_data(custom_embeddings, custom_embeddings_data);
 
-            auto token_embed_weight = model.get_token_embed_weight();
             // concatenate custom embeddings
             embeddings = ggml_concat(compute_ctx, token_embed_weight, custom_embeddings, 1);
         }
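Note: every init_params override in this patch resolves a parameter's type with the same find-or-default lookup against the per-tensor type map. A minimal standalone sketch of that pattern (the helper name is hypothetical; the patch inlines the expression instead of factoring it out):

    #include <map>
    #include <string>
    #include "ggml.h"

    // Return the type recorded for "<prefix><name>" in the model file,
    // or a fallback when that tensor is absent from the map.
    static enum ggml_type lookup_wtype(std::map<std::string, enum ggml_type>& tensor_types,
                                       const std::string& prefix,
                                       const std::string& name,
                                       enum ggml_type fallback) {
        auto it = tensor_types.find(prefix + name);
        return (it != tensor_types.end()) ? it->second : fallback;
    }
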
diff --git a/common.hpp b/common.hpp
index 1ca6b8d0d..da8353b36 100644
--- a/common.hpp
+++ b/common.hpp
@@ -182,9 +182,11 @@ class GEGLU : public GGMLBlock {
     int64_t dim_in;
     int64_t dim_out;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
+        enum ggml_type bias_wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
         params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
-        params["proj.bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, dim_out * 2);
+        params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
     }
 
 public:
@@ -438,8 +440,10 @@ class SpatialTransformer : public GGMLBlock {
 
 class AlphaBlender : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
+        enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }
 
     float get_alpha() {
diff --git a/conditioner.hpp b/conditioner.hpp
index 9a6300997..5b3f20dd1 100644
--- a/conditioner.hpp
+++ b/conditioner.hpp
@@ -46,7 +46,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     SDVersion version = VERSION_SD1;
     PMVersion pm_version = PM_VERSION_1;
     CLIPTokenizer tokenizer;
-    ggml_type wtype;
    std::shared_ptr<CLIPTextModelRunner> text_model;
    std::shared_ptr<CLIPTextModelRunner> text_model2;
 
@@ -57,12 +56,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     std::vector<std::string> readed_embeddings;
 
     FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend,
-                                      ggml_type wtype,
+                                      std::map<std::string, enum ggml_type>& tensor_types,
                                       const std::string& embd_dir,
                                       SDVersion version = VERSION_SD1,
                                       PMVersion pv = PM_VERSION_1,
                                       int clip_skip = -1)
-        : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir), wtype(wtype) {
+        : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir) {
         if (clip_skip <= 0) {
             clip_skip = 1;
             if (version == VERSION_SD2 || version == VERSION_SDXL) {
@@ -70,12 +69,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
             }
         }
         if (version == VERSION_SD1) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip);
         } else if (version == VERSION_SD2) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_H_14, clip_skip);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, clip_skip);
         } else if (version == VERSION_SDXL) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
-            text_model2 = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
+            text_model2 = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
         }
     }
 
@@ -138,14 +137,14 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                 LOG_DEBUG("embedding wrong hidden size, got %i, expected %i", tensor_storage.ne[0], hidden_size);
                 return false;
             }
-            embd = ggml_new_tensor_2d(embd_ctx, wtype, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
+            embd = ggml_new_tensor_2d(embd_ctx, tensor_storage.type, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
             *dst_tensor = embd;
             return true;
         };
         model_loader.load_tensors(on_load, NULL);
         readed_embeddings.push_back(embd_name);
         token_embed_custom.resize(token_embed_custom.size() + ggml_nbytes(embd));
-        memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(wtype)),
+        memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(embd->type)),
                embd->data,
                ggml_nbytes(embd));
         for (int i = 0; i < embd->ne[1]; i++) {
@@ -590,9 +589,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
 struct FrozenCLIPVisionEmbedder : public GGMLRunner {
     CLIPVisionModelProjection vision_model;
 
-    FrozenCLIPVisionEmbedder(ggml_backend_t backend, ggml_type wtype)
-        : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend, wtype) {
-        vision_model.init(params_ctx, wtype);
+    FrozenCLIPVisionEmbedder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
+        : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend) {
+        vision_model.init(params_ctx, tensor_types, "cond_stage_model.transformer");
     }
 
     std::string get_desc() {
@@ -627,7 +626,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
 };
 
 struct SD3CLIPEmbedder : public Conditioner {
-    ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;
     CLIPTokenizer clip_g_tokenizer;
     T5UniGramTokenizer t5_tokenizer;
@@ -636,15 +634,15 @@ struct SD3CLIPEmbedder : public Conditioner {
     std::shared_ptr<T5Runner> t5;
 
     SD3CLIPEmbedder(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
                     int clip_skip = -1)
-        : wtype(wtype), clip_g_tokenizer(0) {
+        : clip_g_tokenizer(0) {
         if (clip_skip <= 0) {
             clip_skip = 2;
         }
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
-        clip_g = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
-        t5 = std::make_shared<T5Runner>(backend, wtype);
+        clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
+        clip_g = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
+        t5 = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
     }
 
     void set_clip_skip(int clip_skip) {
@@ -974,21 +972,19 @@ struct SD3CLIPEmbedder : public Conditioner {
 };
 
 struct FluxCLIPEmbedder : public Conditioner {
-    ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;
     T5UniGramTokenizer t5_tokenizer;
     std::shared_ptr<CLIPTextModelRunner> clip_l;
     std::shared_ptr<T5Runner> t5;
 
     FluxCLIPEmbedder(ggml_backend_t backend,
-                     ggml_type wtype,
-                     int clip_skip = -1)
-        : wtype(wtype) {
+                     std::map<std::string, enum ggml_type>& tensor_types,
+                     int clip_skip = -1) {
         if (clip_skip <= 0) {
             clip_skip = 2;
         }
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, true);
-        t5 = std::make_shared<T5Runner>(backend, wtype);
+        clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, true);
+        t5 = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
     }
 
     void set_clip_skip(int clip_skip) {
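Note: the string literals passed to the runners above are checkpoint key prefixes; GGMLBlock::init (ggml_extend.hpp below) appends "." plus each nested block's name as it recurses, so lookups hit full tensor names. A sketch of the composition for the SD1 text encoder, assuming the block in CLIPTextModel is registered under the name "embeddings":

    // "cond_stage_model.transformer.text_model"   (runner prefix, from the ctor above)
    //   + "." + "embeddings"                      (nested block name, per the model definition)
    //   + "." + "token_embedding.weight"          (parameter name)
    std::string key = std::string("cond_stage_model.transformer.text_model") +
                      ".embeddings.token_embedding.weight";
    // => the key looked up in tensor_types by CLIPEmbeddings::init_params
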
diff --git a/control.hpp b/control.hpp
index 41f31acb7..0cf081cea 100644
--- a/control.hpp
+++ b/control.hpp
@@ -317,10 +317,10 @@ struct ControlNet : public GGMLRunner {
     bool guided_hint_cached = false;
 
     ControlNet(ggml_backend_t backend,
-               ggml_type wtype,
-               SDVersion version = VERSION_SD1)
-        : GGMLRunner(backend, wtype), control_net(version) {
-        control_net.init(params_ctx, wtype);
+               std::map<std::string, enum ggml_type>& tensor_types,
+               SDVersion version = VERSION_SD1)
+        : GGMLRunner(backend), control_net(version) {
+        control_net.init(params_ctx, tensor_types, "");
     }
 
     ~ControlNet() {
diff --git a/diffusion_model.hpp b/diffusion_model.hpp
index eb433b614..710aecf68 100644
--- a/diffusion_model.hpp
+++ b/diffusion_model.hpp
@@ -31,10 +31,10 @@ struct UNetModel : public DiffusionModel {
     UNetModelRunner unet;
 
     UNetModel(ggml_backend_t backend,
-              ggml_type wtype,
+              std::map<std::string, enum ggml_type>& tensor_types,
               SDVersion version = VERSION_SD1,
               bool flash_attn = false)
-        : unet(backend, wtype, version, flash_attn) {
+        : unet(backend, tensor_types, "model.diffusion_model", version, flash_attn) {
     }
 
     void alloc_params_buffer() {
@@ -83,9 +83,9 @@ struct MMDiTModel : public DiffusionModel {
     MMDiTRunner mmdit;
 
     MMDiTModel(ggml_backend_t backend,
-               ggml_type wtype,
+               std::map<std::string, enum ggml_type>& tensor_types,
               SDVersion version = VERSION_SD3_2B)
-        : mmdit(backend, wtype, version) {
+        : mmdit(backend, tensor_types, "model.diffusion_model", version) {
     }
 
     void alloc_params_buffer() {
@@ -133,10 +133,10 @@ struct FluxModel : public DiffusionModel {
     Flux::FluxRunner flux;
 
     FluxModel(ggml_backend_t backend,
-              ggml_type wtype,
+              std::map<std::string, enum ggml_type>& tensor_types,
              SDVersion version = VERSION_FLUX_DEV,
              bool flash_attn = false)
-        : flux(backend, wtype, version, flash_attn) {
+        : flux(backend, tensor_types, "model.diffusion_model", version, flash_attn) {
     }
 
     void alloc_params_buffer() {
diff --git a/esrgan.hpp b/esrgan.hpp
index 33fcf09a4..97931f809 100644
--- a/esrgan.hpp
+++ b/esrgan.hpp
@@ -142,12 +142,12 @@ struct ESRGAN : public GGMLRunner {
     int scale = 4;
     int tile_size = 128;  // avoid cuda OOM for 4gb VRAM
 
-    ESRGAN(ggml_backend_t backend,
-           ggml_type wtype)
-        : GGMLRunner(backend, wtype) {
-        rrdb_net.init(params_ctx, wtype);
+    ESRGAN(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
+        : GGMLRunner(backend) {
+        rrdb_net.init(params_ctx, tensor_types, "");
     }
+
     std::string get_desc() {
         return "esrgan";
     }
diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp
index 59b325504..93e6d3e43 100644
--- a/examples/cli/main.cpp
+++ b/examples/cli/main.cpp
@@ -1010,8 +1010,7 @@ int main(int argc, const char* argv[]) {
     int upscale_factor = 4;  // unused for RealESRGAN_x4plus_anime_6B.pth
     if (params.esrgan_path.size() > 0 && params.upscale_repeats > 0) {
         upscaler_ctx_t* upscaler_ctx = new_upscaler_ctx(params.esrgan_path.c_str(),
-                                                        params.n_threads,
-                                                        params.wtype);
+                                                        params.n_threads);
 
         if (upscaler_ctx == NULL) {
             printf("new_upscaler_ctx failed\n");
diff --git a/flux.hpp b/flux.hpp
index b2d0f57c2..75411dfeb 100644
--- a/flux.hpp
+++ b/flux.hpp
@@ -35,8 +35,9 @@ namespace Flux {
         int64_t hidden_size;
         float eps;
 
-        void init_params(struct ggml_context* ctx, ggml_type wtype) {
-            params["scale"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+        void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+            ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "scale") != tensor_types.end()) ? tensor_types[prefix + "scale"] : GGML_TYPE_F32;
+            params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
         }
 
     public:
@@ -823,16 +824,19 @@ namespace Flux {
     };
 
     struct FluxRunner : public GGMLRunner {
+        static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     public:
         FluxParams flux_params;
         Flux flux;
        std::vector<float> pe_vec;  // for cache
 
         FluxRunner(ggml_backend_t backend,
-                   ggml_type wtype,
-                   SDVersion version = VERSION_FLUX_DEV,
+                   std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+                   const std::string prefix = "",
+                   SDVersion version = VERSION_FLUX_DEV,
                    bool flash_attn = false)
-            : GGMLRunner(backend, wtype) {
+            : GGMLRunner(backend) {
             flux_params.flash_attn = flash_attn;
             if (version == VERSION_FLUX_SCHNELL) {
                 flux_params.guidance_embed = false;
@@ -841,7 +845,7 @@ namespace Flux {
                 flux_params.depth = 8;
             }
             flux = Flux(flux_params);
-            flux.init(params_ctx, wtype);
+            flux.init(params_ctx, tensor_types, prefix);
         }
 
         std::string get_desc() {
@@ -959,7 +963,7 @@ namespace Flux {
     // ggml_backend_t backend = ggml_backend_cuda_init(0);
     ggml_backend_t backend = ggml_backend_cpu_init();
     ggml_type model_data_type = GGML_TYPE_Q8_0;
-    std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend, model_data_type));
+    std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend));
     {
         LOG_INFO("loading from '%s'", file_path.c_str());
diff --git a/ggml_extend.hpp b/ggml_extend.hpp
index e944deb69..8c84ba3f3 100644
--- a/ggml_extend.hpp
+++ b/ggml_extend.hpp
@@ -25,6 +25,8 @@
 #include "ggml-cpu.h"
 #include "ggml.h"
 
+#include "model.h"
+
 #ifdef SD_USE_CUBLAS
 #include "ggml-cuda.h"
 #endif
@@ -964,7 +966,6 @@ struct GGMLRunner {
     std::map<struct ggml_tensor*, const void*> backend_tensor_data_map;
 
-    ggml_type wtype = GGML_TYPE_F32;
     ggml_backend_t backend = NULL;
 
     void alloc_params_ctx() {
@@ -1040,8 +1041,8 @@ struct GGMLRunner {
 public:
     virtual std::string get_desc() = 0;
 
-    GGMLRunner(ggml_backend_t backend, ggml_type wtype = GGML_TYPE_F32)
-        : backend(backend), wtype(wtype) {
+    GGMLRunner(ggml_backend_t backend)
+        : backend(backend) {
         alloc_params_ctx();
     }
 
@@ -1170,20 +1171,22 @@ class GGMLBlock {
     GGMLBlockMap blocks;
     ParameterMap params;
 
-    void init_blocks(struct ggml_context* ctx, ggml_type wtype) {
+    void init_blocks(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         for (auto& pair : blocks) {
             auto& block = pair.second;
-
-            block->init(ctx, wtype);
+            block->init(ctx, tensor_types, prefix + pair.first);
         }
     }
 
-    virtual void init_params(struct ggml_context* ctx, ggml_type wtype) {}
+    virtual void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {}
 
 public:
-    void init(struct ggml_context* ctx, ggml_type wtype) {
-        init_blocks(ctx, wtype);
-        init_params(ctx, wtype);
+    void init(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        if (prefix.size() > 0) {
+            prefix = prefix + ".";
+        }
+        init_blocks(ctx, tensor_types, prefix);
+        init_params(ctx, tensor_types, prefix);
     }
 
     size_t get_params_num() {
@@ -1239,13 +1242,15 @@ class Linear : public UnaryBlock {
     bool bias;
     bool force_f32;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
         if (in_features % ggml_blck_size(wtype) != 0 || force_f32) {
             wtype = GGML_TYPE_F32;
         }
         params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);
         if (bias) {
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_features);
+            enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_features);
         }
     }
 
@@ -1273,9 +1278,9 @@ class Embedding : public UnaryBlock {
 protected:
     int64_t embedding_dim;
     int64_t num_embeddings;
-
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+        params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings);
     }
 
 public:
@@ -1313,10 +1318,12 @@ class Conv2d : public UnaryBlock {
     std::pair<int, int> dilation;
     bool bias;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kernel_size.second, kernel_size.first, in_channels, out_channels);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F16;  // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16;
+        params["weight"] = ggml_new_tensor_4d(ctx, wtype, kernel_size.second, kernel_size.first, in_channels, out_channels);
         if (bias) {
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels);
+            enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels);
         }
     }
 
@@ -1356,10 +1363,12 @@ class Conv3dnx1x1 : public UnaryBlock {
     int64_t dilation;
     bool bias;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 1, kernel_size, in_channels, out_channels);  // 5d => 4d
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F16;  // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16;
+        params["weight"] = ggml_new_tensor_4d(ctx, wtype, 1, kernel_size, in_channels, out_channels);  // 5d => 4d
         if (bias) {
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels);
+            enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels);
         }
     }
 
@@ -1398,11 +1407,13 @@ class LayerNorm : public UnaryBlock {
     bool elementwise_affine;
     bool bias;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (elementwise_affine) {
-            params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape);
+            enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+            params["weight"] = ggml_new_tensor_1d(ctx, wtype, normalized_shape);
             if (bias) {
-                params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape);
+                enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+                params["bias"] = ggml_new_tensor_1d(ctx, wtype, normalized_shape);
             }
         }
     }
@@ -1438,10 +1449,12 @@ class GroupNorm : public GGMLBlock {
     float eps;
     bool affine;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (affine) {
-            params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels);
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels);
+            enum ggml_type wtype = GGML_TYPE_F32;       // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+            enum ggml_type bias_wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["weight"] = ggml_new_tensor_1d(ctx, wtype, num_channels);
+            params["bias"] = ggml_new_tensor_1d(ctx, bias_wtype, num_channels);
         }
     }
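Note: Linear above is the one block that may reject the recorded type: a quantized weight is only usable when in_features is a multiple of the type's quantization block size. A minimal standalone sketch of that guard, assuming Q4_0 (block size 32):

    #include <cstdio>
    #include "ggml.h"

    int main() {
        enum ggml_type wtype = GGML_TYPE_Q4_0;  // type recorded in the model file
        int64_t in_features = 1000;             // 1000 % 32 != 0
        if (in_features % ggml_blck_size(wtype) != 0) {
            wtype = GGML_TYPE_F32;              // same fallback as Linear::init_params
        }
        printf("chosen type: %s\n", ggml_type_name(wtype));  // prints "f32"
        return 0;
    }
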
diff --git a/lora.hpp b/lora.hpp
index c44db7698..43daba48f 100644
--- a/lora.hpp
+++ b/lora.hpp
@@ -16,10 +16,9 @@ struct LoraModel : public GGMLRunner {
     ggml_tensor* zero_index = NULL;
 
     LoraModel(ggml_backend_t backend,
-              ggml_type wtype,
               const std::string& file_path = "",
-              const std::string& prefix = "")
-        : file_path(file_path), GGMLRunner(backend, wtype) {
+              const std::string prefix = "")
+        : file_path(file_path), GGMLRunner(backend) {
         if (!model_loader.init_from_file(file_path, prefix)) {
             load_failed = true;
         }
diff --git a/mmdit.hpp b/mmdit.hpp
index 35810bad9..4a126c7d6 100644
--- a/mmdit.hpp
+++ b/mmdit.hpp
@@ -147,8 +147,9 @@ class RMSNorm : public UnaryBlock {
     int64_t hidden_size;
     float eps;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+        params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
     }
 
 public:
@@ -652,8 +653,9 @@ struct MMDiT : public GGMLBlock {
     int64_t hidden_size;
     std::string qk_norm;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["pos_embed"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hidden_size, num_patchs, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "pos_embed") != tensor_types.end()) ? tensor_types[prefix + "pos_embed"] : GGML_TYPE_F32;
+        params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
     }
 
 public:
@@ -870,15 +872,17 @@ struct MMDiT : public GGMLBlock {
         return x;
     }
 };
-
 struct MMDiTRunner : public GGMLRunner {
     MMDiT mmdit;
 
+    static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     MMDiTRunner(ggml_backend_t backend,
-                ggml_type wtype,
-                SDVersion version = VERSION_SD3_2B)
-        : GGMLRunner(backend, wtype), mmdit(version) {
-        mmdit.init(params_ctx, wtype);
+                std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+                const std::string prefix = "",
+                SDVersion version = VERSION_SD3_2B)
+        : GGMLRunner(backend), mmdit(version) {
+        mmdit.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
@@ -975,7 +979,7 @@ struct MMDiTRunner : public GGMLRunner {
     // ggml_backend_t backend = ggml_backend_cuda_init(0);
     ggml_backend_t backend = ggml_backend_cpu_init();
     ggml_type model_data_type = GGML_TYPE_F16;
-    std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend, model_data_type));
+    std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend));
     {
         LOG_INFO("loading from '%s'", file_path.c_str());
diff --git a/model.cpp b/model.cpp
index dba8187da..3caac933c 100644
--- a/model.cpp
+++ b/model.cpp
@@ -927,6 +927,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::string& prefix) {
         GGML_ASSERT(ggml_nbytes(dummy) == tensor_storage.nbytes());
 
         tensor_storages.push_back(tensor_storage);
+        tensor_storages_types[tensor_storage.name] = tensor_storage.type;
     }
 
     gguf_free(ctx_gguf_);
@@ -1071,6 +1072,7 @@ bool ModelLoader::init_from_safetensors_file(const std::string& file_path, const std::string& prefix) {
         }
 
         tensor_storages.push_back(tensor_storage);
+        tensor_storages_types[tensor_storage.name] = tensor_storage.type;
 
         // LOG_DEBUG("%s %s", tensor_storage.to_string().c_str(), dtype.c_str());
     }
@@ -1296,7 +1298,7 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer,
                                  zip_t* zip,
                                  std::string dir,
                                  size_t file_index,
-                                 const std::string& prefix) {
+                                 const std::string prefix) {
     uint8_t* buffer_end = buffer + buffer_size;
     if (buffer[0] == 0x80) {  // proto
         if (buffer[1] != 2) {
@@ -1401,6 +1403,8 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer,
                     // printf(" ZIP got tensor %s \n ", reader.tensor_storage.name.c_str());
                     reader.tensor_storage.name = prefix + reader.tensor_storage.name;
                     tensor_storages.push_back(reader.tensor_storage);
+                    tensor_storages_types[reader.tensor_storage.name] = reader.tensor_storage.type;
+
                     // LOG_DEBUG("%s", reader.tensor_storage.name.c_str());
 
                     // reset
                     reader = PickleTensorReader();
@@ -1603,6 +1607,21 @@ ggml_type ModelLoader::get_vae_wtype() {
     return GGML_TYPE_COUNT;
 }
 
+void ModelLoader::set_wtype_override(ggml_type wtype, std::string prefix) {
+    for (auto& pair : tensor_storages_types) {
+        if (prefix.size() < 1 || pair.first.substr(0, prefix.size()) == prefix) {
+            for (auto& tensor_storage : tensor_storages) {
+                if (tensor_storage.name == pair.first) {
+                    if (tensor_should_be_converted(tensor_storage, wtype)) {
+                        pair.second = wtype;
+                    }
+                    break;
+                }
+            }
+        }
+    }
+}
+
 std::string ModelLoader::load_merges() {
     std::string merges_utf8_str(reinterpret_cast<const char*>(merges_utf8_c_str), sizeof(merges_utf8_c_str));
     return merges_utf8_str;
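Note: set_wtype_override only rewrites map entries whose names match the prefix and whose tensors pass tensor_should_be_converted, so norms and biases keep their original types. A minimal usage sketch (the file name is hypothetical):

    ModelLoader model_loader;
    if (model_loader.init_from_file("sd_xl_base_1.0.safetensors")) {  // hypothetical file
        model_loader.set_wtype_override(GGML_TYPE_Q8_0);         // quantize everything eligible
        model_loader.set_wtype_override(GGML_TYPE_F32, "vae.");  // but keep the VAE in F32
    }
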
diff --git a/model.h b/model.h
index b7e3b3a2e..9b2f86df3 100644
--- a/model.h
+++ b/model.h
@@ -170,7 +170,7 @@ class ModelLoader {
                         zip_t* zip,
                         std::string dir,
                         size_t file_index,
-                        const std::string& prefix);
+                        const std::string prefix);
 
     bool init_from_gguf_file(const std::string& file_path, const std::string& prefix = "");
     bool init_from_safetensors_file(const std::string& file_path, const std::string& prefix = "");
@@ -178,12 +178,15 @@ class ModelLoader {
     bool init_from_diffusers_file(const std::string& file_path, const std::string& prefix = "");
 
 public:
+    std::map<std::string, enum ggml_type> tensor_storages_types;
+
     bool init_from_file(const std::string& file_path, const std::string& prefix = "");
     SDVersion get_sd_version();
     ggml_type get_sd_wtype();
     ggml_type get_conditioner_wtype();
     ggml_type get_diffusion_model_wtype();
     ggml_type get_vae_wtype();
+    void set_wtype_override(ggml_type wtype, std::string prefix = "");
     bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, ggml_backend_t backend);
     bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
                       ggml_backend_t backend,
diff --git a/pmid.hpp b/pmid.hpp
index defb4f05a..ea9f02eb6 100644
--- a/pmid.hpp
+++ b/pmid.hpp
@@ -623,15 +623,15 @@ struct PhotoMakerIDEncoder : public GGMLRunner {
     std::vector<float> zeros_right;
 
 public:
-    PhotoMakerIDEncoder(ggml_backend_t backend, ggml_type wtype, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f)
-        : GGMLRunner(backend, wtype),
+    PhotoMakerIDEncoder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f)
+        : GGMLRunner(backend),
           version(version),
           pm_version(pm_v),
           style_strength(sty) {
         if (pm_version == PM_VERSION_1) {
-            id_encoder.init(params_ctx, wtype);
+            id_encoder.init(params_ctx, tensor_types, prefix);
         } else if (pm_version == PM_VERSION_2) {
-            id_encoder2.init(params_ctx, wtype);
+            id_encoder2.init(params_ctx, tensor_types, prefix);
         }
     }
 
@@ -780,11 +780,10 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
     bool applied = false;
 
     PhotoMakerIDEmbed(ggml_backend_t backend,
-                      ggml_type wtype,
                       ModelLoader* ml,
                       const std::string& file_path = "",
                       const std::string& prefix = "")
-        : file_path(file_path), GGMLRunner(backend, wtype), model_loader(ml) {
+        : file_path(file_path), GGMLRunner(backend), model_loader(ml) {
         if (!model_loader->init_from_file(file_path, prefix)) {
             load_failed = true;
         }
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index a276bff5c..55992b70a 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -264,16 +264,18 @@ class StableDiffusionGGML {
             conditioner_wtype = wtype;
             diffusion_model_wtype = wtype;
             vae_wtype = wtype;
+            model_loader.set_wtype_override(wtype);
         }
 
         if (version == VERSION_SDXL) {
             vae_wtype = GGML_TYPE_F32;
+            model_loader.set_wtype_override(GGML_TYPE_F32, "vae.");
         }
 
-        LOG_INFO("Weight type: %s", ggml_type_name(model_wtype));
-        LOG_INFO("Conditioner weight type: %s", ggml_type_name(conditioner_wtype));
-        LOG_INFO("Diffusion model weight type: %s", ggml_type_name(diffusion_model_wtype));
-        LOG_INFO("VAE weight type: %s", ggml_type_name(vae_wtype));
+        LOG_INFO("Weight type: %s", model_wtype != SD_TYPE_COUNT ? ggml_type_name(model_wtype) : "??");
+        LOG_INFO("Conditioner weight type: %s", conditioner_wtype != SD_TYPE_COUNT ? ggml_type_name(conditioner_wtype) : "??");
+        LOG_INFO("Diffusion model weight type: %s", diffusion_model_wtype != SD_TYPE_COUNT ? ggml_type_name(diffusion_model_wtype) : "??");
+        LOG_INFO("VAE weight type: %s", vae_wtype != SD_TYPE_COUNT ? ggml_type_name(vae_wtype) : "??");
 
         LOG_DEBUG("ggml tensor size = %d bytes", (int)sizeof(ggml_tensor));
 
@@ -294,15 +296,15 @@ class StableDiffusionGGML {
         }
 
         if (version == VERSION_SVD) {
-            clip_vision = std::make_shared<FrozenCLIPVisionEmbedder>(backend, conditioner_wtype);
+            clip_vision = std::make_shared<FrozenCLIPVisionEmbedder>(backend, model_loader.tensor_storages_types);
             clip_vision->alloc_params_buffer();
             clip_vision->get_param_tensors(tensors);
 
-            diffusion_model = std::make_shared<UNetModel>(backend, diffusion_model_wtype, version);
+            diffusion_model = std::make_shared<UNetModel>(backend, model_loader.tensor_storages_types, version);
             diffusion_model->alloc_params_buffer();
             diffusion_model->get_param_tensors(tensors);
 
-            first_stage_model = std::make_shared<AutoEncoderKL>(backend, vae_wtype, vae_decode_only, true, version);
+            first_stage_model = std::make_shared<AutoEncoderKL>(backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, true, version);
             LOG_DEBUG("vae_decode_only %d", vae_decode_only);
             first_stage_model->alloc_params_buffer();
             first_stage_model->get_param_tensors(tensors, "first_stage_model");
@@ -327,19 +329,20 @@ class StableDiffusionGGML {
             if (diffusion_flash_attn) {
                 LOG_WARN("flash attention in this diffusion model is currently unsupported!");
             }
-            cond_stage_model = std::make_shared<SD3CLIPEmbedder>(clip_backend, conditioner_wtype);
-            diffusion_model = std::make_shared<MMDiTModel>(backend, diffusion_model_wtype, version);
+            cond_stage_model = std::make_shared<SD3CLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
+            diffusion_model = std::make_shared<MMDiTModel>(backend, model_loader.tensor_storages_types, version);
         } else if (sd_version_is_flux(version)) {
-            cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, conditioner_wtype);
-            diffusion_model = std::make_shared<FluxModel>(backend, diffusion_model_wtype, version, diffusion_flash_attn);
+            cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
+            diffusion_model = std::make_shared<FluxModel>(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn);
         } else {
             if (id_embeddings_path.find("v2") != std::string::npos) {
-                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, conditioner_wtype, embeddings_path, version, PM_VERSION_2);
+                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version, PM_VERSION_2);
             } else {
-                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, conditioner_wtype, embeddings_path, version);
+                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version);
             }
-            diffusion_model = std::make_shared<UNetModel>(backend, diffusion_model_wtype, version, diffusion_flash_attn);
+            diffusion_model = std::make_shared<UNetModel>(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn);
         }
+
         cond_stage_model->alloc_params_buffer();
         cond_stage_model->get_param_tensors(tensors);
 
@@ -353,11 +356,11 @@ class StableDiffusionGGML {
             } else {
                 vae_backend = backend;
             }
-            first_stage_model = std::make_shared<AutoEncoderKL>(vae_backend, vae_wtype, vae_decode_only, false, version);
+            first_stage_model = std::make_shared<AutoEncoderKL>(vae_backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, false, version);
             first_stage_model->alloc_params_buffer();
             first_stage_model->get_param_tensors(tensors, "first_stage_model");
         } else {
-            tae_first_stage = std::make_shared<TinyAutoEncoder>(backend, vae_wtype, vae_decode_only);
+            tae_first_stage = std::make_shared<TinyAutoEncoder>(backend, model_loader.tensor_storages_types, "decoder.layers", vae_decode_only);
         }
         // first_stage_model->get_param_tensors(tensors, "first_stage_model.");
@@ -369,17 +372,17 @@ class StableDiffusionGGML {
             } else {
                 controlnet_backend = backend;
             }
-            control_net = std::make_shared<ControlNet>(controlnet_backend, diffusion_model_wtype, version);
+            control_net = std::make_shared<ControlNet>(controlnet_backend, model_loader.tensor_storages_types, version);
         }
 
         if (id_embeddings_path.find("v2") != std::string::npos) {
-            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_wtype, version, PM_VERSION_2);
+            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_loader.tensor_storages_types, "pmid", version, PM_VERSION_2);
             LOG_INFO("using PhotoMaker Version 2");
         } else {
-            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_wtype, version);
+            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_loader.tensor_storages_types, "pmid", version);
         }
         if (id_embeddings_path.size() > 0) {
-            pmid_lora = std::make_shared<LoraModel>(backend, model_wtype, id_embeddings_path, "");
+            pmid_lora = std::make_shared<LoraModel>(backend, id_embeddings_path, "");
             if (!pmid_lora->load_from_file(true)) {
                 LOG_WARN("load photomaker lora tensors from %s failed", id_embeddings_path.c_str());
                 return false;
@@ -633,7 +636,7 @@ class StableDiffusionGGML {
             LOG_WARN("can not find %s or %s for lora %s", st_file_path.c_str(), ckpt_file_path.c_str(), lora_name.c_str());
             return;
         }
-        LoraModel lora(backend, model_wtype, file_path);
+        LoraModel lora(backend, file_path);
         if (!lora.load_from_file()) {
             LOG_WARN("load lora tensors from %s failed", file_path.c_str());
             return;
diff --git a/stable-diffusion.h b/stable-diffusion.h
index 1fa328570..c67bc8a32 100644
--- a/stable-diffusion.h
+++ b/stable-diffusion.h
@@ -215,8 +215,7 @@ SD_API sd_image_t* img2vid(sd_ctx_t* sd_ctx,
 typedef struct upscaler_ctx_t upscaler_ctx_t;
 
 SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path,
-                                        int n_threads,
-                                        enum sd_type_t wtype);
+                                        int n_threads);
 SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx);
 
 SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_t upscale_factor);
diff --git a/t5.hpp b/t5.hpp
index 79109e34b..9cf3279f8 100644
--- a/t5.hpp
+++ b/t5.hpp
@@ -357,7 +357,7 @@ class T5UniGramTokenizer {
         BuildTrie(&pieces);
     }
 
-    ~T5UniGramTokenizer(){};
+    ~T5UniGramTokenizer() {};
 
     std::string Normalize(const std::string& input) const {
         // Ref: https://github.com/huggingface/tokenizers/blob/1ff56c0c70b045f0cd82da1af9ac08cd4c7a6f9f/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py#L29
@@ -441,8 +441,9 @@ class T5LayerNorm : public UnaryBlock {
     int64_t hidden_size;
     float eps;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+        params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
     }
 
 public:
@@ -717,14 +718,15 @@ struct T5Runner : public GGMLRunner {
     std::vector<int> relative_position_bucket_vec;
 
     T5Runner(ggml_backend_t backend,
-             ggml_type wtype,
+             std::map<std::string, enum ggml_type>& tensor_types,
+             const std::string prefix,
             int64_t num_layers = 24,
             int64_t model_dim = 4096,
             int64_t ff_dim = 10240,
             int64_t num_heads = 64,
             int64_t vocab_size = 32128)
-        : GGMLRunner(backend, wtype), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) {
-        model.init(params_ctx, wtype);
+        : GGMLRunner(backend), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) {
+        model.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
@@ -854,14 +856,17 @@ struct T5Embedder {
     T5UniGramTokenizer tokenizer;
     T5Runner model;
 
+    static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     T5Embedder(ggml_backend_t backend,
-               ggml_type wtype,
-               int64_t num_layers = 24,
-               int64_t model_dim = 4096,
-               int64_t ff_dim = 10240,
-               int64_t num_heads = 64,
-               int64_t vocab_size = 32128)
-        : model(backend, wtype, num_layers, model_dim, ff_dim, num_heads, vocab_size) {
+               std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+               const std::string prefix = "",
+               int64_t num_layers = 24,
+               int64_t model_dim = 4096,
+               int64_t ff_dim = 10240,
+               int64_t num_heads = 64,
+               int64_t vocab_size = 32128)
+        : model(backend, tensor_types, prefix, num_layers, model_dim, ff_dim, num_heads, vocab_size) {
     }
 
     void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
@@ -951,7 +956,7 @@ struct T5Embedder {
     // ggml_backend_t backend = ggml_backend_cuda_init(0);
     ggml_backend_t backend = ggml_backend_cpu_init();
     ggml_type model_data_type = GGML_TYPE_F32;
-    std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend, model_data_type));
+    std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend));
     {
         LOG_INFO("loading from '%s'", file_path.c_str());
diff --git a/tae.hpp b/tae.hpp
index 0e03b884e..ac061115c 100644
--- a/tae.hpp
+++ b/tae.hpp
@@ -188,14 +188,15 @@ struct TinyAutoEncoder : public GGMLRunner {
     bool decode_only = false;
 
     TinyAutoEncoder(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
+                    const std::string prefix,
                     bool decoder_only = true)
         : decode_only(decoder_only),
           taesd(decode_only),
-          GGMLRunner(backend, wtype) {
-        taesd.init(params_ctx, wtype);
+          GGMLRunner(backend) {
+        taesd.init(params_ctx, tensor_types, prefix);
     }
-
+
     std::string get_desc() {
         return "taesd";
     }
diff --git a/unet.hpp b/unet.hpp
index 79f702c4d..2a7adb3d2 100644
--- a/unet.hpp
+++ b/unet.hpp
@@ -532,11 +532,12 @@ struct UNetModelRunner : public GGMLRunner {
     UnetModelBlock unet;
 
     UNetModelRunner(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
+                    const std::string prefix,
                     SDVersion version = VERSION_SD1,
                     bool flash_attn = false)
-        : GGMLRunner(backend, wtype), unet(version, flash_attn) {
-        unet.init(params_ctx, wtype);
+        : GGMLRunner(backend), unet(version, flash_attn) {
+        unet.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
diff --git a/upscaler.cpp b/upscaler.cpp
index 096352993..1cf34c1a3 100644
--- a/upscaler.cpp
+++ b/upscaler.cpp
@@ -32,13 +32,17 @@ struct UpscalerGGML {
         LOG_DEBUG("Using SYCL backend");
         backend = ggml_backend_sycl_init(0);
 #endif
-
+        ModelLoader model_loader;
+        if (!model_loader.init_from_file(esrgan_path)) {
+            LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str());
+        }
+        model_loader.set_wtype_override(model_data_type);
         if (!backend) {
             LOG_DEBUG("Using CPU backend");
             backend = ggml_backend_cpu_init();
         }
         LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type));
-        esrgan_upscaler = std::make_shared<ESRGAN>(backend, model_data_type);
+        esrgan_upscaler = std::make_shared<ESRGAN>(backend, model_loader.tensor_storages_types);
         if (!esrgan_upscaler->load_from_file(esrgan_path)) {
             return false;
         }
@@ -96,8 +100,7 @@ struct upscaler_ctx_t {
 };
 
 upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
-                                 int n_threads,
-                                 enum sd_type_t wtype) {
+                                 int n_threads) {
     upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t));
     if (upscaler_ctx == NULL) {
         return NULL;
     }
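Note: with the weight type now read from the ESRGAN checkpoint itself, callers of the public API drop the wtype argument (see stable-diffusion.h above and examples/cli/main.cpp). A before/after sketch, assuming an sd_image_t input already loaded by the caller and a hypothetical model path:

    // before: new_upscaler_ctx(esrgan_path, n_threads, wtype);
    upscaler_ctx_t* upscaler_ctx = new_upscaler_ctx("realesrgan-x4.pth", 4);  // hypothetical path
    if (upscaler_ctx != NULL) {
        sd_image_t upscaled = upscale(upscaler_ctx, input_image, 4);  // input_image: caller-provided
        free_upscaler_ctx(upscaler_ctx);
    }
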
diff --git a/vae.hpp b/vae.hpp
index 2985aadd3..4add881f6 100644
--- a/vae.hpp
+++ b/vae.hpp
@@ -163,8 +163,9 @@ class AE3DConv : public Conv2d {
 
 class VideoResnetBlock : public ResnetBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }
 
     float get_alpha() {
@@ -524,12 +525,13 @@ struct AutoEncoderKL : public GGMLRunner {
     AutoencodingEngine ae;
 
     AutoEncoderKL(ggml_backend_t backend,
-                  ggml_type wtype,
+                  std::map<std::string, enum ggml_type>& tensor_types,
+                  const std::string prefix,
                   bool decode_only = false,
                   bool use_video_decoder = false,
                   SDVersion version = VERSION_SD1)
-        : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend, wtype) {
-        ae.init(params_ctx, wtype);
+        : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend) {
+        ae.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {