Notes on loading GGUF models in sd-webui-forge-classic-neo.
reader = gguf.GGUFReader(path)
Original shape
def get_orig_shape(reader, tensor_name):
    field_key = f"comfy.gguf.orig_shape.{tensor_name}"
    field = reader.get_field(field_key)
    if field is None:
        return None
    # Has original shape metadata, so we try to decode it.
    if len(field.types) != 2 or field.types[0] != gguf.GGUFValueType.ARRAY or field.types[1] != gguf.GGUFValueType.INT32:
        raise TypeError(f"Bad original shape metadata for {field_key}: Expected ARRAY of INT32, got {field.types}")
    return torch.Size(tuple(int(field.parts[part_idx][0]) for part_idx in field.data))
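Usage sketch (my own example; the tensor name is a placeholder): the helper returns the recorded shape, or None when no metadata key exists for that tensor.

# Sketch: look up the shape recorded under comfy.gguf.orig_shape.<name> (name is hypothetical).
orig = get_orig_shape(reader, "model.diffusion_model.input_blocks.0.0.weight")
if orig is not None:
    print("original shape:", orig)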
The shape values have to be cast to int, otherwise it errors out later.
shape = torch.Size(tuple(int(v) for v in reversed(tensor.shape)))
F32/F16 tensors are reshaped (viewed) into that shape directly.
if tensor.tensor_type in {gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}:
    torch_tensor = torch_tensor.view(*shape)
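Putting the pieces together, a minimal conversion sketch (my own, not the forge code; it assumes gguf.quants.dequantize is available in the installed gguf-py):

import numpy as np
import torch
import gguf

def gguf_tensor_to_torch(reader, tensor):
    # Prefer the recorded original shape; otherwise fall back to the packed shape with int dims.
    shape = get_orig_shape(reader, tensor.name)
    if shape is None:
        shape = torch.Size(tuple(int(v) for v in reversed(tensor.shape)))
    if tensor.tensor_type in {gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}:
        # Unquantized data can be viewed directly with the corrected shape.
        return torch.from_numpy(np.array(tensor.data)).view(*shape)
    # Quantized data: dequantize to float first, then reshape (assumed gguf-py API).
    return torch.from_numpy(gguf.quants.dequantize(tensor.data, tensor.tensor_type)).reshape(*shape)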
sd-webui-forge-classic-neo\modules_forge\packages\huggingface_guess\detection.py
model_channels does not match when the tensors still have their GGUF-packed shapes, so detection fails with a "model not recognized" error.
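For reference, a sketch of how that detection typically works (the exact key name is an assumption based on SD-style checkpoints): model_channels is read straight off the first input conv's shape, so a still-packed GGUF tensor matches no known config.

# Sketch (assumed key): detection derives model_channels from the first input conv weight.
w = state_dict["model.diffusion_model.input_blocks.0.0.weight"]
# Expected something like torch.Size([320, 4, 3, 3]) -> model_channels = 320.
# A still-packed GGUF tensor reports a flat byte-level shape instead, so nothing matches.
model_channels = int(w.shape[0])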
sd-webui-forge-classic-neo\modules_forge\packages\huggingface_guess\__init__.py
Model identification
def guess(state_dict):
    unet_key_prefix = unet_prefix_from_state_dict(state_dict)
    print("unet_key_prefix", unet_key_prefix)
    result = model_config_from_unet(
        state_dict, unet_key_prefix, use_base_if_no_match=False
    )
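Usage sketch (my own; the exact failure behaviour is an assumption): with packed GGUF shapes, model_config_from_unet finds no match, and this is where the "model not recognized" error above surfaces.

# Sketch: run the guesser on a loaded state dict and watch where it fails.
try:
    guess_result = huggingface_guess.guess(sd)
except Exception as e:
    print("model detection failed:", e)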
Loading the checkpoint file
sd-webui-forge-classic-neo\backend\utils.py
def load_torch_file(ckpt: str, safe_load=False, device=None, *, return_metadata=False):
    ...
    elif ckpt.lower().endswith(".gguf"):
        reader = gguf.GGUFReader(ckpt)
        sd = {}
        for tensor in reader.tensors:
            sd[str(tensor.name)] = ParameterGGUF(tensor)
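Usage sketch (the path is a placeholder): the returned sd maps tensor names to ParameterGGUF objects rather than dense tensors, so later code has to look at real_shape (see operations_gguf.py below), not the packed .shape.

# Sketch: inspect what the GGUF branch actually returns.
sd = load_torch_file(r"models\Stable-diffusion\some-model.gguf")
for name, p in list(sd.items())[:5]:
    print(name, getattr(p, "real_shape", p.shape))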
Loading the model
sd-webui-forge-classic-neo\backend\loader.py
Build the container and transfer the data into it.
def load_huggingface_component(guess, component_name, lib_name, cls_name, repo_path, state_dict):
    ...
    # Build the container.
    if cls_name == "UNet2DConditionModel":
        model_loader = lambda c: IntegratedUNet2DConditionModel.from_config(c)
    ...
    # Transfer the state_dict into the model. This is where the size mismatch error occurs.
    load_state_dict(model, state_dict)
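One way to see the mismatch before the transfer (a hedged sketch, not the forge fix; it assumes real_shape is populated as in operations_gguf.py):

# Sketch: report parameters whose logical shape disagrees with the freshly built module.
model_shapes = {k: tuple(v.shape) for k, v in model.state_dict().items()}
for k, v in state_dict.items():
    incoming = tuple(getattr(v, "real_shape", v.shape))
    if k in model_shapes and incoming != model_shapes[k]:
        print("shape mismatch:", k, incoming, "!=", model_shapes[k])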
sd-webui-forge-classic-neo\backend\state_dict.py
Push the data into the model
def load_state_dict(model, sd, ignore_errors=[], log_name=None, ignore_start=None):
    missing, unexpected = model.load_state_dict(sd, strict=False)
    missing = [x for x in missing if x not in ignore_errors]
    unexpected = [x for x in unexpected if x not in ignore_errors]
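Usage sketch (the key name is a placeholder): keys listed in ignore_errors are dropped from the reported missing/unexpected lists.

# Sketch: suppress a known-harmless mismatch while loading.
load_state_dict(model, sd, ignore_errors=["some.optional.buffer"], log_name="unet")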
Load the model and split the state dict.
def split_state_dict(sd, additional_state_dicts: list = None):
    sd, metadata = load_torch_file(sd, return_metadata=True)
    sd = preprocess_state_dict(sd)
    guess = huggingface_guess.guess(sd)
sd-webui-forge-classic-neo\backend\nn\unet.py
Build the container for the UNet model
class IntegratedUNet2DConditionModel(nn.Module, ConfigMixin):
inner_dim should be cast to int.
class SpatialTransformer(nn.Module):
    def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None, disable_self_attn=False, use_linear=False, use_checkpoint=True):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = int(n_heads * d_head)
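A quick illustration of why the int cast matters (my own example): if d_head arrives as a float, e.g. from a division, layer construction rejects the non-integer feature size.

import torch.nn as nn

# Sketch: a float inner_dim breaks nn.Linear construction; an int one does not.
n_heads, d_head = 8, 1280 / 8                  # division gives a float: 160.0
# nn.Linear(1280, n_heads * d_head)            # raises TypeError (size must be int)
proj = nn.Linear(1280, int(n_heads * d_head))  # OK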
sd-webui-forge-classic-neo\backend\operations.py
sd-webui-forge-classic-neo\backend\operations_gguf.py
For lazy loading
class ParameterGGUF(torch.nn.Parameter):
    ...
    self.real_shape = torch.Size(reversed(list(tensor.shape)))

↑ This should also be cast to int:

    self.real_shape = torch.Size(tuple(int(v) for v in reversed(tensor.shape)))
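A minimal sketch of the idea (hypothetical class, not the forge implementation): only the metadata, including the int-cast real_shape, is materialised up front, while the quantized payload is kept as-is for later dequantization.

import torch

class ParameterGGUFSketch(torch.nn.Parameter):
    # Hypothetical lazy parameter: keep the packed GGUF payload, dequantize on demand later.
    def __new__(cls, tensor):
        # Tiny placeholder tensor; the real data stays in the memory-mapped GGUF file.
        self = super().__new__(cls, torch.empty(0), requires_grad=False)
        self.gguf_type = tensor.tensor_type
        self.gguf_data = tensor.data
        # Cast every dimension to a plain Python int (the fix noted above).
        self.real_shape = torch.Size(tuple(int(v) for v in reversed(tensor.shape)))
        return self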