# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Dict

import torch
import torch.nn as nn
from executorch.examples.models.llama2.llama_transformer import (
    ModelArgs as LlamaModelArgs,
    Transformer as LlamaTransformer,
)
from executorch.extension.gguf_util.load_gguf import GGUFModelArgs, GGUFWeights


def _create_pt_model(
    gguf_model_args: GGUFModelArgs,
) -> nn.Module:
    llama_model_args = LlamaModelArgs(
        dim=gguf_model_args.embedding_length,
        n_layers=gguf_model_args.block_count,
        n_heads=gguf_model_args.attention.head_count,
        n_kv_heads=gguf_model_args.attention.head_count_kv,
        vocab_size=gguf_model_args.vocab_size,
        norm_eps=gguf_model_args.attention.layer_norm_rms_epsilon,
        hidden_dim=gguf_model_args.feed_forward_length,
        rope_freq_base=gguf_model_args.rope.freq_base,
    )
    pt_model = LlamaTransformer(llama_model_args)
    pt_model.eval()
    return pt_model


# Substring replacements that map GGUF tensor names onto the parameter names
# used by the llama_transformer module.
_name_replacements = [
    ("blk", "layers"),
    ("token_embd", "tok_embeddings"),
    ("attn_q", "attention.wq"),
    ("attn_k", "attention.wk"),
    ("attn_v", "attention.wv"),
    ("attn_output", "attention.wo"),
    ("attn_norm", "attention_norm"),
    ("output_norm.weight", "norm.weight"),
    ("ffn_down", "feed_forward.w2"),
    ("ffn_gate", "feed_forward.w1"),
    ("ffn_up", "feed_forward.w3"),
]


def _convert_gguf_tensor_name_to_llama_nn(gguf_name: str) -> str:
    # str.replace() returns a new string, so no copy of the input is needed.
    result = gguf_name
    for gguf_string, replacement in _name_replacements:
        result = result.replace(gguf_string, replacement)
    return result


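# For example (illustration only; "blk.0.attn_q.weight" is a typical GGUF
# tensor name in a Llama checkpoint):
#
#   _convert_gguf_tensor_name_to_llama_nn("blk.0.attn_q.weight")
#   returns "layers.0.attention.wq.weight"

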
def _convert_to_state_dict(gguf_weights: GGUFWeights) -> Dict[str, Any]:
    state_dict: Dict[str, Any] = {}
    for tensor in gguf_weights.tensors:
        gguf_tensor_name = tensor.name
        nn_tensor_name = _convert_gguf_tensor_name_to_llama_nn(gguf_tensor_name)
        # Reshape the flat data to the GGUF shape, then transpose (reverse
        # the axes) so the layout matches what the PyTorch module expects.
        new_tensor = tensor.data.reshape(tensor.shape).transpose()
        state_dict[nn_tensor_name] = torch.from_numpy(new_tensor)
    return state_dict


def _load_weights_into_nn(
    pt_model: nn.Module, gguf_model_args: GGUFModelArgs, gguf_weights: GGUFWeights
) -> None:
    state_dict: Dict[str, Any] = _convert_to_state_dict(gguf_weights)

    # Initialize each layer's attention mask so the state dict matches
    # llama_transformer.py, which keeps a causal (upper-triangular -inf)
    # mask per layer; load_state_dict() rejects a dict with missing keys.
    for layer_id in range(gguf_model_args.block_count):
        mask_name = f"layers.{layer_id}.attention.mask"
        mask = torch.full(
            (1, 1, pt_model.params.max_seq_len, pt_model.params.max_seq_len),
            float("-inf"),
        )
        mask = torch.triu(mask, diagonal=1)
        state_dict[mask_name] = mask

    pt_model.load_state_dict(state_dict)


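# Illustration (not executed by the converter): with max_seq_len == 4, the
# mask built in _load_weights_into_nn() is
#
#   torch.triu(torch.full((1, 1, 4, 4), float("-inf")), diagonal=1)
#
# whose last two dimensions look like
#
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]
#
# so position i can only attend to positions <= i (a causal mask).

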
def _create_pte_program(pt_model: nn.Module) -> bytes:
    # TODO (mnachin): Export
    return


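# A minimal sketch of what the export step might look like, assuming the
# standard ExecuTorch lowering flow (torch.export.export -> exir.to_edge ->
# to_executorch). The example input shape is a placeholder assumption, and
# _create_pte_program_sketch is a hypothetical name:
#
#   from torch.export import export
#
#   from executorch.exir import to_edge
#
#   def _create_pte_program_sketch(pt_model: nn.Module) -> bytes:
#       tokens = torch.ones((1, pt_model.params.max_seq_len), dtype=torch.long)
#       exported_program = export(pt_model, (tokens,))
#       edge_manager = to_edge(exported_program)
#       return edge_manager.to_executorch().buffer

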
def convert_to_pte(gguf_model_args: GGUFModelArgs, gguf_weights: GGUFWeights) -> bytes:
    """Convert a GGUF model into an ExecuTorch program.

    Args:
        gguf_model_args: The arguments for the GGUF model.
        gguf_weights: The weights of the GGUF model.
    """

    assert (
        gguf_model_args.arch == "llama"
    ), "Only Llama models are supported by this converter."

    # Step 1: Create the PyTorch model
    print("Creating the PyTorch model")
    pt_model = _create_pt_model(gguf_model_args)

    # Step 2: Load the weights into the PyTorch model
    print("Loading the weights into the PyTorch model")
    _load_weights_into_nn(pt_model, gguf_model_args, gguf_weights)

    # Step 3: Export to ExecuTorch
    print("Exporting to ExecuTorch")
    pte_program = _create_pte_program(pt_model)
    return pte_program
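

# Example usage (a sketch; assumes load_gguf exposes a load_file() helper that
# returns a (GGUFModelArgs, GGUFWeights) pair -- adjust to the actual loader
# API, and the .gguf path below is a placeholder):
#
#   from executorch.extension.gguf_util.load_gguf import load_file
#
#   gguf_model_args, gguf_weights = load_file("llama-2-7b.Q4_0.gguf")
#   pte_program = convert_to_pte(gguf_model_args, gguf_weights)
#   with open("llama.pte", "wb") as f:
#       f.write(pte_program)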