From db77f1b48aacb3414c40240dac2e7cb6237a279f Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 14:52:19 +0900 Subject: [PATCH 01/10] add q4_2 Q4_2 is qk=128 q4_1 --- llama.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/llama.cpp b/llama.cpp index 6d8b706b98258..4f89ae8fdd4e2 100644 --- a/llama.cpp +++ b/llama.cpp @@ -276,6 +276,7 @@ static const char * llama_format_type(enum ggml_type type) { case GGML_TYPE_F16: return "f16"; case GGML_TYPE_Q4_0: return "q4_0"; case GGML_TYPE_Q4_1: return "q4_1"; + case GGML_TYPE_Q4_2: return "q4_2"; default: LLAMA_ASSERT(false); } } @@ -471,6 +472,8 @@ struct llama_file_loader { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: break; + case GGML_TYPE_Q4_2: + break; default: { throw format("unrecognized tensor type %u\n", shard.type); } @@ -543,6 +546,8 @@ struct llama_file_saver { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: break; + case GGML_TYPE_Q4_2: + break; default: LLAMA_ASSERT(false); } file.write_u32((uint32_t) tensor.ne.size()); @@ -827,8 +832,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16"; case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; - case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: - return "mostly Q4_1, some F16"; + case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:return "mostly Q4_1, some F16"; + case LLAMA_FTYPE_MOSTLY_Q4_2: return "mostly Q4_2"; + case LLAMA_FTYPE_MOSTLY_Q4_2_SOME_F16:return "mostly Q4_2, some F16"; default: return "unknown, may not work"; } } @@ -1562,6 +1568,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s switch (ftype) { case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break; case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break; + case LLAMA_FTYPE_MOSTLY_Q4_2: quantized_type = GGML_TYPE_Q4_2; break; default: throw format("invalid output file type %d\n", ftype); }; From 716bd8fcfa14dc4dfb162b47f68144401e28df0b Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 14:52:49 +0900 Subject: [PATCH 02/10] Add files via upload --- convert.py | 1081 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1081 insertions(+) create mode 100644 convert.py diff --git a/convert.py b/convert.py new file mode 100644 index 0000000000000..304c2406686f6 --- /dev/null +++ b/convert.py @@ -0,0 +1,1081 @@ +from sentencepiece import SentencePieceProcessor # type: ignore +import json, struct, re, zipfile, pickle, itertools, sys, enum, concurrent.futures +import argparse, math, io, functools, dataclasses, mmap, signal, faulthandler, copy +from pathlib import Path +import numpy as np +from dataclasses import dataclass +from typing import Optional, Callable, Type, Any, Iterable, IO, Sequence, Union, TypeVar, Literal +from abc import abstractmethod, ABCMeta + +faulthandler.register(signal.SIGUSR1) + +NDArray = np.ndarray[Any, Any] + +@dataclass(frozen=True) +class UnquantizedDataType: + name: str + +DT_F16 = UnquantizedDataType('F16') +DT_F32 = UnquantizedDataType('F32') +DT_I32 = UnquantizedDataType('I32') +DT_BF16 = UnquantizedDataType('BF16') + +@dataclass(frozen=True) +class QuantizedDataType: + groupsize: int + have_addends: bool + have_g_idx: bool + +DT_Q4_0 = QuantizedDataType(groupsize=32, have_addends=False, have_g_idx=False) +DT_Q4_1 = QuantizedDataType(groupsize=32, have_addends=True, have_g_idx=False) +DT_Q4_2 = QuantizedDataType(groupsize=128, have_addends=True, have_g_idx=False) #change 
groupsize it + +DataType = Union[UnquantizedDataType, QuantizedDataType] + +DATA_TYPE_TO_FTYPE: dict[DataType, int] = { + DT_F32: 0, + DT_F16: 1, + DT_Q4_0: 2, + DT_Q4_1: 3, + DT_Q4_2: 5, +} + +FTYPE_TO_DATA_TYPE: dict[int, DataType] = {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()} + +DATA_TYPE_TO_NUMPY: dict[DataType, np.dtype[Any]] = { + DT_F16: np.dtype(np.float16), + DT_F32: np.dtype(np.float32), + DT_I32: np.dtype(np.int32), +} + +NUMPY_TYPE_TO_DATA_TYPE: dict[np.dtype[Any], DataType] = {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()} + +class GGMLFileType(enum.Enum): + AllF32 = 0 + MostlyF16 = 1 # except 1d tensors + MostlyQ4_0 = 2 # except 1d tensors + MostlyQ4_1 = 3 # except 1d tensors + PerLayerIsQ4_1 = 4 # but tok_embeddings.weight and output.weight are F16 + MostlyQ4_2 = 5 # except 1d tensors + PerLayerIsQ4_2 = 6 # but tok_embeddings.weight and output.weight are F16 + def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType: + if len(tensor.shape) == 1: + # 1D tensors are always F32. + return DT_F32 + elif self == GGMLFileType.AllF32: + return DT_F32 + elif self == GGMLFileType.MostlyF16: + return DT_F16 + elif self == GGMLFileType.MostlyQ4_0: + return DT_Q4_0 + elif self == GGMLFileType.MostlyQ4_1: + return DT_Q4_1 + elif self == GGMLFileType.PerLayerIsQ4_1: + if name in ('output.weight', 'tok_embeddings.weight'): + return DT_F16 + else: + return DT_Q4_1 + elif self == GGMLFileType.MostlyQ4_2: + return DT_Q4_2 + elif self == GGMLFileType.PerLayerIsQ4_2: + if name in ('output.weight', 'tok_embeddings.weight'): + return DT_F16 + else: + return DT_Q4_2 + else: + raise ValueError(self) + +def make_tensors_list() -> list[str]: + ret = [ + 'tok_embeddings.weight', + 'norm.weight', + 'output.weight', + ] + for i in range(80): # maximum number of layer + ret += [ + f'layers.{i}.attention.wq.weight', + f'layers.{i}.attention.wk.weight', + f'layers.{i}.attention.wv.weight', + f'layers.{i}.attention.wo.weight', + f'layers.{i}.attention_norm.weight', + f'layers.{i}.feed_forward.w1.weight', + f'layers.{i}.feed_forward.w2.weight', + f'layers.{i}.feed_forward.w3.weight', + f'layers.{i}.atttention_norm.weight', + f'layers.{i}.ffn_norm.weight', + ] + return ret +TENSORS_LIST = make_tensors_list() +TENSORS_SET = set(TENSORS_LIST) + +@dataclass +class Params: + n_vocab: int + n_embd: int + n_mult: int + n_head: int + n_layer: int + file_type: GGMLFileType + + @staticmethod + def guessed(model: 'LazyModel', file_type: GGMLFileType) -> 'Params': + n_vocab, n_embd = model["tok_embeddings.weight"].shape + + return Params( + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = 256, + n_head = n_embd // 128, + n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model), + file_type = file_type, + ) + +class SentencePieceVocab: + def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None: + self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) + added_tokens: dict[str, int] + if fname_added_tokens is not None: + added_tokens = json.load(open(fname_added_tokens)) + else: + added_tokens = {} + vocab_size: int = self.sentencepiece_tokenizer.vocab_size() + expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) + actual_ids = sorted(added_tokens.values()) + if expected_ids != actual_ids: + raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}") + items = sorted(added_tokens.items(), key=lambda text_idx: 
text_idx[1])
+        self.added_tokens_list = [text for (text, idx) in items]
+        self.vocab_size_base: int = vocab_size
+        self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
+        self.fname_tokenizer = fname_tokenizer
+        self.fname_added_tokens = fname_added_tokens
+
+    def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float]]:
+        tokenizer = self.sentencepiece_tokenizer
+        for i in range(tokenizer.vocab_size()):
+            text: bytes
+            if tokenizer.is_unknown(i):
+                text = " \u2047 ".encode("utf-8")
+            elif tokenizer.is_control(i):
+                text = b""
+            elif tokenizer.is_byte(i):
+                piece = tokenizer.id_to_piece(i)
+                if len(piece) != 6:
+                    raise Exception(f"Invalid token: {piece}")
+                byte_value = int(piece[3:-1], 16)
+                text = struct.pack("B", byte_value)
+            else:
+                text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            score: float = tokenizer.get_score(i)
+            yield text, score
+
+    def added_tokens(self) -> Iterable[tuple[bytes, float]]:
+        for text in self.added_tokens_list:
+            score = -1000.0
+            yield text.encode("utf-8"), score
+
+    def all_tokens(self) -> Iterable[tuple[bytes, float]]:
+        yield from self.sentencepiece_tokens()
+        yield from self.added_tokens()
+
+    def __repr__(self) -> str:
+        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+
+class GGMLVocab:
+    def __init__(self, tokens: list[tuple[bytes, float]]):
+        self.tokens = tokens
+        self.vocab_size = len(tokens)
+
+    def all_tokens(self) -> Iterable[tuple[bytes, float]]:
+        return self.tokens
+
+    def __repr__(self) -> str:
+        return f"<GGMLVocab with {self.vocab_size} tokens>"
+
+Vocab = Union[SentencePieceVocab, GGMLVocab]
+
+def permute(weights: NDArray, n_head: int) -> NDArray:
+    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
+            .swapaxes(1, 2)
+            .reshape(weights.shape))
+
+def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray:
+    # First reinterpret each row from a list of int32s containing 8 values each
+    # to a list of uint8s containing 2 values each.
+    qvalues_pack8 = qvalues_pack32.view(np.uint8)
+
+    # Then split out the two values per int8 (which requires an actual
+    # conversion because numpy doesn't natively support int4s).
+    qvalues = np.zeros([qvalues_pack8.shape[0], qvalues_pack8.shape[1] * 2], dtype=np.uint8)
+    qvalues[:, 0::2] = qvalues_pack8 & 0xf
+    qvalues[:, 1::2] = qvalues_pack8 >> 4
+
+    assert addends is None or addends.shape == scales.shape
+    assert qvalues.shape[0] == scales.shape[0]
+    assert qvalues.shape[1] % scales.shape[1] == 0
+    if g_idx is None:
+        repeat_count = qvalues.shape[1] // scales.shape[1]
+        scales = scales[:, :, np.newaxis]
+        if addends is not None:
+            addends = addends[:, :, np.newaxis]
+        # Reshape so that the below computation broadcasts over scales and addends:
+        qvalues.shape = (qvalues.shape[0], scales.shape[1], int(repeat_count))
+    else:
+        # In this case the scale and addend is selected for each column by g_idx:
+        assert addends is not None
+        scales = scales[:, g_idx]
+        addends = addends[:, g_idx]
+    if addends is None:
+        # Q4_0
+        qvalues = qvalues.view(np.int8)
+        qvalues -= 8
+    # And do the actual 'value = scale * qvalue + addend' computation.
+    values = scales * qvalues
+    if addends is not None:
+        values += addends
+    if g_idx is None:
+        values.shape = (values.shape[0], values.shape[1] * values.shape[2])
+    return values
+
+class Tensor(metaclass=ABCMeta):
+    data_type: DataType
+
+    @abstractmethod
+    def astype(self, data_type: DataType) -> 'Tensor': ...
+    @abstractmethod
+    def permute(self, n_head: int) -> 'Tensor': ...
+ @abstractmethod + def to_ggml(self) -> 'GGMLCompatibleTensor': ... + +class UnquantizedTensor(Tensor): + def __init__(self, ndarray: NDArray) -> None: + assert isinstance(ndarray, np.ndarray) + self.ndarray = ndarray + self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype] + def astype(self, data_type: DataType) -> Tensor: + dtype = DATA_TYPE_TO_NUMPY[data_type] + return UnquantizedTensor(self.ndarray.astype(dtype)) + def to_ggml(self) -> 'UnquantizedTensor': + return self + def permute(self, n_head: int) -> 'UnquantizedTensor': + return UnquantizedTensor(permute(self.ndarray, n_head)) + +def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, fp16: bool = False) -> NDArray: + tensor = lazy_tensor.load() + assert isinstance(tensor, UnquantizedTensor) + + # double-check: + actual_shape = list(tensor.ndarray.shape) + assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape) + if expected_dtype is not None: + if fp16 and tensor.ndarray.dtype == np.float16 and expected_dtype == np.float32: + tensor.ndarray = np.array(tensor.ndarray,dtype = np.float32) + assert tensor.ndarray.dtype == expected_dtype, (tensor.ndarray.dtype, expected_dtype) + + return tensor.ndarray + +class GGMLQuantizedTensor(Tensor): + data_type: QuantizedDataType + def __init__(self, ndarray: NDArray, shape: list[int], data_type: DataType) -> None: + rows, columns = shape + assert data_type in (DT_Q4_2, DT_Q4_1, DT_Q4_0) # for now + assert isinstance(data_type, QuantizedDataType) # redundant, but mypy complains without this + assert columns % data_type.groupsize == 0 + self.ndarray = ndarray.view(dtype=np.uint32).reshape((rows, columns // data_type.groupsize, -1)) + self.shape = shape[:] + self.data_type = data_type + + def astype(self, data_type: DataType) -> Tensor: + if data_type == self.data_type: + return self + scales = self.ndarray[:, :, 0].view(np.float32) + if self.data_type.have_addends: + addends = self.ndarray[:, :, 1].view(np.float32) + else: + addends = None + qweights = self.ndarray[:, :, -4:].reshape([self.shape[0], self.shape[1] // 8]) + print(1) + dq = dequantize_q4(qweights, scales, addends, g_idx=None) + return UnquantizedTensor(dq).astype(data_type) + + def to_ggml(self) -> 'GGMLQuantizedTensor': + return self + + def permute(self, n_head: int) -> 'GGMLQuantizedTensor': + return GGMLQuantizedTensor(permute(self.ndarray, n_head), self.shape, self.data_type) + +GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor] + +class DeferredPermutedTensor(Tensor): + def __init__(self, base: Tensor, n_head: int) -> None: + self.base = base + self.n_head = n_head + self.data_type = self.base.data_type + + def astype(self, data_type: DataType) -> Tensor: + return self.base.astype(data_type).permute(self.n_head) + + def to_ggml(self) -> GGMLCompatibleTensor: + return self.base.to_ggml().permute(self.n_head) + + def permute(self, n_head: int) -> Tensor: + raise Exception("shouldn't permute twice") + +class GPTQForLLaMaQuantizedTensor(Tensor): + def __init__(self, model: 'LazyModel', namebase: str) -> None: + qweight = load_unquantized(model[f"{namebase}.qweight"], np.int32) + scales = load_unquantized(model[f"{namebase}.scales"], np.float32, fp16 = True) + + bias = model.get(f"{namebase}.bias") + if bias is not None: + # Q4_1 does not support bias; good thing the bias is always all zeros. 
+ assert not np.any(load_unquantized(bias)) + + if f"{namebase}.zeros" in model: + zeros = load_unquantized(model[f"{namebase}.zeros"], np.float32) + else: + qzeros = load_unquantized(model[f"{namebase}.qzeros"], np.int32) + assert qzeros.dtype == np.int32 + zeros = dequantize_q4(qzeros, scales, scales, g_idx=None) + if zeros.dtype == np.float16: + zeros = np.array(zeros,dtype = np.float32) + assert zeros.dtype == np.float32 + + assert zeros.shape == scales.shape + + # Output is transposed compared to the input, and addends have their sign flipped. + # Scales and zeros similarly must be transposed but only for newer + # versions of GPTQ-for-LLaMa; the older versions can be identified by + # having shape (n_embd, 1). + qweight = qweight.T + if scales.shape[1] != 1: + scales = scales.T + zeros = zeros.T + + # Output also has signs flipped for the addends. + self.qweight = qweight + self.scales = scales + self.addends = -zeros + self.g_idx: Optional[NDArray] + if f"{namebase}.g_idx" in model: + self.g_idx = load_unquantized(model[f"{namebase}.g_idx"], np.int32) + assert self.g_idx.shape == (qweight.shape[1] * 8,) + else: + self.g_idx = None + + + self.shape = [self.qweight.shape[0], self.qweight.shape[1] * 8] + self.data_type = QuantizedDataType(groupsize=self.groupsize(), have_addends=True, + have_g_idx=(self.g_idx is not None)) + + def inspect(self, row: int, col: int) -> None: + '''For debugging.''' + qweight = (self.qweight[row, col // 8] >> (4 * (col & 7))) & 0xf + if self.g_idx is not None: + group = self.g_idx[col] + else: + group = int(col // self.groupsize()) + scale = self.scales[row, group] + addend = self.addends[row, group] + with np.printoptions(precision=None, suppress=True): + print(f'scale:{scale} addend:{addend} qweight:{qweight}') + print('possible values:', np.arange(16) * scale + addend) + print('actual value:', qweight * scale + addend) + + def astype(self, data_type: DataType) -> Tensor: + if isinstance(data_type, QuantizedDataType): + assert self.g_idx is None and data_type.have_addends == True and data_type.have_g_idx == False + return self.regroup(data_type.groupsize) + + dequantized = dequantize_q4(np.ascontiguousarray(self.qweight), self.scales, self.addends, self.g_idx) + return UnquantizedTensor(dequantized).astype(data_type) + + def groupsize(self) -> int: + assert self.addends.shape == self.scales.shape + assert self.shape[1] % self.scales.shape[1] == 0 + return self.shape[1] // self.scales.shape[1] + + def regroup(self, new_groupsize: int = 32) -> 'GPTQForLLaMaQuantizedTensor': + # Old versions of GPTQ-for-LLaMa shared scales and addends between all the + # columns in a row. Newer versions share them between every set of N + # columns in a row, where N is the `groupsize` parameter, usually 128. The + # output format shares them between every set of 32 columns. To handle + # this, duplicate scales and addends for every smaller group. + # (In the above, 'row' and 'column' are in the sense of the output.) 
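+        # Illustrative example (hypothetical numbers, not from a real model):
+        # going from an old groupsize of 128 to a new groupsize of 32 repeats
+        # each scale/addend 128 // 32 = 4 times along the column axis, e.g.
+        #   scales                   = [[s0, s1]]                          # one entry per 128 columns
+        #   scales.repeat(4, axis=1) = [[s0, s0, s0, s0, s1, s1, s1, s1]]  # one entry per 32 columns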
+ assert self.g_idx is None + old_groupsize = self.groupsize() + assert old_groupsize >= new_groupsize and old_groupsize % new_groupsize == 0, old_groupsize + ret = copy.copy(self) + ret.addends = self.addends.repeat(old_groupsize // new_groupsize, axis=1) + ret.scales = self.scales.repeat(old_groupsize // new_groupsize, axis=1) + ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False) + return ret + + def permute(self, n_head: int) -> Tensor: + return DeferredPermutedTensor(self, n_head) + + def to_ggml(self) -> GGMLQuantizedTensor: + # The output format looks like this: + # For each row: + # For each group of 32 columns: + # - addend (float32, 4 bytes) + # - scale (float32, 4 bytes) + # - weights (int4 * 32, 16 bytes) + + # Since the output format is mixed between integers and floats, we have + # to hackily view the floats as int32s just so numpy will let us + # concatenate them. + addends_view = self.addends.view(dtype=np.int32)[:, :, np.newaxis] + scales_view = self.scales.view(dtype=np.int32)[:, :, np.newaxis] + + # Split into groups of 4 columns (i.e. 32 columns of quantized data): + grouped = self.qweight.reshape([self.qweight.shape[0], self.addends.shape[1], -1]) + # And concatenate: + grouped = np.concatenate([scales_view, addends_view, grouped], axis=2, casting='no') + + return GGMLQuantizedTensor(grouped, self.shape, DT_Q4_2) + +@dataclass +class LazyTensor: + _load: Callable[[], Tensor] + shape: list[int] + data_type: DataType + description: str + + def load(self) -> Tensor: + ret = self._load() + assert ret.data_type == self.data_type, (self.data_type, ret.data_type, self.description) + return ret + + def astype(self, data_type: DataType) -> 'LazyTensor': + self.validate_conversion_to(data_type) + def load() -> Tensor: + return self.load().astype(data_type) + return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}') + + def validate_conversion_to(self, data_type: DataType) -> None: + if data_type == self.data_type: + return + if isinstance(data_type, QuantizedDataType): + if not isinstance(self.data_type, QuantizedDataType): + raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})") + if self.data_type.have_g_idx: + sys.stderr.write("Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), which is not yet natively supported by GGML. For now you can still convert this model by passing `--outtype f16` to dequantize, but that will result in a much larger output file for no quality benefit.\n") + sys.exit(1) + assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends + + +LazyModel = dict[str, LazyTensor] + +@dataclass +class ModelPlus: + model: LazyModel + paths: list[Path] # Where this was read from. + format: Literal['ggml', 'torch', 'safetensors'] + vocab: Optional[Vocab] # For GGML models (which have vocab built in), the vocab. + +def merge_sharded(models: list[LazyModel]) -> LazyModel: + # Original LLaMA models have each file contain one part of each tensor. + # Use a dict instead of a set to preserve order. 
+ names = {name: None for model in models for name in model} + def convert(name: str) -> LazyTensor: + lazy_tensors: list[LazyTensor] = [model[name] for model in models] + if len(lazy_tensors) == 1: + # only one file; don't go through this procedure since there might + # be quantized tensors + return lazy_tensors[0] + if len(lazy_tensors[0].shape) == 1: + # the tensor is just duplicated in every file + return lazy_tensors[0] + if (name.startswith('tok_embeddings.') or + name.endswith('.attention.wo.weight') or + name.endswith('.feed_forward.w2.weight')): + # split by columns + axis = 1 + else: + # split by rows + axis = 0 + concatenated_shape = list(lazy_tensors[0].shape) + concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors) + def load() -> UnquantizedTensor: + ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors] + concatenated: NDArray = np.concatenate(ndarrays, axis=axis) + return UnquantizedTensor(concatenated) + description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]' + return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description) + return {name: convert(name) for name in names} + +def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus: + formats = set(mp.format for mp in models_plus) + assert len(formats) == 1, "different formats?" + format = formats.pop() + paths = [path for mp in models_plus for path in mp.paths] + # Use the first non-None vocab, if any. + try: + vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None) + except StopIteration: + vocab = None + + if any("model.embed_tokens.weight" in mp.model for mp in models_plus): + # Transformers models put different tensors in different files, but + # don't split indivdual tensors between files. 
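+        # For example, Transformers shards like pytorch_model-00001-of-00002.bin
+        # and pytorch_model-00002-of-00002.bin each hold complete tensors for a
+        # subset of the layers, so a plain dict.update() per file is enough here;
+        # the original consolidated.0*.pth layout is handled by merge_sharded()
+        # in the else branch below.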
+ model: LazyModel = {} + for mp in models_plus: + model.update(mp.model) + else: + model = merge_sharded([mp.model for mp in models_plus]) + + return ModelPlus(model, paths, format, vocab) + +def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor: + def load() -> Tensor: + return lazy_tensor.load().permute(n_head) + return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description) + +def convert_transformers_to_orig(model: LazyModel) -> LazyModel: + out: LazyModel = {} + out["tok_embeddings.weight"] = model["model.embed_tokens.weight"] + out["norm.weight"] = model["model.norm.weight"] + out["output.weight"] = model["lm_head.weight"] + + n_head = model[f"model.layers.0.self_attn.q_proj.weight"].shape[1] // 128 + for i in itertools.count(): + if f"model.layers.{i}.self_attn.q_proj.weight" not in model: + break + out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], n_head) + out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], n_head) + out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"] + out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"] + + out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"] + out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"] + out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"] + + out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"] + out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"] + return out + +def handle_quantization(model: LazyModel) -> LazyModel: + '''Convert a model with entries for 'foo.qweight', 'foo.scales', etc. + (which resolve to UnquantizedTensors with the raw data) to one with entries + for 'foo.weight' (which resolve to QuantizedTensors). + ''' + def convert(name: str) -> tuple[str, LazyTensor]: + if name.endswith(".qweight"): + namebase = name.rsplit('.', 1)[0] + orig_name = namebase + ".weight" + + lazy_tensor = model[name] + assert len(lazy_tensor.shape) == 2 + real_shape = [lazy_tensor.shape[1], lazy_tensor.shape[0] * 8] + + # Calculate type. This replicates the logic in + # GPTQForLLaMaQuantizedTensor (which is executed when the modelis + # actually loaded). + lazy_scales = model[f"{namebase}.scales"] + scales_width = 1 if lazy_scales.shape[1] == 1 else lazy_scales.shape[0] + assert real_shape[1] % scales_width == 0 + groupsize = real_shape[1] // scales_width + have_g_idx = f"{namebase}.g_idx" in model + data_type = QuantizedDataType(groupsize=groupsize, have_addends=True, have_g_idx=have_g_idx) + + def load() -> Tensor: + return GPTQForLLaMaQuantizedTensor(model, namebase) + + return (orig_name, LazyTensor(load, real_shape, data_type, '[quantized]')) + else: + return (name, model[name]) + return dict(convert(name) for name in model) + +# Functionality that simulates `torch.load` but where individual tensors are +# only loaded into memory on demand, not all at once. +# PyTorch can't do this natively as of time of writing: +# - https://github.com/pytorch/pytorch/issues/64327 +# This allows us to de-shard without multiplying RAM usage, and also +# conveniently drops the PyTorch dependency (though we still need numpy). 
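+#
+# A rough usage sketch (hypothetical paths, for illustration only):
+#
+#   model_plus = lazy_load_file(Path('models/7B/consolidated.00.pth'))
+#   lazy = model_plus.model['tok_embeddings.weight']   # no tensor data read yet
+#   arr = lazy.load().ndarray                          # bytes are read from the zip only here
+#
+# Each LazyTensor's `load` closure reads just that tensor's storage entry from
+# the checkpoint zip, so de-sharding never needs the whole model in memory at once.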
+
+@dataclass
+class LazyStorageKind:
+    data_type: DataType
+@dataclass
+class LazyStorage:
+    load: Callable[[int, int], NDArray]
+    kind: LazyStorageKind
+    description: str
+
+class LazyUnpickler(pickle.Unpickler):
+    def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
+        super().__init__(fp)
+        self.data_base_path = data_base_path
+        self.zip_file = zip_file
+    def persistent_load(self, pid: Any) -> Any:
+        assert pid[0] == 'storage'
+        assert isinstance(pid[1], LazyStorageKind)
+        data_type = pid[1].data_type
+        filename_stem = pid[2]
+        filename = self.data_base_path + '/' + filename_stem
+        info = self.zip_file.getinfo(filename)
+        def load(offset: int, elm_count: int) -> NDArray:
+            dtype = DATA_TYPE_TO_NUMPY.get(data_type)
+            if dtype is None:
+                raise Exception("tensor stored in unsupported format")
+            fp = self.zip_file.open(info)
+            fp.seek(offset * dtype.itemsize)
+            size = elm_count * dtype.itemsize
+            data = fp.read(size)
+            assert len(data) == size
+            return np.frombuffer(data, dtype)
+        description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
+        return LazyStorage(load=load, kind=pid[1], description=description)
+
+    @staticmethod
+    def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
+        assert isinstance(storage, LazyStorage)
+        def load() -> UnquantizedTensor:
+            elm_count = stride[0] * size[0]
+            return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
+        description = f'pickled storage_offset={storage_offset} in {storage.description}'
+        return LazyTensor(load, list(size), storage.kind.data_type, description)
+
+    CLASSES: dict[Any, Any] = {
+        ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2,
+        ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
+        ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
+        ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
+        ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
+    }
+    def find_class(self, module: str, name: str) -> Any:
+        if not module.startswith('torch'):
+            return super().find_class(module, name)
+        return self.CLASSES[(module, name)]
+
+def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
+    zf = zipfile.ZipFile(outer_fp)
+    pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
+    assert len(pickle_paths) == 1, pickle_paths
+    pickle_fp = zf.open(pickle_paths[0], 'r')
+    unpickler = LazyUnpickler(pickle_fp,
+                              data_base_path = pickle_paths[0][:-4],
+                              zip_file = zf)
+    model = unpickler.load()
+    as_dict = dict(model.items())
+    return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)
+
+SAFETENSORS_DATA_TYPES: dict[str, DataType] = {
+    'F16': DT_F16,
+    'F32': DT_F32,
+    'I32': DT_I32,
+}
+def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
+    header_size, = struct.unpack('<Q', fp.read(8))
+    header: dict[str, dict[str, Any]] = json.loads(fp.read(header_size))
+    # Use mmap for the actual data to avoid race conditions with the file offset.
+    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
+    byte_buf = mapped[8 + header_size:]
+
+    def convert(info: dict[str, Any]) -> LazyTensor:
+        data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
+        numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
+        shape: list[int] = info['shape']
+        begin, end = info['data_offsets']
+        assert 0 <= begin <= end <= len(byte_buf)
+        assert end - begin == math.prod(shape) * numpy_dtype.itemsize
+        buf = byte_buf[begin:end]
+        def load() -> UnquantizedTensor:
+            return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
+        description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
+        return LazyTensor(load, shape, data_type, description)
+    model = {name: convert(info) for (name, info) in header.items()}
+    return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)
+
+def must_read(fp: IO[bytes], length: int) -> bytes:
+    ret = fp.read(length)
+    if len(ret) < length:
+        raise Exception("unexpectedly reached end of file")
+    return ret
+
+def lazy_load_ggml_file(fp: IO[bytes], path: Path) -> ModelPlus:
+    magic = must_read(fp, 4)[::-1]
+    #version: Optional[int]
+    if magic in (b'ggmf', b'ggjt'):
+        version, = struct.unpack("i", must_read(fp, 4))
+    else:
+        assert magic == b'ggml'
+        version = None
+    n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28))
+
+    tokens: list[tuple[bytes, float]] = []
+    for i in range(n_vocab):
+        if i == 32000:
+            # HACK: GPT4All messed with the format without changing the magic
+            # number. Specifically, they changed the vocab section to contain
+            # `n_vocab - 1` tokens instead of `n_vocab` (i.e. omitting the
+            # extra pad token). Try to detect if we're reading a file like
+            # this.
+            orig_pos = fp.tell()
+            fp.seek(20, io.SEEK_CUR)
+            is_gpt4all = fp.read(21) == b'tok_embeddings.weight'
+            fp.seek(orig_pos)
+            if is_gpt4all:
+                break
+
+        length, = struct.unpack("i", must_read(fp, 4))
+        text = must_read(fp, length)
+        if magic != b'ggml':
+            score, = struct.unpack("f", must_read(fp, 4))
+            tokens.append((text, score))
+    vocab = GGMLVocab(tokens) if magic != b'ggml' else None
+
+    model: LazyModel = {}
+    # Use mmap for the actual data to avoid race conditions with the file offset.
+    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
+    def read_tensor() -> None:  # this is a function so that variables captured in `load` don't change
+        shape_len, name_len, ftype = struct.unpack("iii", must_read(fp, 12))
+        assert 0 <= shape_len <= 3
+        shape: list[int] = list(struct.unpack(f"{shape_len}i", must_read(fp, 4 * shape_len)))
+        shape = shape[::-1]
+        name = must_read(fp, name_len).decode('utf-8')
+        data_type = FTYPE_TO_DATA_TYPE[ftype]
+
+        if magic == b'ggjt':
+            fp.seek((fp.tell() + 31) & -32)
+
+        if data_type == DT_Q4_1 or data_type == DT_Q4_2:
+            # See GPTQForLLaMaQuantizedTensor.ggml_ndarray()
+            size = 24 * (shape[1] // 32) * shape[0]
+        elif data_type == DT_Q4_0:
+            size = 20 * (shape[1] // 32) * shape[0]
+        else:
+            numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
+            elm_count = math.prod(shape)
+            size = elm_count * numpy_dtype.itemsize
+        offset = fp.tell()
+        buf = mapped[offset:offset+size]
+        fp.seek(size, io.SEEK_CUR)
+
+        def load() -> Tensor:
+            if isinstance(data_type, QuantizedDataType):
+                ndarray = np.frombuffer(buf, dtype=np.uint32)
+                return GGMLQuantizedTensor(ndarray, shape, data_type)
+            else:
+                return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
+        description = f'ggml offset={offset} type={data_type} path={path}'
+        model[name] = LazyTensor(load, shape, data_type, description)
+
+    while fp.read(1) != b'':
+        fp.seek(-1, io.SEEK_CUR)
+        read_tensor()
+
+    return ModelPlus(model=model, paths=[path], format='ggml', vocab=vocab)
+
+@functools.cache
+def lazy_load_file(path: Path) -> ModelPlus:
+    fp = open(path, 'rb')
+    first8 = fp.read(8)
+    fp.seek(0)
+    if first8[:2] == b'PK':
+        # A zip file, i.e. PyTorch format
+        return lazy_load_torch_file(fp, path)
+    elif first8[2:4] == b'gg':
+        # GGML format
+        return lazy_load_ggml_file(fp, path)
+    elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
+        # Probably safetensors
+        return lazy_load_safetensors_file(fp, path)
+    else:
+        raise ValueError(f"unknown format: {path}")
+
+In = TypeVar('In')
+Out = TypeVar('Out')
+
+def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]:
+    '''Parallel map, but with backpressure. If the caller doesn't call `next`
+    fast enough, this will stop calling `func` at some point rather than
+    letting results pile up in memory.
Specifically, there is a max of one + output value buffered per thread.''' + with concurrent.futures.ThreadPoolExecutor() as executor: + futures: list[concurrent.futures.Future[Out]] = [] + items_rev = list(iterable)[::-1] + for i in range(min(concurrency, len(items_rev))): + futures.append(executor.submit(func, items_rev.pop())) + while futures: + result = futures.pop(0).result() + if items_rev: + futures.append(executor.submit(func, items_rev.pop())) + yield result + +def check_vocab_size(params: Params, vocab: Vocab) -> None: + if params.n_vocab != vocab.vocab_size: + # GGMLVocab comes from the same file as the model so shouldn't mismatch: + assert isinstance(vocab, SentencePieceVocab) + if params.n_vocab == vocab.vocab_size_base: + print("Ignoring added_tokens.json since model matches vocab size without it.") + vocab.added_tokens_list = [] + vocab.vocab_size = vocab.vocab_size_base + return + msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}" + if vocab.fname_added_tokens is not None: + msg += f" combined with {vocab.fname_added_tokens}" + msg += f" has {vocab.vocab_size})." + if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None: + msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})." + raise Exception(msg) + +class OutputFile: + def __init__(self, fname_out: Path) -> None: + self.fout = open(fname_out, "wb") + + def write_file_header(self, params: Params) -> None: + self.fout.write(b"ggjt"[::-1]) # magic + values = [ + 1, # file version + params.n_vocab, + params.n_embd, + params.n_mult, + params.n_head, + params.n_layer, + params.n_embd // params.n_head, # rot (obsolete) + params.file_type.value, + ] + self.fout.write(struct.pack("i" * len(values), *values)) + + def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None: + sname = name.encode('utf-8') + self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type])) + self.fout.write(struct.pack("i" * len(shape), *shape[::-1])) + self.fout.write(sname) + self.fout.seek((self.fout.tell() + 31) & -32) + + def write_vocab(self, vocab: Vocab) -> None: + for text, score in vocab.all_tokens(): + self.fout.write(struct.pack("i", len(text))) + self.fout.write(text) + self.fout.write(struct.pack("f", score)) + + @staticmethod + def write_vocab_only(fname_out: Path, vocab: Vocab) -> None: + of = OutputFile(fname_out) + params = Params(n_vocab = vocab.vocab_size, n_embd = 0, n_mult = 0, + n_head = 1, n_layer = 0, file_type = GGMLFileType.AllF32) + of = OutputFile(fname_out) + of.write_file_header(params) + of.write_vocab(vocab) + of.fout.close() + + @staticmethod + def write_all(fname_out: Path, params: Params, model: LazyModel, vocab: Vocab) -> None: + check_vocab_size(params, vocab) + of = OutputFile(fname_out) + of.write_file_header(params) + print(f"Writing vocab...") + of.write_vocab(vocab) + + def do_item(item: tuple[str, LazyTensor]) -> NDArray: + name, lazy_tensor = item + return lazy_tensor.load().to_ggml().ndarray + + ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8) + for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): + size = ' x '.join(map(str, lazy_tensor.shape)) + print(f"[{i+1}/{len(model)}] Writing tensor {name}, size {size}...") + of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type) + ndarray.tofile(of.fout) + of.fout.close() + +def pick_output_type(model: LazyModel, 
output_type_str: Optional[str]) -> GGMLFileType: + wq_type = model["layers.0.attention.wq.weight"].data_type + if output_type_str == "f32" or (output_type_str is None and wq_type == DT_F32): + return GGMLFileType.AllF32 + if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16): + return GGMLFileType.MostlyF16 + if output_type_str == "q4_2" or (output_type_str is None and isinstance(wq_type, QuantizedDataType) and + wq_type.have_addends): + if isinstance(model["output.weight"].data_type, QuantizedDataType): + return GGMLFileType.MostlyQ4_2 + else: + return GGMLFileType.PerLayerIsQ4_2 + if output_type_str == "q4_1" or (output_type_str is None and isinstance(wq_type, QuantizedDataType) and + wq_type.have_addends): + if isinstance(model["output.weight"].data_type, QuantizedDataType): + return GGMLFileType.MostlyQ4_1 + else: + return GGMLFileType.PerLayerIsQ4_1 + if output_type_str == "q4_0" or (output_type_str is None and isinstance(wq_type, QuantizedDataType)): + return GGMLFileType.MostlyQ4_0 + name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()} + raise Exception(f"Unexpected combination of types: {name_to_type}") + +def conversions_del_g_idx(model: LazyModel, act_order: bool) -> LazyModel: + if not(act_order): + del_list = set([]) + for name in model: + g_idx_name = f"{name.rsplit('.', 1)[0]}.g_idx" + if g_idx_name in model: + del_list.add(g_idx_name) + + for g_idx_name in del_list: + del model[g_idx_name] + + return model + +def do_necessary_conversions(model: LazyModel, act_order: bool) -> LazyModel: + model = conversions_del_g_idx(model, act_order) + model = handle_quantization(model) + + if "lm_head.weight" in model: + model = convert_transformers_to_orig(model) + model = filter_and_sort_tensors(model) + + return model + +def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel: + return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) + for (name, tensor) in model.items()} + + +def nth_multifile_path(path: Path, n: int) -> Optional[Path]: + '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return + the nth path in the model. + ''' + # Support the following patterns: + patterns: list[tuple[str, str]] = [ + # - x.00.pth, x.01.pth, etc. + (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'), + # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc. + (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'), + # x.bin, x.bin.1, etc. + (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}') + ] + for regex, replacement in patterns: + if re.search(regex, path.name): + new_path = path.with_name(re.sub(regex, replacement, path.name)) + if new_path.exists(): + return new_path + return None + +def find_multifile_paths(path: Path) -> list[Path]: + '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return + the whole list of paths in the model. + ''' + ret: list[Path] = [] + for i in itertools.count(): + nth_path = nth_multifile_path(path, i) + if nth_path is None: + break + ret.append(nth_path) + if not ret: + # No matches. This should only happen if the file was named, e.g., + # foo.0, and there was no file named foo. Oh well, try to process it + # as a single file. 
+ return [path] + return ret + +def load_some_model(path: Path) -> ModelPlus: + '''Load a model of any supported format.''' + # Be extra-friendly and accept either a file or a directory: + if path.is_dir(): + globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt"] + files = [file for glob in globs for file in path.glob(glob)] + if not files: + # Try GGML too, but with lower priority, since if both a non-GGML + # model and a GGML model exist in the same directory, we assume the + # latter was converted from the former. + files = list(path.glob("ggml-model*.bin*")) + if not files: + raise Exception(f"Can't find model in directory {path}") + if len(files) > 1: + raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}") + path = files[0] + + paths = find_multifile_paths(path) + models_plus: list[ModelPlus] = [] + for path in paths: + print(f"Loading model file {path}") + models_plus.append(lazy_load_file(path)) + + model_plus = merge_multifile_models(models_plus) + return model_plus + +def filter_and_sort_tensors(model: LazyModel) -> LazyModel: + return {name: model[name] for name in TENSORS_LIST if name in model} + +def load_vocab(path: Path) -> SentencePieceVocab: + # Be extra-friendly and accept either a file or a directory. Also, if it's + # a directory, it might be the model directory, and tokenizer.model might + # be in the parent of that. + if path.is_dir(): + path2 = path / "tokenizer.model" + # Use `.parent` instead of /.. to handle the symlink case better. + path3 = path.parent / "tokenizer.model" + if path2.exists(): + path = path2 + elif path3.exists(): + path = path3 + else: + raise FileNotFoundError(f"Could not find tokenizer.model in {path} or its parent; if it's in another directory, pass the directory as --vocab-dir") + added_tokens_path = path.parent / "added_tokens.json" + print(f"Loading vocab file {path}") + return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None) + +def default_outfile(model_paths: list[Path], params: Params) -> Path: + namestr = { + GGMLFileType.AllF32: "f32", + GGMLFileType.MostlyF16: "f16", + GGMLFileType.MostlyQ4_1: "q4_1", + GGMLFileType.PerLayerIsQ4_1: "q4_1", + }[params.file_type] + ret = model_paths[0].parent / f"ggml-model-{namestr}.bin" + if ret in model_paths: + sys.stderr.write(f"Error: Default output path ({ret}) would overwrite the input. 
Please explicitly specify a path using --outfile.\n") + sys.exit(1) + return ret + +def do_dump_model(model_plus: ModelPlus) -> None: + print(f"model_plus.paths = {model_plus.paths!r}") + print(f"model_plus.format = {model_plus.format!r}") + print(f"model_plus.vocab = {model_plus.vocab!r}") + for name, lazy_tensor in model_plus.model.items(): + print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") + +def main(args_in: Optional[list[str]] = None) -> None: + parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") + parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") + parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_2"], help="output format (default: based on input)") + parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") + parser.add_argument("--act-order", action="store_true", help="If the GPTQ model uses act-order, activate it.") + args = parser.parse_args(args_in) + + vocab: Vocab + if args.dump_single: + model_plus = lazy_load_file(args.model) + do_dump_model(model_plus) + elif args.vocab_only: + vocab = load_vocab(args.vocab_dir or args.model) + assert args.outfile, "need --outfile if using --vocab-only" + outfile = args.outfile + OutputFile.write_vocab_only(outfile, vocab) + print(f"Wrote {outfile}") + else: + model_plus = load_some_model(args.model) + if args.dump: + do_dump_model(model_plus) + return + if model_plus.vocab is not None and args.vocab_dir is None: + vocab = model_plus.vocab + else: + vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent + vocab = load_vocab(vocab_dir) + model = model_plus.model + model = do_necessary_conversions(model, args.act_order) + output_type = pick_output_type(model, args.outtype) + model = convert_to_output_type(model, output_type) + params = Params.guessed(model, output_type) + outfile = args.outfile or default_outfile(model_plus.paths, params) + OutputFile.write_all(outfile, params, model, vocab) + print(f"Wrote {outfile}") + +if __name__ == '__main__': + main() \ No newline at end of file From f0b14e8c6957bee69445b59704ebdc95d22363ed Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 14:53:23 +0900 Subject: [PATCH 03/10] add Q4_2 --- llama.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/llama.h b/llama.h index 7a258a1e16d35..890b111ba3f6c 100644 --- a/llama.h +++ b/llama.h @@ -72,6 +72,8 @@ extern "C" { LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 + LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_2_SOME_F16 = 6, // except 1d tensors }; LLAMA_API struct llama_context_params llama_context_default_params(); From ff0efc747d109b7f255a3318fd0c3d0812d198f4 Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 14:54:44 +0900 Subject: [PATCH 04/10] add Q4_2 --- ggml.c | 625 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 623 insertions(+), 2 deletions(-) diff --git a/ggml.c b/ggml.c index a26b4853f7eae..2081e26fd36da 100644 --- a/ggml.c +++ b/ggml.c @@ -411,6 +411,7 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); // #define QK 32 +#define QK128 128 // AVX routines provided by GH user Const-me // ref: https://github.com/ggerganov/ggml/pull/27#issuecomment-1464934600 @@ -502,6 +503,16 @@ typedef struct { } block_q4_1; static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK / 2, "wrong q4_1 block size/padding"); +// method 6 +// blocks of QK elements for GPTQ +// represented with 2 floats (delta + min) and QK/2 8-bit ints (i.e QK 4-bit unsigned integer factors) +typedef struct { + float d; + float m; + uint8_t qs[QK128 / 2]; // nibbles / quants +} block_q4_2; +static_assert(sizeof(block_q4_2) == sizeof(float) * 2 + QK128 / 2, "wrong q4_2 block size/padding"); + // reference implementation for deterministic creation of model files static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) { assert(k % QK == 0); @@ -954,6 +965,267 @@ static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int #endif } +static void quantize_row_q4_2_reference(const float * restrict x, void * restrict vy, int k) { + assert(k % QK128 == 0); + const int nb = k / QK128; + + block_q4_2 * restrict y = vy; + + uint8_t pp[QK128/2]; + + for (int i = 0; i < nb; i++) { + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int l = 0; l < QK128; l++) { + const float v = x[i*QK128 + l]; + if (v < min) min = v; + if (v > max) max = v; + } + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = d; + y[i].m = min; + + for (int l = 0; l < QK128; l += 2) { + const float v0 = (x[i*QK128 + l + 0] - min)*id; + const float v1 = (x[i*QK128 + l + 1] - min)*id; + + const uint8_t vi0 = roundf(v0); + const uint8_t vi1 = roundf(v1); + + assert(vi0 < 16); + assert(vi1 < 16); + + pp[l/2] = vi0 | (vi1 << 4); + } + + memcpy(y[i].qs, pp, sizeof(pp)); + } +} + +static void quantize_row_q4_2(const float * restrict x, void * restrict vy, int k) { + assert(k % QK == 0); + + const int nb = k / QK; + + block_q4_2 * restrict y = vy; + +#if defined(__AVX2__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + __m256 v4 = _mm256_loadu_ps( x + 32 ); + __m256 v5 = _mm256_loadu_ps( x + 40 ); + __m256 v6 = _mm256_loadu_ps( x + 48 ); + __m256 v7 = _mm256_loadu_ps( x + 56 ); + __m256 v8 = _mm256_loadu_ps( x + 64 ); + __m256 v9 = _mm256_loadu_ps( x + 72 ); + __m256 v10 = _mm256_loadu_ps( x + 80 ); + __m256 v11 = _mm256_loadu_ps( x + 88 ); + __m256 v12 = _mm256_loadu_ps( x + 96 ); + __m256 v13 = _mm256_loadu_ps( x + 104 ); + __m256 v14 = _mm256_loadu_ps( x + 112 ); + __m256 v15 = _mm256_loadu_ps( x + 120 ); + x += 128; + + // Compute max for the block + __m256 vmax; + vmax = _mm256_max_ps( v0, v1 ); + vmax = _mm256_max_ps( vmax, v2 ); + vmax = _mm256_max_ps( vmax, v3 ); + vmax = _mm256_max_ps( vmax, v4 ); + vmax = _mm256_max_ps( vmax, v5 ); + vmax = _mm256_max_ps( vmax, v6 ); + vmax = _mm256_max_ps( vmax, v7 ); + vmax = _mm256_max_ps( vmax, v8 ); + vmax = _mm256_max_ps( vmax, v9 ); + vmax = _mm256_max_ps( vmax, v10 ); + vmax = _mm256_max_ps( vmax, v11 ); + vmax = _mm256_max_ps( vmax, v12 ); + 
vmax = _mm256_max_ps( vmax, v13 ); + vmax = _mm256_max_ps( vmax, v14 ); + vmax = _mm256_max_ps( vmax, v15 ); + + __m128 max16 = _mm_max_ps( _mm256_extractf128_ps( vmax, 1 ), _mm256_castps256_ps128( vmax ) ); + max16 = _mm_max_ps( max16, _mm_movehl_ps( max16, max16 ) ); + max16 = _mm_max_ss( max16, _mm_movehdup_ps( max16 ) ); + const float maxScalar = _mm_cvtss_f32( max16 ); + + // Compute min for the block + __m256 vmin; + vmin = _mm256_min_ps( v0, v1 ); + vmin = _mm256_min_ps( vmin, v2 ); + vmin = _mm256_min_ps( vmin, v3 ); + vmin = _mm256_min_ps( vmin, v4 ); + vmin = _mm256_min_ps( vmin, v5 ); + vmin = _mm256_min_ps( vmin, v6 ); + vmin = _mm256_min_ps( vmin, v7 ); + vmin = _mm256_min_ps( vmin, v8 ); + vmin = _mm256_min_ps( vmin, v9 ); + vmin = _mm256_min_ps( vmin, v10 ); + vmin = _mm256_min_ps( vmin, v11 ); + vmin = _mm256_min_ps( vmin, v12 ); + vmin = _mm256_min_ps( vmin, v13 ); + vmin = _mm256_min_ps( vmin, v14 ); + vmin = _mm256_min_ps( vmin, v15 ); + + __m128 min16 = _mm_min_ps( _mm256_extractf128_ps( vmin, 1 ), _mm256_castps256_ps128( vmin ) ); + min16 = _mm_min_ps( min16, _mm_movehl_ps( min16, min16 ) ); + min16 = _mm_min_ss( min16, _mm_movehdup_ps( min16 ) ); + const float minScalar = _mm_cvtss_f32( min16 ); + + // Quantize these floats + const float d = (maxScalar - minScalar) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].m = minScalar; + y[i].d = d; + + // x = (x-min)*id + const __m256 mul = _mm256_set1_ps( id ); + const __m256 off = _mm256_set1_ps( minScalar ); + v0 = _mm256_mul_ps( _mm256_sub_ps( v0, off ), mul ); + v1 = _mm256_mul_ps( _mm256_sub_ps( v1, off ), mul ); + v2 = _mm256_mul_ps( _mm256_sub_ps( v2, off ), mul ); + v3 = _mm256_mul_ps( _mm256_sub_ps( v3, off ), mul ); + v4 = _mm256_mul_ps( _mm256_sub_ps( v4, off ), mul ); + v5 = _mm256_mul_ps( _mm256_sub_ps( v5, off ), mul ); + v6 = _mm256_mul_ps( _mm256_sub_ps( v6, off ), mul ); + v7 = _mm256_mul_ps( _mm256_sub_ps( v7, off ), mul ); + v8 = _mm256_mul_ps( _mm256_sub_ps( v8, off ), mul ); + v9 = _mm256_mul_ps( _mm256_sub_ps( v9, off ), mul ); + v10 = _mm256_mul_ps( _mm256_sub_ps( v10, off ), mul ); + v11 = _mm256_mul_ps( _mm256_sub_ps( v11, off ), mul ); + v12 = _mm256_mul_ps( _mm256_sub_ps( v12, off ), mul ); + v13 = _mm256_mul_ps( _mm256_sub_ps( v13, off ), mul ); + v14 = _mm256_mul_ps( _mm256_sub_ps( v14, off ), mul ); + v15 = _mm256_mul_ps( _mm256_sub_ps( v15, off ), mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + v4 = _mm256_round_ps( v4, _MM_ROUND_NEAREST ); + v5 = _mm256_round_ps( v5, _MM_ROUND_NEAREST ); + v6 = _mm256_round_ps( v6, _MM_ROUND_NEAREST ); + v7 = _mm256_round_ps( v7, _MM_ROUND_NEAREST ); + v8 = _mm256_round_ps( v8, _MM_ROUND_NEAREST ); + v9 = _mm256_round_ps( v9, _MM_ROUND_NEAREST ); + v10 = _mm256_round_ps( v10, _MM_ROUND_NEAREST ); + v11 = _mm256_round_ps( v11, _MM_ROUND_NEAREST ); + v12 = _mm256_round_ps( v12, _MM_ROUND_NEAREST ); + v13 = _mm256_round_ps( v13, _MM_ROUND_NEAREST ); + v14 = _mm256_round_ps( v14, _MM_ROUND_NEAREST ); + v15 = _mm256_round_ps( v15, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + __m256i i4 = _mm256_cvtps_epi32( v4 ); + __m256i i5 = _mm256_cvtps_epi32( v5 ); + __m256i i6 = _mm256_cvtps_epi32( v6 
); + __m256i i7 = _mm256_cvtps_epi32( v7 ); + __m256i i8 = _mm256_cvtps_epi32( v8 ); + __m256i i9 = _mm256_cvtps_epi32( v9 ); + __m256i i10 = _mm256_cvtps_epi32( v10 ); + __m256i i11 = _mm256_cvtps_epi32( v11 ); + __m256i i12 = _mm256_cvtps_epi32( v12 ); + __m256i i13 = _mm256_cvtps_epi32( v13 ); + __m256i i14 = _mm256_cvtps_epi32( v14 ); + __m256i i15 = _mm256_cvtps_epi32( v15 ); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + i4 = _mm256_packs_epi32( i4, i5 ); + i6 = _mm256_packs_epi32( i6, i7 ); + i8 = _mm256_packs_epi32( i8, i9 ); + i10 = _mm256_packs_epi32( i10, i11 ); + i12 = _mm256_packs_epi32( i12, i13 ); + i14 = _mm256_packs_epi32( i14, i15 ); + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + i4 = _mm256_packs_epi16( i4, i6 ); + i8 = _mm256_packs_epi16( i8, i10 ); + i12 = _mm256_packs_epi16( i12, i14 ); + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction is fixing the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + i4 = _mm256_permutevar8x32_epi32( i4, perm ); + i8 = _mm256_permutevar8x32_epi32( i8, perm ); + i12 = _mm256_permutevar8x32_epi32( i12, perm ); + + // Compress the vector into 4 bit/value, and store + __m128i res = packNibbles( i0 ); + _mm_storeu_si128( ( __m128i* )y[i].qs, res ); + + res = packNibbles( i4 ); + _mm_storeu_si128( (( __m128i* )y[i].qs) + 1, res ); + + res = packNibbles( i8 ); + _mm_storeu_si128( (( __m128i* )y[i].qs) + 2, res ); + + res = packNibbles( i12 ); + _mm_storeu_si128( (( __m128i* )y[i].qs) + 3, res ); + } +#elif __ARM_NEON + for (int i = 0; i < nb; i++) { + float32x4_t srcv[32]; + float32x4_t minv[32]; + float32x4_t maxv[32]; + + for (int l = 0; l < 32; l++) srcv[l] = vld1q_f32(x + i*QK + 4*l); + + for (int l = 0; l < 16; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]); + for (int l = 0; l < 8; l++) minv[4*l] = vminq_f32(srcv[4*l], srcv[4*l + 2]); + for (int l = 0; l < 4; l++) minv[8*l] = vminq_f32(srcv[8*l], srcv[8*l + 4]); + for (int l = 0; l < 2; l++) minv[16*l] = vminq_f32(minv[16*l], minv[16*l + 8]); + for (int l = 0; l < 1; l++) minv[32*l] = vminq_f32(minv[32*l], minv[32*l + 16]); + + for (int l = 0; l < 16; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l + 1]); + for (int l = 0; l < 8; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l + 2]); + for (int l = 0; l < 4; l++) maxv[8*l] = vmaxq_f32(srcv[8*l], srcv[8*l + 4]); + for (int l = 0; l < 2; l++) maxv[16*l] = vmaxq_f32(maxv[16*l], maxv[16*l + 8]); + for (int l = 0; l < 1; l++) maxv[32*l] = vmaxq_f32(maxv[32*l], maxv[32*l + 16]); + + const float min = vminvq_f32(minv[0]); + const float max = vmaxvq_f32(maxv[0]); + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = d; + y[i].m = min; + + const float32x4_t minv0 = vdupq_n_f32(min); + + for (int l = 0; l < 32; l++) { + const float32x4_t v = vmulq_n_f32(vsubq_f32(srcv[l], minv0), id); + const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(0.5f)); // needed to round to nearest + const int32x4_t vi = vcvtq_s32_f32(vf); + + y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); + y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); + } + } +#else + // scalar + quantize_row_q4_2_reference(x, vy, k); +#endif +} + static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) { assert(k % QK == 0); const int nb = k / QK; @@ -1178,6 +1450,112 @@ static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, in #endif } +static void dequantize_row_q4_2(const void * restrict vx, float * restrict y, int k) { + assert(k % QK128 == 0); + const int nb = k / QK128; + + const block_q4_2 * restrict x = vx; + +#if defined(__AVX2__) + for (int i = 0; i < nb; i++) { + const __m256 d_v = _mm256_broadcast_ss(&x[i].d); + const __m256 d_m = _mm256_broadcast_ss(&x[i].m); + + const uint8_t * restrict pp = x[i].qs; + + for (int l = 0; l < QK128; l += 32) { + // Load 32x4-bit integers into 32x8-bit integers + __m256i vx8 = bytesFromNibbles(pp+l/2); + + // Convert to 16-bit int + const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0)); + const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1)); + + // Convert to 32-bit int -> float 32 + const __m256 vf[4] = { + _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))), + _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))), + _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))), + _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1))) + }; + + // Scale, add m and store + for (int j = 0; j < 4; j++) { + const __m256 result = _mm256_add_ps(_mm256_mul_ps(vf[j], d_v), d_m); + _mm256_storeu_ps(y + i * QK128 + l + j*8, result); + } + } + } +#elif defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + const float32x4_t vd = vdupq_n_f32(x[i].d); + const float32x4_t vm = vdupq_n_f32(x[i].m); + + const uint8_t * restrict pp = x[i].qs; + + for (int l = 0; l < QK128; l += 16) { + // Load 16x4-bit integers into 8x8-bit integers + const uint8x8_t v8 = vld1_u8(pp + l/2); + + // Expand 4-bit qs to 8-bit bytes + const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f)); + const uint8x8_t v1 = vshr_n_u8(v8, 4); + + // Interleave and combine + const uint8x8_t vx_0 = vzip1_u8(v0, v1); + const uint8x8_t vx_1 = vzip2_u8(v0, v1); + + const uint8x16_t vq = vcombine_u8(vx_0, vx_1); + + // convert to 2x uint16x8_t + const uint16x8_t vi_0 = vmovl_u8(vget_low_u8 (vq)); + const uint16x8_t vi_1 = vmovl_u8(vget_high_u8(vq)); + + // convert to 4x float32x4_t + const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0))); + const float32x4_t vf_1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_0))); + const float32x4_t vf_2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_1))); + const float32x4_t vf_3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_1))); + + // multiply by d and add m + const float32x4_t r0 = vmlaq_f32(vm, vf_0, vd); + const float32x4_t r1 = vmlaq_f32(vm, vf_1, vd); + const float32x4_t r2 = vmlaq_f32(vm, vf_2, vd); + const float32x4_t r3 = vmlaq_f32(vm, vf_3, vd); + + // Store + vst1q_f32(y + i*QK128 + l + 0, r0); + vst1q_f32(y + i*QK128 + l + 4, r1); + vst1q_f32(y + 
i*QK128 + l + 8, r2); + vst1q_f32(y + i*QK128 + l + 12, r3); + } + } +#else + for (int i = 0; i < nb; i++) { + const float d = x[i].d; + const float m = x[i].m; + + const uint8_t * restrict pp = x[i].qs; + + for (int l = 0; l < QK128; l += 2) { + const uint8_t vi = pp[l/2]; + + const int8_t vi0 = vi & 0xf; + const int8_t vi1 = vi >> 4; + + const float v0 = vi0*d + m; + const float v1 = vi1*d + m; + + y[i*QK128 + l + 0] = v0; + y[i*QK128 + l + 1] = v1; + + assert(!isnan(y[i*QK128 + l + 0])); + assert(!isnan(y[i*QK128 + l + 1])); + } + } +#endif +} + // // simd mappings // @@ -2318,6 +2696,164 @@ static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * rest *s = sumf; } +static void ggml_vec_dot_q4_2(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + const int nb = n / QK128; + + const block_q4_2 * restrict x = vx; + const block_q4_2 * restrict y = vy; + + float sumf = 0.0; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + // Accumulator for constant offsets + float acc_offset = 0.0f; + + // Main loop + for (int i = 0; i < nb; ++i) { + const float * d0 = &x[i].d; + const float * d1 = &y[i].d; + + const float * m0 = &x[i].m; + const float * m1 = &y[i].m; + + const __m256 d0v = _mm256_broadcast_ss( d0 ); + const __m256 d1v = _mm256_broadcast_ss( d1 ); + const __m256 m0v = _mm256_broadcast_ss( m0 ); + const __m256 m1v = _mm256_broadcast_ss( m1 ); + + // Compute combined scale for the block + const __m256 scale_01 = _mm256_mul_ps( d0v, d1v ); + + // Compute cross scales for the block + const __m256 scale_0 = _mm256_mul_ps( d0v, m1v ); + const __m256 scale_1 = _mm256_mul_ps( m0v, d1v ); + const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0xAA /* 0b10101010 */ ); + + const uint8_t * restrict x_pp = x[i].qs; + const uint8_t * restrict y_pp = y[i].qs; + + // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes + for (int l = 0; l < QK128; l += 32) { + __m256i bx = bytesFromNibbles( x_pp + l/2); + __m256i by = bytesFromNibbles( y_pp + l/2); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. + + // Sign-extend first 16 signed bytes into int16_t + __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) ); + __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) ); + // Compute products of int16_t integers, add pairwise + __m256i i32 = _mm256_madd_epi16( x16, y16 ); + + // Sign-extend last 16 signed bytes into int16_t vectors + __m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) ); + __m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) ); + // Accumulate products of int16_t integers + i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) ); + + // compute sums of unsigned bytes in bx, by in blocks of 8. + // This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000, + // which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400. 
+ // so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ] + __m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() ); + __m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() ); + __m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) ); + __m256 sums = _mm256_cvtepi32_ps( sumsi ); + + // Convert int32_t to float + __m256 p = _mm256_cvtepi32_ps( i32 ); + // Apply the scale, and accumulate + // acc += d0*d1*x*y + d0*m1*x + d1*m0*y + acc = _mm256_fmadd_ps( scale_01, p, acc ); + acc = _mm256_fmadd_ps( cross_scales, sums, acc ); + } + // acc_offset += m0*m1 (for each entry in the block) + acc_offset += (*m0)*(*m1); + } + + // Return horizontal sum of the acc vector + __m128 res = _mm256_extractf128_ps( acc, 1 ); + res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) ); + res = _mm_add_ps( res, _mm_movehl_ps( res, res ) ); + res = _mm_add_ss( res, _mm_movehdup_ps( res ) ); + + sumf = _mm_cvtss_f32( res ) + acc_offset * QK128; +#elif defined(__ARM_NEON) + float sum00 = 0.0f; + float sum01 = 0.0f; + float sum10 = 0.0f; + float sum11 = 0.0f; + + for (int i = 0; i < nb; ++i) { + const block_q4_1 * restrict x0 = &x[i + 0]; + const block_q4_1 * restrict y0 = &y[i + 0]; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + + const uint8_t * restrict x_pp = x0->qs; + const uint8_t * restrict y_pp = x0->qs; + + for (int l = 0; l < QK128; l += 32) { + const uint8x16_t v0_0 = vld1q_u8(x_pp + l/2); + const uint8x16_t v1_0 = vld1q_u8(y_pp + l/2); + + // and with 0xf + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + + // dot product into uint16x8_t + const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l)); + const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l)); + + const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h)); + const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h)); + + const uint16x8_t pl0 = vaddq_u16(pl0l, pl0h); + const uint16x8_t ph0 = vaddq_u16(ph0l, ph0h); + + sum00 += x0->m*y0->m; + sum01 += y0->m*x0->d*(vaddvq_u8(v0_0l) + vaddvq_u8(v0_0h)); + sum10 += x0->m*y0->d*(vaddvq_u8(v1_0l) + vaddvq_u8(v1_0h)); + sum11 += x0->d*y0->d*vaddvq_u16(vaddq_u16(pl0, ph0)); + } + } + + sumf = QK128*sum00 + sum01 + sum10 + sum11; +#else + // scalar + for (int i = 0; i < nb; i++) { + const float d0 = x[i].d; + const float d1 = y[i].d; + + const float m0 = x[i].m; + const float m1 = y[i].m; + + const uint8_t * restrict p0 = x[i].qs; + const uint8_t * restrict p1 = y[i].qs; + + for (int j = 0; j < QK128/2; j++) { + const uint8_t v0 = p0[j]; + const uint8_t v1 = p1[j]; + + const float f0 = d0*(v0 & 0xf) + m0; + const float f1 = d0*(v0 >> 4) + m0; + + const float f2 = d1*(v1 & 0xf) + m1; + const float f3 = d1*(v1 >> 4) + m1; + + sumf += f0*f2 + f1*f3; + } + } +#endif + + *s = sumf; +} + // compute GGML_VEC_DOT_UNROLL dot products at once // xs - x row stride in bytes inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) { @@ -2564,22 +3100,22 @@ static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = { [GGML_TYPE_F16] = 1, [GGML_TYPE_Q4_0] = QK, [GGML_TYPE_Q4_1] = QK, + [GGML_TYPE_Q4_2] = QK128, [GGML_TYPE_I8] = 1, [GGML_TYPE_I16] = 1, [GGML_TYPE_I32] = 1, }; -static_assert(GGML_TYPE_COUNT == 7, "GGML_BLCK_SIZE is outdated"); static 
const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { [GGML_TYPE_F32] = sizeof(float), [GGML_TYPE_F16] = sizeof(ggml_fp16_t), [GGML_TYPE_Q4_0] = sizeof(block_q4_0), [GGML_TYPE_Q4_1] = sizeof(block_q4_1), + [GGML_TYPE_Q4_2] = sizeof(block_q4_2), [GGML_TYPE_I8] = sizeof(int8_t), [GGML_TYPE_I16] = sizeof(int16_t), [GGML_TYPE_I32] = sizeof(int32_t), }; -static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_SIZE is outdated"); static const char * GGML_OP_LABEL[GGML_OP_COUNT] = { "NONE", @@ -3247,6 +3783,10 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { { GGML_ASSERT(false); } break; + case GGML_TYPE_Q4_2: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); @@ -3307,6 +3847,10 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { { GGML_ASSERT(false); } break; + case GGML_TYPE_Q4_2: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); @@ -3361,6 +3905,10 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { { GGML_ASSERT(false); } break; + case GGML_TYPE_Q4_2: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -3405,6 +3953,10 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { { GGML_ASSERT(false); } break; + case GGML_TYPE_Q4_2: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -3447,6 +3999,10 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { { GGML_ASSERT(false); } break; + case GGML_TYPE_Q4_2: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -3491,6 +4047,10 @@ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { { GGML_ASSERT(false); } break; + case GGML_TYPE_Q4_2: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -5229,6 +5789,7 @@ static void ggml_compute_forward_dup( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5310,6 +5871,7 @@ static void ggml_compute_forward_add( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5362,6 +5924,7 @@ static void ggml_compute_forward_sub( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5414,6 +5977,7 @@ static void ggml_compute_forward_mul( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5466,6 +6030,7 @@ static void ggml_compute_forward_div( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5514,6 +6079,7 @@ static void ggml_compute_forward_sqr( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5562,6 +6128,7 @@ static void ggml_compute_forward_sqrt( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5620,6 +6187,7 @@ static void ggml_compute_forward_sum( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ 
-5697,6 +6265,7 @@ static void ggml_compute_forward_mean( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5761,6 +6330,7 @@ static void ggml_compute_forward_repeat( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5809,6 +6379,7 @@ static void ggml_compute_forward_abs( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5857,6 +6428,7 @@ static void ggml_compute_forward_sgn( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5905,6 +6477,7 @@ static void ggml_compute_forward_neg( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5953,6 +6526,7 @@ static void ggml_compute_forward_step( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6001,6 +6575,7 @@ static void ggml_compute_forward_relu( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6066,6 +6641,7 @@ static void ggml_compute_forward_gelu( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6133,6 +6709,7 @@ static void ggml_compute_forward_silu( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6219,6 +6796,7 @@ static void ggml_compute_forward_norm( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6299,6 +6877,7 @@ static void ggml_compute_forward_rms_norm( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6708,6 +7287,12 @@ static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = { .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_1_reference, .vec_dot_q = ggml_vec_dot_q4_1, }, + [GGML_TYPE_Q4_2] = { + .dequantize_row_q = dequantize_row_q4_2, + .quantize_row_q = quantize_row_q4_2, + .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_2_reference, + .vec_dot_q = ggml_vec_dot_q4_2, + }, }; // For internal test use @@ -6915,6 +7500,10 @@ static void ggml_compute_forward_mul_mat( { ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_2: + { + ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst); + } break; case GGML_TYPE_F16: { ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst); @@ -6953,6 +7542,26 @@ static void ggml_compute_forward_mul_mat( printf("\n"); exit(0); } + } else if (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_Q4_2) { + static int first = 8; + printf("src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]); + printf("src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]); + printf("dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + if (first) { + --first; + } else { + for (int k = 0; k < dst->ne[1]; ++k) { + for (int j = 0; j < dst->ne[0]/16; ++j) { + for (int i = 0; i < 16; ++i) { + printf("%8.4f ", ((float *) 
dst->data)[k*dst->ne[0] + j*16 + i]); + } + printf("\n"); + } + printf("\n"); + } + printf("\n"); + exit(0); + } } else { printf("aaaa src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]); printf("aaaa src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]); @@ -7010,6 +7619,7 @@ static void ggml_compute_forward_scale( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7178,6 +7788,10 @@ static void ggml_compute_forward_get_rows( { ggml_compute_forward_get_rows_q(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_2: + { + ggml_compute_forward_get_rows_q(params, src0, src1, dst); + } break; case GGML_TYPE_F16: { ggml_compute_forward_get_rows_f16(params, src0, src1, dst); @@ -7264,6 +7878,7 @@ static void ggml_compute_forward_diag_mask_inf( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7358,6 +7973,7 @@ static void ggml_compute_forward_soft_max( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7533,6 +8149,7 @@ static void ggml_compute_forward_rope( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7801,6 +8418,7 @@ static void ggml_compute_forward_conv_1d_1s( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -8069,6 +8687,7 @@ static void ggml_compute_forward_conv_1d_2s( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -8554,6 +9173,7 @@ static void ggml_compute_forward_flash_attn( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -8765,6 +9385,7 @@ static void ggml_compute_forward_flash_ff( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: From d405209c45ce886c173fc80aac3fd0e9f37c103f Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 14:55:16 +0900 Subject: [PATCH 05/10] add Q4_2 --- ggml.h | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml.h b/ggml.h index 7d8b7a1829dd0..7d6f6a53f692d 100644 --- a/ggml.h +++ b/ggml.h @@ -203,6 +203,7 @@ enum ggml_type { GGML_TYPE_F16 = 1, GGML_TYPE_Q4_0 = 2, GGML_TYPE_Q4_1 = 3, + GGML_TYPE_Q4_2 = 5, GGML_TYPE_I8, GGML_TYPE_I16, GGML_TYPE_I32, From b0c6171cd7617a511c29f3705b9f0a221289be2d Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 15:03:29 +0900 Subject: [PATCH 06/10] fix tab --- llama.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llama.h b/llama.h index 890b111ba3f6c..3f62077c6cc1e 100644 --- a/llama.h +++ b/llama.h @@ -72,8 +72,8 @@ extern "C" { LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 - LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_2_SOME_F16 = 6, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_2_SOME_F16 = 6, // except 1d tensors }; LLAMA_API struct llama_context_params llama_context_default_params(); From 
75b39c4b261ef6bc287af9f4f3eeeb07cbaeb6cf Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 15:05:34 +0900 Subject: [PATCH 07/10] fix tab --- llama.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/llama.cpp b/llama.cpp index 4f89ae8fdd4e2..078ff5a072c0c 100644 --- a/llama.cpp +++ b/llama.cpp @@ -276,7 +276,7 @@ static const char * llama_format_type(enum ggml_type type) { case GGML_TYPE_F16: return "f16"; case GGML_TYPE_Q4_0: return "q4_0"; case GGML_TYPE_Q4_1: return "q4_1"; - case GGML_TYPE_Q4_2: return "q4_2"; + case GGML_TYPE_Q4_2: return "q4_2"; default: LLAMA_ASSERT(false); } } @@ -832,9 +832,11 @@ static const char *llama_ftype_name(enum llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16"; case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; - case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:return "mostly Q4_1, some F16"; + case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: + return "mostly Q4_1, some F16"; case LLAMA_FTYPE_MOSTLY_Q4_2: return "mostly Q4_2"; - case LLAMA_FTYPE_MOSTLY_Q4_2_SOME_F16:return "mostly Q4_2, some F16"; + case LLAMA_FTYPE_MOSTLY_Q4_2_SOME_F16: + return "mostly Q4_2, some F16"; default: return "unknown, may not work"; } } @@ -1568,7 +1570,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s switch (ftype) { case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break; case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break; - case LLAMA_FTYPE_MOSTLY_Q4_2: quantized_type = GGML_TYPE_Q4_2; break; + case LLAMA_FTYPE_MOSTLY_Q4_2: quantized_type = GGML_TYPE_Q4_2; break; default: throw format("invalid output file type %d\n", ftype); }; From e01b2d04c998c336d1d586f345ff77f4a707ec5f Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 15:07:02 +0900 Subject: [PATCH 08/10] fix tab --- ggml.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.h b/ggml.h index 7d6f6a53f692d..1b20b58ed19d5 100644 --- a/ggml.h +++ b/ggml.h @@ -203,7 +203,7 @@ enum ggml_type { GGML_TYPE_F16 = 1, GGML_TYPE_Q4_0 = 2, GGML_TYPE_Q4_1 = 3, - GGML_TYPE_Q4_2 = 5, + GGML_TYPE_Q4_2 = 5, GGML_TYPE_I8, GGML_TYPE_I16, GGML_TYPE_I32, From 8b9316be70148f63737b13b406420a0631cc1b06 Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 15:16:39 +0900 Subject: [PATCH 09/10] fix tab --- ggml.c | 186 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 93 insertions(+), 93 deletions(-) diff --git a/ggml.c b/ggml.c index 2081e26fd36da..a6051a03f54ee 100644 --- a/ggml.c +++ b/ggml.c @@ -1154,9 +1154,9 @@ static void quantize_row_q4_2(const float * restrict x, void * restrict vy, int i14 = _mm256_packs_epi32( i14, i15 ); // Convert int16 to int8 i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - i4 = _mm256_packs_epi16( i4, i6 ); - i8 = _mm256_packs_epi16( i8, i10 ); - i12 = _mm256_packs_epi16( i12, i14 ); + i4 = _mm256_packs_epi16( i4, i6 ); + i8 = _mm256_packs_epi16( i8, i10 ); + i12 = _mm256_packs_epi16( i12, i14 ); // We got our precious signed bytes, but the order is now wrong // These AVX2 pack instructions process 16-byte pieces independently @@ -1188,8 +1188,8 @@ static void quantize_row_q4_2(const float * restrict x, void * restrict vy, int for (int l = 0; l < 32; l++) srcv[l] = vld1q_f32(x + i*QK + 4*l); - for (int l = 0; l < 16; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]); - for 
(int l = 0; l < 8; l++) minv[4*l] = vminq_f32(srcv[4*l], srcv[4*l + 2]); + for (int l = 0; l < 16; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]); + for (int l = 0; l < 8; l++) minv[4*l] = vminq_f32(srcv[4*l], srcv[4*l + 2]); for (int l = 0; l < 4; l++) minv[8*l] = vminq_f32(srcv[8*l], srcv[8*l + 4]); for (int l = 0; l < 2; l++) minv[16*l] = vminq_f32(minv[16*l], minv[16*l + 8]); for (int l = 0; l < 1; l++) minv[32*l] = vminq_f32(minv[32*l], minv[32*l + 16]); @@ -2731,46 +2731,46 @@ static void ggml_vec_dot_q4_2(const int n, float * restrict s, const void * rest const __m256 scale_1 = _mm256_mul_ps( m0v, d1v ); const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0xAA /* 0b10101010 */ ); - const uint8_t * restrict x_pp = x[i].qs; - const uint8_t * restrict y_pp = y[i].qs; + const uint8_t * restrict x_pp = x[i].qs; + const uint8_t * restrict y_pp = y[i].qs; // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - for (int l = 0; l < QK128; l += 32) { - __m256i bx = bytesFromNibbles( x_pp + l/2); - __m256i by = bytesFromNibbles( y_pp + l/2); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. - - // Sign-extend first 16 signed bytes into int16_t - __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) ); - __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) ); - // Compute products of int16_t integers, add pairwise - __m256i i32 = _mm256_madd_epi16( x16, y16 ); - - // Sign-extend last 16 signed bytes into int16_t vectors - __m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) ); - __m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) ); - // Accumulate products of int16_t integers - i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) ); - - // compute sums of unsigned bytes in bx, by in blocks of 8. - // This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000, - // which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400. - // so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ] - __m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() ); - __m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() ); - __m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) ); - __m256 sums = _mm256_cvtepi32_ps( sumsi ); - - // Convert int32_t to float - __m256 p = _mm256_cvtepi32_ps( i32 ); - // Apply the scale, and accumulate - // acc += d0*d1*x*y + d0*m1*x + d1*m0*y - acc = _mm256_fmadd_ps( scale_01, p, acc ); - acc = _mm256_fmadd_ps( cross_scales, sums, acc ); - } - // acc_offset += m0*m1 (for each entry in the block) - acc_offset += (*m0)*(*m1); + for (int l = 0; l < QK128; l += 32) { + __m256i bx = bytesFromNibbles( x_pp + l/2); + __m256i by = bytesFromNibbles( y_pp + l/2); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. 
+ + // Sign-extend first 16 signed bytes into int16_t + __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) ); + __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) ); + // Compute products of int16_t integers, add pairwise + __m256i i32 = _mm256_madd_epi16( x16, y16 ); + + // Sign-extend last 16 signed bytes into int16_t vectors + __m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) ); + __m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) ); + // Accumulate products of int16_t integers + i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) ); + + // compute sums of unsigned bytes in bx, by in blocks of 8. + // This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000, + // which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400. + // so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ] + __m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() ); + __m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() ); + __m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) ); + __m256 sums = _mm256_cvtepi32_ps( sumsi ); + + // Convert int32_t to float + __m256 p = _mm256_cvtepi32_ps( i32 ); + // Apply the scale, and accumulate + // acc += d0*d1*x*y + d0*m1*x + d1*m0*y + acc = _mm256_fmadd_ps( scale_01, p, acc ); + acc = _mm256_fmadd_ps( cross_scales, sums, acc ); + } + // acc_offset += m0*m1 (for each entry in the block) + acc_offset += (*m0)*(*m1); } // Return horizontal sum of the acc vector @@ -2792,35 +2792,35 @@ static void ggml_vec_dot_q4_2(const int n, float * restrict s, const void * rest const uint8x16_t m4b = vdupq_n_u8(0xf); - const uint8_t * restrict x_pp = x0->qs; - const uint8_t * restrict y_pp = x0->qs; + const uint8_t * restrict x_pp = x0->qs; + const uint8_t * restrict y_pp = x0->qs; - for (int l = 0; l < QK128; l += 32) { - const uint8x16_t v0_0 = vld1q_u8(x_pp + l/2); - const uint8x16_t v1_0 = vld1q_u8(y_pp + l/2); + for (int l = 0; l < QK128; l += 32) { + const uint8x16_t v0_0 = vld1q_u8(x_pp + l/2); + const uint8x16_t v1_0 = vld1q_u8(y_pp + l/2); - // and with 0xf - const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); - const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + // and with 0xf + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); - const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); - const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); - // dot product into uint16x8_t - const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l)); - const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l)); + // dot product into uint16x8_t + const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l)); + const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l)); - const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h)); - const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h)); + const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h)); + const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h)); - const uint16x8_t pl0 = vaddq_u16(pl0l, pl0h); - const uint16x8_t ph0 = vaddq_u16(ph0l, ph0h); + const uint16x8_t pl0 = vaddq_u16(pl0l, pl0h); + const uint16x8_t ph0 = vaddq_u16(ph0l, ph0h); - sum00 += x0->m*y0->m; - sum01 += y0->m*x0->d*(vaddvq_u8(v0_0l) + 
vaddvq_u8(v0_0h)); - sum10 += x0->m*y0->d*(vaddvq_u8(v1_0l) + vaddvq_u8(v1_0h)); - sum11 += x0->d*y0->d*vaddvq_u16(vaddq_u16(pl0, ph0)); - } + sum00 += x0->m*y0->m; + sum01 += y0->m*x0->d*(vaddvq_u8(v0_0l) + vaddvq_u8(v0_0h)); + sum10 += x0->m*y0->d*(vaddvq_u8(v1_0l) + vaddvq_u8(v1_0h)); + sum11 += x0->d*y0->d*vaddvq_u16(vaddq_u16(pl0, ph0)); + } } sumf = QK128*sum00 + sum01 + sum10 + sum11; @@ -3100,7 +3100,7 @@ static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = { [GGML_TYPE_F16] = 1, [GGML_TYPE_Q4_0] = QK, [GGML_TYPE_Q4_1] = QK, - [GGML_TYPE_Q4_2] = QK128, + [GGML_TYPE_Q4_2] = QK128, [GGML_TYPE_I8] = 1, [GGML_TYPE_I16] = 1, [GGML_TYPE_I32] = 1, @@ -3111,7 +3111,7 @@ static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { [GGML_TYPE_F16] = sizeof(ggml_fp16_t), [GGML_TYPE_Q4_0] = sizeof(block_q4_0), [GGML_TYPE_Q4_1] = sizeof(block_q4_1), - [GGML_TYPE_Q4_2] = sizeof(block_q4_2), + [GGML_TYPE_Q4_2] = sizeof(block_q4_2), [GGML_TYPE_I8] = sizeof(int8_t), [GGML_TYPE_I16] = sizeof(int16_t), [GGML_TYPE_I32] = sizeof(int32_t), @@ -5789,7 +5789,7 @@ static void ggml_compute_forward_dup( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5871,7 +5871,7 @@ static void ggml_compute_forward_add( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5924,7 +5924,7 @@ static void ggml_compute_forward_sub( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5977,7 +5977,7 @@ static void ggml_compute_forward_mul( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6030,7 +6030,7 @@ static void ggml_compute_forward_div( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6079,7 +6079,7 @@ static void ggml_compute_forward_sqr( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6128,7 +6128,7 @@ static void ggml_compute_forward_sqrt( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6187,7 +6187,7 @@ static void ggml_compute_forward_sum( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6265,7 +6265,7 @@ static void ggml_compute_forward_mean( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6330,7 +6330,7 @@ static void ggml_compute_forward_repeat( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6379,7 +6379,7 @@ static void ggml_compute_forward_abs( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6428,7 +6428,7 @@ static void ggml_compute_forward_sgn( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + 
case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6477,7 +6477,7 @@ static void ggml_compute_forward_neg( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6526,7 +6526,7 @@ static void ggml_compute_forward_step( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6575,7 +6575,7 @@ static void ggml_compute_forward_relu( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6641,7 +6641,7 @@ static void ggml_compute_forward_gelu( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6709,7 +6709,7 @@ static void ggml_compute_forward_silu( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6877,7 +6877,7 @@ static void ggml_compute_forward_rms_norm( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7619,7 +7619,7 @@ static void ggml_compute_forward_scale( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7878,7 +7878,7 @@ static void ggml_compute_forward_diag_mask_inf( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -7973,7 +7973,7 @@ static void ggml_compute_forward_soft_max( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -8149,7 +8149,7 @@ static void ggml_compute_forward_rope( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -8418,7 +8418,7 @@ static void ggml_compute_forward_conv_1d_1s( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -8687,7 +8687,7 @@ static void ggml_compute_forward_conv_1d_2s( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -9173,7 +9173,7 @@ static void ggml_compute_forward_flash_attn( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -9385,7 +9385,7 @@ static void ggml_compute_forward_flash_ff( } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: + case GGML_TYPE_Q4_2: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: From db4b29301c99c9d4ad50dbcdfc3830212ed93475 Mon Sep 17 00:00:00 2001 From: qwopqwop200 Date: Thu, 13 Apr 2023 15:30:14 +0900 Subject: [PATCH 10/10] Add files via upload --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000..f3944951a486b --- /dev/null +++ 
b/requirements.txt @@ -0,0 +1,2 @@ +numpy==1.24 +sentencepiece==0.1.97
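
Note on the Q4_2 layout added by the patches above: Q4_2 follows the Q4_1 scheme (a per-block scale d and minimum m) but with a group size of 128 (QK128), as stated in the first commit. The block struct and quantize_row_q4_2_reference are not included in this excerpt, so the sketch below reconstructs the assumed layout and the scalar round trip from the scalar dequantize path shown in ggml.c. The names block_q4_2_sketch, QK2 and the two helper functions are illustrative stand-ins, not the identifiers used in the patch, and roundf is used where the SIMD paths add 0.5f before converting.

#include <assert.h>
#include <math.h>
#include <stdint.h>

#define QK2 128   // group size assumed from the patch (QK128)

// Assumed block layout: one scale, one offset, 128 packed 4-bit values.
typedef struct {
    float   d;              // scale
    float   m;              // block minimum (addend)
    uint8_t qs[QK2 / 2];    // two 4-bit values per byte, low nibble = even element
} block_q4_2_sketch;

// Reference quantization of one row of k floats (k must be a multiple of 128),
// mirroring the Q4_1 scheme with the larger group size used by this patch.
void quantize_row_q4_2_sketch(const float * x, block_q4_2_sketch * y, int k) {
    assert(k % QK2 == 0);
    const int nb = k / QK2;

    for (int i = 0; i < nb; i++) {
        float min =  INFINITY;
        float max = -INFINITY;
        for (int l = 0; l < QK2; l++) {
            const float v = x[i*QK2 + l];
            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;
        y[i].m = min;

        for (int l = 0; l < QK2; l += 2) {
            const uint8_t v0 = (uint8_t) roundf((x[i*QK2 + l + 0] - min)*id);
            const uint8_t v1 = (uint8_t) roundf((x[i*QK2 + l + 1] - min)*id);
            assert(v0 < 16 && v1 < 16);
            y[i].qs[l/2] = v0 | (v1 << 4);
        }
    }
}

// Reference dequantization, matching the scalar path of dequantize_row_q4_2 above.
void dequantize_row_q4_2_sketch(const block_q4_2_sketch * x, float * y, int k) {
    assert(k % QK2 == 0);
    const int nb = k / QK2;

    for (int i = 0; i < nb; i++) {
        const float d = x[i].d;
        const float m = x[i].m;
        for (int l = 0; l < QK2; l += 2) {
            const uint8_t vi = x[i].qs[l/2];
            y[i*QK2 + l + 0] = (vi & 0xf)*d + m;
            y[i*QK2 + l + 1] = (vi >> 4) *d + m;
        }
    }
}

With this assumed layout, each block stores two floats plus 64 packed bytes for 128 weights (72 bytes, about 4.5 bits per weight), compared with 24 bytes per 32 weights (6 bits per weight) for Q4_1's 32-element blocks.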
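
The AVX2 path of ggml_vec_dot_q4_2 accumulates the block dot product in four parts, as its comments note (acc += d0*d1*x*y + d0*m1*x + d1*m0*y, plus an m0*m1 offset added once per block and multiplied by QK128 at the end). A scalar sketch of that decomposition, reusing the illustrative block_q4_2_sketch and QK2 definitions from the sketch above:

// Expand (d0*q0 + m0)*(d1*q1 + m1) and accumulate the four terms separately,
// so only integer sums are needed inside the block and the floating-point
// scales are applied once per block.
float vec_dot_q4_2_decomposed_sketch(int n, const block_q4_2_sketch * x, const block_q4_2_sketch * y) {
    const int nb = n / QK2;

    float sumf = 0.0f;

    for (int i = 0; i < nb; i++) {
        int sum_xy = 0;  // sum of q0*q1 over the block
        int sum_x  = 0;  // sum of q0
        int sum_y  = 0;  // sum of q1

        for (int j = 0; j < QK2/2; j++) {
            const uint8_t vx = x[i].qs[j];
            const uint8_t vy = y[i].qs[j];

            const int x0 = vx & 0xf, x1 = vx >> 4;
            const int y0 = vy & 0xf, y1 = vy >> 4;

            sum_xy += x0*y0 + x1*y1;
            sum_x  += x0 + x1;
            sum_y  += y0 + y1;
        }

        // d0*d1*sum(q0*q1) + d0*m1*sum(q0) + m0*d1*sum(q1) + QK2*m0*m1
        sumf += x[i].d*y[i].d*sum_xy
              + x[i].d*y[i].m*sum_x
              + x[i].m*y[i].d*sum_y
              + QK2*x[i].m*y[i].m;
    }

    return sumf;
}

Keeping the four sums separate is what lets the SIMD code operate on raw 4-bit integers (the _mm256_madd_epi16 products and _mm256_sad_epu8 byte sums) and apply the per-block scales with a fused multiply-add only once per 32-element chunk.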