diff --git a/Makefile b/Makefile
index 6fdf7470..73dac234 100644
--- a/Makefile
+++ b/Makefile
@@ -1,76 +1,32 @@
-# choose your compiler, e.g. gcc/clang
-# example override to clang: make run CC=clang
-CC = gcc
+TARGET=run
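+# TOOLCHAIN_PREFIX, SYSROOT, CFLAGS and LDFLAGS are exported by envsetup.sh (see the README)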
-# the most basic way of building that is most likely to work on most systems
-.PHONY: run
-run: run.c
- $(CC) -O3 -o run run.c -lm
- $(CC) -O3 -o runq runq.c -lm
+ifeq (,$(TOOLCHAIN_PREFIX))
+$(error TOOLCHAIN_PREFIX is not set)
+endif
-# useful for a debug build, can then e.g. analyze with valgrind, example:
-# $ valgrind --leak-check=full ./run out/model.bin -n 3
-rundebug: run.c
- $(CC) -g -o run run.c -lm
- $(CC) -g -o runq runq.c -lm
+ifeq (,$(CFLAGS))
+$(error CFLAGS is not set)
+endif
-# https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
-# https://simonbyrne.github.io/notes/fastmath/
-# -Ofast enables all -O3 optimizations.
-# Disregards strict standards compliance.
-# It also enables optimizations that are not valid for all standard-compliant programs.
-# It turns on -ffast-math, -fallow-store-data-races and the Fortran-specific
-# -fstack-arrays, unless -fmax-stack-var-size is specified, and -fno-protect-parens.
-# It turns off -fsemantic-interposition.
-# In our specific application this is *probably* okay to use
-.PHONY: runfast
-runfast: run.c
- $(CC) -Ofast -o run run.c -lm
- $(CC) -Ofast -o runq runq.c -lm
+ifeq (,$(LDFLAGS))
+$(error LDFLAGS is not set)
+endif
-# additionally compiles with OpenMP, allowing multithreaded runs
-# make sure to also enable multiple threads when running, e.g.:
-# OMP_NUM_THREADS=4 ./run out/model.bin
-.PHONY: runomp
-runomp: run.c
- $(CC) -Ofast -fopenmp -march=native run.c -lm -o run
- $(CC) -Ofast -fopenmp -march=native runq.c -lm -o runq
+CC = $(TOOLCHAIN_PREFIX)gcc
-.PHONY: win64
-win64:
- x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o run.exe -I. run.c win.c
- x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o runq.exe -I. runq.c win.c
+CFLAGS += -I$(SYSROOT)/usr/include
+CFLAGS += -Ofast
-# compiles with gnu99 standard flags for amazon linux, coreos, etc. compatibility
-.PHONY: rungnu
-rungnu:
- $(CC) -Ofast -std=gnu11 -o run run.c -lm
- $(CC) -Ofast -std=gnu11 -o runq runq.c -lm
+LDFLAGS += -L$(SYSROOT)/lib
+LDFLAGS += -L$(SYSROOT)/usr/lib
-.PHONY: runompgnu
-runompgnu:
- $(CC) -Ofast -fopenmp -std=gnu11 run.c -lm -o run
- $(CC) -Ofast -fopenmp -std=gnu11 runq.c -lm -o runq
+$(TARGET): $(TARGET).c
+	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $< -lm
-# run all tests
-.PHONY: test
-test:
- pytest
-
-# run only tests for run.c C implementation (is a bit faster if only C code changed)
-.PHONY: testc
-testc:
- pytest -k runc
-
-# run the C tests, without touching pytest / python
-# to increase verbosity level run e.g. as `make testcc VERBOSITY=1`
-VERBOSITY ?= 0
-.PHONY: testcc
-testcc:
- $(CC) -DVERBOSITY=$(VERBOSITY) -O3 -o testc test.c -lm
- ./testc
+%.o: %.c
+ $(CC) $(CFLAGS) -o $@ -c $<
.PHONY: clean
clean:
- rm -f run
- rm -f runq
+	@rm -f *.o $(TARGET)
diff --git a/README.md b/README.md
index f0eb9ecd..dc45f4f0 100644
--- a/README.md
+++ b/README.md
@@ -1,401 +1,32 @@
-## llama2.c
-
-
-
-
-
-Have you ever wanted to inference a baby [Llama 2](https://ai.meta.com/llama/) model in pure C? No? Well, now you can!
-
-Train the Llama 2 LLM architecture in PyTorch then inference it with one simple 700-line C file ([run.c](run.c)). You might think that you need many billion parameter LLMs to do anything useful, but in fact very small LLMs can have surprisingly strong performance if you make the domain narrow enough (ref: [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) paper). This repo is a "fullstack" train + inference solution for Llama 2 LLM, with focus on minimalism and simplicity.
-
-As the architecture is identical, you can also load and inference Meta's Llama 2 models. However, the current code only inferences models in fp32, so you will most likely not be able to productively load models larger than 7B. Work on model quantization is currently ongoing.
-
-Please note that this repo started recently as a fun weekend project: I took my earlier [nanoGPT](https://github.com/karpathy/nanoGPT), tuned it to implement the Llama-2 architecture instead of GPT-2, and the meat of it was writing the C inference engine in [run.c](run.c). So the project is young and moving quickly. Hat tip to the awesome [llama.cpp](https://github.com/ggerganov/llama.cpp) for inspiring this project. Compared to llama.cpp, I wanted something super simple, minimal, and educational so I chose to hard-code the Llama 2 architecture and just roll one inference file of pure C with no dependencies.
-
-## feel the magic
-
-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/karpathy/llama2.c/blob/master/run.ipynb)
-
-First, navigate to the folder where you keep your projects and clone this repository to this folder:
-
-```bash
-git clone https://github.com/karpathy/llama2.c.git
-```
-
-Then, open the repository folder:
-
-```bash
-cd llama2.c
-```
-
-Now, let's just run a baby Llama 2 model in C. You need a model checkpoint. Download this 15M parameter model I trained on the [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) dataset (~60MB download):
-
-```bash
-wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
-```
-
-Compile and run the C code:
-
-```bash
-make run
-./run stories15M.bin
-```
-
-You'll see the text stream a sample. On my M1 MacBook Air this runs at ~110 tokens/s. See [performance](#performance) or the Makefile for compile flags that can significantly speed this up. We can also try a bit bigger 42M parameter model:
-
-```bash
-wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin
-./run stories42M.bin
-```
-
-This still runs at interactive rates and samples more coherent and diverse stories:
-
-> Once upon a time, there was a little girl named Lily. She loved playing with her toys on top of her bed. One day, she decided to have a tea party with her stuffed animals. She poured some tea into a tiny teapot and put it on top of the teapot. Suddenly, her little brother Max came into the room and wanted to join the tea party too. Lily didn't want to share her tea and she told Max to go away. Max started to cry and Lily felt bad. She decided to yield her tea party to Max and they both shared the teapot. But then, something unexpected happened. The teapot started to shake and wiggle. Lily and Max were scared and didn't know what to do. Suddenly, the teapot started to fly towards the ceiling and landed on the top of the bed. Lily and Max were amazed and they hugged each other. They realized that sharing was much more fun than being selfish. From that day on, they always shared their tea parties and toys.
-
-You can also prompt the model with a prefix or a number of additional command line arguments, e.g. to sample at temperature 0.8 for 256 steps and with a prompt:
-
-```bash
-./run stories42M.bin -t 0.8 -n 256 -i "One day, Lily met a Shoggoth"
-```
-
-> One day, Lily met a Shoggoth. He was very shy, but was also very generous. Lily said “Hello Shoggy! Can I be your friend?” Shoggy was happy to have a friend and said “Yes, let’s explore the universe together!” So they set off on a journey to explore the universe. As they travelled, Shoggy was happy to explain to Lily about all the wonderful things in the universe. At the end of the day, Lily and Shoggy had gathered lots of wonderful things from the universe, and they both felt very proud. They promised to explore the universe as one big pair and to never stop being generous to each other.
-
-There is also an even better 110M param model available, see [models](#models).
-
-Quick note on sampling, the recommendation for ~best results is to sample with `-t 1.0 -p 0.9`, i.e. temperature 1.0 (default) but also top-p sampling at 0.9 (default). Intuitively, top-p ensures that tokens with tiny probabilities do not get sampled, so we can't get "unlucky" during sampling, and we are less likely to go "off the rails" afterwards. More generally, to control the diversity of samples use either the temperature (i.e. vary `-t` between 0 and 1 and keep top-p off with `-p 0`) or the top-p value (i.e. vary `-p` between 0 and 1 and keep `-t 1`), but not both. Nice explainers on LLM sampling strategies include [this](https://peterchng.com/blog/2023/05/02/token-selection-strategies-top-k-top-p-and-temperature/), [this](https://docs.cohere.com/docs/controlling-generation-with-top-k-top-p) or [this](https://huggingface.co/blog/how-to-generate).
-
-## Meta's Llama 2 models
-
-As the neural net architecture is identical, we can also inference the Llama 2 models released by Meta. Sadly there is a bit of friction here due to licensing (I can't directly upload the checkpoints, I think). So Step 1, get the Llama 2 checkpoints by following the [Meta instructions](https://github.com/facebookresearch/llama). Once we have those checkpoints, we have to convert them into the llama2.c format.
-For this we need to install the python dependencies (`pip install -r requirements.txt`) and then use the `export.py` file, e.g. for 7B model:
-
-```bash
-python export.py llama2_7b.bin --meta-llama path/to/llama/model/7B
-```
-
-The export will take ~10 minutes or so and generate a 26GB file (the weights of the 7B model in float32) called `llama2_7b.bin` in the current directory. It has been [reported](https://github.com/karpathy/llama2.c/pull/85) that despite efforts. I would not attempt to run anything above 7B right now for two reasons: first, 13B+ currently doesn't work because of integer flow in pointer arithmetic, which is yet to be fixed, and second, even if it were fixed, this repo is doing float32 inference right now, so it would be fairly unusably slow. Once the export is done, we can run it:
-
-```bash
-./run llama2_7b.bin
-```
-
-This ran at about 4 tokens/s compiled with [OpenMP](#OpenMP) on 96 threads on my CPU Linux box in the cloud. (On my MacBook Air M1, currently it's closer to 30 seconds per token if you just build with `make runfast`.) Example output:
-
-> The purpose of this document is to highlight the state-of-the-art of CoO generation technologies, both recent developments and those in commercial use. The focus is on the technologies with the highest merit to become the dominating processes of the future and therefore to be technologies of interest to S&T ... R&D. As such, CoO generation technologies developed in Russia, Japan and Europe are described in some depth. The document starts with an introduction to cobalt oxides as complex products and a short view on cobalt as an essential material. The document continues with the discussion of the available CoO generation processes with respect to energy and capital consumption as well as to environmental damage.
-
-base models... ¯\\_(ツ)_/¯. Since we can inference the base model, it should be possible to also inference the chat model quite easily, and have a conversation with it. And if we can find a way to run 7B more efficiently, we can start adding LoRA to our training script, and going wild with finetunes all within the repo!
-
-You can also chat with the Llama Chat models. Export the chat model exactly as above:
-
-```bash
-python export.py llama2_7b_chat.bin --meta-llama /path/to/7B-chat
-```
-
-Then chat with it by specifying the chat mode using the `-m` flag, e.g.:
-
-```bash
-./run llama2_7b_chat.bin -m chat
-```
-
-You can also try Meta's Code Llama models even if support for them is incomplete. In particular, some hyperparameters changed (e.g. the constant in RoPE layer), so the inference is not exactly correct and a bit buggy right now. Looking into fixes. Make sure to build the tokenizer for the plain and instruct variants and pass it when doing inference.
-
-```bash
-python export.py codellama2_7b.bin --meta-llama /path/to/CodeLlama-7b
-python tokenizer.py --tokenizer-model=/path/to/CodeLlama-7b/tokenizer.model
-./run codellama2_7b.bin -z /path/to/CodeLlama-7b/tokenizer.bin
-```
-
-Chat with Code Llama Instruct:
-
-```bash
-python export.py codellama2_7b_instruct.bin --meta-llama /path/to/CodeLlama-7b-Instruct
-python tokenizer.py --tokenizer-model=/path/to/CodeLlama-7b-Instruct/tokenizer.model
-./run codellama2_7b_instruct.bin -m chat -z /path/to/CodeLlama-7b-Instruct/tokenizer.bin
-```
-
-## int8 quantization
-
-The (default) script [run.c](run.c), above, uses a float32 forward pass, where the entire calculation of the forward pass is kept in fp32. This is very easy to understand as far as reference code goes, but it has the following downsides: the model checkpoint files are very large (it takes 4 bytes per every individual weight), and the forward pass is relatively slow. The (very) common inference optimization employed in practice is to quantize the model parameters to lower precision, giving up a little bit of correctness in return for smaller checkpoint sizes and faster forward passes (as most of the inference uses integer arithmetic). Empirically, LLMs can tolerate precisions as low as 4-bit (or even lower), but we use int8 here because it is a "safe" setting that gets us the benefits but doesn't sacrifice too much of the model accuracy. Only the weights that participate in matmuls are quantized. All the other parameters (e.g. especially the scale and bias in RMSNorm) are kept in float32, because these layers are very sensitive. Now, if all you're after is reduction in checkpoint sizes, you could quantize the weights, save the checkpoint, and then dequantize them in run.c, and do float32 inference as normal and call it a day. This is totally fine. But here, we go one step further (as is standard practice) and additionally quantize the activations in the forward pass. This requires us to dynamically quantize and dequantize between float32 and int8 at runtime, which adds overhead. But the benefit is that now the majority of the calculations (the matmuls especially!) are using pure integer arithmetic, where both weights and activations enter as int8. This is where the speedups can fundamentally come from. The version we use is the "Q8_0" quantization (llama.cpp terminology), where the 0 means that the weight quantization is symmetric around 0, quantizing to the range [-127, 127].
-
-The quantized forward pass is implemented in [runq.c](runq.c). To use it, we have to export the model in the quantized format. For example, the float32 version of Llama 2 7B was exported as:
-
-```
-python export.py llama2_7b.bin --meta-llama path/to/llama/model/7B
-```
-
-This creates a 26GB file, because each one of 7B parameters is 4 bytes (fp32). To export it quantized, we instead use version 2 export:
-
-```
-python export.py llama2_7b_q80.bin --version 2 --meta-llama path/to/llama/model/7B
-```
-
-This runs for a few minutes, but now creates only a 6.7GB file. For exporting non-meta checkpoints you would use the --checkpoint arg instead of --meta-llama arg (more docs on this later, below). Now let's inference them. I like to use OMP here because these are big models, so e.g. on my Linux box:
-
-```
-make runomp
-OMP_NUM_THREADS=64 ./run llama2_7b.bin -n 40
-OMP_NUM_THREADS=64 ./runq llama2_7b_q80.bin -n 40
-```
-
-This runs 40 steps just to get a timing. The float32 version for me runs at 4.6 tok/s, and the int8 version at 14 tok/s. So we achieved a 3X speedup while reducing the checkpoint size by 4X. However, the forward pass is quantized to int8, and therefore silently very slightly lower quality.
-
-## huggingface models
-
-We can load any huggingface models that use the Llama 2 architecture. See the script [export.py](export.py) and the `--hf` flag to export the model .bin file.
-
-## models
-
-For the sake of examples of smaller, from-scratch models, I trained a small model series on TinyStories. All of these trained in a few hours on my training setup (4X A100 40GB GPUs). The 110M took around 24 hours. I am hosting them on huggingface hub [tinyllamas](https://huggingface.co/karpathy/tinyllamas), both in the original PyTorch .pt, and also in the llama2.c format .bin:
-
-| model | dim | n_layers | n_heads | n_kv_heads | max context length | parameters | val loss | download
-| --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| 260K | 64 | 5 | 8 | 4 | 512 | 260K | 1.297 | [stories260K](https://huggingface.co/karpathy/tinyllamas/tree/main/stories260K)
-| OG | 288 | 6 | 6 | 6 | 256 | 15M | 1.072 | [stories15M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin) |
-| 42M| 512 | 8 | 8 | 8 | 1024 | 42M | 0.847 | [stories42M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin) |
-| 110M| 768 | 12 | 12 | 12 | 1024 | 110M | 0.760 | [stories110M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin) |
-
-You'll notice that the 110M model is equivalent to GPT-1 in size. Alternatively, this is also the smallest model in the GPT-2 series (`GPT-2 small`), except the max context length is only 1024 instead of 2048. The only notable changes from GPT-1/2 architecture is that Llama uses RoPE relatively positional embeddings instead of absolute/learned positional embeddings, a bit more fancy SwiGLU non-linearity in the MLP, RMSNorm instead of LayerNorm, bias=False on all Linear layers, and is optionally multiquery (but this is not yet supported in llama2.c).
-
-## training
-
-Let's see how we can train a baby Llama 2 from scratch using the code in this repo. First let's download and pretokenize some source dataset, e.g. I like [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) so this is the only example currently available in this repo. But it should be very easy to add datasets, see the code.
-
-```bash
-python tinystories.py download
-python tinystories.py pretokenize
-```
-
-Then train our model:
-
-```bash
-python train.py
-```
-
-**brief training guide**. See the train.py script for more exotic launches and hyperparameter overrides. Here is a brief guide to how to set the parameters. Look at the table at the very end of the [Chinchilla paper](https://arxiv.org/abs/2203.15556) to get a sense of how the Transformer parameters (dim, n_layers, n_heads) grow or shrink together. Extrapolate/interpolate this pattern to get bigger or smaller transformers. Set the max context length however you wish, depending on the problem: this should be the max number of tokens that matter to predict the next token. E.g. Llama 2 uses 2048. Next, you want the _total_ batch size per update (printed by the script as "tokens per iteration will be:") to be somewhere around 100K tokens for medium-sized applications. For tiny applications it could be lower, for large training (e.g. GPTs/LLamas) it is usually ~0.5M, or even more. You get there by first maxing out the batch_size to whatever your system allows (e.g. mine was 16 in a recent run because after that my GPU runs out of memory), and then you want to increase gradient_accumulation_steps to be as high as necessary to reach the total batch size of ~100K. Finally, you want to tune your learning_rate (LR). You want this to be as high as your training allows. Very small networks can get away with a large LR (e.g. 1e-3 or even higher). Large networks need lower LRs. 3e-4 is a safe choice in most medium-sized applications, but can be too low for small networks, so try to increase it! Finally, max_iters is the length of training. Play with different settings. I mostly only ever tune these parameters and leave most of the others unchanged. Here is an example of how I trained the 110M model, which I don't think is anywhere near optimal, but looked sensible to me: dim 768, n_layers 12, n_heads 12 (so size of each head is 768 / 12 = 64 channels), seq len of 1024, batch size 16 (this is the most that fit my A100 40GB GPU), gradient_accumulation_steps = 8 was needed to get total tokens batch size to be 16 batch size * 1024 tokens in sequence * 8 grad_accum = 131,072 tokens per update. Good. Learning rate 4e-4 (probably a little too low). max_iters 200K (probably a bit too high). Dropout 0.1, as that usually helps a bit at medium size. That was it. I ran using Distributed Data Parallel (DDP) on 4 GPUs on my cloud machine, training took ~day or so.
-
-Totally understand if you want to skip model training, for simple demo just download one of the pretrained models (see [models](#models) section), e.g.:
-
-```bash
-wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
-```
-
-Once we have the model.bin file, we can inference in C. Compile the C code first:
-
-```bash
-make run
-```
-
-You can now run it simply as
-
-```bash
-./run stories15M.bin
-```
-
-Watch the tokens stream by, fun! We can also run the PyTorch inference script for a comparison. Download one of the models again from huggingface hub and point the `sample.py` script at it:
-
-```bash
-wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt -P out15M
-python sample.py --checkpoint=out15M/stories15M.pt
-```
-
-Which gives the same results.
-
-## custom tokenizers
-
-In everything above, we've assumed the custom Lllama 2 tokenizer with 32,000 tokens. However, in many boutique LLMs, using vocabulary this big might be an overkill. If you have a small application you have in mind, you might be much better off training your own tokenizers. This can make everything nicer - with smaller vocabs your model has fewer parameters (because the token embedding table is a lot smaller), the inference is faster (because there are fewer tokens to predict), and your average sequence length per example could also get smaller (because the compression is a lot more efficient on your data). So let's see how we train a custom tokenizer.
-
-By default, to pretokenize the tinystories dataset we had to run, in order:
-
-```
-python tinystories.py download
-python tinystories.py pretokenize
-```
-
-The `pretokenize` stage here loads the Llama 2 tokenizer (vocab size 32,000) and uses it to convert the downloaded text into integers, and saves that to file. We now change this as follows, to train an example 4096-token tokenizer:
-
-```
-python tinystories.py download
-python tinystories.py train_vocab --vocab_size=4096
-python tinystories.py pretokenize --vocab_size=4096
-```
-
-The `train_vocab` stage will call the `sentencepiece` library to train the tokenizer, storing it in a new file `data/tok4096.model`. I tried to reproduce as well as I could the settings that (I think) Meta used to train their vocabulary. This uses the Byte Pair Encoding algorithm that starts out with raw utf8 byte sequences of the text data and then iteratively merges the most common consecutive pairs of tokens to form the vocabulary. Inspect the `tinystories.py` file - the custom tokenizers are stored in a special directory structure indexed by the vocab size.
-
-A quick note of interest is that vocab size of 4096 trained specifically on tinystories creates integer sequences with about the same sequence length per example as the default Llama 2 tokenizer of 32000 tokens! This means that our custom, tailored tokenizer is a lot better adapted to our specific text, and can compress it very effectively. So our trained models are smaller and faster.
-
-Now that we have pretokenized the dataset with our custom tokenizer, we can train the model. The training script `train.py` doesn't care about the exact tokens, it only cares about the vocabulary size so it can correctly initialize the model. So when training your model, make sure to pass in
-
-```
-python train.py --vocab_source=custom --vocab_size=4096
-```
-
-(The defaults are `llama2` and `32000` respectively, which indicates the default Llama 2 tokenizer). This trains the model. Finally we are ready to run inference with our `run.c` script. For that we need two things. Number one, we have to export our tokenizer in the `.bin` format, do that with:
-
-```
-python tokenizer.py --tokenizer-model=data/tok4096.model
-```
-
-This writes the tokenizer to `data/tok4096.bin`. Now we can run inference, pointing it to this tokenizer using the `-z` flag:
-
-```
-./run out/model.bin -z data/tok4096.bin
-```
-
-This should print the samples. If you leave out the `-z` flag, it will use the default Llama 2 tokenizer, which would generate a good sequence of integers, but they would get translated using a different vocabulary to text, so it would look like gibberish.
-
-## performance
-
-There are many ways to potentially speed up this code depending on your system. Have a look at the [Makefile](Makefile), which contains a lot of notes. The `make run` command currently uses the `-O3` optimization by default, i.e.:
-
-```bash
-gcc -O3 -o run run.c -lm
-```
-
--O3 includes optimizations that are expensive in terms of compile time and memory usage. Including vectorization, loop unrolling, and predicting branches.
-
-To get a much better performance, try to compile with `make runfast`. This turns on the `-Ofast` flag, which includes additional optimizations that may break compliance with the C/IEEE specifications, in addition to `-O3`. See [the GCC docs](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html) for more information.
-
-Try `-march=native` to compile the program to use the architecture of the machine you're compiling on rather than a more generic CPU. This may enable additional optimizations and hardware-specific tuning such as improved vector instructions/width.
-
-The fastest throughput I saw so far on my MacBook Air (M1) so far is with `make runfast`.
-
-You can also experiment with replacing `gcc` with `clang`.
-
-If compiling with gcc, try experimenting with `-funroll-all-loops`, see PR [#183](https://github.com/karpathy/llama2.c/pull/183)
-
-**OpenMP**. Big improvements can also be achieved by compiling with OpenMP, which "activates" the `#pragma omp parallel for` inside the matmul and attention, allowing the work in the loops to be split up over multiple processors.
-You'll need to install the OpenMP library and the clang compiler first (e.g. `apt install clang libomp-dev` on ubuntu). Then you can compile with `make runomp`, which does:
-
-```bash
-clang -Ofast -fopenmp -march=native run.c -lm -o run
-```
-
-When you run inference make sure to use OpenMP flags to set the number of threads, e.g.:
-
-```bash
-OMP_NUM_THREADS=4 ./run out/model.bin
-```
-
-Depending on your system resources you may want to tweak these hyperparameters and use more threads. But more is not always better, usually this is a bit U shaped. In particular, if your CPU has SMT (multithreading), try setting the number of threads to the number of physical cores rather than logical cores. The performance difference can be large due to cache thrashing and communication overhead. The PyTorch documentation [CPU specific optimizations
-](https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#cpu-specific-optimizations) has some good information that applies here too.
-
-## platforms
-
-On **Windows**, use `build_msvc.bat` in a Visual Studio Command Prompt to build with msvc, or you can use `make win64` to use mingw compiler toolchain from linux or windows to build the windows target. MSVC build will automatically use openmp and max threads appropriate for your CPU unless you set `OMP_NUM_THREADS` env.
-
-On **Centos 7**, **Amazon Linux 2018** use `rungnu` Makefile target: `make rungnu` or `make runompgnu` to use openmp.
-
-On **Mac**, use clang from brew for openmp build. Install clang as `brew install llvm` and use the installed clang binary to compile with openmp: `make runomp CC=/opt/homebrew/opt/llvm/bin/clang`
-
-## tests
-
-You can run tests simply with pytest:
-
-```bash
-$ pip install pytest
-$ pytest
-```
-
-This will currently invoke two tests inside `test_all.py`, which forward the model in both C and Python for 200 steps and check the output against a known good expected output. The tests currently run in only a few seconds, but will have to download and cache the stories260K models in a temporary `test` directory (only ~2MB download).
-
-There are also some tests in C, in the file [test.c](test.c). You can run these with `make testcc`, or to see more stuff printed:
-
-```
-make testcc VERBOSITY=1
-```
-
-Call for help: help add more tests.
-
-## ack
-
-I trained the llama2.c storyteller models on a 4X A100 40GB box graciously provided by the excellent [Lambda labs](https://lambdalabs.com/service/gpu-cloud), thank you.
-
-## discord
-
-Figured it's possible to reuse my existing discord channel (that I use for my [zero to hero youtube series](https://karpathy.ai/zero-to-hero.html)), see #llama2c channel on [discord](https://discord.gg/3zy8kqD9Cp), for any quick questions, related discussions, etc.
-
-## contributing
-
-A few words on this repo and the kinds of PRs that are likely to be accepted. What is the goal of this repo? Basically I think there will be a lot of interest in training or finetuning custom micro-LLMs (think ~100M - ~1B params, but let's say up to ~10B params) across a large diversity of applications, and deploying them in edge-adjacent environments (think MCUs, phones, web browsers, laptops, etc.). I'd like this repo to be the simplest, smallest, most hackable repo to support this workflow, both training and inference. In particular, this repo is not a complex framework with a 1000 knobs controlling inscrutible code across a nested directory structure of hundreds of files. Instead, I expect most applications will wish to create a fork of this repo and hack it to their specific needs and deployment platforms.
-
-People who care about deployment efficiency above all else should look at [llama.cpp](https://github.com/ggerganov/llama.cpp). This repo still cares about efficiency, but not at the cost of simplicity, readability or portability. Basically, I expect that a lot of people come to this repo because the training code is 2 readable .py files and the inference code is 500 lines of C. So I'd like this to continue to be a kind of simplest "reference implementation" that can be easily hacked in a separate fork into whatever downstream application people are excited about. It shouldn't be full-featured. It shouldn't take 100 different options or settings. It shouldn't be the most efficient. A few examples:
-
-- someone re-ordered two loops to improve data locality for a small efficieny win => instant merge.
-- someone added the one line "pragma omp parallel for", which allows you to compile with OpenMP and dramatically speed up the code, or acts as just a comment if you don't compile it that way => instant merge.
-- bug fixes and touchups etc. => happy to merge
-
-A few examples of PRs are that are not an excellent fit:
-
-- adding more than several #ifdefs all over the place in code. If they are localized / few, might be okay.
-- adding a lot of code that is very specific to some specific platform (e.g. MCUs, or some special version of linux or processor). These may be a better fit for forks of the project, and I am very happy to maintain a list of these forks in section below.
-- adding hundreds of lines of code to run.c that are only active in specific scenarios or platforms.
-
-If your candidate PRs have elements of these it doesn't mean they won't get merged, it just means they will make it into the gray territory. TLDR: I am eager to merge any mostly small, mostly localized, broadly applicable, clean changes that improve the efficiency and portability of the repo, while keep its hackability and readability. I appreciate all PRs seeking to help me improve the project, thank you! <3.
-
-## notable forks
-
-- Rust
- - [llama2.rs](https://github.com/gaxler/llama2.rs) by @[gaxler](https://github.com/gaxler): a Rust port of this project
- - [llama2.rs](https://github.com/leo-du/llama2.rs) by @[leo-du](https://github.com/leo-du): A Rust port of this project
- - [llama2-rs](https://github.com/danielgrittner/llama2-rs) by @[danielgrittner](https://github.com/danielgrittner): a Rust port of this project
- - [llama2.rs](https://github.com/lintian06/llama2.rs) by @[lintian06](https://github.com/lintian06): A Rust port of this project
- - [pecca.rs](https://github.com/rahoua/pecca-rs) by @[rahoua](https://github.com/rahoua): A Rust port leveraging [ndarray](https://github.com/rust-ndarray/ndarray), supports BLAS.
- - [llama2.rs](https://github.com/flaneur2020/llama2.rs) by @[flaneur2020](https://github.com/flaneur2020): A Rust port of this project.
-- Go
- - [go-llama2](https://github.com/tmc/go-llama2) by @[tmc](https://github.com/tmc): a Go port of this project
- - [llama2.go](https://github.com/nikolaydubina/llama2.go) by @[nikolaydubina](https://github.com/nikolaydubina): a Go port of this project
- - [llama2.go](https://github.com/haormj/llama2.go) by @[haormj](https://github.com/haormj): a Go port of this project
- - [llama2.go](https://github.com/saracen/llama2.go) by @[saracen](https://github.com/saracen): a Go port of this project
-- Android
- - [llama2.c-android](https://github.com/Manuel030/llama2.c-android): by @[Manuel030](https://github.com/Manuel030): adds Android binaries of this project
- - [llama2.c-android-wrapper](https://github.com/celikin/llama2.c-android-wrapper): by @[celikin](https://github.com/celikin): added JNI wrapper, PoC
-- C++
- - [llama2.cpp](https://github.com/leloykun/llama2.cpp) by @[leloykun](https://github.com/leloykun): a C++ port of this project
-- JavaScript
- - [llama2.js](https://github.com/epicure/llama2.js) by @[epicure](https://github.com/epicure): a JavaScript port of this project
- - [llamajs](https://github.com/agershun/llamajs) by @[agershun](https://github.com/agershun): a JavaScript port of this project
- - [llama2.ts](https://github.com/wizzard0/llama2.ts) by @[oleksandr_now](https://twitter.com/oleksandr_now): a TypeScript port of this project. Full Llama2-7B capable.
- - [llama2.c-emscripten](https://github.com/gohai/llama2.c-emscripten) by @[gohai](https://github.com/gohai): Emscripten (JavaScript) port, based on @ggerganov's initial prototype
-- Zig
- - [llama2.zig](https://github.com/cgbur/llama2.zig) by @[cgbur](https://github.com/cgbur): A Zig port of this project
- - [llama2.zig](https://github.com/vodkaslime/llama2.zig) by @[vodkaslime](https://github.com/vodkaslime): a Zig port of this project
- - [llama2.zig](https://github.com/clebert/llama2.zig) by @[clebert](https://github.com/clebert): a Zig port of this project
-- Julia
- - [llama2.jl](https://github.com/juvi21/llama2.jl) by @[juvi21](https://github.com/juvi21): a Julia port of this project
-- Scala
- - [llama2.scala](https://github.com/jrudolph/llama2.scala) by @[jrudolph](https://github.com/jrudolph): a Scala port of this project
-- Java
- - [llama2.java](https://github.com/mukel/llama2.java) by @[mukel](https://github.com/mukel): a Java port of this project
-- Kotlin
- - [llama2.kt](https://github.com/madroidmaq/llama2.kt) by @[madroidmaq](https://github.com/madroidmaq): a Kotlin port of this project
-- Python
- - [llama2.py](https://github.com/tairov/llama2.py) by @[tairov](https://github.com/tairov): a simple one file pure Python port of this project with zero dependencies
-- C#
- - [llama2.cs](https://github.com/trrahul/llama2.cs) by @[trrahul](https://github.com/trrahul): a C# port of this project
-- Dart
- - [llama2.dart](https://github.com/yiminghan/llama2.dart) by @[yiminghan](https://github.com/yiminghan/llama2.dart): one-file dart port of this project, works with Flutter!
-- Web
- - [llama2c-web](https://github.com/dmarcos/llama2.c-web) by @[dmarcos](https://github.com/dmarcos): Super simple way to build unmodified llama2.c to WASM and run it in the browser. [Demo](https://diegomarcos.com/llama2.c-web/)
-- WebAssembly
- - [icpp-llm](https://github.com/icppWorld/icpp-llm): LLMs for the Internet Computer
-- Fortran
- - [llama2.f90](https://github.com/rbitr/llama2.f90): a Fortran port of this project
-- Mojo
- - [llama2.🔥](https://github.com/tairov/llama2.mojo) by @[tairov](https://github.com/tairov): pure Mojo port of this project
-- OCaml
- - [llama2.ml](https://github.com/jackpeck/llama2.ml) by @[jackpeck](https://github.com/jackpeck): an OCaml port of this project
-- [llama2.c - Llama 2 Everywhere](https://github.com/trholding/llama2.c) by @[trholding](https://github.com/trholding): Standalone, Bootable & Portable Binary Llama 2
-- [llama2.c-zh - Bilingual Chinese and English](https://github.com/chenyangMl/llama2.c-zh) by @[chenyangMl](https://github.com/chenyangMl): Expand tokenizer to support training and inference in both Chinese and English
-
-## unsorted todos
-
-- add support in run.c of reading version 1+ files from export, later deprecate "version 0"
-- run.cu (CUDA) investigate and merge
-- add more tests inside [test.c](test.c)
-- add Engine class for use in sample.py that does efficient inference in PyTorch, e.g. KV cache keeping
-- make it easier to add a new dataset with not too much pain
-- (LoRA) finetuning and export of Llama 2 models
-
-## License
-
-MIT
+# Optimizing llama2.c on the Milk-V Duo
+## Analysis of optimization options
+Profiling how the original program runs on the Milk-V Duo suggests the following ways to try to speed it up:
+- Optimize the source code to raise CPU utilization
+Concretely, there are two angles here: *instruction-level optimization* and *improving the cache hit rate*
+- Optimize file I/O
+The model file is large, and reading the data it contains takes a lot of time
+- Enable the second core of the CV1800B
+Going from one core to two should, in theory, speed the program up; at the very least, multithreading would no longer be a net slowdown
+## Work completed
+- Used the RISC-V vector instruction set to speed up the floating-point arithmetic and thus the program (a minimal sketch follows after this list)
+- Tried to break the large matrices into smaller tiles to improve the cache hit rate (abandoned because the code I wrote was buggy)
+- Tried to change the Linux kernel configuration to build an image that can see and schedule the second core (abandoned: it requires building U-Boot, the Linux kernel, and so on, the project layout is complex, and I am not familiar with device trees)
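+
+The kernel that benefits is `matmul()` in [run.c](run.c). As a minimal sketch of the idea (it reuses the same pre-1.0 RVV intrinsics that run.c calls; the `dot_rvv` helper below is illustrative and not an actual function in run.c), each 32-float tile is multiplied in one `m8` operation, the 32 products are folded to 16 with an `m4` add, and the remaining 16 are summed in scalar code:
+
+```c
+#include <riscv_vector.h>
+
+#define LENGTH 32 /* floats handled per vector iteration, as in run.c */
+
+/* Illustrative dot product of w and x (length n) using the same
+ * m8-multiply / m4-fold / scalar-finish reduction as matmul() in run.c. */
+static float dot_rvv(float *w, float *x, int n) {
+  float val = 0.0f;
+  float tmp[LENGTH];
+  int j;
+  for (j = 0; j + LENGTH <= n; j += LENGTH) {
+    vfloat32m8_t va = vle32_v_f32m8(&w[j], LENGTH);   // load 32 weights
+    vfloat32m8_t vb = vle32_v_f32m8(&x[j], LENGTH);   // load 32 activations
+    vfloat32m8_t vc = vfmul_vv_f32m8(va, vb, LENGTH); // elementwise products
+    vse32_v_f32m8(tmp, vc, LENGTH);                   // spill the products
+    vfloat32m4_t ve = vle32_v_f32m4(tmp, 16);         // first half
+    vfloat32m4_t vf = vle32_v_f32m4(&tmp[16], 16);    // second half
+    vfloat32m4_t vg = vfadd_vv_f32m4(ve, vf, 16);     // fold 32 -> 16
+    vse32_v_f32m4(tmp, vg, 16);
+    for (int m = 0; m < 16; m++) val += tmp[m];       // finish in scalar
+  }
+  for (; j < n; j++) val += w[j] * x[j];              // scalar tail
+  return val;
+}
+```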
+## Results
+### Program resource usage after optimization
+![Program resource usage after optimization](image.png)
+### Token generation speed after optimization
+![Token generation speed after optimization](image-1.png)
+### Program resource usage before optimization
+![Program resource usage before optimization](image-2.png)
+### Token generation speed before optimization
+![Token generation speed before optimization](image-3.png)
+## Conclusion
+Comparing token generation speed before and after the changes leads to this conclusion:
+the bottleneck is not CPU execution speed but factors such as file I/O and the cache hit rate.
+Judged purely by generation speed, this optimization attempt failed, but it did reveal the right direction for further work, and it cut CPU usage to roughly *1/3* of the original. Along the way I learned how to use the vector instruction set and what various compiler options mean, which deepened my understanding of the RISC-V architecture.
+## Build notes
+Run `source envsetup.sh` first to download the toolchain, then run `make`:
+```shell
+source envsetup.sh
+make
+```
\ No newline at end of file
diff --git a/envsetup.sh b/envsetup.sh
new file mode 100755
index 00000000..38ac1535
--- /dev/null
+++ b/envsetup.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+SDK_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
+echo "SDK_DIR: ${SDK_DIR}"
+
+MILKV_DUO_SDK=${SDK_DIR}/duo-sdk
+TOOLCHAIN_DIR=${MILKV_DUO_SDK}/riscv64-linux-musl-x86_64
+
+SDK_URL="https://github.com/milkv-duo/duo-app-sdk/releases/download/duo-app-sdk-v1.2.0/duo-sdk-v1.2.0.tar.gz"
+
+function get_duo_sdk()
+{
+ pushd ${SDK_DIR}
+
+ echo "SDK_URL: ${SDK_URL}"
+ sdk_file=${SDK_URL##*/}
+ echo "sdk_file: ${sdk_file}"
+
+ wget ${SDK_URL} -O ${sdk_file}
+ if [ $? -ne 0 ]; then
+ echo "Failed to download ${SDK_URL} !"
+ return 1
+ fi
+
+ if [ ! -f ${sdk_file} ]; then
+ echo "${sdk_file} not found!"
+ return 1
+ fi
+
+ echo "Extracting ${sdk_file}..."
+ tar -xf ${sdk_file}
+ if [ $? -ne 0 ]; then
+ echo "Extract ${sdk_file} failed!"
+ return 1
+ fi
+
+ [ -f ${sdk_file} ] && rm -rf ${sdk_file}
+
+ popd
+}
+
+if [ ! -d ${MILKV_DUO_SDK} ]; then
+ echo "SDK does not exist, download it now..."
+ get_duo_sdk
+ if [ $? -ne 0 ]; then
+ echo "Get SDK failed!"
+ return 1
+ fi
+fi
+
+export TOOLCHAIN_PREFIX=${TOOLCHAIN_DIR}/bin/riscv64-unknown-linux-musl-
+export SYSROOT=${MILKV_DUO_SDK}/rootfs
+
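+# CPU/ABI flags for the CV1800B's C906 core (0.7 vector extension plus the T-Head extensions)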
+export LDFLAGS="-mcpu=c906fdv -march=rv64imafdcv0p7xthead -mcmodel=medany -mabi=lp64d"
+# -Os
+export CFLAGS="-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64"
+
+echo "SDK environment is ready"
+
diff --git a/image-1.png b/image-1.png
new file mode 100644
index 00000000..ae5cc0e1
Binary files /dev/null and b/image-1.png differ
diff --git a/image-2.png b/image-2.png
new file mode 100644
index 00000000..23297334
Binary files /dev/null and b/image-2.png differ
diff --git a/image-3.png b/image-3.png
new file mode 100644
index 00000000..46bbd204
Binary files /dev/null and b/image-3.png differ
diff --git a/image.png b/image.png
new file mode 100644
index 00000000..1a293b3c
Binary files /dev/null and b/image.png differ
diff --git a/run.c b/run.c
index e1a4ec24..f449ccd9 100644
--- a/run.c
+++ b/run.c
@@ -1,573 +1,728 @@
/* Inference for Llama-2 Transformer model in pure C */
-#include <stdio.h>
-#include <stdlib.h>
 #include <ctype.h>
-#include <time.h>
+#include <fcntl.h>
 #include <math.h>
+#include <riscv_vector.h>
+#include <stdio.h>
+#include <stdlib.h>
 #include <string.h>
-#include <fcntl.h>
+#include <time.h>
+
 #if defined _WIN32
-    #include "win.h"
+#include "win.h"
 #else
-    #include <unistd.h>
-    #include <sys/mman.h>
+#include <unistd.h>
+#include <sys/mman.h>
#endif
// ----------------------------------------------------------------------------
// Transformer model
+#define STEP 64   // only referenced by the commented-out debug code in matmul()
+#define LENGTH 32 // number of floats processed per vector iteration
typedef struct {
- int dim; // transformer dimension
- int hidden_dim; // for ffn layers
- int n_layers; // number of layers
- int n_heads; // number of query heads
- int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
- int vocab_size; // vocabulary size, usually 256 (byte-level)
- int seq_len; // max sequence length
+ int dim; // transformer dimension
+ int hidden_dim; // for ffn layers
+ int n_layers; // number of layers
+ int n_heads; // number of query heads
+ int n_kv_heads; // number of key/value heads (can be < query heads because of
+ // multiquery)
+ int vocab_size; // vocabulary size, usually 256 (byte-level)
+ int seq_len; // max sequence length
} Config;
typedef struct {
- // token embedding table
- float* token_embedding_table; // (vocab_size, dim)
- // weights for rmsnorms
- float* rms_att_weight; // (layer, dim) rmsnorm weights
- float* rms_ffn_weight; // (layer, dim)
- // weights for matmuls. note dim == n_heads * head_size
- float* wq; // (layer, dim, n_heads * head_size)
- float* wk; // (layer, dim, n_kv_heads * head_size)
- float* wv; // (layer, dim, n_kv_heads * head_size)
- float* wo; // (layer, n_heads * head_size, dim)
- // weights for ffn
- float* w1; // (layer, hidden_dim, dim)
- float* w2; // (layer, dim, hidden_dim)
- float* w3; // (layer, hidden_dim, dim)
- // final rmsnorm
- float* rms_final_weight; // (dim,)
- // (optional) classifier weights for the logits, on the last layer
- float* wcls;
+ // token embedding table
+ float *token_embedding_table; // (vocab_size, dim)
+ // weights for rmsnorms
+ float *rms_att_weight; // (layer, dim) rmsnorm weights
+ float *rms_ffn_weight; // (layer, dim)
+ // weights for matmuls. note dim == n_heads * head_size
+ float *wq; // (layer, dim, n_heads * head_size)
+ float *wk; // (layer, dim, n_kv_heads * head_size)
+ float *wv; // (layer, dim, n_kv_heads * head_size)
+ float *wo; // (layer, n_heads * head_size, dim)
+ // weights for ffn
+ float *w1; // (layer, hidden_dim, dim)
+ float *w2; // (layer, dim, hidden_dim)
+ float *w3; // (layer, hidden_dim, dim)
+ // final rmsnorm
+ float *rms_final_weight; // (dim,)
+ // (optional) classifier weights for the logits, on the last layer
+ float *wcls;
} TransformerWeights;
typedef struct {
- // current wave of activations
- float *x; // activation at current time stamp (dim,)
- float *xb; // same, but inside a residual branch (dim,)
- float *xb2; // an additional buffer just for convenience (dim,)
- float *hb; // buffer for hidden dimension in the ffn (hidden_dim,)
- float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
- float *q; // query (dim,)
- float *k; // key (dim,)
- float *v; // value (dim,)
- float *att; // buffer for scores/attention values (n_heads, seq_len)
- float *logits; // output logits
- // kv cache
- float* key_cache; // (layer, seq_len, dim)
- float* value_cache; // (layer, seq_len, dim)
+ // current wave of activations
+ float *x; // activation at current time stamp (dim,)
+ float *xb; // same, but inside a residual branch (dim,)
+ float *xb2; // an additional buffer just for convenience (dim,)
+ float *hb; // buffer for hidden dimension in the ffn (hidden_dim,)
+ float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
+ float *q; // query (dim,)
+ float *k; // key (dim,)
+ float *v; // value (dim,)
+ float *att; // buffer for scores/attention values (n_heads, seq_len)
+ float *logits; // output logits
+ // kv cache
+ float *key_cache; // (layer, seq_len, dim)
+ float *value_cache; // (layer, seq_len, dim)
} RunState;
typedef struct {
- Config config; // the hyperparameters of the architecture (the blueprint)
- TransformerWeights weights; // the weights of the model
- RunState state; // buffers for the "wave" of activations in the forward pass
- // some more state needed to properly clean up the memory mapping (sigh)
- int fd; // file descriptor for memory mapping
- float* data; // memory mapped data pointer
- ssize_t file_size; // size of the checkpoint file in bytes
+ Config config; // the hyperparameters of the architecture (the blueprint)
+ TransformerWeights weights; // the weights of the model
+ RunState state; // buffers for the "wave" of activations in the forward pass
+ // some more state needed to properly clean up the memory mapping (sigh)
+ int fd; // file descriptor for memory mapping
+ float *data; // memory mapped data pointer
+ ssize_t file_size; // size of the checkpoint file in bytes
} Transformer;
-void malloc_run_state(RunState* s, Config* p) {
- // we calloc instead of malloc to keep valgrind happy
- int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
- s->x = calloc(p->dim, sizeof(float));
- s->xb = calloc(p->dim, sizeof(float));
- s->xb2 = calloc(p->dim, sizeof(float));
- s->hb = calloc(p->hidden_dim, sizeof(float));
- s->hb2 = calloc(p->hidden_dim, sizeof(float));
- s->q = calloc(p->dim, sizeof(float));
- s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
- s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
- s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
- s->logits = calloc(p->vocab_size, sizeof(float));
- // ensure all mallocs went fine
- if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
- || !s->key_cache || !s->value_cache || !s->att || !s->logits) {
- fprintf(stderr, "malloc failed!\n");
- exit(EXIT_FAILURE);
- }
+void malloc_run_state(RunState *s, Config *p) {
+ // we calloc instead of malloc to keep valgrind happy
+ int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
+ s->x = calloc(p->dim, sizeof(float));
+ s->xb = calloc(p->dim, sizeof(float));
+ s->xb2 = calloc(p->dim, sizeof(float));
+ s->hb = calloc(p->hidden_dim, sizeof(float));
+ s->hb2 = calloc(p->hidden_dim, sizeof(float));
+ s->q = calloc(p->dim, sizeof(float));
+ s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
+ s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
+ s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
+ s->logits = calloc(p->vocab_size, sizeof(float));
+ // ensure all mallocs went fine
+ if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q ||
+ !s->key_cache || !s->value_cache || !s->att || !s->logits) {
+ fprintf(stderr, "malloc failed!\n");
+ exit(EXIT_FAILURE);
+ }
}
-void free_run_state(RunState* s) {
- free(s->x);
- free(s->xb);
- free(s->xb2);
- free(s->hb);
- free(s->hb2);
- free(s->q);
- free(s->att);
- free(s->logits);
- free(s->key_cache);
- free(s->value_cache);
+void free_run_state(RunState *s) {
+ free(s->x);
+ free(s->xb);
+ free(s->xb2);
+ free(s->hb);
+ free(s->hb2);
+ free(s->q);
+ free(s->att);
+ free(s->logits);
+ free(s->key_cache);
+ free(s->value_cache);
}
-void memory_map_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) {
- int head_size = p->dim / p->n_heads;
- // make sure the multiplications below are done in 64bit to fit the parameter counts of 13B+ models
- unsigned long long n_layers = p->n_layers;
- w->token_embedding_table = ptr;
- ptr += p->vocab_size * p->dim;
- w->rms_att_weight = ptr;
- ptr += n_layers * p->dim;
- w->wq = ptr;
- ptr += n_layers * p->dim * (p->n_heads * head_size);
- w->wk = ptr;
- ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
- w->wv = ptr;
- ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
- w->wo = ptr;
- ptr += n_layers * (p->n_heads * head_size) * p->dim;
- w->rms_ffn_weight = ptr;
- ptr += n_layers * p->dim;
- w->w1 = ptr;
- ptr += n_layers * p->dim * p->hidden_dim;
- w->w2 = ptr;
- ptr += n_layers * p->hidden_dim * p->dim;
- w->w3 = ptr;
- ptr += n_layers * p->dim * p->hidden_dim;
- w->rms_final_weight = ptr;
- ptr += p->dim;
- ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE)
- ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE)
- w->wcls = shared_weights ? w->token_embedding_table : ptr;
+void memory_map_weights(TransformerWeights *w, Config *p, float *ptr,
+ int shared_weights) {
+ int head_size = p->dim / p->n_heads;
+ // make sure the multiplications below are done in 64bit to fit the parameter
+ // counts of 13B+ models
+ unsigned long long n_layers = p->n_layers;
+ w->token_embedding_table = ptr;
+ ptr += p->vocab_size * p->dim;
+ w->rms_att_weight = ptr;
+ ptr += n_layers * p->dim;
+ w->wq = ptr;
+ ptr += n_layers * p->dim * (p->n_heads * head_size);
+ w->wk = ptr;
+ ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
+ w->wv = ptr;
+ ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
+ w->wo = ptr;
+ ptr += n_layers * (p->n_heads * head_size) * p->dim;
+ w->rms_ffn_weight = ptr;
+ ptr += n_layers * p->dim;
+ w->w1 = ptr;
+ ptr += n_layers * p->dim * p->hidden_dim;
+ w->w2 = ptr;
+ ptr += n_layers * p->hidden_dim * p->dim;
+ w->w3 = ptr;
+ ptr += n_layers * p->dim * p->hidden_dim;
+ w->rms_final_weight = ptr;
+ ptr += p->dim;
+ ptr += p->seq_len * head_size /
+ 2; // skip what used to be freq_cis_real (for RoPE)
+ ptr += p->seq_len * head_size /
+ 2; // skip what used to be freq_cis_imag (for RoPE)
+ w->wcls = shared_weights ? w->token_embedding_table : ptr;
}
-void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
- int* fd, float** data, ssize_t* file_size) {
- FILE *file = fopen(checkpoint, "rb");
- if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
- // read in the config header
- if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
- // negative vocab size is hacky way of signaling unshared weights. bit yikes.
- int shared_weights = config->vocab_size > 0 ? 1 : 0;
- config->vocab_size = abs(config->vocab_size);
- // figure out the file size
- fseek(file, 0, SEEK_END); // move file pointer to end of file
- *file_size = ftell(file); // get the file size, in bytes
- fclose(file);
- // memory map the Transformer weights into the data pointer
- *fd = open(checkpoint, O_RDONLY); // open in read only mode
- if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
- *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
- if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
- float* weights_ptr = *data + sizeof(Config)/sizeof(float);
- memory_map_weights(weights, config, weights_ptr, shared_weights);
+void read_checkpoint(char *checkpoint, Config *config,
+ TransformerWeights *weights, int *fd, float **data,
+ ssize_t *file_size) {
+ FILE *file = fopen(checkpoint, "rb");
+ if (!file) {
+ fprintf(stderr, "Couldn't open file %s\n", checkpoint);
+ exit(EXIT_FAILURE);
+ }
+ // read in the config header
+ if (fread(config, sizeof(Config), 1, file) != 1) {
+ exit(EXIT_FAILURE);
+ }
+ // negative vocab size is hacky way of signaling unshared weights. bit yikes.
+ int shared_weights = config->vocab_size > 0 ? 1 : 0;
+ config->vocab_size = abs(config->vocab_size);
+ // figure out the file size
+ fseek(file, 0, SEEK_END); // move file pointer to end of file
+ *file_size = ftell(file); // get the file size, in bytes
+ fclose(file);
+ // memory map the Transformer weights into the data pointer
+ *fd = open(checkpoint, O_RDONLY); // open in read only mode
+ if (*fd == -1) {
+ fprintf(stderr, "open failed!\n");
+ exit(EXIT_FAILURE);
+ }
+ *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
+ if (*data == MAP_FAILED) {
+ fprintf(stderr, "mmap failed!\n");
+ exit(EXIT_FAILURE);
+ }
+ float *weights_ptr = *data + sizeof(Config) / sizeof(float);
+ memory_map_weights(weights, config, weights_ptr, shared_weights);
}
-void build_transformer(Transformer *t, char* checkpoint_path) {
- // read in the Config and the Weights from the checkpoint
- read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
- // allocate the RunState buffers
- malloc_run_state(&t->state, &t->config);
+void build_transformer(Transformer *t, char *checkpoint_path) {
+ // read in the Config and the Weights from the checkpoint
+ read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data,
+ &t->file_size);
+ // allocate the RunState buffers
+ malloc_run_state(&t->state, &t->config);
}
-void free_transformer(Transformer* t) {
- // close the memory mapping
- if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); }
- if (t->fd != -1) { close(t->fd); }
- // free the RunState buffers
- free_run_state(&t->state);
+void free_transformer(Transformer *t) {
+ // close the memory mapping
+ if (t->data != MAP_FAILED) {
+ munmap(t->data, t->file_size);
+ }
+ if (t->fd != -1) {
+ close(t->fd);
+ }
+ // free the RunState buffers
+ free_run_state(&t->state);
}
// ----------------------------------------------------------------------------
// neural net blocks; the dynamics of the Transformer
-void rmsnorm(float* o, float* x, float* weight, int size) {
- // calculate sum of squares
- float ss = 0.0f;
- for (int j = 0; j < size; j++) {
- ss += x[j] * x[j];
+void rmsnorm(float *o, float *x, float *weight, int size) {
+ // calculate sum of squares
+  float ss = 0.0f;
+ float *temp_x;
+ float temp_out[LENGTH];
+ float temp_out_two[16];
+ int j;
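+  // Vectorized sum of squares: each iteration multiplies a LENGTH-wide tile of
+  // x with itself (m8), folds the 32 products into two 16-float halves with an
+  // m4 add, and finishes the reduction with a short scalar loop.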
+  for (j = 0; j + LENGTH <= size; j += LENGTH) {
+
+ // ss += x[j] * x[j];
+ temp_x = &x[j];
+ vfloat32m8_t va = vle32_v_f32m8(temp_x, LENGTH);
+ vfloat32m8_t vb = vle32_v_f32m8(temp_x, LENGTH);
+ vfloat32m8_t vc = vfmul_vv_f32m8(va, vb, LENGTH);
+
+ vse32_v_f32m8(temp_out, vc, LENGTH);
+
+ vfloat32m4_t ve = vle32_v_f32m4(temp_out, 16);
+ vfloat32m4_t vf = vle32_v_f32m4(&temp_out[16], 16);
+
+ vfloat32m4_t vg = vfadd_vv_f32m4(ve, vf, 16);
+ vse32_v_f32m4(temp_out_two, vg, 16);
+ for (int m = 0; m < 16; m++) {
+ ss += temp_out_two[m];
}
- ss /= size;
- ss += 1e-5f;
- ss = 1.0f / sqrtf(ss);
- // normalize and scale
- for (int j = 0; j < size; j++) {
- o[j] = weight[j] * (ss * x[j]);
+ }
+  if (j < size) { // scalar tail when size is not a multiple of LENGTH
+ for (int m = j; m < size; m++) {
+ ss += x[m] * x[m];
}
+ }
+
+ ss /= size;
+ ss += 1e-5f;
+ ss = 1.0f / sqrtf(ss);
+ // normalize and scale
+
+ for (j = 0; j < size; j++) {
+ o[j] = weight[j] * (ss * x[j]);
+ }
}
-void softmax(float* x, int size) {
- // find max value (for numerical stability)
- float max_val = x[0];
- for (int i = 1; i < size; i++) {
- if (x[i] > max_val) {
- max_val = x[i];
- }
+void softmax(float *x, int size) {
+ // find max value (for numerical stability)
+ float max_val = x[0];
+ for (int i = 1; i < size; i++) {
+ if (x[i] > max_val) {
+ max_val = x[i];
}
- // exp and sum
- float sum = 0.0f;
- for (int i = 0; i < size; i++) {
- x[i] = expf(x[i] - max_val);
- sum += x[i];
+ }
+ // exp and sum
+ float sum = 0.0f;
+ for (int i = 0; i < size; i++) {
+ x[i] = expf(x[i] - max_val);
+ sum += x[i];
+ }
+ // normalize
+ for (int i = 0; i < size; i++) {
+ x[i] /= sum;
+ }
+}
+
+void matmul(float *xout, float *x, float *w, int n, int d) {
+ // W (d,n) @ x (n,) -> xout (d,)
+ // by far the most amount of time is spent inside this little function
+
+ // #define STEP 1024
+ // #define LENGTH 12
+ int i;
+ int temp;
+ float val;
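+  // The commented-out block below is the original scalar matmul, kept for
+  // reference together with the printf() calls used while debugging.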
+ // #pragma omp parallel for private(i)
+ // for (i = 0; i < d; i++) {
+ // val = 0.0f;
+ // temp = i * n;
+ // for (int j = 0; j < n; j++) {
+ // val += w[temp + j] * x[j];
+ // if ((j + 1) % 4 == 0) {
+ // printf("val is %f origin \n",val);
+ // }
+ // if ((j + 1) % STEP == 0) {
+ // printf("i is %d, val is %f origin step\n",i,val);
+
+ // }
+ // }
+ // xout[i] = val;
+ // printf("i is %d,origin is %f \n",i,xout[i]);
+ // }
+
+  // Speed up the dot products with the RISC-V vector (RVV) extension intrinsics
+ float *temp_w, *temp_x;
+ float temp_out[LENGTH];
+ // float temp_out_bug[LENGTH];
+ for (i = 0; i < d; i++) {
+ val = 0.0f;
+ temp = i * n;
+    int j;
+    // process LENGTH floats per step; stop before reading past the end of the row
+    for (j = 0; j + LENGTH <= n; j += LENGTH) {
+ temp_w = &w[temp + j];
+ temp_x = &x[j];
+ vfloat32m8_t va = vle32_v_f32m8(temp_w, LENGTH);
+ vfloat32m8_t vb = vle32_v_f32m8(temp_x, LENGTH);
+
+ vfloat32m8_t vc = vfmul_vv_f32m8(va, vb, LENGTH);
+
+ vse32_v_f32m8(temp_out, vc, LENGTH);
+
+ vfloat32m4_t ve = vle32_v_f32m4(temp_out, 16);
+ vfloat32m4_t vf = vle32_v_f32m4(&temp_out[16], 16);
+
+ vfloat32m4_t vg = vfadd_vv_f32m4(ve, vf, 16);
+ vse32_v_f32m4(temp_out, vg, 16);
+ for (int m = 0; m < 16; m++) {
+ val += temp_out[m];
+ }
+ // printf("val is %f \n", val);
}
- // normalize
- for (int i = 0; i < size; i++) {
- x[i] /= sum;
+ // scalar tail: handle any remainder when n is not a multiple of LENGTH
+ if (j < n) {
+ for (int m = j; m < n; m++) {
+ val += w[temp + m] * x[m];
+ }
}
+ xout[i] = val;
+ }
}
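+
+// Illustrative sketch, not part of this patch: the scalar matmul kept its
+// output rows independent and was parallelized with OpenMP; the same
+// row-level parallelism can be layered on top of the vectorized inner loop.
+// Assumes -fopenmp is added to CFLAGS for the cross toolchain and reuses the
+// dot_rvv_sketch helper sketched after rmsnorm above.
+static void matmul_omp_rvv_sketch(float *xout, const float *x, const float *w,
+                                  int n, int d) {
+  int i;
+#pragma omp parallel for private(i)
+  for (i = 0; i < d; i++) {
+    // one vectorized dot product per output row; every temporary lives inside
+    // the helper, so each thread works on private state
+    xout[i] = dot_rvv_sketch(&w[i * n], x, n);
+  }
+}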
-void matmul(float* xout, float* x, float* w, int n, int d) {
- // W (d,n) @ x (n,) -> xout (d,)
- // by far the most amount of time is spent inside this little function
- int i;
- #pragma omp parallel for private(i)
- for (i = 0; i < d; i++) {
- float val = 0.0f;
- for (int j = 0; j < n; j++) {
- val += w[i * n + j] * x[j];
- }
- xout[i] = val;
+float *forward(Transformer *transformer, int token, int pos) {
+
+ // a few convenience variables
+ Config *p = &transformer->config;
+ TransformerWeights *w = &transformer->weights;
+ RunState *s = &transformer->state;
+ float *x = s->x;
+ int dim = p->dim;
+ int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
+ int kv_mul =
+ p->n_heads /
+ p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
+ int hidden_dim = p->hidden_dim;
+ int head_size = dim / p->n_heads;
+
+ // copy the token embedding into x
+ float *content_row = w->token_embedding_table + token * dim;
+ memcpy(x, content_row, dim * sizeof(*x));
+
+ // forward all the layers
+ for (unsigned long long l = 0; l < p->n_layers; l++) {
+
+ // attention rmsnorm
+ rmsnorm(s->xb, x, w->rms_att_weight + l * dim, dim);
+
+ // key and value point to the kv cache
+ int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
+ s->k = s->key_cache + loff + pos * kv_dim;
+ s->v = s->value_cache + loff + pos * kv_dim;
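+ // for example (hypothetical sizes): with seq_len = 256 and kv_dim = 288,
+ // layer l's slice of the cache starts at float offset l * 256 * 288 and the
+ // key for position pos occupies the kv_dim floats at loff + pos * 288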
+
+ // qkv matmuls for this position
+ matmul(s->q, s->xb, w->wq + l * dim * dim, dim, dim);
+ matmul(s->k, s->xb, w->wk + l * dim * kv_dim, dim, kv_dim);
+ matmul(s->v, s->xb, w->wv + l * dim * kv_dim, dim, kv_dim);
+
+ // RoPE relative positional encoding: complex-valued rotate q and k in each
+ // head
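+ // each adjacent pair (vec[i], vec[i+1]) is rotated by the angle
+ // theta = pos / 10000^((i % head_size) / head_size), i.e.
+ // [v0, v1] -> [v0*cos(theta) - v1*sin(theta), v0*sin(theta) + v1*cos(theta)]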
+ for (int i = 0; i < dim; i += 2) {
+ int head_dim = i % head_size;
+ float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
+ float val = pos * freq;
+ float fcr = cosf(val);
+ float fci = sinf(val);
+ int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
+ for (int v = 0; v < rotn; v++) {
+ float *vec =
+ v == 0 ? s->q : s->k; // the vector to rotate (query or key)
+ float v0 = vec[i];
+ float v1 = vec[i + 1];
+ vec[i] = v0 * fcr - v1 * fci;
+ vec[i + 1] = v0 * fci + v1 * fcr;
+ }
}
-}
-float* forward(Transformer* transformer, int token, int pos) {
-
- // a few convenience variables
- Config* p = &transformer->config;
- TransformerWeights* w = &transformer->weights;
- RunState* s = &transformer->state;
- float *x = s->x;
- int dim = p->dim;
- int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
- int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
- int hidden_dim = p->hidden_dim;
- int head_size = dim / p->n_heads;
-
- // copy the token embedding into x
- float* content_row = w->token_embedding_table + token * dim;
- memcpy(x, content_row, dim*sizeof(*x));
-
- // forward all the layers
- for(unsigned long long l = 0; l < p->n_layers; l++) {
-
- // attention rmsnorm
- rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);
-
- // key and value point to the kv cache
- int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
- s->k = s->key_cache + loff + pos * kv_dim;
- s->v = s->value_cache + loff + pos * kv_dim;
-
- // qkv matmuls for this position
- matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim);
- matmul(s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim);
- matmul(s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim);
-
- // RoPE relative positional encoding: complex-valued rotate q and k in each head
- for (int i = 0; i < dim; i+=2) {
- int head_dim = i % head_size;
- float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
- float val = pos * freq;
- float fcr = cosf(val);
- float fci = sinf(val);
- int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
- for (int v = 0; v < rotn; v++) {
- float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
- float v0 = vec[i];
- float v1 = vec[i+1];
- vec[i] = v0 * fcr - v1 * fci;
- vec[i+1] = v0 * fci + v1 * fcr;
- }
+ // multihead attention. iterate over all heads
+ int h;
+#pragma omp parallel for private(h)
+ for (h = 0; h < p->n_heads; h++) {
+ // get the query vector for this head
+ float *q = s->q + h * head_size;
+ // attention scores for this head
+ float *att = s->att + h * p->seq_len;
+ // iterate over all timesteps, including the current one
+ for (int t = 0; t <= pos; t++) {
+ // get the key vector for this head and at this timestep
+ float *k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
+ // calculate the attention score as the dot product of q and k
+ float score = 0.0f;
+ for (int i = 0; i < head_size; i++) {
+ score += q[i] * k[i];
}
-
- // multihead attention. iterate over all heads
- int h;
- #pragma omp parallel for private(h)
- for (h = 0; h < p->n_heads; h++) {
- // get the query vector for this head
- float* q = s->q + h * head_size;
- // attention scores for this head
- float* att = s->att + h * p->seq_len;
- // iterate over all timesteps, including the current one
- for (int t = 0; t <= pos; t++) {
- // get the key vector for this head and at this timestep
- float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
- // calculate the attention score as the dot product of q and k
- float score = 0.0f;
- for (int i = 0; i < head_size; i++) {
- score += q[i] * k[i];
- }
- score /= sqrtf(head_size);
- // save the score to the attention buffer
- att[t] = score;
- }
-
- // softmax the scores to get attention weights, from 0..pos inclusively
- softmax(att, pos + 1);
-
- // weighted sum of the values, store back into xb
- float* xb = s->xb + h * head_size;
- memset(xb, 0, head_size * sizeof(float));
- for (int t = 0; t <= pos; t++) {
- // get the value vector for this head and at this timestep
- float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
- // get the attention weight for this timestep
- float a = att[t];
- // accumulate the weighted value into xb
- for (int i = 0; i < head_size; i++) {
- xb[i] += a * v[i];
- }
- }
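+ // scale by 1/sqrt(head_size) so the score's variance stays O(1) regardless
+ // of head size (standard scaled dot-product attention)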
+ score /= sqrtf(head_size);
+ // save the score to the attention buffer
+ att[t] = score;
+ }
+
+ // softmax the scores to get attention weights, from 0..pos inclusively
+ softmax(att, pos + 1);
+
+ // weighted sum of the values, store back into xb
+ float *xb = s->xb + h * head_size;
+ memset(xb, 0, head_size * sizeof(float));
+ for (int t = 0; t <= pos; t++) {
+ // get the value vector for this head and at this timestep
+ float *v =
+ s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
+ // get the attention weight for this timestep
+ float a = att[t];
+ // accumulate the weighted value into xb
+ for (int i = 0; i < head_size; i++) {
+ xb[i] += a * v[i];
}
+ }
+ }
- // final matmul to get the output of the attention
- matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);
+ // final matmul to get the output of the attention
+ matmul(s->xb2, s->xb, w->wo + l * dim * dim, dim, dim);
- // residual connection back into x
- for (int i = 0; i < dim; i++) {
- x[i] += s->xb2[i];
- }
+ // residual connection back into x
+ for (int i = 0; i < dim; i++) {
+ x[i] += s->xb2[i];
+ }
- // ffn rmsnorm
- rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);
-
- // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
- // first calculate self.w1(x) and self.w3(x)
- matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
- matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);
-
- // SwiGLU non-linearity
- for (int i = 0; i < hidden_dim; i++) {
- float val = s->hb[i];
- // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
- val *= (1.0f / (1.0f + expf(-val)));
- // elementwise multiply with w3(x)
- val *= s->hb2[i];
- s->hb[i] = val;
- }
+ // ffn rmsnorm
+ rmsnorm(s->xb, x, w->rms_ffn_weight + l * dim, dim);
+
+ // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
+ // first calculate self.w1(x) and self.w3(x)
+ matmul(s->hb, s->xb, w->w1 + l * dim * hidden_dim, dim, hidden_dim);
+ matmul(s->hb2, s->xb, w->w3 + l * dim * hidden_dim, dim, hidden_dim);
+
+ // SwiGLU non-linearity
+ for (int i = 0; i < hidden_dim; i++) {
+ float val = s->hb[i];
+ // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
+ val *= (1.0f / (1.0f + expf(-val)));
+ // elementwise multiply with w3(x)
+ val *= s->hb2[i];
+ s->hb[i] = val;
+ }
- // final matmul to get the output of the ffn
- matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);
+ // final matmul to get the output of the ffn
+ matmul(s->xb, s->hb, w->w2 + l * dim * hidden_dim, hidden_dim, dim);
- // residual connection
- for (int i = 0; i < dim; i++) {
- x[i] += s->xb[i];
- }
+ // residual connection
+ for (int i = 0; i < dim; i++) {
+ x[i] += s->xb[i];
}
+ }
- // final rmsnorm
- rmsnorm(x, x, w->rms_final_weight, dim);
+ // final rmsnorm
+ rmsnorm(x, x, w->rms_final_weight, dim);
- // classifier into logits
- matmul(s->logits, x, w->wcls, p->dim, p->vocab_size);
- return s->logits;
+ // classifier into logits
+ matmul(s->logits, x, w->wcls, p->dim, p->vocab_size);
+ return s->logits;
}
// ----------------------------------------------------------------------------
// The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens
typedef struct {
- char *str;
- int id;
+ char *str;
+ int id;
} TokenIndex;
typedef struct {
- char** vocab;
- float* vocab_scores;
- TokenIndex *sorted_vocab;
- int vocab_size;
- unsigned int max_token_length;
- unsigned char byte_pieces[512]; // stores all single-byte strings
+ char **vocab;
+ float *vocab_scores;
+ TokenIndex *sorted_vocab;
+ int vocab_size;
+ unsigned int max_token_length;
+ unsigned char byte_pieces[512]; // stores all single-byte strings
} Tokenizer;
int compare_tokens(const void *a, const void *b) {
- return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
+ return strcmp(((TokenIndex *)a)->str, ((TokenIndex *)b)->str);
}
-void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) {
- // i should have written the vocab_size into the tokenizer file... sigh
- t->vocab_size = vocab_size;
- // malloc space to hold the scores and the strings
- t->vocab = (char**)malloc(vocab_size * sizeof(char*));
- t->vocab_scores = (float*)malloc(vocab_size * sizeof(float));
- t->sorted_vocab = NULL; // initialized lazily
- for (int i = 0; i < 256; i++) {
- t->byte_pieces[i * 2] = (unsigned char)i;
- t->byte_pieces[i * 2 + 1] = '\0';
- }
- // read in the file
- FILE *file = fopen(tokenizer_path, "rb");
- if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); }
- if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
- int len;
- for (int i = 0; i < vocab_size; i++) {
- if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);}
- if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
- t->vocab[i] = (char *)malloc(len + 1);
- if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
- t->vocab[i][len] = '\0'; // add the string terminating token
- }
- fclose(file);
+void build_tokenizer(Tokenizer *t, char *tokenizer_path, int vocab_size) {
+ // i should have written the vocab_size into the tokenizer file... sigh
+ t->vocab_size = vocab_size;
+ // malloc space to hold the scores and the strings
+ t->vocab = (char **)malloc(vocab_size * sizeof(char *));
+ t->vocab_scores = (float *)malloc(vocab_size * sizeof(float));
+ t->sorted_vocab = NULL; // initialized lazily
+ for (int i = 0; i < 256; i++) {
+ t->byte_pieces[i * 2] = (unsigned char)i;
+ t->byte_pieces[i * 2 + 1] = '\0';
+ }
+ // read in the file
+ FILE *file = fopen(tokenizer_path, "rb");
+ if (!file) {
+ fprintf(stderr, "couldn't load %s\n", tokenizer_path);
+ exit(EXIT_FAILURE);
+ }
+ if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) {
+ fprintf(stderr, "failed read\n");
+ exit(EXIT_FAILURE);
+ }
+ int len;
+ for (int i = 0; i < vocab_size; i++) {
+ if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) {
+ fprintf(stderr, "failed read\n");
+ exit(EXIT_FAILURE);
+ }
+ if (fread(&len, sizeof(int), 1, file) != 1) {
+ fprintf(stderr, "failed read\n");
+ exit(EXIT_FAILURE);
+ }
+ t->vocab[i] = (char *)malloc(len + 1);
+ if (fread(t->vocab[i], len, 1, file) != 1) {
+ fprintf(stderr, "failed read\n");
+ exit(EXIT_FAILURE);
+ }
+ t->vocab[i][len] = '\0'; // add the string terminating token
+ }
+ fclose(file);
}
-void free_tokenizer(Tokenizer* t) {
- for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); }
- free(t->vocab);
- free(t->vocab_scores);
- free(t->sorted_vocab);
+void free_tokenizer(Tokenizer *t) {
+ for (int i = 0; i < t->vocab_size; i++) {
+ free(t->vocab[i]);
+ }
+ free(t->vocab);
+ free(t->vocab_scores);
+ free(t->sorted_vocab);
}
-char* decode(Tokenizer* t, int prev_token, int token) {
- char *piece = t->vocab[token];
- // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
- if (prev_token == 1 && piece[0] == ' ') { piece++; }
- // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
- // parse this and convert and return the actual byte
- unsigned char byte_val;
- if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
- piece = (char*)t->byte_pieces + byte_val * 2;
- }
- return piece;
+char *decode(Tokenizer *t, int prev_token, int token) {
+ char *piece = t->vocab[token];
+ // following BOS (1) token, sentencepiece decoder strips any leading
+ // whitespace (see PR #89)
+ if (prev_token == 1 && piece[0] == ' ') {
+ piece++;
+ }
+ // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
+ // parse this and convert and return the actual byte
+ unsigned char byte_val;
+ if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
+ piece = (char *)t->byte_pieces + byte_val * 2;
+ }
+ return piece;
}
void safe_printf(char *piece) {
- // piece might be a raw byte token, and we only want to print printable chars or whitespace
- // because some of the other bytes can be various control codes, backspace, etc.
- if (piece == NULL) { return; }
- if (piece[0] == '\0') { return; }
- if (piece[1] == '\0') {
- unsigned char byte_val = piece[0];
- if (!(isprint(byte_val) || isspace(byte_val))) {
- return; // bad byte, don't print it
- }
+ // piece might be a raw byte token, and we only want to print printable chars
+ // or whitespace because some of the other bytes can be various control codes,
+ // backspace, etc.
+ if (piece == NULL) {
+ return;
+ }
+ if (piece[0] == '\0') {
+ return;
+ }
+ if (piece[1] == '\0') {
+ unsigned char byte_val = piece[0];
+ if (!(isprint(byte_val) || isspace(byte_val))) {
+ return; // bad byte, don't print it
}
- printf("%s", piece);
+ }
+ printf("%s", piece);
}
int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
- // efficiently find the perfect match for str in vocab, return its index or -1 if not found
- TokenIndex tok = { .str = str }; // acts as the key to search for
- TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
- return res != NULL ? res->id : -1;
+ // efficiently find the perfect match for str in vocab, return its index or -1
+ // if not found
+ TokenIndex tok = {.str = str}; // acts as the key to search for
+ TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex),
+ compare_tokens);
+ return res != NULL ? res->id : -1;
}
-void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
- // encode the string text (input) into an upper-bound preallocated tokens[] array
- // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2)
- if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }
-
- if (t->sorted_vocab == NULL) {
- // lazily malloc and sort the vocabulary
- t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
- for (int i = 0; i < t->vocab_size; i++) {
- t->sorted_vocab[i].str = t->vocab[i];
- t->sorted_vocab[i].id = i;
- }
- qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
- }
-
- // create a temporary buffer that will store merge candidates of always two consecutive tokens
- // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
- char* str_buffer = malloc((t->max_token_length*2 +1 +2) * sizeof(char));
- size_t str_len = 0;
-
- // start at 0 tokens
- *n_tokens = 0;
-
- // add optional BOS (=1) token, if desired
- if (bos) tokens[(*n_tokens)++] = 1;
-
- // add_dummy_prefix is true by default
- // so prepend a dummy prefix token to the input string, but only if text != ""
- // TODO: pretty sure this isn't correct in the general case but I don't have the
- // energy to read more of the sentencepiece code to figure out what it's doing
- if (text[0] != '\0') {
- int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
- tokens[(*n_tokens)++] = dummy_prefix;
- }
-
- // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
- // Code point ↔ UTF-8 conversion
- // First code point Last code point Byte 1 Byte 2 Byte 3 Byte 4
- // U+0000 U+007F 0xxxxxxx
- // U+0080 U+07FF 110xxxxx 10xxxxxx
- // U+0800 U+FFFF 1110xxxx 10xxxxxx 10xxxxxx
- // U+10000 U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
-
- // process the raw (UTF-8) byte sequence of the input string
- for (char *c = text; *c != '\0'; c++) {
-
- // reset buffer if the current byte is ASCII or a leading byte
- // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
- // 0x80 is 10000000
- // in UTF-8, all continuation bytes start with "10" in first two bits
- // so in English this is: "if this byte is not a continuation byte"
- if ((*c & 0xC0) != 0x80) {
- // this byte must be either a leading byte (11...) or an ASCII char (0x...)
- // => reset our location, as we're starting a new UTF-8 codepoint
- str_len = 0;
- }
+void encode(Tokenizer *t, char *text, int8_t bos, int8_t eos, int *tokens,
+ int *n_tokens) {
+ // encode the string text (input) into an upper-bound preallocated tokens[]
+ // array. bos != 0 means prepend the BOS token (=1), eos != 0 means append
+ // the EOS token (=2)
+ if (text == NULL) {
+ fprintf(stderr, "cannot encode NULL text\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (t->sorted_vocab == NULL) {
+ // lazily malloc and sort the vocabulary
+ t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
+ for (int i = 0; i < t->vocab_size; i++) {
+ t->sorted_vocab[i].str = t->vocab[i];
+ t->sorted_vocab[i].id = i;
+ }
+ qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
+ }
+
+ // create a temporary buffer that will store merge candidates of always two
+ // consecutive tokens. *2 for concat, +1 for null terminator, +2 for UTF8
+ // (in case max_token_length is 1)
+ char *str_buffer = malloc((t->max_token_length * 2 + 1 + 2) * sizeof(char));
+ size_t str_len = 0;
+
+ // start at 0 tokens
+ *n_tokens = 0;
+
+ // add optional BOS (=1) token, if desired
+ if (bos)
+ tokens[(*n_tokens)++] = 1;
+
+ // add_dummy_prefix is true by default
+ // so prepend a dummy prefix token to the input string, but only if text != ""
+ // TODO: pretty sure this isn't correct in the general case but I don't have
+ // the energy to read more of the sentencepiece code to figure out what it's
+ // doing
+ if (text[0] != '\0') {
+ int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
+ tokens[(*n_tokens)++] = dummy_prefix;
+ }
+
+ // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
+ // Code point ↔ UTF-8 conversion
+ // First code point Last code point Byte 1 Byte 2 Byte 3 Byte 4
+ // U+0000 U+007F 0xxxxxxx
+ // U+0080 U+07FF 110xxxxx 10xxxxxx
+ // U+0800 U+FFFF 1110xxxx 10xxxxxx 10xxxxxx
+ // U+10000 U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+ // process the raw (UTF-8) byte sequence of the input string
+ for (char *c = text; *c != '\0'; c++) {
+
+ // reset buffer if the current byte is ASCII or a leading byte
+ // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the
+ // rest. 0x80 is 10000000. in UTF-8, all continuation bytes start with "10"
+ // in the first two bits, so in English this is: "if this byte is not a
+ // continuation byte"
+ if ((*c & 0xC0) != 0x80) {
+ // this byte must be either a leading byte (11...) or an ASCII char
+ // (0x...)
+ // => reset our location, as we're starting a new UTF-8 codepoint
+ str_len = 0;
+ }
- // append the current byte to the buffer
- str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
- str_buffer[str_len] = '\0';
+ // append the current byte to the buffer
+ str_buffer[str_len++] =
+ *c; // ++ is post-increment, incremented after this line
+ str_buffer[str_len] = '\0';
- // while the next character is a continuation byte, continue appending
- // but if there are too many of them, just stop to avoid overruning str_buffer size.
- if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
- continue;
- }
+ // while the next character is a continuation byte, continue appending
+ // but if there are too many of them, just stop to avoid overruning
+ // str_buffer size.
+ if ((*(c + 1) & 0xC0) == 0x80 && str_len < 4) {
+ continue;
+ }
- // ok c+1 is not a continuation byte, so we've read in a full codepoint
- int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
+ // ok c+1 is not a continuation byte, so we've read in a full codepoint
+ int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
- if (id != -1) {
- // we found this codepoint in vocab, add it as a token
- tokens[(*n_tokens)++] = id;
- } else {
- // byte_fallback encoding: just encode each byte as a token
- // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
- // so the individual bytes only start at index 3
- for (int i=0; i < str_len; i++) {
- tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
- }
- }
- str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
- }
-
- // merge the best consecutive pair each iteration, according the scores in vocab_scores
- while (1) {
- float best_score = -1e10;
- int best_id = -1;
- int best_idx = -1;
-
- for (int i=0; i < (*n_tokens-1); i++) {
- // check if we can merge the pair (tokens[i], tokens[i+1])
- sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
- int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
- if (id != -1 && t->vocab_scores[id] > best_score) {
- // this merge pair exists in vocab! record its score and position
- best_score = t->vocab_scores[id];
- best_id = id;
- best_idx = i;
- }
- }
+ if (id != -1) {
+ // we found this codepoint in vocab, add it as a token
+ tokens[(*n_tokens)++] = id;
+ } else {
+ // byte_fallback encoding: just encode each byte as a token
+ // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
+ // so the individual bytes only start at index 3
+ for (int i = 0; i < str_len; i++) {
+ tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
+ }
+ }
+ str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
+ }
+
+ // merge the best consecutive pair each iteration, according to the scores
+ // in vocab_scores
+ while (1) {
+ float best_score = -1e10;
+ int best_id = -1;
+ int best_idx = -1;
+
+ for (int i = 0; i < (*n_tokens - 1); i++) {
+ // check if we can merge the pair (tokens[i], tokens[i+1])
+ sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i + 1]]);
+ int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
+ if (id != -1 && t->vocab_scores[id] > best_score) {
+ // this merge pair exists in vocab! record its score and position
+ best_score = t->vocab_scores[id];
+ best_id = id;
+ best_idx = i;
+ }
+ }
- if (best_idx == -1) {
- break; // we couldn't find any more pairs to merge, so we're done
- }
+ if (best_idx == -1) {
+ break; // we couldn't find any more pairs to merge, so we're done
+ }
- // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
- tokens[best_idx] = best_id;
- // delete token at position best_idx+1, shift the entire sequence back 1
- for (int i = best_idx+1; i < (*n_tokens-1); i++) {
- tokens[i] = tokens[i+1];
- }
- (*n_tokens)--; // token length decreased
+ // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
+ tokens[best_idx] = best_id;
+ // delete token at position best_idx+1, shift the entire sequence back 1
+ for (int i = best_idx + 1; i < (*n_tokens - 1); i++) {
+ tokens[i] = tokens[i + 1];
}
+ (*n_tokens)--; // token length decreased
+ }
- // add optional EOS (=2) token, if desired
- if (eos) tokens[(*n_tokens)++] = 2;
+ // add optional EOS (=2) token, if desired
+ if (eos)
+ tokens[(*n_tokens)++] = 2;
- free(str_buffer);
+ free(str_buffer);
}
// ----------------------------------------------------------------------------
@@ -575,222 +730,240 @@ void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *
// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
typedef struct {
- float prob;
- int index;
+ float prob;
+ int index;
} ProbIndex; // struct used when sorting probabilities during top-p sampling
typedef struct {
- int vocab_size;
- ProbIndex* probindex; // buffer used in top-p sampling
- float temperature;
- float topp;
- unsigned long long rng_state;
+ int vocab_size;
+ ProbIndex *probindex; // buffer used in top-p sampling
+ float temperature;
+ float topp;
+ unsigned long long rng_state;
} Sampler;
-int sample_argmax(float* probabilities, int n) {
- // return the index that has the highest probability
- int max_i = 0;
- float max_p = probabilities[0];
- for (int i = 1; i < n; i++) {
- if (probabilities[i] > max_p) {
- max_i = i;
- max_p = probabilities[i];
- }
+int sample_argmax(float *probabilities, int n) {
+ // return the index that has the highest probability
+ int max_i = 0;
+ float max_p = probabilities[0];
+ for (int i = 1; i < n; i++) {
+ if (probabilities[i] > max_p) {
+ max_i = i;
+ max_p = probabilities[i];
}
- return max_i;
+ }
+ return max_i;
}
-int sample_mult(float* probabilities, int n, float coin) {
- // sample index from probabilities (they must sum to 1!)
- // coin is a random number in [0, 1), usually from random_f32()
- float cdf = 0.0f;
- for (int i = 0; i < n; i++) {
- cdf += probabilities[i];
- if (coin < cdf) {
- return i;
- }
+int sample_mult(float *probabilities, int n, float coin) {
+ // sample index from probabilities (they must sum to 1!)
+ // coin is a random number in [0, 1), usually from random_f32()
+ float cdf = 0.0f;
+ for (int i = 0; i < n; i++) {
+ cdf += probabilities[i];
+ if (coin < cdf) {
+ return i;
}
- return n - 1; // in case of rounding errors
+ }
+ return n - 1; // in case of rounding errors
}
-int compare(const void* a, const void* b) {
- ProbIndex* a_ = (ProbIndex*) a;
- ProbIndex* b_ = (ProbIndex*) b;
- if (a_->prob > b_->prob) return -1;
- if (a_->prob < b_->prob) return 1;
- return 0;
+int compare(const void *a, const void *b) {
+ ProbIndex *a_ = (ProbIndex *)a;
+ ProbIndex *b_ = (ProbIndex *)b;
+ if (a_->prob > b_->prob)
+ return -1;
+ if (a_->prob < b_->prob)
+ return 1;
+ return 0;
}
-int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {
- // top-p sampling (or "nucleus sampling") samples from the smallest set of
- // tokens that exceed probability topp. This way we never sample tokens that
- // have very low probabilities and are less likely to go "off the rails".
- // coin is a random number in [0, 1), usually from random_f32()
-
- int n0 = 0;
- // quicksort indices in descending order of probabilities
- // values smaller than (1 - topp) / (n - 1) cannot be part of the result
- // so for efficiency we crop these out as candidates before sorting
- const float cutoff = (1.0f - topp) / (n - 1);
- for (int i = 0; i < n; i++) {
- if (probabilities[i] >= cutoff) {
- probindex[n0].index = i;
- probindex[n0].prob = probabilities[i];
- n0++;
- }
+int sample_topp(float *probabilities, int n, float topp, ProbIndex *probindex,
+ float coin) {
+ // top-p sampling (or "nucleus sampling") samples from the smallest set of
+ // tokens that exceed probability topp. This way we never sample tokens that
+ // have very low probabilities and are less likely to go "off the rails".
+ // coin is a random number in [0, 1), usually from random_f32()
+
+ int n0 = 0;
+ // quicksort indices in descending order of probabilities
+ // values smaller than (1 - topp) / (n - 1) cannot be part of the result
+ // so for efficiency we crop these out as candidates before sorting
+ const float cutoff = (1.0f - topp) / (n - 1);
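+ // rationale: each cropped token has prob < (1 - topp) / (n - 1) and (for any
+ // realistic topp) there are at most n - 1 of them, so together they hold less
+ // than 1 - topp of the mass; the kept candidates alone already sum to more
+ // than topp, so the smallest set exceeding topp never needs a cropped token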
+ for (int i = 0; i < n; i++) {
+ if (probabilities[i] >= cutoff) {
+ probindex[n0].index = i;
+ probindex[n0].prob = probabilities[i];
+ n0++;
}
- qsort(probindex, n0, sizeof(ProbIndex), compare);
-
- // truncate the list where cumulative probability exceeds topp
- float cumulative_prob = 0.0f;
- int last_idx = n0 - 1; // in case of rounding errors consider all elements
- for (int i = 0; i < n0; i++) {
- cumulative_prob += probindex[i].prob;
- if (cumulative_prob > topp) {
- last_idx = i;
- break; // we've exceeded topp by including last_idx
- }
+ }
+ qsort(probindex, n0, sizeof(ProbIndex), compare);
+
+ // truncate the list where cumulative probability exceeds topp
+ float cumulative_prob = 0.0f;
+ int last_idx = n0 - 1; // in case of rounding errors consider all elements
+ for (int i = 0; i < n0; i++) {
+ cumulative_prob += probindex[i].prob;
+ if (cumulative_prob > topp) {
+ last_idx = i;
+ break; // we've exceeded topp by including last_idx
}
-
- // sample from the truncated list
- float r = coin * cumulative_prob;
- float cdf = 0.0f;
- for (int i = 0; i <= last_idx; i++) {
- cdf += probindex[i].prob;
- if (r < cdf) {
- return probindex[i].index;
- }
+ }
+
+ // sample from the truncated list
+ float r = coin * cumulative_prob;
+ float cdf = 0.0f;
+ for (int i = 0; i <= last_idx; i++) {
+ cdf += probindex[i].prob;
+ if (r < cdf) {
+ return probindex[i].index;
}
- return probindex[last_idx].index; // in case of rounding errors
+ }
+ return probindex[last_idx].index; // in case of rounding errors
}
-void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
- sampler->vocab_size = vocab_size;
- sampler->temperature = temperature;
- sampler->topp = topp;
- sampler->rng_state = rng_seed;
- // buffer only used with nucleus sampling; may not need but it's ~small
- sampler->probindex = malloc(sampler->vocab_size * sizeof(ProbIndex));
+void build_sampler(Sampler *sampler, int vocab_size, float temperature,
+ float topp, unsigned long long rng_seed) {
+ sampler->vocab_size = vocab_size;
+ sampler->temperature = temperature;
+ sampler->topp = topp;
+ sampler->rng_state = rng_seed;
+ // buffer only used with nucleus sampling; may not need but it's ~small
+ sampler->probindex = malloc(sampler->vocab_size * sizeof(ProbIndex));
}
-void free_sampler(Sampler* sampler) {
- free(sampler->probindex);
-}
+void free_sampler(Sampler *sampler) { free(sampler->probindex); }
unsigned int random_u32(unsigned long long *state) {
- // xorshift rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
- *state ^= *state >> 12;
- *state ^= *state << 25;
- *state ^= *state >> 27;
- return (*state * 0x2545F4914F6CDD1Dull) >> 32;
+ // xorshift rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
+ *state ^= *state >> 12;
+ *state ^= *state << 25;
+ *state ^= *state >> 27;
+ return (*state * 0x2545F4914F6CDD1Dull) >> 32;
}
float random_f32(unsigned long long *state) { // random float32 in [0,1)
- return (random_u32(state) >> 8) / 16777216.0f;
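+ // keep the top 24 bits and divide by 2^24 = 16777216, mapping them uniformly
+ // onto [0, 1) at exactly float-mantissa resolution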
+ return (random_u32(state) >> 8) / 16777216.0f;
}
-int sample(Sampler* sampler, float* logits) {
- // sample the token given the logits and some hyperparameters
- int next;
- if (sampler->temperature == 0.0f) {
- // greedy argmax sampling: take the token with the highest probability
- next = sample_argmax(logits, sampler->vocab_size);
+int sample(Sampler *sampler, float *logits) {
+ // sample the token given the logits and some hyperparameters
+ int next;
+ if (sampler->temperature == 0.0f) {
+ // greedy argmax sampling: take the token with the highest probability
+ next = sample_argmax(logits, sampler->vocab_size);
+ } else {
+ // apply the temperature to the logits
+ for (int q = 0; q < sampler->vocab_size; q++) {
+ logits[q] /= sampler->temperature;
+ }
+ // apply softmax to the logits to get the probabilities for next token
+ softmax(logits, sampler->vocab_size);
+ // flip a (float) coin (this is our source of entropy for sampling)
+ float coin = random_f32(&sampler->rng_state);
+ // we sample from this distribution to get the next token
+ if (sampler->topp <= 0 || sampler->topp >= 1) {
+ // simply sample from the predicted probability distribution
+ next = sample_mult(logits, sampler->vocab_size, coin);
} else {
- // apply the temperature to the logits
- for (int q=0; q<sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
- // apply softmax to the logits to get the probabilities for next token
- softmax(logits, sampler->vocab_size);
- // flip a (float) coin (this is our source of entropy for sampling)
- float coin = random_f32(&sampler->rng_state);
- // we sample from this distribution to get the next token
- if (sampler->topp <= 0 || sampler->topp >= 1) {
- // simply sample from the predicted probability distribution
- next = sample_mult(logits, sampler->vocab_size, coin);
- } else {
- // top-p (nucleus) sampling, clamping the least likely tokens to zero
- next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
- }
+ // top-p (nucleus) sampling, clamping the least likely tokens to zero
+ next = sample_topp(logits, sampler->vocab_size, sampler->topp,
+ sampler->probindex, coin);
}
- return next;
+ }
+ return next;
}
// ----------------------------------------------------------------------------
// utilities: time
long time_in_ms() {
- // return time in milliseconds, for benchmarking the model speed
- struct timespec time;
- clock_gettime(CLOCK_REALTIME, &time);
- return time.tv_sec * 1000 + time.tv_nsec / 1000000;
+ // return time in milliseconds, for benchmarking the model speed
+ struct timespec time;
+ clock_gettime(CLOCK_REALTIME, &time);
+ return time.tv_sec * 1000 + time.tv_nsec / 1000000;
}
// ----------------------------------------------------------------------------
// generation loop
-void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) {
- char *empty_prompt = "";
- if (prompt == NULL) { prompt = empty_prompt; }
-
- // encode the (string) prompt into tokens sequence
- int num_prompt_tokens = 0;
- int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
- encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
- if (num_prompt_tokens < 1) {
- fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
- exit(EXIT_FAILURE);
- }
-
- // start the main loop
- long start = 0; // used to time our code, only initialized after first iteration
- int next; // will store the next token in the sequence
- int token = prompt_tokens[0]; // kick off with the first token in the prompt
- int pos = 0; // position in the sequence
- while (pos < steps) {
-
- // forward the transformer to get logits for the next token
- float* logits = forward(transformer, token, pos);
-
- // advance the state machine
- if (pos < num_prompt_tokens - 1) {
- // if we are still processing the input prompt, force the next prompt token
- next = prompt_tokens[pos + 1];
- } else {
- // otherwise sample the next token from the logits
- next = sample(sampler, logits);
- }
- pos++;
-
- // data-dependent terminating condition: the BOS (=1) token delimits sequences
- if (next == 1) { break; }
-
- // print the token as string, decode it with the Tokenizer object
- char* piece = decode(tokenizer, token, next);
- safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
- fflush(stdout);
- token = next;
-
- // init the timer here because the first iteration can be slower
- if (start == 0) { start = time_in_ms(); }
+void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
+ char *prompt, int steps) {
+ char *empty_prompt = "";
+ if (prompt == NULL) {
+ prompt = empty_prompt;
+ }
+
+ // encode the (string) prompt into tokens sequence
+ int num_prompt_tokens = 0;
+ int *prompt_tokens = (int *)malloc((strlen(prompt) + 3) *
+ sizeof(int)); // +3 for '\0', ?BOS, ?EOS
+ encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
+ if (num_prompt_tokens < 1) {
+ fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
+ exit(EXIT_FAILURE);
+ }
+
+ // start the main loop
+ long start =
+ 0; // used to time our code, only initialized after first iteration
+ int next; // will store the next token in the sequence
+ int token = prompt_tokens[0]; // kick off with the first token in the prompt
+ int pos = 0; // position in the sequence
+ while (pos < steps) {
+
+ // forward the transformer to get logits for the next token
+ float *logits = forward(transformer, token, pos);
+
+ // advance the state machine
+ if (pos < num_prompt_tokens - 1) {
+ // if we are still processing the input prompt, force the next prompt
+ // token
+ next = prompt_tokens[pos + 1];
+ } else {
+ // otherwise sample the next token from the logits
+ next = sample(sampler, logits);
}
- printf("\n");
+ pos++;
- // report achieved tok/s (pos-1 because the timer starts after first iteration)
- if (pos > 1) {
- long end = time_in_ms();
- fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
+ // data-dependent terminating condition: the BOS (=1) token delimits
+ // sequences
+ if (next == 1) {
+ break;
}
- free(prompt_tokens);
+ // print the token as string, decode it with the Tokenizer object
+ char *piece = decode(tokenizer, token, next);
+ safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
+ fflush(stdout);
+ token = next;
+
+ // init the timer here because the first iteration can be slower
+ if (start == 0) {
+ start = time_in_ms();
+ }
+ }
+ printf("\n");
+
+ // report achieved tok/s (pos-1 because the timer starts after first
+ // iteration)
+ if (pos > 1) {
+ long end = time_in_ms();
+ fprintf(stderr, "achieved tok/s: %f\n",
+ (pos - 1) / (double)(end - start) * 1000);
+ }
+
+ free(prompt_tokens);
}
-void read_stdin(const char* guide, char* buffer, size_t bufsize) {
- // read a line from stdin, up to but not including \n
- printf("%s", guide);
- if (fgets(buffer, bufsize, stdin) != NULL) {
- size_t len = strlen(buffer);
- if (len > 0 && buffer[len - 1] == '\n') {
- buffer[len - 1] = '\0'; // strip newline
- }
+void read_stdin(const char *guide, char *buffer, size_t bufsize) {
+ // read a line from stdin, up to but not including \n
+ printf("%s", guide);
+ if (fgets(buffer, bufsize, stdin) != NULL) {
+ size_t len = strlen(buffer);
+ if (len > 0 && buffer[len - 1] == '\n') {
+ buffer[len - 1] = '\0'; // strip newline
}
+ }
}
// ----------------------------------------------------------------------------
@@ -802,172 +975,211 @@ void read_stdin(const char* guide, char* buffer, size_t bufsize) {
void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
char *cli_user_prompt, char *cli_system_prompt, int steps) {
- // buffers for reading the system prompt and user prompt from stdin
- // you'll notice they are soomewhat haphazardly and unsafely set atm
- char system_prompt[512];
- char user_prompt[512];
- char rendered_prompt[1152];
- int num_prompt_tokens = 0;
- int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
- int user_idx;
-
- // start the main loop
- int8_t user_turn = 1; // user starts
- int next; // will store the next token in the sequence
- int token; // stores the current token to feed into the transformer
- int prev_token;
- int pos = 0; // position in the sequence
- while (pos < steps) {
-
- // when it is the user's turn to contribute tokens to the dialog...
- if (user_turn) {
- // get the (optional) system prompt at position 0
- if (pos == 0) {
- // at position 0, the user can also contribute a system prompt
- if (cli_system_prompt == NULL) {
- // system prompt was not passed in, attempt to get it from stdin
- read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
- } else {
- // system prompt was passed in, use it
- strcpy(system_prompt, cli_system_prompt);
- }
- }
- // get the user prompt
- if (pos == 0 && cli_user_prompt != NULL) {
- // user prompt for position 0 was passed in, use it
- strcpy(user_prompt, cli_user_prompt);
- } else {
- // otherwise get user prompt from stdin
- read_stdin("User: ", user_prompt, sizeof(user_prompt));
- }
- // render user/system prompts into the Llama 2 Chat schema
- if (pos == 0 && system_prompt[0] != '\0') {
- char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
- sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
- } else {
- char user_template[] = "[INST] %s [/INST]";
- sprintf(rendered_prompt, user_template, user_prompt);
- }
- // encode the rendered prompt into tokens
- encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
- user_idx = 0; // reset the user index
- user_turn = 0;
- printf("Assistant: ");
- }
-
- // determine the token to pass into the transformer next
- if (user_idx < num_prompt_tokens) {
- // if we are still processing the input prompt, force the next prompt token
- token = prompt_tokens[user_idx++];
+ // buffers for reading the system prompt and user prompt from stdin
+ // you'll notice they are somewhat haphazardly and unsafely set atm
+ char system_prompt[512];
+ char user_prompt[512];
+ char rendered_prompt[1152];
+ int num_prompt_tokens = 0;
+ int *prompt_tokens = (int *)malloc(1152 * sizeof(int));
+ int user_idx;
+
+ // start the main loop
+ int8_t user_turn = 1; // user starts
+ int next; // will store the next token in the sequence
+ int token; // stores the current token to feed into the transformer
+ int prev_token;
+ int pos = 0; // position in the sequence
+ while (pos < steps) {
+
+ // when it is the user's turn to contribute tokens to the dialog...
+ if (user_turn) {
+ // get the (optional) system prompt at position 0
+ if (pos == 0) {
+ // at position 0, the user can also contribute a system prompt
+ if (cli_system_prompt == NULL) {
+ // system prompt was not passed in, attempt to get it from stdin
+ read_stdin("Enter system prompt (optional): ", system_prompt,
+ sizeof(system_prompt));
} else {
- // otherwise use the next token sampled from previous turn
- token = next;
- }
- // EOS (=2) token ends the Assistant turn
- if (token == 2) { user_turn = 1; }
-
- // forward the transformer to get logits for the next token
- float* logits = forward(transformer, token, pos);
- next = sample(sampler, logits);
- pos++;
-
- if (user_idx >= num_prompt_tokens && next != 2) {
- // the Assistant is responding, so print its output
- char* piece = decode(tokenizer, token, next);
- safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
- fflush(stdout);
+ // system prompt was passed in, use it
+ strcpy(system_prompt, cli_system_prompt);
}
- if (next == 2) { printf("\n"); }
+ }
+ // get the user prompt
+ if (pos == 0 && cli_user_prompt != NULL) {
+ // user prompt for position 0 was passed in, use it
+ strcpy(user_prompt, cli_user_prompt);
+ } else {
+ // otherwise get user prompt from stdin
+ read_stdin("User: ", user_prompt, sizeof(user_prompt));
+ }
+ // render user/system prompts into the Llama 2 Chat schema
+ if (pos == 0 && system_prompt[0] != '\0') {
+ char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
+ sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
+ } else {
+ char user_template[] = "[INST] %s [/INST]";
+ sprintf(rendered_prompt, user_template, user_prompt);
+ }
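+ // e.g. (hypothetical prompts) system "Be brief" and user "Hi" render as:
+ //   [INST] <<SYS>>\nBe brief\n<</SYS>>\n\nHi [/INST]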
+ // encode the rendered prompt into tokens
+ encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens,
+ &num_prompt_tokens);
+ user_idx = 0; // reset the user index
+ user_turn = 0;
+ printf("Assistant: ");
+ }
+
+ // determine the token to pass into the transformer next
+ if (user_idx < num_prompt_tokens) {
+ // if we are still processing the input prompt, force the next prompt
+ // token
+ token = prompt_tokens[user_idx++];
+ } else {
+ // otherwise use the next token sampled from previous turn
+ token = next;
+ }
+ // EOS (=2) token ends the Assistant turn
+ if (token == 2) {
+ user_turn = 1;
}
- printf("\n");
- free(prompt_tokens);
-}
+ // forward the transformer to get logits for the next token
+ float *logits = forward(transformer, token, pos);
+ next = sample(sampler, logits);
+ pos++;
+
+ if (user_idx >= num_prompt_tokens && next != 2) {
+ // the Assistant is responding, so print its output
+ char *piece = decode(tokenizer, token, next);
+ safe_printf(
+ piece); // same as printf("%s", piece), but skips "unsafe" bytes
+ fflush(stdout);
+ }
+ if (next == 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+ free(prompt_tokens);
+}
// ----------------------------------------------------------------------------
// CLI, include only if not testing
#ifndef TESTING
void error_usage() {
- fprintf(stderr, "Usage: run [options]\n");
- fprintf(stderr, "Example: run model.bin -n 256 -i \"Once upon a time\"\n");
- fprintf(stderr, "Options:\n");
- fprintf(stderr, " -t temperature in [0,inf], default 1.0\n");
- fprintf(stderr, " -p p value in top-p (nucleus) sampling in [0,1] default 0.9\n");
- fprintf(stderr, " -s random seed, default time(NULL)\n");
- fprintf(stderr, " -n number of steps to run for, default 256. 0 = max_seq_len\n");
- fprintf(stderr, " -i input prompt\n");
- fprintf(stderr, " -z optional path to custom tokenizer\n");
- fprintf(stderr, " -m mode: generate|chat, default: generate\n");
- fprintf(stderr, " -y (optional) system prompt in chat mode\n");
- exit(EXIT_FAILURE);
+ fprintf(stderr, "Usage: run [options]\n");
+ fprintf(stderr, "Example: run model.bin -n 256 -i \"Once upon a time\"\n");
+ fprintf(stderr, "Options:\n");
+ fprintf(stderr, " -t temperature in [0,inf], default 1.0\n");
+ fprintf(stderr, " -p p value in top-p (nucleus) sampling in [0,1] "
+ "default 0.9\n");
+ fprintf(stderr, " -s random seed, default time(NULL)\n");
+ fprintf(stderr, " -n number of steps to run for, default 256. 0 = "
+ "max_seq_len\n");
+ fprintf(stderr, " -i input prompt\n");
+ fprintf(stderr, " -z optional path to custom tokenizer\n");
+ fprintf(stderr, " -m mode: generate|chat, default: generate\n");
+ fprintf(stderr, " -y (optional) system prompt in chat mode\n");
+ exit(EXIT_FAILURE);
}
int main(int argc, char *argv[]) {
- // default parameters
- char *checkpoint_path = NULL; // e.g. out/model.bin
- char *tokenizer_path = "tokenizer.bin";
- float temperature = 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher
- float topp = 0.9f; // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
- int steps = 256; // number of steps to run for
- char *prompt = NULL; // prompt string
- unsigned long long rng_seed = 0; // seed rng with time by default
- char *mode = "generate"; // generate|chat
- char *system_prompt = NULL; // the (optional) system prompt to use in chat mode
-
- // poor man's C argparse so we can override the defaults above from the command line
- if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); }
- for (int i = 2; i < argc; i+=2) {
- // do some basic validation
- if (i + 1 >= argc) { error_usage(); } // must have arg after flag
- if (argv[i][0] != '-') { error_usage(); } // must start with dash
- if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
- // read in the args
- if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
- else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
- else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
- else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
- else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
- else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; }
- else if (argv[i][1] == 'm') { mode = argv[i + 1]; }
- else if (argv[i][1] == 'y') { system_prompt = argv[i + 1]; }
- else { error_usage(); }
- }
-
- // parameter validation/overrides
- if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL);
- if (temperature < 0.0) temperature = 0.0;
- if (topp < 0.0 || 1.0 < topp) topp = 0.9;
- if (steps < 0) steps = 0;
-
- // build the Transformer via the model .bin file
- Transformer transformer;
- build_transformer(&transformer, checkpoint_path);
- if (steps == 0 || steps > transformer.config.seq_len) steps = transformer.config.seq_len; // override to ~max length
-
- // build the Tokenizer via the tokenizer .bin file
- Tokenizer tokenizer;
- build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);
-
- // build the Sampler
- Sampler sampler;
- build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed);
-
- // run!
- if (strcmp(mode, "generate") == 0) {
- generate(&transformer, &tokenizer, &sampler, prompt, steps);
- } else if (strcmp(mode, "chat") == 0) {
- chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
+ // default parameters
+ char *checkpoint_path = NULL; // e.g. out/model.bin
+ char *tokenizer_path = "tokenizer.bin";
+ float temperature =
+ 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher
+ float topp =
+ 0.9f; // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
+ int steps = 256; // number of steps to run for
+ char *prompt = NULL; // prompt string
+ unsigned long long rng_seed = 0; // seed rng with time by default
+ char *mode = "generate"; // generate|chat
+ char *system_prompt =
+ NULL; // the (optional) system prompt to use in chat mode
+
+ // poor man's C argparse so we can override the defaults above from the
+ // command line
+ if (argc >= 2) {
+ checkpoint_path = argv[1];
+ } else {
+ error_usage();
+ }
+ // printf("before\n");
+ for (int i = 2; i < argc; i += 2) {
+ // do some basic validation
+ if (i + 1 >= argc) {
+ error_usage();
+ } // must have arg after flag
+ if (argv[i][0] != '-') {
+ error_usage();
+ } // must start with dash
+ if (strlen(argv[i]) != 2) {
+ error_usage();
+ } // must be -x (one dash, one letter)
+ // read in the args
+ if (argv[i][1] == 't') {
+ temperature = atof(argv[i + 1]);
+ } else if (argv[i][1] == 'p') {
+ topp = atof(argv[i + 1]);
+ } else if (argv[i][1] == 's') {
+ rng_seed = atoi(argv[i + 1]);
+ } else if (argv[i][1] == 'n') {
+ steps = atoi(argv[i + 1]);
+ } else if (argv[i][1] == 'i') {
+ prompt = argv[i + 1];
+ } else if (argv[i][1] == 'z') {
+ tokenizer_path = argv[i + 1];
+ } else if (argv[i][1] == 'm') {
+ mode = argv[i + 1];
+ } else if (argv[i][1] == 'y') {
+ system_prompt = argv[i + 1];
} else {
- fprintf(stderr, "unknown mode: %s\n", mode);
- error_usage();
+ error_usage();
}
-
- // memory and file handles cleanup
- free_sampler(&sampler);
- free_tokenizer(&tokenizer);
- free_transformer(&transformer);
- return 0;
+ }
+
+ // parameter validation/overrides
+ if (rng_seed <= 0)
+ rng_seed = (unsigned int)time(NULL);
+ if (temperature < 0.0)
+ temperature = 0.0;
+ if (topp < 0.0 || 1.0 < topp)
+ topp = 0.9;
+ if (steps < 0)
+ steps = 0;
+ // build the Transformer via the model .bin file
+ Transformer transformer;
+ build_transformer(&transformer, checkpoint_path);
+ if (steps == 0 || steps > transformer.config.seq_len)
+ steps = transformer.config.seq_len; // override to ~max length
+
+ // build the Tokenizer via the tokenizer .bin file
+ Tokenizer tokenizer;
+ build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);
+
+ // build the Sampler
+ Sampler sampler;
+ build_sampler(&sampler, transformer.config.vocab_size, temperature, topp,
+ rng_seed);
+
+ // run!
+ if (strcmp(mode, "generate") == 0) {
+ generate(&transformer, &tokenizer, &sampler, prompt, steps);
+ } else if (strcmp(mode, "chat") == 0) {
+ chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
+ } else {
+ fprintf(stderr, "unknown mode: %s\n", mode);
+ error_usage();
+ }
+
+ // memory and file handles cleanup
+ free_sampler(&sampler);
+ free_tokenizer(&tokenizer);
+ free_transformer(&transformer);
+ return 0;
}
#endif