Liu Song’s Projects


~/Projects/llama.cpp

git clone https://code.lsong.org/llama.cpp

Commit
486ae645fd3eda8b9d7413d5ff34fb65a3e337fb
Author
Gary Linscott <[email protected]>
Date
2023-03-21 09:27:42 -0700
Diffstat
 main.cpp | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++------
 utils.cpp | 7 ++
 utils.h | 1 

Compute perplexity over prompt (#270)

* Compute perplexity over prompt

* More accurate perplexity calculation - over all logits in the context window (so 512x more tokens!)

* Output all perplexities

* Add timing/ETA
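
For reference, the number this patch reports is the exponential of the average negative log-likelihood the model assigns to each next token of the prompt. A minimal, self-contained sketch of just that arithmetic, with made-up probabilities rather than logits from llama_eval:

// Illustration only: perplexity = exp(average negative log-likelihood).
// The probabilities below are placeholders; in the patch they come from a
// softmax over the logits that llama_eval returns for every position.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // p[i] = probability the model assigned to the token that actually came next
    std::vector<double> p = {0.21, 0.05, 0.63, 0.11};

    double nll = 0.0; // accumulated negative log-likelihood
    for (double prob : p) {
        nll += -std::log(prob);
    }
    // perplexity is e^(average negative log-likelihood), as in the patch
    printf("perplexity: %.4lf\n", std::exp(nll / p.size()));
    return 0;
}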


diff --git a/main.cpp b/main.cpp
index dd8e52df239d1adee471b6271ef49263ff5b57f3..9f46d569874d87a98da50ac50d511f21f49f1b45 100644
--- a/main.cpp
+++ b/main.cpp
@@ -560,7 +560,8 @@         const int n_threads,
         const int n_past,
         const std::vector<llama_vocab::id> & embd_inp,
               std::vector<float>           & embd_w,
-              size_t                       & mem_per_token) {
+              size_t                       & mem_per_token,
+              bool return_all_logits = false) {
     const int N = embd_inp.size();
 
@@ -579,9 +580,8 @@     static size_t buf_size = 512u*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
-#include "utils.h"
 #include <cstdio>
-#include <fstream>
+#define ANSI_COLOR_YELLOW  "\x1b[33m"
         //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
@@ -767,29 +767,113 @@
     //embd_w.resize(n_vocab*N);
     //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
 
-    // return result for just the last token
-    embd_w.resize(n_vocab);
-    memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+    if (return_all_logits) {
+        embd_w.resize(n_vocab * N);
+        memcpy(embd_w.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+    } else {
+        // return result for just the last token
+        embd_w.resize(n_vocab);
+        memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+    }
 
     if (mem_per_token == 0) {
         mem_per_token = ggml_used_mem(ctx0)/N;
     }
     //fprintf(stderr, "used_mem = %zu\n", ggml_used_mem(ctx0));
 
     ggml_free(ctx0);
 
     return true;
 }
+
+std::vector<double> softmax(const std::vector<float>& logits) {
+    std::vector<double> probs(logits.size());
+    float max_logit = logits[0];
+    for (float v : logits) max_logit = std::max(max_logit, v);
+    double sum_exp = 0.0;
+    for (size_t i = 0; i < logits.size(); i++) {
+        // Subtract the maximum logit value from the current logit value for numerical stability
+        float logit = logits[i] - max_logit;
+        double exp_logit = std::exp(logit);
+        sum_exp += exp_logit;
+        probs[i] = exp_logit;
+    }
+    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
+    return probs;
+}
+
+void perplexity(const llama_vocab &vocab, const llama_model &model, const gpt_params &params, size_t mem_per_token) {
+    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
+    // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
+    std::vector<llama_vocab::id> tokens = ::llama_tokenize(vocab, params.prompt, true);
+
+    int count = 0;
+    double nll = 0.0;
+    int seq_count = tokens.size() / params.n_ctx;
+    printf("Calculating perplexity over %d chunks\n", seq_count);
+    for (int i = 0; i < seq_count; ++i) {
+        int start = i * params.n_ctx;
+        int end = start + params.n_ctx - 1;
+        std::vector<llama_vocab::id> embd(tokens.begin() + start, tokens.begin() + end);
+        std::vector<float> logits;
+        auto start_t = std::chrono::high_resolution_clock::now();
+        if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token, true)) {
+            fprintf(stderr, "Failed to predict\n");
+            return;
+        }
+        auto end_t = std::chrono::high_resolution_clock::now();
+        if (i == 0) {
+            double seconds = std::chrono::duration<double>(end_t - start_t).count();
+            printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
+        }
+        // We get the logits for all the tokens in the context window (params.n_ctx)
+        // from llama_eval above.  Now, based on https://huggingface.co/docs/transformers/perplexity,
+        // calculate the perplexity over the last half the window (so the model always has
+        // some context to predict the token).
+        //
+        // We rely on the fact that attention in the forward pass only looks at previous
+        // tokens here, so the logits returned for each token are an accurate representation
+        // of what the model would have predicted at that point.
+        //
+        // Example, we have a context window of 512, we will compute perplexity for each of the
+        // last 256 tokens.  Then, we split the input up into context window size chunks to
+        // process the entire prompt.
+        for (int j = params.n_ctx / 2; j < params.n_ctx - 1; ++j) {
+            // Calculate probability of next token, given the previous ones.
+            int n_vocab = model.hparams.n_vocab;
+            std::vector<float> tok_logits(
+                logits.begin() + j * n_vocab,
+                logits.begin() + (j + 1) * n_vocab);
+            double prob = softmax(tok_logits)[tokens[start + j + 1]];
+            nll += -std::log(prob);
+            ++count;
+        }
+        // perplexity is e^(average negative log-likelihood)
+        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+        fflush(stdout);
+    }
+    printf("\n");
+}
 
 static bool is_interacting = false;
@@ -883,16 +966,27 @@                 params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
     }
 
     std::vector<float> logits;
 
+    // determine the required inference memory per token:
+    size_t mem_per_token = 0;
+    llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
+
+    if (params.perplexity) {
+        perplexity(vocab, model, params, mem_per_token);
+        exit(0);
+    }
+
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');
@@ -946,10 +1040,6 @@     fprintf(stderr, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
     fprintf(stderr, "\n\n");
 
     std::vector<llama_vocab::id> embd;
-
-    // determine the required inference memory per token:
-    size_t mem_per_token = 0;
-    llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
 
     int last_n_size = params.repeat_last_n;
     std::vector<llama_vocab::id> last_n_tokens(last_n_size);
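
The comment block in perplexity() above describes the chunking scheme: the tokenized prompt is split into n_ctx-sized chunks and only the second half of each chunk is scored, so every scored token has at least half a window of context behind it. A standalone sketch of that index arithmetic (the token count and output are illustrative, not taken from the patch):

// Which absolute token positions get scored, per the scheme described above.
// Token position j of a chunk predicts token j + 1, and only j >= n_ctx/2 is used.
#include <cstdio>

int main() {
    const int n_tokens = 2048; // pretend the tokenized prompt is this long
    const int n_ctx    = 512;  // context window, as in the example in the comment

    const int seq_count = n_tokens / n_ctx; // number of chunks, as in the patch
    int scored = 0;
    for (int i = 0; i < seq_count; ++i) {
        const int start = i * n_ctx;
        for (int j = n_ctx / 2; j < n_ctx - 1; ++j) {
            ++scored; // probability of tokens[start + j + 1] is evaluated
        }
        printf("chunk %d: scoring tokens %d..%d\n",
               i + 1, start + n_ctx / 2 + 1, start + n_ctx - 1);
    }
    printf("scored %d of %d tokens\n", scored, n_tokens);
    return 0;
}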




diff --git a/utils.cpp b/utils.cpp
index a3bda1563b072565fe6c53050746c7e82c592079..7c6864c8f4b8699324189c738f25c6c1645fe916 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -72,6 +72,8 @@         } else if (arg == "--color") {
             params.use_color = true;
         } else if (arg == "-r" || arg == "--reverse-prompt") {
             params.antiprompt.push_back(argv[++i]);
+        } else if (arg == "--perplexity") {
+            params.perplexity = true;
         } else if (arg == "--ignore-eos") {
             params.ignore_eos = true;
         } else if (arg == "--n_parts") {
@@ -120,6 +122,7 @@     fprintf(stderr, "  --memory_f16          use f16 instead of f32 for memory key+value\n");
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
     fprintf(stderr, "  --n_parts N           number of model parts (default: -1 = determine from dimensions)\n");
     fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
+    fprintf(stderr, "  --perplexity          compute perplexity over the prompt\n");
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
     fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
     fprintf(stderr, "\n");
@@ -596,7 +599,7 @@     uint8_t *pp = static_cast<uint8_t*>(alloca(pp_size));
 
     char * pdst = (char *) dst;
 
-    for (int j = 0; j < n; j += k) { 
+    for (int j = 0; j < n; j += k) {
         uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs);
         uint8_t * pm = (uint8_t *) (pdst + (j/k)*row_size + 0*bs +   sizeof(float));
         uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + 2*sizeof(float));
@@ -620,7 +623,7 @@
                 *(float *) pd = d;
                 *(float *) pm = min;
-                pd += bs; 
+                pd += bs;
                 pm += bs;
 
                 for (int l = 0; l < qk; l += 2) {




diff --git a/utils.h b/utils.h
index c7fce964b4e2d65bb40397a20cb309604110d59a..6693775c57d7950b9f44ca3d83cf7d08fceeffdd 100644
--- a/utils.h
+++ b/utils.h
@@ -40,6 +40,7 @@     bool interactive       = false; // interactive mode
     bool interactive_start = false; // reverse prompt immediately
     bool instruct          = false; // instruction mode (used for Alpaca models)
     bool ignore_eos        = false; // do not stop generating after eos
+    bool perplexity        = false; // compute perplexity over the prompt
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
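
The utils.cpp and utils.h hunks above are the flag plumbing: gpt_params_parse() sets params.perplexity when it sees --perplexity, and main() (in the main.cpp diff further up) then runs the perplexity pass and exits instead of generating text. A simplified stand-in for that pattern, not the real parser:

// Sketch of a boolean command-line flag wired through a params struct,
// mirroring the --perplexity option added in this commit.
#include <cstdio>
#include <cstring>

struct params_sketch {
    bool perplexity = false; // compute perplexity over the prompt
};

int main(int argc, char ** argv) {
    params_sketch params;
    for (int i = 1; i < argc; i++) {
        if (std::strcmp(argv[i], "--perplexity") == 0) {
            params.perplexity = true;
        }
    }
    // e.g. ./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw
    printf("perplexity mode: %s\n", params.perplexity ? "on" : "off");
    return 0;
}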