#include "common.h"
 | 
						|
#include "llama.h"
 | 
						|
#include "build-info.h"
 | 
						|
 | 
						|
#include <cmath>
 | 
						|
#include <ctime>
 | 
						|
#include <sstream>
 | 
						|
#include <cstring>
 | 
						|
#include <thread>
 | 
						|
#include <mutex>
 | 
						|
 | 
						|
#if defined(_MSC_VER)
 | 
						|
#pragma warning(disable: 4244 4267) // possible loss of data
 | 
						|
#endif
 | 
						|
 | 
						|
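// Numerically stable softmax: the maximum logit is subtracted before exponentiation
// so that expf() cannot overflow, i.e. softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)).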
std::vector<float> softmax(const std::vector<float>& logits) {
    std::vector<float> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) max_logit = std::max(max_logit, v);
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        const float logit = logits[i] - max_logit;
        const float exp_logit = expf(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
    return probs;
}

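// Log-probability of token `tok` given the full logit vector, computed with the
// log-sum-exp trick: log p(tok) = logits[tok] - max - log(sum_i exp(logits[i] - max)).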
float log_softmax(int n_vocab, const float * logits, int tok) {
    float max_logit = logits[0];
    for (int i = 1; i < n_vocab; ++i) max_logit = std::max(max_logit, logits[i]);
    double sum_exp = 0.0;
    for (int i = 0; i < n_vocab; ++i) sum_exp += expf(logits[i] - max_logit);
    return logits[tok] - max_logit - log(sum_exp);
}

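// Accumulate the negative log-likelihood (nll) and its square (nll2) of the model's
// predictions for tokens[1..n_token]. The work is shared between the caller and the
// `workers` threads; nll and nll2 later give the mean and the standard error of the mean.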
void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token, std::vector<std::thread>& workers,
        double& nll, double& nll2) {

    std::mutex mutex;
    int counter = 0;
    auto compute = [&mutex, &counter, &nll, &nll2, n_vocab, logits, tokens, n_token] () {
        double local_nll = 0, local_nll2 = 0;
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
            int i = counter++;
            if (i >= n_token) {
                nll += local_nll; nll2 += local_nll2;
                break;
            }
            lock.unlock();
            double v = -log_softmax(n_vocab, logits + i*n_vocab, tokens[i+1]);
            local_nll += v;
            local_nll2 += v*v;
        }
    };
    for (auto& w : workers) w = std::thread(compute);
    compute();
    for (auto& w : workers) w.join();

}

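// Strided perplexity: slide a window of n_ctx tokens over the text in steps of
// params.ppl_stride, re-evaluating the whole window each time and scoring only the
// last ppl_stride token predictions of each window.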
void perplexity_v2(llama_context * ctx, const gpt_params & params) {
    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    // BOS tokens will be added for each chunk before eval

    if (params.ppl_stride <= 0) {
        fprintf(stderr, "%s: stride is %d but must be greater than zero!\n",__func__,params.ppl_stride);
        return;
    }

    const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
    const bool add_bos = is_spm;

    fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

    auto tokens = ::llama_tokenize(ctx, params.prompt, add_bos);

    const int calc_chunk = params.n_ctx;

    fprintf(stderr, "%s: have %zu tokens. Calculation chunk = %d\n", __func__, tokens.size(), calc_chunk);

    if (int(tokens.size()) <= calc_chunk) {
        fprintf(stderr, "%s: there are only %zu tokens, this is not enough for a context size of %d and stride %d\n",__func__,
                tokens.size(), params.n_ctx, params.ppl_stride);
        return;
    }

    const int n_chunk_max = (tokens.size() - calc_chunk + params.ppl_stride - 1)  / params.ppl_stride;

    const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
    const int n_vocab = llama_n_vocab(ctx);
    const int n_batch = params.n_batch;

    int count = 0;
    double nll = 0.0;

    fprintf(stderr, "%s: calculating perplexity over %d chunks, batch_size=%d\n", __func__, n_chunk, n_batch);

    for (int i = 0; i < n_chunk; ++i) {
        const int start =     i * params.ppl_stride;
        const int end   = start + calc_chunk;

        const int num_batches = (calc_chunk + n_batch - 1) / n_batch;
        //fprintf(stderr, "%s: evaluating %d...%d using %d batches\n", __func__, start, end, num_batches);

        std::vector<float> logits;

        const auto t_start = std::chrono::high_resolution_clock::now();

        for (int j = 0; j < num_batches; ++j) {
            const int batch_start = start + j * n_batch;
            const int batch_size  = std::min(end - batch_start, n_batch);

            // save original token and restore it after eval
            const auto token_org = tokens[batch_start];

            // add BOS token for the first batch of each chunk
            if (add_bos && j == 0) {
                tokens[batch_start] = llama_token_bos(ctx);
            }

            //fprintf(stderr, "    Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
            if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
                //fprintf(stderr, "%s : failed to eval\n", __func__);
                return;
            }

            const auto batch_logits = llama_get_logits(ctx);
            logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);

            if (j == 0) {
                tokens[batch_start] = token_org;
            }
        }

        const auto t_end = std::chrono::high_resolution_clock::now();

        if (i == 0) {
            const float t_total = std::chrono::duration<float>(t_end - t_start).count();
            fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
            int total_seconds = (int)(t_total * n_chunk);
            if (total_seconds >= 60*60) {
                fprintf(stderr, "%d hours ", total_seconds / (60*60));
                total_seconds = total_seconds % (60*60);
            }
            fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
        }

        //fprintf(stderr, "%s: using tokens %d...%d\n",__func__,params.n_ctx - params.ppl_stride + start, params.n_ctx + start);
        for (int j = params.n_ctx - params.ppl_stride - 1; j < params.n_ctx - 1; ++j) {

            // Calculate probability of next token, given the previous ones.
            const std::vector<float> tok_logits(
                logits.begin() + (j + 0) * n_vocab,
                logits.begin() + (j + 1) * n_vocab);

            const float prob = softmax(tok_logits)[tokens[start + j + 1]];

            nll += -std::log(prob);
            ++count;
        }
        // perplexity is e^(average negative log-likelihood)
        if (params.ppl_output_type == 0) {
            printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        } else {
            printf("%8d  %.4lf\n", i*params.ppl_stride, std::exp(nll / count));
        }
        fflush(stdout);
    }
    printf("\n");
}

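// Standard perplexity: split the tokenized text into non-overlapping windows of n_ctx
// tokens, score each window's tokens after the first min(512, n_ctx/2) positions, and
// report PPL = exp(mean -log p) together with a running estimate of its uncertainty.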
void perplexity(llama_context * ctx, const gpt_params & params) {
    if (params.ppl_stride > 0) {
        perplexity_v2(ctx, params);
        return;
    }

    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    // BOS tokens will be added for each chunk before eval

    const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
    const bool add_bos = is_spm;

    auto tim1 = std::chrono::high_resolution_clock::now();
    fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

    auto tokens = ::llama_tokenize(ctx, params.prompt, add_bos);

    auto tim2 = std::chrono::high_resolution_clock::now();
    fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());

    const int n_chunk_max = tokens.size() / params.n_ctx;

    const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
    const int n_vocab = llama_n_vocab(ctx);
    const int n_batch = params.n_batch;

    int count = 0;
    double nll = 0.0;
    double nll2 = 0.0;

    fprintf(stderr, "%s: calculating perplexity over %d chunks, batch_size=%d\n", __func__, n_chunk, n_batch);

    std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);

    for (int i = 0; i < n_chunk; ++i) {
        const int start =     i * params.n_ctx;
        const int end   = start + params.n_ctx;

        const int num_batches = (params.n_ctx + n_batch - 1) / n_batch;

        std::vector<float> logits;

        const auto t_start = std::chrono::high_resolution_clock::now();

        for (int j = 0; j < num_batches; ++j) {
            const int batch_start = start + j * n_batch;
            const int batch_size  = std::min(end - batch_start, n_batch);

            // save original token and restore it after eval
            const auto token_org = tokens[batch_start];

            // add BOS token for the first batch of each chunk
            if (add_bos && j == 0) {
                tokens[batch_start] = llama_token_bos(ctx);
            }

            if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return;
            }

            // restore the original token in case it was set to BOS
            tokens[batch_start] = token_org;

            const auto batch_logits = llama_get_logits(ctx);
            logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
        }

        const auto t_end = std::chrono::high_resolution_clock::now();

        if (i == 0) {
            const float t_total = std::chrono::duration<float>(t_end - t_start).count();
            fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
            int total_seconds = (int)(t_total * n_chunk);
            if (total_seconds >= 60*60) {
                fprintf(stderr, "%d hours ", total_seconds / (60*60));
                total_seconds = total_seconds % (60*60);
            }
            fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
        }

        // We get the logits for all the tokens in the context window (params.n_ctx)
        // from llama_eval above.  Now, based on https://huggingface.co/docs/transformers/perplexity,
        // calculate the perplexity over the last half of the window (so the model always has
        // some context to predict the token).
        //
        // We rely on the fact that attention in the forward pass only looks at previous
        // tokens here, so the logits returned for each token are an accurate representation
        // of what the model would have predicted at that point.
        //
        // For example, with a context window of 512 we compute the perplexity for each of
        // the last 256 tokens.  Then, we split the input up into context-window-sized chunks
        // to process the entire prompt.
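        // Concretely, with n_ctx = 512: first = 256, and the logits at positions 256..510
        // are scored against tokens 257..511, i.e. n_ctx - 1 - first = 255 predictions per chunk.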
        const int first = std::min(512, params.n_ctx/2);
        process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, params.n_ctx - 1 - first, workers, nll, nll2);
        count += params.n_ctx - first - 1;

        // perplexity is e^(average negative log-likelihood)
        if (params.ppl_output_type == 0) {
            printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        } else {
            double av = nll/count;
            double av2 = nll2/count - av*av;
            if (av2 > 0) av2 = sqrt(av2/(count-1));
            printf("%8d  %.4lf  %4lf  %4lf\n", i*params.n_ctx, std::exp(nll / count), av, av2);
        }
        fflush(stdout);
    }
    printf("\n");
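    // Final estimate: with mu = mean(-log p) and sigma_mu = sqrt((mean(v^2) - mu^2)/(count - 1)),
    // PPL = exp(mu) and its uncertainty is propagated to first order as exp(mu) * sigma_mu.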
    nll2 /= count;
    nll /= count;
    nll2 -= nll * nll;
    if (nll2 > 0) {
        nll2 = sqrt(nll2/(count-1));
        double ppl = exp(nll);
        printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl);
    } else {
        printf("Unexpected negative standard deviation of log(prob)\n");
    }
}

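// Evaluate `tokens` with llama_eval in batches of up to n_batch, starting at position n_past,
// and return the logits for every evaluated token (tokens.size() * n_vocab floats).
// Returns an empty vector if evaluation fails.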
std::vector<float> hellaswag_evaluate_tokens(llama_context * ctx, const std::vector<int>& tokens, int n_past, int n_batch,
        int n_vocab, int n_thread) {
    std::vector<float> result;
    result.reserve(tokens.size() * n_vocab);
    size_t n_chunk = (tokens.size() + n_batch - 1)/n_batch;
    for (size_t i_chunk = 0; i_chunk < n_chunk; ++i_chunk) {
        size_t n_tokens = tokens.size() - i_chunk * n_batch;
        n_tokens = std::min(n_tokens, size_t(n_batch));
        if (llama_eval(ctx, tokens.data() + i_chunk * n_batch, n_tokens, n_past, n_thread)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return {};
        }

        const auto logits = llama_get_logits(ctx);
        result.insert(result.end(), logits, logits + n_tokens * n_vocab);

        n_past += n_tokens;
    }
    return result;
}

void hellaswag_score(llama_context * ctx, const gpt_params & params) {
    // Calculates the HellaSwag score (acc_norm) from the prompt
    //
    // Data extracted from the HellaSwag validation dataset (MIT license) https://github.com/rowanz/hellaswag/blob/master/data/hellaswag_val.jsonl
    // All used data fields are preprocessed as in https://github.com/EleutherAI/lm-evaluation-harness/blob/df3da98c5405deafd519c2ddca52bb7c3fe36bef/lm_eval/tasks/hellaswag.py#L62-L68
    //
    // All 10042 tasks should be extracted to keep the results standardized like other implementations.
    //
    // Datafile layout:
    // ['??'] denotes JSON fields
    // 6 lines per task:
    // ['activity_label'] + ": " + ['ctx'] - The first part of the query, the context
    // ['label'] - The index of the best common-sense ending, aka the gold ending
    // ['endings'][0] - Endings appended to the first part of the query
    // ['endings'][1]
    // ['endings'][2]
    // ['endings'][3]

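    // Scoring: each of the four endings is evaluated after the shared context and given the
    // mean per-token log-probability; a task counts as correct (acc_norm) when the gold
    // ending has the highest mean log-probability.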
    std::vector<std::string> prompt_lines;
    std::istringstream strstream(params.prompt);
    std::string line;

    while (std::getline(strstream,line,'\n')) {
        prompt_lines.push_back(line);
    }

    if (prompt_lines.size() % 6 != 0) {
        fprintf(stderr, "%s : number of lines in prompt is not a multiple of 6.\n", __func__);
        return;
    }

    size_t hs_task_count = prompt_lines.size()/6;
    fprintf(stderr, "%s : loaded %zu tasks from prompt.\n", __func__, hs_task_count);

    const bool is_spm = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
    fprintf(stderr, "================================= is_spm = %d\n", is_spm);

    // This is needed as usual for LLaMA models
    const bool add_bos = is_spm;

    // Number of tasks to use when computing the score
    if (params.hellaswag_tasks < hs_task_count) {
        hs_task_count = params.hellaswag_tasks;
    }

    // The tasks should be randomized so the score stabilizes quickly.
    bool randomize_tasks = true;

    // The random seed should not impact the final result if the computation is done over enough tasks, so kept hardcoded for now
    std::mt19937 rng(1);

    // Data holder for HellaSwag tasks
    struct hs_data_t {
        std::string context;
        size_t gold_ending_idx;
        std::string ending[4];
        size_t ending_logprob_count[4];
        double ending_logprob[4];
    };

    fprintf(stderr, "%s : selecting %zu %s tasks.\n", __func__, hs_task_count, (randomize_tasks ? "randomized" : "the first"));

    // Select and read data from prompt lines
    hs_data_t *hs_data = new hs_data_t[hs_task_count];
    for (size_t i = 0; i < hs_task_count; i++) {
        size_t idx = i;

        // Select a random example of those left in the prompt
        if (randomize_tasks) {
            std::uniform_int_distribution<size_t> dist(0, prompt_lines.size()/6 - 1);
            idx = dist(rng);
        }

        hs_data[i].context = prompt_lines[idx*6];
        hs_data[i].gold_ending_idx = std::stoi( prompt_lines[idx*6+1] );
        for (size_t j = 0; j < 4; j++) {
            hs_data[i].ending[j] = prompt_lines[idx*6+2+j];
        }

        // Delete the selected random example from the prompt
        if (randomize_tasks) {
            prompt_lines.erase(std::next(prompt_lines.begin(), idx*6), std::next(prompt_lines.begin(), idx*6 + 6));
        }
    }

    fprintf(stderr, "%s : calculating hellaswag score over selected tasks.\n", __func__);
    printf("\ntask\tacc_norm\n");

    double acc = 0.0f;
    const int n_vocab = llama_n_vocab(ctx);

    std::vector<std::vector<int>> ending_tokens(4);

    std::vector<float> tok_logits(n_vocab);

    for (size_t task_idx = 0; task_idx < hs_task_count; task_idx++) {
        // Tokenize the context to count tokens
        std::vector<int> context_embd = ::llama_tokenize(ctx, hs_data[task_idx].context, add_bos);
        size_t context_size = context_embd.size();

        for (int i = 0; i < 4; ++i) {
            ending_tokens[i] = ::llama_tokenize(ctx, hs_data[task_idx].context + " " + hs_data[task_idx].ending[i], add_bos);
            for (int k = 0; k < int(context_size); ++k) {
                if (ending_tokens[i][k] != context_embd[k]) {
                    fprintf(stderr, "Oops: ending %d of task %d differs from context at position %d\n", i, int(task_idx), k);
                    break;
                }
            }
        }

        // Do the 1st ending
        // In this case we include the context when evaluating
        //auto query_embd = ::llama_tokenize(ctx, hs_data[task_idx].context + hs_data[task_idx].ending[0], add_bos);
        auto query_embd = ending_tokens[0];
        auto query_size = query_embd.size();

        // Stop if the query won't fit in the ctx window
        if (query_size > (size_t)params.n_ctx) {
            fprintf(stderr, "%s : number of tokens in query %zu > n_ctx\n", __func__, query_size);
            return;
        }

        // Speed up small evaluations by evaluating at least 32 tokens
        if (query_size < 32) {
            query_embd.resize(32);
        }

        auto logits = hellaswag_evaluate_tokens(ctx, query_embd, 0, params.n_batch, n_vocab, params.n_threads);
        if (logits.empty()) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return;
        }

        std::memcpy(tok_logits.data(), logits.data() + (context_size-1)*n_vocab, n_vocab*sizeof(float));
        const auto first_probs = softmax(tok_logits);

        hs_data[task_idx].ending_logprob_count[0] = 1;
        hs_data[task_idx].ending_logprob[0] = std::log(first_probs[query_embd[context_size]]);

        // Calculate the logprobs over the ending
        for (size_t j = context_size; j < query_size - 1; j++) {

            std::memcpy(tok_logits.data(), logits.data() + j*n_vocab, n_vocab*sizeof(float));

            const float prob = softmax(tok_logits)[query_embd[j + 1]];

            hs_data[task_idx].ending_logprob[0] += std::log(prob);
            hs_data[task_idx].ending_logprob_count[0]++;
        }

        // Calculate the mean token logprob for acc_norm
        hs_data[task_idx].ending_logprob[0] /= hs_data[task_idx].ending_logprob_count[0];

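        // Note: the KV cache still holds the shared context from evaluating the first ending,
        // so the remaining endings only need their own tokens evaluated with n_past = context_size.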
        // Do the remaining endings
        // For these, we use the bare ending with n_past = context_size
        //
        for (size_t ending_idx = 1; ending_idx < 4; ending_idx++) {

            // Tokenize the query
            query_embd.resize(ending_tokens[ending_idx].size() - context_size);
            std::memcpy(query_embd.data(), ending_tokens[ending_idx].data() + context_size, query_embd.size()*sizeof(int));
            query_size = query_embd.size();

            // Stop if the query won't fit in the ctx window
            if (context_size + query_size > (size_t)params.n_ctx) {
                fprintf(stderr, "%s : number of tokens in query %zu > n_ctx\n", __func__, query_size);
                return;
            }

            // Speed up small evaluations by evaluating at least 32 tokens
            // No, resizing to 32 is actually slightly slower (at least on CUDA)
            //if (query_size < 32) {
            //    query_embd.resize(32);
            //}

            // Evaluate the query
            logits = hellaswag_evaluate_tokens(ctx, query_embd, context_size, params.n_batch, n_vocab, params.n_threads);
            if (logits.empty()) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return;
            }

            hs_data[task_idx].ending_logprob_count[ending_idx] = 1;
            hs_data[task_idx].ending_logprob[ending_idx] = std::log(first_probs[query_embd[0]]);

            // Calculate the logprobs over the ending
            for (size_t j = 0; j < query_size - 1; j++) {
                std::memcpy(tok_logits.data(), logits.data() + j*n_vocab, n_vocab*sizeof(float));

                const float prob = softmax(tok_logits)[query_embd[j + 1]];

                hs_data[task_idx].ending_logprob[ending_idx] += std::log(prob);
                hs_data[task_idx].ending_logprob_count[ending_idx]++;
            }

            // Calculate the mean token logprob for acc_norm
            hs_data[task_idx].ending_logprob[ending_idx] /= hs_data[task_idx].ending_logprob_count[ending_idx];


//            printf("task %lu, ending %lu, whole_len %lu, context_len %lu, ending_logprob_count %lu, ending_logprob %.4f\n",
//                task_idx,ending_idx,whole_size,context_size, hs_data[task_idx].ending_logprob_count[ending_idx], hs_data[task_idx].ending_logprob[ending_idx] );
        }

        // Find the ending with the maximum logprob
        size_t ending_logprob_max_idx = 0;
        double ending_logprob_max_val = hs_data[task_idx].ending_logprob[0];
        for (size_t j = 1; j < 4; j++) {
            if (hs_data[task_idx].ending_logprob[j] > ending_logprob_max_val) {
                ending_logprob_max_idx = j;
                ending_logprob_max_val = hs_data[task_idx].ending_logprob[j];
            }
        }

//        printf("max logprob ending idx %lu, gold ending idx %lu\n", ending_logprob_max_idx, hs_data[task_idx].gold_ending_idx);

        // If the gold ending got the maximum logprob, add one accuracy point
        if (ending_logprob_max_idx == hs_data[task_idx].gold_ending_idx) {
            acc += 1.0;
        }

        // Print the accumulated accuracy mean x 100
        printf("%zu\t%.8lf\n", task_idx+1, acc/double(task_idx+1)*100.0);
        fflush(stdout);
    }

    delete [] hs_data;

    printf("\n");
}

int main(int argc, char ** argv) {
    gpt_params params;

    params.n_batch = 512;
    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    params.perplexity = true;
    params.n_batch = std::min(params.n_batch, params.n_ctx);

    if (params.ppl_stride > 0) {
        fprintf(stderr, "Will perform strided perplexity calculation -> adjusting context size from %d to %d\n",
                params.n_ctx, params.n_ctx + params.ppl_stride/2);
        params.n_ctx += params.ppl_stride/2;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed  = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_backend_init(params.numa);

    llama_model * model;
    llama_context * ctx;

    // load the model and apply the LoRA adapter, if any
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    if (params.hellaswag) {
        hellaswag_score(ctx, params);
    } else {
        perplexity(ctx, params);
    }

    llama_print_timings(ctx);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}