/* Inference for Llama-2 Transformer model in pure C
 * With added CUDA support initially drawing from
 * https://github.com/ankan-ban/llama2.cu/blob/master/llama2.cu
 * and structured in a way that hopefully makes keeping it
 * up-to-date straightforward.
 */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <fcntl.h>
#include <assert.h>
#include <thread>
#if defined _WIN32
    #include "win.h"
#else
    #include <unistd.h>
    #include <sys/mman.h>
#endif
#include "llama2.h"

#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <cub/cub.cuh>
#include <cublas_v2.h>

// Each CUDA function call should be checked for errors.
#define CUCHK(err) cuda_check((err), __FILE__, __LINE__)
inline void cuda_check(cudaError_t error_code, const char *file, int line)
{
    if (error_code != cudaSuccess)
    {
        fprintf(stderr, "CUDA Error %d: %s. In file '%s' on line %d\n", error_code, cudaGetErrorString(error_code), file, line);
        fflush(stderr);
        exit(error_code);
    }
}

// cublasHandle_t g_cublas_handle = nullptr;
// void create_cublas_handle() {
//     cublasStatus_t stat = cublasCreate(&g_cublas_handle); // FIXME cublasDestroy
//     if (stat != CUBLAS_STATUS_SUCCESS) {
//         printf ("CUBLAS initialization failed\n");
//         exit(EXIT_FAILURE);
//     }
// }
// void destroy_cublas_handle() {
//     cublasStatus_t stat = cublasDestroy(g_cublas_handle);
//     if (stat != CUBLAS_STATUS_SUCCESS) {
//         printf ("CUBLAS destroy failed\n");
//         exit(EXIT_FAILURE);
//     }
// }
#endif
// ----------------------------------------------------------------------------
// Transformer model

typedef struct {
    int dim; // transformer dimension
    int hidden_dim; // for ffn layers
    int n_layers; // number of layers
    int n_heads; // number of query heads
    int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
    int vocab_size; // vocabulary size, usually 256 (byte-level)
    int seq_len; // max sequence length
} Config;

// CUDA NOTE: The TransformerWeights structure will be stored on the host,
// but all of the pointers in the structure will point to data on the GPU.
// The checkpoint file is mmap-ed to the host and the weights portion
// is allocated on and copied to the GPU. Then, memory_map_weights() updates
// these structure pointers to point to the proper location. Happily, this
// function is the same for both C and CUDA.
typedef struct {
    // token embedding table
    float* token_embedding_table; // (vocab_size, dim)
    // weights for rmsnorms
    float* rms_att_weight; // (layer, dim) rmsnorm weights
    float* rms_ffn_weight; // (layer, dim)
    // weights for matmuls. note dim == n_heads * head_size
    float* wq; // (layer, dim, n_heads * head_size)
    float* wk; // (layer, dim, n_kv_heads * head_size)
    float* wv; // (layer, dim, n_kv_heads * head_size)
    float* wo; // (layer, n_heads * head_size, dim)
    // weights for ffn
    float* w1; // (layer, hidden_dim, dim)
    float* w2; // (layer, dim, hidden_dim)
    float* w3; // (layer, hidden_dim, dim)
    // final rmsnorm
    float* rms_final_weight; // (dim,)
    // (optional) classifier weights for the logits, on the last layer
    float* wcls;
} TransformerWeights;

// CUDA NOTE: The RunState structure will be stored on the host, but all of the
// pointers in the structure will point to data on the GPU, created via
// cudaMalloc. The exception is logits which is the final result of the
// transformer & is copied from the GPU as the last step in the transformer
// and is used by the host.
typedef struct {
    // current wave of activations
    float *x; // activation at current time stamp (dim,)
    float *xb; // same, but inside a residual branch (dim,)
    float *xb2; // an additional buffer just for convenience (dim,)
    float *hb; // buffer for hidden dimension in the ffn (hidden_dim,)
    float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
    float *q; // query (dim,)
    float *k; // key (dim,)
    float *v; // value (dim,)
    float *att; // buffer for scores/attention values (n_heads, seq_len)
#ifdef USE_CUDA
    float *logits_gpu; // output logits in GPU
#endif
    float *logits; // output logits in CPU
    // kv cache
    float* key_cache;   // (layer, seq_len, dim)
    float* value_cache; // (layer, seq_len, dim)
} RunState;

typedef struct {
    Config config; // the hyperparameters of the architecture (the blueprint)
    TransformerWeights weights; // the weights of the model
    RunState state; // buffers for the "wave" of activations in the forward pass
    // some more state needed to properly clean up the memory mapping (sigh)
    int fd; // file descriptor for memory mapping
    float* data; // memory mapped data pointer
    ssize_t file_size; // size of the checkpoint file in bytes
} Transformer;
#ifdef USE_CUDA
void malloc_run_state(RunState* s, Config* p) {
    // allocate all run-state buffers on the GPU; only logits lives on the host
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    CUCHK(cudaMalloc((void**)&s->x, p->dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->xb, p->dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->xb2, p->dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->hb, p->hidden_dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->hb2, p->hidden_dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->q, p->dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->key_cache, p->n_layers * p->seq_len * kv_dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->value_cache, p->n_layers * p->seq_len * kv_dim * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->att, p->n_heads * p->seq_len * sizeof(float)));
    CUCHK(cudaMalloc((void**)&s->logits_gpu, p->vocab_size * sizeof(float)));
    s->logits = (float *)calloc(p->vocab_size, sizeof(float));
    // ensure all mallocs went fine
    if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
        || !s->key_cache || !s->value_cache || !s->att || !s->logits_gpu || !s->logits) {
        fprintf(stderr, "malloc failed!\n");
        exit(EXIT_FAILURE);
    }
}
#else
void malloc_run_state(RunState* s, Config* p) {
    // we calloc instead of malloc to keep valgrind happy
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    s->x = (float *)calloc(p->dim, sizeof(float));
    s->xb = (float *)calloc(p->dim, sizeof(float));
    s->xb2 = (float *)calloc(p->dim, sizeof(float));
    s->hb = (float *)calloc(p->hidden_dim, sizeof(float));
    s->hb2 = (float *)calloc(p->hidden_dim, sizeof(float));
    s->q = (float *)calloc(p->dim, sizeof(float));
    s->key_cache = (float *)calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
    s->value_cache = (float *)calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
    s->att = (float *)calloc(p->n_heads * p->seq_len, sizeof(float));
    s->logits = (float *)calloc(p->vocab_size, sizeof(float));
    // ensure all mallocs went fine
    if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
        || !s->key_cache || !s->value_cache || !s->att || !s->logits) {
        fprintf(stderr, "malloc failed!\n");
        exit(EXIT_FAILURE);
    }
}
#endif

#ifdef USE_CUDA
void free_run_state(RunState* s) {
    CUCHK(cudaFree(s->x));
    CUCHK(cudaFree(s->xb));
    CUCHK(cudaFree(s->xb2));
    CUCHK(cudaFree(s->hb));
    CUCHK(cudaFree(s->hb2));
    CUCHK(cudaFree(s->q));
    CUCHK(cudaFree(s->att));
    CUCHK(cudaFree(s->logits_gpu));
    free(s->logits);
    CUCHK(cudaFree(s->key_cache));
    CUCHK(cudaFree(s->value_cache));
}
#else
void free_run_state(RunState* s) {
    free(s->x);
    free(s->xb);
    free(s->xb2);
    free(s->hb);
    free(s->hb2);
    free(s->q);
    free(s->att);
    free(s->logits);
    free(s->key_cache);
    free(s->value_cache);
}
#endif
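
// Checkpoint layout, as consumed below: a Config header followed by the
// float32 weight tensors laid out back to back in the order walked by
// memory_map_weights(): token_embedding_table, rms_att_weight, wq, wk, wv,
// wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, the obsolete
// freq_cis_real/freq_cis_imag RoPE tables (skipped), and finally wcls when
// the classifier weights are not shared with the token embedding table.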
void memory_map_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) {
    int head_size = p->dim / p->n_heads;
    // make sure the multiplications below are done in 64bit to fit the parameter counts of 13B+ models
    unsigned long long n_layers = p->n_layers;
    w->token_embedding_table = ptr;
    ptr += p->vocab_size * p->dim;
    w->rms_att_weight = ptr;
    ptr += n_layers * p->dim;
    w->wq = ptr;
    ptr += n_layers * p->dim * (p->n_heads * head_size);
    w->wk = ptr;
    ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
    w->wv = ptr;
    ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
    w->wo = ptr;
    ptr += n_layers * (p->n_heads * head_size) * p->dim;
    w->rms_ffn_weight = ptr;
    ptr += n_layers * p->dim;
    w->w1 = ptr;
    ptr += n_layers * p->dim * p->hidden_dim;
    w->w2 = ptr;
    ptr += n_layers * p->hidden_dim * p->dim;
    w->w3 = ptr;
    ptr += n_layers * p->dim * p->hidden_dim;
    w->rms_final_weight = ptr;
    ptr += p->dim;
    ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE)
    ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE)
    w->wcls = shared_weights ? w->token_embedding_table : ptr;
}

void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
                     int* fd, float** data, ssize_t* file_size) {
    FILE *file = fopen(checkpoint, "rb");
    if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
    // read in the config header
    if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
    // negative vocab size is hacky way of signaling unshared weights. bit yikes.
    int shared_weights = config->vocab_size > 0 ? 1 : 0;
    config->vocab_size = abs(config->vocab_size);
    // figure out the file size
    fseek(file, 0, SEEK_END); // move file pointer to end of file
    *file_size = ftell(file); // get the file size, in bytes
    fclose(file);
    // memory map the Transformer weights into the data pointer
    *fd = open(checkpoint, O_RDONLY); // open in read only mode
    if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
    *data = (float *)mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
    if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
#ifdef USE_CUDA
    // allocate & copy mmap data to the gpu first
    // TODO: allocate & copy just a portion to the GPU if the weights are too big
    // to fit in the GPU, then copy the data only as needed while running.
    float* weights_ptr;
    size_t weights_size = *file_size - sizeof(Config);
    CUCHK(cudaMalloc((void**)&weights_ptr, weights_size));
    CUCHK(cudaMemcpy(weights_ptr, *data + sizeof(Config)/sizeof(float), weights_size, cudaMemcpyHostToDevice));
#else
    float* weights_ptr = *data + sizeof(Config)/sizeof(float);
#endif
    memory_map_weights(weights, config, weights_ptr, shared_weights);
}
void build_transformer(Transformer *t, char* checkpoint_path) {
    // read in the Config and the Weights from the checkpoint
    read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
    // allocate the RunState buffers
    malloc_run_state(&t->state, &t->config);
}

void free_transformer(Transformer* t) {
    // close the memory mapping
    if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); }
    if (t->fd != -1) { close(t->fd); }
#ifdef USE_CUDA
    // we cudaMalloc a region of memory, then hand the address to
    // the token_embedding_table field. Free it here.
    CUCHK(cudaFree(t->weights.token_embedding_table));
#endif
    // free the RunState buffers
    free_run_state(&t->state);
}
// ----------------------------------------------------------------------------
// neural net blocks; the dynamics of the Transformer

#ifdef USE_CUDA
// Integer division of a by b, rounded up (used to size CUDA grids).
int divUp(int a, int b) {
    return (a - 1) / b + 1;
}

const int num_threads_lrg = 1024;
const int num_threads_med = 256;
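
// RMSNorm, as computed by both the kernel below and the C fallback:
//   o[j] = weight[j] * x[j] / sqrt(mean(x[0..size)^2) + 1e-5)
// The CUDA version runs as a single block of num_threads_lrg threads and
// uses a CUB block reduction for the sum of squares.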
__global__ void rmsnorm_kernel(float* o, float* x, float* weight, int size, int elementsPerThread) {
    // parallel reduction of sum of squares via CUB
    float ss = 0.0f;
    for (int i = 0; i < elementsPerThread; i++) {
        int j = threadIdx.x + i * num_threads_lrg;
        if (j < size)
            ss += x[j] * x[j];
    }
    using BlockReduce = cub::BlockReduce<float, num_threads_lrg>;
    __shared__ typename BlockReduce::TempStorage temp;
    ss = BlockReduce(temp).Sum(ss);

    // serialization point to calculate normalization factor
    __shared__ float shared_ss;
    if (threadIdx.x == 0) {
        ss /= size;
        ss += 1e-5f;
        ss = 1.0f / sqrtf(ss);
        shared_ss = ss;
    }
    __syncthreads();
    ss = shared_ss;

    // normalize and scale
    for (int i = 0; i < elementsPerThread; i++) {
        int j = threadIdx.x + i * num_threads_lrg;
        if (j < size) {
            o[j] = weight[j] * (ss * x[j]);
        }
    }
}
void rmsnorm(float* o, float* x, float* weight, int size) {
    int elementsPerThread = divUp(size, num_threads_lrg);
    rmsnorm_kernel <<<1, num_threads_lrg >>> (o, x, weight, size, elementsPerThread);
}
#else
void rmsnorm(float* o, float* x, float* weight, int size) {
    // calculate sum of squares
    float ss = 0.0f;
    for (int j = 0; j < size; j++) {
        ss += x[j] * x[j];
    }
    ss /= size;
    ss += 1e-5f;
    ss = 1.0f / sqrtf(ss);
    // normalize and scale
    for (int j = 0; j < size; j++) {
        o[j] = weight[j] * (ss * x[j]);
    }
}
#endif
#ifdef USE_CUDA
__device__ void softmax_gpu(float* __restrict__ x, int size) {
    int tid = threadIdx.x;
    int step = blockDim.x;

    // find max value (for numerical stability)
    float max_val = tid < size ? x[tid] : 0;
    for (int i = tid + step; i < size; i += step) {
        if (x[i] > max_val) {
            max_val = x[i];
        }
    }
    using BlockReduce = cub::BlockReduce<float, num_threads_lrg>;
    __shared__ typename BlockReduce::TempStorage temp;
    __shared__ float shared_val;
    max_val = BlockReduce(temp).Reduce(max_val, cub::Max());
    if (threadIdx.x == 0) {
        shared_val = max_val;
    }
    __syncthreads();
    max_val = shared_val;

    // exp and sum
    float sum = 0.0f;
    for (int i = tid; i < size; i += step) {
        x[i] = expf(x[i] - max_val);
        sum += x[i];
    }
    sum = BlockReduce(temp).Sum(sum);
    if (threadIdx.x == 0) {
        shared_val = sum;
    }
    __syncthreads();
    sum = shared_val;

    // normalize
    for (int i = tid; i < size; i += step) {
        x[i] /= sum;
    }
}
#endif
void softmax(float* x, int size) {
    // find max value (for numerical stability)
    float max_val = x[0];
    for (int i = 1; i < size; i++) {
        if (x[i] > max_val) {
            max_val = x[i];
        }
    }
    // exp and sum
    float sum = 0.0f;
    for (int i = 0; i < size; i++) {
        x[i] = expf(x[i] - max_val);
        sum += x[i];
    }
    // normalize
    for (int i = 0; i < size; i++) {
        x[i] /= sum;
    }
}
#ifdef USE_CUDA
// Use cuBLAS for matmul to leverage this included, high-performance library.
void matmul(cublasHandle_t handle, float* xout, float* x, float* w, int n, int d) {
    // W (d,n) @ x (n,) -> xout (d,)
    // W is stored in this order: (n=0,d=0), (n=1,d=0), (n=2,d=0), ...
    // so W is n x d in cublas terms & we'll need to transpose.
    // Sgemv does y = alpha * op(A) * x + beta * y (modifying y)
    // where op can transpose the matrix A
    // Translating to our local vars, that is
    // xout = 1.0*op(w)*x + 0.0*xout
    float alpha = 1.0f;
    float beta = 0.0f; // when this is 0, xout will not be used for input
    cublasSgemv(handle, CUBLAS_OP_T, n, d, &alpha, w, n, x, 1, &beta, xout, 1);
}
#else
void matmul(float* xout, float* x, float* w, int n, int d) {
    // W (d,n) @ x (n,) -> xout (d,)
    // by far the most amount of time is spent inside this little function
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < d; i++) {
        float val = 0.0f;
        for (int j = 0; j < n; j++) {
            val += w[i * n + j] * x[j];
        }
        xout[i] = val;
    }
}
#endif

// Additional neural net blocks (brought out from transformer function)
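
// RoPE rotates each consecutive pair (x[i], x[i+1]) within a head by the angle
//   pos / 10000^(head_dim / head_size), where head_dim = i % head_size.
// The query vector is rotated across all dim elements; the key vector only
// across the first kv_dim elements (grouped/multi-query attention).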
#ifdef USE_CUDA
__global__ void RoPe_rotation_kernel(int pos, float *sq, float *sk, int kv_dim, int head_size) {
    int i = threadIdx.x * 2 + blockIdx.x * head_size;
    int head_dim = i % head_size;
    float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
    float val = pos * freq;
    float fcr = cosf(val);
    float fci = sinf(val);
    int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
    for (int v = 0; v < rotn; v++) {
        float* vec = v == 0 ? sq : sk; // the vector to rotate (query or key)
        float v0 = vec[i];
        float v1 = vec[i+1];
        vec[i]   = v0 * fcr - v1 * fci;
        vec[i+1] = v0 * fci + v1 * fcr;
    }
}
void RoPe_rotation(int pos, RunState* s, int dim, int kv_dim, int head_size) {
    RoPe_rotation_kernel <<<dim/head_size, head_size/2 >>> (pos, s->q, s->k, kv_dim, head_size);
}
#else
void RoPe_rotation(int pos, RunState* s, int dim, int kv_dim, int head_size) {
    for (int i = 0; i < dim; i+=2) {
        int head_dim = i % head_size;
        float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
        float val = pos * freq;
        float fcr = cosf(val);
        float fci = sinf(val);
        int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
        for (int v = 0; v < rotn; v++) {
            float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
            float v0 = vec[i];
            float v1 = vec[i+1];
            vec[i]   = v0 * fcr - v1 * fci;
            vec[i+1] = v0 * fci + v1 * fcr;
        }
    }
}
#endif
#ifdef USE_CUDA
// TODO refactor vs C code
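// Kernel mapping: one block per attention head (blockIdx.x = h). Within a
// block, threads first stride over timesteps 0..pos to compute the dot-product
// scores, softmax them, then stride over the head_size elements of the output
// to accumulate the attention-weighted sum of the cached values.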
__global__ void multi_head_attention_kernel(int pos, int seq_len, float *sq, float *satt, float *sxb, float *key_cache, float *value_cache, int kv_dim, int kv_mul, int head_size, int loff) {
    int h = blockIdx.x;
    // get the query vector for this head
    float* q = sq + h * head_size;
    // attention scores for this head
    float* att = satt + h * seq_len;
    // iterate over all timesteps, including the current one
    // In CUDA, each thread does a small portion of the calc
    for (int t = threadIdx.x; t <= pos; t += blockDim.x) {
        // get the key vector for this head and at this timestep
        float* k = key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
        // calculate the attention score as the dot product of q and k
        float score = 0.0f;
        for (int i = 0; i < head_size; i++) {
            score += q[i] * k[i];
        }
        score /= sqrtf(head_size);
        // save the score to the attention buffer
        att[t] = score;
    }
    // above was this thread's portion of the iteration. wait for all threads to finish
    __syncthreads();

    // softmax the scores to get attention weights, from 0..pos inclusively
    softmax_gpu(att, pos + 1);
    __syncthreads();

    // weighted sum of the values, store back into xb
    // NOTE: by swapping the order of the for loops (vs. C) a simpler
    // version of the code accomplishes the same task and fits more
    // naturally with the CUDA way of subdividing the problem.
    float* xb = sxb + h * head_size;
    for (int i = threadIdx.x; i < head_size; i += blockDim.x) {
        float val = 0.0f;
        for (int t = 0; t <= pos; t++) {
            // get the value vector for this head and at this timestep
            float* v = value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
            // get the attention weight for this timestep
            float a = att[t];
            val += a * v[i];
        }
        xb[i] = val;
    }
}
void multi_head_attention(int pos, Config* p, RunState* s, int kv_dim, int kv_mul, int head_size, int loff) {
    multi_head_attention_kernel <<<p->n_heads, num_threads_lrg>>> (pos, p->seq_len, s->q, s->att, s->xb, s->key_cache, s->value_cache, kv_dim, kv_mul, head_size, loff);
}
#else
void multi_head_attention(int pos, Config* p, RunState* s, int kv_dim, int kv_mul, int head_size, int loff) {
    int h;
    #pragma omp parallel for private(h)
    for (h = 0; h < p->n_heads; h++) {
        // get the query vector for this head
        float* q = s->q + h * head_size;
        // attention scores for this head
        float* att = s->att + h * p->seq_len;
        // iterate over all timesteps, including the current one
        for (int t = 0; t <= pos; t++) {
            // get the key vector for this head and at this timestep
            float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
            // calculate the attention score as the dot product of q and k
            float score = 0.0f;
            for (int i = 0; i < head_size; i++) {
                score += q[i] * k[i];
            }
            score /= sqrtf(head_size);
            // save the score to the attention buffer
            att[t] = score;
        }
        // softmax the scores to get attention weights, from 0..pos inclusively
        softmax(att, pos + 1);
        // weighted sum of the values, store back into xb
        float* xb = s->xb + h * head_size;
        memset(xb, 0, head_size * sizeof(float));
        for (int t = 0; t <= pos; t++) {
            // get the value vector for this head and at this timestep
            float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
            // get the attention weight for this timestep
            float a = att[t];
            // accumulate the weighted value into xb
            for (int i = 0; i < head_size; i++) {
                xb[i] += a * v[i];
            }
        }
    }
}
#endif
#ifdef USE_CUDA
__global__ void f_silu_elementwise_mul_w3_kernel(float *shb, float *shb2, int hidden_dim) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < hidden_dim) {
        float val = shb[i];
        // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
        val *= (1.0f / (1.0f + expf(-val)));
        // elementwise multiply with w3(x)
        val *= shb2[i];
        shb[i] = val;
    }
}
void f_silu_elementwise_mul_w3(RunState *s, int hidden_dim) {
    f_silu_elementwise_mul_w3_kernel<<<divUp(hidden_dim, num_threads_med), num_threads_med>>>(s->hb, s->hb2, hidden_dim);
}
#else
void f_silu_elementwise_mul_w3(RunState *s, int hidden_dim) {
    for (int i = 0; i < hidden_dim; i++) {
        float val = s->hb[i];
        // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
        val *= (1.0f / (1.0f + expf(-val)));
        // elementwise multiply with w3(x)
        val *= s->hb2[i];
        s->hb[i] = val;
    }
}
#endif

#ifdef USE_CUDA
__global__ void accum_kernel(float* a, float* b, int size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        a[i] += b[i];
    }
}
void accum(float *a, float *b, int size) {
    accum_kernel<<<divUp(size, num_threads_med), num_threads_med>>>(a, b, size);
}
#else
void accum(float *a, float *b, int size) {
    for (int i = 0; i < size; i++) {
        a[i] += b[i];
    }
}
#endif
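
// One decoding step. For each layer the pipeline is:
//   rmsnorm -> q/k/v matmuls (k and v are written straight into the kv cache
//   at position pos) -> RoPE rotation -> multi-head attention over positions
//   0..pos -> output projection + residual -> rmsnorm -> w1/w3 matmuls ->
//   SwiGLU -> w2 matmul + residual.
// A final rmsnorm and the classifier matmul produce the logits; the CUDA
// build then copies them from logits_gpu back to the host.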
#ifdef USE_CUDA
float* forward(Transformer* transformer, int token, int pos, cublasHandle_t handle) {
#else
float* forward(Transformer* transformer, int token, int pos) {
#endif
    // a few convenience variables
    Config* p = &transformer->config;
    TransformerWeights* w = &transformer->weights;
    RunState* s = &transformer->state;
    float *x = s->x;
    int dim = p->dim;
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
    int hidden_dim = p->hidden_dim;
    int head_size = dim / p->n_heads;

    // copy the token embedding into x
    float* content_row = w->token_embedding_table + token * dim;
#ifdef USE_CUDA
    CUCHK(cudaMemcpy(x, content_row, dim*sizeof(*x), cudaMemcpyDeviceToDevice));
#else
    memcpy(x, content_row, dim*sizeof(*x));
#endif

    // forward all the layers
    for (unsigned long long l = 0; l < p->n_layers; l++) {
        // attention rmsnorm
        rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);

        // key and value point to the kv cache
        int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
        s->k = s->key_cache + loff + pos * kv_dim;
        s->v = s->value_cache + loff + pos * kv_dim;

        // qkv matmuls for this position
#ifdef USE_CUDA
        matmul(handle, s->q, s->xb, w->wq + l*dim*dim, dim, dim);
        matmul(handle, s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim);
        matmul(handle, s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim);
#else
        matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim);
        matmul(s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim);
        matmul(s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim);
#endif

        // RoPE relative positional encoding: complex-valued rotate q and k in each head
        RoPe_rotation(pos, s, dim, kv_dim, head_size);

        // multihead attention. iterate over all heads
        multi_head_attention(pos, p, s, kv_dim, kv_mul, head_size, loff);

        // final matmul to get the output of the attention
#ifdef USE_CUDA
        matmul(handle, s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);
#else
        matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);
#endif

        // residual connection back into x
        accum(x, s->xb2, dim);

        // ffn rmsnorm
        rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);

        // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
        // first calculate self.w1(x) and self.w3(x)
#ifdef USE_CUDA
        matmul(handle, s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
        matmul(handle, s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);
#else
        matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
        matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);
#endif

        // SwiGLU non-linearity
        f_silu_elementwise_mul_w3(s, hidden_dim);

        // final matmul to get the output of the ffn
#ifdef USE_CUDA
        matmul(handle, s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);
#else
        matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);
#endif

        // residual connection
        accum(x, s->xb, dim);
    }

    // final rmsnorm
    rmsnorm(x, x, w->rms_final_weight, dim);

    // classifier into logits
#ifdef USE_CUDA
    matmul(handle, s->logits_gpu, x, w->wcls, p->dim, p->vocab_size);
    CUCHK(cudaMemcpy(s->logits, s->logits_gpu, p->vocab_size * sizeof(float), cudaMemcpyDeviceToHost));
#else
    matmul(s->logits, x, w->wcls, p->dim, p->vocab_size);
#endif
    return s->logits;
}
// ----------------------------------------------------------------------------
// The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens

typedef struct {
    char *str;
    int id;
} TokenIndex;

typedef struct {
    char** vocab;
    float* vocab_scores;
    TokenIndex *sorted_vocab;
    int vocab_size;
    unsigned int max_token_length;
    unsigned char byte_pieces[512]; // stores all single-byte strings
} Tokenizer;

int compare_tokens(const void *a, const void *b) {
    return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
}
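
// Tokenizer file layout, as read below: max_token_length (int32), then for
// each of the vocab_size entries a score (float32), a byte length (int32),
// and that many raw bytes of the token string (not NUL-terminated on disk).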
void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) {
    // i should have written the vocab_size into the tokenizer file... sigh
    t->vocab_size = vocab_size;
    // malloc space to hold the scores and the strings
    t->vocab = (char**)malloc(vocab_size * sizeof(char*));
    t->vocab_scores = (float*)malloc(vocab_size * sizeof(float));
    t->sorted_vocab = NULL; // initialized lazily
    for (int i = 0; i < 256; i++) {
        t->byte_pieces[i * 2] = (unsigned char)i;
        t->byte_pieces[i * 2 + 1] = '\0';
    }
    // read in the file
    FILE *file = fopen(tokenizer_path, "rb");
    if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); }
    if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
    int len;
    for (int i = 0; i < vocab_size; i++) {
        if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
        if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
        t->vocab[i] = (char *)malloc(len + 1);
        if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
        t->vocab[i][len] = '\0'; // add the string terminating token
    }
    fclose(file);
}

void free_tokenizer(Tokenizer* t) {
    for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); }
    free(t->vocab);
    free(t->vocab_scores);
    free(t->sorted_vocab);
}

char* decode(Tokenizer* t, int prev_token, int token) {
    char *piece = t->vocab[token];
    // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
    if (prev_token == 1 && piece[0] == ' ') { piece++; }
    // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
    // parse this and convert and return the actual byte
    unsigned char byte_val;
    if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
        piece = (char*)t->byte_pieces + byte_val * 2;
    }
    return piece;
}

void safe_printf(char *piece) {
    // piece might be a raw byte token, and we only want to print printable chars or whitespace
    // because some of the other bytes can be various control codes, backspace, etc.
    if (piece == NULL) { return; }
    if (piece[0] == '\0') { return; }
    if (piece[1] == '\0') {
        unsigned char byte_val = piece[0];
        if (!(isprint(byte_val) || isspace(byte_val))) {
            return; // bad byte, don't print it
        }
    }
    printf("%s", piece);
}

int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
    // efficiently find the perfect match for str in vocab, return its index or -1 if not found
#if defined USE_CUDA && defined _WIN32
    // CUDA on Windows was not capable of handling the syntax below
    TokenIndex tok;
    tok.str = str;
#else
    TokenIndex tok = { .str = str }; // acts as the key to search for
#endif
    TokenIndex *res = (TokenIndex *)bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
    return res != NULL ? res->id : -1;
}
void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
    // encode the string text (input) into an upper-bound preallocated tokens[] array
    // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2)
    if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }

    if (t->sorted_vocab == NULL) {
        // lazily malloc and sort the vocabulary
        t->sorted_vocab = (TokenIndex *)malloc(t->vocab_size * sizeof(TokenIndex));
        for (int i = 0; i < t->vocab_size; i++) {
            t->sorted_vocab[i].str = t->vocab[i];
            t->sorted_vocab[i].id = i;
        }
        qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
    }

    // create a temporary buffer that will store merge candidates of always two consecutive tokens
    // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
    char* str_buffer = (char *)malloc((t->max_token_length*2 +1 +2) * sizeof(char));
    size_t str_len = 0;

    // start at 0 tokens
    *n_tokens = 0;

    // add optional BOS (=1) token, if desired
    if (bos) tokens[(*n_tokens)++] = 1;

    // add_dummy_prefix is true by default
    // so prepend a dummy prefix token to the input string, but only if text != ""
    // TODO: pretty sure this isn't correct in the general case but I don't have the
    // energy to read more of the sentencepiece code to figure out what it's doing
    if (text[0] != '\0') {
        int dummy_prefix = str_lookup((char *)" ", t->sorted_vocab, t->vocab_size);
        tokens[(*n_tokens)++] = dummy_prefix;
    }

    // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
    // Code point <-> UTF-8 conversion
    // First code point  Last code point  Byte 1    Byte 2    Byte 3    Byte 4
    // U+0000            U+007F           0xxxxxxx
    // U+0080            U+07FF           110xxxxx  10xxxxxx
    // U+0800            U+FFFF           1110xxxx  10xxxxxx  10xxxxxx
    // U+10000           U+10FFFF         11110xxx  10xxxxxx  10xxxxxx  10xxxxxx

    // process the raw (UTF-8) byte sequence of the input string
    for (char *c = text; *c != '\0'; c++) {
        // reset buffer if the current byte is ASCII or a leading byte
        // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
        // 0x80 is 10000000
        // in UTF-8, all continuation bytes start with "10" in first two bits
        // so in English this is: "if this byte is not a continuation byte"
        if ((*c & 0xC0) != 0x80) {
            // this byte must be either a leading byte (11...) or an ASCII char (0x...)
            // => reset our location, as we're starting a new UTF-8 codepoint
            str_len = 0;
        }

        // append the current byte to the buffer
        str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
        str_buffer[str_len] = '\0';

        // while the next character is a continuation byte, continue appending
        // but if there are too many of them, just stop to avoid overrunning str_buffer size.
        if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
            continue;
        }

        // ok c+1 is not a continuation byte, so we've read in a full codepoint
        int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);

        if (id != -1) {
            // we found this codepoint in vocab, add it as a token
            tokens[(*n_tokens)++] = id;
        } else {
            // byte_fallback encoding: just encode each byte as a token
            // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
            // so the individual bytes only start at index 3
            for (int i=0; i < str_len; i++) {
                tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
            }
        }
        str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
    }

    // merge the best consecutive pair each iteration, according to the scores in vocab_scores
    while (1) {
        float best_score = -1e10;
        int best_id = -1;
        int best_idx = -1;

        for (int i=0; i < (*n_tokens-1); i++) {
            // check if we can merge the pair (tokens[i], tokens[i+1])
            sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
            int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
            if (id != -1 && t->vocab_scores[id] > best_score) {
                // this merge pair exists in vocab! record its score and position
                best_score = t->vocab_scores[id];
                best_id = id;
                best_idx = i;
            }
        }

        if (best_idx == -1) {
            break; // we couldn't find any more pairs to merge, so we're done
        }

        // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
        tokens[best_idx] = best_id;
        // delete token at position best_idx+1, shift the entire sequence back 1
        for (int i = best_idx+1; i < (*n_tokens-1); i++) {
            tokens[i] = tokens[i+1];
        }
        (*n_tokens)--; // token length decreased
    }

    // add optional EOS (=2) token, if desired
    if (eos) tokens[(*n_tokens)++] = 2;

    free(str_buffer);
}
// ----------------------------------------------------------------------------
// The Sampler, which takes logits and returns a sampled token
// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling

typedef struct {
    float prob;
    int index;
} ProbIndex; // struct used when sorting probabilities during top-p sampling

typedef struct {
    int vocab_size;
    ProbIndex* probindex; // buffer used in top-p sampling
    float temperature;
    float topp;
    unsigned long long rng_state;
} Sampler;

int sample_argmax(float* probabilities, int n) {
    // return the index that has the highest probability
    int max_i = 0;
    float max_p = probabilities[0];
    for (int i = 1; i < n; i++) {
        if (probabilities[i] > max_p) {
            max_i = i;
            max_p = probabilities[i];
        }
    }
    return max_i;
}

int sample_mult(float* probabilities, int n, float coin) {
    // sample index from probabilities (they must sum to 1!)
    // coin is a random number in [0, 1), usually from random_f32()
    float cdf = 0.0f;
    for (int i = 0; i < n; i++) {
        cdf += probabilities[i];
        if (coin < cdf) {
            return i;
        }
    }
    return n - 1; // in case of rounding errors
}

int compare(const void* a, const void* b) {
    ProbIndex* a_ = (ProbIndex*) a;
    ProbIndex* b_ = (ProbIndex*) b;
    if (a_->prob > b_->prob) return -1;
    if (a_->prob < b_->prob) return 1;
    return 0;
}

int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {
    // top-p sampling (or "nucleus sampling") samples from the smallest set of
    // tokens that exceed probability topp. This way we never sample tokens that
    // have very low probabilities and are less likely to go "off the rails".
    // coin is a random number in [0, 1), usually from random_f32()

    int n0 = 0;
    // quicksort indices in descending order of probabilities
    // values smaller than (1 - topp) / (n - 1) cannot be part of the result
    // so for efficiency we crop these out as candidates before sorting
    const float cutoff = (1.0f - topp) / (n - 1);
    for (int i = 0; i < n; i++) {
        if (probabilities[i] >= cutoff) {
            probindex[n0].index = i;
            probindex[n0].prob = probabilities[i];
            n0++;
        }
    }
    qsort(probindex, n0, sizeof(ProbIndex), compare);

    // truncate the list where cumulative probability exceeds topp
    float cumulative_prob = 0.0f;
    int last_idx = n0 - 1; // in case of rounding errors consider all elements
    for (int i = 0; i < n0; i++) {
        cumulative_prob += probindex[i].prob;
        if (cumulative_prob > topp) {
            last_idx = i;
            break; // we've exceeded topp by including last_idx
        }
    }

    // sample from the truncated list
    float r = coin * cumulative_prob;
    float cdf = 0.0f;
    for (int i = 0; i <= last_idx; i++) {
        cdf += probindex[i].prob;
        if (r < cdf) {
            return probindex[i].index;
        }
    }
    return probindex[last_idx].index; // in case of rounding errors
}

void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
    sampler->vocab_size = vocab_size;
    sampler->temperature = temperature;
    sampler->topp = topp;
    sampler->rng_state = rng_seed;
    // buffer only used with nucleus sampling; may not need but it's ~small
    sampler->probindex = (ProbIndex *)malloc(sampler->vocab_size * sizeof(ProbIndex));
}

void free_sampler(Sampler* sampler) {
    free(sampler->probindex);
    sampler->probindex = NULL;
}

unsigned int random_u32(unsigned long long *state) {
    // xorshift rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
    *state ^= *state >> 12;
    *state ^= *state << 25;
    *state ^= *state >> 27;
    return (*state * 0x2545F4914F6CDD1Dull) >> 32;
}
float random_f32(unsigned long long *state) { // random float32 in [0,1)
    return (random_u32(state) >> 8) / 16777216.0f;
}

int sample(Sampler* sampler, float* logits) {
    // sample the token given the logits and some hyperparameters
    int next;
    if (sampler->temperature == 0.0f) {
        // greedy argmax sampling: take the token with the highest probability
        next = sample_argmax(logits, sampler->vocab_size);
    } else {
        // apply the temperature to the logits
        for (int q=0; q<sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
        // apply softmax to the logits to get the probabilities for next token
        softmax(logits, sampler->vocab_size);
        // flip a (float) coin (this is our source of entropy for sampling)
        float coin = random_f32(&sampler->rng_state);
        // we sample from this distribution to get the next token
        if (sampler->topp <= 0 || sampler->topp >= 1) {
            // simply sample from the predicted probability distribution
            next = sample_mult(logits, sampler->vocab_size, coin);
        } else {
            // top-p (nucleus) sampling, clamping the least likely tokens to zero
            next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
        }
    }
    return next;
}
// ----------------------------------------------------------------------------
// utilities: time

long time_in_ms() {
    // return time in milliseconds, for benchmarking the model speed
    struct timespec time;
    clock_gettime(CLOCK_REALTIME, &time);
    return time.tv_sec * 1000 + time.tv_nsec / 1000000;
}
// ----------------------------------------------------------------------------
// generation loop

// void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) {
//     char *empty_prompt = (char *)"";
//     if (prompt == NULL) { prompt = empty_prompt; }
//     // encode the (string) prompt into tokens sequence
//     int num_prompt_tokens = 0;
//     int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
//     encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
//     if (num_prompt_tokens < 1) {
//         fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
//         exit(EXIT_FAILURE);
//     }
//     // start the main loop
//     long start = 0; // used to time our code, only initialized after first iteration
//     int next;       // will store the next token in the sequence
//     int token = prompt_tokens[0]; // kick off with the first token in the prompt
//     int pos = 0;    // position in the sequence
//     while (pos < steps) {
//         // forward the transformer to get logits for the next token
//         float* logits = forward(transformer, token, pos);
//         // advance the state machine
//         if (pos < num_prompt_tokens - 1) {
//             // if we are still processing the input prompt, force the next prompt token
//             next = prompt_tokens[pos + 1];
//         } else {
//             // otherwise sample the next token from the logits
//             next = sample(sampler, logits);
//         }
//         pos++;
//         // data-dependent terminating condition: the BOS (=1) token delimits sequences
//         if (next == 1) { break; }
//         // print the token as string, decode it with the Tokenizer object
//         char* piece = decode(tokenizer, token, next);
//         safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
//         fflush(stdout);
//         token = next;
//         // init the timer here because the first iteration can be slower
//         if (start == 0) { start = time_in_ms(); }
//     }
//     printf("\n");
//     // report achieved tok/s (pos-1 because the timer starts after first iteration)
//     if (pos > 1) {
//         long end = time_in_ms();
//         fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
//     }
//     free(prompt_tokens);
// }

// void read_stdin(const char* guide, char* buffer, size_t bufsize) {
//     // read a line from stdin, up to but not including \n
//     printf("%s", guide);
//     if (fgets(buffer, bufsize, stdin) != NULL) {
//         size_t len = strlen(buffer);
//         if (len > 0 && buffer[len - 1] == '\n') {
//             buffer[len - 1] = '\0'; // strip newline
//         }
//     }
// }

// // ----------------------------------------------------------------------------
// // chat loop
// // I manually inspected the tokens for a few chat conversations compared to
// // python reference and that seemed ok, but this was not thoroughly tested and
// // is not safely implemented, it's more a proof of concept atm.

// void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
//           char *cli_user_prompt, char *cli_system_prompt, int steps) {
//     // buffers for reading the system prompt and user prompt from stdin
//     // you'll notice they are somewhat haphazardly and unsafely set atm
//     char system_prompt[512];
//     char user_prompt[512];
//     char rendered_prompt[1152];
//     int num_prompt_tokens = 0;
//     int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
//     int user_idx;
//     // start the main loop
//     int8_t user_turn = 1; // user starts
//     int next;       // will store the next token in the sequence
//     int token;      // stores the current token to feed into the transformer
//     int prev_token;
//     int pos = 0;    // position in the sequence
//     while (pos < steps) {
//         // when it is the user's turn to contribute tokens to the dialog...
//         if (user_turn) {
//             // get the (optional) system prompt at position 0
//             if (pos == 0) {
//                 // at position 0, the user can also contribute a system prompt
//                 if (cli_system_prompt == NULL) {
//                     // system prompt was not passed in, attempt to get it from stdin
//                     read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
//                 } else {
//                     // system prompt was passed in, use it
//                     strcpy(system_prompt, cli_system_prompt);
//                 }
//             }
//             // get the user prompt
//             if (pos == 0 && cli_user_prompt != NULL) {
//                 // user prompt for position 0 was passed in, use it
//                 strcpy(user_prompt, cli_user_prompt);
//             } else {
//                 // otherwise get user prompt from stdin
//                 read_stdin("User: ", user_prompt, sizeof(user_prompt));
//             }
//             // render user/system prompts into the Llama 2 Chat schema
//             if (pos == 0 && system_prompt[0] != '\0') {
//                 char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
//                 sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
//             } else {
//                 char user_template[] = "[INST] %s [/INST]";
//                 sprintf(rendered_prompt, user_template, user_prompt);
//             }
//             // encode the rendered prompt into tokens
//             encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
//             user_idx = 0; // reset the user index
//             user_turn = 0;
//             printf("Assistant: ");
//         }
//         // determine the token to pass into the transformer next
//         if (user_idx < num_prompt_tokens) {
//             // if we are still processing the input prompt, force the next prompt token
//             token = prompt_tokens[user_idx++];
//         } else {
//             // otherwise use the next token sampled from previous turn
//             token = next;
//         }
//         // EOS (=2) token ends the Assistant turn
//         if (token == 2) { user_turn = 1; }
//         // forward the transformer to get logits for the next token
//         float* logits = forward(transformer, token, pos);
//         next = sample(sampler, logits);
//         pos++;
//         if (user_idx >= num_prompt_tokens && next != 2) {
//             // the Assistant is responding, so print its output
//             char* piece = decode(tokenizer, token, next);
//             safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
//             fflush(stdout);
//         }
//         if (next == 2) { printf("\n"); }
//     }
//     printf("\n");
//     free(prompt_tokens);
// }
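
// ----------------------------------------------------------------------------
// Library-style API: an opaque context bundling the transformer, tokenizer and
// sampler. llama2_generate() encodes the prompt and launches the decode loop
// on a detached std::thread; llama2_get_last() polls that thread's output
// buffer and hands back one decoded piece at a time until it returns NULL.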
typedef struct {
    Transformer transformer;
    Tokenizer tokenizer;
    Sampler sampler;
    int *output;    // buffer to store the output tokens (max_tokens + 1)
    int output_idx; // current index in the output buffer (0 ... max_tokens - 1)
    int gen_idx;    // generated tokens (0 ... max_tokens)
    int finished;
#ifdef USE_CUDA
    cublasHandle_t g_cublas_handle;
#endif
} llama2_ctx;

void *llama2_init(char *model_path, char *tokenizer_path) {
    llama2_ctx *ctx = (llama2_ctx *)malloc(sizeof(llama2_ctx));
    build_transformer(&ctx->transformer, model_path);
    build_tokenizer(&ctx->tokenizer, tokenizer_path, ctx->transformer.config.vocab_size);
    ctx->output = NULL;
    ctx->sampler.probindex = NULL; // not built until llama2_generate()
#ifdef USE_CUDA
    cublasStatus_t stat = cublasCreate(&ctx->g_cublas_handle); // destroyed in llama2_free()
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS initialization failed\n");
        exit(EXIT_FAILURE);
    }
#endif
    return ctx;
}
void llama2_free(void *ctx) {
    llama2_ctx *c = (llama2_ctx *)ctx;
    free_transformer(&c->transformer);
    free_tokenizer(&c->tokenizer);
    if (c->sampler.probindex != NULL)
        free_sampler(&c->sampler);
#ifdef USE_CUDA
    cublasStatus_t stat = cublasDestroy(c->g_cublas_handle);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS destroy failed\n");
        exit(EXIT_FAILURE);
    }
#endif
    if (c->output != NULL)
        free(c->output);
    free(c); // the context itself was malloc'd in llama2_init()
}
void llama2_generate_loop(llama2_ctx *ctx, int *prompt_tokens, int num_prompt_tokens, int steps, int *output_tokens) {
    // printf("generate loop started\n");
    // start the main loop
    // long start = 0; // used to time our code, only initialized after first iteration
    int next;                     // will store the next token in the sequence
    int token = prompt_tokens[0]; // kick off with the first token in the prompt
    int pos = 0;                  // position in the sequence
    while (pos < steps) {
        // forward the transformer to get logits for the next token
#ifdef USE_CUDA
        float* logits = forward(&ctx->transformer, token, pos, ctx->g_cublas_handle);
#else
        float* logits = forward(&ctx->transformer, token, pos);
#endif
        // advance the state machine
        if (pos < num_prompt_tokens - 1) {
            // if we are still processing the input prompt, force the next prompt token
            next = prompt_tokens[pos + 1];
        } else {
            // otherwise sample the next token from the logits
            next = sample(&ctx->sampler, logits);
        }
        // printf("current gen idx: %d, %d\n", ctx->gen_idx, next);
        // once the prompt has been consumed, publish tokens for llama2_get_last()
        if (pos == num_prompt_tokens - 1)
            output_tokens[ctx->gen_idx] = token;
        if (pos >= num_prompt_tokens - 1)
            output_tokens[ctx->gen_idx++ + 1] = next;
        pos++;
        token = next;
        // EOS (=2) token ends generation
        if (next == 2)
            break;
    }
    // report achieved tok/s (pos-1 because the timer starts after first iteration)
    // if (pos > 1) {
    //     long end = time_in_ms();
    //     fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
    // }
    ctx->finished = 1;
    free(prompt_tokens);
    free_sampler(&ctx->sampler);
    // printf("generate loop finished\n");
}
int llama2_generate(void *ctx, char *prompt, int steps, float temperature, float topp, int seed) {
    llama2_ctx *c = (llama2_ctx *)ctx;
    build_sampler(&c->sampler, c->transformer.config.vocab_size, temperature, topp, seed);
    char *empty_prompt = (char *)"";
    if (prompt == NULL) { prompt = empty_prompt; }
    // encode the (string) prompt into tokens sequence
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
    encode(&c->tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
    if (num_prompt_tokens < 1) {
        fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
        free(prompt_tokens);
        return 1;
    }
    if (num_prompt_tokens >= steps) {
        fprintf(stderr, "prompt tokens exceeds max token length\n");
        free(prompt_tokens);
        return 1;
    }
    c->output = (int *)malloc((steps + 1) * sizeof(int));
    c->gen_idx = 0;
    c->output_idx = 0;
    c->finished = 0;
    std::thread t(llama2_generate_loop, c, prompt_tokens, num_prompt_tokens, steps, c->output);
    t.detach();
    return 0;
}
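
// Returns the next decoded piece, blocking (in 100 ms polls) until the
// generation thread has produced it. Returns NULL once generation has
// finished and every piece has been consumed, at which point the output
// buffer is released.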
char *llama2_get_last(void *ctx) {
    llama2_ctx *c = (llama2_ctx *)ctx;
    assert(c->output != NULL); // shouldn't be called again after finished
    while (!c->finished && c->output_idx >= c->gen_idx) {
        // printf("current idx: %d, %d\n", c->output_idx, c->gen_idx);
        usleep(100000);
    } // wait for next token
    if (c->finished && c->output_idx >= c->gen_idx) {
        free(c->output);
        c->output = NULL;
        return NULL;
    }
    // printf("current idx: %d, %d, finished:%d\n", c->output_idx, c->gen_idx, c->finished);
    char *piece = decode(&c->tokenizer, c->output[c->output_idx], c->output[c->output_idx + 1]);
    c->output_idx++;
    return piece;
}

void llama2_tokenize(void *ctx, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
    llama2_ctx *c = (llama2_ctx *)ctx;
    encode(&c->tokenizer, text, bos, eos, tokens, n_tokens);
}
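
// Example usage of the API above (a sketch; "model.bin" and "tokenizer.bin"
// are placeholder paths, and error handling is omitted):
//
//   void *ctx = llama2_init((char *)"model.bin", (char *)"tokenizer.bin");
//   if (llama2_generate(ctx, (char *)"Once upon a time", 256, 1.0f, 0.9f, 42) == 0) {
//       for (char *piece = llama2_get_last(ctx); piece != NULL; piece = llama2_get_last(ctx)) {
//           safe_printf(piece);
//           fflush(stdout);
//       }
//   }
//   llama2_free(ctx);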