#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>
#include <numeric>
#include <fstream>
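
// Repro: load three input tensors from gguf files, run ggml_mul_mat_id on the CPU
// backend and on the GPU backend, and compare the two results element-wise.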
int main(int argc, char ** argv) {
    llama_log_set(nullptr, nullptr);
    llama_backend_init();
    ggml_backend_load_all_from_path("build/bin");
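    // NOTE: the call above looks for the backend shared libraries in build/bin;
    // adjust the path to match your build tree.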

    struct ggml_init_params params = {
        /*.mem_size   =*/ 10 * ggml_tensor_overhead() + ggml_graph_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
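
    // no_alloc = true: these contexts only hold tensor metadata;
    // the actual data buffers are allocated by the backends below.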
    ggml_context * gctx     = ggml_init(params);
    ggml_context * gctx_cpu = ggml_init(params);
    ggml_context * wctx = nullptr;
    ggml_context * nctx = nullptr;
    ggml_context * ictx = nullptr;

    struct gguf_init_params wparams = {
        /*.no_alloc =*/ false,
        /*.ctx      =*/ &wctx,
    };
    struct gguf_init_params nparams = {
        /*.no_alloc =*/ false,
        /*.ctx      =*/ &nctx,
    };
    struct gguf_init_params iparams = {
        /*.no_alloc =*/ false,
        /*.ctx      =*/ &ictx,
    };

    gguf_context * wgctx = gguf_init_from_file("problem-tensors-weights.gguf", wparams);
    gguf_context * ngctx = gguf_init_from_file("problem-tensors-norm.gguf", nparams);
    gguf_context * igctx = gguf_init_from_file("problem-tensors-ids.gguf", iparams);
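
    // Minimal sanity check (not strictly required for the repro): bail out if any
    // of the gguf files could not be loaded.
    if (wgctx == nullptr || ngctx == nullptr || igctx == nullptr) {
        fprintf(stderr, "failed to load the problem-tensors gguf files\n");
        return 1;
    }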

    // gguf_init_from_file puts the raw data blob first in the returned context,
    // so step past it to reach the tensor stored in each file
    ggml_tensor * weights = ggml_get_next_tensor(wctx, ggml_get_first_tensor(wctx));
    ggml_tensor * norm    = ggml_get_next_tensor(nctx, ggml_get_first_tensor(nctx));
    ggml_tensor * ids     = ggml_get_next_tensor(ictx, ggml_get_first_tensor(ictx));

    // build and run the reference mul_mat_id graph on the CPU backend
    ggml_context * gctx_cpu_comp = ggml_init(params);
    struct ggml_cgraph * gf_cpu = ggml_new_graph(gctx_cpu_comp);
    ggml_tensor * mul_mat_id_cpu = ggml_mul_mat_id(gctx_cpu, weights, norm, ids);
    ggml_build_forward_expand(gf_cpu, mul_mat_id_cpu);

    ggml_backend_t cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);

    ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(cpu));
    ggml_gallocr_alloc_graph(allocr, gf_cpu);

    ggml_backend_graph_compute(cpu, gf_cpu);
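
    // reduce the CPU result to a few summary statistics for comparison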
    double sum_cpu = 0.0;
    float max_cpu = ((float *) mul_mat_id_cpu->data)[0];
    float min_cpu = ((float *) mul_mat_id_cpu->data)[0];
    for (int64_t i = 0; i < ggml_nelements(mul_mat_id_cpu); i++) {
        float elt = ((float *) mul_mat_id_cpu->data)[i];
        sum_cpu += elt;
        max_cpu = elt > max_cpu ? elt : max_cpu;
        min_cpu = elt < min_cpu ? elt : min_cpu;
    }
    printf("\n CPU sum of matmul: %.8f, max: %.8f, min: %.8f, nelements: %" PRId64 "\n\n",
            sum_cpu, max_cpu, min_cpu, ggml_nelements(mul_mat_id_cpu));
    ggml_tensor * w_cuda = ggml_new_tensor_4d(gctx, weights->type, weights->ne[0], weights->ne[1], weights->ne[2], weights->ne[3]);
    ggml_tensor * n_cuda = ggml_new_tensor_4d(gctx, norm->type,    norm->ne[0],    norm->ne[1],    norm->ne[2],    norm->ne[3]);
    ggml_tensor * i_cuda = ggml_new_tensor_4d(gctx, ids->type,     ids->ne[0],     ids->ne[1],     ids->ne[2],     ids->ne[3]);

    ggml_backend_t cuda = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
    ggml_backend_alloc_ctx_tensors(gctx, cuda);

    ggml_backend_tensor_set(w_cuda, weights->data, 0, ggml_nbytes(w_cuda));
    ggml_backend_tensor_set(n_cuda, norm->data,    0, ggml_nbytes(n_cuda));
    ggml_backend_tensor_set(i_cuda, ids->data,     0, ggml_nbytes(i_cuda));

    ggml_context * gctx_cuda_comp = ggml_init(params);
    struct ggml_cgraph * gf_cuda = ggml_new_graph(gctx_cuda_comp);
    ggml_tensor * mul_mat_id_cuda = ggml_mul_mat_id(gctx_cuda_comp, w_cuda, n_cuda, i_cuda);
    ggml_build_forward_expand(gf_cuda, mul_mat_id_cuda);

    ggml_gallocr_t cuda_allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(cuda));
    ggml_gallocr_alloc_graph(cuda_allocr, gf_cuda);

    ggml_backend_graph_compute(cuda, gf_cuda);
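
    // read the GPU result back to host memory and compare it element-wise with the CPU result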
    std::vector<float> vec;

    auto n_bytes = ggml_nbytes(mul_mat_id_cuda);
    vec.resize(ggml_nelements(mul_mat_id_cuda)); // element count, not byte count
    ggml_backend_tensor_get(mul_mat_id_cuda, vec.data(), 0, n_bytes);

    double sum = 0.0;
    float max = vec[0];
    float min = vec[0];
    float maxdiff = 0.0f;
    int64_t maxdiff_pos = -1;
    for (int64_t i = 0; i < ggml_nelements(mul_mat_id_cuda); i++) {
        float elt = vec[i];
        float org_elt = ((float *) mul_mat_id_cpu->data)[i];
        float diff = fabsf(elt - org_elt);
        if (diff > maxdiff) {
            maxdiff = diff;
            maxdiff_pos = i;
        }
        sum += elt;
        max = elt > max ? elt : max;
        min = elt < min ? elt : min;
    }
    printf("\n CUDA sum of matmul: %.8f, max: %.8f, min: %.8f, max diff: %.8f at pos %" PRId64 ", nelements: %" PRId64 "\n\n",
            sum, max, min, maxdiff, maxdiff_pos, ggml_nelements(mul_mat_id_cuda));
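
    // free everything before exiting (a cleanup sketch; the repro behaves the same
    // without it, since the OS reclaims the memory at exit)
    ggml_gallocr_free(allocr);
    ggml_gallocr_free(cuda_allocr);
    ggml_backend_free(cpu);
    ggml_backend_free(cuda);
    gguf_free(wgctx);
    gguf_free(ngctx);
    gguf_free(igctx);
    ggml_free(gctx);
    ggml_free(gctx_cpu);
    ggml_free(gctx_cpu_comp);
    ggml_free(gctx_cuda_comp);
    ggml_free(wctx);
    ggml_free(nctx);
    ggml_free(ictx);
    llama_backend_free();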

    return 0;
}