Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
28316f8
hexagon: refactor set/get/sum-rows ops to use local context
max-krasnyansky Feb 14, 2026
272e4ae
hexagon: refactor ROPE and Softmax Ops to use local context
max-krasnyansky Feb 14, 2026
3cd81b1
hexagon: refactor activation ops to use local context struct
max-krasnyansky Feb 15, 2026
1f72f12
hexagon: refactor unary ops to use local context struct and DMA/VTCM
max-krasnyansky Feb 16, 2026
aca9a5f
hexagon: use aligned hvx_scale function
max-krasnyansky Feb 16, 2026
b047dfe
hexagon: remove unused fields from op_context
max-krasnyansky Feb 18, 2026
6184173
hexagon: rewrite ROPE to use DMA and VTCM scratchpad
max-krasnyansky Feb 18, 2026
1475229
hex-rope: keep N rows in scratchpad (instead of just two)
max-krasnyansky Feb 18, 2026
a732df1
hex-rope: introduce rowidx cache
max-krasnyansky Feb 18, 2026
50e83dc
hex-rope: remove unused fields
max-krasnyansky Feb 18, 2026
2cf88f9
hex-rope: rewrite dma prefetch logic to allow for multi-row fetch/com…
max-krasnyansky Feb 19, 2026
3cd2a54
hex-rope: minor formatting
max-krasnyansky Feb 19, 2026
f9d5f9d
hex-rope: use indices and unroll the loops
max-krasnyansky Feb 19, 2026
b1b744d
hex-rope: more updates to cleanup rope-block handling
max-krasnyansky Feb 20, 2026
783155b
ggml-hexagon: add Q4_1 support for MUL_MAT
google-labs-jules[bot] Feb 20, 2026
0e54341
ggml-hexagon: add Q4_1 support for MUL_MAT
google-labs-jules[bot] Feb 20, 2026
5cc6d3b
ggml-hexagon: add Q4_1 support for MUL_MAT
google-labs-jules[bot] Feb 20, 2026
87c10d2
ggml-hexagon: add Q4_1 support for MUL_MAT
google-labs-jules[bot] Feb 21, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
287 changes: 287 additions & 0 deletions ggml/src/ggml-hexagon/ggml-hexagon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -627,6 +627,278 @@ static void repack_q4_0_q4x4x2(ggml_tensor * t, const void * data, size_t size)
ggml_aligned_free(buf_rp, row_size_rp);
}

// ======== Q4_1x4x2 ====================

// Debug helper: print the first four and last four quant values of one q4_1
// block, followed by its d (scale) and m (min) factors. Output goes through
// HEX_VERBOSE, so it is only emitted at sufficient verbosity.
// NOTE(review): indices qs[0..3]/.v[0] and qs[12..15]/.v[1] cover the low and
// high nibble halves of the 16-byte block — presumably chosen to show both
// ends of the 32-element block; confirm against unpack_q4's nibble order.
static void dump_block_q4_1(const block_q4_1 * b, int i) {
    HEX_VERBOSE("ggml-hex: repack q4_1 %d: %d %d %d %d ... %d %d %d %d : %.6f %.6f\n", i, unpack_q4(b->qs[0]).v[0],
                unpack_q4(b->qs[1]).v[0], unpack_q4(b->qs[2]).v[0], unpack_q4(b->qs[3]).v[0], unpack_q4(b->qs[12]).v[1],
                unpack_q4(b->qs[13]).v[1], unpack_q4(b->qs[14]).v[1], unpack_q4(b->qs[15]).v[1],
                GGML_FP16_TO_FP32(b->d), GGML_FP16_TO_FP32(b->m));
}

// Debug helper: dump selected quants and the first/last (d, m) pairs of one
// repacked q4_1x4x2 super-block. 'v' points at the start of the repacked row
// (layout: all quants, then all scales, then all mins), 'i' is the
// super-block index and 'k' the row length in elements.
static void dump_packed_block_q4_1x4x2(const uint8_t * v, unsigned int i, size_t k) {
    static const int qk = QK_Q4_1x4x2;

    const int dblk_size = 8 * 2;        // bytes of fp16 scales (or mins) per super-block
    const int qblk_size = qk / 2;       // bytes of packed int4 quants per super-block
    const int qrow_size = k / 2;        // packed quants per row (not padded)
    const int drow_size = (k / 32) * 2; // fp16 scales per row (not padded)

    // section offsets within the repacked row
    const uint8_t * q = v + i * qblk_size;                                       // quants first
    const ggml_half * d = (const ggml_half *) (v + qrow_size + i * dblk_size);   // then scales
    const ggml_half * m = (const ggml_half *) (v + qrow_size + drow_size + i * dblk_size); // then mins

    HEX_VERBOSE("ggml-hex: repack q4_1x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f ... %.6f %.6f\n", i,
                unpack_q4(q[0]).v[0], unpack_q4(q[1]).v[0], unpack_q4(q[2]).v[0], unpack_q4(q[3]).v[0],
                unpack_q4(q[60]).v[0], unpack_q4(q[61]).v[0], unpack_q4(q[62]).v[0], unpack_q4(q[63]).v[0],
                unpack_q4(q[124]).v[0], unpack_q4(q[125]).v[0], unpack_q4(q[126]).v[0], unpack_q4(q[127]).v[0],
                GGML_FP16_TO_FP32(d[0]), GGML_FP16_TO_FP32(m[0]), GGML_FP16_TO_FP32(d[3]), GGML_FP16_TO_FP32(m[3]));
}

// Expand block 'bi' of 'x' into flat 4-bit values in qs.
// The block occupies qs[bi*QK4_1 .. (bi+1)*QK4_1): low nibbles fill the first
// half of that span, high nibbles the second half.
static void unpack_q4_1_quants(uint8_t * qs, const block_q4_1 * x, unsigned int bi) {
    static const int qk = QK4_1;

    uint8_t * lo = qs + bi * qk;     // destination for low nibbles
    uint8_t * hi = lo + qk / 2;      // destination for high nibbles

    for (int j = 0; j < qk / 2; j++) {
        const uint8_t b = x->qs[j];
        lo[j] = b & 0x0F;
        hi[j] = b >> 4;
    }
}

// Inverse of unpack_q4_1_quants: gather the flat 4-bit values belonging to
// block 'bi' of qs and pack them back into x->qs, low nibble from the first
// half of the block's span, high nibble from the second half.
static void pack_q4_1_quants(block_q4_1 * x, const uint8_t * qs, unsigned int bi) {
    static const int qk = QK4_1;

    const uint8_t * lo = qs + bi * qk;   // source of low nibbles
    const uint8_t * hi = lo + qk / 2;    // source of high nibbles

    for (int j = 0; j < qk / 2; j++) {
        x->qs[j] = (uint8_t) (lo[j] | (hi[j] << 4));
    }
}

static void repack_row_q4_1x4x2(uint8_t * y, const block_q4_1 * x, int64_t k) {
static const int qk = QK_Q4_1x4x2;
const int nb = (k + qk - 1) / qk; // number of blocks (padded)

const int dblk_size = 8 * 2; // 8x __fp16
const int qblk_size = qk / 2; // int4
const int qrow_size = k / 2; // int4 (not padded to blocks)
const int drow_size = (k / 32) * 2; // fp16 (not padded)

uint8_t * y_q = y + 0; // quants first
uint8_t * y_d = y + qrow_size; // then scales
uint8_t * y_m = y + qrow_size + drow_size;// then mins

if (opt_verbose > 2) {
for (int i = 0; i < nb; i++) {
for (int j = 0; j < 8; ++j) dump_block_q4_1(&x[i * 8 + j], j);
}
}

// Repack the quants
for (int i = 0; i < nb; i++) {
uint8_t qs[QK_Q4_1x4x2]; // unpacked quants
for (int j = 0; j < 8; ++j) {
unpack_q4_1_quants(qs, &x[i * 8 + j], j);
}

uint8_t * q = y_q + (i * qblk_size);
for (int j = 0; j < qk / 2; j++) {
q[j] = (qs[j + 128] << 4) | qs[j];
}
}

// Repack the scales and mins
for (int i = 0; i < nb; i++) {
ggml_half * d = (ggml_half *) (y_d + i * dblk_size);
ggml_half * m = (ggml_half *) (y_m + i * dblk_size);
for (int j = 0; j < 8; ++j) {
d[j] = x[i * 8 + j].d;
m[j] = x[i * 8 + j].m;
}
}

if (opt_verbose > 1) {
for (int i = 0; i < nb; i++) {
dump_packed_block_q4_1x4x2(y, i, k);
}
}
}

// Unpack one repacked q4_1x4x2 row (y, k elements) back into regular q4_1
// blocks in x. Exact inverse of repack_row_q4_1x4x2: the row stores all
// packed quants first, then the fp16 scales (d), then the fp16 mins (m).
//
// Fix: derive the super-block geometry (8 blocks, 128-quant halves) from
// QK_Q4_1x4x2 / QK4_1 instead of hard-coding 8 / 128 / 32, mirroring the
// repack side so the two stay in sync.
static void unpack_row_q4_1x4x2(block_q4_1 * x, const uint8_t * y, int64_t k) {
    static const int qk  = QK_Q4_1x4x2;
    static const int nsb = QK_Q4_1x4x2 / QK4_1;  // q4_1 blocks per super-block (8)

    const int nb = (k + qk - 1) / qk;  // number of super-blocks (padded)

    const int dblk_size = nsb * 2;          // nsb x __fp16
    const int qblk_size = qk / 2;           // int4
    const int qrow_size = k / 2;            // int4 (not padded to blocks)
    const int drow_size = (k / QK4_1) * 2;  // fp16 (not padded)

    const uint8_t * y_q = y + 0;                      // quants first
    const uint8_t * y_d = y + qrow_size;              // then scales
    const uint8_t * y_m = y + qrow_size + drow_size;  // then mins

    if (opt_verbose > 1) {
        for (int i = 0; i < nb; i++) {
            dump_packed_block_q4_1x4x2(y, i, k);
        }
    }

    // Unpack the quants
    for (int i = 0; i < nb; i++) {
        uint8_t qs[QK_Q4_1x4x2];  // unpacked quants for one super-block

        // split each byte back into its low/high halves
        const uint8_t * q = y_q + (i * qblk_size);
        for (int j = 0; j < qk / 2; j++) {
            qs[j]          = q[j] & 0xf;
            qs[j + qk / 2] = q[j] >> 4;
        }

        for (int j = 0; j < nsb; ++j) {
            pack_q4_1_quants(&x[i * nsb + j], qs, j);
        }
    }

    // Unpack the scales and mins
    for (int i = 0; i < nb; i++) {
        const ggml_half * d = (const ggml_half *) (y_d + i * dblk_size);
        const ggml_half * m = (const ggml_half *) (y_m + i * dblk_size);
        for (int j = 0; j < nsb; ++j) {
            x[i * nsb + j].d = d[j];
            x[i * nsb + j].m = m[j];
        }
    }

    if (opt_verbose > 2) {
        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < nsb; ++j) dump_block_q4_1(&x[i * nsb + j], j);
        }
    }
}

// Initialize a (possibly padded) row of q4_1 blocks so that it unpacks to all
// zeros: every quant nibble packed from a zeroed buffer, and d/m set to 0.
static void init_row_q4_1x4x2(block_q4_1 * x, int64_t k) {
    static const int qk = QK_Q4_1x4x2;
    const int nb = (k + qk - 1) / qk;  // number of super-blocks (padded)

    uint8_t zeros[QK_Q4_1x4x2];  // one super-block worth of zero quants
    memset(zeros, 0, sizeof(zeros));

    // single pass: zero the quants, scales and mins of every block
    for (int i = 0; i < nb; i++) {
        for (int j = 0; j < 8; ++j) {
            block_q4_1 * b = &x[i * 8 + j];
            pack_q4_1_quants(b, zeros, j);
            b->d = 0;
            b->m = 0;
        }
    }
}

// Upload path: repack host q4_1 rows in `data` into the q4_1x4x2 layout of
// tensor t. At most `size` bytes are consumed; a trailing partial row is
// staged into a zero-initialized buffer so its missing tail repacks to zeros.
static void repack_q4_1_q4_1x4x2(ggml_tensor * t, const void * data, size_t size) {
    const int64_t nrows = ggml_nrows(t);

    const size_t row_size    = ggml_row_size(t->type, t->ne[0]);
    const size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_1x4x2)); // extra elements for the pad
    const size_t row_size_rp = row_size * 2;                                                // extra space for tmp pad (if any)

    // never touch more than the tensor actually holds
    const size_t total_tensor_size = (size_t) nrows * row_size;
    const size_t n_bytes_to_copy   = (size < total_tensor_size) ? size : total_tensor_size;

    const int64_t n_full_rows = n_bytes_to_copy / row_size;
    const size_t  n_rem_bytes = n_bytes_to_copy % row_size;

    void * buf_pd = ggml_aligned_malloc(row_size_pd);  // padded staging row (q4_1)
    GGML_ASSERT(buf_pd != NULL);

    void * buf_rp = ggml_aligned_malloc(row_size_rp);  // repacked staging row (q4_1x4x2)
    GGML_ASSERT(buf_rp != NULL);

    HEX_VERBOSE("ggml-hex: repack-q4_1-q4_1x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size,
                t->ne[0], nrows, row_size);

    // pre-fill the staging row so any pad elements unpack to zeros
    init_row_q4_1x4x2((block_q4_1 *) buf_pd, t->ne[0]);

    for (int64_t r = 0; r < n_full_rows; r++) {
        const uint8_t * src = (const uint8_t *) data + r * row_size;
        uint8_t *       dst = (uint8_t *) t->data + r * row_size;

        memcpy(buf_pd, src, row_size);
        repack_row_q4_1x4x2((uint8_t *) buf_rp, (const block_q4_1 *) buf_pd, t->ne[0]);
        memcpy(dst, buf_rp, row_size);
    }

    if (n_rem_bytes > 0) {
        const uint8_t * src = (const uint8_t *) data + n_full_rows * row_size;
        uint8_t *       dst = (uint8_t *) t->data + n_full_rows * row_size;

        // re-init so the unfilled tail of the staging row stays zero, then
        // repack the partial row and copy out only the bytes provided
        init_row_q4_1x4x2((block_q4_1 *) buf_pd, t->ne[0]);
        memcpy(buf_pd, src, n_rem_bytes);
        repack_row_q4_1x4x2((uint8_t *) buf_rp, (const block_q4_1 *) buf_pd, t->ne[0]);
        memcpy(dst, buf_rp, n_rem_bytes);
    }

    ggml_aligned_free(buf_pd, row_size_pd);
    ggml_aligned_free(buf_rp, row_size_rp);
}

// repack q4_1x4x2 tensor into q4_1 data
//
// Download path: unpack the tensor's q4_1x4x2 rows back into plain q4_1 in
// `data`, producing at most `size` bytes. Full rows are unpacked via two
// staging buffers (buf_pd: padded raw row read from the tensor, buf_rp:
// unpacked q4_1 row). A trailing partial row is read in full from the tensor
// (the tensor always holds complete rows) but only n_rem_bytes of the
// unpacked result are copied out, so exactly `size` bytes are written.
static void repack_q4_1x4x2_q4_1(void * data, const ggml_tensor * t, size_t size) {
    int64_t nrows = ggml_nrows(t);

    size_t row_size = ggml_row_size(t->type, t->ne[0]);
    size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_1x4x2)); // row padded to the repack block size
    size_t row_size_rp = row_size * 2; // scratch with extra room for a tmp pad

    // clamp the copy to what the tensor actually holds
    const size_t total_tensor_size = (size_t)nrows * row_size;
    const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size;

    const int64_t n_full_rows = n_bytes_to_copy / row_size;
    const size_t n_rem_bytes = n_bytes_to_copy % row_size;

    void * buf_pd = ggml_aligned_malloc(row_size_pd);
    GGML_ASSERT(buf_pd != NULL);

    void * buf_rp = ggml_aligned_malloc(row_size_rp);
    GGML_ASSERT(buf_rp != NULL);

    HEX_VERBOSE("ggml-hex: repack-q4_1x4x2-q4_1 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size,
                t->ne[0], nrows, row_size);

    // zero the pad region once; the per-row memcpy below only overwrites
    // the first row_size bytes
    memset(buf_pd, 0, row_size_pd);

    for (int64_t i = 0; i < n_full_rows; i++) {
        const uint8_t * src = (const uint8_t *) t->data + (i * row_size);
        uint8_t * dst = (uint8_t *) data + (i * row_size);

        memcpy(buf_pd, src, row_size);
        unpack_row_q4_1x4x2((block_q4_1 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]);
        memcpy(dst, buf_rp, row_size);
    }

    if (n_rem_bytes > 0) {
        const int64_t i = n_full_rows;
        const uint8_t * src = (const uint8_t *) t->data + (i * row_size);
        uint8_t * dst = (uint8_t *) data + (i * row_size);

        // read the whole row from the tensor, emit only the remaining bytes
        memcpy(buf_pd, src, row_size);
        unpack_row_q4_1x4x2((block_q4_1 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]);
        memcpy(dst, buf_rp, n_rem_bytes);
    }

    ggml_aligned_free(buf_pd, row_size_pd);
    ggml_aligned_free(buf_rp, row_size_rp);
}

// repack q4x4x2 tensor into q4_0 data
static void repack_q4x4x2_q4_0(void * data, const ggml_tensor * t, size_t size) {
int64_t nrows = ggml_nrows(t);
Expand Down Expand Up @@ -1377,6 +1649,12 @@ static void ggml_backend_hexagon_buffer_set_tensor(ggml_backend_buffer_t buffer,
repack_q4_0_q4x4x2(tensor, data, size);
break;

case GGML_TYPE_Q4_1:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
repack_q4_1_q4_1x4x2(tensor, data, size);
break;

case GGML_TYPE_Q8_0:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
Expand Down Expand Up @@ -1413,6 +1691,12 @@ static void ggml_backend_hexagon_buffer_get_tensor(ggml_backend_buffer_t buffer,
repack_q4x4x2_q4_0(data, tensor, size);
break;

case GGML_TYPE_Q4_1:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
repack_q4_1x4x2_q4_1(data, tensor, size);
break;

case GGML_TYPE_Q8_0:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
Expand Down Expand Up @@ -1849,6 +2133,7 @@ static bool ggml_hexagon_supported_mul_mat(const struct ggml_hexagon_session * s

switch (src0->type) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q8_0:
case GGML_TYPE_MXFP4:
if (src0->ne[0] % 32) {
Expand Down Expand Up @@ -3206,6 +3491,8 @@ static void ggml_hexagon_init(ggml_backend_reg * reg) {
// Basic sanity checks to make sure definitions match
static_assert((unsigned int) HTP_TYPE_Q4_0 == (unsigned int) GGML_TYPE_Q4_0,
"please update hexagon_type to match ggml_type");
static_assert((unsigned int) HTP_TYPE_Q4_1 == (unsigned int) GGML_TYPE_Q4_1,
"please update hexagon_type to match ggml_type");
static_assert((unsigned int) HTP_TYPE_Q8_0 == (unsigned int) GGML_TYPE_Q8_0,
"please update hexagon_type to match ggml_type");
static_assert((unsigned int) HTP_TYPE_MXFP4 == (unsigned int) GGML_TYPE_MXFP4,
Expand Down
Loading
Loading