Index
All Classes and Interfaces|All Packages
A
- accumulate(AbstractTensor, AbstractTensor, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
B
- batchDotProduct(AbstractTensor, AbstractTensor, AbstractTensor, int, int, int, int, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
C
- com.github.tjake.jlama.tensor.operations - package com.github.tjake.jlama.tensor.operations
- com.github.tjake.jlama.tensor.operations.cnative - package com.github.tjake.jlama.tensor.operations.cnative
- com.github.tjake.jlama.tensor.operations.util - package com.github.tjake.jlama.tensor.operations.util
D
- dotProductBatchChunk(AbstractTensor[], AbstractTensor, AbstractTensor[], int, int, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
G
- gemm_bf16(int, MemorySegment, int, MemorySegment, int, MemorySegment, MemorySegment, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_bf16(int flags, short* a, int aoffset, short* b, int boffset, short* cr, float* r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldc);
- gemm_bf16_batch(int, int, MemorySegment, int, MemorySegment, int, MemorySegment, MemorySegment, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_bf16_batch(int flags, int batch_num, short* a, int aoffset, short** b, int boffset, short** cr, float** r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldc);
- gemm_f32(int, MemorySegment, int, MemorySegment, int, MemorySegment, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_f32(int flags, float* a, int aoffset, float* b, int boffset, float* r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldc);
- gemm_f32_batch(int, int, MemorySegment, int, MemorySegment, int, MemorySegment, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_f32_batch(int flags, int batch_num, float* a, int aoffset, float** b, int boffset, float** r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldc);
- gemm_f32_bf16(int, MemorySegment, int, MemorySegment, int, MemorySegment, MemorySegment, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_f32_bf16(int flags, float* a, int aoffset, short* b, int boffset, short* cr, float* r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldc);
- gemm_f32_bf16_batch(int, int, MemorySegment, int, MemorySegment, int, MemorySegment, MemorySegment, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_f32_bf16_batch(int flags, int batch_num, float* a, int aoffset, short** b, int boffset, short** cr, float** r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldc);
- gemm_f32_q4(int, MemorySegment, int, MemorySegment, MemorySegment, int, MemorySegment, int, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_f32_q4(int flags, float* a, int aoffset, float* bf, char* b, int boffset, float* r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldbf, int ldc);
- gemm_f32_q4_batch(int, int, MemorySegment, int, MemorySegment, MemorySegment, int, MemorySegment, int, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_f32_q4_batch(int flags, int batch_num, float* a, int aoffset, float** bf, char** b, int boffset, float** r, int roffset, int m, int n0, int n, int k, int lda, int ldb, int ldbf, int ldc);
- gemm_q8_q4(int, MemorySegment, MemorySegment, int, MemorySegment, MemorySegment, int, MemorySegment, int, int, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_q8_q4(int flags, float* af, char* a, int aoffset, float* bf, char* b, int boffset, float* r, int roffset, int m, int n0, int n, int k, int lda, int ldaf, int ldb, int ldbf, int ldc);
- gemm_q8_q4_batch(int, int, MemorySegment, MemorySegment, int, MemorySegment, MemorySegment, int, MemorySegment, int, int, int, int, int, int, int, int, int, int) - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
void gemm_q8_q4_batch(int flags, int batch_num, float* af, char* a, int aoffset, float** bf, char** b, int boffset, float** r, int roffset, int m, int n0, int n, int k, int lda, int ldaf, int ldb, int ldbf, int ldc);
H
- HAS_AVX2 - Static variable in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- HAS_AVX2() - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
#define HAS_AVX2 4
- HAS_F16C - Static variable in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- HAS_F16C() - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
#define HAS_F16C 2
I
- IS_M_SERIES_MAC() - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
#define IS_M_SERIES_MAC 8
J
- JarSupport - Class in com.github.tjake.jlama.tensor.operations.util
- JarSupport() - Constructor for class com.github.tjake.jlama.tensor.operations.util.JarSupport
M
- maccumulate(AbstractTensor, AbstractTensor, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- maybeLoadLibrary() - Static method in class com.github.tjake.jlama.tensor.operations.util.JarSupport
- MemorySegmentSupport - Class in com.github.tjake.jlama.tensor.operations.util
- MemorySegmentSupport() - Constructor for class com.github.tjake.jlama.tensor.operations.util.MemorySegmentSupport
N
- name() - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- NativeSimd - Class in com.github.tjake.jlama.tensor.operations.cnative
- NativeSimd() - Constructor for class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
- NativeTensorOperations - Class in com.github.tjake.jlama.tensor.operations
- NativeTensorOperations() - Constructor for class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
P
- parallelSplitSize() - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
Q
- Q4_BLOCK_SIZE() - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
#define Q4_BLOCK_SIZE 32
- Q8_BLOCK_SIZE() - Static method in class com.github.tjake.jlama.tensor.operations.cnative.NativeSimd
-
#define Q8_BLOCK_SIZE 32
- quantize(AbstractTensor, DType, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
S
- saxpy(float, AbstractTensor, AbstractTensor, int, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- saxpy(AbstractTensor, AbstractTensor, AbstractTensor, int, int, int, int, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- scale(float, AbstractTensor, int, int) - Method in class com.github.tjake.jlama.tensor.operations.NativeTensorOperations
- setupBatch(Function<Integer, MemorySegment>, Function<Integer, MemorySegment>, Function<Integer, MemorySegment>, int) - Static method in class com.github.tjake.jlama.tensor.operations.util.MemorySegmentSupport
All Classes and Interfaces|All Packages