~/Projects/llama.cpp
git clone https://code.lsong.org/llama.cpp
Commit
- Commit
- 113e685d18ac4edb20f647fd34b000941556f6a6
- Author
- hoangmit <[email protected]>
- Date
- 2023-03-15 15:05:14 -0400
- Diffstat
ggml.c | 5 +++--
inline -> static inline for "bytesFromNibbles" (#161) Without "static" prefix, it fails to compile in clang
diff --git a/ggml.c b/ggml.c
index 42621267b2b7d624f1aff8f91449bef0003b2d83..a0c0dd03b01d400a40f55f7a3b20218a75ebfe4c 100644
--- a/ggml.c
+++ b/ggml.c
@@ -364,8 +364,8 @@
 // ref: https://github.com/ggerganov/ggml/pull/27#issuecomment-1464934600
 #if __AVX2__
 // Unpack 32 4-bit fields into 32 bytes
 // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
-inline __m256i bytesFromNibbles( const uint8_t* rsi )
+static inline __m256i bytesFromNibbles( const uint8_t* rsi )
 {
     // Load 16 bytes from memory
     __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi );
@@ -382,8 +382,9 @@
     bytes = _mm256_or_si256( low, high );
     return bytes;
 }
+
-inline __m128i packNibbles( __m256i bytes )
+static inline __m128i packNibbles( __m256i bytes )
 {
     // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
     const __m256i lowByte = _mm256_set1_epi16( 0xFF );