mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
	Merge branch 'master' into ik/more_metal_optimizations
@@ -1379,7 +1379,13 @@ int main(int argc, char **argv)
                 }
             }
 
-            const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);
+            auto probs = llama.generated_token_probs;
+            if (llama.params.n_probs > 0 && llama.stopped_word) {
+                const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
+                probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
+            }
+
+            const json data = format_final_response(llama, llama.generated_text, probs);
 
             llama_print_timings(llama.ctx);
 
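Note on the hunk above: when generation stopped on a stop word, the trailing stop-word tokens are now dropped from llama.generated_token_probs before format_final_response is called. Below is a minimal standalone C++ sketch of that trimming step, using a hypothetical TokenProb struct in place of completion_token_output; it is an illustration, not the server code itself.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct TokenProb { int tok; float prob; };   // hypothetical stand-in for completion_token_output

    // Drop the last n_stop entries (the tokens of the matched stop word) from the
    // per-token probability list; the size guard is an extra safety check added here.
    static std::vector<TokenProb> trim_stop_word(const std::vector<TokenProb> & probs, std::size_t n_stop) {
        const std::size_t keep = n_stop < probs.size() ? probs.size() - n_stop : 0;
        return std::vector<TokenProb>(probs.begin(), probs.begin() + keep);
    }

    int main() {
        const std::vector<TokenProb> probs = { {11, 0.9f}, {12, 0.8f}, {13, 0.7f} };
        printf("kept %zu of %zu entries\n", trim_stop_word(probs, 1).size(), probs.size());
        return 0;
    }

In the diff itself the number of entries to drop comes from tokenizing llama.stopping_word, which determines how many trailing probabilities belong to the stop word.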
@@ -1456,7 +1462,11 @@ int main(int argc, char **argv)
 
                     if (!llama.has_next_token) {
                         // Generation is done, send extra information.
-                        const json data = format_final_response(llama, "", llama.generated_token_probs);
+                        const json data = format_final_response(
+                            llama,
+                            "",
+                            std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
+                        );
 
                         const std::string str =
                             "data: " +
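Similarly, in the streaming path the final message now builds its probability list from only the first sent_token_probs_index entries of llama.generated_token_probs, presumably the prefix that was already streamed to the client. A hedged sketch of that prefix slice follows; the clamp against the vector size is an extra guard added for this example, not something the diff contains.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        // hypothetical stand-ins for llama.generated_token_probs and sent_token_probs_index
        const std::vector<float> generated = { 0.9f, 0.8f, 0.7f, 0.6f };
        const std::size_t sent_index = 3;

        // take only the prefix that was already sent, clamped to the list size
        const std::size_t n = std::min(sent_index, generated.size());
        const std::vector<float> sent(generated.begin(), generated.begin() + n);

        printf("final response carries %zu of %zu probabilities\n", sent.size(), generated.size());
        return 0;
    }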
							
								
								
									
ggml-cuda.cu (17 lines changed)
@@ -81,12 +81,29 @@
 #if defined(GGML_USE_HIPBLAS)
 #define __CUDA_ARCH__ 1300
 
+#ifndef __has_builtin
+    #define __has_builtin(x) 0
+#endif
+
 typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
 static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
     const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
     const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
+#if __has_builtin(__builtin_elementwise_sub_sat)
     const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
     return reinterpret_cast<const int&>(c);
+#else
+    int8x4_t c;
+    int16_t tmp;
+#pragma unroll
+    for (int i = 0; i < 4; i++) {
+        tmp = va[i] - vb[i];
+        if(tmp > std::numeric_limits<int8_t>::max()) tmp = std::numeric_limits<int8_t>::max();
+        if(tmp < std::numeric_limits<int8_t>::min()) tmp = std::numeric_limits<int8_t>::min();
+        c[i] = tmp;
+    }
+    return reinterpret_cast<int&>(c);
+#endif // __has_builtin(__builtin_elementwise_sub_sat)
 }
 
 static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
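The ggml-cuda.cu hunk guards the HIP shim for __vsubss4: when the compiler provides __builtin_elementwise_sub_sat it is used as before, otherwise a per-lane loop performs the signed saturating subtraction by hand. Below is a host-side C++ reference of that arithmetic, useful only as an illustration of what the four int8 lanes should produce; the names and the main() check are assumptions for this sketch and are not part of ggml-cuda.cu.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Reference for per-byte signed saturating subtraction, i.e. what __vsubss4
    // computes on four packed int8 lanes. Illustrative host code only.
    static int32_t vsubss4_ref(int32_t a, int32_t b) {
        int8_t va[4], vb[4], vc[4];
        std::memcpy(va, &a, 4);
        std::memcpy(vb, &b, 4);
        for (int i = 0; i < 4; ++i) {
            const int tmp = int(va[i]) - int(vb[i]);             // widen before clamping
            vc[i] = int8_t(std::min(127, std::max(-128, tmp)));  // saturate to the int8 range
        }
        int32_t c;
        std::memcpy(&c, vc, 4);
        return c;
    }

    int main() {
        // Each lane of a is -128 (0x80); subtracting 1 saturates at -128 instead of wrapping.
        const int32_t a = int32_t(0x80808080u);
        const int32_t b = 0x01010101;
        printf("0x%08x\n", uint32_t(vsubss4_ref(a, b)));         // prints 0x80808080
        return 0;
    }

The #pragma unroll loop in the fallback does the same clamping with int16_t as the widened type, which is wide enough because the true difference of two int8 values always fits in [-255, 255].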