Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-11-03 09:22:01 +00:00
			
		
		
		
	The goal is to allow running "run" while connected to other streams, such as TCP sockets. Signed-off-by: Thiago Padilha <thiago@padilha.cc>
		
			
				
	
	
		
			11 lines
		
	
	
		
			193 B
		
	
	
	
		
			C++
		
	
	
	
	
	
			
		
		
	
	
			11 lines
		
	
	
		
			193 B
		
	
	
	
		
			C++
		
	
	
	
	
	
#pragma once

// Self-contained header: declare the stream types used in the signature
// below instead of relying on transitive includes (include what you use).
#include <cstdio>
#include <istream>

#include "llama.h"
#include "utils.h"

// Drive text generation on an initialized llama context using caller-supplied
// streams instead of being hard-wired to stdin/stdout/stderr. This indirection
// allows "run" to be used while connected to other streams, such as TCP
// sockets (see the accompanying commit message).
//
// ctx       - initialized llama context; not owned by this function, the
//             caller retains ownership and must keep it alive for the call.
// params    - generation parameters, taken by value so the callee may modify
//             its own copy freely.
// instream  - C++ input stream to read user input from.
// outstream - C stdio stream receiving normal output.
// errstream - C stdio stream receiving error/diagnostic output.
//
// Returns an int status code — presumably 0 on success per C convention;
// TODO(review): confirm against the definition in the corresponding .cpp.
int run(llama_context * ctx,
        gpt_params params,
        std::istream & instream,
        FILE *outstream,
        FILE *errstream);