server: public: use relative routes for static files (#6325)

server: public: support custom `api_url`, default to relative base path

Author: Eric Zhang
@@ -21,6 +21,7 @@ let generation_settings = null;
 //
 export async function* llama(prompt, params = {}, config = {}) {
   let controller = config.controller;
+  const api_url = config.api_url || "";

   if (!controller) {
     controller = new AbortController();
@@ -28,7 +29,7 @@ export async function* llama(prompt, params = {}, config = {}) {

   const completionParams = { ...paramDefaults, ...params, prompt };

-  const response = await fetch("/completion", {
+  const response = await fetch(`${api_url}/completion`, {
     method: 'POST',
     body: JSON.stringify(completionParams),
     headers: {
@@ -193,9 +194,10 @@ export const llamaComplete = async (params, controller, callback) => {
 }

 // Get the model info from the server. This is useful for getting the context window and so on.
-export const llamaModelInfo = async () => {
+export const llamaModelInfo = async (config = {}) => {
   if (!generation_settings) {
-    const props = await fetch("/props").then(r => r.json());
+    const api_url = config.api_url || "";
+    const props = await fetch(`${api_url}/props`).then(r => r.json());
     generation_settings = props.default_generation_settings;
   }
   return generation_settings;
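
With this change, both llama() and llamaModelInfo() accept an optional config.api_url; when it is omitted, the default of "" preserves the previous behavior of relative routes against the page's own origin. Below is a minimal usage sketch; the import path and the http://localhost:8080 base URL are assumptions for illustration, not part of the commit.

// Minimal sketch of the new config.api_url parameter (ES module,
// Node 18+ or a browser). Import path and server URL are assumed.
import { llama, llamaModelInfo } from './completion.js';

const config = { api_url: 'http://localhost:8080' };

// Fetched from `${api_url}/props`; the result is cached in
// generation_settings after the first call.
const settings = await llamaModelInfo(config);
console.log(settings);

// Streamed from `${api_url}/completion`; each yielded chunk carries
// the parsed server event in chunk.data.
for await (const chunk of llama('Hello', { n_predict: 16 }, config)) {
  console.log(chunk.data.content);
}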