Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-28 08:31:25 +00:00)

Commit 3fd62a6b1c
* py : type-check all Python scripts with Pyright

* server-tests : use trailing slash in openai base_url

* server-tests : add more type annotations

* server-tests : strip "chat" from base_url in oai_chat_completions

* server-tests : model metadata is a dict

* ci : disable pip cache in type-check workflow

  The cache is not shared between branches, and it's 250MB in size, so it
  would become quite a big part of the 10GB cache limit of the repo.

* py : fix new type errors from master branch

* tests : fix test-tokenizer-random.py

  Apparently, gcc applies optimisations even when pre-processing, which
  confuses pycparser.

* ci : only show warnings and errors in python type-check

  The "information" level otherwise has entries from
  'examples/pydantic_models_to_grammar.py', which could be confusing for
  someone trying to figure out what failed, considering that these messages
  can safely be ignored even though they look like errors.
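Two of the items above concern how the server tests build the OpenAI-compatible endpoint URL: the base_url gets a trailing slash, and a trailing "chat" path segment is stripped before the chat-completions request is constructed. Below is a minimal sketch of that idea, using a hypothetical normalize_oai_base_url helper; the name, behaviour, and example URLs are illustrative assumptions, not the actual server-tests code.

from urllib.parse import urlsplit, urlunsplit

def normalize_oai_base_url(base_url: str) -> str:
    # Illustrative helper (not the actual server-tests code): ensure the
    # base_url ends with "/" and does not already carry a trailing "chat"
    # segment, so the client can append "chat/completions" itself.
    scheme, netloc, path, query, fragment = urlsplit(base_url)
    segments = [s for s in path.split("/") if s]
    if segments and segments[-1] == "chat":
        segments.pop()
    path = "/" + "/".join(segments) + "/" if segments else "/"
    return urlunsplit((scheme, netloc, path, query, fragment))

# e.g. both of these normalise to "http://127.0.0.1:8080/v1/"
print(normalize_oai_base_url("http://127.0.0.1:8080/v1/chat"))
print(normalize_oai_base_url("http://127.0.0.1:8080/v1"))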
		
			
				
	
	
		
36 lines | 971 B | Python
import asyncio
import asyncio.threads

import numpy as np
import requests

# Send n identical embedding requests to the server at model_url in parallel,
# then compare the returned embeddings with each other.
n = 8

result = []


async def requests_post_async(*args, **kwargs):
    # run the blocking requests.post call in a worker thread
    return await asyncio.threads.to_thread(requests.post, *args, **kwargs)


async def main():
    model_url = "http://127.0.0.1:6900"
    responses: list[requests.Response] = await asyncio.gather(*[requests_post_async(
        url=f"{model_url}/embedding",
        json={"content": str(0) * 1024}
    ) for _ in range(n)])

    for response in responses:
        embedding = response.json()["embedding"]
        print(embedding[-8:])
        result.append(embedding)


asyncio.run(main())

# compute cosine similarity between every pair of embeddings;
# identical inputs should give similarities close to 1.00
for i in range(n - 1):
    for j in range(i + 1, n):
        embedding1 = np.array(result[i])
        embedding2 = np.array(result[j])
        similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
        print(f"Similarity between {i} and {j}: {similarity:.2f}")