mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	Fix CUDA compilation (#1128)
* Fix: CUBLAS compilation error caused by a missing -fPIC flag --------- Co-authored-by: B1gM8c <89020353+B1gM8c@users.noreply.github.com>
This commit is contained in:
		
							
								
								
									
										4
									
								
								Makefile
									
									
									
									
									
								
							
							
						
						
									
										4
									
								
								Makefile
									
									
									
									
									
								
							| @@ -109,9 +109,9 @@ ifdef LLAMA_CUBLAS | ||||
| 	LDFLAGS   += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 | ||||
| 	OBJS      += ggml-cuda.o | ||||
| 	NVCC      = nvcc | ||||
| 	NVCCFLAGS = --forward-unknown-to-host-linker -arch=native | ||||
| 	NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native | ||||
| ggml-cuda.o: ggml-cuda.cu ggml-cuda.h | ||||
| 	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -c $< -o $@ | ||||
| 	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@ | ||||
| endif | ||||
| ifdef LLAMA_GPROF | ||||
| 	CFLAGS   += -pg | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 slaren
					slaren