convert : fix flake8 lint regarding lambda assignment
@@ -5416,8 +5416,7 @@ class LazyTorchTensor(gguf.LazyBase):
         dtype = cls._dtype_str_map[remote_tensor.dtype]
         shape = remote_tensor.shape
         meta = cls.meta_with_dtype_and_shape(dtype, shape)
-        func = lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape)
-        lazy = cls(meta=meta, args=(remote_tensor,), func=func)
+        lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
         return cast(torch.Tensor, lazy)
 
     @classmethod
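
For context: flake8 (via pycodestyle) flags assigning a lambda expression to a name, presumably warning E731 ("do not assign a lambda expression, use a def"). The commit resolves it by passing the lambda inline at its only call site instead of binding it to func first. Below is a minimal, self-contained sketch of the lint and its two common fixes; the names apply_twice and double are illustrative only and are not part of the converter code.

# Minimal illustration of the flake8 lambda-assignment lint; not repository code.

def apply_twice(func, value):
    # Stand-in for any API that accepts a callable.
    return func(func(value))

# Flagged by flake8 (E731): a lambda assigned to a name.
# double = lambda x: x * 2
# print(apply_twice(double, 3))

# Fix used by this commit: pass the lambda inline at the call site.
print(apply_twice(lambda x: x * 2, 3))  # 12

# Alternative fix: use a small named function instead of an assigned lambda.
def double(x):
    return x * 2

print(apply_twice(double, 3))  # 12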