convert : fix reflinks for stacked MoE tensors

This commit is contained in:
Francis Couture-Harpin
2025-09-02 15:22:01 -04:00
parent 562aa42c12
commit d921057027
4 changed files with 31 additions and 14 deletions

View File

@@ -21,7 +21,7 @@ class LazyMeta(ABCMeta):
return type(self)._wrap_fn(
(lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)),
use_self=self,
-            data_noop=name in ("view", "reshape", "squeeze", "unsqueeze"),
+            data_noop=name in ("view", "reshape", "squeeze", "unsqueeze", "contiguous"),
)
elif isinstance(meta_attr, self._tensor_type):
# e.g. self.T with torch.Tensor should still be wrapped