We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 2b642a1 commit c963335
Copy full SHA for c963335
1 file changed
examples/models/llama/model.py
@@ -178,7 +178,7 @@ def __init__(self, **kwargs):
178
if checkpoint:
179
self.model_.checkpoint_dtype = get_checkpoint_dtype(checkpoint)
180
else:
181
- self.model_.checkpoint_dtype = None
+ self.model_.checkpoint_dtype = torch.float32
182
183
if "int8" in str(checkpoint_path):
184
print("Using int8 weight-only quantization!")
0 commit comments