Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-12 06:54:28 +08:00)
Compare commits
5 Commits
| Author | SHA1 | Date |
|---|---|---|
| | a5b8811bfa | |
| | 54a685a92b | |
| | 8bc6c83175 | |
| | 211e6555fa | |
| | a5b782b0a1 | |
setup.py (7 changed lines)
```diff
@@ -30,7 +30,7 @@ extras["sagemaker"] = [

 setup(
     name="accelerate",
-    version="0.6.0",
+    version="0.6.2",
     description="Accelerate",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
```
```diff
@@ -78,8 +78,9 @@ setup(
 #      twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
 # 6. Check that you can install it in a virtualenv by running:
 #      pip install -i https://testpypi.python.org/pypi accelerate
+#      accelerate env
+#      accelerate test
 # 7. Upload the final version to actual pypi:
 #      twine upload dist/* -r pypi
 # 8. Add release notes to the tag in github once everything is looking hunky-dory.
-# 9. Add the release version to docs/source/_static/js/custom.js and .github/deploy_doc.sh
-# 10. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
+# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
```
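The two inserted steps (`accelerate env` and `accelerate test`) extend the pre-release smoke test after the TestPyPI install. A minimal sketch of scripting that check, assuming a fresh virtualenv with `pip` and the `accelerate` CLI on the PATH (the commands themselves are taken from the checklist above):

```python
import subprocess

# Commands from the release checklist: install the candidate from TestPyPI,
# then run the two accelerate CLI smoke tests added in this patch.
commands = [
    ["pip", "install", "-i", "https://testpypi.python.org/pypi", "accelerate"],
    ["accelerate", "env"],
    ["accelerate", "test"],
]

for cmd in commands:
    # check=True aborts the script as soon as any step fails.
    subprocess.run(cmd, check=True)
```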
src/accelerate/__init__.py

```diff
@@ -2,7 +2,7 @@
 # There's no way to ignore "F401 '...' imported but unused" warnings in this
 # module, but to preserve other warnings. So, don't check this module at all.
 
-__version__ = "0.6.0"
+__version__ = "0.6.2"
 
 from .accelerator import Accelerator
 from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs
```
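Once the patched wheel is installed, the bumped version string can be read back at runtime; a minimal check, assuming the 0.6.2 release is the one installed:

```python
import accelerate

# __version__ comes from src/accelerate/__init__.py, updated to "0.6.2" in this patch.
print(accelerate.__version__)
```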
src/accelerate/accelerator.py

```diff
@@ -478,7 +478,7 @@ class Accelerator:
                 The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
                 that were passed to [`~Accelerator.prepare`].
         """
-        if self.state.use_fp16 and self.native_amp:
+        if self.use_fp16 and self.native_amp:
             if optimizer is None:
                 # TODO: this unscales all optimizers where we should only unscale the one where parameters are.
                 optimizer = self._optimizers
```
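The one-line fix routes the check through the `Accelerator.use_fp16` property instead of the removed `AcceleratorState.use_fp16` attribute, so `unscale_gradients` works again when training in fp16. A sketch of the call pattern the docstring describes, assuming a CUDA-capable machine (fp16 autocast needs a GPU) and a toy model and optimizer; gradient clipping is shown with plain PyTorch utilities:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="fp16")  # fp16 so the unscale branch is exercised

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

batch = torch.randn(8, 4, device=accelerator.device)
loss = model(batch).sum()
accelerator.backward(loss)

# Unscale before clipping so the norm is computed on real gradient values;
# with no argument this covers every optimizer passed to prepare().
accelerator.unscale_gradients()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
```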
src/accelerate/commands/launch.py

```diff
@@ -56,7 +56,6 @@ def launch_command_parser(subparsers=None):
     )
     parser.add_argument(
         "--mixed_precision",
-        default="no",
         type=str,
         choices=["no", "fp16", "bf16"],
         help="Whether or not to use mixed precision training. "
```
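Dropping `default="no"` means an unspecified `--mixed_precision` flag parses as `None`, so the launcher can tell "flag not passed" apart from an explicit `no` and fall back to the value stored in the accelerate config. A standalone sketch of that pattern, with a hypothetical `config_value` standing in for the value read from the config file:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--mixed_precision",
    type=str,
    choices=["no", "fp16", "bf16"],
    help="Whether or not to use mixed precision training.",
)

args = parser.parse_args([])  # flag omitted -> None, not "no"
config_value = "fp16"         # hypothetical value loaded from the saved config

# Only fall back to the config when the CLI flag was left unset.
mixed_precision = args.mixed_precision if args.mixed_precision is not None else config_value
print(mixed_precision)        # -> "fp16"
```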
src/accelerate/state.py

```diff
@@ -268,3 +268,8 @@ class AcceleratorState:
         if self.distributed_type == DistributedType.DEEPSPEED:
             repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
         return repr
+
+    # For backward compatibility
+    @property
+    def use_fp16(self):
+        return self.mixed_precision != "no"
```
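The added property keeps code that still reads `state.use_fp16` working by deriving the boolean from the newer `mixed_precision` field. A self-contained sketch of the same backward-compatibility pattern (the `_State` class here is illustrative, not the library's):

```python
class _State:
    def __init__(self, mixed_precision: str = "no"):
        self.mixed_precision = mixed_precision

    # For backward compatibility: old callers read a boolean flag,
    # newer code stores the mode as a string ("no", "fp16", "bf16").
    @property
    def use_fp16(self) -> bool:
        return self.mixed_precision != "no"


state = _State("fp16")
print(state.use_fp16)      # True  -> legacy checks keep working
print(_State().use_fp16)   # False when mixed precision is disabled
```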