CHORE: Ensure PEFT works with huggingface_hub 1.0.0 (#2808)

The reset_sessions function has been removed, but it is also no longer
necessary to call it for the purpose we used it for.

Moreover, the deprecated use_auth_token argument has now been fully
removed, so we no longer pass it anywhere, unless a user passes it
explicitly.

Also, remove the deprecated local_dir_use_symlinks argument.
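
The pattern applied at each call site is sketched below; build_hub_kwargs
is a hypothetical helper name used only for illustration, the actual
changes inline this logic:

    # Minimal sketch of the pattern applied in this commit. The helper name
    # build_hub_kwargs is hypothetical; the real changes inline this logic.
    def build_hub_kwargs(kwargs: dict) -> dict:
        hf_kwargs = {
            "subfolder": kwargs.get("subfolder", None),
            "revision": kwargs.get("revision", None),
            "cache_dir": kwargs.get("cache_dir", None),
            "token": kwargs.get("token", None),
        }
        # only forward the deprecated argument if the caller passed it
        # explicitly, as it no longer exists in huggingface_hub 1.0.0
        if use_auth_token := kwargs.get("use_auth_token", None):
            hf_kwargs["use_auth_token"] = use_auth_token
        return hf_kwargs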
Author:    Benjamin Bossan
Date:      2025-10-02 13:21:02 +02:00
Committer: GitHub
Parent:    815956b9b8
Commit:    24aebeec21

5 files changed, 51 insertions(+), 47 deletions(-)


@@ -422,15 +422,17 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
 
         # load the config
         if config is None:
-            config = PEFT_TYPE_TO_CONFIG_MAPPING[
-                PeftConfig._get_peft_type(
-                    model_id,
-                    subfolder=kwargs.get("subfolder", None),
-                    revision=kwargs.get("revision", None),
-                    cache_dir=kwargs.get("cache_dir", None),
-                    use_auth_token=kwargs.get("use_auth_token", None),
-                )
-            ].from_pretrained(model_id, **kwargs)
+            hf_kwargs = {
+                "subfolder": kwargs.get("subfolder", None),
+                "revision": kwargs.get("revision", None),
+                "cache_dir": kwargs.get("cache_dir", None),
+                "token": kwargs.get("token", None),
+            }
+            if use_auth_token := kwargs.get("use_auth_token", None):
+                hf_kwargs["use_auth_token"] = use_auth_token
+            config = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig._get_peft_type(model_id, **hf_kwargs)].from_pretrained(
+                model_id, **kwargs
+            )
         elif isinstance(config, PeftConfig):
             config.inference_mode = not is_trainable
         else:


@@ -434,16 +434,17 @@ class PeftModel(PushToHubMixin, torch.nn.Module):
 
         # load the config
         if config is None:
-            config = PEFT_TYPE_TO_CONFIG_MAPPING[
-                PeftConfig._get_peft_type(
-                    model_id,
-                    subfolder=kwargs.get("subfolder", None),
-                    revision=kwargs.get("revision", None),
-                    cache_dir=kwargs.get("cache_dir", None),
-                    use_auth_token=kwargs.get("use_auth_token", None),
-                    token=kwargs.get("token", None),
-                )
-            ].from_pretrained(model_id, **kwargs)
+            hf_kwargs = {
+                "subfolder": kwargs.get("subfolder", None),
+                "revision": kwargs.get("revision", None),
+                "cache_dir": kwargs.get("cache_dir", None),
+                "token": kwargs.get("token", None),
+            }
+            if use_auth_token := kwargs.get("use_auth_token", None):
+                hf_kwargs["use_auth_token"] = use_auth_token
+            config = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig._get_peft_type(model_id, **hf_kwargs)].from_pretrained(
+                model_id, **kwargs
+            )
         elif isinstance(config, PeftConfig):
             config.inference_mode = not is_trainable
         else:


@@ -597,17 +597,15 @@ def hotswap_adapter(model, model_name_or_path, adapter_name, torch_device=None,
     ############################
     # LOAD CONFIG AND VALIDATE #
     ############################
-    config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[
-        PeftConfig._get_peft_type(
-            model_name_or_path,
-            subfolder=kwargs.get("subfolder", None),
-            revision=kwargs.get("revision", None),
-            cache_dir=kwargs.get("cache_dir", None),
-            use_auth_token=kwargs.get("use_auth_token", None),
-            token=kwargs.get("token", None),
-        )
-    ]
+    hf_kwargs = {
+        "subfolder": kwargs.get("subfolder", None),
+        "revision": kwargs.get("revision", None),
+        "cache_dir": kwargs.get("cache_dir", None),
+        "token": kwargs.get("token", None),
+    }
+    if use_auth_token := kwargs.get("use_auth_token", None):
+        hf_kwargs["use_auth_token"] = use_auth_token
+    config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig._get_peft_type(model_name_or_path, **hf_kwargs)]
     config = config_cls.from_pretrained(model_name_or_path, **kwargs)
     # config keys that could affect the model output besides what is determined by the state_dict
     check_hotswap_configs_compatible(model.active_peft_config, config)
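
For reference, a hedged usage sketch of hotswap_adapter; the model and
adapter ids are placeholders, and Hub kwargs such as token are forwarded
through **kwargs as shown above:

    from transformers import AutoModelForCausalLM
    from peft import PeftModel
    from peft.utils.hotswap import hotswap_adapter

    # "<base-model>", "<adapter-a>" and "<adapter-b>" are placeholders
    base = AutoModelForCausalLM.from_pretrained("<base-model>")
    model = PeftModel.from_pretrained(base, "<adapter-a>")
    # swap in the weights of a compatible adapter without re-creating modules
    hotswap_adapter(model, "<adapter-b>", adapter_name="default", token=None)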


@@ -95,13 +95,7 @@ def setup_tearndown():
     # provide such a feature
     # download regression artifacts from Hugging Face Hub at the start
-    snapshot_download(
-        repo_id=HF_REPO,
-        local_dir=REGRESSION_DIR,
-        # Don't use symlink, because this prevents us from properly cleaning up the files once finished
-        local_dir_use_symlinks=False,
-    )
+    snapshot_download(repo_id=HF_REPO, local_dir=REGRESSION_DIR)
     yield
     # delete regression artifacts at the end of the test session; optionally, upload them first if in creation mode
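
Dropping the argument is safe because recent huggingface_hub versions
download real files into local_dir instead of symlinking into the cache,
so cleanup keeps working; a minimal sketch of the flow, with placeholder
values standing in for the module's HF_REPO and REGRESSION_DIR constants:

    import shutil

    from huggingface_hub import snapshot_download

    HF_REPO = "<regression-artifacts-repo>"  # placeholder
    REGRESSION_DIR = "/tmp/peft-regression"  # placeholder

    snapshot_download(repo_id=HF_REPO, local_dir=REGRESSION_DIR)
    # ... run the regression tests ...
    shutil.rmtree(REGRESSION_DIR)  # plain files on disk, cleanup just works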


@@ -27,8 +27,6 @@ import pytest
 import torch
 from datasets import Dataset
 from huggingface_hub import snapshot_download
-from huggingface_hub.errors import HfHubHTTPError, LocalEntryNotFoundError
-from huggingface_hub.utils import reset_sessions
 from safetensors.torch import load_file
 from scipy import stats
 from torch import nn

@@ -73,6 +71,13 @@ from peft.utils.hotswap import hotswap_adapter, prepare_model_for_compiled_hotswap
 from .testing_utils import load_dataset_english_quotes, require_deterministic_for_xpu
 
+try:
+    from huggingface_hub.utils import reset_sessions
+except ImportError:
+    # this function was removed in hfh v1.0.0
+    reset_sessions = None
+
 
 class TestLoraInitialization:
     """Test class to check the initialization of LoRA adapters."""

@@ -1986,13 +1991,19 @@ class TestLoadAdapterOfflineMode:
     def hub_offline_ctx(self):
         # this is required to simulate offline mode, setting the env var dynamically inside the test does not work
         # because the value is checked only once at the start of the session
-        with patch("huggingface_hub.constants.HF_HUB_OFFLINE", True):
-            reset_sessions()
-            yield
-            reset_sessions()
+        if reset_sessions is None:
+            # this means we're using huggingface_hub >= 1.0.0, there is no need to call reset_sessions() anymore
+            with patch("huggingface_hub.constants.HF_HUB_OFFLINE", True):
+                yield
+        else:
+            # in huggingface_hub < 1.0.0, it's necessary to reset the session
+            # TODO: remove once huggingface_hub < 1.0.0 is no longer supported
+            with patch("huggingface_hub.constants.HF_HUB_OFFLINE", True):
+                reset_sessions()
+                yield
+                reset_sessions()
 
-    # TODO remove when/if Hub is more stable
-    @pytest.mark.xfail(reason="Test is flaky on CI", raises=HfHubHTTPError)
     def test_load_from_hub_then_offline_model(self):
         # this uses LoRA but it's the same mechanism for other methods
         base_model = AutoModelForCausalLM.from_pretrained(self.base_model)

@@ -2019,8 +2030,6 @@ class TestLoadAdapterOfflineMode:
         snapshot_download(self.base_model, cache_dir=cache_dir)
         snapshot_download(self.peft_model_id, cache_dir=cache_dir)
 
-    # TODO remove when/if Hub is more stable
-    @pytest.mark.xfail(reason="Test is flaky on CI", raises=LocalEntryNotFoundError)
     def test_load_checkpoint_offline_non_default_cache_dir(self, changed_default_cache_dir, tmp_path):
         # See #2373 for context
         self.load_checkpoints(tmp_path)
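
Stand-alone, the huggingface_hub >= 1.0.0 branch of the fixture reduces to
the following sketch; patching the constant is needed because the
HF_HUB_OFFLINE environment variable is read only once per session:

    from contextlib import contextmanager
    from unittest.mock import patch

    @contextmanager
    def hub_offline_ctx():
        # Hub calls inside the block behave as if HF_HUB_OFFLINE=1 were set
        with patch("huggingface_hub.constants.HF_HUB_OFFLINE", True):
            yield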