Fix ckpt in docs (#41659)

* fix ckpt in docs

* fix config ckpt
Raushan Turganbay
2025-10-17 11:00:34 +02:00
committed by GitHub
parent 354567d955
commit 1eb45cd61d
4 changed files with 10 additions and 10 deletions

View File

@@ -70,8 +70,8 @@ from transformers import AutoProcessor, Florence2ForConditionalGeneration
 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
 image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
-model = Florence2ForConditionalGeneration.from_pretrained("microsoft/Florence-2-base", dtype=torch.bfloat16, device_map="auto")
-processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base")
+model = Florence2ForConditionalGeneration.from_pretrained("florence-community/Florence-2-base", dtype=torch.bfloat16, device_map="auto")
+processor = AutoProcessor.from_pretrained("florence-community/Florence-2-base")
 task_prompt = "<OD>"
 inputs = processor(text=task_prompt, images=image, return_tensors="pt").to(model.device)
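
For reference, a minimal end-to-end sketch of this object-detection example with the renamed checkpoint. The generation and decoding lines are not part of the hunk above, and the generation arguments (`max_new_tokens`, `num_beams`) are illustrative assumptions, not taken from this diff:

```python
# Hedged sketch: continues the <OD> example using the renamed checkpoint.
# The generate/decode arguments below are illustrative, not from this diff.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, Florence2ForConditionalGeneration

model = Florence2ForConditionalGeneration.from_pretrained(
    "florence-community/Florence-2-base", dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained("florence-community/Florence-2-base")

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

inputs = processor(text="<OD>", images=image, return_tensors="pt").to(model.device)

# Keep special tokens: Florence-2 task outputs are tagged text (e.g. region markers).
generated_ids = model.generate(**inputs, max_new_tokens=1024, num_beams=3)
print(processor.batch_decode(generated_ids, skip_special_tokens=False)[0])
```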
@@ -105,12 +105,12 @@ from transformers import AutoProcessor, Florence2ForConditionalGeneration, BitsAndBytesConfig
 quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 model = Florence2ForConditionalGeneration.from_pretrained(
-    "microsoft/Florence-2-large",
+    "florence-community/Florence-2-base",
     dtype=torch.bfloat16,
     device_map="auto",
     quantization_config=quantization_config
 )
-processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large")
+processor = AutoProcessor.from_pretrained("florence-community/Florence-2-base")
 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
 image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
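
For context, `BitsAndBytesConfig` accepts more 4-bit options than the `load_in_4bit=True` shown above; a hedged sketch follows. The extra options are not part of this change, and the `bitsandbytes` package must be installed for 4-bit loading to work:

```python
# Hedged sketch: optional 4-bit settings beyond load_in_4bit; not part of this diff.
import torch
from transformers import AutoProcessor, BitsAndBytesConfig, Florence2ForConditionalGeneration

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # NormalFloat4 weight quantization
    bnb_4bit_compute_dtype=torch.bfloat16,  # run matmuls in bf16 while weights stay 4-bit
)

model = Florence2ForConditionalGeneration.from_pretrained(
    "florence-community/Florence-2-base",
    device_map="auto",
    quantization_config=quantization_config,
)
processor = AutoProcessor.from_pretrained("florence-community/Florence-2-base")
```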

View File

@ -140,7 +140,7 @@ class Florence2Config(PreTrainedConfig):
Florence-2 model according to the specified arguments, defining the model architecture. Florence-2 model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence-2 Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence-2
[microsoft/Florence-2-base](https://huggingface.co/microsoft/Florence-2-base) architecture. [florence-community/Florence-2-base](https://huggingface.co/florence-community/Florence-2-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information. documentation from [`PreTrainedConfig`] for more information.
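
The docstring describes instantiating a default configuration; a small sketch of that pattern, assuming `Florence2Config` is exported at the top level of `transformers` as model configs usually are:

```python
# Hedged sketch of the pattern the docstring describes: a default Florence2Config
# approximates the florence-community/Florence-2-base architecture, and a model
# built from it starts with randomly initialized weights.
from transformers import Florence2Config, Florence2ForConditionalGeneration

config = Florence2Config()
model = Florence2ForConditionalGeneration(config)
print(config)
```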

View File

@@ -884,8 +884,8 @@ class Florence2ForConditionalGeneration(Florence2PreTrainedModel, GenerationMixin):
         >>> import requests
         >>> from transformers import AutoProcessor, Florence2ForConditionalGeneration

-        >>> model = Florence2ForConditionalGeneration.from_pretrained("microsoft/Florence-2-large")
-        >>> processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large")
+        >>> model = Florence2ForConditionalGeneration.from_pretrained("florence-community/Florence-2-large")
+        >>> processor = AutoProcessor.from_pretrained("florence-community/Florence-2-large")

         >>> prompt = "<CAPTION>"
         >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"

View File

@@ -160,7 +160,7 @@ class Florence2Config(PreTrainedConfig):
     Florence-2 model according to the specified arguments, defining the model architecture.
     Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence-2
-    [microsoft/Florence-2-base](https://huggingface.co/microsoft/Florence-2-base) architecture.
+    [florence-community/Florence-2-base](https://huggingface.co/florence-community/Florence-2-base) architecture.

     Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PreTrainedConfig`] for more information.
@@ -1674,8 +1674,8 @@ class Florence2ForConditionalGeneration(LlavaForConditionalGeneration):
         >>> import requests
         >>> from transformers import AutoProcessor, Florence2ForConditionalGeneration

-        >>> model = Florence2ForConditionalGeneration.from_pretrained("microsoft/Florence-2-large")
-        >>> processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large")
+        >>> model = Florence2ForConditionalGeneration.from_pretrained("florence-community/Florence-2-large")
+        >>> processor = AutoProcessor.from_pretrained("florence-community/Florence-2-large")

         >>> prompt = "<CAPTION>"
         >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"