Compare commits

...

50 Commits

SHA1 Message Date
7d45cb94e4 run 2025-08-01 11:17:10 +02:00
e1af5e13ee run 2025-08-01 11:05:23 +02:00
51beabbe21 run 2025-08-01 10:59:32 +02:00
a85e52553a run 2025-08-01 09:27:44 +02:00
2123cabc61 run 2025-08-01 09:19:08 +02:00
6d87eed427 run 2025-08-01 09:13:39 +02:00
2f98de75b1 run 2025-08-01 09:07:20 +02:00
37bd9caacf run 2025-08-01 08:47:47 +02:00
dada3a66cf run 2025-08-01 08:30:18 +02:00
1bb722f1da run 2025-08-01 07:42:34 +02:00
2d33060a77 run 2025-08-01 07:34:01 +02:00
4e2aa38625 run 2025-08-01 07:30:31 +02:00
ac1e171ae3 run 2025-08-01 06:18:55 +02:00
455ed547c5 run 2025-08-01 05:52:31 +02:00
747235e388 run 2025-08-01 05:42:43 +02:00
f02c51bcde run 2025-08-01 05:37:52 +02:00
d151d8e0d8 run 2025-08-01 05:36:04 +02:00
eee3e8dd8f check 2025-08-01 05:07:45 +02:00
5164e700ee check 2025-08-01 04:46:10 +02:00
a8ba13c0ad run 2025-07-31 19:48:05 +02:00
fdaaef0ca6 run 2025-07-31 18:52:45 +02:00
68af44cf37 run 2025-07-31 17:58:31 +02:00
98f8c3afe0 run 2025-07-31 17:44:55 +02:00
38e5e48c39 run 2025-07-31 17:35:41 +02:00
92c0dae310 run 2025-07-31 17:27:12 +02:00
2825a4fd81 run 2025-07-31 16:35:08 +02:00
7a5c318b5d run 2025-07-31 16:22:52 +02:00
89a880cd99 run 2025-07-31 15:28:59 +02:00
83ac55a315 run 2025-07-31 15:07:25 +02:00
848eec98e0 more info 2025-07-31 12:52:19 +02:00
093fa8ea46 check custom 140:280 2025-07-31 10:33:40 +02:00
b9b4db4773 check custom 2025-07-31 09:43:58 +02:00
f39feb64c3 check custom 2025-07-31 09:30:06 +02:00
19f01303f7 check custom 2025-07-31 09:23:27 +02:00
175b8633cb check custom 2025-07-31 09:17:05 +02:00
35dfd1add7 check custom 2025-07-30 21:54:18 +02:00
8fd2e8083f check custom 2025-07-30 21:46:01 +02:00
0a6da0825a check custom 2025-07-30 21:37:47 +02:00
40b23468be check 1-1-1 2025-07-30 21:06:08 +02:00
0f376bc16e check 1-1 2025-07-30 20:58:06 +02:00
2dc57da071 check 1 2025-07-30 20:51:14 +02:00
90c76fe0db check 1 2025-07-30 20:44:19 +02:00
371b5fd459 check 1 2025-07-30 20:42:24 +02:00
5cf53c48ab check 2025-07-30 18:50:41 +02:00
eb64e5e21c group 2025-07-30 16:56:24 +02:00
00634718b1 print 2025-07-30 16:43:52 +02:00
350fbae53e change runner 2025-07-30 16:10:51 +02:00
ab7262a5a1 env: to be checked 2025-07-30 16:01:06 +02:00
315cb554be remove tf stuff 2025-07-30 15:40:53 +02:00
92e404f087 py39 2025-07-30 15:33:40 +02:00
7 changed files with 198 additions and 655 deletions

View File

@@ -3,80 +3,90 @@ name: Check Tiny Models
on:
  push:
    branches:
      - check_tiny_models*
      - debug_tiny_model_creation
  repository_dispatch:
  schedule:
    - cron: "0 2 * * *"
env:
  TOKEN: ${{ secrets.TRANSFORMERS_HUB_BOT_HF_TOKEN }}
  HF_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  TRANSFORMERS_IS_CI: yes
  TF_FORCE_GPU_ALLOW_GROWTH: true
jobs:
  check_tiny_models:
    name: Check tiny models
    runs-on: ubuntu-22.04
    runs-on:
      group: aws-general-8-plus
    container:
      image: huggingface/transformers-quantization-latest-gpu
      options: --shm-size "16gb" --ipc host
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v4
        with:
          fetch-depth: 2
      # - name: Check cache
      #   working-directory: /transformers
      #   run: ls -la /mnt/cache/
      - uses: actions/checkout@v4
      - name: Set up Python 3.8
        uses: actions/setup-python@v5
        with:
          # Semantic version range syntax or exact version of a Python version
          python-version: '3.8'
          # Optional - x64 or x86 architecture, defaults to x64
          architecture: 'x64'
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}
      - name: Install
      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: |
          sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng cmake
          pip install --upgrade pip
          python -m pip install -U .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm,video,tf-cpu]
          pip install tensorflow_probability
          python -m pip install -U 'natten<0.15.0'
          python3 -m pip uninstall -y transformers && python3 -m pip install -e .
          python3 -m pip install -U essentia librosa pretty_midi
          python3 -m pip uninstall -y natten
      - name: Create all tiny models (locally)
        run: |
          python utils/create_dummy_models.py tiny_local_models --all --num_workers 2
          free -g
      - name: Create all tiny models (locally)
        working-directory: /transformers
        run: |
          CUDA_VISIBLE_DEVICES="" python3 utils/create_dummy_models.py tiny_local_models --all --num_workers 7
      - name: Create all tiny models (locally)
        working-directory: /transformers
        run: |
          python3 utils/check_tiny.py
      - name: Local tiny model reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: tiny_local_model_creation_reports
          path: tiny_local_models/reports
      # GitHub-hosted runners have 2-core CPUs
      - name: Run pipeline tests against all new (local) tiny models
        run: |
          OMP_NUM_THREADS=1 TRANSFORMERS_TINY_MODEL_PATH=tiny_local_models python -m pytest --max-worker-restart=0 -n 2 --dist=loadfile -s -rA --make-reports=tests_pipelines tests/models -m is_pipeline_test -k "test_pipeline_" | tee tests_output.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: tiny_local_model_creation_reports
          path: reports/tests_pipelines
      - name: Create + Upload tiny models for new model architecture(s)
        run: |
          python utils/update_tiny_models.py --num_workers 2
      - name: Full report
        run: cat tiny_models/reports/tiny_model_creation_report.json
      - name: Failure report
        run: cat tiny_models/reports/simple_failed_report.txt
      - name: Summary report
        run: cat tiny_models/reports/tiny_model_summary.json
      - name: New tiny model creation reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: tiny_model_creation_reports
          path: tiny_models/reports
          path: /transformers/tiny_local_models/reports
      #
      # # GitHub-hosted runners have 2-core CPUs
      # - name: Run pipeline tests against all new (local) tiny models
      #   run: |
      #     OMP_NUM_THREADS=1 TRANSFORMERS_TINY_MODEL_PATH=tiny_local_models python -m pytest --max-worker-restart=0 -n 2 --dist=loadfile -s -rA --make-reports=tests_pipelines tests/models -m is_pipeline_test -k "test_pipeline_" | tee tests_output.txt
      #
      # - name: Test suite reports artifacts
      #   if: ${{ always() }}
      #   uses: actions/upload-artifact@v4
      #   with:
      #     name: tiny_local_model_creation_reports
      #     path: reports/tests_pipelines
      #
      # - name: Create + Upload tiny models for new model architecture(s)
      #   run: |
      #     python utils/update_tiny_models.py --num_workers 2
      #
      # - name: Full report
      #   run: cat tiny_models/reports/tiny_model_creation_report.json
      #
      # - name: Failure report
      #   run: cat tiny_models/reports/simple_failed_report.txt
      #
      # - name: Summary report
      #   run: cat tiny_models/reports/tiny_model_summary.json
      #
      # - name: New tiny model creation reports artifacts
      #   if: ${{ always() }}
      #   uses: actions/upload-artifact@v4
      #   with:
      #     name: tiny_model_creation_reports
      #     path: tiny_models/reports
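
Note: the second "Create all tiny models (locally)" step above prefixes the command with CUDA_VISIBLE_DEVICES="", which hides every GPU from the process so the builds stay on CPU even inside the GPU container. A minimal sketch of the effect (assuming PyTorch is installed):

import os

# An empty CUDA_VISIBLE_DEVICES means "no visible devices"; set it before torch touches CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import torch

print(torch.cuda.is_available())  # False: no CUDA device is visible
print(torch.cuda.device_count())  # 0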

View File

@@ -50,506 +50,3 @@ class QAPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if not hasattr(tf_model_mapping, "is_dummy"):
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
torch_dtype="float32",
):
if isinstance(model.config, LxmertConfig):
# This is a bimodal model, we need to find a more consistent way
# to switch on those models.
return None, None
question_answerer = QuestionAnsweringPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
torch_dtype=torch_dtype,
)
examples = [
{"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
{"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."},
]
return question_answerer, examples
def run_pipeline_test(self, question_answerer, _):
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
handle_impossible_answer=True,
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"],
context="HuggingFace was founded in Paris.",
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
outputs = question_answerer(
question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"],
context=[
"HuggingFace is a startup based in New-York",
"HuggingFace is a startup founded in Paris",
],
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
with self.assertRaises(ValueError):
question_answerer(question="", context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question=None, context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context="")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context=None)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}
for i in range(len(outputs))
],
)
for single_output in outputs:
compare_pipeline_output_to_hub_spec(single_output, QuestionAnsweringOutputElement)
# Very long contexts require multiple features
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
# Using batch is OK
if question_answerer.tokenizer.pad_token_id is None:
question_answerer.tokenizer.pad_token_id = question_answerer.model.config.eos_token_id
new_outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20, batch_size=2
)
self.assertEqual(new_outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
self.assertEqual(nested_simplify(outputs), nested_simplify(new_outputs))
@require_torch
def test_small_model_pt(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_fp16(self):
question_answerer = pipeline(
"question-answering",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
torch_dtype=torch.float16,
)
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_bf16(self):
question_answerer = pipeline(
"question-answering",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
torch_dtype=torch.bfloat16,
)
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_iterator(self):
# https://github.com/huggingface/transformers/issues/18510
pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16, framework="pt")
def data():
for i in range(10):
yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}
for outputs in pipe(data()):
self.assertEqual(
nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"}
)
@require_torch
def test_small_model_pt_softmax_trick(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
real_postprocess = question_answerer.postprocess
# Tweak start and stop to make sure we encounter the softmax logits
# bug.
def ensure_large_logits_postprocess(
model_outputs,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
):
for output in model_outputs:
output["start"] = output["start"] * 1e6
output["end"] = output["end"] * 1e6
return real_postprocess(
model_outputs,
top_k=top_k,
handle_impossible_answer=handle_impossible_answer,
max_answer_len=max_answer_len,
)
question_answerer.postprocess = ensure_large_logits_postprocess
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.111, "start": 0, "end": 11, "answer": "HuggingFace"})
@slow
@require_torch
def test_small_model_japanese(self):
question_answerer = pipeline(
"question-answering",
model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head",
)
output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip
# Wrong answer, the whole text is identified as one "word" since the tokenizer does not include
# a pretokenizer
self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip
# Disable word alignment
output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip
self.assertEqual(
nested_simplify(output),
{"score": 1.0, "start": 15, "end": 18, "answer": "教科書"},
)
@slow
@require_torch
def test_small_model_long_context_cls_slow(self):
question_answerer = pipeline(
"question-answering",
model="deepset/roberta-base-squad2",
handle_impossible_answer=True,
max_seq_length=512,
)
outputs = question_answerer(
question="What country is Paris the capital of?",
context="""London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""})
@require_torch
def test_duplicate_handling(self):
question_answerer = pipeline("question-answering", model="deepset/tinyroberta-squad2")
outputs = question_answerer(
question="Who is the chancellor of Germany?",
context="Angela Merkel was the chancellor of Germany.",
top_k=10,
)
answers = [output["answer"] for output in outputs]
self.assertEqual(len(answers), len(set(answers)), "There are duplicate answers in the outputs.")
@slow
@require_torch
def test_large_model_pt(self):
question_answerer = pipeline(
"question-answering",
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@slow
@require_torch
def test_large_model_issue(self):
qa_pipeline = pipeline(
"question-answering",
model="mrm8488/bert-multi-cased-finetuned-xquadv1",
)
outputs = qa_pipeline(
{
"context": (
"Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's"
" order from August this year that had remanded him in police custody for a week in a multi-crore"
" loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud"
" case and some related matters being probed by the CBI and Enforcement Directorate. A single"
" bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on"
" October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special"
" court's order permitting the CBI's request for police custody on August 14 was illegal and in"
" breach of the due process of law. Therefore, his police custody and subsequent judicial custody"
" in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special"
" court's order dated August 14. As per his plea, in August this year, the CBI had moved two"
" applications before the special court, one seeking permission to arrest Kapoor, who was already"
" in judicial custody at the time in another case, and the other, seeking his police custody."
" While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the"
" central agency's plea for his custody. Kapoor, however, said in his plea that before filing an"
" application for his arrest, the CBI had not followed the process of issuing him a notice under"
" Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken"
" prior sanction as mandated under section 17 A of the Prevention of Corruption Act for"
" prosecuting him. The special court, however, had said in its order at the time that as Kapoor"
" was already in judicial custody in another case and was not a free man the procedure mandated"
" under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of"
" appearance was concerned. ADVERTISING It had also said that case records showed that the"
" investigating officer had taken an approval from a managing director of Yes Bank before"
" beginning the proceedings against Kapoor and such a permission was a valid sanction. However,"
" Kapoor in his plea said that the above order was bad in law and sought that it be quashed and"
" set aside. The law mandated that if initial action was not in consonance with legal procedures,"
" then all subsequent actions must be held as illegal, he said, urging the High Court to declare"
" the CBI remand and custody and all subsequent proceedings including the further custody as"
" illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee"
" Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee"
" has stated that she is a resident of the United Kingdom and is unable to travel to India owing"
" to restrictions imposed due to the COVID-19 pandemic. According to the CBI, in the present case,"
" Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused"
" Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was"
" not eligible for the same"
),
"question": "Is this person involved in fraud?",
}
)
self.assertEqual(
nested_simplify(outputs),
{"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261},
)
@slow
@require_torch
def test_large_model_course(self):
question_answerer = pipeline("question-answering")
long_context = """
🤗 Transformers: State of the Art NLP
🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction,
question answering, summarization, translation, text generation and more in over 100 languages.
Its aim is to make cutting-edge NLP easier to use for everyone.
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and
then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and
can be modified to enable quick research experiments.
Why should I use transformers?
1. Easy-to-use state-of-the-art models:
- High performance on NLU and NLG tasks.
- Low barrier to entry for educators and practitioners.
- Few user-facing abstractions with just three classes to learn.
- A unified API for using all our pretrained models.
- Lower compute costs, smaller carbon footprint:
2. Researchers can share trained models instead of always retraining.
- Practitioners can reduce compute time and production costs.
- Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages.
3. Choose the right framework for every part of a model's lifetime:
- Train state-of-the-art models in 3 lines of code.
- Move a single model between TF2.0/PyTorch frameworks at will.
- Seamlessly pick the right framework for training, evaluation and production.
4. Easily customize a model or an example to your needs:
- We provide examples for each architecture to reproduce the results published by its original authors.
- Model internals are exposed as consistently as possible.
- Model files can be used independently of the library for quick experiments.
🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration
between them. It's straightforward to train your models with one before loading them for inference with the other.
"""
question = "Which deep learning libraries back 🤗 Transformers?"
outputs = question_answerer(question=question, context=long_context)
self.assertEqual(
nested_simplify(outputs),
{"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.972, "start": 1892},
)
@require_torch_or_tf
class QuestionAnsweringArgumentHandlerTests(unittest.TestCase):
def test_argument_handler(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
normalized = qa(Q, C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=[Q, Q], context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa({"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{"question": Q, "context": C}, {"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X={"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X=[{"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(data={"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
with self.assertRaises(KeyError):
qa({"context": C})
with self.assertRaises(KeyError):
qa({"question": Q})
with self.assertRaises(KeyError):
qa([{"context": C}])
with self.assertRaises(ValueError):
qa(None, C)
with self.assertRaises(ValueError):
qa("", C)
with self.assertRaises(ValueError):
qa(Q, None)
with self.assertRaises(ValueError):
qa(Q, "")
with self.assertRaises(ValueError):
qa(question=None, context=C)
with self.assertRaises(ValueError):
qa(question="", context=C)
with self.assertRaises(ValueError):
qa(question=Q, context=None)
with self.assertRaises(ValueError):
qa(question=Q, context="")
with self.assertRaises(ValueError):
qa({"question": None, "context": C})
with self.assertRaises(ValueError):
qa({"question": "", "context": C})
with self.assertRaises(ValueError):
qa({"question": Q, "context": None})
with self.assertRaises(ValueError):
qa({"question": Q, "context": ""})
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": None, "context": C}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": "", "context": C}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": Q, "context": None}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": Q, "context": ""}])
with self.assertRaises(ValueError):
qa(question={"This": "Is weird"}, context="This is a context")
with self.assertRaises(ValueError):
qa(question=[Q, Q], context=[C, C, C])
with self.assertRaises(ValueError):
qa(question=[Q, Q, Q], context=[C, C])
def test_argument_handler_old_format(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
# Backward compatibility for this
normalized = qa(question=[Q, Q], context=[C, C])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling_odd(self):
qa = QuestionAnsweringArgumentHandler()
with self.assertRaises(ValueError):
qa(None)
with self.assertRaises(ValueError):
qa(Y=None)
with self.assertRaises(ValueError):
qa(1)
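
Note: for orientation, the core call pattern exercised by the tests above is the question-answering pipeline returning a single answer dict. A minimal sketch using the same tiny checkpoint as the tests (assuming torch is installed and the Hub is reachable):

from transformers import pipeline

qa = pipeline("question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad")
out = qa(question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.")
# A single example yields one dict; top_k > 1 yields a list of such dicts.
print(out["answer"], out["score"], out["start"], out["end"])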

View File

@@ -47,12 +47,7 @@ class TextClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if not hasattr(tf_model_mapping, "is_dummy"):
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def test_small_model_pt(self):

View File

@@ -53,12 +53,7 @@ class TokenClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if not hasattr(tf_model_mapping, "is_dummy"):
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline(
self,

View File

@@ -45,12 +45,7 @@ class ZeroShotClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if not hasattr(tf_model_mapping, "is_dummy"):
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline(
self,
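
Note: the three hunks above all delete the TensorFlow half of the same guard, leaving only the PyTorch-side filter. A minimal self-contained sketch of that pattern, with hypothetical config classes standing in for the real auto-mapping entries:

# _TO_SKIP and the classes below are hypothetical stand-ins.
_TO_SKIP = {"LegacyConfig"}

class LegacyConfig: pass
class BertConfig: pass
class BertModel: pass

model_mapping = {LegacyConfig: None, BertConfig: BertModel}
# The real lazy auto-mappings expose `is_dummy`; a plain dict does not, so it gets filtered.
if not hasattr(model_mapping, "is_dummy"):
    model_mapping = {c: m for c, m in model_mapping.items() if c.__name__ not in _TO_SKIP}
print([c.__name__ for c in model_mapping])  # ['BertConfig']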

utils/check_tiny.py (new file, 32 lines added)
View File

@@ -0,0 +1,32 @@
import os

# Completion markers: one "<model_type>.txt" file per finished tiny-model build.
fns = os.listdir("tiny_local_models")
fns = [x.replace(".txt", "") for x in fns if x.endswith(".txt")]
fns = [x for x in fns if x != "models"]  # models.txt is the request list, not a marker
fns2 = "\n".join(fns)
print(fns2)
print(len(fns))

# models.txt holds two header lines followed by the requested model types.
with open("tiny_local_models/models.txt") as fp:
    data = fp.read()
data = data.split("\n")
data = data[2:]  # drop the two header lines
data2 = "\n".join(data)
print(data2)
print(len(data))

# Requested model types that have no completion marker yet.
no_created = sorted(set(data).difference(fns))
print(len(no_created))
no_created2 = "\n".join(no_created)
print(no_created2)

# Each marker contains the build time in seconds; report them fastest first.
info = []
for fn in fns:
    fn = os.path.join("tiny_local_models", f"{fn}.txt")
    with open(fn) as fp:
        time = fp.read()
    info.append((fn, time))
info = sorted(info, key=lambda x: float(x[1]))
for fn, time in info:
    print(f"{fn}: {time}")

View File

@@ -16,6 +16,7 @@
import argparse
import collections.abc
import copy
import datetime
import inspect
import json
import multiprocessing
@@ -43,7 +44,7 @@ from transformers import (
logging,
)
from transformers.feature_extraction_utils import FeatureExtractionMixin
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.file_utils import is_torch_available
from transformers.image_processing_utils import BaseImageProcessor
from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name
from transformers.models.fsmt import configuration_fsmt
@@ -63,11 +64,8 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if not is_torch_available():
raise ValueError("Please install PyTorch.")
if not is_tf_available():
raise ValueError("Please install TensorFlow.")
FRAMEWORKS = ["pytorch", "tensorflow"]
FRAMEWORKS = ["pytorch"]
INVALID_ARCH = []
TARGET_VOCAB_SIZE = 1024
@@ -759,15 +757,8 @@ def convert_processors(processors, tiny_config, output_folder, result):
def get_checkpoint_dir(output_dir, model_arch):
"""Get framework-agnostic architecture name. Used to save all PT/TF/Flax models into the same directory."""
arch_name = model_arch.__name__
if arch_name.startswith("TF"):
arch_name = arch_name[2:]
elif arch_name.startswith("Flax"):
arch_name = arch_name[4:]
return os.path.join(output_dir, arch_name)
"""Get framework-agnostic architecture name. Used to save all Pytorch models into the same directory."""
return os.path.join(output_dir, model_arch.__name__)
def build_model(model_arch, tiny_config, output_dir):
@@ -874,9 +865,6 @@ def build_composite_models(config_class, output_dir):
GPT2Tokenizer,
GPT2TokenizerFast,
SpeechEncoderDecoderModel,
TFEncoderDecoderModel,
TFVisionEncoderDecoderModel,
TFVisionTextDualEncoderModel,
VisionEncoderDecoderModel,
VisionTextDualEncoderModel,
ViTConfig,
@@ -898,7 +886,6 @@ def build_composite_models(config_class, output_dir):
encoder_class = BertModel
decoder_class = BertLMHeadModel
model_class = EncoderDecoderModel
tf_model_class = TFEncoderDecoderModel
elif config_class.model_type == "vision-encoder-decoder":
encoder_config_class = ViTConfig
decoder_config_class = GPT2Config
@@ -907,7 +894,6 @@
encoder_class = ViTModel
decoder_class = GPT2LMHeadModel
model_class = VisionEncoderDecoderModel
tf_model_class = TFVisionEncoderDecoderModel
elif config_class.model_type == "speech-encoder-decoder":
encoder_config_class = Wav2Vec2Config
decoder_config_class = BertConfig
@@ -916,7 +902,6 @@
encoder_class = Wav2Vec2Model
decoder_class = BertLMHeadModel
model_class = SpeechEncoderDecoderModel
tf_model_class = None
elif config_class.model_type == "vision-text-dual-encoder":
# Not encoder-decoder, but encoder-encoder. We just keep the same name as above to make code easier
encoder_config_class = ViTConfig
@@ -926,17 +911,16 @@
encoder_class = ViTModel
decoder_class = BertModel
model_class = VisionTextDualEncoderModel
tf_model_class = TFVisionTextDualEncoderModel
with tempfile.TemporaryDirectory() as tmpdir:
try:
# build encoder
models_to_create = {"processor": encoder_processor, "pytorch": (encoder_class,), "tensorflow": []}
models_to_create = {"processor": encoder_processor, "pytorch": (encoder_class,)}
encoder_output_dir = os.path.join(tmpdir, "encoder")
build(encoder_config_class, models_to_create, encoder_output_dir)
# build decoder
models_to_create = {"processor": decoder_processor, "pytorch": (decoder_class,), "tensorflow": []}
models_to_create = {"processor": decoder_processor, "pytorch": (decoder_class,)}
decoder_output_dir = os.path.join(tmpdir, "decoder")
build(decoder_config_class, models_to_create, decoder_output_dir)
@@ -964,10 +948,6 @@ def build_composite_models(config_class, output_dir):
)
model.save_pretrained(model_path)
if tf_model_class is not None:
model = tf_model_class.from_pretrained(model_path)
model.save_pretrained(model_path)
# copy the processors
encoder_processor_path = os.path.join(encoder_output_dir, "processors")
decoder_processor_path = os.path.join(decoder_output_dir, "processors")
@@ -980,17 +960,12 @@ def build_composite_models(config_class, output_dir):
result["processor"] = {x.__name__: x.__name__ for x in encoder_processor + decoder_processor}
result["pytorch"] = {model_class.__name__: {"model": model_class.__name__, "checkpoint": model_path}}
result["tensorflow"] = {}
if tf_model_class is not None:
result["tensorflow"] = {
tf_model_class.__name__: {"model": tf_model_class.__name__, "checkpoint": model_path}
}
except Exception:
result["error"] = (
f"Failed to build models for {config_class.__name__}.",
traceback.format_exc(),
)
result["processor"] = {}
if not result["error"]:
del result["error"]
@@ -1087,7 +1062,7 @@ def get_config_overrides(config_class, processors):
return config_overrides
def build(config_class, models_to_create, output_dir):
def _build(config_class, models_to_create, output_dir):
"""Create all models for a certain model type.
Args:
@@ -1100,6 +1075,8 @@ def build(config_class, models_to_create, output_dir):
The directory to save all the checkpoints. Each model architecture will be saved in a subdirectory under
it. Models in different frameworks with the same architecture will be saved in the same subdirectory.
"""
print(f"Create tiny models for {config_class.__name__} ...")
if data["training_ds"] is None or data["testing_ds"] is None:
ds = load_dataset("Salesforce/wikitext", "wikitext-2-raw-v1")
data["training_ds"] = ds["train"]
@@ -1153,6 +1130,7 @@ def build(config_class, models_to_create, output_dir):
trace = traceback.format_exc()
fill_result_with_error(result, error, trace, models_to_create)
logger.error(result["error"][0])
return result
# Convert the processors (reduce vocabulary size, smaller image size, etc.)
@@ -1169,6 +1147,7 @@ def build(config_class, models_to_create, output_dir):
error = f"No processor is returned by `convert_processors` for {config_class.__name__}."
fill_result_with_error(result, error, None, models_to_create)
logger.error(result["error"][0])
return result
try:
@@ -1178,6 +1157,7 @@ def build(config_class, models_to_create, output_dir):
trace = traceback.format_exc()
fill_result_with_error(result, error, trace, models_to_create)
logger.error(result["error"][0])
return result
# Just for us to see this easily in the report
@@ -1226,42 +1206,6 @@ def build(config_class, models_to_create, output_dir):
result["pytorch"][pytorch_arch.__name__]["error"] = (error, trace)
logger.error(f"{pytorch_arch.__name__}: {error}")
for tensorflow_arch in models_to_create["tensorflow"]:
# Make PT/TF weights compatible
pt_arch_name = tensorflow_arch.__name__[2:] # Remove `TF`
pt_arch = getattr(transformers_module, pt_arch_name)
result["tensorflow"][tensorflow_arch.__name__] = {}
error = None
if pt_arch.__name__ in result["pytorch"] and result["pytorch"][pt_arch.__name__]["checkpoint"] is not None:
ckpt = get_checkpoint_dir(output_dir, pt_arch)
# Use the same weights from PyTorch.
try:
model = tensorflow_arch.from_pretrained(ckpt)
model.save_pretrained(ckpt)
except Exception as e:
# Conversion may fail. Let's not create a model with different weights to avoid confusion (for now).
model = None
error = f"Failed to convert the pytorch model to the tensorflow model for {pt_arch}: {e}"
trace = traceback.format_exc()
else:
try:
model = build_model(tensorflow_arch, tiny_config, output_dir=output_dir)
except Exception as e:
model = None
error = f"Failed to create the tensorflow model for {tensorflow_arch}: {e}"
trace = traceback.format_exc()
result["tensorflow"][tensorflow_arch.__name__]["model"] = (
model.__class__.__name__ if model is not None else None
)
result["tensorflow"][tensorflow_arch.__name__]["checkpoint"] = (
get_checkpoint_dir(output_dir, tensorflow_arch) if model is not None else None
)
if error is not None:
result["tensorflow"][tensorflow_arch.__name__]["error"] = (error, trace)
logger.error(f"{tensorflow_arch.__name__}: {error}")
if not result["error"]:
del result["error"]
if not result["warnings"]:
@@ -1270,6 +1214,28 @@ def build(config_class, models_to_create, output_dir):
return result
def build(*args, **kwargs):
s = datetime.datetime.now()
result = _build(*args, **kwargs)
e = datetime.datetime.now()
t = (e-s).total_seconds()
config_class = args[0]
print(f"Finished building models for {config_class.__name__} ...")
print(f"timing: {t}")
print("=" * 40)
# save info to mark done
with open(os.path.join("tiny_local_models", f"{config_class.model_type}.txt"), "w") as fp:
fp.write(f"{t}")
return result
def build_tiny_model_summary(results, organization=None, token=None):
"""Build a summary: a dictionary of the form
{
@@ -1284,7 +1250,12 @@ def build_tiny_model_summary(results, organization=None, token=None):
"""
tiny_model_summary = {}
for config_name in results:
processors = [key for key, value in results[config_name]["processor"].items()]
try:
processors = [key for key, value in results[config_name]["processor"].items()]
except:
print("strange result")
print(config_name)
print(results[config_name])
tokenizer_classes = sorted([x for x in processors if x.endswith("TokenizerFast") or x.endswith("Tokenizer")])
processor_classes = sorted([x for x in processors if x not in tokenizer_classes])
for framework in FRAMEWORKS:
@@ -1411,6 +1382,8 @@ def create_tiny_models(
token,
num_workers=1,
):
print("Create")
clone_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if os.getcwd() != clone_path:
raise ValueError(f"This script should be run from the root of the clone of `transformers` {clone_path}")
@@ -1423,17 +1396,14 @@ def create_tiny_models(
for x in dir(transformers_module)
if x.startswith("MODEL_") and x.endswith("_MAPPING") and x != "MODEL_NAMES_MAPPING"
]
_tensorflow_arch_mappings = [
x for x in dir(transformers_module) if x.startswith("TF_MODEL_") and x.endswith("_MAPPING")
]
pytorch_arch_mappings = [getattr(transformers_module, x) for x in _pytorch_arch_mappings]
tensorflow_arch_mappings = [getattr(transformers_module, x) for x in _tensorflow_arch_mappings]
config_classes = CONFIG_MAPPING.values()
if not all:
config_classes = [CONFIG_MAPPING[model_type] for model_type in model_types]
config_classes = sorted(config_classes, key=lambda x: x.__name__)
# A map from config classes to tuples of processors (tokenizer, feature extractor, processor) classes
processor_type_map = {c: get_processor_types_from_config_class(c) for c in config_classes}
@@ -1441,9 +1411,8 @@ def create_tiny_models(
for c in config_classes:
processors = processor_type_map[c]
models = get_architectures_from_config_class(c, pytorch_arch_mappings, models_to_skip)
tf_models = get_architectures_from_config_class(c, tensorflow_arch_mappings, models_to_skip)
if len(models) + len(tf_models) > 0:
to_create[c] = {"processor": processors, "pytorch": models, "tensorflow": tf_models}
if len(models) > 0:
to_create[c] = {"processor": processors, "pytorch": models}
results = {}
if num_workers <= 1:
@@ -1456,10 +1425,60 @@ def create_tiny_models(
all_build_args = []
for c, models_to_create in list(to_create.items()):
all_build_args.append((c, models_to_create, os.path.join(output_path, c.model_type)))
with multiprocessing.Pool() as pool:
skip = [
"fuyu",
"glpn",
"gpt_bigcode",
"gpt_neo",
"gptj",
"phimoe",
"pix2struct",
"pixtral",
"poolformer",
"pop2piano",
"prompt_depth_anything",
"kyutai_speech_to_text",
"layoutlm",
"led",
"moshi",
"mpt",
]
skip += ["fuyu", "kyutai_speech_to_text", "moshi"]
skip = []
all_build_args = [x for x in all_build_args if x[0].model_type not in skip]
for x in all_build_args:
print(x)
index = 0
base_size = 100
size = base_size * num_workers
start = index * size
end = (index + 1) * size
all_build_args = all_build_args[start:end]
msg1 = f"len(all_build_args) :{len(all_build_args)}"
models = sorted([build_args[0].model_type for build_args in all_build_args])
models = '\n'.join(models)
msg2 = f"model_types:\n{models}"
msg = msg1 + "\n" + msg2
with open(os.path.join("tiny_local_models", "models.txt"), "w") as fp:
fp.write(msg)
with multiprocessing.Pool(num_workers) as pool:
results = pool.starmap(build, all_build_args)
results = {build_args[0].__name__: result for build_args, result in zip(all_build_args, results)}
# for build_args in all_build_args:
# result = build(*build_args)
# results[c.__name__] = result
# print("=" * 40)
if upload:
if organization is None:
raise ValueError("The argument `organization` could not be `None`. No model is uploaded")