Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-20 17:13:56 +08:00)
[Styling] stylify using ruff (#27144)

* try to stylify using ruff
* might need to remove these changes?
* use ruff format and ruff check
* use isinstance instead of type comparison
* use # fmt: skip
* use # fmt: skip
* nits
* some styling changes
* update ci job
* nits isinstance
* more files update
* nits
* more nits
* small nits
* check and format
* revert wrong changes
* actually use formatter instead of checker
* nits
* well docbuilder is overwriting this commit
* revert notebook changes
* try to nuke docbuilder
* style
* fix feature extraction test
* remove `indent-width = 4`
* fixup
* more nits
* update the ruff version that we use
* style
* nuke docbuilder styling
* leave the print for detected changes
* nits
* Remove file I/O

Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>

* style
* nits
* revert notebook changes
* Add # fmt: skip when possible
* Add # fmt: skip when possible
* Fix
* More `# fmt: skip` usage
* More `# fmt: skip` usage
* More `# fmt: skip` usage
* Nits
* more fixes
* fix tapas
* Another way to skip
* Recommended way
* Fix two more files
* Remove asynch

---------

Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>
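Two of the mechanical changes the commit message names are worth a minimal sketch. The names below are illustrative, not taken from the actual diff:

```python
# 1. `isinstance` instead of a type comparison: ruff flags `type(x) == list`
#    (pycodestyle rule E721), and `isinstance` also accepts subclasses.
def is_token_list(value):
    return isinstance(value, list)  # previously: type(value) == list


# 2. A trailing `# fmt: skip` pins a hand-formatted line so that
#    `ruff format` leaves its layout alone.
ID_TO_LABEL = {0: "NEGATIVE", 1: "NEUTRAL", 2: "POSITIVE"}  # fmt: skip
```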
@@ -75,4 +75,4 @@ class {{cookiecutter.camelcase_modelname}}TokenizationTest(TokenizerTesterMixin,
             "`self.tmpdirname`."
         )
 
-    # TODO: add tests with hard-coded target values
+    # TODO: add tests with hard-coded target values
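The hunk above touches a cookiecutter tokenizer test that stores its working directory on `self.tmpdirname`. A minimal sketch of that pattern, with an illustrative class name standing in for the template placeholder:

```python
import tempfile
import unittest


class ExampleTokenizationTest(unittest.TestCase):
    """Illustrative stand-in for the templated TokenizationTest class."""

    def setUp(self):
        super().setUp()
        # The tokenizer test mixin saves pretrained tokenizer files under
        # `self.tmpdirname` so each test can reload them from disk.
        self.tmpdirname = tempfile.mkdtemp()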
@@ -502,7 +502,7 @@ def main():
 
         trainer.log_metrics("predict", metrics)
         trainer.save_metrics("predict", metrics)
 
     # write custom code for saving predictions according to task
 
 def _mp_fn(index):
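This hunk sits in the prediction step of an example script. A hedged sketch of the surrounding pattern, assuming a configured `trainer` (a `transformers.Trainer`) and a `predict_dataset` are already in scope:

```python
# `trainer` and `predict_dataset` are assumed to be built earlier in the
# script, as in the HF example scripts.
predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict")
metrics = predict_results.metrics

# Print the metrics, then persist them as predict_results.json in output_dir.
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
```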
@@ -900,7 +900,7 @@ def main():
 
         model.eval()
         for step, batch in enumerate(eval_dataloader):
-            with torch.no_grad():
+            with torch.no_grad():
                 outputs = model(**batch)
             predictions = outputs.logits.argmax(dim=-1)
             metric.add_batch(
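For context, a self-contained version of the evaluation loop this hunk reformats. The helper name and `device` argument are assumptions; `metric` is assumed to expose the `evaluate` library's `add_batch()`/`compute()` interface:

```python
import torch


def evaluate_loop(model, eval_dataloader, metric, device="cpu"):
    # Minimal version of the loop in the hunk above.
    model.eval()
    for batch in eval_dataloader:
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():  # inference only, no gradient tracking
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=predictions, references=batch["labels"])
    return metric.compute()
```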
@@ -137,7 +137,7 @@ class {{cookiecutter.camelcase_modelname}}Config(PretrainedConfig):
     {% else -%}
     keys_to_ignore_at_inference = ["past_key_values"]
     {% endif -%}
 
 {% if cookiecutter.is_encoder_decoder_model == "False" %}
     {%- else %}
     attribute_map = {
@@ -238,4 +238,3 @@ class {{cookiecutter.camelcase_modelname}}Config(PretrainedConfig):
             **kwargs
         )
 
-
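The two config hunks above fill in `keys_to_ignore_at_inference` and `attribute_map`, which are real `PretrainedConfig` class attributes. A minimal sketch of a rendered config, with `ExampleConfig` and its field names made up for illustration:

```python
from transformers import PretrainedConfig


class ExampleConfig(PretrainedConfig):
    model_type = "example"

    # Outputs named here (e.g. cache tensors) are skipped when model
    # outputs are compared at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]

    # Redirects attribute access: `config.num_attention_heads` reads and
    # writes `config.encoder_attention_heads`.
    attribute_map = {"num_attention_heads": "encoder_attention_heads"}

    def __init__(self, vocab_size=30522, encoder_attention_heads=12, **kwargs):
        self.vocab_size = vocab_size
        self.encoder_attention_heads = encoder_attention_heads
        super().__init__(**kwargs)
```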
@@ -541,7 +541,7 @@ def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(
 class Flax{{cookiecutter.camelcase_modelname}}ModelTest(FlaxModelTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
-            Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,
+            Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,
             Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
             Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
             Flax{{cookiecutter.camelcase_modelname}}Model,
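A sketch of how that `all_model_classes` tuple looks once the template is rendered; the BART classes stand in for the cookiecutter placeholders, and the availability guard is an assumption about how such tests are typically skipped:

```python
import unittest

from transformers import is_flax_available


if is_flax_available():
    from transformers import (
        FlaxBartForConditionalGeneration,
        FlaxBartForQuestionAnswering,
        FlaxBartForSequenceClassification,
        FlaxBartModel,
    )


class FlaxModelTestSketch(unittest.TestCase):
    # Same construction as the template hunk above.
    all_model_classes = (
        (
            FlaxBartForConditionalGeneration,
            FlaxBartForQuestionAnswering,
            FlaxBartForSequenceClassification,
            FlaxBartModel,
        )
        if is_flax_available()
        else ()
    )
```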