mirror of https://github.com/huggingface/transformers.git
synced 2025-10-20 17:13:56 +08:00

Compare commits: 13 commits, 3c7552f733 ... ci_with_to
Commits:

d4a141de9c
8e86db779a
49e7bd3e40
28c3bc8a79
49249d3703
4d57257d4d
95eb065772
bcd1da9580
dfa4f3f52c
5fb9dea2cf
f4d5b243a7
ed64d1e6b8
5d63ba4779
@@ -71,13 +71,13 @@ jobs:

       - name: Check failed tests
         working-directory: /transformers
-        run: python3 utils/check_bad_commit.py --start_commit ${{ inputs.start_sha }} --end_commit ${{ env.END_SHA }} --file ci_results_run_models_gpu/new_model_failures.json --output_file new_model_failures_with_bad_commit.json
+        run: python3 utils/check_bad_commit.py --start_commit ${{ inputs.start_sha }} --end_commit ${{ env.END_SHA }} --file ci_results_run_models_gpu/new_model_failures_temp.json --output_file new_model_failures_with_bad_commit_temp.json

       - name: Show results
         working-directory: /transformers
         run: |
-          ls -l new_model_failures_with_bad_commit.json
-          cat new_model_failures_with_bad_commit.json
+          ls -l new_model_failures_with_bad_commit_temp.json
+          cat new_model_failures_with_bad_commit_temp.json

       - name: Checkout back
         working-directory: /transformers
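Note: utils/check_bad_commit.py takes a start/end commit range plus a JSON report of newly failing tests and pinpoints the commit that introduced each failure. A minimal sketch of the underlying idea, assuming a git checkout and a single test id; only the CLI flags above come from this diff, the script's internals here are an assumption:

import subprocess

def find_first_bad_commit(start_commit: str, end_commit: str, test_id: str) -> str:
    """Sketch: locate the first commit where `test_id` starts failing."""
    # start_commit is assumed good (test passed), end_commit bad (test fails).
    subprocess.run(["git", "bisect", "start", end_commit, start_commit], check=True)
    # `git bisect run` replays the command on each candidate commit;
    # a non-zero pytest exit code marks that commit as bad.
    proc = subprocess.run(
        ["git", "bisect", "run", "python3", "-m", "pytest", test_id],
        capture_output=True,
        text=True,
    )
    subprocess.run(["git", "bisect", "reset"], check=True)
    return proc.stdout  # contains the "<sha> is the first bad commit" line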
.github/workflows/model_jobs.yml (vendored): 4 changed lines
@@ -93,6 +93,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Installed torch 2.7 RC
+        working-directory: /transformers
+        run: python3 -m pip install torch==2.7.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/test/cu126
+
       - name: Show installed libraries and their versions
         working-directory: /transformers
         run: pip freeze
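Note: the new step pulls the torch 2.7.0 release candidate from the cu126 test index before `pip freeze` records the environment. A quick sanity check for the RC wheel (a sketch, not part of the workflow; the expected values are inferred from the pip command above):

import torch

assert torch.__version__.startswith("2.7.0"), torch.__version__
print(torch.version.cuda)         # expected "12.6" for cu126 wheels
print(torch.cuda.is_available())  # expected True on the GPU runners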
.github/workflows/self-scheduled-caller.yml (vendored): 120 changed lines
@@ -2,12 +2,12 @@ name: Self-hosted runner (scheduled)

 on:
-  repository_dispatch:
-  schedule:
-    - cron: "17 2 * * *"
+  # repository_dispatch:
+  # schedule:
+  #   - cron: "17 2 * * *"
   push:
     branches:
       - run_scheduled_ci*
+      - ci_with_torch_2.7_commit_0ef339ff1b63bb03a388c79bfbebec9085e10564

 jobs:
   model-ci:
@@ -20,59 +20,59 @@ jobs:
       docker: huggingface/transformers-all-latest-gpu
       ci_event: Daily CI
     secrets: inherit

-  torch-pipeline:
-    name: Torch pipeline CI
-    uses: ./.github/workflows/self-scheduled.yml
-    with:
-      job: run_pipelines_torch_gpu
-      slack_report_channel: "#transformers-ci-daily-pipeline-torch"
-      runner: daily-ci
-      docker: huggingface/transformers-pytorch-gpu
-      ci_event: Daily CI
-    secrets: inherit
-
-  tf-pipeline:
-    name: TF pipeline CI
-    uses: ./.github/workflows/self-scheduled.yml
-    with:
-      job: run_pipelines_tf_gpu
-      slack_report_channel: "#transformers-ci-daily-pipeline-tf"
-      runner: daily-ci
-      docker: huggingface/transformers-tensorflow-gpu
-      ci_event: Daily CI
-    secrets: inherit
-
-  example-ci:
-    name: Example CI
-    uses: ./.github/workflows/self-scheduled.yml
-    with:
-      job: run_examples_gpu
-      slack_report_channel: "#transformers-ci-daily-examples"
-      runner: daily-ci
-      docker: huggingface/transformers-all-latest-gpu
-      ci_event: Daily CI
-    secrets: inherit
-
-  deepspeed-ci:
-    name: DeepSpeed CI
-    uses: ./.github/workflows/self-scheduled.yml
-    with:
-      job: run_torch_cuda_extensions_gpu
-      slack_report_channel: "#transformers-ci-daily-deepspeed"
-      runner: daily-ci
-      docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
-      ci_event: Daily CI
-      working-directory-prefix: /workspace
-    secrets: inherit
-
-  quantization-ci:
-    name: Quantization CI
-    uses: ./.github/workflows/self-scheduled.yml
-    with:
-      job: run_quantization_torch_gpu
-      slack_report_channel: "#transformers-ci-daily-quantization"
-      runner: daily-ci
-      docker: huggingface/transformers-quantization-latest-gpu
-      ci_event: Daily CI
-    secrets: inherit
+#
+#  torch-pipeline:
+#    name: Torch pipeline CI
+#    uses: ./.github/workflows/self-scheduled.yml
+#    with:
+#      job: run_pipelines_torch_gpu
+#      slack_report_channel: "#transformers-ci-daily-pipeline-torch"
+#      runner: daily-ci
+#      docker: huggingface/transformers-pytorch-gpu
+#      ci_event: Daily CI
+#    secrets: inherit
+#
+#  tf-pipeline:
+#    name: TF pipeline CI
+#    uses: ./.github/workflows/self-scheduled.yml
+#    with:
+#      job: run_pipelines_tf_gpu
+#      slack_report_channel: "#transformers-ci-daily-pipeline-tf"
+#      runner: daily-ci
+#      docker: huggingface/transformers-tensorflow-gpu
+#      ci_event: Daily CI
+#    secrets: inherit
+#
+#  example-ci:
+#    name: Example CI
+#    uses: ./.github/workflows/self-scheduled.yml
+#    with:
+#      job: run_examples_gpu
+#      slack_report_channel: "#transformers-ci-daily-examples"
+#      runner: daily-ci
+#      docker: huggingface/transformers-all-latest-gpu
+#      ci_event: Daily CI
+#    secrets: inherit
+#
+#  deepspeed-ci:
+#    name: DeepSpeed CI
+#    uses: ./.github/workflows/self-scheduled.yml
+#    with:
+#      job: run_torch_cuda_extensions_gpu
+#      slack_report_channel: "#transformers-ci-daily-deepspeed"
+#      runner: daily-ci
+#      docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
+#      ci_event: Daily CI
+#      working-directory-prefix: /workspace
+#    secrets: inherit
+#
+#  quantization-ci:
+#    name: Quantization CI
+#    uses: ./.github/workflows/self-scheduled.yml
+#    with:
+#      job: run_quantization_torch_gpu
+#      slack_report_channel: "#transformers-ci-daily-quantization"
+#      runner: daily-ci
+#      docker: huggingface/transformers-quantization-latest-gpu
+#      ci_event: Daily CI
+#    secrets: inherit
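Net effect of this file's changes: the cron and repository_dispatch triggers are commented out, pushes to the ci_with_torch_2.7_commit_* branch trigger the workflow instead, and every job except model-ci is disabled, so the branch exercises only the model CI against the torch 2.7 RC.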
@@ -258,7 +258,7 @@ class NewTaskModelForNewTask(NewTaskModelPreTrainedModel, GenerationMixin):
         input_tensor,
         is_training: bool = False,
     ):
-        if self.config.text_config._attn_implementation == "flash_attention_2":
+        if self.language_model.config._attn_implementation == "flash_attention_2":
             if attention_mask is not None and 0.0 in attention_mask:
                 return attention_mask
             return None
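Note: the same one-line change recurs in the Gemma3 and PaliGemma hunks below: the attention implementation is read from the language submodel's own config instead of the composite config's text_config. A minimal sketch of the pattern with hypothetical names; the two configs can diverge, for instance if the submodule's attention backend is switched after loading:

class CompositeModel:
    """Sketch only: a wrapper model that owns a language submodel."""

    def __init__(self, config, language_model):
        self.config = config                  # composite config, has .text_config
        self.language_model = language_model  # submodule with its own .config

    def _uses_flash_attention(self) -> bool:
        # Old lookup: self.config.text_config._attn_implementation
        # New lookup: ask the module that actually runs attention.
        return self.language_model.config._attn_implementation == "flash_attention_2"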
@@ -381,7 +381,7 @@ class DepthProImageEncoder(nn.Module):
         batch_size, num_channels, height, width = pixel_values.shape

         # scale the image for image_encoder
-        size = self.config.image_model_config.image_size
+        size = self.model.config.image_size
         pixel_values = F.interpolate(
             pixel_values,
             size=(size, size),
@@ -1213,9 +1213,9 @@ class DPTForDepthEstimation(DPTPreTrainedModel):
             hidden_states = backbone_hidden_states

         patch_height, patch_width = None, None
-        if self.config.backbone_config is not None and self.config.is_hybrid is False:
+        if self.backbone is not None and self.config.is_hybrid is False:
             _, _, height, width = pixel_values.shape
-            patch_size = self.config.backbone_config.patch_size
+            patch_size = self.backbone.config.patch_size
             patch_height = height // patch_size
             patch_width = width // patch_size
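Note: this hunk, the DepthPro hunk above, and the VitPose hunk below all apply one pattern: backbone or submodel attributes are read from the instantiated module's own config (self.backbone.config, self.model.config) rather than from the parent config (self.config.backbone_config, self.config.image_model_config), and the guard checks self.backbone itself. The patch-grid arithmetic is unchanged; a worked example with assumed values:

# Assumed example values; the real ones come from self.backbone.config.
height, width = 224, 224
patch_size = 16
patch_height = height // patch_size  # 14
patch_width = width // patch_size    # 14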
@@ -1114,7 +1114,7 @@ class Gemma3ForConditionalGeneration(Gemma3PreTrainedModel, GenerationMixin):
         input_tensor,
         is_training: bool = False,
     ):
-        if self.config.text_config._attn_implementation == "flash_attention_2":
+        if self.language_model.config._attn_implementation == "flash_attention_2":
             return attention_mask

         if attention_mask is not None and attention_mask.dim() == 4:
@@ -788,7 +788,7 @@ class Gemma3ForConditionalGeneration(PaliGemmaForConditionalGeneration):
         input_tensor,
         is_training: bool = False,
     ):
-        if self.config.text_config._attn_implementation == "flash_attention_2":
+        if self.language_model.config._attn_implementation == "flash_attention_2":
             return attention_mask

         if attention_mask is not None and attention_mask.dim() == 4:
@@ -341,7 +341,7 @@ class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin):
         input_tensor=None,
         is_training: Optional[bool] = None,
     ):
-        if self.config.text_config._attn_implementation == "flash_attention_2":
+        if self.language_model.config._attn_implementation == "flash_attention_2":
             if attention_mask is not None and 0.0 in attention_mask:
                 return attention_mask
             return None
@@ -314,8 +314,8 @@ class VitPoseForPoseEstimation(VitPosePreTrainedModel):
         # Turn output hidden states in tensor of shape (batch_size, num_channels, height, width)
         sequence_output = outputs.feature_maps[-1] if return_dict else outputs[0][-1]
         batch_size = sequence_output.shape[0]
-        patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0]
-        patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1]
+        patch_height = self.backbone.config.image_size[0] // self.backbone.config.patch_size[0]
+        patch_width = self.backbone.config.image_size[1] // self.backbone.config.patch_size[1]
         sequence_output = (
             sequence_output.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width).contiguous()
         )
@@ -193,6 +193,8 @@ class BackboneMixin:
         else:
             raise ValueError(f"backbone_type {self.backbone_type} not supported.")

+        self._forward_signature = dict(inspect.signature(self.forward).parameters)
+
     @property
     def out_features(self):
         return self._out_features

@@ -230,8 +232,7 @@ class BackboneMixin:
         return [self.out_feature_channels[name] for name in self.out_features]

     def forward_with_filtered_kwargs(self, *args, **kwargs):
-        signature = dict(inspect.signature(self.forward).parameters)
-        filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature}
+        filtered_kwargs = {k: v for k, v in kwargs.items() if k in self._forward_signature}
        return self(*args, **filtered_kwargs)

     def forward(
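Note: this pair of hunks is a small optimization: inspect.signature is relatively costly, so the forward signature is computed once at construction time and cached as self._forward_signature instead of being recomputed on every forward_with_filtered_kwargs call. A standalone sketch of the same caching pattern (a plain class; the real mixin dispatches through self(...)):

import inspect

class FilteredForward:
    def __init__(self):
        # Computed once here instead of on every call.
        self._forward_signature = dict(inspect.signature(self.forward).parameters)

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        # Drop any kwarg that forward() does not accept.
        filtered = {k: v for k, v in kwargs.items() if k in self._forward_signature}
        return self.forward(*args, **filtered)

    def forward(self, pixel_values=None, output_attentions=False):
        return pixel_values, output_attentions

ff = FilteredForward()
print(ff.forward_with_filtered_kwargs(pixel_values=1, bogus_kwarg=2))  # (1, False)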
@@ -52,6 +52,9 @@ if len(result.stderr) > 0:
     elif "ERROR: not found: " in result.stderr:
         print("test not found in this commit")
         exit(0)
+    elif "ERROR: not found: " in result.stderr:
+        print("test not found in this commit")
+        exit(0)
     else:
         print(f"pytest failed to run: {{result.stderr}}")
         exit(-1)
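Note: the added elif duplicates the branch directly above it verbatim; since elif conditions are tested top to bottom, the second copy can never fire.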
@@ -31,6 +31,7 @@ def get_daily_ci_runs(token, num_runs=7):

 def get_last_daily_ci_runs(token):
     """Get the last completed workflow run id of the scheduled (daily) CI."""
+    return "14277576462"
     workflow_runs = get_daily_ci_runs(token)
     workflow_run_id = None
     for workflow_run in workflow_runs:
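Note: the early return pins the workflow run id to "14277576462" and makes the lookup code after it unreachable; like the _temp renames elsewhere in this branch, it reads as a temporary override for testing.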
@@ -523,20 +523,20 @@ class Message:
         extra_blocks = self.get_new_model_failure_blocks(to_truncate=False)
         if extra_blocks:
             failure_text = extra_blocks[-1]["text"]["text"]
-            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.txt")
+            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.txt")
             with open(file_path, "w", encoding="UTF-8") as fp:
                 fp.write(failure_text)

             # upload results to Hub dataset
-            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.txt")
+            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.txt")
             commit_info = api.upload_file(
                 path_or_fileobj=file_path,
-                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures.txt",
+                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures_temp.txt",
                 repo_id="hf-internal-testing/transformers_daily_ci",
                 repo_type="dataset",
                 token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
             )
-            url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures.txt"
+            url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures_temp.txt"

             # extra processing to save to json format
             new_failed_tests = {}
@@ -550,15 +550,15 @@ class Message:
                     new_failed_tests[model] = {"single-gpu": [], "multi-gpu": []}
                 for url, device in items:
                     new_failed_tests[model][f"{device}-gpu"].append(line)
-            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.json")
+            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.json")
             with open(file_path, "w", encoding="UTF-8") as fp:
                 json.dump(new_failed_tests, fp, ensure_ascii=False, indent=4)

             # upload results to Hub dataset
-            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.json")
+            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.json")
             _ = api.upload_file(
                 path_or_fileobj=file_path,
-                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures.json",
+                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures_temp.json",
                 repo_id="hf-internal-testing/transformers_daily_ci",
                 repo_type="dataset",
                 token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
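Note: throughout this file the report filenames gain a _temp suffix, presumably so this trial branch's artifacts do not overwrite the real daily-CI reports in the hf-internal-testing/transformers_daily_ci dataset.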
@@ -1220,6 +1220,7 @@ if __name__ == "__main__":

     target_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml@refs/heads/main"
     is_scheduled_ci_run = os.environ.get("CI_WORKFLOW_REF") == target_workflow
+    is_scheduled_ci_run = True

     # Only the model testing job is concerned: this condition is to avoid other jobs to upload the empty list as
     # results.
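Note: the added assignment unconditionally overrides the CI_WORKFLOW_REF check on the previous line, so every run of this branch is treated as a scheduled CI run.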
@@ -1228,14 +1229,14 @@ if __name__ == "__main__":
         json.dump(model_results, fp, indent=4, ensure_ascii=False)

     # upload results to Hub dataset (only for the scheduled daily CI run on `main`)
-    if is_scheduled_ci_run:
-        api.upload_file(
-            path_or_fileobj=f"ci_results_{job_name}/model_results.json",
-            path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/model_results.json",
-            repo_id="hf-internal-testing/transformers_daily_ci",
-            repo_type="dataset",
-            token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
-        )
+    # if is_scheduled_ci_run:
+    #     api.upload_file(
+    #         path_or_fileobj=f"ci_results_{job_name}/model_results.json",
+    #         path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/model_results.json",
+    #         repo_id="hf-internal-testing/transformers_daily_ci",
+    #         repo_type="dataset",
+    #         token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
+    #     )

     # Must have the same keys as in `additional_results`.
     # The values are used as the file names where to save the corresponding CI job results.
@@ -1250,14 +1251,14 @@ if __name__ == "__main__":
             json.dump(job_result, fp, indent=4, ensure_ascii=False)

         # upload results to Hub dataset (only for the scheduled daily CI run on `main`)
-        if is_scheduled_ci_run:
-            api.upload_file(
-                path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[job]}_results.json",
-                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/{test_to_result_name[job]}_results.json",
-                repo_id="hf-internal-testing/transformers_daily_ci",
-                repo_type="dataset",
-                token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
-            )
+        # if is_scheduled_ci_run:
+        #     api.upload_file(
+        #         path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[job]}_results.json",
+        #         path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/{test_to_result_name[job]}_results.json",
+        #         repo_id="hf-internal-testing/transformers_daily_ci",
+        #         repo_type="dataset",
+        #         token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
+        #     )

         prev_ci_artifacts = None
         if is_scheduled_ci_run:
@@ -24,7 +24,7 @@ from huggingface_hub import HfApi

 if __name__ == "__main__":
     api = HfApi()

-    with open("new_model_failures_with_bad_commit.json") as fp:
+    with open("new_model_failures_with_bad_commit_temp.json") as fp:
         data = json.load(fp)

     # TODO: extend
@@ -68,16 +68,16 @@ if __name__ == "__main__":
         new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}

     # Upload to Hub and get the url
-    with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
+    with open("new_model_failures_with_bad_commit_grouped_by_authors_temp.json", "w") as fp:
         json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
     commit_info = api.upload_file(
-        path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
-        path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
+        path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors_temp.json",
+        path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors_temp.json",
         repo_id="hf-internal-testing/transformers_daily_ci",
         repo_type="dataset",
         token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
     )
-    url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
+    url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors_temp.json"

     # Add `GH_` prefix as keyword mention
     output = {}
@@ -49,7 +49,37 @@ if __name__ == "__main__":
     tests = os.getcwd()
     model_tests = os.listdir(os.path.join(tests, "models"))
     d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
-    d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
+    # d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests if x not in ["deformable_detr", "grounding_dino", "omdet_turbo", "rt_detr", "nat", "dinat"]]))
+    d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests if x in [
+        "bark",
+        "depth_anything",
+        "depth_pro",
+        "dpt",
+        "emu3",
+        "gemma3",
+        "gpt_neox",
+        "granitemoeshared",
+        "idefics2",
+        "idefics3",
+        "mpt",
+        "paligemma",
+        "paligemma2",
+        "phi3",
+        "prompt_depth_anything",
+        "qwen3",
+        "qwen3_moe",
+        "rag",
+        "reformer",
+        "smolvlm",
+        "superglue",
+        "upernet",
+        "vitmatte",
+        "vitpose",
+        "xglm",
+        "zamba2",
+        "zoedepth",
+    ]
+    ]))
     d1.remove("models")
     d = d2 + d1
@@ -62,4 +92,6 @@ if __name__ == "__main__":
         start = end
         end = start + num_jobs_per_splits + (1 if idx < num_jobs % args.num_splits else 0)
         model_splits.append(d[start:end])
+
+    # model_splits = [["models/vit"]]
     print(model_splits)
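Note: the surrounding loop distributes num_jobs test directories across args.num_splits chunks, giving each of the first num_jobs % num_splits chunks one extra entry. A worked example with assumed inputs:

# Assumed inputs: 10 directories, 3 splits.
d = [f"dir{i}" for i in range(10)]
num_splits = 3
num_jobs = len(d)
num_jobs_per_splits = num_jobs // num_splits  # 3

model_splits, start, end = [], 0, 0
for idx in range(num_splits):
    start = end
    end = start + num_jobs_per_splits + (1 if idx < num_jobs % num_splits else 0)
    model_splits.append(d[start:end])

# Chunk sizes 4 + 3 + 3:
print(model_splits)  # [['dir0', 'dir1', 'dir2', 'dir3'], ['dir4', 'dir5', 'dir6'], ['dir7', 'dir8', 'dir9']]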