Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
56 lines · 1.8 KiB · Python
from __future__ import annotations

import json
import os
import re
from collections import defaultdict
from pathlib import Path
from typing import Any

from tools.stats.import_test_stats import ADDITIONAL_CI_FILES_FOLDER
from tools.testing.target_determination.heuristics.interface import (
    HeuristicInterface,
    TestPrioritizations,
)
from tools.testing.target_determination.heuristics.utils import normalize_ratings
from tools.testing.test_run import TestRun


REPO_ROOT = Path(__file__).resolve().parents[4]


class LLM(HeuristicInterface):
    def __init__(self, **kwargs: dict[str, Any]) -> None:
        super().__init__(**kwargs)

    def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
        critical_tests = self.get_mappings()
        # Keep only the mapped tests that are part of the current run.
        filter_valid_tests = {
            TestRun(test): score
            for test, score in critical_tests.items()
            if test in tests
        }
        # Normalize the filtered scores (0.25 caps this heuristic's weight).
        normalized_scores = normalize_ratings(filter_valid_tests, 0.25)
        return TestPrioritizations(tests, normalized_scores)

    def get_mappings(self) -> dict[str, float]:
        path = (
            REPO_ROOT
            / ADDITIONAL_CI_FILES_FOLDER
            / "llm_results/mappings/indexer-files-gitdiff-output.json"
        )
        if not os.path.exists(path):
            print(f"could not find path {path}")
            return {}
        with open(path) as f:
            # Group by file
            r = defaultdict(list)
            for key, value in json.load(f).items():
                re_match = re.match(r"(.*)\.py", key)
                if re_match:
                    file = re_match.group(1)
                    r[file].append(value)
            # Average the scores for each file
            r = {file: sum(scores) / len(scores) for file, scores in r.items()}
            return r
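
The JSON payload that get_mappings consumes is not shown in this file. From the parsing code, its keys appear to be strings containing a ".py" file name (possibly followed by a test identifier) and its values numeric relevance scores, with per-file scores averaged. A minimal, self-contained sketch of that condensation step, using a hypothetical sample payload rather than real indexer output:

import json
import re
from collections import defaultdict

# Hypothetical sample; the key shapes are an assumption inferred from the
# regex in get_mappings, not taken from a real indexer-files-gitdiff-output.json.
sample = json.loads(
    '{"test_nn.py::test_conv": 0.9, "test_nn.py::test_linear": 0.5, "test_autograd.py": 0.7}'
)

# Same grouping and averaging as LLM.get_mappings, inlined to run standalone.
grouped: dict[str, list[float]] = defaultdict(list)
for key, value in sample.items():
    m = re.match(r"(.*)\.py", key)
    if m:
        grouped[m.group(1)].append(value)

averaged = {file: sum(scores) / len(scores) for file, scores in grouped.items()}
print(averaged)  # {'test_nn': 0.7, 'test_autograd': 0.7}

normalize_ratings itself lives in heuristics/utils.py and is not reproduced here. As a sketch only, one plausible behavior consistent with the 0.25 argument above is proportional rescaling so the top score lands on the cap; the real helper may differ:

def normalize_ratings_sketch(ratings: dict, cap: float) -> dict:
    # Assumed semantics: scale every score so the maximum equals `cap`.
    top = max(ratings.values(), default=0.0)
    if top <= 0.0:
        return dict.fromkeys(ratings, 0.0)
    return {test: score / top * cap for test, score in ratings.items()}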