Skip to content

Commit 299ef79

Browse files
authored
[Test-fix]: HF_TOKEN param removed from MM test Script (#425)
### Previous Behavior: The `HF_TOKEN` parameter in the test script was explicitly set to an empty string. This caused authentication failures with Hugging Face, as an empty token overrides the environment-provided `HF_TOKEN`, preventing its intended use. ### Current Update: The `HF_TOKEN` parameter has been removed from the script. The script will now default to using the `HF_TOKEN` from the environment, allowing user-provided tokens to be utilized as expected. --------- Signed-off-by: Abukhoyer Shaik <[email protected]>
1 parent 03d9871 commit 299ef79

File tree

1 file changed

+1
-9
lines changed

1 file changed

+1
-9
lines changed

tests/transformers/models/test_image_text_to_text_models.py

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@
2727
from QEfficient.utils.run_utils import ApiRunnerInternVL, ApiRunnerVlm
2828
from QEfficient.utils.test_utils import InternProcessor
2929

30-
HF_TOKEN = ""
3130
NEW_GENERATION_TOKENS = 10
3231
test_models_config = [
3332
# CONFIG PARAMS NEEDED FOR A MODEL TO BE TESTED
@@ -104,21 +103,18 @@
104103
def load_image_text_to_text_model(model_config):
105104
model_path = hf_download(
106105
repo_id=model_config._name_or_path,
107-
hf_token=HF_TOKEN,
108106
ignore_patterns=["*.onnx", "*.ot", "*.md", "*.tflite", "*.pdf", "*.h5", "*.msgpack"],
109107
)
110108
try:
111109
model_hf = AutoModelForImageTextToText.from_pretrained(
112110
model_path,
113111
low_cpu_mem_usage=False,
114-
token=HF_TOKEN,
115112
config=model_config,
116113
)
117114
except ValueError:
118115
model_hf = AutoModelForCausalLM.from_pretrained(
119116
model_path,
120117
low_cpu_mem_usage=False,
121-
token=HF_TOKEN,
122118
trust_remote_code=True,
123119
config=model_config,
124120
)
@@ -160,9 +156,7 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100(
160156
):
161157
model_config = {"model_name": model_name}
162158
model_config["img_size"] = img_size
163-
config = AutoConfig.from_pretrained(
164-
model_config["model_name"], token=HF_TOKEN, trust_remote_code=True, padding=True
165-
)
159+
config = AutoConfig.from_pretrained(model_config["model_name"], trust_remote_code=True, padding=True)
166160
config = set_num_layers(config, n_layer=n_layer)
167161
model_hf, _ = load_image_text_to_text_model(config)
168162
processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True, padding=True)
@@ -199,7 +193,6 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100(
199193
model_config["model_name"],
200194
kv_offload=kv_offload,
201195
config=config,
202-
token=HF_TOKEN,
203196
)
204197

205198
# pytorch_kv_tokens = api_runner.run_vlm_kv_model_on_pytorch(qeff_model.model)
@@ -284,7 +277,6 @@ def check_intern_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100(
284277
model_config["model_name"],
285278
kv_offload=kv_offload,
286279
config=config,
287-
token=HF_TOKEN,
288280
)
289281
# pytorch_kv_tokens = api_runner.run_vlm_kv_model_on_pytorch(qeff_model.model)
290282
# assert (pytorch_hf_tokens == pytorch_kv_tokens).all(), (

0 commit comments

Comments (0)