python lora_train.py
Traceback (most recent call last):
File "LLM-Trainer/lora_train.py", line 26, in
model = AutoModelForCausalLM.from_pretrained(
File "/home/chris/miniconda3/envs/lora/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 563, in from_pretrained
return model_class.from_pretrained(
File "/home/chris/miniconda3/envs/lora/lib/python3.10/site-packages/transformers/modeling_utils.py", line 2977, in from_pretrained
raise ValueError(
ValueError: You can't pass load_in_4bit or load_in_8bit as a kwarg when passing quantization_config argument at the same time.
Fixed by:
model = AutoModelForCausalLM.from_pretrained(
model_name_or_path,
#load_in_8bit=True,
device_map="cuda:0",
trust_remote_code=True,
token=auth_token,
quantization_config=bnb_config,
)
python lora_train.py
Traceback (most recent call last):
File "LLM-Trainer/lora_train.py", line 26, in
model = AutoModelForCausalLM.from_pretrained(
File "/home/chris/miniconda3/envs/lora/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 563, in from_pretrained
return model_class.from_pretrained(
File "/home/chris/miniconda3/envs/lora/lib/python3.10/site-packages/transformers/modeling_utils.py", line 2977, in from_pretrained
raise ValueError(
ValueError: You can't pass load_in_4bit or load_in_8bit as a kwarg when passing quantization_config argument at the same time.
Fixed by:
model = AutoModelForCausalLM.from_pretrained(
model_name_or_path,
#load_in_8bit=True,
device_map="cuda:0",
trust_remote_code=True,
token=auth_token,
quantization_config=bnb_config,
)