How to run inference:
```python
import transformers
import torch


def fmt_prompt(prompt: str) -> str:
    return f"""[Instructions]:\n{prompt}\n\n[Response]:"""


if __name__ == "__main__":
    model_name = "abacaj/mistral-7b-sft"
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)

    model = (
        transformers.AutoModelForCausalLM.from_pretrained(
            model_name,
        )
        .to("cuda:0")
        .eval()
    )

    prompt = "If A is greater than B and B is greater than C does that make A greater than C?"
    prompt_input = fmt_prompt(prompt)
    inputs = tokenizer(prompt_input, return_tensors="pt").to(model.device)

    # Remember where the prompt ends so we can decode only the completion.
    input_ids_cutoff = inputs.input_ids.size(dim=1)

    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            use_cache=True,
            max_new_tokens=512,
            temperature=0.2,
            top_p=0.95,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )

    # Decode only the newly generated tokens, dropping the echoed prompt.
    completion = tokenizer.decode(
        generated_ids[0][input_ids_cutoff:],
        skip_special_tokens=True,
    )

    print(completion)
```
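If you want to see tokens as they are produced instead of waiting for the full completion, `transformers` ships a `TextStreamer` that can be passed to `generate`. A minimal sketch, assuming a reasonably recent `transformers` release and reusing `tokenizer`, `model`, and `inputs` from the snippet above:

```python
from transformers import TextStreamer

# Prints decoded tokens to stdout as they are generated.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

with torch.no_grad():
    model.generate(
        **inputs,
        streamer=streamer,
        max_new_tokens=512,
        temperature=0.2,
        top_p=0.95,
        do_sample=True,
    )
```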
Evals: see the evaluation results table below.

Code to train model: https://github.com/abacaj/train-with-fsdp
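The linked repo contains the actual training code; purely for orientation, here is an illustrative sketch of what FSDP fine-tuning looks like in plain PyTorch. The base model name, learning rate, and loop structure are all assumptions, not taken from the repo:

```python
import os

import torch
import torch.distributed as dist
import transformers
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP


def main():
    # torchrun sets RANK / LOCAL_RANK / WORLD_SIZE in the environment.
    dist.init_process_group("nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)

    model = transformers.AutoModelForCausalLM.from_pretrained(
        "mistralai/Mistral-7B-v0.1",  # assumed base model, not confirmed by this card
        torch_dtype=torch.bfloat16,
    )
    # Shard parameters, gradients, and optimizer state across ranks.
    model = FSDP(model, device_id=local_rank)
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)  # hypothetical LR

    # ... standard training loop over your SFT dataset goes here ...

    dist.destroy_process_group()


if __name__ == "__main__":
    main()
```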
Evaluation results (self-reported):

| Benchmark | pass@1 |
|-----------|--------|
| HumanEval | 54.27  |
| MBPP      | 38.00  |
| MMLU      | 45.89  |
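For context, pass@1 is the fraction of problems solved by a sampled completion. The general pass@k metric is typically computed per problem with the unbiased estimator from the HumanEval paper (Chen et al., 2021), then averaged over problems; a minimal sketch:

```python
import math


def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimator: n samples drawn, c of them correct."""
    if n - c < k:
        return 1.0  # too few failures for a k-sample draw to be all failures
    # 1 - C(n-c, k) / C(n, k), computed as a numerically stable running product
    return 1.0 - math.prod((n - c - i) / (n - i) for i in range(k))


# e.g. 4 of 10 samples passed on a problem -> pass@1 estimate of 0.4
print(pass_at_k(10, 4, 1))
```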