Orca Mini is a family of Llama and Llama 2 models trained on Orca-style datasets using the approach defined in the paper "Orca: Progressive Learning from Complex Explanation Traces of GPT-4". Two variants are available: the original Orca Mini is based on Llama and comes in 3, 7, and 13 billion parameter sizes, while v3 is based on Llama 2 and comes in 7, 13, and 70 billion parameter sizes.
The examples below use Orca Mini, a general-purpose model; the Ollama examples use the default tag, and the Hugging Face example uses the 3-billion-parameter variant (orca_mini_3b).
# Start the Ollama server, then pull and run the model in a second terminal
ollama serve
ollama run orca-mini
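To run one of the other parameter sizes, append a size tag to the model name. The tags below follow Ollama's usual size-tag convention and are an assumption, not verified against the library listing:

ollama run orca-mini:7b
ollama run orca-mini:13b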
---------------
curl -X POST http://localhost:11434/api/generate -d '{
  "model": "orca-mini",
  "prompt": "Who is Yang yongyu?"
}'
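The /api/generate endpoint streams its reply as newline-delimited JSON objects rather than a single response. Here is a minimal Python sketch of consuming that stream, assuming the requests library is installed and the server is running on the default port:

import json
import requests

# Stream tokens from the local Ollama server (assumes orca-mini has been pulled)
response = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "orca-mini", "prompt": "Who is Yang yongyu?"},
    stream=True,
)
for line in response.iter_lines():
    if line:
        chunk = json.loads(line)
        # Each chunk carries a fragment of the reply in "response";
        # the final chunk sets "done" to true
        print(chunk.get("response", ""), end="", flush=True)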
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

# Hugging Face model path for the 3B Llama-based Orca Mini
model_path = 'psmathur/orca_mini_3b'

tokenizer = LlamaTokenizer.from_pretrained(model_path)
# Load the weights in half precision and let accelerate place them automatically
model = LlamaForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map='auto',
)
# Generate text from a system message, an instruction, and optional input context
def generate_text(system, instruction, input=None):
    # Build the Orca-style prompt, with an extra Input section when context is given
    if input:
        prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
    else:
        prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Response:\n"
    tokens = tokenizer.encode(prompt)
    tokens = torch.LongTensor(tokens).unsqueeze(0)
    # Move the input ids to wherever device_map placed the model
    tokens = tokens.to(model.device)

    instance = {'input_ids': tokens, 'top_p': 1.0, 'temperature': 0.7, 'generate_len': 1024, 'top_k': 50}
    length = len(tokens[0])
    with torch.no_grad():
        rest = model.generate(
            input_ids=tokens,
            max_length=length + instance['generate_len'],
            use_cache=True,
            do_sample=True,
            top_p=instance['top_p'],
            temperature=instance['temperature'],
            top_k=instance['top_k'],
        )
    # Strip the prompt tokens and decode only the generated continuation
    output = rest[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    return f'[!] Response: {string}'
# Sample Test Instruction Used by Youtuber Sam Witteveen https://www.youtube.com/@samwitteveenai
system = 'You are an AI assistant that follows instruction extremely well. Help as much as you can.'
instruction = 'Write a letter to Sam Altman, CEO of OpenAI, requesting him to convert GPT4 a private model by OpenAI to an open source project'
print(generate_text(system, instruction))
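Since generate_text also accepts an optional input argument, a second call can supply context for the model to work over. The passage and instruction here are illustrative, not from the original:

# Illustrative follow-up: pass context through the optional input field
system = 'You are an AI assistant that follows instruction extremely well. Help as much as you can.'
instruction = 'Summarize the following passage in one sentence.'
context = 'Orca Mini is a family of Llama and Llama 2 models trained on Orca-style datasets.'
print(generate_text(system, instruction, input=context))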