Deploying Llama 2 with FastAPI

Server code

import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, LlamaForCausalLM
import torch

app = FastAPI()

class Query(BaseModel):
    text: str

# device_map="auto" (requires the accelerate package) places the model weights
# on the available GPU(s); inputs are moved to the first GPU below.
device = torch.device("cuda:0")

model_path = 'llama-2-7b-chat-hf'
model = LlamaForCausalLM.from_pretrained(model_path, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_path)

@app.post("/chat/")
async def generate_response(query: Query):
    # Wrap the raw user text in the Llama 2 chat instruction format.
    inputs = f"[INST] {query.text.strip()} [/INST]"

    input_ids = tokenizer(inputs, return_tensors="pt").input_ids.to(device)
    generate_ids = model.generate(
        input_ids,
        max_new_tokens=500,
        do_sample=True,
        top_p=0.85,
        temperature=1.0,
        repetition_penalty=1.0,
        eos_token_id=2,   # Llama 2 </s> end-of-sequence token id
        bos_token_id=1,   # Llama 2 <s> begin-of-sequence token id
        pad_token_id=0)   # Llama 2 has no dedicated pad token; <unk> (id 0) is used

    # Slice off the prompt tokens and skip special tokens so only the model's reply is returned.
    output = tokenizer.batch_decode(
        generate_ids[:, input_ids.shape[-1]:], skip_special_tokens=True)[0]
    return {"result": output}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=6006)
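
In full float32 precision the 7B model needs roughly 28 GB of GPU memory, so on a single consumer GPU it is common to load the weights in half precision, which roughly halves that footprint. A minimal sketch of the alternative loading call, assuming fp16 quality is acceptable for inference:

import torch
from transformers import LlamaForCausalLM

model_path = 'llama-2-7b-chat-hf'

# Half-precision weights roughly halve GPU memory use for the 7B model.
model = LlamaForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,  # assumption: fp16 inference quality is acceptable
    device_map="auto",
)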

Client code

import requests

url = "https://xxxxxxxxxxxx/chat/"
# 使用新的输入格式,包裹用户输入
query = {"text": "[INST] introduce china[/INST]"}  # 修改为使用[INST]标签

response = requests.post(url, json=query)

if response.status_code == 200:
    result = response.json()
    print("chat:", result["result"])
else:
    print("Error:", response.status_code, response.text)
