The Easiest and Cheapest Way to Deploy Finetuned Mistral 7B Instruct Model (or Any Model) | by Qendel AI - Freedium

PHOTO EMBED

Thu Nov 30 2023 00:36:58 GMT+0000 (Coordinated Universal Time)

Saved by @mikeee

Copy
import torch
import transformers
from typing import Dict, Any 
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline 


# Pick the half-precision dtype from the GPU architecture: bfloat16 has hardware
# support on compute capability >= 8 (Ampere, Ada, Hopper); older cards fall
# back to float16. The original `== 8` comparison wrongly excluded capability 9
# (Hopper/H100), which also supports bf16.
dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16

class EndpointHandler:
    """Hugging Face Inference Endpoints handler for a causal-LM text-generation model.

    Loads the model in 8-bit quantization and exposes a ``__call__`` that takes the
    endpoint request payload and returns the generated continuation as a string.
    """

    def __init__(self, model_path: str = ""):
        """Load the tokenizer and 8-bit quantized model, then build the pipeline.

        Args:
            model_path: Local path or hub id of the (finetuned) model to serve.
        """
        tokenizer = AutoTokenizer.from_pretrained(model_path)

        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            return_dict=True,
            device_map='auto',
            load_in_8bit=True,  # bitsandbytes 8-bit quantization to fit on smaller GPUs
            torch_dtype=dtype,
            trust_remote_code=True)

        # Mistral-style tokenizers ship without a pad token, in which case
        # tokenizer.pad_token_id is None and the pipeline would be handed
        # pad_token_id=None. Fall back to EOS, the standard workaround.
        if tokenizer.pad_token_id is None:
            tokenizer.pad_token_id = tokenizer.eos_token_id

        self.pipeline = transformers.pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            temperature=0.8,
            repetition_penalty=1.1,
            max_new_tokens=1000,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    def __call__(self, data: Dict[str, Any]) -> str:
        """Generate text for an endpoint request.

        Args:
            data: Request payload. The prompt is read from ``data["inputs"]``;
                if that key is absent, the whole dict is passed through as the
                prompt (pop's default).

        Returns:
            The generated continuation, whitespace-stripped. (The original
            annotation claimed ``Dict[str, Any]``, but a ``str`` is returned.)
        """
        prompt = data.pop("inputs", data)

        llm_response = self.pipeline(
            prompt,
            return_full_text=False,  # return only the continuation, not the echoed prompt
        )

        # The pipeline returns a list of candidate dicts; take the first.
        return llm_response[0]['generated_text'].strip()
content_copyCOPY

https://freedium.cfd/https://medium.com/@qendelai/the-easiest-and-cheapest-way-to-deploy-finetuned-mistral-7b-instruct-model-or-any-model-3f236182e8b8