Machine Translation Model
BSC-LT/salamandraTA-2B
Model: BSC-LT/salamandraTA-2B
GPU: A10
Code examples
OpenAI Completion (uses the Completions API via the OpenAI client)
# Translate a sentence through an OpenAI-compatible endpoint (e.g. a TGI server).
#
# Requires a .env file providing:
#   HF_TOKEN  - Hugging Face access token, passed as the API key
#   BASE_URL  - base URL of the deployed inference endpoint
#
# pip install openai python-dotenv
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv(".env")
HF_TOKEN = os.environ["HF_TOKEN"]
BASE_URL = os.environ["BASE_URL"]

client = OpenAI(
    base_url=BASE_URL + "/v1/",
    api_key=HF_TOKEN,
)

# SalamandraTA prompt format: "[<src>] <sentence> \n[<tgt>]" — the model
# continues after the target-language tag with the translation.
src_lang_code = 'Spanish'
tgt_lang_code = 'Catalan'
sentence = 'Ayer se fue, tomó sus cosas y se puso a navegar.'
prompt = f'[{src_lang_code}] {sentence} \n[{tgt_lang_code}]'

stream = False
chat_completion = client.completions.create(
    model="tgi",
    prompt=prompt,
    stream=stream,
    max_tokens=1000,
    temperature=0.1,  # Adjust this to fit your needs
    # top_p=0.95,
    # frequency_penalty=0.2,
)

text = ""
if stream:
    # Print each token as it arrives, then the accumulated translation.
    for message in chat_completion:
        text += message.choices[0].text
        print(message.choices[0].text, end="")
    print(text)
else:
    text = chat_completion.choices[0].text
    print(text)

Generate with requests
# Translate a sentence by POSTing directly to the TGI /generate endpoint.
#
# Requires a .env file providing:
#   HF_TOKEN  - Hugging Face access token (sent as a Bearer token)
#   BASE_URL  - base URL of the deployed inference endpoint
#
# pip install torch transformers python-dotenv requests
from dotenv import load_dotenv
import requests
# NOTE(review): these transformers imports are unused in this example — only
# needed if you also run the model locally. Kept for parity with the page.
from transformers import AutoTokenizer, AutoModelForCausalLM
import os

load_dotenv(".env")
HF_TOKEN = os.environ["HF_TOKEN"]
BASE_URL = os.environ["BASE_URL"]

# SalamandraTA prompt format: "[<src>] <sentence> \n[<tgt>]"
src_lang_code = 'Spanish'
tgt_lang_code = 'Catalan'
sentence = 'Ayer se fue, tomó sus cosas y se puso a navegar.'

headers = {
    "Accept": "application/json",
    "Authorization": f"Bearer {HF_TOKEN}",
    "Content-Type": "application/json",
}
prompt = f'[{src_lang_code}] {sentence} \n[{tgt_lang_code}]'
payload = {"inputs": prompt, "parameters": {}}

response = requests.post(BASE_URL + "/generate", headers=headers, json=payload)
# Fail with a clear HTTPError on a 4xx/5xx instead of a confusing KeyError
# when "generated_text" is missing from an error body.
response.raise_for_status()
print(response.json()["generated_text"])

Last updated