const api_key = "YOUR_TOKEN_HERE";
const url = "https://api.alphakek.ai/user-info";

fetch(url, {
  method: 'GET',
  headers: { 'Authorization': `Bearer ${api_key}` }
})
  .then(response => {
    if (response.ok) {
      return response.json();
    }
    throw new Error('Network response was not ok.');
  })
  .then(user_data => {
    console.log(`User credits: ${user_data['credits']}`);
    console.log(`User tier: ${user_data['tier']}`);
    console.log(`Amount of $AIKEK in USD: ${user_data['tokens_usd']}`);
  })
  .catch(error => {
    console.error('There has been a problem with your fetch operation:', error);
  });
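If you prefer Python, here is a minimal sketch of the same request. The endpoint and the response fields (credits, tier, tokens_usd) mirror the JavaScript example above; the use of the requests library is an assumption for illustration.

import requests

API_KEY = "YOUR_TOKEN_HERE"
URL = "https://api.alphakek.ai/user-info"

# Same GET request as the JavaScript example, authorized with a Bearer token.
response = requests.get(URL, headers={"Authorization": f"Bearer {API_KEY}"})
response.raise_for_status()  # raise an exception if the response was not ok

user_data = response.json()
print(f"User credits: {user_data['credits']}")
print(f"User tier: {user_data['tier']}")
print(f"Amount of $AIKEK in USD: {user_data['tokens_usd']}")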
Comparing Our AI Models
This code sample calls the API (see API Overview) for each of the three available Alpha Chat AI models, asks them the same question, and streams each response.
Note: Running the code sample below spends 6 API Credits.
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_TOKEN_HERE",
    base_url="https://api.alphakek.ai/v1",
)

question = "What to expect from Ethereum ETFs?"
print("Question:", question)

models = ["versa", "nexus", "eclipse"]

for model in models:
    # Ask each model the same question and stream the reply.
    response = client.chat.completions.create(
        model=model,
        messages=[{'role': 'user', 'content': question}],
        stream=True,
    )
    print(f"\n\nResponse from the {model.capitalize()} model:")
    for chunk in response:
        # Skip chunks that carry no text (e.g. the final stop chunk).
        if chunk.choices[0].delta.content is None:
            continue
        print(chunk.choices[0].delta.content, end="")
import OpenAI from 'openai';

const apiKey = "YOUR_TOKEN_HERE";
const client = new OpenAI({
  apiKey: apiKey,
  baseURL: 'https://api.alphakek.ai/v1',
});

const question = "What to expect from Ethereum ETFs?";
console.log("Question:", question);

const models = ["versa", "nexus", "eclipse"];

// Query the models one at a time (rather than with forEach + async callbacks)
// so the three streamed responses don't interleave on stdout.
for (const model of models) {
  try {
    const stream = await client.chat.completions.create({
      model: model,
      messages: [{ role: 'user', content: question }],
      stream: true,
    });
    console.log(`\n\nResponse from the ${model.charAt(0).toUpperCase() + model.slice(1)} model:`);
    for await (const chunk of stream) {
      if (chunk?.choices[0]?.delta?.content) {
        process.stdout.write(chunk.choices[0].delta.content);
      }
    }
  } catch (error) {
    console.error(`Error streaming from the ${model} model:`, error);
  }
}
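Note that this version uses top-level await, so run it as an ES module: save the file with an .mjs extension, or set "type": "module" in your package.json.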