Log LLM errors to gain insight into why your LLM calls are failing or timing out, trace errors, and identify patterns.
```javascript
// Keep track of your messages
const messages = [
  { role: 'system', content: 'You are Jedi master Yoda.' },
  { role: 'user', content: "What is the favorite fruit of Luke Skywalker?" },
];

try {
  // Send your request
  await openai.chat.completions.create({
    messages,
    model: 'gpt-3.5-turbo',
  });
} catch (error) {
  // Log error using Layerup error logging
  layerup.logError(error, messages);
}
```
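The example above assumes the `openai` and `layerup` clients have already been initialized. A minimal setup sketch is shown below; the `@layerup/layerup-security` package name and the constructor options are assumptions, so check your Layerup dashboard and the SDK docs for the exact values for your project.

```javascript
import OpenAI from 'openai';
// Assumed package name and default export — verify against the Layerup SDK docs
import LayerupSecurity from '@layerup/layerup-security';

// Standard OpenAI client initialization
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Assumed constructor shape; the API key comes from your Layerup dashboard
const layerup = new LayerupSecurity({ apiKey: process.env.LAYERUP_API_KEY });
```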