/*
 * prompt.h — builds the JSON request body for a chat-completion call
 * (model, message history, max_tokens, temperature) using json-c.
 */
#ifndef CALPACA_PROMPT_H
#define CALPACA_PROMPT_H
#include <json-c/json.h>
#include "messages.h"
#include "http.h"
/* Request defaults; override before calling chat_json().
 * NOTE(review): non-static variable definitions in a header — including this
 * file from more than one translation unit causes multiple-definition link
 * errors. Consider `static`, or `extern` here + one definition in a .c file.
 * NOTE(review): `_prompt` starts with an underscore at file scope, which is
 * a reserved identifier in C — consider renaming. */
char * prompt_model = "gpt-4o-mini"; /* model name sent in the request body */
int prompt_max_tokens = 100;         /* cap on completion tokens */
double prompt_temperature = 0.5;     /* sampling temperature */
/* Most recent prompt object built by chat_json(); released by chat_free(). */
json_object * _prompt =NULL;
/*
 * Release the prompt object held in _prompt, if any.
 * Safe to call repeatedly: after the first call _prompt is NULL
 * and subsequent calls are no-ops.
 */
void chat_free(){
    if (_prompt != NULL) {
        json_object_put(_prompt);
        _prompt = NULL;
    }
}
char * chat_json(char * role, char * message){
chat_free();
message_add(role,message);
struct json_object *root_object = json_object_new_object();
json_object_object_add(root_object, "model", json_object_new_string(prompt_model));
json_object_object_add(root_object, "messages", message_list());
json_object_object_add(root_object, "max_tokens", json_object_new_int(prompt_max_tokens));
json_object_object_add(root_object, "temperature", json_object_new_double(prompt_temperature));
return (char *)json_object_to_json_string_ext(root_object, JSON_C_TO_STRING_PRETTY);
}
#endif