Progress..

retoor 2025-04-03 02:18:41 +02:00
parent d5204b6705
commit d1bb1ae129
4 changed files with 46 additions and 21 deletions


@@ -37,7 +37,7 @@
#include <stdbool.h>
#include <string.h>
-char *openai_fetch_models() { return curl_get(models_api_url); }
+char *openai_fetch_models() { return curl_get(get_models_api_url()); }
bool openai_system(char *message_content) {
chat_json("system", message_content);
@@ -108,7 +108,7 @@ char *openai_chat(const char *user_role, const char *message_content) {
char *json_data = chat_json(user_role, message_content);
struct json_object *message_object =
-openai_process_chat_message(completions_api_url, json_data);
+openai_process_chat_message(get_completions_api_url(), json_data);
if (message_object == NULL) {
return NULL;
@@ -128,7 +128,7 @@ char *openai_chat(const char *user_role, const char *message_content) {
}
char *tool_calls_result_str = chat_json(NULL, NULL);
message_object =
-openai_process_chat_message(completions_api_url, tool_calls_result_str);
+openai_process_chat_message(get_completions_api_url(), tool_calls_result_str);
if (message_object == NULL) {
return NULL;
}
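
Note: all three hunks above make the same substitution: the hard-coded models_api_url / completions_api_url globals are replaced with the new get_models_api_url() / get_completions_api_url() getters, so the endpoint is resolved per request instead of at compile time. Below is a minimal, self-contained sketch of that resolution, mirroring the getters added to r.h; only R_BASE_URL and the URLs come from the commit, resolve() itself is illustrative:

/* Sketch: endpoint resolution with an R_BASE_URL override. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *resolve(const char *fallback, const char *path) {
    static char buf[1024];
    const char *base = getenv("R_BASE_URL");
    if (base == NULL || base[0] == '\0')
        return fallback;                        /* compiled-in default URL */
    snprintf(buf, sizeof(buf), "%s%s%s", base,
             base[strlen(base) - 1] == '/' ? "" : "/", path);
    return buf;
}

int main(void) {
    /* Unset: prints https://api.openai.com/v1/models
       With R_BASE_URL=https://ollama.molodetz.nl it prints
       https://ollama.molodetz.nl/v1/models */
    printf("%s\n", resolve("https://api.openai.com/v1/models", "v1/models"));
    return 0;
}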

r.h (48 changed lines)

@@ -6,39 +6,45 @@
#include <string.h>
bool is_verbose = true;
#ifndef RD
#ifndef OLLAMA
char *models_api_url = "https://api.openai.com/v1/models";
// char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
char *completions_api_url = "https://api.openai.com/v1/chat/completions";
char *advanced_model = "gpt-4o-mini";
char *fast_model = "gpt-3.5-turbo";
#endif
#endif
#ifdef RD
-char *models_api_url = "https://api.openai.com/v1/models";
-char *completions_api_url = "https://api.anthropic.com/v1/chat/completions";
-char *advanced_model = "claude-3-5-haiku-20241022";
+//char *models_api_url = "https://api.openai.com/v1/models";
+//char *completions_api_url = "https://api.anthropic.com/v1/chat/completions";
+//char *advanced_model = "claude-3-5-haiku-20241022";
+//char *advanced_model = "meta-llama/Meta-Llama-3.1-8B-Instruct";
+//char *advanced_model = "google/gemini-1.5-flash";
-char *fast_model = "claude-3-5-haiku-20241022";
+//char *fast_model = "claude-3-5-haiku-20241022";
-#endif
-#ifdef OLLAMA
-char *models_api_url = "https://ollama.molodetz.nl/v1/models";
-char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
-char *advanced_model = "qwen2.5:3b";
+//#endif
+//#ifdef OLLAMA
+//char *models_api_url = "https://ollama.molodetz.nl/v1/models";
+//char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
+//char *advanced_model = "qwen2.5:3b";
+//char *advanced_model = "qwen2.5-coder:0.5b";
-char *fast_model = "qwen2.5:0.5b";
-#endif
+//char *fast_model = "qwen2.5:0.5b";
+//#endif
char *_model = NULL;
#define DB_FILE "~/.r.db"
#define PROMPT_TEMPERATURE 0.1
+char * get_completions_api_url() {
+if(getenv("R_BASE_URL") != NULL) {
+return joinpath(getenv("R_BASE_URL"), "v1/chat/completions");
+}
+return completions_api_url;
+}
+char * get_models_api_url() {
+if(getenv("R_BASE_URL") != NULL) {
+return joinpath(getenv("R_BASE_URL"), "v1/models");
+}
+return models_api_url;
+}
void set_prompt_model(const char *model) {
if (_model != NULL) {
free(_model);
@@ -47,6 +53,12 @@ void set_prompt_model(const char *model) {
}
const char *get_prompt_model() {
+if(_model == NULL && getenv("R_MODEL") != NULL) {
+_model = getenv("R_MODEL");
+}
+if(_model){
+return _model;
+}
if (auth_type != AUTH_TYPE_API_KEY) {
if (_model == NULL) {
_model = strdup(fast_model);
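
Note: r.h now consults the environment before its compiled-in defaults. R_BASE_URL rebases both endpoints through joinpath() (added in utils.h below), and R_MODEL seeds the model in get_prompt_model(). One caveat visible in the hunk: the getenv("R_MODEL") pointer is stored directly in _model, which set_prompt_model() later passes to free(). Below is a small self-contained sketch of the resulting precedence; pick_model() is a hypothetical stand-in, and the default assumes the truncated fallback path uses fast_model:

/* Sketch of get_prompt_model()'s precedence: explicitly set model first,
   then R_MODEL from the environment, then the compiled-in default. */
#include <stdio.h>
#include <stdlib.h>

static const char *pick_model(const char *set_model, const char *fallback) {
    if (set_model != NULL)
        return set_model;              /* value from set_prompt_model() */
    const char *env = getenv("R_MODEL");
    if (env != NULL)
        return env;                    /* environment override */
    return fallback;                   /* e.g. fast_model from r.h */
}

int main(void) {
    printf("%s\n", pick_model(NULL, "gpt-3.5-turbo"));
    return 0;
}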

rpylib.so (binary file not shown)

utils.h (13 changed lines)

@@ -90,6 +90,19 @@ unsigned long hash(const char *str) {
return hash;
}
+char * joinpath(const char *base_url, const char *path) {
+static char result[1024];
+result[0] = '\0';
+strcat(result, base_url);
+if(base_url[strlen(base_url) - 1] != '/') {
+strcat(result, "/");
+}
+if(path[0] == '/') {
+path++;
+}
+strcat(result, path);
+return result;
+}
char *read_file(const char *path) {
char *expanded_path = expand_home_directory(path);
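
Note: joinpath() returns a pointer into a fixed 1024-byte static buffer, so each call overwrites the previous result, the function is not reentrant, and long inputs would overflow the unchecked strcat() calls. A hedged alternative sketch, not part of the commit: the same single-slash join, heap-allocated and length-checked, where the caller owns and free()s the result:

/* Sketch only: heap-allocating variant of joinpath() with the same
   semantics (exactly one '/' between base_url and path). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char *joinpath_dup(const char *base_url, const char *path) {
    if (path[0] == '/')
        path++;                                 /* avoid a doubled slash */
    size_t blen = strlen(base_url);
    int need_sep = (blen == 0 || base_url[blen - 1] != '/');
    size_t len = blen + (size_t)need_sep + strlen(path) + 1;
    char *result = malloc(len);
    if (result != NULL)
        snprintf(result, len, "%s%s%s", base_url, need_sep ? "/" : "", path);
    return result;
}

int main(void) {
    char *url = joinpath_dup("https://ollama.molodetz.nl", "/v1/models");
    if (url != NULL) {
        printf("%s\n", url);   /* https://ollama.molodetz.nl/v1/models */
        free(url);
    }
    return 0;
}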