retoor 2025-03-28 20:50:10 +01:00
parent bff6e758bc
commit 89be6b8542
5 changed files with 49 additions and 17 deletions

Makefile

@@ -1,4 +1,4 @@
-all: build build_rpylib run
+all: build build_free build_rpylib run_free
 
 # Variables for compiler and flags
 CC = gcc
@@ -9,9 +9,16 @@ build:
 	$(CC) main.c $(CFLAGS) -o r
 	publish r
 
+build_free:
+	$(CC) -DOLLAMA main.c $(CFLAGS) -o rf
+	publish rf
+
 build_rpylib:
 	$(CC) -shared -o rpylib.so -fPIC rpylib.c -lpython3.12 `python3-config --includes` -I/usr/include/CL -ljson-c -lcurl -lsqlite3
 	publish rpylib.so
 
 run:
 	./r --verbose
+
+run_free:
+	./rf --verbose
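
Note: the new build_free target compiles the very same main.c against the Ollama backend purely through the preprocessor: -DOLLAMA defines a macro that r.h branches on below. A minimal, self-contained sketch of this pattern (the file name demo.c and its contents are illustrative, not the project's code):

/* demo.c - sketch of backend selection via a -D compile flag.
 * Default build:  gcc demo.c -o demo
 * "Free" build:   gcc -DOLLAMA demo.c -o demo_free
 */
#include <stdio.h>

#ifdef OLLAMA
static const char *backend_url = "https://ollama.molodetz.nl/v1";
#else
static const char *backend_url = "https://api.openai.com/v1";
#endif

int main(void) {
  /* The rest of the program can use backend_url unconditionally. */
  printf("talking to: %s\n", backend_url);
  return 0;
}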

main.c

@@ -219,12 +219,20 @@ void repl() {
           is_verbose ? "Verbose mode enabled" : "Verbose mode disabled");
       continue;
     }
+    if (line && *line != '\n') {
+      line_add_history(line);
+    }
+
+    if (!strncmp(line, "!models", 7)) {
+      printf("Current model: %s\n", openai_fetch_models());
+      continue;
+    }
     if (!strncmp(line, "!model", 6)) {
       if (!strncmp(line + 6, " ", 1)) {
         line = line + 7;
         set_prompt_model(line);
       }
-      printf("%s\n", get_prompt_model());
+      printf("Current model: %s\n", get_prompt_model());
       continue;
     }
     if (!strncmp(line, "exit", 4)) {
@@ -239,7 +247,7 @@ void repl() {
       continue;
     }
+
     while (line && *line != '\n') {
-      line_add_history(line);
       char *response = openai_chat("user", line);
       if (response) {
         render(response);
@@ -253,6 +261,8 @@ void repl() {
       }
       free(response);
     } else {
+      exit(0);
+    }
     }
   }
 }
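
Note: the REPL dispatches commands by prefix with strncmp, so order matters: "!models" must be tested before "!model", because strncmp(line, "!model", 6) also matches "!models" (the diff above gets this right). A standalone sketch of that pitfall; the dispatch helper and main are hypothetical, not the project's code:

#include <stdio.h>
#include <string.h>

/* Prefix dispatch: check the longer command first, otherwise
 * strncmp(line, "!model", 6) would also swallow "!models". */
static void dispatch(const char *line) {
  if (!strncmp(line, "!models", 7)) {
    printf("list models\n");
  } else if (!strncmp(line, "!model", 6)) {
    printf("get/set current model\n");
  } else {
    printf("chat: %s\n", line);
  }
}

int main(void) {
  dispatch("!models");    /* prints: list models */
  dispatch("!model gpt"); /* prints: get/set current model */
  dispatch("hello");      /* prints: chat: hello */
  return 0;
}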

r_openai.h

@@ -31,16 +31,13 @@
 #ifndef R_OPENAI_H
 #define R_OPENAI_H
 
 #include "chat.h"
 #include "http_curl.h"
 #include "r.h"
 #include <stdbool.h>
 #include <string.h>
 
-char *openai_fetch_models() {
-  const char *api_url = "https://api.openai.com/v1/models";
-  return curl_get(api_url);
-}
+char *openai_fetch_models() { return curl_get(models_api_url); }
 
 bool openai_system(char *message_content) {
   chat_json("system", message_content);
@@ -50,13 +47,18 @@ bool openai_system(char *message_content) {
 struct json_object *openai_process_chat_message(const char *api_url,
                                                 const char *json_data) {
   char *response = curl_post(api_url, json_data);
+  if (!response) {
+    fprintf(stderr, "Failed to get response.\n");
+    return NULL;
+  }
 
   struct json_object *parsed_json = json_tokener_parse(response);
   if (!parsed_json) {
-    fprintf(stderr, "Failed to parse JSON.\n%s\n", response);
+    fprintf(stderr, "Failed to parse JSON.\nContent: \"%s\"\n", response);
     return NULL;
   }
 
   struct json_object *error_object;
-  if (json_object_object_get_ex(parsed_json, "error", &error_object)) {
+  if (json_object_object_get_ex(parsed_json, "error", &error_object) &&
+      message_array) {
     char *all_messages = (char *)json_object_to_json_string(message_array);
@@ -103,16 +105,16 @@ char *openai_chat(const char *user_role, const char *message_content) {
     return NULL;
   }
 
-  const char *api_url = "https://api.openai.com/v1/chat/completions";
   char *json_data = chat_json(user_role, message_content);
 
   struct json_object *message_object =
-      openai_process_chat_message(api_url, json_data);
-  message_add_object(message_object);
+      openai_process_chat_message(completions_api_url, json_data);
   if (message_object == NULL) {
+    printf("ERROR + NULL IS SUCCESS\n");
     return NULL;
   }
+  message_add_object(message_object);
 
   struct json_object *tool_calls;
   json_object_object_get_ex(message_object, "tool_calls", &tool_calls);
   if (tool_calls) {
@@ -126,7 +128,7 @@ char *openai_chat(const char *user_role, const char *message_content) {
     }
     char *tool_calls_result_str = chat_json(NULL, NULL);
     message_object =
-        openai_process_chat_message(api_url, tool_calls_result_str);
+        openai_process_chat_message(completions_api_url, tool_calls_result_str);
     if (message_object == NULL) {
       return NULL;
     }
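
Note: openai_process_chat_message now guards the two failure modes separately: a NULL body from curl_post (transport error, which must not reach json_tokener_parse) and a body that is not valid JSON. A minimal json-c sketch of the same parse-then-check-"error" flow; check_response and its messages are mine, only the json-c calls (json_tokener_parse, json_object_object_get_ex, json_object_put) are the real API. Build with: gcc x.c -ljson-c.

#include <json-c/json.h>
#include <stdio.h>

/* Returns 0 on success, -1 on transport, parse, or API error. */
static int check_response(const char *body) {
  if (!body) {                      /* transport failed: nothing to parse */
    fprintf(stderr, "no response\n");
    return -1;
  }
  struct json_object *root = json_tokener_parse(body);
  if (!root) {                      /* body was not valid JSON */
    fprintf(stderr, "bad JSON: %s\n", body);
    return -1;
  }
  struct json_object *error;
  if (json_object_object_get_ex(root, "error", &error)) {
    fprintf(stderr, "API error: %s\n", json_object_to_json_string(error));
    json_object_put(root);          /* release our reference */
    return -1;
  }
  json_object_put(root);
  return 0;
}

int main(void) {
  check_response(NULL);                             /* transport error */
  check_response("not json");                       /* parse error */
  check_response("{\"error\":{\"message\":\"x\"}}"); /* API error */
  return check_response("{\"choices\":[]}");        /* success */
}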

r.h

@@ -7,6 +7,19 @@
 #include <string.h>
 
 bool is_verbose = false;
+
+#ifndef OLLAMA
+char *models_api_url = "https://api.openai.com/v1/models";
+// char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
+char *completions_api_url = "https://api.openai.com/v1/chat/completions";
+char *advanced_model = "gpt-4o-mini";
+char *fast_model = "gpt-3.5-turbo";
+#else
+char *models_api_url = "https://ollama.molodetz.nl/v1/models";
+char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
+char *advanced_model = "qwen2.5:3b";
+char *fast_model = "qwen2.5:0.5b";
+#endif
 
 char *_model = NULL;
 #define DB_FILE "~/.r.db"
@@ -23,10 +36,10 @@ void set_prompt_model(const char *model) {
 const char *get_prompt_model() {
   if (auth_type != AUTH_TYPE_API_KEY) {
     if (_model == NULL) {
-      _model = strdup("gpt-3.5-turbo");
+      _model = strdup(fast_model);
     }
   } else if (_model == NULL) {
-    _model = strdup("gpt-4o-mini");
+    _model = strdup(advanced_model);
   }
   return _model;
 }
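
One caveat around DB_FILE "~/.r.db": the C standard library does not expand "~" in paths (that is shell behavior), so callers usually substitute $HOME themselves before opening the file. A hedged sketch of such an expansion helper; expand_home, its buffer policy, and main are mine, not the project's code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Expand a leading "~/" using $HOME into out; returns out, or NULL on error. */
static char *expand_home(const char *path, char *out, size_t out_size) {
  if (path[0] == '~' && path[1] == '/') {
    const char *home = getenv("HOME");
    if (!home)
      return NULL;
    if (snprintf(out, out_size, "%s%s", home, path + 1) >= (int)out_size)
      return NULL;                  /* result would have been truncated */
  } else {
    if (snprintf(out, out_size, "%s", path) >= (int)out_size)
      return NULL;
  }
  return out;
}

int main(void) {
  char buf[4096];
  if (expand_home("~/.r.db", buf, sizeof(buf)))
    printf("db path: %s\n", buf);   /* e.g. /home/user/.r.db */
  return 0;
}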

rpylib.so

Binary file not shown.