Update: add a free Ollama-backed build (build_free, -DOLLAMA), a !models REPL command, configurable API URLs and model defaults in r.h, and NULL-response guards in openai.h.
parent bff6e758bc
commit 89be6b8542
Makefile: 9 changed lines
@@ -1,4 +1,4 @@
-all: build build_rpylib run
+all: build build_free build_rpylib run_free
 
 # Variables for compiler and flags
 CC = gcc
@@ -9,9 +9,16 @@ build:
 	$(CC) main.c $(CFLAGS) -o r
 	publish r
 
+build_free:
+	$(CC) -DOLLAMA main.c $(CFLAGS) -o rf
+	publish rf
+
 build_rpylib:
 	$(CC) -shared -o rpylib.so -fPIC rpylib.c -lpython3.12 `python3-config --includes` -I/usr/include/CL -ljson-c -lcurl -lsqlite3
 	publish rpylib.so
 
 run:
 	./r --verbose
+
+run_free:
+	./rf --verbose
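Note: build_free compiles the same main.c with -DOLLAMA, which selects the Ollama endpoints and models added to r.h further down in this commit (publish appears to be a project-specific deploy command). A minimal standalone sketch of how the compile-time switch behaves; the URLs are copied from this commit's r.h, while the file name demo.c is purely hypothetical:

/* demo.c: sketch of the -DOLLAMA switch behind the build_free target.
 *   gcc demo.c -o demo           -> prints the OpenAI endpoint
 *   gcc -DOLLAMA demo.c -o demo  -> prints the Ollama endpoint */
#include <stdio.h>

#ifndef OLLAMA
static const char *completions_api_url = "https://api.openai.com/v1/chat/completions";
#else
static const char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
#endif

int main(void) {
  printf("chat endpoint: %s\n", completions_api_url);
  return 0;
}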
main.c: 14 changed lines
@@ -219,12 +219,20 @@ void repl() {
              is_verbose ? "Verbose mode enabled" : "Verbose mode disabled");
       continue;
     }
+    if (line && *line != '\n') {
+      line_add_history(line);
+    }
+    if (!strncmp(line, "!models", 7)) {
+      printf("Current model: %s\n", openai_fetch_models());
+      continue;
+    }
     if (!strncmp(line, "!model", 6)) {
       if (!strncmp(line + 6, " ", 1)) {
         line = line + 7;
         set_prompt_model(line);
       }
-      printf("%s\n", get_prompt_model());
+      printf("Current model: %s\n", get_prompt_model());
       continue;
     }
     if (!strncmp(line, "exit", 4)) {
@@ -239,7 +247,7 @@ void repl() {
       continue;
     }
     while (line && *line != '\n') {
-      line_add_history(line);
       char *response = openai_chat("user", line);
       if (response) {
         render(response);
@@ -253,6 +261,8 @@ void repl() {
         }
 
         free(response);
+      } else {
+        exit(0);
       }
     }
   }
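Two details worth noting in this hunk. History recording moves out of the chat loop to a single guarded call before command dispatch, so commands like !model are recorded too. And the ordering of the handlers matters: strncmp(line, "!model", 6) also matches the longer command "!models", so the new !models handler must run before the !model handler, exactly as the diff places it. A standalone sketch of that dispatch, with a hypothetical input value:

/* sketch: prefix matching makes handler order significant in repl() */
#include <stdio.h>
#include <string.h>

int main(void) {
  const char *line = "!models";            /* hypothetical REPL input */
  if (!strncmp(line, "!models", 7)) {
    puts("list models");                   /* matched: checked first */
  } else if (!strncmp(line, "!model", 6)) {
    puts("show or set current model");     /* would also match "!models" */
  }
  return 0;
}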
openai.h: 26 changed lines
@@ -31,16 +31,13 @@
 
 #ifndef R_OPENAI_H
 #define R_OPENAI_H
 
 #include "chat.h"
 #include "http_curl.h"
+#include "r.h"
 #include <stdbool.h>
 #include <string.h>
 
-char *openai_fetch_models() {
-  const char *api_url = "https://api.openai.com/v1/models";
-  return curl_get(api_url);
-}
+char *openai_fetch_models() { return curl_get(models_api_url); }
 
 bool openai_system(char *message_content) {
   chat_json("system", message_content);
@@ -50,13 +47,18 @@ bool openai_system(char *message_content) {
 struct json_object *openai_process_chat_message(const char *api_url,
                                                 const char *json_data) {
   char *response = curl_post(api_url, json_data);
+  if (!response) {
+    fprintf(stderr, "Failed to get response.\n");
+    return NULL;
+  }
   struct json_object *parsed_json = json_tokener_parse(response);
   if (!parsed_json) {
-    fprintf(stderr, "Failed to parse JSON.\n%s\n", response);
+    fprintf(stderr, "Failed to parse JSON.\nContent: \"%s\"\n", response);
     return NULL;
   }
   struct json_object *error_object;
-  if (json_object_object_get_ex(parsed_json, "error", &error_object)) {
+  if (json_object_object_get_ex(parsed_json, "error", &error_object) &&
+      message_array) {
 
   char *all_messages = (char *)json_object_to_json_string(message_array);
 
@@ -103,16 +105,16 @@ char *openai_chat(const char *user_role, const char *message_content) {
     return NULL;
   }
 
-  const char *api_url = "https://api.openai.com/v1/chat/completions";
   char *json_data = chat_json(user_role, message_content);
 
   struct json_object *message_object =
-      openai_process_chat_message(api_url, json_data);
-  message_add_object(message_object);
+      openai_process_chat_message(completions_api_url, json_data);
   if (message_object == NULL) {
-    printf("ERROR + NULL IS SUCCESS\n");
     return NULL;
   }
+
+  message_add_object(message_object);
   struct json_object *tool_calls;
   json_object_object_get_ex(message_object, "tool_calls", &tool_calls);
   if (tool_calls) {
@@ -126,7 +128,7 @@ char *openai_chat(const char *user_role, const char *message_content) {
   }
   char *tool_calls_result_str = chat_json(NULL, NULL);
   message_object =
-      openai_process_chat_message(api_url, tool_calls_result_str);
+      openai_process_chat_message(completions_api_url, tool_calls_result_str);
   if (message_object == NULL) {
     return NULL;
   }
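The behavioral fixes in this file are guards: curl_post() can return NULL, which openai_process_chat_message() now checks, and message_add_object() used to run before the NULL check in openai_chat(), so a failed request could be recorded in the chat history. A condensed sketch of the corrected order; the extern declarations stand in for the repo's own helpers purely so the sketch is self-contained, and chat_once is a hypothetical wrapper name:

#include <json-c/json.h>
#include <stddef.h>

/* Stand-ins for the repo's helpers and the URL global from r.h. */
extern struct json_object *openai_process_chat_message(const char *api_url,
                                                       const char *json_data);
extern void message_add_object(struct json_object *message_object);
extern char *completions_api_url;

struct json_object *chat_once(const char *json_data) {
  struct json_object *message_object =
      openai_process_chat_message(completions_api_url, json_data);
  if (message_object == NULL)
    return NULL;                      /* bail out before touching history */
  message_add_object(message_object); /* only valid messages get recorded */
  return message_object;
}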
r.h: 17 changed lines
@@ -7,6 +7,19 @@
 #include <string.h>
 bool is_verbose = false;
 
+#ifndef OLLAMA
+char *models_api_url = "https://api.openai.com/v1/models";
+// char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
+char *completions_api_url = "https://api.openai.com/v1/chat/completions";
+char *advanced_model = "gpt-4o-mini";
+char *fast_model = "gpt-3.5-turbo";
+#else
+char *models_api_url = "https://ollama.molodetz.nl/v1/models";
+char *completions_api_url = "https://ollama.molodetz.nl/v1/chat/completions";
+char *advanced_model = "qwen2.5:3b";
+char *fast_model = "qwen2.5:0.5b";
+#endif
+
 char *_model = NULL;
 
 #define DB_FILE "~/.r.db"
@@ -23,10 +36,10 @@ void set_prompt_model(const char *model) {
 const char *get_prompt_model() {
   if (auth_type != AUTH_TYPE_API_KEY) {
     if (_model == NULL) {
-      _model = strdup(fast_model);
+      _model = strdup(fast_model);
     }
   } else if (_model == NULL) {
-    _model = strdup("gpt-4o-mini");
+    _model = strdup(advanced_model);
  }
  return _model;
 }
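With the hardcoded literals replaced, the fallback chosen by get_prompt_model() now tracks the build: an OpenAI build defaults to gpt-3.5-turbo or gpt-4o-mini, an -DOLLAMA build to qwen2.5:0.5b or qwen2.5:3b, depending on auth_type. A simplified standalone sketch of that defaulting; the bool parameter replaces the repo's auth_type != AUTH_TYPE_API_KEY check and the function name is hypothetical:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static char *fast_model = "gpt-3.5-turbo";   /* "qwen2.5:0.5b" under -DOLLAMA */
static char *advanced_model = "gpt-4o-mini"; /* "qwen2.5:3b"   under -DOLLAMA */
static char *_model = NULL;

/* First call caches a default based on auth; later calls return the copy. */
const char *get_prompt_model_sketch(bool has_api_key) {
  if (_model == NULL)
    _model = strdup(has_api_key ? advanced_model : fast_model);
  return _model;
}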