diff --git a/browse.h b/browse.h
index 51c9a96..377fbd6 100644
--- a/browse.h
+++ b/browse.h
@@ -31,6 +31,28 @@ char * web_search_news(char * q){
     json_object_put(json_ret);
     return ret;
 }
+
+// Search via searx JSON API; falls back to web_search_news() when empty.
+char * web_search(char * q){
+    char * url = malloc(4096);
+    url[0] = 0;
+    char * q_encoded = url_encode(q);
+    snprintf(url, 4096, "https://search.molodetz.nl/search?q=%s&format=json", q_encoded);
+    free(q_encoded);
+    char * ret = curl_get(url);
+    free(url);
+    json_object * json_ret = ret ? json_tokener_parse(ret) : NULL;
+    json_object * json_results = json_ret ? json_object_object_get(json_ret, "results") : NULL;
+    json_object * json_result = json_results ? json_object_array_get_idx(json_results, 0) : NULL;
+    if(!json_result){
+        if(json_ret) json_object_put(json_ret);
+        free(ret);
+        return web_search_news(q);
+    }
+    json_object_put(json_ret);
+    return ret;
+}
+
 char * web_search_engine(char * q){
     char * searx = malloc(4096);
     searx[0] = 0;
diff --git a/chat.h b/chat.h
index 2bea3af..ef9b300 100644
--- a/chat.h
+++ b/chat.h
@@ -34,18 +34,7 @@
 #include "auth.h"
 #include <json-c/json.h>
 #include "messages.h"
-
-const char * get_prompt_model() { 
-    if(auth_type != AUTH_TYPE_API_KEY) {
-        return "gpt-3.5-turbo";
-    } else {
-        return "gpt-4o-mini";
-    }
-}
-
-static int prompt_max_tokens = 10000;
-static double prompt_temperature = 0.1;
-
+#include "r.h"
 static json_object *_prompt = NULL;
 
 void chat_free() {
diff --git a/main.c b/main.c
index a1e3de7..20d826e 100644
--- a/main.c
+++ b/main.c
@@ -28,6 +28,8 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 
+
+#include "r.h"
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -44,7 +46,7 @@
 #include <string.h>
 #include <unistd.h>
 #include "utils.h"
-#include "r.h"
+
 #include "db_utils.h"
 
 volatile sig_atomic_t sigint_count = 0;
@@ -209,11 +211,18 @@ void repl() {
         if (!line || !*line)
             continue;
      //   previous_line = line;
-        if(!strncmp(line,"dump",4)){
+        if(!strncmp(line,"!dump",5)){
             printf("%s\n",message_json());  
             continue;
-        } 
-        if (!strncmp(line, "model", 5)) {
+        }
+        if(!strncmp(line,"!context",8)){
+            // TODO: show conversation context (handler not implemented yet)
+        }
+        if (!strncmp(line, "!model", 6)) {
+            if(!strncmp(line+6," ",1)){
+                line = line+7;
+                set_prompt_model(line);
+            }
             printf("%s\n",get_prompt_model());
             continue;
         }
@@ -224,6 +233,10 @@ void repl() {
             help();
             continue;
         }
+        if(!strncmp(line, "!debug", 6)){
+            r_malloc_stats();
+            continue;
+        }
         while(line && *line != '\n'){
             line_add_history(line);
             char *response = openai_chat("user", line);
diff --git a/r.h b/r.h
index 93cc28c..1087613 100644
--- a/r.h
+++ b/r.h
@@ -1,8 +1,35 @@
 #ifndef R_H
 #define R_H
+#include <stdlib.h>
 #include <stdbool.h>
+#include <string.h>
 
+#include "auth.h"
 bool is_verbose = false;
 
+// Currently selected model name; heap copy owned by this module.
+static char * prompt_model = NULL;
+
+void set_prompt_model(const char *model) {
+    if(prompt_model != NULL) {
+        free(prompt_model);
+    }
+    prompt_model = strdup(model);
+}
+
+const char * get_prompt_model() {
+    if(auth_type != AUTH_TYPE_API_KEY) {
+        if(prompt_model == NULL) {
+            prompt_model = strdup("gpt-3.5-turbo");
+        }
+    } else if(prompt_model == NULL) {
+        prompt_model = strdup("gpt-4o-mini");
+    }
+    return prompt_model;
+}
+
+static int prompt_max_tokens = 10000;
+static double prompt_temperature = 0.1;
+
+
 #endif 
diff --git a/rpylib.so b/rpylib.so
index 561f764..e28ef01 100755
Binary files a/rpylib.so and b/rpylib.so differ