diff --git a/.gitignore b/.gitignore
index 1d17dae..f8daee8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 .venv
+prompt.txt
diff --git a/client.py b/client.py
index a693e2a..94a1dbf 100644
--- a/client.py
+++ b/client.py
@@ -1,5 +1,3 @@
-# This script requires aiohttp Python library to function.
-
 import asyncio
 import aiohttp
 import json
@@ -7,11 +5,9 @@ import logging
 import argparse
 from urllib.parse import urlparse, urlunparse
 
-# Default values
 DEFAULT_CONCURRENCY = 4
-DEFAULT_OLLAMA_URL = 'https://localhost:11434'
+DEFAULT_OLLAMA_URL = 'http://localhost:11434'
 
-# Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 async def websocket_client(url: str, ollama_url: str) -> None:
@@ -64,7 +60,7 @@ async def main(concurrency: int, ollama_url: str) -> None:
             await asyncio.gather(*tasks)
         except Exception as e:
             logging.error(f"Connection error: {e}")
-            await asyncio.sleep(1)  # Wait before retrying
+            await asyncio.sleep(1)
 
 def validate_url(url: str) -> bool:
     parsed = urlparse(url)
diff --git a/index.html b/index.html
index 528a9aa..7aff646 100644
--- a/index.html
+++ b/index.html
@@ -4,22 +4,15 @@
     <meta charset="utf-8">
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <meta name="color-scheme" content="light dark">
-    <link
-      rel="stylesheet"
-      href="https://cdn.jsdelivr.net/npm/@picocss/pico@2/css/pico.yellow.min.css"
-    >
+    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@picocss/pico@2/css/pico.yellow.min.css">
     <title>Ollama Crowd-Funded Server</title>
   </head>
   <body>
     <main class="container">
       <h1>Ollama Crowd-Funded Server</h1>
-      <p>
-        Welcome to the Ollama Crowd-Funded Server. You can use this URL with the official Ollama JavaScript or Python clients to communicate with an Ollama server. The Ollama servers are generously provided by individuals.
-      </p>
-      <h2>Using this Ollama Server</h2>  
-      <p>
-          Simply use the original client! The only difference is the URL.
-      </p>
+      <p>Welcome to the Ollama Crowd-Funded Server. You can use this URL with the official Ollama JavaScript or Python clients to communicate with an Ollama server. The Ollama servers are generously provided by individuals.</p>
+      <h2>Using this Ollama Server</h2>
+      <p>Simply use the original client! The only difference is the URL.</p>
-      <code>
-        <pre>
+      <pre>
+        <code>
 from ollama import Client
@@ -45,13 +38,8 @@ while True:
-        </pre>
-      </code>
+        </code>
+      </pre>
       <h2>Donate Your Resources</h2>
-      <p>
-          You can contribute your resources to the server by using the following script:
-      </p>
-      <code><pre>
-#client.py
-        </pre>
-        </code>
+      <p>You can contribute your resources to the server by using the following script:</p>
+      <pre><code>#client.py</code></pre>
     </main>
   </body>
 </html>
diff --git a/server.py b/server.py
index e9b754e..17f66c5 100644
--- a/server.py
+++ b/server.py
@@ -1,63 +1,39 @@
-# Written by retoor@molodetz.nl
-
-# This code creates a server using asyncio and aiohttp that manages websocket and HTTP connections to forward messages between them.
-
-# Used Imports: asyncio, aiohttp
-
-# The MIT License (MIT)
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-# 
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-# 
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-
 import asyncio
 import aiohttp
 from aiohttp import web
 import uuid
-import pathlib 
+import json
+import pathlib
 
 class OllamaServer:
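+    # One donor connection; response chunks are demultiplexed into per-request queues.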
-    def __init__(self,ws,models):
-        self.ws = ws 
+    def __init__(self, ws, models):
+        self.ws = ws
         self.queues = {}
-        self.models = models  
-        print("New OllamaServer created")
-        print(self.model_names)
+        self.models = models
 
     @property
     def model_names(self):
-        return [model['name'] for model in self.models] 
+        return [model['name'] for model in self.models]
 
     async def forward_to_http(self, request_id, message):
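+        # Queue a chunk received from the donor so the HTTP side can stream it.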
-        if not request_id in self.queues:
+        if request_id not in self.queues:
             self.queues[request_id] = asyncio.Queue()
         await self.queues[request_id].put(message)
-    
-    async def forward_to_websocket(self, request_id, message,path):
+
+    async def forward_to_websocket(self, request_id, message, path):
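+        # Send the request to the donor, then yield response chunks from the
+        # matching queue until the donor marks the final chunk with done=True.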
         self.queues[request_id] = asyncio.Queue()
-        await self.ws.send_json(dict(request_id=request_id, data=message,path=path))
+        await self.ws.send_json(dict(request_id=request_id, data=message, path=path))
 
         while True:
             chunk = await self.queues[request_id].get()
-            yield chunk 
+            yield chunk
             if chunk['done']:
                 break
 
-
     async def serve(self):
         async for msg in self.ws:
             if msg.type == web.WSMsgType.TEXT:
@@ -66,7 +37,7 @@ class OllamaServer:
                 await self.forward_to_http(request_id, data['data'])
             elif msg.type == web.WSMsgType.ERROR:
                 break
-   
+
 class ServerManager:
     def __init__(self):
         self.servers = []
@@ -77,26 +48,27 @@
     def remove_server(self, server):
         self.servers.remove(server)
 
-    async def forward_to_websocket(self, request_id, message,path):
+    async def forward_to_websocket(self, request_id, message, path):
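+        # Rotate the list so requests are balanced round-robin across donors.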
         try:
             server = self.servers.pop(0)
             self.servers.append(server)
-            async for msg in  server.forward_to_websocket(request_id, message,path):
+            async for msg in server.forward_to_websocket(request_id, message, path):
                 yield msg
-        except:
-            raise 
+        except IndexError:
+            # No donor servers are connected; let the caller see the error.
+            raise
 
 server_manager = ServerManager()
 
-
-
 async def websocket_handler(request):
     ws = web.WebSocketResponse()
     await ws.prepare(request)
 
     models = await ws.receive_json()
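+    # The donor announces its models first; register it for request routing.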
 
-    server = OllamaServer(ws,models['models'])
+    server = OllamaServer(ws, models['models'])
     server_manager.add_server(server)
 
     async for msg in ws:
@@ -109,7 +78,6 @@ async def websocket_handler(request):
     server_manager.remove_server(server)
     return ws
 
-
 async def http_handler(request):
     request_id = str(uuid.uuid4())
     data = None
@@ -118,11 +86,11 @@ async def http_handler(request):
     except ValueError:
         return web.Response(status=400)
 
-    resp = web.StreamResponse(headers={'Content-Type': 'application/x-ndjson','Transfer-Encoding': 'chunked'})
+    resp = web.StreamResponse(headers={'Content-Type': 'application/x-ndjson', 'Transfer-Encoding': 'chunked'})
     await resp.prepare(request)
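+    # Relay NDJSON chunks from the donor until the response is complete.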
-    import json 
-    async for result in server_manager.forward_to_websocket(request_id, data,path=request.path):
-        await resp.write(json.dumps(result).encode() + b'\n')    
+    async for result in server_manager.forward_to_websocket(request_id, data, path=request.path):
+        await resp.write(json.dumps(result).encode() + b'\n')
     await resp.write_eof()
     return resp
 
@@ -139,4 +107,4 @@ app.router.add_route('GET', '/publish', websocket_handler)
 app.router.add_route('POST', '/api/chat', http_handler)
 
 if __name__ == '__main__':
-    web.run_app(app, port=8080)
+    web.run_app(app, port=1984)
diff --git a/test.py b/test.py
index 66e4293..92a3a7c 100644
--- a/test.py
+++ b/test.py
@@ -1,6 +1,5 @@
 from ollama import Client
 client = Client(
-  #host="https://ollama.molodetz.nl",
-  host='http://localhost:8080',
+  host='http://localhost:1984',
   headers={'x-some-header': 'some-value'}
 )
@@ -19,13 +18,14 @@ def chat_stream(message):
     if message:
         messages.append({'role': 'user', 'content': message})
     content = ''
-    for response in client.chat(model='qwen2.5-coder:0.5b', messages=messages,stream=True):
+    for response in client.chat(model='qwen2.5-coder:0.5b', messages=messages, stream=True):
         content += response.message.content
-        print(response.message.content,end='',flush=True)
+        print(response.message.content, end='', flush=True)
     messages.append({'role': 'assistant', 'content': content})
     print("")
 
-def chat(message,stream=False):
+
+def chat(message, stream=False):
     if stream:
         return chat_stream(message)
     if message:
@@ -33,7 +33,6 @@ def chat(message,stream=False):
     response = client.chat(model='qwen2.5:3b', messages=messages,
     tools=[times_two])
     if response.message.tool_calls:
-  # There may be multiple tool calls in the response
         for tool in response.message.tool_calls:
             if function_to_call := available_functions.get(tool.function.name):
                 print('Calling function:', tool.function.name)
@@ -43,17 +42,11 @@ def chat(message,stream=False):
             else:
                 print('Function', tool.function.name, 'not found')
 
-# Only needed to chat with the model using the tool call results
-            if response.message.tool_calls:
-                # Add the function response to messages for the model to use
-                messages.append(response.message)
-                messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
-
-                # Get final response from model with function outputs
-                return chat(None)
+            messages.append(response.message)
+            messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
+            # Send the tool output back so the model can produce the final answer.
+            return chat(None)
     return response.message.content
 
-
 while True:
     chat_stream("A farmer and a sheep are standing on one side of a river. There is a boat with enough room for one human and one animal. How can the farmer get across the river with the sheep in the fewest number of trips?")
-