This commit is contained in:
retoor 2024-11-28 03:41:34 +01:00
parent 5ee4b3b720
commit d5aa534f5a
15 changed files with 2 additions and 266 deletions

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
.venv
__*
.pypirc

Binary file not shown.

Binary file not shown.

View File

@ -1,42 +0,0 @@
Metadata-Version: 2.1
Name: yura
Version: 14.3.7
Summary: Yura async AI client
Author: retoor
Author-email: retoor@retoor.io
License: MIT
Requires-Python: >=3.7
Description-Content-Type: text/markdown
Requires-Dist: websockets
# Yura LLM Client for Katya server
Part of a project that aims to replace the native Ollama protocol. This protocol supports streaming, works over HTTPS, and makes it possible to attach a web client directly to the backend.
## Install
```bash
pip install -e .
```
## Build
```bash
make build
```
## Command line usage
```bash
yura ws://[host]:[port]/[path]/
```
## Python
```python
import asyncio
from yura.client import AsyncClient
async def communicate():
client = AsyncClient("ws://[host]:[port]/[path]/")
async for response in client.chat("Your prompt"):
print(response)
asyncio.run(communicate())
```

View File

@ -1,31 +0,0 @@
# Yura LLM Client for Katya server
Part of a project that aims to replace the native Ollama protocol. This protocol supports streaming, works over HTTPS, and makes it possible to attach a web client directly to the backend.
## Install
```bash
pip install -e .
```
## Build
```bash
make build
```
## Command line usage
```bash
yura ws://[host]:[port]/[path]/
```
## Python
```python
import asyncio
from yura.client import AsyncClient
async def communicate():
client = AsyncClient("ws://[host]:[port]/[path]/")
async for response in client.chat("Your prompt"):
print(response)
asyncio.run(communicate())
```

View File

@ -1,3 +0,0 @@
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"

View File

@ -1,25 +0,0 @@
[metadata]
name = yura
version = 14.3.7
description = Yura async AI client
author = retoor
author_email = retoor@retoor.io
license = MIT
long_description = file: README.md
long_description_content_type = text/markdown
[options]
packages = find:
package_dir =
= src
python_requires = >=3.7
install_requires =
websockets
[options.packages.find]
where = src
[egg_info]
tag_build =
tag_date = 0

View File

@ -1,42 +0,0 @@
Metadata-Version: 2.1
Name: yura
Version: 14.3.7
Summary: Yura async AI client
Author: retoor
Author-email: retoor@retoor.io
License: MIT
Requires-Python: >=3.7
Description-Content-Type: text/markdown
Requires-Dist: websockets
# Yura LLM Client for Katya server
Part of a project that aims to replace the native Ollama protocol. This protocol supports streaming, works over HTTPS, and makes it possible to attach a web client directly to the backend.
## Install
```bash
pip install -e .
```
## Build
```bash
make build
```
## Command line usage
```bash
yura ws://[host]:[port]/[path]/
```
## Python
```python
import asyncio
from yura.client import AsyncClient
async def communicate():
client = AsyncClient("ws://[host]:[port]/[path]/")
async for response in client.chat("Your prompt"):
print(response)
asyncio.run(communicate())
```

View File

@ -1,11 +0,0 @@
README.md
pyproject.toml
setup.cfg
src/yura/__init__.py
src/yura/__main__.py
src/yura/client.py
src/yura.egg-info/PKG-INFO
src/yura.egg-info/SOURCES.txt
src/yura.egg-info/dependency_links.txt
src/yura.egg-info/requires.txt
src/yura.egg-info/top_level.txt

View File

@ -1 +0,0 @@

View File

@ -1 +0,0 @@
websockets

View File

@ -1 +0,0 @@
yura

View File

@ -1,106 +0,0 @@
import asyncio
import websockets
import json
import sys
class AsyncClient:
    """Async websocket client for the Yura/Katya LLM server.

    Prompts are queued and forwarded by a single background task; streamed
    JSON responses are yielded from :meth:`chat` until a message with
    ``done`` set to a truthy value arrives.
    """

    def __init__(self, url="ws://127.0.0.1:8470"):
        """Create a client for *url*; no connection is opened yet."""
        self.url = url
        self.ws = None
        # Responses received from the server, consumed by chat().
        self.queue_in = asyncio.Queue()
        # Prompts waiting to be sent by the communicate() task.
        self.queue_out = asyncio.Queue()
        self.communication_task = None

    async def ensure_connection(self):
        """Lazily open and cache a websocket connection to ``self.url``.

        BUG FIX: the original was declared without ``self`` and therefore
        could never be invoked as a method.
        """
        if not self.ws:
            self.ws = await websockets.connect(self.url)
        return self.ws

    async def ensure_communication(self):
        """Start the background communicate() task exactly once."""
        if not self.communication_task:
            self.communication_task = asyncio.create_task(self.communicate())
        return self.communication_task

    async def chat(self, message):
        """Send *message* and yield response dicts until one has ``done``.

        Async generator; each yielded item is a decoded JSON dict from the
        server, ending with (and including) the ``done`` message.
        """
        await self.ensure_communication()
        await self.queue_out.put(message)
        while True:
            # A blocking get() replaces the original 0.1 s poll loop —
            # same result, no busy-wait.
            response = await self.queue_in.get()
            yield response
            if response["done"]:
                break

    async def communicate(self):
        """Background task: forward queued prompts, collect streamed replies."""
        async with websockets.connect(self.url) as websocket:
            while True:
                message_content = await self.queue_out.get()
                # send() returns None; the original pointlessly bound it.
                await websocket.send(json.dumps(message_content))
                while True:
                    response = json.loads(await websocket.recv())
                    if response["done"]:
                        break
                    await self.queue_in.put(response)
                # Forward the terminal (done=True) message as well so
                # chat() knows the stream has finished.
                await self.queue_in.put(response)
async def cli_client(url="ws://127.0.0.1:8470"):
    """Interactive REPL: read prompts from stdin, stream replies to stdout."""
    loop = asyncio.get_event_loop()
    client = AsyncClient(url)
    while True:
        sys.stdout.write("> ")
        sys.stdout.flush()
        # stdin.readline blocks, so hand it to the default executor.
        prompt = await loop.run_in_executor(None, sys.stdin.readline)
        async for reply in client.chat(prompt):
            print(reply["content"], end="", flush=True)
            if reply["done"]:
                break
        print("")
def main():
    """CLI entry point: optional argv[1] overrides the default server URL."""
    url = sys.argv[1] if len(sys.argv) > 1 else "ws://127.0.0.1:8470"
    asyncio.run(cli_client(url))


if __name__ == "__main__":
    main()

View File

@ -1,8 +1,6 @@
README.md
pyproject.toml
setup.cfg
src/yura/__init__.py
src/yura/__main__.py
src/yura/cli.py
src/yura/client.py
src/yura.egg-info/PKG-INFO

View File

@ -1 +1 @@
yura