# sage/ollama/api.py
import asyncio
from appPublic.oauth_client import OAuthClient
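# Interactive command-line client for Ollama's /api/generate and /api/chat.
# desc below is an OAuthClient "mapi" descriptor set: each entry under
# "mapis" declares one HTTP call, and the "${...}" placeholders in its
# "data" fields are filled from the parameter dict passed when the client
# instance is invoked.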
desc = {
    "data": {
        "oops": 1
    },
    "mapis": {
        "generate": {
            "url": "https://sage.opencomputing.cn/ollama/api/generate",
            "method": "POST",
            "headers": [
            ],
            "params": [
            ],
            "data": [
                {
                    "name": "model",
                    "value": "${model}"
                },
                {
                    "name": "prompt",
                    "value": "${prompt}"
                },
                {
                    "name": "context",
                    "value": "${context}"
                },
                {
                    "name": "stream",
                    "value": True
                },
            ],
            "resposne_type": "2",
            "error_field": "error",
            "error_msg_field": "error_description",
            "resp_set_data": [
            ]
        },
        "chat": {
            # "url": "https://sage.opencomputing.cn/ollama/api/chat",
            "url": "http://localhost:11434/api/chat",
            "method": "POST",
            "headers": [
            ],
            "params": [
            ],
            "data": [
                {
                    "name": "model",
                    "value": "${model}"
                },
                {
                    "name": "messages",
                    "value": "${messages}"
                },
                {
                    "name": "stream",
                    "value": True
                },
            ],
            "resposne_type": "2",
            "error_field": "error",
            "error_msg_field": "error_description",
            "resp_set_data": [
            ]
        }
    }
}
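
# A minimal one-shot sketch of the same call pattern used by the loops below;
# ask_once is a hypothetical helper, assuming OAuthClient instances are
# awaited exactly as in chat(), i.e. with a ('chat', params) call.
async def ask_once(prompt, model="llama2:7b"):
    ollama = OAuthClient(desc)
    r = await ollama('chat', {
        "model": model,
        "messages": [{"role": "user", "content": prompt}]
    })
    return r['message']['content']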

async def generate(desc):
    # REPL against the 'generate' mapi; Ollama's context tokens are fed
    # back in each turn so the model keeps the conversation state.
    ollama = OAuthClient(desc)
    context = []
    while True:
        print('prompt:')
        p = input()
        if p == '':
            continue
        if p == 'quit':
            break
        r = await ollama('generate', {
            "model": "llama2:13b",
            "context": context,
            "prompt": p
        })
        context = r['context']
        print('ollama:', r['response'])

async def chat(desc):
    # REPL against the 'chat' mapi; the full message history is resent each
    # turn, and the assistant's reply is appended to keep the dialogue going.
    ollama = OAuthClient(desc)
    msgs = []
    while True:
        print('prompt:')
        p = input()
        if p == '':
            continue
        if p == 'quit':
            break
        msg = {
            "role": "user",
            "content": p
        }
        msgs.append(msg)
        r = await ollama('chat', {
            "model": "llama2:7b",
            "messages": msgs
        })
        # /api/chat returns the assistant turn as {"role": ..., "content": ...}
        msgs.append(r['message'])
        response = r['message']['content']
        print('ollama:', response)

if __name__ == '__main__':
    asyncio.run(chat(desc))
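    # To exercise the generate endpoint instead, swap in:
    # asyncio.run(generate(desc))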