import os
import re
from pprint import pprint

import gradio as gr
from PIL import Image
from qwen_agent.agents import Assistant

'''
Data analysis assistant (Q&A).

For some reason, running this on the server raises:
    PermissionError: [Errno 13] Permission denied: '/tmp/gradio/872ec5dfa2067f6f2cafe865d734a9e4ab00234b'
but the same script runs fine on my own machine, so it is presumably a permission
problem with Gradio's temporary directory.
'''

# Redirect the temporary directory (resetting it alone still did not fix the error).
os.environ['TMPDIR'] = "/home/zhangxj/WorkFile/LCA-GPT/LCARAG/DataAnalysis/tmp"
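
# A possible workaround (assumption, not verified here): recent Gradio releases honor a
# GRADIO_TEMP_DIR environment variable for their upload/cache directory, so pointing it
# at a writable path may avoid the /tmp/gradio PermissionError described above. Depending
# on the Gradio version it may need to be set before `import gradio` (e.g. in the shell).
os.environ.setdefault("GRADIO_TEMP_DIR",
                      "/home/zhangxj/WorkFile/LCA-GPT/LCARAG/DataAnalysis/tmp")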

llm_cfg = {
    'model': 'qwen1.5-72b-chat',
    'model_server': 'dashscope',
    'api_key': "sk-c5f441f863f44094b0ddb96c831b5002",
}

system_instruction = '''你是一位专注在生命周期领域做数据分析的助手,在数据分析之后,
如果有可视化要求,请使用 `plt.show()` 显示图像,并将图像进行保存。
最后,请对数据分析结果结合生命周期评价领域知识进行解释。'''

tools = ['code_interpreter']  # `code_interpreter` is a built-in tool for executing code.
messages = []  # This stores the chat history.

# Extract the saved image path (a "workspace/...*.png" file) from the agent response
def getImage(response):
    pattern = r'workspace.*?\.png'
    path = None
    for res in response:
        content = str(res['content'])
        matches = re.findall(pattern, content)
        # print("*********", res['content'])
        if matches:
            path = matches[0]
    print("###### path #####", path)
    return path

# Handle user input and generate the chatbot response
def chatbot_interface(user_input, uploaded_file_path):
    bot = Assistant(llm=llm_cfg,
                    system_message=system_instruction,
                    function_list=tools,
                    files=[uploaded_file_path])

    # Add the user input to the chat history
    messages.append({'role': 'user', 'content': user_input})

    # bot.run() streams intermediate results; iterate through them and keep the final one
    response = []
    for response in bot.run(messages=messages):
        continue

    pprint(response)
    messages.extend(response)

    # Concatenate the bot's reply into a single string
    res_str = ""
    for res in response:
        res_str += res['content']

    # Extract the image path (if any) from the response
    tmp_path = getImage(response)
    image_path = None
    if tmp_path:
        image_path = os.path.join("/home/zhangxj/WorkFile/LCA-GPT/LCARAG/DataAnalysis/tmp", tmp_path)
    print("image path:", image_path)

    # Open the image if the path exists
    if image_path and os.path.exists(image_path):
        image = Image.open(image_path)
        return res_str, image
    else:
        return res_str, None

# Handle file upload
def upload_file(file):
    return f"文件上传成功{file}"

# Build the Gradio interface
with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            chatbot_input = gr.Textbox(label="LCA-Data-Assistant", placeholder="Enter your message here...")
        with gr.Row():
            with gr.Column():
                chatbot_output_markdown = gr.Markdown()
                chatbot_output_image = gr.Image(type="pil")
        with gr.Row():
            file_input = gr.File(label="Upload a file")
            file_input.GRADIO_CACHE = "/home/zhangxj/WorkFile/LCA-GPT/LCARAG/DataAnalysis/tmp"
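            # Note (assumption): GRADIO_CACHE is an internal component attribute, and whether
            # overriding it actually redirects the upload cache depends on the installed
            # Gradio version; the documented setting is the GRADIO_TEMP_DIR environment
            # variable used near the top of this script.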

    # Handle user input and file upload events
    def update_output(user_input, uploaded_file_path):
        response_str, image = chatbot_interface(user_input, uploaded_file_path)
        markdown_output = response_str
        image_output = image if image else None
        return markdown_output, image_output

    chatbot_input.submit(fn=update_output, inputs=[chatbot_input, file_input],
                         outputs=[chatbot_output_markdown, chatbot_output_image])
    file_input.change(fn=upload_file, inputs=file_input, outputs=[])

# Launch the Gradio app
demo.launch()