# LCA-LLM/DataAnalysis/report.py
# (54 lines, 1.9 KiB, Python — last modified 2024-07-30 10:11:41 +08:00)
# Standard-library imports.
import os
import re
import sys
from pprint import pprint

# Third-party imports.
import gradio as gr
from PIL import Image
from qwen_agent.agents import Assistant

# Run relative to this script's own directory so relative paths resolve
# the same way no matter where the script is launched from.
os.chdir(sys.path[0])
# Route temp-file creation to a dedicated scratch directory.
os.environ['TMPDIR'] = "/home/zhangxj/WorkFile/LCA-GPT/LCARAG/DataAnalysis/tmp"
# LLM configuration for the DashScope-hosted Qwen chat model.
# SECURITY: the API key must come from the environment — the original code
# embedded a live "sk-..." credential in source control, which should be
# revoked and rotated.
llm_cfg = {
    'model': 'qwen1.5-72b-chat',
    'model_server': 'dashscope',
    # Read from DASHSCOPE_API_KEY; default to "" so a missing credential
    # fails loudly at request time instead of leaking a hardcoded key.
    'api_key': os.environ.get('DASHSCOPE_API_KEY', ''),
}
# System prompt: positions the assistant as a life-cycle-assessment (LCA)
# data analyst that visualizes results and explains them with domain knowledge.
system_instruction = '''你是一位专注在生命周期领域做数据分析的助手,在数据分析之后,
如果有可视化要求请使用 `plt.show()` 显示图像,并将图像进行保存
最后请对数据分析结果结合生命周期评价领域知识进行解释'''

# Built-in qwen-agent tool that lets the model execute Python code.
tools = ['code_interpreter']

# Rolling chat history, extended as the conversation progresses.
messages = []

# Input documents: the raw emissions CSV plus a markdown report template.
files = [
    "/home/zhangxj/WorkFile/LCA-GPT/DataAnalysis/tmp/2021北京.csv",
    "/home/zhangxj/WorkFile/LCA-GPT/DataAnalysis/报告案例1.md",
]

# The user's request: analyze and visualize the CSV, then fill the report
# template and emit the completed report in markdown.
user_input = '''首先分析上传的2021北京.csv的碳排放数据并处理分析数据和可视化分析
请按照报告案例1作为模板用你掌握的信息进行填充并且将可视化得到的图像结果插入到报告中并加以分析以markdown格式输出填充数据信息之后的报告'''

messages.append({'role': 'user', 'content': user_input})
# Build the agent: LLM config + system prompt + code-interpreter tool,
# seeded with the uploaded files.
bot = Assistant(
    llm=llm_cfg,
    system_message=system_instruction,
    function_list=tools,
    files=files,
)

# bot.run() yields a stream of cumulative response snapshots; drain the
# generator and keep only the final, complete one.
response = []
for snapshot in bot.run(messages=messages):
    response = snapshot
pprint(response)

# Fold the assistant turn(s) back into the chat history.
messages.extend(response)
# Concatenate the assistant messages into one markdown string.
# str.join replaces the original repeated `+=`, which is quadratic in the
# number of messages (each += re-copies the accumulated string).
res_str = "".join(res['content'] for res in response)

# Persist the generated report; report (but don't crash on) write failures
# so the result is still printed below.
try:
    with open("./result.md", "w", encoding="utf-8") as f:
        f.write(res_str)
except IOError as e:
    print(f"An error occurred: {e}")
print(res_str)