代码日常上传归档

This commit is contained in:
xiazongji 2025-06-13 16:18:58 +08:00
parent 8d891b420e
commit 54c8dbffa1
19 changed files with 6699 additions and 113 deletions

Binary file not shown.

6253
color_summary.csv Normal file

File diff suppressed because it is too large Load Diff

12
config_test.py Normal file
View File

@ -0,0 +1,12 @@
import pickle

# Debug helper: dump the contents of a pickled SAM model-parameter file.
file_path = '/home/xiazj/ai-station-code/tmp/sam/7afa76b5-6730-434f-9470-651b4bcb7e8f/model_params.pickle'

# The pickle stores a (data, config) pair; open in binary mode and unpickle.
# NOTE(review): pickle.load is unsafe on untrusted files — acceptable here
# only because the path points at our own locally-serialized parameters.
with open(file_path, 'rb') as fh:
    data, config = pickle.load(fh)

# Show both halves of the deserialized pair.
print(data)
print(config)

View File

@ -18,5 +18,5 @@ def predict(img):
r_image.show() r_image.show()
if __name__ == "__main__": if __name__ == "__main__":
path = "D:\\project\\ai-station\\tmp\\dimaoshibie\\crop_9_14.tif" path = "/home/xiazj/ai-station-code/tmp/dimaoshibie/d94afe94-2fae-4ce5-9ee4-94ac1d699337/dimao2.jpg"
predict(path) predict(path)

View File

@ -22,7 +22,7 @@ class SegFormer_Segmentation(object):
"num_classes": 10 + 1, # 类别数(包括背景) "num_classes": 10 + 1, # 类别数(包括背景)
"phi": "b0", # 模型规模b0-b5 "phi": "b0", # 模型规模b0-b5
"input_shape": [512, 512], # 输入图像尺寸 "input_shape": [512, 512], # 输入图像尺寸
"mix_type": 0, # 可视化方式0-混合原图1-仅分割图2-仅目标区域 "mix_type": 1, # 可视化方式0-混合原图1-仅分割图2-仅目标区域
"cuda": True, # 是否使用GPU "cuda": True, # 是否使用GPU
} }

Binary file not shown.

15
main.py
View File

@ -1,15 +0,0 @@
import sys
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
version = f"{sys.version_info.major}.{sys.version_info.minor}"
app = FastAPI()
# 将 /root/app 目录挂载为静态文件
app.mount("/files", StaticFiles(directory="/root/app"), name="files")
@app.get("/")
async def read_root():
message = f"Hello world! From FastAPI running on Uvicorn with Gunicorn. Using Python {version}"
return {"message": message}

110
run.py
View File

@ -108,7 +108,7 @@ async def read_root():
# 获取数据界面资源信息 # 获取数据界面资源信息
@app.get("/ai-station-api/index/show") @app.get("/ai-station-api/index/show")
async def get_source_index_info(): async def get_source_index_info():
sql = "SELECT id,application_name, describe_data, img_url FROM app_shouye" sql = "SELECT id,application_name, describe_data, img_url,url FROM app_shouye"
data = data_util.fetch_data(sql) data = data_util.fetch_data(sql)
if data is None: if data is None:
return { return {
@ -679,10 +679,13 @@ async def upload_image(file: UploadFile = File(...),type: str = Form(...), ):
file_location = os.path.join(upload_dir , file.filename) file_location = os.path.join(upload_dir , file.filename)
with open(file_location, "wb") as buffer: with open(file_location, "wb") as buffer:
shutil.copyfileobj(file.file, buffer) shutil.copyfileobj(file.file, buffer)
encoded_string = sam_deal.load_tmp_image(file_location)
file_location = model_deal.get_pic_url(file_location) file_location = model_deal.get_pic_url(file_location)
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
"data":{"location": file_location}} "data":{"location": file_location},
"image":JSONResponse(content={"image_data": encoded_string})
}
""" """
@ -698,10 +701,14 @@ async def dmsb_image_analysis(path:str = None):
"data":None} "data":None}
else: else:
path = result['reason'] path = result['reason']
encoded_string = sam_deal.load_tmp_image(path[0])
path= model_deal.get_pic_url(path[0]) # 是一个列表 path= model_deal.get_pic_url(path[0]) # 是一个列表
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
"data":{"result": path}} "data":{"result": path},
"image":JSONResponse(content={"image_data": encoded_string})
}
""" """
@ -710,19 +717,24 @@ async def dmsb_image_analysis(path:str = None):
@app.get("/ai-station-api/dmsb_image/calculate") @app.get("/ai-station-api/dmsb_image/calculate")
async def dmsb_image_calculate(path:str = None, scale:float = 1.92*1.92): async def dmsb_image_calculate(path:str = None, scale:float = 1.92*1.92):
path = model_deal.get_pic_path(path) path = model_deal.get_pic_path(path)
result = model_deal.dimaoshibie_area(path,scale,param.dmsb_colors) # result = model_deal.dimaoshibie_area(path,scale,param.dmsb_colors)
logger.info(result) # logger.info(result)
if result['status'] == True: # if result['status'] == True:
res = result['reason'] # res = result['reason']
translated_dict = {param.dmsb_type[key]: value for key, value in res.items()} # translated_dict = {param.dmsb_type[key]: value for key, value in res.items()}
# json_data = json.dumps(translated_dict) file_directory = os.path.dirname(path)
return {"success":True, output_file_path = os.path.join(file_directory,'result.txt')
"msg":"获取信息成功", # 初始化一个空字典
"data":{"result": translated_dict}} total_piex = {}
else: with open(output_file_path, 'r') as file:
return {"success":False, for line in file:
"msg":result['reason'], # 去掉行首尾的空白字符,并分割键值
"data":None} key, value = line.strip().split(': ')
# 将值转换为整数并存入字典
total_piex[key] = int(value) * scale
return {"success":True,
"msg":"获取信息成功",
"data":{"result": total_piex}}
""" """
@ -760,17 +772,19 @@ async def download_zip(path:str = None):
async def roof_image_analysis(path:str = None): async def roof_image_analysis(path:str = None):
path = model_deal.get_pic_path(path) path = model_deal.get_pic_path(path)
result = model_deal.roof_pic(roof_model,path,param.wdpv_palette) result = model_deal.roof_pic(roof_model,path,param.wdpv_palette)
if result['status'] == False: if result['status'] == False:
return {"success":False, return {"success":False,
"msg":result['reason'], "msg":result['reason'],
"data":None} "data":None}
else: else:
path = result['reason'] path = result['reason']
encoded_string = sam_deal.load_tmp_image(path[0])
path= model_deal.get_pic_url(path[0]) path= model_deal.get_pic_url(path[0])
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
"data":{"result": path}} "data":{"result": path},
"image":JSONResponse(content={"image_data": encoded_string})
}
""" """
@ -812,10 +826,12 @@ async def pv_image_analysis(path:str = None):
"data":None} "data":None}
else: else:
path = result['reason'] path = result['reason']
encoded_string = sam_deal.load_tmp_image(path[0])
path= model_deal.get_pic_url(path[0]) path= model_deal.get_pic_url(path[0])
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
"data":{"result": path}} "data":{"result": path},
"image":JSONResponse(content={"image_data": encoded_string})}
""" """
光伏面积 - 分割结果计算 ,图片demo的是z18 光伏面积 - 分割结果计算 ,图片demo的是z18
@ -856,10 +872,13 @@ async def roofpv_image_analysis(path:str = None):
else: else:
file_list = result['reason'] file_list = result['reason']
final_path = prepare_data.merge_binary(file_list) final_path = prepare_data.merge_binary(file_list)
encoded_string = sam_deal.load_tmp_image(final_path)
final_path = model_deal.get_pic_url(final_path) final_path = model_deal.get_pic_url(final_path)
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
"data":{"result": final_path}} "data":{"result": final_path},
"image":JSONResponse(content={"image_data": encoded_string})
}
""" """
@ -995,7 +1014,8 @@ async def get_ch4_predict(path:str=None,start_time:str=None, end_time:str = None
else: else:
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
"data":data['reason']} "data":data['reason'],
'time':end_time}
#========================================光伏预测========================================================== #========================================光伏预测==========================================================
""" """
@ -1108,6 +1128,7 @@ async def get_pvelectri_predict(path:str=None,start_time:str=None, end_time:str
else: else:
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
'time':end_time,
"data":data['reason']} "data":data['reason']}
@ -1215,6 +1236,7 @@ csv上传
@app.get("/ai-station-api/wind_electric_data/predict") @app.get("/ai-station-api/wind_electric_data/predict")
async def get_wind_electri_predict(path:str=None,start_time:str=None, end_time:str = None,is_show:bool=True): async def get_wind_electri_predict(path:str=None,start_time:str=None, end_time:str = None,is_show:bool=True):
data = model_deal.start_wind_electric_predict_endpoint(windfd_model,path,start_time,end_time,is_show) data = model_deal.start_wind_electric_predict_endpoint(windfd_model,path,start_time,end_time,is_show)
# logger.info(data)
if data['status'] ==False: if data['status'] ==False:
return { return {
"success":False, "success":False,
@ -1224,6 +1246,7 @@ async def get_wind_electri_predict(path:str=None,start_time:str=None, end_time:s
else: else:
return {"success":True, return {"success":True,
"msg":"获取信息成功", "msg":"获取信息成功",
'time':end_time,
"data":data['reason']} "data":data['reason']}
@ -1305,12 +1328,11 @@ async def upload_sam_image(file: UploadFile = File(...),type: str = Form(...), )
要求针对返回current_index,将列表默认成选择current_index对应的分类 要求针对返回current_index,将列表默认成选择current_index对应的分类
""" """
@app.get("/ai-station-api/sam_class/create") @app.post("/ai-station-api/sam_class/create")
async def sam_class_set( async def sam_class_set(item:post_model.samItem):
class_name: str = None, class_name = item.class_name
color: Optional[List[int]] = Query(None, description="list of RGB color"), color= item.color
path: str = None path = item.path
):
loaded_data,api_config = sam_deal.load_model(path) loaded_data,api_config = sam_deal.load_model(path)
result = sam_deal.add_class(loaded_data,class_name,color) result = sam_deal.add_class(loaded_data,class_name,color)
if result['status'] == True: if result['status'] == True:
@ -1335,6 +1357,7 @@ async def sam_class_set(
"data":{"class_name_list": loaded_data['class_names'], "data":{"class_name_list": loaded_data['class_names'],
"current_index": loaded_data['class_index'], "current_index": loaded_data['class_index'],
"class_dict":loaded_data['class_colors'], "class_dict":loaded_data['class_colors'],
"color":color,
}} }}
@ -1344,12 +1367,11 @@ async def sam_class_set(
current_index : 下拉列表中的分类索引 current_index : 下拉列表中的分类索引
rgb_color : rgb_color :
""" """
@app.get("/ai-station-api/sam_color/select") @app.post("/ai-station-api/sam_color/select")
async def set_sam_color( async def set_sam_color(item:post_model.samItem2):
current_index: int = None, current_index = item.current_index
rgb_color: List[int] = Query(None, description="list of RGB color"), rgb_color = item.rgb_color
path: str = None path = item.path
):
loaded_data,api_config = sam_deal.load_model(path) loaded_data,api_config = sam_deal.load_model(path)
r, g, b = [int(c) for c in rgb_color] r, g, b = [int(c) for c in rgb_color]
bgr_color = (b, g, r) bgr_color = (b, g, r)
@ -1362,12 +1384,14 @@ async def set_sam_color(
return { return {
"success":True, "success":True,
"msg":"", "msg":"",
"color":rgb_color,
"image": JSONResponse(content={"image_data": encoded_string}), "image": JSONResponse(content={"image_data": encoded_string}),
} }
else: else:
return { return {
"success":False, "success":False,
"msg":img['reason'], "msg":img['reason'],
"color":rgb_color,
"image":None "image":None
} }
@ -1375,12 +1399,12 @@ async def set_sam_color(
""" """
选择分类 选择分类
""" """
@app.get("/ai-station-api/sam_classs/change") @app.get("/ai-station-api/sam_class/change")
async def on_class_selected(class_index : int = None,path: str = None): async def on_class_selected(class_index : int = None,path: str = None):
# 加载配置内容 # 加载配置内容
loaded_data,api_config = sam_deal.load_model(path) loaded_data,api_config = sam_deal.load_model(path)
result, api_config = sam_deal.set_current_class(loaded_data, api_config, class_index, color=None) result, api_config = sam_deal.set_current_class(loaded_data, api_config, class_index, color=None)
# loaded_data['class_index'] = class_index loaded_data['class_index']=class_index
sam_deal.save_model(loaded_data,api_config,path) sam_deal.save_model(loaded_data,api_config,path)
if result: if result:
img = sam_deal.refresh_image(loaded_data,api_config,path) img = sam_deal.refresh_image(loaded_data,api_config,path)
@ -1388,7 +1412,7 @@ async def on_class_selected(class_index : int = None,path: str = None):
encoded_string = sam_deal.load_tmp_image(img['reason']) encoded_string = sam_deal.load_tmp_image(img['reason'])
return { return {
"success":True, "success":True,
"msg":"", "msg":"更改成功",
"image":JSONResponse(content={"image_data": encoded_string}) "image":JSONResponse(content={"image_data": encoded_string})
} }
else: else:
@ -1411,6 +1435,12 @@ async def on_class_selected(class_index : int = None,path: str = None):
@app.get("/ai-station-api/sam_class/delete") @app.get("/ai-station-api/sam_class/delete")
async def sam_remove_class(path:str=None,select_index:int=None): async def sam_remove_class(path:str=None,select_index:int=None):
loaded_data,api_config = sam_deal.load_model(path) loaded_data,api_config = sam_deal.load_model(path)
if select_index == -1:
return {
"success":False,
"msg":"没有选定分类,请选定当前分类后再删除",
"image":None
}
class_name = loaded_data['class_names'][select_index] class_name = loaded_data['class_names'][select_index]
loaded_data,api_config = sam_deal.remove_class(loaded_data,api_config,class_name) loaded_data,api_config = sam_deal.remove_class(loaded_data,api_config,class_name)
sam_deal.save_model(loaded_data,api_config,path) sam_deal.save_model(loaded_data,api_config,path)
@ -1419,7 +1449,10 @@ async def sam_remove_class(path:str=None,select_index:int=None):
encoded_string = sam_deal.load_tmp_image(img['reason']) encoded_string = sam_deal.load_tmp_image(img['reason'])
return { return {
"success":True, "success":True,
"msg":"", "msg":"删除成功",
"class_name_list": loaded_data['class_names'],
"current_index": loaded_data['class_index'],
"class_dict":loaded_data['class_colors'],
"image":JSONResponse(content={"image_data": encoded_string}) "image":JSONResponse(content={"image_data": encoded_string})
} }
else: else:
@ -1634,6 +1667,7 @@ async def sam_predict_mask(path:str=None):
async def sam_reset_annotation(path:str=None): async def sam_reset_annotation(path:str=None):
loaded_data,api_config = sam_deal.load_model(path) loaded_data,api_config = sam_deal.load_model(path)
loaded_data,api_config = sam_deal.reset_annotation_all(loaded_data,api_config) loaded_data,api_config = sam_deal.reset_annotation_all(loaded_data,api_config)
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path) img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True: if img['status'] == True:
encoded_string = sam_deal.load_tmp_image(img['reason']) encoded_string = sam_deal.load_tmp_image(img['reason'])
@ -1682,8 +1716,6 @@ async def sam_add_to_class(path:str=None,class_index:int=None):
"image":None "image":None
} }
""" """
保存结果 保存结果
""" """

52
test.py Normal file
View File

@ -0,0 +1,52 @@
"""One-off debug script: unpickle a SAM model-parameter file and print it.

The commented-out sections below are earlier experiments (a minimal FastAPI
static-file app and an image colour-count survey) kept for reference.
"""
# import sys
# from fastapi import FastAPI
# from fastapi.staticfiles import StaticFiles
# version = f"{sys.version_info.major}.{sys.version_info.minor}"
# app = FastAPI()
# # Mount the /root/app directory as static files
# app.mount("/files", StaticFiles(directory="/root/app"), name="files")
# @app.get("/")
# async def read_root():
# message = f"Hello world! From FastAPI running on Uvicorn with Gunicorn. Using Python {version}"
# return {"message": message}
# from PIL import Image
# import pandas as pd
# # Read the image
# image_path = '/home/xiazj/ai-station-code/tmp/dimaoshibie/d94afe94-2fae-4ce5-9ee4-94ac1d699337/merge_binary_dimao2.jpg' # replace with your image path
# image = Image.open(image_path)
# # Get the image's RGB pixel data
# pixels = list(image.getdata())
# # Count how often each colour occurs
# color_counts = pd.Series(pixels).value_counts()
# # Convert the colour counts to a DataFrame
# color_summary = color_counts.reset_index()
# color_summary.columns = ['RGB', 'Count']
# # Print the result
# print(color_summary)
# # Optional: save the result to a CSV file
# color_summary.to_csv('color_summary.csv', index=False)
import pickle
# Path of the pickle file to inspect
file_path = '/home/xiazj/ai-station-code/tmp/sam/c768f709-b123-48fb-b98b-addf0bbb8a04/model_params.pickle'
# Read the pickle file; the unpack below shows it stores a (data, api) 2-tuple.
# NOTE(review): pickle.load is only safe because this file is produced locally.
with open(file_path, 'rb') as file:
    data,api = pickle.load(file)
# Print both halves of the deserialized tuple
print(data)
print(api)

View File

@ -82,13 +82,42 @@ def dimaoshibie_pic(model,path,count=None, name_classes=None):
output_floder_path = os.path.join(file_directory,'binary') output_floder_path = os.path.join(file_directory,'binary')
# 创建输出文件夹(如果不存在) # 创建输出文件夹(如果不存在)
os.makedirs(output_floder_path, exist_ok=True) os.makedirs(output_floder_path, exist_ok=True)
total_piex = {
"_background_" : 0, # Background (黑色)
"Cropland" : 0, # Cropland (淡黄色)
"Forest": 0, # Forest (深绿色)
"Grass": 0, # Grass (浅绿色)
"Shrub": 0, # Shrub (浅蓝绿色)
"Wetland": 0, # Wetland (浅蓝色)
"Water": 0, # Water (深蓝色)
"Tundra": 0, # Tundra (土黄色)
"Impervious surface": 0, # Impervious surface (红色)
"Bareland": 0, # Bareland (灰色)
"Ice/snow": 0 # Ice/snow (浅天蓝色)
}
for ii, line in enumerate(lines): for ii, line in enumerate(lines):
_imagepath = os.path.join(folder_path, line) _imagepath = os.path.join(folder_path, line)
r_image, count_dict, classes_nums = dimaoshibie_cal(model,_imagepath,count, name_classes) r_image, count_dict, classes_nums = dimaoshibie_cal(model,_imagepath,count, name_classes)
final_path = os.path.join(output_floder_path,line) final_path = os.path.join(output_floder_path,line)
# 保存融合后的图像 # 保存融合后的图像
r_image.save(final_path) # 替换为你想保存的路径 r_image.save(final_path) # 替换为你想保存的路径
for key in count_dict:
if key in total_piex:
total_piex[key] += count_dict[key]
# 将像素分类,存储
# 创建一个新的字典,用于存储中文键
translated_total_piex = {}
for key, value in total_piex.items():
if key in param.dmsb_type:
translated_total_piex[param.dmsb_type[key]] = value
output_file_path = os.path.join(file_directory,'result.txt')
# 写入字典到文本文件
with open(output_file_path, 'w') as file:
for key, value in translated_total_piex.items():
file.write(f"{key}: {value}\n")
# 图片合并 # 图片合并
status = prepare_data.merge_pic_binary(path,['binary']) status = prepare_data.merge_pic_binary(path,['binary'])
@ -115,6 +144,9 @@ def dimaoshibie_pic(model,path,count=None, name_classes=None):
"msg": "临时文件删除失败", "msg": "临时文件删除失败",
"data": None "data": None
} }
# 返回图片地址 # 返回图片地址
return {"status":True, "reason":target_path} return {"status":True, "reason":target_path}
@ -131,10 +163,10 @@ def dimaoshibie_area(path,scale,colors=None):
file_path = os.path.join(file_directory,"merge_binary_" + file_name) file_path = os.path.join(file_directory,"merge_binary_" + file_name)
if not os.path.exists(file_path): if not os.path.exists(file_path):
return {"status": False, "reason": "没有找到对应文件,请先进行图像分割", "result":""} return {"status": False, "reason": "没有找到对应文件,请先进行图像分割", "result":""}
image = Image.open(path) image = Image.open(path)
try: try:
# 如果文件存在,读取图片 # 如果文件存在,读取图片
# logger.info(file_path)
image = Image.open(file_path) image = Image.open(file_path)
image = image.convert('RGB') image = image.convert('RGB')
color_count = defaultdict(int) color_count = defaultdict(int)
@ -144,8 +176,11 @@ def dimaoshibie_area(path,scale,colors=None):
pixel_color = image.getpixel((x, y)) pixel_color = image.getpixel((x, y))
if pixel_color in colors: if pixel_color in colors:
color_count[pixel_color] += 1 color_count[pixel_color] += 1
else:
color_count[pixel_color] += 1
for k,v in color_count.items(): for k,v in color_count.items():
color_count[k] = v * scale color_count[k] = v * scale
# logger.info(color_count)
result = {color: color_count[color] for color in colors} result = {color: color_count[color] for color in colors}
return {"status": True, "reason": result} return {"status": True, "reason": result}
except Exception as e: except Exception as e:
@ -367,6 +402,74 @@ def roofpv_pic(net_roof,net_pv,path,palette):
# 甲烷预测 # 甲烷预测
# def start_predict_endpoint(ch4_model_flow,ch4_model_gas,data_path, start_index, end_index, type, is_show):
# try:
# data = pd.read_csv(data_path)
# data['date_time'] = pd.to_datetime(data['date_time'])
# max_date = data['date_time'].max()
# min_date = data['date_time'].min()
# start_index = pd.to_datetime(start_index)
# end_index = pd.to_datetime(end_index)
# if max_date < end_index :
# return {"reason": "结束日期填写错误,超过上传数据最大日期","status": False}
# if min_date > start_index :
# return {"reason": "开始日期填写错误,小于上传数据最小日期","status": False}
# end_index_dt = pd.to_datetime(end_index)
# end_index_plus_one_hour = end_index_dt + pd.Timedelta(hours=1)
# filtered_data = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)]
# if is_show:
# if len(filtered_data) < 96:
# return {"reason": "日期填写错误截取步长应该超过96个步长","status": False}
# if max_date < end_index_plus_one_hour:
# return {"reason": "选择显示真实值,需要保留最终后四节点作为展示信息,请调整结束日期","status": False}
# else:
# if len(filtered_data) < 96:
# return {"reason": "上传文件中有效信息长度应大于96","status": False}
# train_data = prepare_data.get_pred_data(data,start_index,end_index)
# del train_data['date_time']
# train_data = np.array(train_data.values)
# train_data = xgb.DMatrix(train_data)
# target = None
# if type == 1: # 流量
# target = "Nm3d-1-ch4"
# result = ch4_model_flow.predict(train_data)
# else: # 气相
# target = "S_gas_ch4"
# result = ch4_model_gas.predict(train_data)
# if is_show:
# history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index_plus_one_hour)]
# cols = ['date_time']
# cols.append(target)
# history = history[cols]
# history.rename(columns={target: 'true_data'}, inplace=True)
# history['pred_data'] = 0
# total_rows = len(history)
# history.reset_index(drop=True, inplace=True)
# history.loc[total_rows - 4:total_rows - 1, 'pred_data'] = result[0]
# else:
# history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)]
# history.reset_index(drop=True, inplace=True)
# cols = ['date_time']
# cols.append(target)
# history = history[cols]
# history.rename(columns={target: 'true_data'}, inplace=True)
# history['pred_data'] = 0
# last_date = history['date_time'].iloc[-1]
# # 创建新的日期和对应的值
# new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(4)]
# new_data = pd.DataFrame({
# 'date_time': new_dates,
# 'true_data':[0,0,0,0],
# 'pred_data': result[0]
# })
# history = pd.concat([history, new_data], ignore_index=True)
# return {"status": True,"reason":history.to_dict(orient='records')}
# except Exception as e:
# return{"reason": str(e),"status":False}
def start_predict_endpoint(ch4_model_flow,ch4_model_gas,data_path, start_index, end_index, type, is_show): def start_predict_endpoint(ch4_model_flow,ch4_model_gas,data_path, start_index, end_index, type, is_show):
try: try:
data = pd.read_csv(data_path) data = pd.read_csv(data_path)
@ -403,40 +506,43 @@ def start_predict_endpoint(ch4_model_flow,ch4_model_gas,data_path, start_index,
target = "S_gas_ch4" target = "S_gas_ch4"
result = ch4_model_gas.predict(train_data) result = ch4_model_gas.predict(train_data)
if is_show: if is_show:
"""两字典,历史+真实,预测"""
history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index_plus_one_hour)] history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index_plus_one_hour)]
cols = ['date_time'] cols = ['date_time']
cols.append(target) cols.append(target)
history = history[cols] history = history[cols]
history.rename(columns={target: 'true_data'}, inplace=True) # history.rename(columns={target: 'true_data'}, inplace=True)
history['pred_data'] = 0 last_date = history['date_time'].iloc[-5]
total_rows = len(history) new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(4)]
history.reset_index(drop=True, inplace=True) new_data = pd.DataFrame({
history.loc[total_rows - 4:total_rows - 1, 'pred_data'] = result[0] 'date_time': new_dates,
target: result[0]
})
else: else:
history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)] history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)]
history.reset_index(drop=True, inplace=True) history.reset_index(drop=True, inplace=True)
cols = ['date_time'] cols = ['date_time']
cols.append(target) cols.append(target)
history = history[cols] history = history[cols]
history.rename(columns={target: 'true_data'}, inplace=True)
history['pred_data'] = 0
last_date = history['date_time'].iloc[-1] last_date = history['date_time'].iloc[-1]
# 创建新的日期和对应的值 # 创建新的日期和对应的值
new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(4)] new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(4)]
new_data = pd.DataFrame({ new_data = pd.DataFrame({
'date_time': new_dates, 'date_time': new_dates,
'true_data':[0,0,0,0], target: result[0]
'pred_data': result[0]
}) })
history = pd.concat([history, new_data], ignore_index=True)
return {"status": True,"reason":history.to_dict(orient='records')}
return {"status": True,"reason":[history.to_dict(orient='records'),new_data.to_dict(orient='records')]}
except Exception as e: except Exception as e:
return{"reason": str(e),"status":False} return{"reason": str(e),"status":False}
# 光伏出力预测 - 预测长度96 # 光伏出力预测 - 预测长度96
def start_pvelectric_predict_endpoint(pvfd_model,data_path, start_index, end_index, is_show): def start_pvelectric_predict_endpoint(pvfd_model,data_path, start_index, end_index, is_show):
try: try:
data = pd.read_csv(data_path) data = pd.read_csv(data_path)
@ -471,32 +577,91 @@ def start_pvelectric_predict_endpoint(pvfd_model,data_path, start_index, end_ind
cols = ['date_time'] cols = ['date_time']
cols.append(target) cols.append(target)
history = history[cols] history = history[cols]
history.rename(columns={target: 'true_data'}, inplace=True) # history.rename(columns={target: 'true_data'}, inplace=True)
history['pred_data'] = 0 # history['pred_data'] = 0
total_rows = len(history) last_date = history['date_time'].iloc[-97]
history.reset_index(drop=True, inplace=True) new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(96)]
history.loc[total_rows - 96:total_rows - 1, 'pred_data'] = predictions_value new_data = pd.DataFrame({
'date_time': new_dates,
target: predictions_value
})
else: else:
history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)] history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)]
history.reset_index(drop=True, inplace=True) history.reset_index(drop=True, inplace=True)
cols = ['date_time'] cols = ['date_time']
cols.append(target) cols.append(target)
history = history[cols] history = history[cols]
history.rename(columns={target: 'true_data'}, inplace=True)
history['pred_data'] = 0
last_date = history['date_time'].iloc[-1] last_date = history['date_time'].iloc[-1]
# 创建新的日期和对应的值 # 创建新的日期和对应的值
new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(96)] new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(96)]
new_data = pd.DataFrame({ new_data = pd.DataFrame({
'date_time': new_dates, 'date_time': new_dates,
'true_data':[0]*96, target: predictions_value
'pred_data': predictions_value
}) })
history = pd.concat([history, new_data], ignore_index=True)
return {"status": True,"reason":history.to_dict(orient='records')} return {"status": True,"reason":[history.to_dict(orient='records'),new_data.to_dict(orient='records')]}
except Exception as e: except Exception as e:
return{"reason": str(e),"status":False} return{"reason": str(e),"status":False}
# def start_pvelectric_predict_endpoint(pvfd_model,data_path, start_index, end_index, is_show):
# try:
# data = pd.read_csv(data_path)
# data['date_time'] = pd.to_datetime(data['date_time'])
# max_date = data['date_time'].max()
# min_date = data['date_time'].min()
# start_index = pd.to_datetime(start_index)
# end_index = pd.to_datetime(end_index)
# if max_date < end_index :
# return {"reason": "结束日期填写错误,超过上传数据最大日期","status": False}
# if min_date > start_index :
# return {"reason": "开始日期填写错误,小于上传数据最小日期","status": False}
# end_index_dt = pd.to_datetime(end_index)
# end_index_plus_one_day = end_index_dt + pd.Timedelta(hours=24)
# filtered_data = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)]
# if is_show:
# if len(filtered_data) < 192:
# return {"reason": "日期填写错误截取步长应该超过192个步长","status": False}
# if max_date < end_index_plus_one_day:
# return {"reason": "选择显示真实值需要保留最终一天数据96点作为展示信息请调整结束日期","status": False}
# else:
# if len(filtered_data) < 192:
# return {"reason": "上传文件中有效信息长度应大于192","status": False}
# predictions = pvfd_model.run_inference(filtered_data)
# if predictions['status'] == True:
# predictions_value = np.array(predictions['reason']).flatten()
# predictions_value = [max(0, x) for x in predictions_value]
# target = "power"
# if is_show:
# history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index_plus_one_day)]
# cols = ['date_time']
# cols.append(target)
# history = history[cols]
# history.rename(columns={target: 'true_data'}, inplace=True)
# history['pred_data'] = 0
# total_rows = len(history)
# history.reset_index(drop=True, inplace=True)
# history.loc[total_rows - 96:total_rows - 1, 'pred_data'] = predictions_value
# else:
# history = data[(data['date_time'] >= start_index) & (data['date_time'] <= end_index)]
# history.reset_index(drop=True, inplace=True)
# cols = ['date_time']
# cols.append(target)
# history = history[cols]
# history.rename(columns={target: 'true_data'}, inplace=True)
# history['pred_data'] = 0
# last_date = history['date_time'].iloc[-1]
# # 创建新的日期和对应的值
# new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(96)]
# new_data = pd.DataFrame({
# 'date_time': new_dates,
# 'true_data':[0]*96,
# 'pred_data': predictions_value
# })
# history = pd.concat([history, new_data], ignore_index=True)
# return {"status": True,"reason":history.to_dict(orient='records')}
# except Exception as e:
# return{"reason": str(e),"status":False}
@ -535,32 +700,92 @@ def start_wind_electric_predict_endpoint(windfd_model,data_path, start_index, en
cols = ['date'] cols = ['date']
cols.append(target) cols.append(target)
history = history[cols] history = history[cols]
history.rename(columns={target: 'true_data'}, inplace=True) # history.rename(columns={target: 'true_data'}, inplace=True)
history['pred_data'] = 0 last_date = history['date'].iloc[-13]
total_rows = len(history) new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(12)]
history.reset_index(drop=True, inplace=True) new_data = pd.DataFrame({
history.loc[total_rows - 12:total_rows - 1, 'pred_data'] = predictions_value 'date': new_dates,
target: predictions_value
})
else: else:
history = data[(data['date'] >= start_index) & (data['date'] <= end_index)] history = data[(data['date'] >= start_index) & (data['date'] <= end_index)]
history.reset_index(drop=True, inplace=True) history.reset_index(drop=True, inplace=True)
cols = ['date'] cols = ['date']
cols.append(target) cols.append(target)
history = history[cols] history = history[cols]
history.rename(columns={target: 'true_data'}, inplace=True)
history['pred_data'] = 0
last_date = history['date'].iloc[-1] last_date = history['date'].iloc[-1]
# 创建新的日期和对应的值 # 创建新的日期和对应的值
new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(12)] new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(12)]
new_data = pd.DataFrame({ new_data = pd.DataFrame({
'date': new_dates, 'date': new_dates,
'true_data':[0]*12, target: predictions_value
'pred_data': predictions_value
}) })
history = pd.concat([history, new_data], ignore_index=True) history.rename(columns={target: 'power'}, inplace=True)
new_data.rename(columns={target: 'power'}, inplace=True)
return {"status": True,"reason":history.to_dict(orient='records')} return {"status": True,"reason":[history.to_dict(orient='records'),new_data.to_dict(orient='records')]}
# except Exception as e: # except Exception as e:
# return{"reason": str(e),"status":False} # return{"reason": str(e),"status":False}
# def start_wind_electric_predict_endpoint(windfd_model,data_path, start_index, end_index, is_show):
# # try:
# data = pd.read_csv(data_path)
# data['date'] = pd.to_datetime(data['date'])
# max_date = data['date'].max()
# min_date = data['date'].min()
# start_index = pd.to_datetime(start_index)
# end_index = pd.to_datetime(end_index)
# if max_date < end_index :
# return {"reason": "结束日期填写错误,超过上传数据最大日期","status": False}
# if min_date > start_index :
# return {"reason": "开始日期填写错误,小于上传数据最小日期","status": False}
# end_index_dt = pd.to_datetime(end_index)
# end_index_plus_one_day = end_index_dt + pd.Timedelta(hours=3)
# filtered_data = data[(data['date'] >= start_index) & (data['date'] <= end_index)]
# if is_show:
# if len(filtered_data) < 192:
# return {"reason": "日期填写错误截取步长应该超过192个步长","status": False}
# if max_date < end_index_plus_one_day:
# return {"reason": "选择显示真实值需要保留最终一天数据12个点作为展示信息请调整结束日期","status": False}
# else:
# if len(filtered_data) < 192:
# return {"reason": "上传文件中有效信息长度应大于192","status": False}
# predictions = windfd_model.run_inference(filtered_data)
# if predictions['status'] == True:
# predictions_value = np.array(predictions['reason']).flatten()
# predictions_value = [max(0, x) for x in predictions_value]
# target = "Power"
# if is_show:
# history = data[(data['date'] >= start_index) & (data['date'] <= end_index_plus_one_day)]
# cols = ['date']
# cols.append(target)
# history = history[cols]
# history.rename(columns={target: 'true_data'}, inplace=True)
# history['pred_data'] = 0
# total_rows = len(history)
# history.reset_index(drop=True, inplace=True)
# history.loc[total_rows - 12:total_rows - 1, 'pred_data'] = predictions_value
# else:
# history = data[(data['date'] >= start_index) & (data['date'] <= end_index)]
# history.reset_index(drop=True, inplace=True)
# cols = ['date']
# cols.append(target)
# history = history[cols]
# history.rename(columns={target: 'true_data'}, inplace=True)
# history['pred_data'] = 0
# last_date = history['date'].iloc[-1]
# # 创建新的日期和对应的值
# new_dates = [last_date + pd.Timedelta(minutes=15 * (i + 1)) for i in range(12)]
# new_data = pd.DataFrame({
# 'date': new_dates,
# 'true_data':[0]*12,
# 'pred_data': predictions_value
# })
# history = pd.concat([history, new_data], ignore_index=True)
# return {"status": True,"reason":history.to_dict(orient='records')}
# # except Exception as e:
# # return{"reason": str(e),"status":False}
def pred_single_tar(test_content): def pred_single_tar(test_content):

View File

@ -46,21 +46,34 @@ class ModelParams():
(211, 242, 255) # Ice/snow (浅天蓝色) (211, 242, 255) # Ice/snow (浅天蓝色)
] ]
dmsb_type = { dmsb_type = {
(0, 0, 0) : "背景", # Background (黑色) "_background_" : "背景", # Background (黑色)
(252, 250, 205) : "农田", # Cropland (淡黄色) "Cropland" : "农田", # Cropland (淡黄色)
(0, 123, 79): "森林", # Forest (深绿色) "Forest": "森林", # Forest (深绿色)
(157, 221, 106): "草地", # Grass (浅绿色) "Grass": "草地", # Grass (浅绿色)
(77, 208, 159): "灌木", # Shrub (浅蓝绿色) "Shrub": "灌木", # Shrub (浅蓝绿色)
(111, 208, 242): "湿地", # Wetland (浅蓝色) "Wetland": "湿地", # Wetland (浅蓝色)
(10, 78, 151): "水体", # Water (深蓝色) "Water": "水体", # Water (深蓝色)
(92, 106, 55): "苔原", # Tundra (土黄色) "Tundra": "苔原", # Tundra (土黄色)
(155, 36, 22): "建筑", # Impervious surface (红色) "Impervious surface": "建筑", # Impervious surface (红色)
(205, 205, 205): "裸地", # Bareland (灰色) "Bareland": "裸地", # Bareland (灰色)
(211, 242, 255): "冰雪" # Ice/snow (浅天蓝色) "Ice/snow": "冰雪" # Ice/snow (浅天蓝色)
} }
# dmsb_type = {
# (0, 0, 0) : "背景", # Background (黑色)
# (252, 250, 205) : "农田", # Cropland (淡黄色)
# (0, 123, 79): "森林", # Forest (深绿色)
# (157, 221, 106): "草地", # Grass (浅绿色)
# (77, 208, 159): "灌木", # Shrub (浅蓝绿色)
# (111, 208, 242): "湿地", # Wetland (浅蓝色)
# (10, 78, 151): "水体", # Water (深蓝色)
# (92, 106, 55): "苔原", # Tundra (土黄色)
# (155, 36, 22): "建筑", # Impervious surface (红色)
# (205, 205, 205): "裸地", # Bareland (灰色)
# (211, 242, 255): "冰雪" # Ice/snow (浅天蓝色)
# }
# 前缀 wdpv 屋顶光伏 # 前缀 wdpv 屋顶光伏
wdpv_palette = [0, 0, 0, 255, 0, 0, 0, 255, 0] wdpv_palette = [0, 0, 0, 255, 0, 0, 0, 255, 0]

View File

@ -1,6 +1,6 @@
from pydantic import BaseModel from pydantic import BaseModel
from typing import Optional from typing import Optional
from typing import List
class Zongbiaomianji(BaseModel): class Zongbiaomianji(BaseModel):
A: float A: float
@ -63,3 +63,15 @@ class FormData(BaseModel):
Rt_min: Optional[float] = None Rt_min: Optional[float] = None
Rt_max: Optional[float] = None Rt_max: Optional[float] = None
Rt_step: Optional[float] = None Rt_step: Optional[float] = None
class samItem(BaseModel):
class_name: str
color: Optional[List[int]] = None
path: str
class samItem2(BaseModel):
current_index:int
rgb_color: List[int]
path: str

View File

@ -57,9 +57,11 @@ def reset_annotation(loaded_data):
def remove_class(data,api,class_name): def remove_class(data,api,class_name):
if class_name in api['class_annotations']: if class_name in api['class_annotations']:
del api['class_annotations'][class_name] del api['class_annotations'][class_name]
del api['class_colors'][class_name]
if class_name == data['class_names'][data['class_index']]: if class_name == data['class_names'][data['class_index']]:
data['class_index'] = 0 # data['class_index'] = 0
data['class_index'] = -1
api['current_class'] = None
data['class_names'].remove(class_name) data['class_names'].remove(class_name)
if class_name in data['class_colors']: if class_name in data['class_colors']:
del data['class_colors'][class_name] del data['class_colors'][class_name]
@ -236,7 +238,6 @@ def add_to_class(api,class_name):
def get_image_display(data,api): def get_image_display(data,api):
if data['image'] is None: if data['image'] is None:
logger.info("get_display_image: No image loaded") logger.info("get_display_image: No image loaded")
return {"status":False, "reason":"获取图像:没有图像加载"} return {"status":False, "reason":"获取图像:没有图像加载"}
@ -245,10 +246,11 @@ def get_image_display(data,api):
for point, label in zip(data['input_point'], data['input_label']): for point, label in zip(data['input_point'], data['input_label']):
color = (0, 255, 0) if label == 1 else (0, 0, 255) color = (0, 255, 0) if label == 1 else (0, 0, 255)
cv2.circle(display_image, tuple(point), 5, color, -1) cv2.circle(display_image, tuple(point), 5, color, -1)
if data['selected_mask'] is not None: if data['class_index'] != -1:
class_name = data['class_names'][data['class_index']] if data['selected_mask'] is not None:
color = data['class_colors'].get(class_name, (0, 0, 128)) class_name = data['class_names'][data['class_index']]
display_image = sam_apply_mask(display_image, data['selected_mask'], color) color = data['class_colors'].get(class_name, (0, 0, 128))
display_image = sam_apply_mask(display_image, data['selected_mask'], color)
logger.info(f"get_display_image: Returning image with shape {display_image.shape}") logger.info(f"get_display_image: Returning image with shape {display_image.shape}")
img = display_image img = display_image
if not isinstance(img, np.ndarray) or img.size == 0: if not isinstance(img, np.ndarray) or img.size == 0: