# ai-station-code/fast_api_run.py

import sys
import os
from pydantic import BaseModel
from typing import List
from fastapi import FastAPI, HTTPException
import asyncio
import pandas as pd
import numpy as np
from PIL import Image
# Determine the directory containing this script
print("Current working directory:", os.getcwd())
current_dir = os.path.dirname(os.path.abspath(__file__))
# Add it to the module search path
sys.path.append(os.path.join(current_dir))
print("Current sys.path:", sys.path)
import torch
from dimaoshibie import segformer
from wudingpv.taihuyuan_roof.manet.model.resunet import resUnetpamcarb as roof_resUnetpamcarb
from wudingpv.predictandeval_util import segmentation
from guangfufadian import model_base as guangfufadian_model_base
from fenglifadian import model_base as fenglifadian_model_base
from work_util import prepare_data,model_deal,params,data_util,post_model,sam_deal
from work_util.logger import logger
import joblib
import pickle
from segment_anything_model import sam_annotator
import cv2
import io
import json
import base64
from segment_anything_model.sam_config import sam_config,sam_api_config
from segment_anything_model.segment_anything import sam_model_registry, SamPredictor
import traceback
# def get_roof_model():
# model_roof = roof_resUnetpamcarb()
# model_path_roof = os.path.join(current_dir,'wudingpv/models/roof_best.pth')
# model_dict_roof = torch.load(model_path_roof, map_location=torch.device('cpu'))
# model_roof.load_state_dict(model_dict_roof['net'])
# logger.info("屋顶识别权重加载成功")
# model_roof.eval()
# model_roof.cuda()
# return model_roof
# def get_pv_model():
# model_roof = roof_resUnetpamcarb()
# model_path_roof = os.path.join(current_dir,'wudingpv/models/pv_best.pth')
# model_dict_roof = torch.load(model_path_roof, map_location=torch.device('cpu'))
# model_roof.load_state_dict(model_dict_roof['net'])
# logger.info("屋顶识别权重加载成功")
# model_roof.eval()
# model_roof.cuda()
# return model_roof
# # 初始化 FastAPI
# app = FastAPI()
# # 初始化参数
# param = params.ModelParams()
# pvfd_param = guangfufadian_model_base.guangfufadian_Args()
# windfd_args = fenglifadian_model_base.fenglifadian_Args()
# # 模型实例
# dimaoshibie_SegFormer = segformer.SegFormer_Segmentation()
# roof_model = get_roof_model()
# pv_model = get_pv_model()
# pvfd_model_path = os.path.join(pvfd_param.checkpoints,'Crossformer_station08_il192_ol96_sl6_win2_fa10_dm256_nh4_el3_itr0/checkpoint.pth') # 修改为实际模型路径
# pvfd_model = guangfufadian_model_base.ModelInference(pvfd_model_path, pvfd_param)
# windfd_model_path = os.path.join(windfd_args.checkpoints,'Crossformer_Wind_farm_il192_ol12_sl6_win2_fa10_dm256_nh4_el3_itr0/checkpoint.pth') # 修改为实际模型路径
# windfd_model = fenglifadian_model_base.ModelInference(windfd_model_path, windfd_args)
# ch4_model_flow = joblib.load(os.path.join(current_dir,'jiawanyuce/liuliang_model/xgb_model_liuliang.pkl'))
# ch4_model_gas = joblib.load(os.path.join(current_dir,'jiawanyuce/qixiangnongdu_model/xgb_model_qixiangnongdu.pkl'))
# ==================================SAM=================================================
# The frontend scales the image to fit its canvas, so click points must be mapped back to original-image coordinates before being sent here
# Example per-session paths
location = "http://124.16.151.196:13432/files/tmp/sam/dc345a3c4-a75a-4121-91fc-e4b9f488384f/input/微信图片_20250506163349.jpg"
input_dir = "/home/xiazj/ai-station-code/tmp/sam/c345a3c4-a75a-4121-91fc-e4b9f488384f/input"
output_dir = "/home/xiazj/ai-station-code/tmp/sam/c345a3c4-a75a-4121-91fc-e4b9f488384f/output"
file_path = "/home/xiazj/ai-station-code/tmp/sam/282dc905-6d9d-40aa-8ce1-62160f5f9864"
# Load the SAM model
checkpoint_path = os.path.join(current_dir,'segment_anything_model/weights/vit_b.pth')
sam = sam_model_registry["vit_b"](checkpoint=checkpoint_path)
# Detect CUDA via torch, since SAM is a PyTorch model
device = "cuda" if torch.cuda.is_available() else "cpu"
_ = sam.to(device=device)
sam_predictor = SamPredictor(sam)
print(f"SAM模型已加载使用设备: {device}")
# Add a class
def sam_class_set(class_name, color,path):
    # Load the saved annotator state and API config
loaded_data,api_config = sam_deal.load_model(path)
result = sam_deal.add_class(loaded_data,class_name,color)
if result['status'] == True:
loaded_data = result['reason']
else:
return {
"success":False,
"msg":result['reason'],
"data":None
}
loaded_data['class_index'] = loaded_data['class_names'].index(class_name)
r, g, b = [int(c) for c in color]
bgr_color = (b, g, r)
result, api_config = sam_deal.set_current_class(loaded_data, api_config, loaded_data['class_index'], color=bgr_color)
    # Persist the updated configuration
sam_deal.save_model(loaded_data,api_config,path)
return {"success":True,
"msg":f"已添加类别: {class_name}, 颜色: {color}",
"data":{"class_name_list": loaded_data['class_names'],
"current_index": loaded_data['class_index'],
"class_dict":loaded_data['class_colors']
}}
# class_name = 'water'
# color = [0,255,0]
# path = file_path
# sam_class_set(class_name=class_name, color=color, path=path)
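# Hedged sketch: the FastAPI/BaseModel imports at the top of the file suggest these helpers are
# meant to back HTTP endpoints (the app itself is commented out above). One possible wiring for
# sam_class_set is shown below; the route path, request model and `app` instance are assumptions.
# app = FastAPI()
#
# class SamClassRequest(BaseModel):
#     class_name: str
#     color: List[int]   # [r, g, b]
#     path: str          # per-session working directory
#
# @app.post("/sam/class")
# def api_sam_class_set(req: SamClassRequest):
#     result = sam_class_set(req.class_name, req.color, req.path)
#     if not result["success"]:
#         raise HTTPException(status_code=400, detail=result["msg"])
#     return result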
# Select a class - pass the label index
def on_class_selected(class_index,path):
    # Load the saved annotator state and API config
loaded_data,api_config = sam_deal.load_model(path)
result, api_config = sam_deal.set_current_class(loaded_data, api_config, class_index, color=None)
sam_deal.save_model(loaded_data,api_config,path)
if result:
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
else:
return {
"success":False,
"msg":"分类标签识别错误",
"data":None
}
# on_class_selected(2,file_path)
# Set the display color of a class
def set_sam_color(current_index, rgb_color,path):
loaded_data,api_config = sam_deal.load_model(path)
r, g, b = [int(c) for c in rgb_color]
bgr_color = (b, g, r)
data, api = sam_deal.set_class_color(loaded_data, api_config, current_index, bgr_color)
sam_deal.save_model(data,api,path)
    # refresh with the updated state rather than the stale copies loaded above
    img = sam_deal.refresh_image(data,api,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
# set_sam_color(2,(0,255,0),file_path)
# Remove a class
def sam_remove_class(path,select_index):
loaded_data,api_config = sam_deal.load_model(path)
class_name = loaded_data['class_names'][select_index]
loaded_data,api_config = sam_deal.remove_class(loaded_data,api_config,class_name)
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
# sam_remove_class(file_path,1)
# Add a foreground point (left mouse click)
def left_mouse_down(x,y,path):
loaded_data,api_config = sam_deal.load_model(path)
if not api_config['current_class']:
return {
"success":False,
"msg":"请先选择一个分类,在添加标点之前",
"image":None
}
is_foreground = True
result = sam_deal.add_annotation_point(api_config,x,y,is_foreground)
if result['status']== False:
return {
"success":False,
"msg":result['reason'],
"image":None
}
api_config = result['reason']
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
encoded_string = sam_deal.load_tmp_image(img['reason'])
return {
"success":True,
"msg":"",
"image":encoded_string
}
else:
return {
"success":False,
"msg":img['reason'],
"image":None
}
# left_mouse_down(27,326,file_path)
# left_mouse_down(67,449,file_path)
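# Hedged sketch: left_mouse_down returns the refreshed preview via sam_deal.load_tmp_image.
# Assuming that value is a plain base64 string of the JPEG bytes, a caller could decode it for a
# quick visual check like this (paths and coordinates are placeholders):
# resp = left_mouse_down(27, 326, file_path)
# if resp["success"]:
#     with open(os.path.join(file_path, "temp", "preview.jpg"), "wb") as f:
#         f.write(base64.b64decode(resp["image"]))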
# Add a background point (right mouse click)
def right_mouse_down(x,y,path):
loaded_data,api_config = sam_deal.load_model(path)
if not api_config['current_class']:
return {
"success":False,
"msg":"请先选择一个分类,在添加标点之前",
"data":None
}
    is_foreground = False
    # add_annotation_point returns a status dict (as in left_mouse_down), not a tuple
    result = sam_deal.add_annotation_point(api_config,x,y,is_foreground)
    if result['status'] == False:
        return {
            "success":False,
            "msg":result['reason'],
            "data":None
        }
    api_config = result['reason']
    sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
# x = 231
# y = 281
# right_mouse_down(x,y,file_path)
# Delete the most recently added point
def sam_delete_last_point(path):
loaded_data,api_config = sam_deal.load_model(path)
result = sam_deal.delete_last_point(api_config)
if result['status'] == True:
api_config = result['reason']
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
else:
return {
"success":False,
"msg":result['reason'],
"data":None
}
# sam_delete_last_point(file_path)
# Clear all points of the current class
def sam_clear_all_point(path):
loaded_data,api_config = sam_deal.load_model(path)
result = sam_deal.reset_current_class_points(api_config)
if result['status'] == True:
api_config = result['reason']
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
else:
return {
"success":False,
"msg":result['reason'],
"data":None
}
# sam_clear_all_point(file_path)
# Run SAM mask prediction for the current class
def sam_predict_mask(path):
loaded_data,api_config = sam_deal.load_model(path)
class_data = api_config['class_annotations'].get(api_config['current_class'], {})
if not class_data.get('points'):
return {
"success":False,
"msg":"请在预测前添加至少一个预测样本点",
"data":None
}
else:
loaded_data = sam_deal.reset_annotation(loaded_data)
        # Copy the annotated points into loaded_data
for i, (x, y) in enumerate(class_data['points']):
is_foreground = class_data['point_types'][i]
loaded_data = sam_deal.add_point(loaded_data, x, y, is_foreground=is_foreground)
try:
result = sam_deal.predict_mask(loaded_data,sam_predictor)
if result['status'] == False:
return {
"success":False,
"msg":result['reason'],
"data":None}
result = result['reason']
loaded_data = result['data']
class_data['masks'] = [np.array(mask, dtype=np.uint8) for mask in result['masks']]
class_data['scores'] = result['scores']
class_data['selected_mask_index'] = result['selected_index']
if result['selected_index'] >= 0:
class_data['selected_mask'] = class_data['masks'][result['selected_index']]
logger.info(f"predict: Predicted {len(result['masks'])} masks, selected index: {result['selected_index']}")
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
except Exception as e:
logger.error(f"predict: Error during prediction: {str(e)}")
traceback.print_exc()
return {
"success":False,
"msg":f"predict: Error during prediction: {str(e)}",
"data":None}
# sam_predict_mask(file_path)
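# Hedged sketch of the end-to-end flow the annotation helpers in this file implement (including
# sam_add_to_class and sam_save_annotation defined below): create a class, select it, drop a few
# foreground/background points, predict, fold the selected mask into the class and export.
# Class names, colors and coordinates are placeholders.
# sam_class_set("roof", [255, 0, 0], file_path)
# on_class_selected(0, file_path)
# left_mouse_down(120, 80, file_path)    # foreground point
# right_mouse_down(40, 200, file_path)   # background point
# sam_predict_mask(file_path)
# sam_add_to_class(file_path, 0)
# sam_save_annotation(file_path)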
# Clear the prediction results and all point annotations
def sam_reset_annotation(path):
loaded_data,api_config = sam_deal.load_model(path)
loaded_data,api_config = sam_deal.reset_annotation_all(loaded_data,api_config)
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
# sam_reset_annotation(file_path)
# Commit the predicted mask to a class
# class_index: the currently selected class
def sam_add_to_class(path,class_index):
loaded_data,api_config = sam_deal.load_model(path)
class_name = loaded_data['class_names'][class_index]
result = sam_deal.add_to_class(api_config,class_name)
if result['status'] == True:
api_config = result['reason']
sam_deal.save_model(loaded_data,api_config,path)
img = sam_deal.refresh_image(loaded_data,api_config,path)
if img['status'] == True:
return {
"success":True,
"msg":"",
"data":img['reason']
}
else:
return {
"success":False,
"msg":img['reason'],
"data":None
}
else:
return {
"success":False,
"msg":result['reason'],
"data":None
}
# sam_add_to_class(file_path,2)
# Export the annotation: original image, mask overlay and a labelme-style JSON
def sam_save_annotation(path):
loaded_data,api_config = sam_deal.load_model(path)
if not api_config['output_dir']:
logger.info("save_annotation: Output directory not set")
return {
"success":False,
"msg":"save_annotation: Output directory not set",
"data":None
}
has_annotations = False
for class_name, class_data in api_config['class_annotations'].items():
if 'final_mask' in class_data and class_data['final_mask'] is not None:
has_annotations = True
break
if not has_annotations:
logger.info("save_annotation: No final masks to save")
return {
"success":False,
"msg":"save_annotation: No final masks to save",
"data":None
}
image_info = sam_deal.get_image_info(loaded_data)
if not image_info:
logger.info("save_annotation: No image info available")
return {
"success":False,
"msg":"save_annotation: No image info available",
"data":None
}
image_basename = os.path.splitext(image_info['filename'])[0]
annotation_dir = os.path.join(api_config['output_dir'], image_basename)
    # annotation_dir = file_path + "/output/20241231160414"  # hard-coded test path, superseded by the directory computed above
os.makedirs(annotation_dir, exist_ok=True)
saved_files = []
orig_img = loaded_data['image']
original_img_path = os.path.join(annotation_dir, f"{image_basename}.jpg")
cv2.imwrite(original_img_path, orig_img)
saved_files.append(original_img_path)
vis_img = orig_img.copy()
img_height, img_width = orig_img.shape[:2]
labelme_data = {
"version": "5.1.1",
"flags": {},
"shapes": [],
"imagePath": f"{image_basename}.jpg",
"imageData": None,
"imageHeight": img_height,
"imageWidth": img_width
}
for class_name, class_data in api_config['class_annotations'].items():
if 'final_mask' in class_data and class_data['final_mask'] is not None:
color = api_config['class_colors'].get(class_name, (0, 255, 0))
vis_mask = class_data['final_mask'].copy()
color_mask = np.zeros_like(vis_img)
color_mask[vis_mask > 0] = color
vis_img = cv2.addWeighted(vis_img, 1.0, color_mask, 0.5, 0)
binary_mask = (class_data['final_mask'] > 0).astype(np.uint8)
contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
epsilon = 0.0001 * cv2.arcLength(contour, True)
approx_contour = cv2.approxPolyDP(contour, epsilon, True)
points = [[float(point[0][0]), float(point[0][1])] for point in approx_contour]
if len(points) >= 3:
shape_data = {
"label": class_name,
"points": points,
"group_id": None,
"shape_type": "polygon",
"flags": {}
}
labelme_data["shapes"].append(shape_data)
vis_path = os.path.join(annotation_dir, f"{image_basename}_mask.jpg")
cv2.imwrite(vis_path, vis_img)
saved_files.append(vis_path)
try:
is_success, buffer = cv2.imencode(".jpg", orig_img)
if is_success:
img_bytes = io.BytesIO(buffer).getvalue()
labelme_data["imageData"] = base64.b64encode(img_bytes).decode('utf-8')
else:
print("save_annotation: Failed to encode image data")
labelme_data["imageData"] = ""
except Exception as e:
logger.error(f"save_annotation: Could not encode image data: {str(e)}")
labelme_data["imageData"] = ""
json_path = os.path.join(annotation_dir, f"{image_basename}.json")
with open(json_path, 'w') as f:
json.dump(labelme_data, f, indent=2)
saved_files.append(json_path)
logger.info(f"save_annotation: Annotation saved to {annotation_dir}")
return {
"success":False,
"msg":"",
"data":{
'output_dir': annotation_dir,
'files': saved_files,
'classes': list(api_config['class_annotations'].keys())
}
}
# sam_save_annotation(file_path)
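# Hedged sketch: sam_save_annotation writes a labelme-style JSON next to the exported image.
# Reading it back and counting polygons per class is a quick sanity check (the path is a placeholder):
# from collections import Counter
# with open("/path/to/output/<image_basename>.json") as f:
#     ann = json.load(f)
# print(Counter(shape["label"] for shape in ann["shapes"]))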
# # 添加类别
# def sam_class_set(class_name:str=None, color:List = None ,path:str = None):
# # 加载配置内容
# config_path = os.path.join(path,'model_params.pickle')
# with open(config_path, 'rb') as file:
# loaded_data,api_config = pickle.load(file)
# if class_name in loaded_data['class_names']:
# return {
# "success":False,
# "msg":f"类别 '{class_name}' 已存在",
# "data":None
# }
# loaded_data['class_names'].append(class_name)
# if color is None:
# return {
# "success":False,
# "msg":f"请指定{class_name}代表的颜色",
# "data":None
# }
# loaded_data['class_colors'][class_name] = tuple(color)
# logger.info(f"已添加类别: {class_name}, 颜色: {color}")
# loaded_data['class_index'] = loaded_data['class_names'].index(class_name)
# api_config['current_class'] = class_name
# if class_name not in api_config['class_annotations']:
# api_config['class_annotations'][class_name] = {
# 'points': [],
# 'point_types': [],
# 'masks': [],
# 'selected_mask_index': -1
# }
# api_config['class_colors'][class_name] = tuple(color)
# save_data = (loaded_data,api_config)
# with open(config_path, 'wb') as file:
# pickle.dump(save_data, file)
# # 加载存储中的图片信息
# img = model_deal.get_display_image_ori(loaded_data)
# image = Image.fromarray(img['reason'])
# tmp_path = os.path.join(path,'temp/output_image.jpg')
# image.save(tmp_path)
# return {"success":True,
# "msg":f"已添加类别: {class_name}, 颜色: {color}",
# "data":{"class_name_list": loaded_data['class_names'],
# "current_index": loaded_data['class_index'],
# "class_dict":loaded_data['class_colors']
# }}
# # 加载class分类
# # sam_class_set(class_name='test11', color = [120,0,0], path=file_path)
# # 添加point
# # 添加标注器中的点信息
# def add_annotation_point(loaded_data,x, y, is_foreground=True):
# loaded_data['input_point'].append([x, y])
# loaded_data['input_label'].append(1 if is_foreground else 0)
# logger.info(f"添加{'前景' if is_foreground else '背景'}点: ({x}, {y})")
# return loaded_data
# def predict_mask(data):
# if data['image_rgb'] is None:
# logger.info("predict_mask: No image loaded")
# return {"status":False, "reason":"预测掩码:没有图像加载"}
# if len(data['input_point']) == 0:
# logger.info("predict_mask: No points added")
# return {"status":False, "reason":"预测掩码:没有进行点标注"}
# try:
# sam_predictor.set_image(data['image_rgb'])
# except Exception as e:
# logger.error(f"predict_mask: Error setting image: {str(e)}")
# return {"status":False, "reason":f"predict_mask: Error setting image: {str(e)}"}
# input_point_np = np.array(data['input_point'])
# input_label_np = np.array(data['input_label'])
# try:
# masks_pred, scores, logits = sam_predictor.predict(
# point_coords=input_point_np,
# point_labels=input_label_np,
# mask_input=data['logit_input'][None, :, :] if data['logit_input'] is not None else None,
# multimask_output=True,
# )
# except Exception as e:
# logger.error(f"predict_mask: Error during prediction: {str(e)}")
# traceback.print_exc()
# return {"status":False, "reason":f"predict_mask: Error during prediction: {str(e)}"}
# data['masks_pred'] = masks_pred
# data['scores'] = scores
# data['logits'] = logits
# best_mask_idx = np.argmax(scores)
# data['selected_mask'] = masks_pred[best_mask_idx]
# data['logit_input'] = logits[best_mask_idx, :, :]
# logger.info(f"predict_mask: Predicted {len(masks_pred)} masks, best score: {scores[best_mask_idx]:.4f}")
# return {"status":True, "reason":{
# "masks": [mask.tolist() for mask in masks_pred],
# "scores": scores.tolist(),
# "selected_index": int(best_mask_idx),
# "data":data
# }}
# def sam_apply_mask(image, mask, color, alpha=0.5):
# masked_image = image.copy()
# for c in range(3):
# masked_image[:, :, c] = np.where(
# mask == 1,
# image[:, :, c] * (1 - alpha) + alpha * color[c],
# image[:, :, c]
# )
# return masked_image
# def apply_mask_overlay(image, mask, color, alpha=0.5):
# colored_mask = np.zeros_like(image)
# colored_mask[mask > 0] = color
# return cv2.addWeighted(image, 1, colored_mask, alpha, 0)
# def get_image_display(data,api):
# if data['image'] is None:
# print("get_display_image: No image loaded")
# return {"status":False, "reason":"获取图像:没有图像加载"}
# display_image = data['image'].copy()
# try:
# for point, label in zip(data['input_point'], data['input_label']):
# color = (0, 255, 0) if label == 1 else (0, 0, 255)
# cv2.circle(display_image, tuple(point), 5, color, -1)
# if data['selected_mask'] is not None:
# class_name = data['class_names'][data['class_index']]
# color = data['class_colors'].get(class_name, (0, 0, 128))
# display_image = sam_apply_mask(display_image, data['selected_mask'], color)
# logger.info(f"get_display_image: Returning image with shape {display_image.shape}")
# if not isinstance(display_image, np.ndarray) or display_image.size == 0:
# logger.info(f"get_image_display: Invalid image array, shape: {display_image.shape if isinstance(display_image, np.ndarray) else 'None'}")
# return {"status":False, "reason":f"get_image_display: Invalid image array, shape: {display_image.shape if isinstance(display_image, np.ndarray) else 'None'}"}
# # 仅应用当前图片的final_mask
# for class_name, class_data in api['class_annotations'].items():
# if 'final_mask' in class_data and class_data['final_mask'] is not None:
# color = api['class_colors'].get(class_name, (0, 255, 0))
# mask = class_data['final_mask']
# if isinstance(mask, list):
# mask = np.array(mask, dtype=np.uint8)
# logger.info(f"Applying mask for class {class_name}, shape: {mask.shape}")
# display_image = apply_mask_overlay(display_image, mask, color, alpha=0.5)
# elif 'selected_mask' in class_data and class_data['selected_mask'] is not None:
# color = api['class_colors'].get(class_name, (0, 255, 0))
# mask = class_data['selected_mask']
# if isinstance(mask, list):
# mask = np.array(mask, dtype=np.uint8)
# logger.info(f"Applying mask for class {class_name}, shape: {mask.shape}")
# img = apply_mask_overlay(display_image, mask, color, alpha=0.5)
# if api['current_class'] and api['current_class'] in api['class_annotations']:
# class_data = api['class_annotations'][api['current_class']]
# for i, (x, y) in enumerate(class_data['points']):
# is_fg = class_data['point_types'][i]
# color = (0, 255, 0) if is_fg else (0, 0, 255)
# logger.info(f"Drawing point at ({x}, {y}), type: {'foreground' if is_fg else 'background'}")
# cv2.circle(img, (int(x), int(y)), 5, color, -1)
# logger.info(f"get_image_display: Returning image with shape {img.shape}")
# return {"status":True, "reason":img}
# except Exception as e:
# logger.error(f"get_display_image: Error processing image: {str(e)}")
# traceback.print_exc()
# return {"status":False, "reason":f"get_display_image: Error processing image: {str(e)}"}
# # 前端需要保存每次点击的点的信息,
# # 何时删除:
# point_list = [[303,123],[123,232],[234,234],[343,123]]
# point_type = [True,True,False,True]
# def sam_predict(point_list,point_type,path):
# # 加载配置内容
# config_path = os.path.join(path,'model_params.pickle')
# with open(config_path, 'rb') as file:
# loaded_data,api_config = pickle.load(file)
# # 获取当前类别
# class_data = api_config['class_annotations'][api_config['current_class']]
# for index, point in enumerate(point_list):
# x,y = point
# class_data['points'].append((x, y))
# class_data['point_types'].append(point_type[index])
# if not api_config['current_class'] or api_config['current_class'] not in api_config['class_annotations']:
# logger.info("predict: No current class selected")
# return {"status":False, "reason":"没有选择分类类别"}
# class_data = api_config['class_annotations'][api_config['current_class']]
# if not class_data['points']:
# logger.info("predict: No points added for current class")
# return {"status":False, "reason":"当前类别没有标注点信息,请添加点信息"}
# loaded_data = reset_annotation(loaded_data)
# for i, (x, y) in enumerate(class_data['points']):
# is_foreground = class_data['point_types'][i]
# loaded_data = add_annotation_point(loaded_data, x, y, is_foreground=is_foreground)
# try:
# result = predict_mask(loaded_data)
# if result['status'] == False:
# return {
# "success":False,
# "msg":result['reason'],
# "data":None
# }
# result = result['reason']
# loaded_data = result['data']
# if result is None:
# logger.info("predict: SAMAnnotator.predict_mask returned None")
# return {"status":False, "reason":"模型预测返回空值"}
# loaded_data = result['data']
# class_data['masks'] = [np.array(mask, dtype=np.uint8) for mask in result['masks']]
# class_data['scores'] = result['scores']
# class_data['selected_mask_index'] = result['selected_index']
# if result['selected_index'] >= 0:
# class_data['selected_mask'] = class_data['masks'][result['selected_index']]
# print(f"predict: Predicted {len(result['masks'])} masks, selected index: {result['selected_index']}")
# save_data = (loaded_data,api_config)
# with open(config_path, 'wb') as file:
# pickle.dump(save_data, file)
# # 生成预测结果图
# result = get_image_display(loaded_data,api_config)
# if result['status'] == True:
# img = result['reason']
# display_img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# pil_img = Image.fromarray(display_img_rgb)
# tmp_path = os.path.join(path,'temp/output_image.jpg')
# pil_img.save(tmp_path)
# else:
# return {
# "success":False,
# "msg":result['reason'],
# "data":None
# }
# return {
# "success":True,
# "msg":result['reason'],
# "location":tmp_path
# }
# except Exception as e:
# print(f"predict: Error during prediction: {str(e)}")
# traceback.print_exc()
# return {"status":False, "reason":f"predict: Error during prediction: {str(e)}"}
# sam_predict(point_list,point_type,file_path)
"""
Save the current result
"""
# 添加pionts
# def add_point(x,y,type,path,foreground_mode=True):
# # 加载配置内容
# config_path = os.path.join(path,'model_params.pickle')
# with open(config_path, 'rb') as file:
# loaded_data,api_config = pickle.load(file)
# class_data = api_config['class_annotations'][api_config['current_class']]
# class_data['points'].append((x, y))
# class_data['point_types'].append(is_foreground)
# # 支持的图像文件扩展名
# SUPPORTED_IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.JPG', '.JPEG', '.PNG', '.tiff')
# DEFAULT_MODEL_PATH = r"/home/xiazj/ai-station-code/segment_anything_model/weights/vit_b.pth"
# config_path = r'/home/xiazj/ai-station-code/segment_anything_model/model_params.pickle'
# # # 初始化配置 , 每次上传图片时,会创建一个新的配置文件
# with open(config_path, 'wb') as file:
# pickle.dump(config, file)
# # # 初始化模型让模型加载然后类直接copy即可,暂时没测试
# # sam = sam_model_registry["vit_b"](checkpoint=DEFAULT_MODEL_PATH)
# # # 将模型移至GPU如果可用
# # device = "cuda" if cv2.cuda.getCudaEnabledDeviceCount() > 0 else "cpu"
# # _ = sam.to(device=device)
# # sam_predictor = SamPredictor(sam)
# """
# 设置分类
# """
# {
# "":"",
# "":"",
# }
# """
# 选择分类
# """
# """
# 加点预测
# """
# """
# 图像保存
# """
# # # 加载模型
# annotator = sam_annotator.SAMAnnotator(DEFAULT_MODEL_PATH,config_path)
# print('success')
# # # 加载路径
# input_dir = r"/home/xiazj/ai-station-code/segment_anything_model/input"
# output_dir = r"/home/xiazj/ai-station-code/segment_anything_model/output"
# temp_dir = r"/home/xiazj/ai-station-code/segment_anything_model/temp_images"
# image_count = annotator.set_input_directory(input_dir)
# print(image_count)
# annotator.set_output_directory(output_dir)
# annotator.save_params_to_file(config_path)
# # # 加载当前图像 ,基于current_index,进行图片替换
# annotator.load_image()
# image_info = annotator.get_current_image_info()
# # 获取显示图像, 这里显示的是当前标注的图像当前状态信息
# display_image = annotator.get_display_image()
# # 保存为临时文件
# temp_image_path = os.path.join(temp_dir,image_info['filename'])
# cv2.imwrite(temp_image_path, display_image)
# annotator.save_params_to_file(config_path)
# """添加标注点"""
# # ,测试一下加载新模型
# annotator = sam_annotator.SAMAnnotator(DEFAULT_MODEL_PATH,config_path)
# point = {
# "x": 200,
# "y": 640,
# "is_foreground": True,
# "button_type": "left"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# # 第二次加点
# point = {
# "x": 270,
# "y": 480,
# "is_foreground": False,
# "button_type": "right"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# point = {
# "x": 400,
# "y": 430,
# "is_foreground": True,
# "button_type": "left"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# point = {
# "x": 705,
# "y": 497,
# "is_foreground": True,
# "button_type": "left"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# point = {
# "x": 408,
# "y": 460,
# "is_foreground": True,
# "button_type": "left"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# point = {
# "x": 428,
# "y": 435,
# "is_foreground": True,
# "button_type": "left"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# point = {
# "x": 270,
# "y": 1110,
# "is_foreground": True,
# "button_type": "left"
# }
# is_foreground = True if point['button_type'] == "left" else False
# result = annotator.add_point(point['x'], point['y'], is_foreground)
# # 更新显示图像
# display_image = annotator.get_display_image()
# if display_image is not None:
# image_info = annotator.get_current_image_info()
# temp_image_path = os.path.join(temp_dir,image_info['filename'])
# cv2.imwrite(temp_image_path, display_image)
# annotator.save_params_to_file(config_path)
# """
# 获取列表
# """
# points_data = annotator.get_points_with_labels()
# print(points_data)
# """
# 删除点
# """
# result = annotator.delete_last_point()
# # 更新显示图像
# display_image = annotator.get_display_image()
# if display_image is not None:
# image_info = annotator.get_current_image_info()
# temp_image_path = os.path.join(temp_dir,image_info['filename'])
# cv2.imwrite(temp_image_path, display_image)
# annotator.save_params_to_file(config_path)
# with open(config_path, 'rb') as file:
# loaded_data = pickle.load(file)
# print(loaded_data)
# """
# 开启预测
# """
# result = annotator.predict_mask()
# if result is None:
# print({"status": "error", "message": "预测失败,请检查是否有添加点或加载图像"})
# # 更新显示图像
# display_image = annotator.get_display_image()
# if display_image is not None:
# image_info = annotator.get_current_image_info()
# temp_image_path = os.path.join(temp_dir,image_info['filename'])
# cv2.imwrite(temp_image_path, display_image)
# annotator.save_params_to_file(config_path)
# """
# 逻辑:
# 添加一个点,就预测一次
# 因此需要一个add_point, 然后再predict
# 同理删除一个point也需要一次predict重新预测等价于回滚
# """
# """
# 添加类别 也就是要分割的内容,以及想标注的颜色
# """
# class_name = "origin"
# color = [128, 128, 128]
# result = annotator.add_class(class_name,color)
# print({"status": "success", "classes": result})
# """
# 设置某一类别
# """
# """
# 确定分类
# """
# Model invocation examples
"""Landform classification (dimaoshibie)"""
# Landform classification - image segmentation
# path: each upload should go into a freshly created UUID-named folder to avoid file name collisions, e.g. D:\\project\\ai_station\\wudingpv\\tmp\\99f06853-4788-4608-8884-2a3b7bc33768
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/dimaoshibie/crop_9_14.png"
# result = model_deal.dimaoshibie_pic(dimaoshibie_SegFormer,path,param.dmsb_count,param.dmsb_name_classes)
# print(result)
# # 地貌识别 - 像素面积计算
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/dimaoshibie/crop_9_14.png"
# scale_m = 0.92*0.92 # Z18代表的像素大小
# result = model_deal.dimaoshibie_area(path,scale_m,param.dmsb_colors)
# print(result)
"""屋顶识别"""
2025-05-14 11:00:24 +08:00
# 屋顶识别- 图形分割
2025-05-06 11:18:48 +08:00
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/wuding/99f06853-4788-4608-8884-2a3b7bc32131/taihuyuan_7-8.png"
# result = model_deal.roof_pic(roof_model,path,param.wdpv_palette)
# print(result)
# # 屋顶识别 - 像素面积计算
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/wuding/99f06853-4788-4608-8884-2a3b7bc32131/taihuyuan_7-8.png"
# scale_m = 0.92*0.92 # Z18代表的像素大小
# result = model_deal.roof_area(path,scale_m,param.wdpv_colors)
# print(result)
""" 光伏识别 """ # 公用一个接口即可,url进行区别
2025-05-14 11:00:24 +08:00
# 光伏识别- 图形分割
2025-05-06 11:18:48 +08:00
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/pv/99f06853-4788-4608-8884-2a3b7bc32131/taihuyuan_7-8.png"
# result = model_deal.roof_pic(pv_model,path,param.wdpv_palette)
# print(result)
# # PV recognition - pixel area calculation
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/pv/99f06853-4788-4608-8884-2a3b7bc32131/taihuyuan_7-8.png"
# scale_m = 0.92*0.92 # Z18代表的像素大小
# result = model_deal.roof_area(path,scale_m,param.wdpv_colors)
# print(result)
""" 屋顶光伏识别 """
2025-05-14 11:00:24 +08:00
# # 屋顶光伏识别- 图形分割
2025-05-06 11:18:48 +08:00
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/wudingpv/99f06853-4788-4608-8884-2a3b7bc32131/taihuyuan_7-8.png"
# result = model_deal.roofpv_pic(roof_model,pv_model,path,param.wdpv_palette)
# file_list = result['reason']
# final_path = prepare_data.merge_binary(file_list)
# # 屋顶光伏识别 - 像素面积计算
# path = "/home/xiazj/ai-station/ai_station_merge/tmp/wudingpv/99f06853-4788-4608-8884-2a3b7bc32131/taihuyuan_7-8.png"
# scale_m = 0.92*0.92 # Z18代表的像素大小
# result = model_deal.roof_area_roofpv(path,scale_m,param.wdpv_colors)
# print(result)
"""光伏出力预测"""
# 1. 读取训练文件,截取部分数据,并针对点击列进行数据前端展示
# 展示某一列,并进行返回
# tmp = prepare_data.show_data_pvfd('nwp_directirrad')
# print(tmp)
# # 2. Return the test data to the frontend
# test_data_path = "/home/xiazj/ai-station/ai_station_merge/tmp/guangfufadian/99f06853-4788-4608-8884-2a3b7bc32131/run_test.csv"
# tmp = prepare_data.show_testdata_pvfd(test_data_path)
# print(tmp)
# 3. Prediction on uploaded data
# Return pred_data and true_data and plot them against the timestamp on the x-axis; whatever is present is shown, whatever is absent is not
test_data_path = "/home/xiazj/ai-station-code/guangfufadian/datasets/run_test.csv"
# predictions = pvfd_model.run_inference(test_data_path)
# print(len(predictions))
# predictions = np.array(predictions).flatten()
# pred_data, true_data = prepare_data.result_merge_guangfufadian(test_data_path,predictions)
# print(pred_data)
# print(true_data)
# start_time = '2019-05-23 03:15:00'
# end_time = '2019-05-25 03:30:00'
# print(model_deal.start_pvelectric_predict_endpoint(pvfd_model,test_data_path,start_time,end_time,True))
"""Wind power forecasting (fenglifadian)"""
# 1. Read the training file, take a slice of the data, and return the clicked column for display in the frontend
# Show a single column and return it
# tmp = prepare_data.show_data_windfd("Wind speed at height of 10 meters (m/s)")
# print(tmp)
# # 2. Return the test data to the frontend
# test_data_path = "/home/xiazj/ai-station/ai_station_merge/tmp/fenglifadian/99f06853-4788-4608-8884-2a3b7bc32131/Wind_farm_test.csv"
# tmp = prepare_data.show_testdata_windfd(test_data_path)
# print(tmp)
# # 3. Prediction on uploaded data
# test_data_path = "/home/xiazj/ai-station-code/fenglifadian/datasets/Wind_farm_test.csv"
# predictions = windfd_model.run_inference(test_data_path)
# predictions = np.array(predictions).flatten()
# print(len(predictions))
# pred_data, true_data = prepare_data.result_merge_fenglifadian(test_data_path,predictions)
# print(pred_data)
# print(true_data)
"""Methane yield prediction (jiawanyuce)"""
# 1. Read the training file, take a slice of the data, and return the clicked column for display in the frontend
# test_data_path = "/home/xiazj/ai-station/ai_station_merge/tmp/jiawanyuce/jiawan_test.csv"
# tmp = prepare_data.show_data_jiawanyuce("X_ch")
# print(tmp)
# 2. Run the prediction endpoint
# test_data_path = "/home/xiazj/ai-station-code/jiawanyuce/data/jiawan_test.csv"
# tmp = model_deal.start_predict_endpoint(ch4_model_flow,ch4_model_gas,test_data_path,"2023-01-01 02:30:00","2023-01-03 02:30:00",2,True)
# print(tmp)
# """ 煤热解 """
# meirejie_test_content = {
# 'A': 11.92,
# 'V' : 51.16,
# 'FC': 48.84,
# 'C': 83.22 ,
# 'H': 3.89,
# 'N': 2.72,
# 'S': 0.45,
# 'O':20.21,
# 'H/C':0.56,
# 'O/C':0.18,
# 'N/C':0.03,
# 'Rt':40.00,
# 'Hr':5.00,
# 'dp':0.20,
# 'T':510.00
# }
# meirejie_test_content = pd.DataFrame([meirejie_test_content])
# meirejie_result_content = {
# 'Tar':11.71,
# 'Gas':10.81 ,
# 'Char':75.31,
# 'Water':2.17
# }
# #1. Tar
#1.1 Single-sample prediction
# tar_result = model_deal.pred_single_tar(meirejie_test_content)
# print(tar_result)
# #1.2 文件批量单独模型预测
# model_name ='xgb_gas'
# result = model_deal.get_excel_tar(model_name)
# print(result)
#2、 char 煤渣
#2.1 单独预测
# char_result = model_deal.pred_single_char(meirejie_test_content)
# print(char_result)
# #2.2 文件批量单独模型预测
# model_name ='xgb_gas'
# result = model_deal.get_excel_char(model_name)
# print(result)
#3、 water 蒸汽
#3.1 单独预测
# water_result = model_deal.pred_single_water(meirejie_test_content)
# print(water_result)
# #3.2 文件批量单独模型预测
# model_name ='xgb_gas'
# result = model_deal.get_excel_water(model_name)
# print(result)
#4、 gas 煤气
#4.1 单独预测
# gas_result = model_deal.pred_single_gas(meirejie_test_content)
# print(gas_result)
# #4.2 文件批量单独模型预测
# model_name ='xgb_gas'
# result = model_deal.get_excel_gas(model_name)
# print(result)
# """Coal-based carbon materials (meijitancailiao)"""
# meijiegou_test_content = {
# 'A': 10.43,
# 'VM' : 35.88,
# 'K/C': 4,
# 'MM': 0 ,
# 'AT': 800,
# 'At': 1,
# 'Rt': 5
# }
# # meijiegou_test_content = pd.DataFrame([meijiegou_test_content])
# meicailiao_test_content = {
# "SSA":1141.8,
# "TPV":0.46,
# "N":1.74,
# "O":3.84,
# "ID/IG":1.102,
# "J":0.5
# }
# meicailiao_test_content = pd.DataFrame([meicailiao_test_content])
# Activated carbon total specific surface area (SSA) prediction #
# # # 1. Single-sample prediction
# def test(content: post_model.Zongbiaomianji):
# meijiegou_test_content = pd.DataFrame([content])
# ssa_result = model_deal.pred_single_ssa(meijiegou_test_content)
# print(ssa_result)
# test(meijiegou_test_content)
# import uuid
# print(str(uuid.uuid4()))
# # 2、文件批量单独模型预测
# model_name ='xgb_ssa'
# file_path = "/home/xiazj/ai-station/tmp/meijitancailiao/ssa/866b43f9-9ec0-423c-a974-7f46232b8277/test_ssa.csv"
# result = model_deal.get_excel_ssa(model_name,file_path)
# print(result)
# # 活性炭孔总体积预测分析 #
# # # 1、 单独预测
# tpv_result = model_deal.pred_single_tpv(meijiegou_test_content)
# print(tpv_result)
# # # 2、文件批量单独模型预测
# model_name ='xgb_tpv'
# result = model_deal.get_excel_tpv(model_name)
# print(result)
# # 煤炭电容预测分析 #
# # 1、 单独预测
# meitan_result = model_deal.pred_single_meitan(meicailiao_test_content)
# print(meitan_result)
# # # 2、文件批量单独模型预测
# model_name ='xgb_meitan'
# result = model_deal.get_excel_meitan(model_name)
# print(result)
# # 煤炭电容预测分析 #
# # # 1、 单独预测
# meiliqing_result = model_deal.pred_single_meiliqing(meicailiao_test_content)
# print(meiliqing_result)
# # # 2、文件批量单独模型预测
# model_name ='xgb_meiliqing'
# result = model_deal.get_excel_meiliqing(model_name)
# print(result)
# # 数据模拟返回 #
# params = {"A_min" : None, "A_max": None, "A_step": None, "VM_min" : None, "VM_max": None, "VM_step": None,
# "KC_min" : None, "KC_max": None, "KC_step": None,"MM_min" : None, "MM_max": None, "MM_step": None,
# "AT_min" : None, "AT_max": None, "AT_step": None,"At_min" : None, "At_max": None, "At_step": None,
# "Rt_min" : None, "Rt_max": None, "Rt_step": None}
# params = {'A_min': 4.0, 'A_max': 48.0, 'A_step': 4.0, 'VM_min': 5.0, 'VM_max': 50.0, 'VM_step': 5.0, 'KC_min': 1.0, 'KC_max': 4.0, 'KC_step': 0.5, 'MM_min': 0.0, 'MM_max': 1.0, 'MM_step': 1.0, 'AT_min': 600.0, 'AT_max': 900.0, 'AT_step': 50.0, 'At_min': 0.5, 'At_max': 2.0, 'At_step': 0.5, 'Rt_min': 5.0, 'Rt_max': 10.0, 'Rt_step': 5.0}
# params = prepare_data.get_params(params)
# pred_data = prepare_data.create_pred_data(params)
# result = model_deal.pred_func("xgb",pred_data)
# sorted_result = result.sort_values(by=['SSA', 'TPV'], ascending=[False, False])
# sorted_result.to_csv('/home/xiazj/ai-station-code/meijitancailiao/data/moni.csv')
# # 保留条数
# num = 6
# if num is None:
# print(sorted_result.head()) # 返回全部
# else:
# print(sorted_result.head(6)) # 返回所需条数
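# Hedged sketch: the commented simulation pipeline above (get_params -> create_pred_data ->
# pred_func -> sort) could back a single endpoint that accepts the min/max/step dictionary and
# returns the top rows. The route, request shape and `app` instance are assumptions.
# @app.post("/meijitancailiao/simulate")
# def api_simulate(params: dict, num: int = 6):
#     grid = prepare_data.create_pred_data(prepare_data.get_params(params))
#     result = model_deal.pred_func("xgb", grid)
#     result = result.sort_values(by=['SSA', 'TPV'], ascending=[False, False])
#     return result.head(num).to_dict(orient="records")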
"""Data page retrieval"""
# sql = "SELECT application_name, task_type, sample_name, img_url, time, download_url FROM data_samples"
# data = data_util.fetch_data(sql)
# data = data_util.generate_json_data_source(data)
# print(data)
# sql = "SELECT application_name, task_type, sample_name, img_url, time FROM app_samples"
# data = data_util.fetch_data(sql)
# data = data_util.generate_json_app_source(data)
# print(data)
# sql = "SELECT application_name, task_type, sample_name, img_url, time FROM meijitancailiao_samples"
# data = data_util.fetch_data(sql)
# data = data_util.generate_json_meijitancailiao_source(data)
# print(data)
# type = "zongkongtiji"
# sql = "SELECT type, chinese_name, col_name, data_type, unit, data_scale FROM meijitancailiao_features where use_type = %s;"
# data = data_util.fetch_data_with_param(sql,(type,))
# print(data)
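# Hedged sketch: the commented queries above could back simple read-only endpoints for the data
# page; the route and `app` instance are assumptions, while fetch_data/generate_json_data_source
# are used exactly as in the commented example.
# @app.get("/data/samples")
# def api_data_samples():
#     sql = "SELECT application_name, task_type, sample_name, img_url, time, download_url FROM data_samples"
#     return data_util.generate_json_data_source(data_util.fetch_data(sql))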