# video_test.py
  1. import time
  2. import torch
  3. import torch.nn as nn
  4. from torchvision import transforms
  5. from torchvision.models import resnet18,resnet50, squeezenet1_0, shufflenet_v2_x1_0
  6. import numpy as np
  7. from PIL import Image
  8. import os
  9. import argparse
  10. from labelme.utils import draw_grid, draw_predict_grid
  11. import cv2
  12. import matplotlib.pyplot as plt
  13. from dotenv import load_dotenv
  14. load_dotenv()
  15. # os.environ['CUDA_LAUNCH_BLOCKING'] = '0'
  16. patch_w = int(os.getenv('PATCH_WIDTH', 256))
  17. patch_h = int(os.getenv('PATCH_HEIGHT', 256))
  18. confidence_threshold = float(os.getenv('CONFIDENCE_THRESHOLD', 0.80))
  19. scale = 2
  20. class Predictor:
  21. def __init__(self, model_name, weights_path, num_classes):
  22. self.model_name = model_name
  23. self.weights_path = weights_path
  24. self.num_classes = num_classes
  25. self.model = None
  26. self.use_bias = os.getenv('USE_BIAS', True)
  27. self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  28. print(f"当前设备: {self.device}")
  29. # 加载模型
  30. self.load_model()
  31. def load_model(self):
  32. if self.model is not None:
  33. return
  34. print(f"正在加载模型: {self.model_name}")
  35. name = self.model_name
  36. # 加载模型
  37. if name == 'resnet50':
  38. self.model = resnet50()
  39. elif name == 'squeezenet':
  40. self.model = squeezenet1_0()
  41. elif name == 'shufflenet':
  42. self.model = shufflenet_v2_x1_0()
  43. else:
  44. raise ValueError(f"Invalid model name: {name}")
  45. # 替换最后的分类层以适应新的分类任务
  46. if hasattr(self.model, 'fc'):
  47. # ResNet系列模型
  48. self.model.fc = nn.Linear(int(self.model.fc.in_features), self.num_classes, bias=self.use_bias)
  49. elif hasattr(self.model, 'classifier'):
  50. # Swin Transformer等模型
  51. self.model.classifier = nn.Linear(int(self.model.classifier.in_features), self.num_classes, bias=self.use_bias)
  52. elif hasattr(self.model, 'head'):
  53. # Swin Transformer使用head层
  54. self.model.head = nn.Linear(int(self.model.head.in_features), self.num_classes, bias=self.use_bias)
  55. else:
  56. raise ValueError(f"Model {name} does not have recognizable classifier layer")
  57. print(self.model)
  58. # 加载训练好的权重
  59. self.model.load_state_dict(torch.load(self.weights_path, map_location=torch.device('cpu')))
  60. print(f"成功加载模型参数: {self.weights_path}")
  61. # 将模型移动到GPU
  62. self.model.eval()
  63. self.model = self.model.to(self.device)
  64. print(f"成功加载模型: {self.model_name}")
  65. def predict(self, image_tensor):
  66. """
  67. 对单张图像进行预测
  68. Args:
  69. image_tensor: 预处理后的图像张量
  70. Returns:
  71. predicted_class: 预测的类别索引
  72. confidence: 预测置信度
  73. probabilities: 各类别的概率
  74. """
  75. image_tensor = image_tensor.to(self.device)
  76. with torch.no_grad():
  77. outputs = self.model(image_tensor)
  78. probabilities = torch.softmax(outputs, dim=1) # 沿行计算softmax
  79. confidence, predicted_class = torch.max(probabilities, 1)
  80. return confidence.cpu().numpy(), predicted_class.cpu().numpy()
  81. def preprocess_image(img):
  82. """
  83. 预处理图像以匹配训练时的预处理
  84. Args:
  85. img: PIL图像
  86. Returns:
  87. tensor: 预处理后的图像张量
  88. """
  89. # 定义与训练时相同的预处理步骤
  90. transform = transforms.Compose([
  91. transforms.Resize((224, 224)),
  92. transforms.ToTensor(),
  93. transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  94. ])
  95. # 打开并转换图像
  96. img_w, img_h = img.size
  97. global patch_w, patch_h
  98. imgs_patch = []
  99. imgs_index = []
  100. # fig, axs = plt.subplots(img_h // patch_h + 1, img_w // patch_w + 1)
  101. for i in range(img_h // patch_h + 1):
  102. for j in range(img_w // patch_w + 1):
  103. left = j * patch_w # 裁剪区域左边框距离图像左边的像素值
  104. top = i * patch_h # 裁剪区域上边框距离图像上边的像素值
  105. right = min(j * patch_w + patch_w, img_w) # 裁剪区域右边框距离图像左边的像素值
  106. bottom = min(i * patch_h + patch_h, img_h) # 裁剪区域下边框距离图像上边的像素值
  107. # 检查区域是否有效
  108. if right > left and bottom > top:
  109. patch = img.crop((left, top, right, bottom))
  110. # 长宽比过滤
  111. # rate = patch.height / (patch.width + 1e-6)
  112. # if rate > 1.314 or rate < 0.75:
  113. # # print(f"长宽比过滤: {patch_name}")
  114. # continue
  115. imgs_patch.append(patch)
  116. imgs_index.append((left, top))
  117. # axs[i, j].imshow(patch)
  118. # axs[i, j].set_title(f'Image {i} {j}')
  119. # axs[i, j].axis('off')
  120. # plt.tight_layout()
  121. # plt.show()
  122. imgs_patch = torch.stack([transform(img) for img in imgs_patch])
  123. # 添加批次维度
  124. # image_tensor = image_tensor.unsqueeze(0)
  125. return imgs_index, imgs_patch
  126. def visualize_prediction(image_path, predicted_class, confidence, class_names):
  127. """
  128. 可视化预测结果
  129. Args:
  130. image_path: 图像路径
  131. predicted_class: 预测的类别索引
  132. confidence: 预测置信度
  133. class_names: 类别名称列表
  134. """
  135. image = Image.open(image_path).convert('RGB')
  136. plt.figure(figsize=(8, 6))
  137. plt.imshow(image)
  138. plt.axis('off')
  139. plt.title(f'Predicted: {class_names[predicted_class]}\n'
  140. f'Confidence: {confidence:.4f}', fontsize=14)
  141. plt.show()
  142. def get_33_patch(arr:np.ndarray, center_row:int, center_col:int):
  143. """以(center_row,center_col)为中心,从arr中取出来3*3区域的数据"""
  144. # 边界检查
  145. h,w = arr.shape
  146. safe_row_up_limit = max(0, center_row-1)
  147. safe_row_bottom_limit = min(h, center_row+2)
  148. safe_col_left_limit = max(0, center_col-1)
  149. safe_col_right_limit = min(w, center_col+2)
  150. return arr[safe_row_up_limit:safe_row_bottom_limit, safe_col_left_limit:safe_col_right_limit]
  151. def fileter_prediction(predicted_class, confidence, pre_rows, pre_cols, filter_down_limit=3):
  152. """预测结果矩阵滤波,九宫格内部存在浑浊水体的数量需要大于filter_down_limit,"""
  153. predicted_class_mat = np.resize(predicted_class, (pre_rows, pre_cols))
  154. predicted_conf_mat = np.resize(confidence, (pre_rows, pre_cols))
  155. new_predicted_class_mat = predicted_class_mat.copy()
  156. new_predicted_conf_mat = predicted_conf_mat.copy()
  157. for i in range(pre_rows):
  158. for j in range(pre_cols):
  159. if (1. - predicted_class_mat[i, j]) > 0.1:
  160. continue # 跳过背景类
  161. core_region = get_33_patch(predicted_class_mat, i, j)
  162. if np.sum(core_region) < filter_down_limit:
  163. new_predicted_class_mat[i, j] = 0 # 重置为背景类
  164. new_predicted_conf_mat[i, j] = 1.0
  165. return new_predicted_conf_mat.flatten(), new_predicted_class_mat.flatten()
  166. def discriminate_ratio(water_pre_list:list):
  167. # 方式一:60%以上的帧存在浑浊水体
  168. water_pre_arr = np.array(water_pre_list, dtype=np.float32)
  169. water_pre_arr_mean = np.mean(water_pre_arr, axis=0)
  170. bad_water = np.array(water_pre_arr_mean >= 0.6, dtype=np.int32)
  171. bad_flag = np.sum(bad_water, dtype=np.int32)
  172. print(f'浑浊比例方式:该时间段是否存在浑浊水体:{bool(bad_flag)}')
  173. return bad_flag
  174. def discriminate_cont(pre_class_arr, continuous_count_mat):
  175. """连续帧判别"""
  176. positive_index = np.array(pre_class_arr,dtype=np.int32) > 0
  177. negative_index = np.array(pre_class_arr,dtype=np.int32) == 0
  178. # 给负样本区域置零
  179. continuous_count_mat[negative_index] = 0
  180. # 给正样本区域加1
  181. continuous_count_mat[positive_index] += 1
  182. # 判断浑浊
  183. bad_flag = np.max(continuous_count_mat) > 30
  184. if bad_flag:
  185. print(f'连续帧方式:该时间段是否存在浑浊水体:{bool(bad_flag)}')
  186. return bad_flag
  187. def main():
  188. # 初始化模型实例
  189. # TODO:修改模型网络名称/模型权重路径/视频路径
  190. predictor = Predictor(model_name='shufflenet',
  191. weights_path=r'./shufflenet.pth',
  192. num_classes=2)
  193. input_path = r'D:\code\water_turbidity_det\frame_data\test\20251225\video4_20251129120320_20251129123514'
  194. # 预处理图像
  195. all_imgs = os.listdir(input_path)
  196. all_imgs = [os.path.join(input_path, p) for p in all_imgs if p.split('.')[-1] in ['jpg', 'png']]
  197. image = Image.open(all_imgs[0]).convert('RGB')
  198. # 将预测结果reshape为矩阵时的行列数量
  199. pre_rows = image.height // patch_h + 1
  200. pre_cols = image.width // patch_w + 1
  201. # 图像显示时resize的尺寸
  202. resized_img_h = image.height // 2
  203. resized_img_w = image.width // 2
  204. # 预测每张图像
  205. water_pre_list = []
  206. continuous_count_mat = np.zeros(pre_rows*pre_cols, dtype=np.int32)
  207. flag = False
  208. for img_path in all_imgs:
  209. image = Image.open(img_path).convert('RGB')
  210. # 预处理
  211. patches_index, image_tensor = preprocess_image(image)
  212. # 推理
  213. confidence, predicted_class = predictor.predict(image_tensor)
  214. # 第一层虚警抑制,置信度过滤,低于阈值将会被忽略
  215. for i in range(len(confidence)):
  216. if confidence[i] < confidence_threshold:
  217. confidence[i] = 1.0
  218. predicted_class[i] = 0
  219. # 第二层虚警抑制,空间滤波
  220. # 在此处添加过滤逻辑
  221. new_confidence, new_predicted_class = fileter_prediction(predicted_class, confidence, pre_rows, pre_cols, filter_down_limit=3)
  222. # 可视化预测结果
  223. image = cv2.imread(img_path)
  224. image = draw_grid(image, patch_w, patch_h)
  225. image = draw_predict_grid(image, patches_index, predicted_class, confidence)
  226. new_image = cv2.imread(img_path)
  227. new_image = draw_grid(new_image, patch_w, patch_h)
  228. new_image = draw_predict_grid(new_image, patches_index, new_predicted_class, new_confidence)
  229. image = cv2.resize(image, (resized_img_w, resized_img_h))
  230. new_img = cv2.resize(new_image, (resized_img_w, resized_img_h))
  231. cv2.imshow('image', image)
  232. cv2.imshow('image_filter', new_img)
  233. cv2.waitKey(20)
  234. # 方式1判别
  235. if len(water_pre_list) > 20:
  236. flag = discriminate_ratio(water_pre_list) and flag
  237. water_pre_list = []
  238. print('综合判别结果:', flag)
  239. water_pre_list.append(new_predicted_class)
  240. # 方式2判别
  241. flag = discriminate_cont(new_predicted_class, continuous_count_mat)
  242. if __name__ == "__main__":
  243. main()