基于 U-Net / U-Net++ 实现的道路裂缝图像检测系统:深度学习、图像分割,带检测界面,可生成 mIoU、mPA、精确率、召回率等指标。
基于 U-Net / U-Net++ 的道路裂缝图像分割系统的完整技术架构、功能解析与详细代码实现(含 Python + GUI 界面),支持:
✅ 道路裂缝图像自动分割
✅ 图像预处理 + 模型推理
✅ 可视化结果展示(原图 vs 分割图)
✅ 支持 mIoU、mPA、精确率、召回率等指标计算
✅ 提供图形界面(PyQt5)
✅ 使用 TensorFlow/Keras 实现
✅ 一、系统概览
| 项目 | 内容 |
|---|---|
| 系统名称 | 道路裂缝检测分割系统(Road Crack Detection & Segmentation System) |
| 核心算法 | U-Net / U-Net++(可切换) |
| 任务类型 | 图像分割(Semantic Segmentation) |
| 输入格式 | JPG/PNG/BMP 格式道路图像 |
| 输出格式 | 分割掩膜(黑白二值图) |
| 评估指标 | mIoU、mPA、Precision、Recall、F1-score |
| 技术栈 | 深度学习:TensorFlow 2.x + Keras;图像处理:OpenCV + PIL;GUI:PyQt5 |
| 数据集 | 自定义道路裂缝数据集(标注为二值掩膜) |
✅ 二、系统架构图
┌────────────────────┐      ┌────────────────────┐
│    GUI (PyQt5)     │◄───►│  模型推理 (Keras)   │
│ (图像加载 + 显示)  │      │ (U-Net / U-Net++)  │
└────────────────────┘      └────────────────────┘
         ↑                           ↑
         ▼                           ▼
┌────────────────────┐      ┌────────────────────┐
│     图像预处理     │      │      指标计算      │
│(Resize, Normalize) │      │ (mIoU, mPA, etc.)  │
└────────────────────┘      └────────────────────┘
         ↑                           ↑
         ▼                           ▼
┌────────────────────┐      ┌────────────────────┐
│      原始图像      │      │      分割结果      │
└────────────────────┘      └────────────────────┘

✅ 三、模型结构说明
🧠 1. U-Net 结构(基础版本)
# unet.py
"""U-Net model definition for road-crack semantic segmentation."""
import tensorflow as tf
from tensorflow.keras import layers, Model


def _double_conv(x, filters):
    # Two stacked 3x3 same-padded ReLU convolutions — the standard U-Net block.
    x = layers.Conv2D(filters, 3, activation='relu', padding='same')(x)
    x = layers.Conv2D(filters, 3, activation='relu', padding='same')(x)
    return x


def build_unet(input_shape=(256, 256, 3), num_classes=2):
    """Build a vanilla U-Net.

    Args:
        input_shape: (H, W, C) of the input images. H and W should be
            divisible by 16 because of the four 2x2 poolings.
        num_classes: number of output channels (per-pixel softmax).

    Returns:
        An uncompiled ``tf.keras.Model`` mapping images to per-pixel
        class probabilities.
    """
    inputs = layers.Input(shape=input_shape)

    # Encoder: four down-sampling stages with doubling channel counts.
    c1 = _double_conv(inputs, 64)
    p1 = layers.MaxPooling2D(2)(c1)
    c2 = _double_conv(p1, 128)
    p2 = layers.MaxPooling2D(2)(c2)
    c3 = _double_conv(p2, 256)
    p3 = layers.MaxPooling2D(2)(c3)
    c4 = _double_conv(p3, 512)
    p4 = layers.MaxPooling2D(2)(c4)

    # Bottleneck.
    c5 = _double_conv(p4, 1024)

    # Decoder: transpose-conv upsampling + skip connection from the encoder.
    u6 = layers.Conv2DTranspose(512, 2, strides=2, padding='same')(c5)
    c6 = _double_conv(layers.concatenate([c4, u6], axis=3), 512)
    u7 = layers.Conv2DTranspose(256, 2, strides=2, padding='same')(c6)
    c7 = _double_conv(layers.concatenate([c3, u7], axis=3), 256)
    u8 = layers.Conv2DTranspose(128, 2, strides=2, padding='same')(c7)
    c8 = _double_conv(layers.concatenate([c2, u8], axis=3), 128)
    u9 = layers.Conv2DTranspose(64, 2, strides=2, padding='same')(c8)
    c9 = _double_conv(layers.concatenate([c1, u9], axis=3), 64)

    # 1x1 convolution to per-pixel class scores.
    outputs = layers.Conv2D(num_classes, 1, activation='softmax')(c9)
    return Model(inputs=inputs, outputs=outputs)

# 🧠 2. U-Net++(增强版,可选)
🔗 U-Net 参考实现:https://github.com/zhixuhao/unet;U-Net++ 官方实现:https://github.com/MrGiovanni/UNetPlusPlus
✅ 四、训练代码(可选)
# train.pyimporttensorflowastffromtensorflow.keras.callbacksimportModelCheckpoint,EarlyStoppingfromsklearn.model_selectionimporttrain_test_split# 加载数据(示例)train_images=np.load('train_images.npy')train_masks=np.load('train_masks.npy')# 划分训练集/验证集X_train,X_val,y_train,y_val=train_test_split(train_images,train_masks,test_size=0.2)# 编译模型model=build_unet()model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])# 训练callbacks=[ModelCheckpoint('best_model.h5',save_best_only=True),EarlyStopping(patience=10,restore_best_weights=True)]history=model.fit(X_train,y_train,validation_data=(X_val,y_val),epochs=100,batch_size=16,callbacks=callbacks)✅ 五、GUI 界面代码(PyQt5)
# main_window.py
"""PyQt5 GUI for the road-crack segmentation system."""
import sys
import os
from PyQt5.QtWidgets import (QApplication, QMainWindow, QLabel, QPushButton,
                             QVBoxLayout, QHBoxLayout, QWidget, QFileDialog,
                             QGroupBox)
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import Qt
import cv2
import numpy as np
import tensorflow as tf


class RoadCrackDetectionApp(QMainWindow):
    """Main window: load an image, run segmentation, display/save the mask."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("道路裂缝检测分割系统")
        self.setGeometry(100, 100, 800, 600)
        self.original_image = None   # BGR image as loaded by cv2.imread
        self.predicted_mask = None   # BGR visualization of the binary mask
        self.model = None            # lazily-loaded Keras model (see segment_image)
        self.init_ui()

    def init_ui(self):
        """Create the two image panes and the control buttons."""
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout()

        # Side-by-side panes: original image vs. predicted mask.
        image_layout = QHBoxLayout()
        self.original_label = QLabel("原始图像")
        self.predicted_label = QLabel("预测结果")
        self.original_label.setAlignment(Qt.AlignCenter)
        self.predicted_label.setAlignment(Qt.AlignCenter)
        image_layout.addWidget(self.original_label)
        image_layout.addWidget(self.predicted_label)
        layout.addLayout(image_layout)

        # Control buttons.
        button_layout = QVBoxLayout()
        self.open_btn = QPushButton("打开图像")
        self.segment_btn = QPushButton("图像分割")
        self.save_btn = QPushButton("保存预测图像")
        button_layout.addWidget(self.open_btn)
        button_layout.addWidget(self.segment_btn)
        button_layout.addWidget(self.save_btn)
        layout.addLayout(button_layout)

        central_widget.setLayout(layout)

        # Wire up signals.
        self.open_btn.clicked.connect(self.open_image)
        self.segment_btn.clicked.connect(self.segment_image)
        self.save_btn.clicked.connect(self.save_prediction)

    def open_image(self):
        """Pick an image file from disk and display it in the left pane."""
        file_name, _ = QFileDialog.getOpenFileName(
            self, "打开图像", "", "Image Files (*.jpg *.jpeg *.png *.bmp)")
        if file_name:
            self.original_image = cv2.imread(file_name)
            self.display_image(self.original_image, self.original_label)

    def segment_image(self):
        """Run the model on the loaded image and show the binary crack mask."""
        if self.original_image is None:
            return

        # Preprocess: resize to the model input, scale to [0, 1], add batch dim.
        img = cv2.resize(self.original_image, (256, 256))
        img = img.astype(np.float32) / 255.0
        img = np.expand_dims(img, axis=0)

        # BUGFIX: load the model once and cache it instead of re-reading the
        # .h5 file from disk on every button click.
        if self.model is None:
            self.model = tf.keras.models.load_model('best_model.h5')  # 替换为你的模型路径

        pred = self.model.predict(img)[0]

        # Per-pixel argmax -> 0 (background) / 1 (crack), scaled to 0/255.
        # BUGFIX: scale BEFORE resizing and use nearest-neighbour interpolation;
        # the original bilinearly resized a 0/1 uint8 mask and then multiplied
        # by 255, which overflows uint8 on interpolated values.
        mask = np.argmax(pred, axis=-1).astype(np.uint8) * 255
        mask = cv2.resize(
            mask,
            (self.original_image.shape[1], self.original_image.shape[0]),
            interpolation=cv2.INTER_NEAREST)

        # Black/white 3-channel image for display and saving.
        self.predicted_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        self.display_image(self.predicted_mask, self.predicted_label)

    def display_image(self, image, label):
        """Render a BGR OpenCV image into a QLabel, keeping aspect ratio."""
        # BUGFIX: OpenCV images are BGR but Format_RGB888 expects RGB bytes —
        # convert first, and make the buffer contiguous for QImage.
        rgb = np.ascontiguousarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        qimage = QImage(rgb.data, rgb.shape[1], rgb.shape[0],
                        rgb.strides[0], QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qimage)
        label.setPixmap(pixmap.scaled(300, 300, Qt.KeepAspectRatio))

    def save_prediction(self):
        """Save the predicted mask to a user-chosen path."""
        if self.predicted_mask is None:
            return
        file_name, _ = QFileDialog.getSaveFileName(
            self, "保存预测图像", "", "PNG Files (*.png);;JPG Files (*.jpg)")
        if file_name:
            cv2.imwrite(file_name, self.predicted_mask)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = RoadCrackDetectionApp()
    window.show()
    sys.exit(app.exec_())

# ✅ 六、评估指标计算函数
# metrics.pyimportnumpyasnpdefcalculate_metrics(y_true,y_pred):# 转为二值y_true=(y_true>0).astype(np.int32)y_pred=(y_pred>0).astype(np.int32)TP=np.sum((y_true==1)&(y_pred==1))FP=np.sum((y_true==0)&(y_pred==1))FN=np.sum((y_true==1)&(y_pred==0))TN=np.sum((y_true==0)&(y_pred==0))precision=TP/(TP+FP)if(TP+FP)>0else0recall=TP/(TP+FN)if(TP+FN)>0else0f1=2*precision*recall/(precision+recall)if(precision+recall)>0else0iou=TP/(TP+FP+FN)if(TP+FP+FN)>0else0accuracy=(TP+TN)/(TP+TN+FP+FN)if(TP+TN+FP+FN)>0else0return{'precision':precision,'recall':recall,'f1_score':f1,'iou':iou,'accuracy':accuracy}✅ 七、运行流程
- 安装依赖:
pip install tensorflow opencv-python PyQt5 numpy matplotlib
- 准备数据:
  - train_images.npy:原始图像
  - train_masks.npy:标注掩膜(0: background, 1: crack)
- 训练模型(可选),生成 best_model.h5
- 启动 GUI:
python main_window.py
✅ 八、可视化结果示例
# plot_metrics.pyimportmatplotlib.pyplotasplt metrics={'building':0.81,'background':1.00}plt.bar(metrics.keys(),metrics.values())plt.title(f'mPA ={sum(metrics.values())/len(metrics):.2%}')plt.ylabel('Pixel Accuracy')plt.show()✅ 九、特色功能总结
| 功能 | 说明 |
|---|---|
| 🚧高精度裂缝分割 | U-Net 模型准确提取裂缝 |
| 📊多指标评估 | mIoU、mPA、Precision、Recall |
| 🖼️实时可视化 | 原图 vs 分割图对比 |
| 💾一键保存 | 保存预测结果 |
| 🧩可扩展性强 | 易于替换模型或添加新功能 |
💡提示:
- 若需支持视频流,可集成 OpenCV 视频读取。
- 如需移动端 App,可考虑 Kivy、BeeWare 等支持 Android 的 Python 框架(PyQt5 本身不适用于移动端)。
助力打造智慧交通。