• 如何將人臉變漂亮(七)


    利用 mediapipe 進行處理
    規劃
    1.先把人臉辨識,然後取出框框
    2.把框框內的人臉,進行美容
    -高反差保留
    (1)曝光度調整
    (2)綠色與藍色,疊加
    (3)YUCIHighPassSkinSmoothingMaskBoost
    -調整圖像亮度
    -混合
    3.把人臉的嘴巴,進行塗紅 (太醜了)
    4.把人臉的眼睛塗黑
    5.新增去背景,用綠幕,然後塞入OBS內,這樣就可以直播了
    6.把功能改寫為pyqt5,以利後續使用
    (1) 用designer 劃一個 mainwindow.py ( 用pyuic5 轉換)
    (2) 主程式呼叫 main.py (呼叫 mainwindow.py)
    (3) 用個thread 拉 camera 攝影機 (init, run ,stop)
    (4) 用個 frame 接收影像處理資訊

    資料出處,請參考 https://www.samproell.io/posts/yarppg/yarppg-face-detection-with-mediapipe/

    下一篇再來修改之前內容。

    # main.py
    from PyQt5.QtWidgets import QApplication
    
    from mainwindow import MainWindow
    from rppg import RPPG
    
    if __name__ == "__main__":
        # Build the Qt application: the camera/processing model first,
        # then the window that visualizes its output.
        application = QApplication([])
        model = RPPG(video=0, parent=application)
        window = MainWindow(rppg=model)
        window.show()

        model.start()          # launch the camera thread
        application.exec_()    # block until the window is closed
        model.stop()           # join the camera thread and clean up
    
    # mainwindow.py
    
    from PyQt5.QtWidgets import QMainWindow
    import pyqtgraph as pg
    import mediapipe as mp
    # Shorthand aliases for MediaPipe's drawing helpers and the FaceMesh
    # solution, used below when rendering landmark overlays.
    mp_drawing = mp.solutions.drawing_utils
    mp_drawing_styles = mp.solutions.drawing_styles
    mp_face_mesh = mp.solutions.face_mesh
    
    
    class MainWindow(QMainWindow):
        """Window visualizing the output of the RPPG model."""

        def __init__(self, rppg):
            """Hook this window up to an RPPG model instance."""
            super().__init__()
            rppg.rppg_updated.connect(self.on_rppg_updated)
            self.init_ui()

        def init_ui(self):
            """Build the central pyqtgraph image view."""
            self.setWindowTitle("FaceMesh detection in PyQt")

            graphics = pg.GraphicsLayoutWidget()
            self.img = pg.ImageItem(axisOrder="row-major")
            # Both display axes are inverted so the row-major camera frame
            # appears in the expected orientation.
            viewbox = graphics.addViewBox(invertX=True, invertY=True,
                                          lockAspect=True)
            viewbox.addItem(self.img)

            self.setCentralWidget(graphics)

        def on_rppg_updated(self, output):
            """Receive an RppgResults tuple and redraw the annotated frame."""
            frame = output.rawimg.copy()
            draw_facemesh(frame, output.landmarks, tesselate=True,
                          contour=True)
            self.img.setImage(frame)
    
    
    def draw_facemesh(img, results, tesselate=False,
                      contour=False, irises=False):
        """Draw all facemesh landmarks found in an image.

        Args:
            img: RGB image (numpy array), annotated in place.
            results: object returned by FaceMesh.process, or None.
            tesselate (bool): draw the full face tesselation.
            contour (bool): draw the face contours (oval, lips, eyes, brows).
            irises (bool): draw iris connections.  Irises are only drawn
                if the corresponding landmarks are present, which requires
                FaceMesh to be initialized with refine_landmarks=True.
        """
        # Nothing to draw when detection failed or found no faces.
        if results is None or results.multi_face_landmarks is None:
            return

        for face_landmarks in results.multi_face_landmarks:
            if tesselate:
                mp_drawing.draw_landmarks(
                    image=img,
                    landmark_list=face_landmarks,
                    connections=mp_face_mesh.FACEMESH_TESSELATION,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=mp_drawing_styles
                    .get_default_face_mesh_tesselation_style())
            if contour:
                mp_drawing.draw_landmarks(
                    image=img,
                    landmark_list=face_landmarks,
                    connections=mp_face_mesh.FACEMESH_CONTOURS,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=mp_drawing_styles
                    .get_default_face_mesh_contours_style())
            # NormalizedLandmarkList has no __len__; the points live in its
            # repeated `landmark` field.  More than 468 points means the
            # refined iris landmarks are present.
            if irises and len(face_landmarks.landmark) > 468:
                mp_drawing.draw_landmarks(
                    image=img,
                    landmark_list=face_landmarks,
                    connections=mp_face_mesh.FACEMESH_IRISES,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=mp_drawing_styles
                    .get_default_face_mesh_iris_connections_style())
    
    
    # rppg.py
    
    from collections import namedtuple
    import numpy as np
    from PyQt5.QtCore import pyqtSignal, QObject
    import mediapipe as mp
    
    from camera import Camera
    
    # Container for one processing step: the raw RGB camera frame and the
    # object returned by FaceMesh.process (carries multi_face_landmarks).
    RppgResults = namedtuple("RppgResults", ["rawimg", "landmarks"])
    
    class RPPG(QObject):
        """Model processing incoming camera frames with FaceMesh.

        The rppg_updated signal carries an RppgResults named tuple:
          - rawimg: unmodified copy of the camera frame
          - landmarks: object returned by FaceMesh.process
        """

        rppg_updated = pyqtSignal(RppgResults)

        def __init__(self, parent=None, video=0):
            """Set up the camera thread and the FaceMesh detector.

            Args:
                parent: parent QObject in the Qt hierarchy.
                video: camera ID or video filename, forwarded to Camera.
            """
            super().__init__(parent=parent)

            self.detector = mp.solutions.face_mesh.FaceMesh(
                max_num_faces=1,
                refine_landmarks=False,
                min_detection_confidence=0.5,
                min_tracking_confidence=0.5
            )

            self._cam = Camera(video=video, parent=parent)
            self._cam.frame_received.connect(self.on_frame_received)

        def on_frame_received(self, frame):
            """Run FaceMesh on a new frame and emit the results."""
            # Copy first so subscribers receive the unprocessed image.
            untouched = frame.copy()
            detection = self.detector.process(frame)
            self.rppg_updated.emit(RppgResults(untouched, detection))

        def start(self):
            """Launch the camera thread."""
            self._cam.start()

        def stop(self):
            """Stop the camera thread and clean up the detector."""
            self._cam.stop()
            self.detector.close()
    
    
    # camera.py
    
    import time
    
    import cv2
    import numpy as np
    from PyQt5.QtCore import QThread, pyqtSignal
    
    
    class Camera(QThread):
        """Wraps cv2.VideoCapture and emits Qt signal with frames in RGB format.

        The `run` function launches a loop that waits for new frames in the
        VideoCapture and emits them with a `frame_received` signal.  Calling
        `stop` stops the loop and releases the camera.
        """
        # PyQt signal emitting each new frame read from the camera (RGB).
        frame_received = pyqtSignal(np.ndarray)

        def __init__(self, video=0, parent=None):
            """Initialize Camera instance.

            Args:
                video (int or string): ID of camera or video filename
                parent (QObject): parent object in Qt context
            """
            super().__init__(parent=parent)

            self._cap = cv2.VideoCapture(video)
            self._running = False

        def run(self):
            """Start loop in thread capturing incoming frames.
            """
            self._running = True
            while self._running:
                ret, frame = self._cap.read()

                if not ret:
                    # Stop the loop before raising so stop() can still join
                    # the (now finished) thread cleanly.
                    self._running = False
                    raise RuntimeError("No frame received")

                # OpenCV delivers BGR; downstream consumers expect RGB.
                self.frame_received.emit(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

        def stop(self):
            """Stop loop and release camera.
            """
            self._running = False
            # Join the capture loop instead of sleeping a fixed 0.1 s:
            # releasing the device while run() is still inside read() is a
            # race condition.  QThread.wait() blocks until run() returns.
            self.wait()
            self._cap.release()
    
    
  • 相关阅读:
    破解软件的原理是什么(软件被破解公开)
    一篇玩转mybatis-plus框架的详细讲解(入门必备)
    【Docker】将自定义的镜像上传至dockerhub或阿里云私有仓库,并在其他节点进行拉取
    疫情下跨越一万公里的友情:熊超与飒特电子哨兵的故事
    2-37.1 EmpProject综合案例
    Win11 22H2怎么卸载更新补丁?Win11 22H2卸载更新补丁的步骤
    Javassist-ConstPool常量池
    Qlik部署动态经营分析,实时帮助企业掌控盈利能力
    传统供应链和现代供应链有哪些区别?
    WSL重装Anaconda
  • 原文地址:https://blog.csdn.net/chencef/article/details/125557329