    Handler Message Mechanism - Native Layer

    Based on android12-release.

    Companion article: Handler Message Mechanism - FWK Layer


    1. MessageQueue Connects to the Native Layer


    1.1 nativeInit Initialization

    • NativeMessageQueue inherits from both MessageQueue and LooperCallback (see the sketch after the file list below)
    • Looper::getForThread() fetches the Looper object stored in TLS; Looper::setForThread(mLooper) saves the native-layer Looper into TLS
    • eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC) creates the fd for the wake event; rebuildEpollLocked() (re)builds the epoll instance and registers the wake fd on it; this native Looper has no relationship whatsoever with the Java-layer Looper

    frameworks/base/core/jni/android_os_MessageQueue.cpp
    system/core/libutils/include/utils/Looper.h
    system/core/libutils/Looper.cpp
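
    Putting the bullets above together, the nativeInit() path looks roughly like the abridged sketch below (condensed from android_os_MessageQueue.cpp and the Looper constructor in Looper.cpp; member initializers and error handling are trimmed, so treat it as a reading aid rather than the verbatim source):

    // android_os_MessageQueue.cpp (abridged): the Java-side nativeInit()
    // creates a NativeMessageQueue, which reuses or creates a native Looper.
    NativeMessageQueue::NativeMessageQueue()
            : mPollEnv(nullptr), mPollObj(nullptr), mExceptionObj(nullptr) {
        mLooper = Looper::getForThread();   // is a Looper already cached in TLS?
        if (mLooper == nullptr) {
            mLooper = new Looper(false);    // no: create one...
            Looper::setForThread(mLooper);  // ...and cache it in TLS
        }
    }

    // Looper.cpp (abridged): the constructor builds the wake eventfd and the
    // epoll instance that nativePollOnce() will later block on.
    Looper::Looper(bool allowNonCallbacks)
            : mAllowNonCallbacks(allowNonCallbacks) /* other members elided */ {
        mWakeEventFd.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
        AutoMutex _l(mLock);
        rebuildEpollLocked();               // epoll_create1() + EPOLL_CTL_ADD mWakeEventFd
    }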

    1.2 nativePollOnce Blocking Operation

    • pollOnce() handles the entries in the Response array that carry no callback (ident >= 0 means no callback, because entries with a callback use ident = POLL_CALLBACK = -2) and returns their ident; otherwise it calls pollInner() (the JNI glue that gets here is sketched right after this list)
    • pollInner(): look at the Done: stage, which first processes native Messages via handler->handleMessage(message) and then processes the Requests via response.request.callback->handleEvent(fd, events, data)
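
    Before Looper::pollOnce() below is ever reached, MessageQueue.nativePollOnce() on the Java side lands in NativeMessageQueue::pollOnce(), which simply forwards to the native Looper. An abridged sketch of that glue from android_os_MessageQueue.cpp (exception re-throwing trimmed):

    // android_os_MessageQueue.cpp (abridged): nativePollOnce() -> pollOnce()
    void NativeMessageQueue::pollOnce(JNIEnv* env, jobject pollObj, int timeoutMillis) {
        mPollEnv = env;
        mPollObj = pollObj;
        mLooper->pollOnce(timeoutMillis);   // blocks in epoll_wait() until a message, fd event, or wake
        mPollObj = nullptr;
        mPollEnv = nullptr;
        // in the real code, an exception raised by a callback is re-thrown to Java here
    }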

    system/core/libutils/Looper.cpp

    int Looper::pollOnce(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
        int result = 0;
        for (;;) {
            while (mResponseIndex < mResponses.size()) {
                const Response& response = mResponses.itemAt(mResponseIndex++);
                int ident = response.request.ident;
                if (ident >= 0) {
                    int fd = response.request.fd;
                    int events = response.events;
                    void* data = response.request.data;
    #if DEBUG_POLL_AND_WAKE
                    ALOGD("%p ~ pollOnce - returning signalled identifier %d: "
                            "fd=%d, events=0x%x, data=%p",
                            this, ident, fd, events, data);
    #endif
                    if (outFd != nullptr) *outFd = fd;
                    if (outEvents != nullptr) *outEvents = events;
                    if (outData != nullptr) *outData = data;
                    return ident;
                }
            }
    
            if (result != 0) {
    #if DEBUG_POLL_AND_WAKE
                ALOGD("%p ~ pollOnce - returning result %d", this, result);
    #endif
                if (outFd != nullptr) *outFd = 0;
                if (outEvents != nullptr) *outEvents = 0;
                if (outData != nullptr) *outData = nullptr;
                return result;
            }
    
            result = pollInner(timeoutMillis);
        }
    }
    
    int Looper::pollInner(int timeoutMillis) {
    #if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
    #endif
    
        // Adjust the timeout based on when the next message is due.
        if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
            nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
            int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
            if (messageTimeoutMillis >= 0
                    && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
                timeoutMillis = messageTimeoutMillis;
            }
    #if DEBUG_POLL_AND_WAKE
            ALOGD("%p ~ pollOnce - next message in %" PRId64 "ns, adjusted timeout: timeoutMillis=%d",
                    this, mNextMessageUptime - now, timeoutMillis);
    #endif
        }
    
        // Poll.
        int result = POLL_WAKE;
        mResponses.clear();
        mResponseIndex = 0;
    
        // We are about to idle.
        mPolling = true;
    
        struct epoll_event eventItems[EPOLL_MAX_EVENTS];
        int eventCount = epoll_wait(mEpollFd.get(), eventItems, EPOLL_MAX_EVENTS, timeoutMillis);
    
        // No longer idling.
        mPolling = false;
    
        // Acquire lock.
        mLock.lock();
    
        // Rebuild epoll set if needed.
        if (mEpollRebuildRequired) {
            mEpollRebuildRequired = false;
            rebuildEpollLocked();
            goto Done;
        }
    
        // Check for poll error.
        if (eventCount < 0) {
            if (errno == EINTR) {
                goto Done;
            }
            ALOGW("Poll failed with an unexpected error: %s", strerror(errno));
            result = POLL_ERROR;
            goto Done;
        }
    
        // Check for poll timeout.
        if (eventCount == 0) {
    #if DEBUG_POLL_AND_WAKE
            ALOGD("%p ~ pollOnce - timeout", this);
    #endif
            result = POLL_TIMEOUT;
            goto Done;
        }
    
        // Handle all events.
    #if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
    #endif
    
        for (int i = 0; i < eventCount; i++) {
            int fd = eventItems[i].data.fd;
            uint32_t epollEvents = eventItems[i].events;
            if (fd == mWakeEventFd.get()) {
                if (epollEvents & EPOLLIN) {
                    awoken();
                } else {
                    ALOGW("Ignoring unexpected epoll events 0x%x on wake event fd.", epollEvents);
                }
            } else {
                ssize_t requestIndex = mRequests.indexOfKey(fd);
                if (requestIndex >= 0) {
                    int events = 0;
                    if (epollEvents & EPOLLIN) events |= EVENT_INPUT;
                    if (epollEvents & EPOLLOUT) events |= EVENT_OUTPUT;
                    if (epollEvents & EPOLLERR) events |= EVENT_ERROR;
                    if (epollEvents & EPOLLHUP) events |= EVENT_HANGUP;
                    pushResponse(events, mRequests.valueAt(requestIndex));
                } else {
                    ALOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                            "no longer registered.", epollEvents, fd);
                }
            }
        }
    Done: ;
    
        // Invoke pending message callbacks.
        mNextMessageUptime = LLONG_MAX;
        while (mMessageEnvelopes.size() != 0) {
            nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
            const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
            if (messageEnvelope.uptime <= now) {
                // Remove the envelope from the list.
                // We keep a strong reference to the handler until the call to handleMessage
                // finishes.  Then we drop it so that the handler can be deleted *before*
                // we reacquire our lock.
                { // obtain handler
                    sp<MessageHandler> handler = messageEnvelope.handler;
                    Message message = messageEnvelope.message;
                    mMessageEnvelopes.removeAt(0);
                    mSendingMessage = true;
                    mLock.unlock();
    
    #if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                    ALOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
                            this, handler.get(), message.what);
    #endif
                    handler->handleMessage(message);
                } // release handler
    
                mLock.lock();
                mSendingMessage = false;
                result = POLL_CALLBACK;
            } else {
                // The last message left at the head of the queue determines the next wakeup time.
                mNextMessageUptime = messageEnvelope.uptime;
                break;
            }
        }
    
        // Release lock.
        mLock.unlock();
    
        // Invoke all response callbacks.
        for (size_t i = 0; i < mResponses.size(); i++) {
            Response& response = mResponses.editItemAt(i);
            if (response.request.ident == POLL_CALLBACK) {
                int fd = response.request.fd;
                int events = response.events;
                void* data = response.request.data;
    #if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                ALOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
                        this, response.request.callback.get(), fd, events, data);
    #endif
                // Invoke the callback.  Note that the file descriptor may be closed by
                // the callback (and potentially even reused) before the function returns so
                // we need to be a little careful when removing the file descriptor afterwards.
                int callbackResult = response.request.callback->handleEvent(fd, events, data);
                if (callbackResult == 0) {
                    removeFd(fd, response.request.seq);
                }
    
                // Clear the callback reference in the response structure promptly because we
                // will not clear the response vector itself until the next poll.
                response.request.callback.clear();
                result = POLL_CALLBACK;
            }
        }
        return result;
    }
    

    2. Handler Message Mechanism - Native Layer

    2.1 Comparison with the FWK-Layer Handler Objects

    The native layer has its own Handler-style mechanism. Looper.h defines the Message struct, the MessageHandler message-handler class, the LooperCallback callback class, and the Looper class itself. The native side likewise offers Looper::prepare(), Looper::sendMessage(), Looper::wake(), Looper::pollOnce(), and so on. They are not walked through again here; compare them against Handler Message Mechanism - FWK Layer, and keep in mind the nativePollOnce blocking operation above. A short usage sketch follows the declarations below, and after that come some LooperCallback usage examples from the system.

    system/core/libutils/include/utils/Looper.h

    /**
     * A message that can be posted to a Looper.
     */
    struct Message {
        Message() : what(0) { }
        Message(int w) : what(w) { }
    
        /* The message type. (interpretation is left up to the handler) */
        int what;
    };
    
    
    /**
     * Interface for a Looper message handler.
     *
     * The Looper holds a strong reference to the message handler whenever it has
     * a message to deliver to it.  Make sure to call Looper::removeMessages
     * to remove any pending messages destined for the handler so that the handler
     * can be destroyed.
     */
    class MessageHandler : public virtual RefBase {
    protected:
        virtual ~MessageHandler();
    
    public:
        /**
         * Handles a message.
         */
        virtual void handleMessage(const Message& message) = 0;
    };
    
    
    /**
     * A simple proxy that holds a weak reference to a message handler.
     */
    class WeakMessageHandler : public MessageHandler {
    protected:
        virtual ~WeakMessageHandler();
    
    public:
        WeakMessageHandler(const wp<MessageHandler>& handler);
        virtual void handleMessage(const Message& message);
    
    private:
        wp<MessageHandler> mHandler;
    };
    
    
    /**
     * A looper callback.
     */
    class LooperCallback : public virtual RefBase {
    protected:
        virtual ~LooperCallback();
    
    public:
        /**
         * Handles a poll event for the given file descriptor.
         * It is given the file descriptor it is associated with,
         * a bitmask of the poll events that were triggered (typically EVENT_INPUT),
         * and the data pointer that was originally supplied.
         *
         * Implementations should return 1 to continue receiving callbacks, or 0
         * to have this file descriptor and callback unregistered from the looper.
         */
        virtual int handleEvent(int fd, int events, void* data) = 0;
    };
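
    To make the comparison with the Java-side Handler concrete, here is a minimal, hypothetical usage sketch of these classes. MyHandler, looperThread() and the message code are invented for illustration; only the Looper APIs declared above are real:

    #define LOG_TAG "NativeLooperDemo"
    #include <log/log.h>
    #include <utils/Looper.h>

    using android::Looper;
    using android::Message;
    using android::MessageHandler;
    using android::sp;

    // Plays the role a Java Handler plays on the FWK side.
    class MyHandler : public MessageHandler {
    public:
        void handleMessage(const Message& message) override {
            // Called from pollInner()'s "Done:" stage, on the looper thread.
            ALOGD("got native message what=%d", message.what);
        }
    };

    void looperThread() {
        // Like Looper.prepare()/loop() in Java: one Looper per thread, cached in TLS.
        sp<Looper> looper = Looper::prepare(0 /* opts */);
        sp<MyHandler> handler = new MyHandler();

        // Rough equivalent of Handler.sendEmptyMessage(1): queues a MessageEnvelope
        // and wakes the epoll wait through the wake eventfd.
        looper->sendMessage(handler, Message(1));

        for (;;) {
            // Rough equivalent of Looper.loop(): blocks in epoll_wait(), then
            // dispatches due native Messages and fd callbacks.
            looper->pollOnce(-1);
        }
    }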
    

    2.2 LooperCallback Usage Examples


    2.2.1 The Receiver in SensorService

    App Registers a SensorEventListener - Android12
    SensorService Delivers Data to the App - Android12

    • When a sensor listener is registered, the Receiver created in the JNI/native layer inherits LooperCallback; the fd involved here is mReceiveFd
    • As for the monitoring of mSendFd: after the SensorEventConnection is created during registration, updateLooperRegistration updates the Looper registration, and the Looper obtained here is SensorService's mLooper (see the addFd sketch after the BitTube snippet below)

    frameworks/native/libs/sensor/BitTube.cpp

    int BitTube::getFd() const
    {
        return mReceiveFd;
    }
    
    int BitTube::getSendFd() const
    {
        return mSendFd;
    }
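
    Both registrations described above follow the same pattern: an fd is handed to a Looper via Looper::addFd() together with a LooperCallback whose handleEvent() is later invoked from pollInner(). A minimal, hypothetical sketch of that pattern (DemoReceiver and registerWithLooper are invented names, not the actual SensorService code):

    #include <utils/Looper.h>

    using android::Looper;
    using android::LooperCallback;
    using android::sp;

    // Stand-in for SensorService's Receiver (or input's LooperEventCallback):
    // invoked whenever the registered fd has pending events.
    class DemoReceiver : public LooperCallback {
    public:
        int handleEvent(int fd, int events, void* /*data*/) override {
            if (events & Looper::EVENT_INPUT) {
                // Drain the fd here (e.g. SensorService reads via BitTube).
            }
            return 1;  // 1 = keep receiving callbacks, 0 = unregister this fd
        }
    };

    void registerWithLooper(const sp<Looper>& looper, int fd) {
        sp<DemoReceiver> receiver = new DemoReceiver();
        // Because a callback is supplied, addFd() internally replaces the ident
        // with POLL_CALLBACK (-2); pollInner() then calls handleEvent() for us.
        looper->addFd(fd, 0 /* ident */, Looper::EVENT_INPUT, receiver, nullptr /* data */);
    }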
    

    2.2.2 InputChannel in InputManagerService

    InputChannel Establishment - Android12

    • The server side adds a LooperEventCallback for the serverChannel fd
    • The clientChannel is handed up to the framework; after InputEventReceiver initializes a NativeInputEventReceiver through JNI, it calls receiver->initialize() --> NativeInputEventReceiver::setFdEvents() (a sketch of setFdEvents follows the openInputChannelPair listing below)

    frameworks/native/libs/input/InputTransport.cpp

    status_t InputChannel::openInputChannelPair(const std::string& name,
                                                std::unique_ptr<InputChannel>& outServerChannel,
                                                std::unique_ptr<InputChannel>& outClientChannel) {
        int sockets[2];
        if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sockets)) {
            status_t result = -errno;
            ALOGE("channel '%s' ~ Could not create socket pair.  errno=%s(%d)", name.c_str(),
                  strerror(errno), errno);
            outServerChannel.reset();
            outClientChannel.reset();
            return result;
        }
    
        int bufferSize = SOCKET_BUFFER_SIZE;
        setsockopt(sockets[0], SOL_SOCKET, SO_SNDBUF, &bufferSize, sizeof(bufferSize));
        setsockopt(sockets[0], SOL_SOCKET, SO_RCVBUF, &bufferSize, sizeof(bufferSize));
        setsockopt(sockets[1], SOL_SOCKET, SO_SNDBUF, &bufferSize, sizeof(bufferSize));
        setsockopt(sockets[1], SOL_SOCKET, SO_RCVBUF, &bufferSize, sizeof(bufferSize));
    
        sp<IBinder> token = new BBinder();
    
        std::string serverChannelName = name + " (server)";
        android::base::unique_fd serverFd(sockets[0]);
        outServerChannel = InputChannel::create(serverChannelName, std::move(serverFd), token);
    
        std::string clientChannelName = name + " (client)";
        android::base::unique_fd clientFd(sockets[1]);
        outClientChannel = InputChannel::create(clientChannelName, std::move(clientFd), token);
        return OK;
    }
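
    On the client side, the InputChannel fd created above ends up being registered with the UI thread's native Looper. The sketch below shows roughly what NativeInputEventReceiver::setFdEvents() in android_view_InputEventReceiver.cpp does; it is an abridged reconstruction rather than a verbatim copy, so check the actual file for the exact member names:

    // android_view_InputEventReceiver.cpp (abridged sketch):
    // (un)registers the client InputChannel fd on the Java thread's native Looper.
    void NativeInputEventReceiver::setFdEvents(int events) {
        if (mFdEvents != events) {
            mFdEvents = events;
            int fd = mInputConsumer.getChannel()->getFd();
            if (events) {
                // 'this' is a LooperCallback; its handleEvent() consumes input events
                mMessageQueue->getLooper()->addFd(fd, 0, events, this, nullptr);
            } else {
                mMessageQueue->getLooper()->removeFd(fd);
            }
        }
    }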
    

    3. FWK-Layer and Native-Layer Interop (to be continued~)

  • Original article: https://blog.csdn.net/qq_23452385/article/details/125975368