• Transfer Learning with MobileNetV2 (Andrew Ng's course)



    # UNQ_C1
    # GRADED FUNCTION: data_augmenter
    def data_augmenter():
        '''
        Create a Sequential model composed of 2 layers
        Returns:
            tf.keras.Sequential
        '''
        ### START CODE HERE
        # RandomFlip and RandomRotation come from the notebook's imports
        # (tensorflow.keras.layers.experimental.preprocessing)
        data_augmentation = tf.keras.Sequential()
        data_augmentation.add(RandomFlip('horizontal'))
        data_augmentation.add(RandomRotation(0.2))
        ### END CODE HERE
        
        return data_augmentation
    
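A quick way to sanity-check the augmenter (not part of the graded cell) is to run one image through it a few times and confirm the outputs differ. A minimal sketch, assuming the `train_dataset` built earlier in the notebook:

    # Hedged usage sketch: `train_dataset` is assumed from earlier notebook cells
    data_augmentation = data_augmenter()
    for image, _ in train_dataset.take(1):
        first_image = image[0]
        # training=True makes RandomFlip/RandomRotation actually randomize
        augmented = data_augmentation(tf.expand_dims(first_image, 0), training=True)
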
    # UNQ_C2
    # GRADED FUNCTION
    def alpaca_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):
        ''' Define a tf.keras model for binary classification on top of the MobileNetV2 base model
        Arguments:
            image_shape -- Image width and height
            data_augmentation -- data augmentation function
        Returns:
            tf.keras.Model
        '''

        input_shape = image_shape + (3,)
        
        ### START CODE HERE
        base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                       include_top=False, # <== Important: exclude the top (ImageNet classifier) layers
                                                       weights='imagenet') # use weights pre-trained on ImageNet
        # Freeze the base model by making it non-trainable
        base_model.trainable = False

        # create the input layer (same as the MobileNetV2 input size)
        inputs = tf.keras.Input(shape=input_shape)
        
        # apply data augmentation to the inputs
        x = data_augmentation(inputs)
        
        # data preprocessing using the same weights the model was trained on
        x = tf.keras.applications.mobilenet_v2.preprocess_input(x)
        
        # set training=False so the BatchNorm layers run in inference mode and their statistics are not updated
        x = base_model(x, training=False) 
        
        # Add the new Binary classification layers
        # use global avg pooling to summarize the info in each channel
        x = tfl.GlobalAveragePooling2D()(x) 
        #include dropout with probability of 0.2 to avoid overfitting
        x = tfl.Dropout(rate=0.2)(x)
            
        # create a prediction layer with one neuron (a binary classifier only needs one output unit)
        prediction_layer = tfl.Dense(1)
        
        ### END CODE HERE
        outputs = prediction_layer(x) 
        model = tf.keras.Model(inputs, outputs)
        
        return model
    
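With the function defined, the notebook builds and trains the frozen-base model before any fine-tuning. A minimal sketch of those (ungraded) cells, assuming `IMG_SIZE`, `train_dataset`, and `validation_dataset` from earlier in the notebook; the course uses a base learning rate of 0.001:

    # Hedged sketch: train with the MobileNetV2 base frozen
    model2 = alpaca_model(IMG_SIZE, data_augmenter())
    base_learning_rate = 0.001
    model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
                   loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                   metrics=['accuracy'])
    initial_epochs = 5
    history = model2.fit(train_dataset, validation_data=validation_dataset, epochs=initial_epochs)
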
    # UNQ_C3
    # The MobileNetV2 base model is stored as the 5th layer (index 4) of model2
    base_model = model2.layers[4]
    base_model.trainable = True
    # Let's take a look to see how many layers are in the base model
    print("Number of layers in the base model: ", len(base_model.layers))
    
    # Fine-tune from this layer onwards
    fine_tune_at = 120
    
    ### START CODE HERE
    
    # Freeze all the layers before the `fine_tune_at` layer
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False
        
    # Define a BinaryCrossentropy loss function. Use from_logits=True
    loss_function = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    # Define an Adam optimizer with a learning rate of 0.1 * base_learning_rate
    optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate * 0.1)
    # Use accuracy as the evaluation metric
    metrics = ['accuracy']
    ### END CODE HERE
    
    model2.compile(loss=loss_function,
                   optimizer=optimizer,
                   metrics=metrics)
    
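After recompiling, training resumes where the frozen-base run left off rather than starting from scratch. A sketch, again assuming `history`, `initial_epochs`, `train_dataset`, and `validation_dataset` from the cells above:

    # Hedged sketch: continue training with layers 120+ of the base model unfrozen
    fine_tune_epochs = 5
    total_epochs = initial_epochs + fine_tune_epochs
    history_fine = model2.fit(train_dataset,
                              epochs=total_epochs,
                              initial_epoch=history.epoch[-1],  # resume the epoch count
                              validation_data=validation_dataset)
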
  • Original post: https://blog.csdn.net/weixin_43456810/article/details/125572430