# (≈ 1 line of code)
# test = 
# YOUR CODE STARTS HERE
test = "Hello World"  # set test so the print below produces "test: Hello World"
print("test: " + test)
# YOUR CODE ENDS HERE
import math
import numpy as np  # needed by the numpy-based exercises below
from public_tests import *

# GRADED FUNCTION: basic_sigmoid
def basic_sigmoid(x):
    """
    Compute sigmoid of x.

    Arguments:
    x -- A scalar

    Return:
    s -- sigmoid(x)
    """
    # (≈ 1 line of code)
    # s = 
    # YOUR CODE STARTS HERE
    s = 1 / (1 + math.exp(-x))
    # YOUR CODE ENDS HERE
    return s
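# Quick sanity check of basic_sigmoid (an assumed usage example, not one of the
# graded test cells): for x = 1, 1 / (1 + e**-1) is approximately 0.7310585786.
print("basic_sigmoid(1) = " + str(basic_sigmoid(1)))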
# GRADED FUNCTION: sigmoid
def sigmoid(x):
    """
    Compute the sigmoid of x

    Arguments:
    x -- A scalar or numpy array of any size

    Return:
    s -- sigmoid(x)
    """
    # (≈ 1 line of code)
    # s = 
    # YOUR CODE STARTS HERE
    s = 1 / (1 + np.exp(-x))
    # YOUR CODE ENDS HERE
    return s
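# Example of the vectorized sigmoid (assumed usage; the array t_x is just an
# illustrative input): np.exp applies element-wise, so this works on arrays.
t_x = np.array([1, 2, 3])
print("sigmoid(t_x) = " + str(sigmoid(t_x)))
# expected to be approximately [0.73105858 0.88079708 0.95257413]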
# GRADED FUNCTION: sigmoid_derivative
def sigmoid_derivative(x):
    """
    Compute the gradient (also called the slope or derivative) of the sigmoid function with respect to its input x.
    You can store the output of the sigmoid function into variables and then use it to calculate the gradient.

    Arguments:
    x -- A scalar or numpy array

    Return:
    ds -- Your computed gradient.
    """
    # (≈ 2 lines of code)
    # s = 
    # ds = 
    # YOUR CODE STARTS HERE
    s = 1 / (1 + np.exp(-x))
    ds = s * (1 - s)
    # YOUR CODE ENDS HERE
    return ds
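# Sanity check (assumed usage): the code above uses the identity
# sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).
t_x = np.array([1, 2, 3])
print("sigmoid_derivative(t_x) = " + str(sigmoid_derivative(t_x)))
# expected to be approximately [0.19661193 0.10499359 0.04517666]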
# GRADED FUNCTION: image2vector
def image2vector(image):
    """
    Argument:
    image -- a numpy array of shape (length, height, depth)

    Returns:
    v -- a vector of shape (length*height*depth, 1)
    """
    # (≈ 1 line of code)
    # v = 
    # YOUR CODE STARTS HERE
    v = image.reshape((image.shape[0] * image.shape[1] * image.shape[2], 1))
    # YOUR CODE ENDS HERE
    return v
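# Example (assumed usage; t_image is just an illustrative 3 x 3 x 2 array):
# the reshape flattens the image into a single column vector.
t_image = np.arange(18).reshape((3, 3, 2))
print("image2vector(t_image).shape = " + str(image2vector(t_image).shape))  # (18, 1)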
# GRADED FUNCTION: normalize_rows
def normalize_rows(x):
    """
    Implement a function that normalizes each row of the matrix x (to have unit length).

    Argument:
    x -- A numpy matrix of shape (n, m)

    Returns:
    x -- The normalized (by row) numpy matrix. You are allowed to modify x.
    """
    # (≈ 2 lines of code)
    # Compute x_norm as the norm 2 of x. Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True)
    # x_norm = 
    # Divide x by its norm.
    # x = 
    # YOUR CODE STARTS HERE
    x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True)
    x = x / x_norm
    # YOUR CODE ENDS HERE
    return x
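# Example (assumed usage; t_x is an illustrative 2 x 3 matrix): each row is
# divided by its L2 norm, so every row of the result has unit length.
# The first row has norm 5, so it becomes [0, 0.6, 0.8].
t_x = np.array([[0.0, 3.0, 4.0],
                [1.0, 6.0, 4.0]])
print("normalize_rows(t_x) = " + str(normalize_rows(t_x)))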
# GRADED FUNCTION: softmax
def softmax(x):
    """Calculates the softmax for each row of the input x.

    Your code should work for a row vector and also for matrices of shape (m,n).

    Argument:
    x -- A numpy matrix of shape (m,n)

    Returns:
    s -- A numpy matrix equal to the softmax of x, of shape (m,n)
    """
    # (≈ 3 lines of code)
    # Apply exp() element-wise to x. Use np.exp(...).
    # x_exp = ...
    # Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True).
    # x_sum = ...
    # Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting.
    # s = ...
    # YOUR CODE STARTS HERE
    x_exp = np.exp(x)
    x_sum = np.sum(x_exp, axis=1, keepdims=True)
    s = x_exp / x_sum
    # YOUR CODE ENDS HERE
    return s
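# Example (assumed usage; t_x is an illustrative 2 x 5 matrix): broadcasting
# divides each row of x_exp by that row's sum, so every output row sums to 1.
t_x = np.array([[9, 2, 5, 0, 0],
                [7, 5, 0, 0, 0]])
print("softmax(t_x) = " + str(softmax(t_x)))
print("row sums = " + str(np.sum(softmax(t_x), axis=1)))  # [1. 1.]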
# GRADED FUNCTION: L1
def L1(yhat, y):
    """
    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L1 loss function defined above
    """
    # (≈ 1 line of code)
    # loss = 
    # YOUR CODE STARTS HERE
    loss = np.sum(np.abs(yhat - y), axis=0)
    # YOUR CODE ENDS HERE
    return loss
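# Example (assumed usage; yhat and y are illustrative vectors): the L1 loss is
# the sum of absolute differences, here 0.1 + 0.2 + 0.1 + 0.6 + 0.1 = 1.1.
yhat = np.array([0.9, 0.2, 0.1, 0.4, 0.9])
y = np.array([1, 0, 0, 1, 1])
print("L1 = " + str(L1(yhat, y)))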
# GRADED FUNCTION: L2
def L2(yhat, y):
    """
    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L2 loss function defined above
    """
    # (≈ 1 line of code)
    # loss = ...
    # YOUR CODE STARTS HERE
    loss = np.dot(np.abs(yhat - y), np.abs(yhat - y))
    # YOUR CODE ENDS HERE
    return loss
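# Example (assumed usage; yhat and y are the same illustrative vectors as above):
# the dot product of the difference vector with itself is the sum of squared
# differences, here 0.01 + 0.04 + 0.01 + 0.36 + 0.01 = 0.43. Note the np.abs in
# the implementation is redundant, since the terms are squared by the dot product.
yhat = np.array([0.9, 0.2, 0.1, 0.4, 0.9])
y = np.array([1, 0, 0, 1, 1])
print("L2 = " + str(L2(yhat, y)))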