148148 " ### Defining higher-order Tensors ###\n " ,
149149 " \n " ,
150150 " '''TODO: Define a 2-d Tensor'''\n " ,
151- " matrix = tf.constant([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]) # TODO\n " ,
152- " # matrix = # TODO\n " ,
151+ " matrix = # TODO\n " ,
153152 " \n " ,
154153 " assert isinstance(matrix, tf.Tensor), \" matrix must be a tf Tensor object\"\n " ,
155154 " assert tf.rank(matrix).numpy() == 2"
166165 " '''TODO: Define a 4-d Tensor.'''\n " ,
167166 " # Use tf.zeros to initialize a 4-d Tensor of zeros with size 10 x 256 x 256 x 3.\n " ,
168167 " # You can think of this as 10 images where each image is RGB 256 x 256.\n " ,
169- " images = tf.zeros([10, 256, 256, 3]) # TODO\n " ,
170- " # images = # TODO\n " ,
168+ " images = # TODO\n " ,
171169 " \n " ,
172170 " assert isinstance(images, tf.Tensor), \" matrix must be a tf Tensor object\"\n " ,
173171 " assert tf.rank(images).numpy() == 4, \" matrix must be of rank 4\"\n " ,
263261 " # Construct a simple computation function\n " ,
264262 " def func(a,b):\n " ,
265263 " '''TODO: Define the operation for c, d, e (use tf.add, tf.subtract, tf.multiply).'''\n " ,
266- " c = tf.add(a, b)\n " ,
267- " # c = # TODO\n " ,
268- " d = tf.subtract(b, 1)\n " ,
269- " # d = # TODO\n " ,
270- " e = tf.multiply(c, d)\n " ,
271- " # e = # TODO\n " ,
264+ " c = # TODO\n " ,
265+ " d = # TODO\n " ,
266+ " e = # TODO\n " ,
272267 " return e"
273268 ]
274269 },
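Note: the removed lines give the three intended operations. A standalone sketch with a worked call (TensorFlow 2.x assumed; the sample inputs 1.5 and 2.5 are illustrative):

    import tensorflow as tf

    def func(a, b):
      c = tf.add(a, b)       # c = a + b
      d = tf.subtract(b, 1)  # d = b - 1
      e = tf.multiply(c, d)  # e = c * d
      return e

    print(func(tf.constant(1.5), tf.constant(2.5)).numpy())  # (1.5+2.5) * (2.5-1) = 6.0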
349344 " \n " ,
350345 " def call(self, x):\n " ,
351346 " '''TODO: define the operation for z (hint: use tf.matmul)'''\n " ,
352- " z = tf.matmul(x, self.W) + self.b # TODO\n " ,
353- " # z = # TODO\n " ,
347+ " z = # TODO\n " ,
354348 " \n " ,
355349 " '''TODO: define the operation for out (hint: use tf.sigmoid)'''\n " ,
356- " y = tf.sigmoid(z) # TODO\n " ,
357- " # y = # TODO\n " ,
350+ " y = # TODO\n " ,
358351 " return y\n " ,
359352 " \n " ,
360353 " # Since layer parameters are initialized randomly, we will set a random seed for reproducibility\n " ,
402395 " # Remember: dense layers are defined by the parameters W and b!\n " ,
403396 " # You can read more about the initialization of W and b in the TF documentation :)\n " ,
404397 " # https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense?version=stable\n " ,
405- " dense_layer = Dense(n_output_nodes, activation='sigmoid') # TODO\n " ,
406- " # dense_layer = # TODO\n " ,
398+ " dense_layer = # TODO\n " ,
407399 " \n " ,
408400 " # Add the dense layer to the model\n " ,
409401 " model.add(dense_layer)\n "
430422 " x_input = tf.constant([[1,2.]], shape=(1,2))\n " ,
431423 " \n " ,
432424 " '''TODO: feed input into the model and predict the output!'''\n " ,
433- " model_output = model(x_input).numpy()\n " ,
434- " # model_output = # TODO\n " ,
425+ " model_output = # TODO\n " ,
435426 " print(model_output)"
436427 ]
437428 },
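Note: calling a Keras model on a batch runs the forward pass, and .numpy() pulls the values out of the resulting tensor. Continuing from the Sequential model sketched above:

    x_input = tf.constant([[1, 2.]], shape=(1, 2))
    model_output = model(x_input).numpy()
    print(model_output)  # shape (1, n_output_nodes), entries in (0, 1) from the sigmoid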
463454 " def __init__(self, n_output_nodes):\n " ,
464455 " super(SubclassModel, self).__init__()\n " ,
465456 " '''TODO: Our model consists of a single Dense layer. Define this layer.'''\n " ,
466- " self.dense_layer = Dense(n_output_nodes, activation='sigmoid') # TODO\n " ,
467- " # self.dense_layer = '''TODO: Dense Layer'''\n " ,
457+ " self.dense_layer = '''TODO: Dense Layer'''\n " ,
468458 " \n " ,
469459 " # In the call function, we define the Model's forward pass.\n " ,
470460 " def call(self, inputs):\n " ,
529519 " \n " ,
530520 " '''TODO: Implement the behavior where the network outputs the input, unchanged, under control of the isidentity argument.'''\n " ,
531521 " def call(self, inputs, isidentity=False):\n " ,
532- " x = self.dense_layer(inputs)\n " ,
533- " if isidentity: # TODO\n " ,
534- " return inputs # TODO\n " ,
535- " return x\n " ,
536- " \n " ,
537- " # def call(self, inputs, isidentity=False):\n " ,
538- " # TODO"
522+ " ### TODO"
539523 ]
540524 },
541525 {
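Note: the removed lines gate the forward pass on the isidentity flag. A standalone sketch with the same behavior (the class name and constructor are assumptions about the rest of the cell; skipping the dense layer entirely when the flag is set also avoids the wasted computation in the removed version):

    import tensorflow as tf
    from tensorflow.keras.layers import Dense

    class IdentityModel(tf.keras.Model):
      def __init__(self, n_output_nodes):
        super(IdentityModel, self).__init__()
        self.dense_layer = Dense(n_output_nodes, activation='sigmoid')

      def call(self, inputs, isidentity=False):
        if isidentity:
          return inputs  # echo the input back, unchanged
        return self.dense_layer(inputs)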
560544 " \n " ,
561545 " x_input = tf.constant([[1,2.]], shape=(1,2))\n " ,
562546 " '''TODO: pass the input into the model and call with and without the input identity option.'''\n " ,
563- " out_activate = model.call(x_input) # TODO\n " ,
564- " # out_activate = # TODO\n " ,
565- " out_identity = model.call(x_input, isidentity=True) # TODO\n " ,
566- " # out_identity = # TODO\n " ,
547+ " out_activate = # TODO\n " ,
548+ " out_identity = # TODO\n " ,
567549 " \n " ,
568550 " print(\" Network output with activation: {}; network identity output: {}\" .format(out_activate.numpy(), out_identity.numpy()))"
569551 ]
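Note: the removed lines invoke model.call directly; Keras also forwards extra keyword arguments through model(...), which is the more idiomatic spelling. Continuing from the IdentityModel sketched above:

    model = IdentityModel(3)  # 3 output nodes, an illustrative choice
    x_input = tf.constant([[1, 2.]], shape=(1, 2))
    out_activate = model(x_input)                   # forward pass through the dense layer
    out_identity = model(x_input, isidentity=True)  # input returned unchanged
    print(out_activate.numpy(), out_identity.numpy())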
658640 " for i in range(500):\n " ,
659641 " with tf.GradientTape() as tape:\n " ,
660642 " '''TODO: define the loss as described above'''\n " ,
661- " loss = (x - x_f)**2 # \" forward pass\" : record the current loss on the tape\n " ,
662- " # loss = # TODO\n " ,
643+ " loss = # TODO\n " ,
663644 " \n " ,
664645 " # loss minimization using gradient tape\n " ,
665646 " grad = tape.gradient(loss, x) # compute the derivative of the loss with respect to x\n " ,
691672 "collapsed_sections" : [
692673 " WBk0ZDWY-ff8"
693674 ],
694- "name" : " TF_Part1_Intro .ipynb" ,
675+ "name" : " TF_Part1_Intro_Solution .ipynb" ,
695676 "provenance" : []
696677 },
697678 "kernelspec" : {
@@ -711,4 +692,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}