Working with matrices

In [71]:
import datetime
import numpy as np
import tensorflow as tf
print(f'tensorflow version: {tf.__version__}') # find the version number (should be 2.x+)
tensorflow version: 2.12.0

Working With Matrices

Matrix multiplication

> 🔑 Note: in Python, '@' is the operator for matrix multiplication.

One of the most common operations in machine learning algorithms is matrix multiplication.

TensorFlow implements this matrix multiplication functionality in the tf.matmul() method.

Matrix Multiplication Rules

The inner dimensions of the two matrices must match:

  • (3, 5) @ (3, 5) won't work
  • (5, 3) @ (3, 5) will work
  • (3, 5) @ (5, 3) will work

The resulting matrix has the shape of the outer dimensions:

  • (5, 3) @ (3, 5) -> (5, 5)
  • (3, 5) @ (5, 3) -> (3, 3)
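
As a quick sanity check of both rules (a minimal sketch; the shapes here are arbitrary):

import tensorflow as tf

A = tf.ones(shape=(5, 3))
B = tf.ones(shape=(3, 5))

print((A @ B).shape)  # (5, 5) -- inner dims (3 and 3) match, outer dims give the result
print((B @ A).shape)  # (3, 3)
# B @ B would raise InvalidArgumentError: inner dimensions (5 and 3) don't match
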
In [72]:
simpleTensor = tf.constant([[10, 7], [3, 4]])

# A rank-3 tensor, for reference (the multiplication below uses simpleTensor)
tensor = tf.constant([[[1, 2, 3],
                       [4, 5, 6]],
                      [[7, 8, 9],
                       [10, 11, 12]],
                      [[13, 14, 15],
                       [16, 17, 18]]])

print('TENSOR--------')
print(tensor)
print('----simpleTensor MULTIPLIED----')
tf.matmul(simpleTensor, simpleTensor)
TENSOR--------
tf.Tensor(
[[[ 1  2  3]
  [ 4  5  6]]

 [[ 7  8  9]
  [10 11 12]]

 [[13 14 15]
  [16 17 18]]], shape=(3, 2, 3), dtype=int32)
----simpleTensor MULTIPLIED----
Out [72]:
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[121,  98],
       [ 42,  37]], dtype=int32)>
In [73]:
# Matrix multiplication with Python operator '@'
simpleTensor @ simpleTensor
Out [73]:
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[121,  98],
       [ 42,  37]], dtype=int32)>
In [74]:
# Create (3, 2) tensor
X = tf.constant([[1, 2],
                 [3, 4],
                 [5, 6]])

# Create another (3, 2) tensor
Y = tf.constant([[7, 8],
                 [9, 10],
                 [11, 12]])
X, Y
Out [74]:
(<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
 array([[1, 2],
        [3, 4],
        [5, 6]], dtype=int32)>,
 <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
 array([[ 7,  8],
        [ 9, 10],
        [11, 12]], dtype=int32)>)
In [75]:
# 
# Will ERROR
# 
# X @ Y


# Running it raises:
# InvalidArgumentError: {{function_node __wrapped__MatMul_device_/job:localhost/replica:0/task:0/device:CPU:0}} Matrix size-incompatible: In[0]: [3,2], In[1]: [3,2] [Op:MatMul]

Trying to matrix multiply two tensors with the shape (3, 2) errors because the inner dimensions don't match.

We need to either:

  • Reshape X to (2, 3) so it's (2, 3) @ (3, 2).
  • Reshape Y to (2, 3) so it's (3, 2) @ (2, 3).

Reshaping Matrices

Reshaping can be done with either tf.reshape() or tf.transpose():

Reshape

In [76]:
# Example of reshape (3, 2) -> (2, 3)
print('-----Y----')
print(Y)
print('-----Y reshaped----')
tf.reshape(Y, shape=(2, 3))
-----Y----
tf.Tensor(
[[ 7  8]
 [ 9 10]
 [11 12]], shape=(3, 2), dtype=int32)
-----Y reshaped----
Out [76]:
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[ 7,  8,  9],
       [10, 11, 12]], dtype=int32)>
In [77]:
# 
# NOW multiplying X & Y works
# 
X @ tf.reshape(Y, shape=(2, 3))
Out [77]:
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[ 27,  30,  33],
       [ 61,  68,  75],
       [ 95, 106, 117]], dtype=int32)>

Transpose

In [78]:
# Example of transpose (3, 2) -> (2, 3)
tf.transpose(X)
Out [78]:
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 3, 5],
       [2, 4, 6]], dtype=int32)>
In [79]:
# 
# NOW multiplying X & Y will work
# 
tf.matmul(tf.transpose(X), Y)
Out [79]:
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 89,  98],
       [116, 128]], dtype=int32)>

tf.matmul() can also do the transposing for you, via the transpose_a and transpose_b parameters:

In [80]:
tf.matmul(a=X, b=Y, transpose_a=True, transpose_b=False)
Out [80]:
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 89,  98],
       [116, 128]], dtype=int32)>

Notice the difference in the resulting shapes when transposing X or reshaping Y. Machine learning engineers and those working with neural networks often spend a lot of time reshaping data (in the form of tensors) to prepare it for various operations (such as feeding it to a model).

The dot product

Multiplying matrices by each other is also referred to as the dot product.

tensordot

You can perform the tf.matmul() operation using tf.tensordot().

In [81]:
# Perform the dot product on X and Y (requires X to be transposed)
tf.tensordot(tf.transpose(X), Y, axes=1)
Out [81]:
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 89,  98],
       [116, 128]], dtype=int32)>
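The axes argument controls how many axes get summed over. A minimal sketch (reusing X and Y from above; axes=0 is shown just for contrast):

# axes=1 sums over the last axis of the first tensor and the first axis of
# the second -- standard matrix multiplication for 2-D tensors
print(tf.tensordot(tf.transpose(X), Y, axes=1).shape)  # (2, 2)

# axes=0 does no summing and returns the outer product instead
print(tf.tensordot(X, Y, axes=0).shape)  # (3, 2, 3, 2)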

Comparing reshape & transpose

These two approaches produce different results.

In [82]:
# 
# transposing Y
# 
# Perform matrix multiplication between X and Y (transposed)
tf.matmul(X, tf.transpose(Y))
Out [82]:
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[ 23,  29,  35],
       [ 53,  67,  81],
       [ 83, 105, 127]], dtype=int32)>
In [83]:
# 
# reshaping Y 
# 
tf.matmul(X, tf.reshape(Y, (2, 3)))
Out [83]:
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[ 27,  30,  33],
       [ 61,  68,  75],
       [ 95, 106, 117]], dtype=int32)>
In [84]:
# Inspecting the shape of Y under each reshaping method

print(f'Y.shape: {Y.shape}')
print(f'Y.reshape: {tf.reshape(Y, (2, 3)).shape}')
print(f'Y.transpose: {tf.transpose(Y).shape}')
Y.shape: (3, 2)
Y.reshape: (2, 3)
Y.transpose: (2, 3)
In [85]:
# Check the values of Y, reshaped Y and transposed Y
print("Y:")
print(Y, "\n") # "\n" for newline

print("Y reshaped to (2, 3):")
print(tf.reshape(Y, (2, 3)), "\n")

print("Y transposed:")
print(tf.transpose(Y))
Y:
tf.Tensor(
[[ 7  8]
 [ 9 10]
 [11 12]], shape=(3, 2), dtype=int32) 

Y reshaped to (2, 3):
tf.Tensor(
[[ 7  8  9]
 [10 11 12]], shape=(2, 3), dtype=int32) 

Y transposed:
tf.Tensor(
[[ 7  9 11]
 [ 8 10 12]], shape=(2, 3), dtype=int32)

The different results can be explained:

  • tf.reshape() - changes the shape of the given tensor (first) and then inserts the values in the order they appear (in our case: 7, 8, 9, 10, 11, 12).
  • tf.transpose() - swaps the order of the axes; by default the last axis becomes the first, though the order can be changed using the perm parameter.

So which should you use?

When lining up matrices for multiplication, tf.transpose() is generally the right choice: it keeps each value paired with the rest of its original row or column, whereas tf.reshape() simply refills the new shape with the values in order, scrambling those pairings.
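A minimal sketch of the difference, reusing Y from above:

print(tf.transpose(Y)[0])        # [ 7  9 11] -- the original first column of Y
print(tf.reshape(Y, (2, 3))[0])  # [7 8 9]    -- just the first three values of Y, in order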

References and examples

  • If we transposed Y, it would be represented as Yᵀ (note the capital T for transpose).
  • Visual demo of matrix multiplication.

Absolute values

In [86]:
# Create tensor with negative values
D = tf.constant([-7, -10])
print(D)
# Get the absolute values
print(f'abs values: {tf.abs(D)}')
tf.Tensor([ -7 -10], shape=(2,), dtype=int32)
abs values: [ 7 10]

Stats: min, max, mean, sum (aggregation)

Aggregation methods typically follow the syntax tf.reduce_[action](), such as:

  • tf.reduce_min() - find the minimum value.
  • tf.reduce_max() - find the maximum value.
  • tf.reduce_mean() - find the mean of all elements.
  • tf.reduce_sum() - find the sum of all elements.

Also available are the standard deviation (tf.reduce_std()) and variance (tf.reduce_variance()).

Note: typically, each of these is under the math module, e.g. tf.math.reduce_min() but you can use the alias tf.reduce_min().

In [87]:
# Create a tensor with 50 random values between 0 and 100
E = tf.constant(np.random.randint(low=0, high=100, size=50))
print(E)

print('--------')
# minimum
print(f'MIN: {tf.reduce_min(E)}')

# maximum
print(f'MAX: {tf.reduce_max(E)}')

# mean
print(f'MEAN: {tf.reduce_mean(E)}')

print(f'SUM: {tf.reduce_sum(E)}')
tf.Tensor(
[67 61 40 20 98  0 76 38 23 54 73 75 77 38 89  3  4 42 49 76 82  6 32  1
 31  0 29 15 93 98 50 18 53 13 51 60 54 57 56  1 92 72 91  6 86 63 10 65
 16 40], shape=(50,), dtype=int64)
--------
MIN: 0
MAX: 98
MEAN: 46
SUM: 2344
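The standard deviation and variance need float inputs, so cast first. A minimal sketch using the E tensor from the cell above:

E_float = tf.cast(E, dtype=tf.float32)
print(f'STD: {tf.math.reduce_std(E_float)}')
print(f'VARIANCE: {tf.math.reduce_variance(E_float)}')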

Finding the positional maximum and minimum

  • tf.argmax() - find the position of the maximum element in a given tensor.
  • tf.argmin() - find the position of the minimum element in a given tensor.
In [88]:
# Create a tensor with 50 values between 0 and 1
F = tf.constant(np.random.random(50))
F
Out [88]:
<tf.Tensor: shape=(50,), dtype=float64, numpy=
array([0.36093207, 0.10366052, 0.15027288, 0.45924361, 0.50528467,
       0.76370077, 0.8763022 , 0.47184988, 0.00366867, 0.11415668,
       0.93080914, 0.07551295, 0.06457022, 0.71277592, 0.80367897,
       0.17191018, 0.2799615 , 0.25305706, 0.69857444, 0.76043815,
       0.23106375, 0.77264264, 0.60445771, 0.62934127, 0.21972642,
       0.09398097, 0.52897258, 0.08095592, 0.34843743, 0.89833881,
       0.01440641, 0.52955682, 0.01922664, 0.39039613, 0.57762191,
       0.84800918, 0.35243873, 0.78801068, 0.88486316, 0.64931281,
       0.94677764, 0.5485244 , 0.36491433, 0.9145402 , 0.03277379,
       0.55837612, 0.87561091, 0.19186637, 0.56370985, 0.0802614 ])>
In [89]:
# Find the maximum element position of F
tf.argmax(F)
Out [89]:
<tf.Tensor: shape=(), dtype=int64, numpy=40>
In [90]:
# Find the minimum element position of F
tf.argmin(F)
Out [90]:
<tf.Tensor: shape=(), dtype=int64, numpy=8>
In [91]:
# Confirm the maximum value of F via tf.argmax()
print(f"The maximum value of F is {tf.reduce_max(F).numpy()} at position: {tf.argmax(F).numpy()}") 
print(f"Using tf.argmax() to index F, the maximum value of F is: {F[tf.argmax(F)].numpy()}")
print(f"Are the two max values the same? {F[tf.argmax(F)].numpy() == tf.reduce_max(F).numpy()}")
The maximum value of F is 0.9467776376641881 at position: 40
Using tf.argmax() to index F, the maximum value of F is: 0.9467776376641881
Are the two max values the same? True

One-hot encoding

In [92]:
# Create a list of indices
some_list = [0, 1, 2, 3]

# One hot encode them
print(tf.one_hot(some_list, depth=len(some_list)))
print(tf.one_hot(some_list, depth=len(some_list) - 2))
tf.Tensor(
[[1. 0. 0. 0.]
 [0. 1. 0. 0.]
 [0. 0. 1. 0.]
 [0. 0. 0. 1.]], shape=(4, 4), dtype=float32)
tf.Tensor(
[[1. 0.]
 [0. 1.]
 [0. 0.]
 [0. 0.]], shape=(4, 2), dtype=float32)
In [93]:
# Specify custom on and off values for one-hot encoding
on="ON"
off="-"
tf.one_hot(some_list, depth=4, on_value=on, off_value=off)
Out [93]:
<tf.Tensor: shape=(4, 4), dtype=string, numpy=
array([[b'ON', b'-', b'-', b'-'],
       [b'-', b'ON', b'-', b'-'],
       [b'-', b'-', b'ON', b'-'],
       [b'-', b'-', b'-', b'ON']], dtype=object)>

Squaring, log, square root

  • tf.square() - get the square of every value in a tensor.
  • tf.sqrt() - get the square root of every value in a tensor (note: the elements need to be floats or this will error).
  • tf.math.log() - get the natural log of every value in a tensor (elements need to be floats).
In [94]:
# Create a new tensor
H = tf.constant(np.arange(1, 10))
print(H)
H
tf.Tensor([1 2 3 4 5 6 7 8 9], shape=(9,), dtype=int64)
Out [94]:
<tf.Tensor: shape=(9,), dtype=int64, numpy=array([1, 2, 3, 4, 5, 6, 7, 8, 9])>
In [95]:
# 
# Square
# 
tf.square(H)

# 
# sqrt 
# 
# shows error
# tf.sqrt(H)
# InvalidArgumentError: Value for attr 'T' of int64 is not in the list of allowed values: bfloat16, half, float, double, complex64, complex128
# 	; NodeDef: {{node Sqrt}}; Op<name=Sqrt; signature=x:T -> y:T; attr=T:type,allowed=[DT_BFLOAT16, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128]> [Op:Sqrt]
Out [95]:
<tf.Tensor: shape=(9,), dtype=int64, numpy=array([ 1,  4,  9, 16, 25, 36, 49, 64, 81])>
In [96]:
# Change H to float32
H = tf.cast(H, dtype=tf.float32)
H
Out [96]:
<tf.Tensor: shape=(9,), dtype=float32, numpy=array([1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)>
In [97]:
# Find the square root
tf.sqrt(H)
Out [97]:
<tf.Tensor: shape=(9,), dtype=float32, numpy=
array([1.       , 1.4142135, 1.7320508, 2.       , 2.236068 , 2.4494898,
       2.6457512, 2.828427 , 3.       ], dtype=float32)>
In [98]:
# 
# log (input also needs to be float)
# 
tf.math.log(H)
Out [98]:
<tf.Tensor: shape=(9,), dtype=float32, numpy=
array([0.       , 0.6931472, 1.0986123, 1.3862944, 1.609438 , 1.7917595,
       1.9459102, 2.0794415, 2.1972246], dtype=float32)>

Manipulating tf.Variable tensors

Change tensors in place with:

  • .assign() - assign a different value to a particular index of a variable tensor.
  • .assign_add() - add to an existing value and reassign it at a particular index of a variable tensor.
In [99]:
# Create a variable tensor
I = tf.Variable(np.arange(0, 5))
print('I:')
print(I)

# 
# Assign the final value a new value of 50
# 
print('I.assign...')
print(I.assign([0, 1, 2, 3, 50]))

# NOTE: no copy made, the "original" gets the update
print('I again')
print(I)

# 
# 
#
# Add 10 to every element in I
print('I.assign_add')
I.assign_add([10, 10, 10, 10, 10])
I:
<tf.Variable 'Variable:0' shape=(5,) dtype=int64, numpy=array([0, 1, 2, 3, 4])>
I.assign...
<tf.Variable 'UnreadVariable' shape=(5,) dtype=int64, numpy=array([ 0,  1,  2,  3, 50])>
I again
<tf.Variable 'Variable:0' shape=(5,) dtype=int64, numpy=array([ 0,  1,  2,  3, 50])>
I.assign_add
Out [99]:
<tf.Variable 'UnreadVariable' shape=(5,) dtype=int64, numpy=array([10, 11, 12, 13, 60])>

Tensors and NumPy

Tensors can also be converted to NumPy arrays using:

  • np.array() - pass a tensor to convert to an ndarray (NumPy's main datatype)
  • tensor.numpy() - call on a tensor to convert to an ndarray

Doing this can be helpful: it makes tensors iterable and makes NumPy's methods available.
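
For example (a minimal sketch; K is a throwaway name):

K = tf.constant([1., 2., 3.])
print(np.mean(K.numpy()))  # NumPy methods work on the converted ndarray
for value in K.numpy():    # and the ndarray is a plain iterable of values
    print(value)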

In [100]:
# Create a tensor from a NumPy array
J = tf.constant(np.array([3., 7., 10.]))
print(f'J: {J}')
print(f'type of J: {type(J)}')
print(f'J.numpy: {J.numpy()}')
print(f'new type of J: {type(J.numpy())}')
J: [ 3.  7. 10.]
type of J: <class 'tensorflow.python.framework.ops.EagerTensor'>
J.numpy: [ 3.  7. 10.]
new type of J: <class 'numpy.ndarray'>
In [101]:
# Create a tensor from a NumPy array and one directly from a Python list
numpy_J = tf.constant(np.array([3., 7., 10.])) # will be float64 (due to NumPy)
tensor_J = tf.constant([3., 7., 10.]) # will be float32 (due to being TensorFlow default)
numpy_J.dtype, tensor_J.dtype
Out [101]:
(tf.float64, tf.float32)

Using @tf.function

Python functions can be decorated with @tf.function (for background, see RealPython's guide on decorators). Decorators modify a function in one way or another.

In the @tf.function case, it turns a Python function into a callable TensorFlow graph, which is a fancy way of saying: if you've written your own Python function and decorate it with @tf.function, when you export your code (to potentially run on another device), TensorFlow will attempt to convert it into a fast(er) version of itself (by making it part of a computation graph).

For more on this, read the Better performance with tf.function guide.

In [102]:
# Create a simple function (named for what it computes: x squared plus y)
def squarePlusY(x, y):
    return x ** 2 + y

x = tf.constant(np.arange(0, 10))
print(f'x: {x}')
y = tf.constant(np.arange(10, 20))
print(f'y: {y}')
print(squarePlusY(x, y))
x: [0 1 2 3 4 5 6 7 8 9]
y: [10 11 12 13 14 15 16 17 18 19]
tf.Tensor([ 10  12  16  22  30  40  52  66  82 100], shape=(10,), dtype=int64)
In [103]:
# Create the same function and decorate it with tf.function
@tf.function
def tf_function(x, y):
  return x ** 2 + y

tf_function(x, y)
2025-02-15 21:18:58.698370: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
Out [103]:
<tf.Tensor: shape=(10,), dtype=int64, numpy=array([ 10,  12,  16,  22,  30,  40,  52,  66,  82, 100])>
In [104]:
print(tf.config.list_physical_devices('GPU'))
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]

If the above outputs an empty array (or nothing), it means you don't have access to a GPU (or at least TensorFlow can't find it).

If you're running in Google Colab, you can access a GPU by going to Runtime -> Change Runtime Type -> Select GPU.
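If you want to confirm where a particular tensor lives, its .device attribute shows the placement (a minimal sketch; the exact device string varies by machine):

t = tf.constant([1.0, 2.0])
print(t.device)  # e.g. '/job:localhost/replica:0/task:0/device:GPU:0'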

Matrix Practice

  1. Create a vector, scalar, matrix and tensor with (any) values using tf.constant().
  2. Find the shape, rank and size of the tensors from 1.
  3. Create two tensors containing random values between 0 and 1 with shape [5, 300].
  4. Multiply the two tensors from 3 using matrix multiplication.
  5. Multiply the two tensors from 3 using the dot product.
  6. Create a tensor with random values between 0 and 1 with shape [224, 224, 3].
  7. Find the min and max values of the tensor from 6.
  8. Create a tensor with random values of shape [1, 224, 224, 3], then squeeze it to change the shape to [224, 224, 3].
  9. Create a tensor with shape [10] using any values, then find the index of the maximum value.
  10. One-hot encode the tensor from 9.

In [105]:
# 1
vectorTest = tf.constant([2, 3])
scalarTest = tf.constant(2)
matrixTest = tf.constant([[2, 3], [4, 5]])
tensorTest = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
In [106]:
# 2
print('#2')
print('vector props:')
print(f'shape: {vectorTest.shape}')
print(f'rank: {tf.rank(vectorTest)}')
print(f'size: {tf.size(vectorTest)}')

print('')
print('matrix props:')
print(f'shape: {matrixTest.shape}')
print(f'rank: {tf.rank(matrixTest)}')
print(f'size: {tf.size(matrixTest)}')
#2
vector props:
shape: (2,)
rank: 1
size: 2

matrix props:
shape: (2, 2)
rank: 2
size: 4
In [107]:

#3
print('')
print('#3: 2 random tensors')
random_1 = tf.random.Generator.from_seed(42) # a seeded generator (note: not used by the tf.random.uniform calls below)
randomTestOne = tf.random.uniform(shape=[5,300], minval=0, maxval=1)
randomTestTwo = tf.random.uniform(shape=[5,300], minval=0, maxval=1)
print(randomTestOne)
print(randomTestTwo)
#3: 2 random tensors
tf.Tensor(
[[0.31179297 0.8263413  0.6849456  ... 0.56451845 0.44649565 0.41026497]
 [0.60833323 0.50973356 0.31043375 ... 0.50437474 0.6110033  0.7943536 ]
 [0.20573044 0.5442884  0.10470831 ... 0.18146193 0.26921892 0.71201587]
 [0.31422472 0.15571809 0.59350574 ... 0.21489167 0.96785796 0.13088453]
 [0.00473952 0.06306279 0.91214204 ... 0.7092078  0.93488145 0.7450988 ]], shape=(5, 300), dtype=float32)
tf.Tensor(
[[0.31179297 0.8263413  0.6849456  ... 0.56451845 0.44649565 0.41026497]
 [0.60833323 0.50973356 0.31043375 ... 0.50437474 0.6110033  0.7943536 ]
 [0.20573044 0.5442884  0.10470831 ... 0.18146193 0.26921892 0.71201587]
 [0.31422472 0.15571809 0.59350574 ... 0.21489167 0.96785796 0.13088453]
 [0.00473952 0.06306279 0.91214204 ... 0.7092078  0.93488145 0.7450988 ]], shape=(5, 300), dtype=float32)
In [108]:

print('')
print('#4: matrix multiply the 2 random tensors')
print(tf.matmul(randomTestOne, tf.transpose(randomTestTwo)))
#4: matrix multiply the 2 random tensors
tf.Tensor(
[[ 98.34755   80.0121    76.37611   70.609406  74.27988 ]
 [ 80.0121   107.65361   74.474396  72.36779   79.742485]
 [ 76.37611   74.474396  99.23935   71.1331    72.19905 ]
 [ 70.609406  72.36779   71.1331    89.66724   69.214325]
 [ 74.27988   79.742485  72.19905   69.214325  99.42442 ]], shape=(5, 5), dtype=float32)
In [109]:

print('')
print('#5: multiply using the dot product')
print(tf.tensordot(tf.transpose(randomTestOne), randomTestTwo, axes=1))
#5: multiply using the dot product
tf.Tensor(
[[0.6083688  0.7289413  0.6147674  ... 0.5910586  0.87485015 0.80229133]
 [0.7289413  1.2671431  0.9311706  ... 0.9005364  1.0366086  1.1988387 ]
 [0.6147674  0.9311706  1.7607356  ... 1.3366777  1.9508647  1.359474  ]
 ...
 [0.5910586  0.9005364  1.3366777  ... 1.1551576  1.4800924  1.3180138 ]
 [0.87485015 1.0366086  1.9508647  ... 1.4800924  2.4559145  1.683479  ]
 [0.80229133 1.1988387  1.359474   ... 1.3180138  1.683479   1.8785846 ]], shape=(300, 300), dtype=float32)
In [110]:
print('')
print('#6: random tensor with shape')
testSix = tf.random.uniform(shape=[224, 224, 3], minval=0, maxval=1)
print(testSix)
#6: random tensor with shape
tf.Tensor(
[[[0.31179297 0.8263413  0.6849456 ]
  [0.0067091  0.78749514 0.3906511 ]
  [0.29263055 0.99216926 0.95810425]
  ...
  [0.43970728 0.976676   0.8946736 ]
  [0.9747398  0.7011992  0.73613393]
  [0.4360633  0.7536833  0.01353109]]

 [[0.2823602  0.0927937  0.2424469 ]
  [0.92214036 0.1318667  0.18895125]
  [0.13469017 0.29916108 0.22880554]
  ...
  [0.10263181 0.3771491  0.7380618 ]
  [0.6484846  0.69211054 0.67284966]
  [0.43589175 0.48166    0.14082742]]

 [[0.41235518 0.45162737 0.6804353 ]
  [0.04887676 0.6775023  0.29143417]
  [0.3466624  0.17450547 0.38451958]
  ...
  [0.8667455  0.16113937 0.67500424]
  [0.8560462  0.48929954 0.97561824]
  [0.5725883  0.8036673  0.3923154 ]]

 ...

 [[0.68855166 0.3706571  0.03126299]
  [0.61133444 0.21750414 0.9080095 ]
  [0.20064199 0.31340492 0.85072494]
  ...
  [0.8053781  0.54289234 0.08450699]
  [0.6037886  0.0091573  0.14664805]
  [0.36727667 0.66913784 0.9003469 ]]

 [[0.4256966  0.88771296 0.6295724 ]
  [0.99141705 0.49446368 0.28761828]
  [0.18791914 0.23971832 0.9050621 ]
  ...
  [0.871716   0.18161333 0.5948199 ]
  [0.41212344 0.57436764 0.69658744]
  [0.64828384 0.0412488  0.70286477]]

 [[0.48708987 0.24799585 0.5379176 ]
  [0.21690464 0.206483   0.40172505]
  [0.8851794  0.11487484 0.05643916]
  ...
  [0.97581923 0.1263851  0.1536969 ]
  [0.47106755 0.2772559  0.00255883]
  [0.16718328 0.01739931 0.56322515]]], shape=(224, 224, 3), dtype=float32)
In [111]:
print('')
print('#7: min & max of that tensor')
print(f'min: {tf.reduce_min(testSix)}')
print(f'max: {tf.reduce_max(testSix)}')
#7: min & max of that tensor
min: 4.0531158447265625e-06
max: 0.9999873638153076
In [112]:
print('')
print('#8: random tensor & squeeze')
testEight = tf.random.uniform(shape=[1, 224, 224, 3])
print(f'testEight shape:{testEight.shape}')
print(f'testEight SQUEEZED:{tf.squeeze(testEight).shape}')
#8: random tensor & squeeze
testEight shape:(1, 224, 224, 3)
testEight SQUEEZED:(224, 224, 3)
In [113]:
print('#9: create with shape and custom vals')
my_values = [1, 2, 10, 4, 5, 6, 7, 8, 9, 3]
testNine = tf.constant(my_values, shape=[10])
print(testNine)

#find the matrix item index which has the maximum value
max_index = tf.argmax(testNine).numpy()
print(f"Index of the largest number: {max_index}")
#9: create with shape and custom vals
tf.Tensor([ 1  2 10  4  5  6  7  8  9  3], shape=(10,), dtype=int32)
Index of the largest number: 2
In [114]:
print('#10: one-hot-encode #9')
# note: this encodes the *values* of testNine; the value 10 is >= depth, so its row is all zeros
print(tf.one_hot(testNine, depth=len(testNine)))
#10: one-hot-encode #9
tf.Tensor(
[[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
 [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]], shape=(10, 10), dtype=float32)
Page Tags:
python
data-science
jupyter
learning
numpy