numpy
SemenAnnigilator, 2021-09-19 17:13:21

How do I fix the "Data cardinality is ambiguous: x sizes: 4 y sizes: 2 Make sure all arrays contain the same number of samples." error?

Please tell me how to solve this problem. If necessary, I can attach the Jupyter Notebook and a folder with the files (they are images).

<code lang="python">
from numpy import ones, zeros, vstack
from numpy.random import randint, randn
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Dense, Dropout,
                                     Flatten, LeakyReLU, Reshape)
from tensorflow.keras.optimizers import Adam
from IPython.display import clear_output

def define_discriminator(in_shape = (106, 106, 1)):
    model = Sequential()
    model.add(Conv2D(64, (3,3), strides = (2,2), padding = "same", input_shape = in_shape))
    model.add(LeakyReLU(alpha = 0.2))
    model.add(Dropout(0.5))
    model.add(Conv2D(64, (3,3), strides = (2,2), padding = "same"))
    model.add(LeakyReLU(alpha = 0.2))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(1, activation = "sigmoid"))
    opt = Adam(learning_rate = 0.0002, beta_1 = 0.5)
    model.compile(loss = "binary_crossentropy", optimizer = opt, metrics = ["accuracy"])
    return model

def define_generator(latent_dim):
    model = Sequential()
    n_nodes = 128 * 53 * 53
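    # 53x53 feature maps, doubled to 106x106 by the stride-2 Conv2DTranspose below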
    model.add(Dense(n_nodes, input_dim = latent_dim))
    model.add(LeakyReLU(alpha = 0.2))
    model.add(Reshape((53, 53, 128)))
    model.add(Dense(1024))
    model.add(Conv2DTranspose(1024, (4,4), strides = (2,2), padding = "same"))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha = 0.2))
    model.add(Dense(1024))
    model.add(Conv2D(1, (7,7), padding = "same", activation = "sigmoid"))
    return model

def define_gan(g_model, d_model):
    d_model.trainable = False  # freeze the discriminator inside the combined GAN model
    model = Sequential()
    model.add(g_model)
    model.add(d_model)
    opt = Adam(learning_rate = 0.0002, beta_1 = 0.5)
    model.compile(loss = "binary_crossentropy", optimizer = opt)
    return model

def generate_real_samples(dataset, n_samples):
    ix = randint(0, dataset.shape[0], n_samples)
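    # note: the .T on the next two lines reverses every axis, so the sample
    # dimension ends up off axis 0 (where Keras expects it)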
    X = dataset[ix].T
    Y = ones((n_samples, 1)).T
    return X, Y

def generate_latent_points(latent_dim, n_samples):
    x_input = randn(latent_dim * n_samples)
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input

def generate_fake_samples(g_model, latent_dim, n_samples):
    x_input = generate_latent_points(latent_dim, n_samples)
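    # note: as in generate_real_samples, the .T below transposes the sample axis away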
    X = g_model.predict(x_input).T
    Y = zeros((n_samples, 1)).T
    return X, Y

def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=51, n_batch=10):
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    for i in range(n_epochs):
        for j in range(bat_per_epo):
            X_real, y_real = generate_real_samples(dataset, half_batch)
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            print(X_real, X_fake)
            print(y_real, y_fake)
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            d_loss, _ = d_model.train_on_batch(X, y)
            X_gan = generate_latent_points(latent_dim, n_batch)
            y_gan = ones((n_batch, 1))
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        if (i+1) % 10 == 0:
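            # summarize_performance is defined elsewhere in the notebook (not shown)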
            summarize_performance(i, g_model, d_model, dataset, latent_dim)
            clear_output()

latent_dim = 100
d_model = define_discriminator()
g_model = define_generator(latent_dim)
gan_model = define_gan(g_model, d_model)
print(pixels.shape)  # pixels is loaded earlier in the notebook (not shown here)
train(g_model, d_model, gan_model, np.array(pixels), latent_dim)
</code>
<code lang="python">
ValueError                                Traceback (most recent call last)
<ipython-input-324-b3360c520333> in <module>
      4 gan_model = define_gan(g_model, d_model)
      5 print(pixels.shape)
----> 6 train(g_model, d_model, gan_model, np.array(pixels), latent_dim)

<ipython-input-323-051ded2aca77> in train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs, n_batch)
     10             print(y_real, y_fake)
     11             X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
---> 12             d_loss, _ = d_model.train_on_batch(X, y)
     13             X_gan = generate_latent_points(latent_dim, n_batch)
     14             y_gan = ones((n_batch, 1))

~\anaconda3\envs\LikeProject\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight, reset_metrics, return_dict)
   1850     with self.distribute_strategy.scope(), \
   1851          training_utils.RespectCompiledTrainableState(self):
-> 1852       iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
   1853                                                     y, sample_weight,
   1854                                                     class_weight)

~\anaconda3\envs\LikeProject\lib\site-packages\keras\engine\data_adapter.py in single_batch_iterator(strategy, x, y, sample_weight, class_weight)
   1631     data = (x, y, sample_weight)
   1632 
-> 1633   _check_data_cardinality(data)
   1634   dataset = tf.data.Dataset.from_tensors(data)
   1635   if class_weight:

~\anaconda3\envs\LikeProject\lib\site-packages\keras\engine\data_adapter.py in _check_data_cardinality(data)
   1647           label, ", ".join(str(i.shape[0]) for i in tf.nest.flatten(single_data)))
   1648     msg += "Make sure all arrays contain the same number of samples."
-> 1649     raise ValueError(msg)
   1650 
   1651 

ValueError: Data cardinality is ambiguous:
  x sizes: 4
  y sizes: 2
Make sure all arrays contain the same number of samples.
</code>
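
For reference, the error reports the size of axis 0 of each array passed to train_on_batch; Keras treats axis 0 as the sample axis, so X and y must agree there. Here the .T calls in generate_real_samples and generate_fake_samples reverse all axes before vstack, so the first dimensions no longer count samples: y sizes: 2 matches stacking two (1, n) label rows, and x sizes: 4 would match stacking transposed 3-channel real images with 1-channel fakes (3 + 1 = 4). Below is a minimal sketch of the two helpers without the transposes, assuming pixels has shape (n_images, 106, 106, 1) to match in_shape; the names mirror the code above:

<code lang="python">
from numpy import ones, zeros
from numpy.random import randint, randn

def generate_real_samples(dataset, n_samples):
    # pick n_samples random real images; X keeps shape (n_samples, 106, 106, 1)
    ix = randint(0, dataset.shape[0], n_samples)
    X = dataset[ix]            # no .T: samples stay on axis 0
    Y = ones((n_samples, 1))   # labels shaped (n_samples, 1) to match
    return X, Y

def generate_fake_samples(g_model, latent_dim, n_samples):
    # g_model.predict already returns (n_samples, 106, 106, 1)
    x_input = randn(latent_dim * n_samples).reshape(n_samples, latent_dim)
    X = g_model.predict(x_input)
    Y = zeros((n_samples, 1))
    return X, Y
</code>

With both helpers returning half_batch samples on axis 0, vstack yields X of shape (n_batch, 106, 106, 1) and y of shape (n_batch, 1), so the cardinality check passes. This is only a sketch against the shapes implied by the traceback, not a tested drop-in: if pixels is actually RGB (which the 3 + 1 = 4 arithmetic suggests), the images would also need converting to a single channel, or in_shape and the generator's final Conv2D changed to 3 channels.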
