help@rskworld.in +91 93305 39277
RSK World
  • Home
  • Development
    • Web Development
    • Mobile Apps
    • Software
    • Games
    • Project
  • Technologies
    • Data Science
    • AI Development
    • Cloud Development
    • Blockchain
    • Cyber Security
    • Dev Tools
    • Testing Tools
  • About
  • Contact

Theme Settings

Color Scheme
Display Options
Font Size
100%
Back to Project
RSK World
tensorflow-deeplearning
/
src
RSK World
tensorflow-deeplearning
Deep learning with TensorFlow and Keras
src
  • utils
  • __init__.py — 330 B
  • autoencoders.py — 8 KB
  • cnns.py — 6.7 KB
  • custom_layers.py — 8.3 KB
  • data_generator.py — 14.2 KB
  • data_preprocessing.py — 9.9 KB
  • gans.py — 7 KB
  • model_deployment.py — 8.7 KB
  • model_evaluation.py — 10.5 KB
  • model_training.py — 10.1 KB
  • neural_networks.py — 4.7 KB
  • rnns.py — 6.8 KB
  • transfer_learning.py — 5.4 KB
  • transformers.py — 7.8 KB
  • visualization.py — 9.6 KB
gans.py · custom_layers.py
src/gans.py
Raw Download
Find: Go to:
"""
Generative Adversarial Networks (GANs) with TensorFlow
Author: RSK World
Website: https://rskworld.in
Email: help@rskworld.in
Phone: +91 93305 39277

This module demonstrates GAN implementations including DCGAN.
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Model
import numpy as np
import matplotlib.pyplot as plt

def build_generator(latent_dim=100):
    """
    Build a generator model for GAN.

    Projects a latent vector to a 7x7x256 feature map, then upsamples
    through transposed convolutions to a 28x28x1 image in [-1, 1] (tanh).

    Args:
        latent_dim: Dimension of latent space

    Returns:
        Generator model
    """
    gen = keras.Sequential()

    # Dense projection of the latent vector, then reshape to a feature map.
    gen.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(latent_dim,)))
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())
    gen.add(layers.Reshape((7, 7, 256)))

    # Two transposed-conv blocks: 7x7 (stride 1) then 14x14 (stride 2).
    for filters, strides in ((128, (1, 1)), (64, (2, 2))):
        gen.add(layers.Conv2DTranspose(filters, (5, 5), strides=strides,
                                       padding='same', use_bias=False))
        gen.add(layers.BatchNormalization())
        gen.add(layers.LeakyReLU())

    # Final upsample to 28x28x1 with tanh output.
    gen.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                   use_bias=False, activation='tanh'))

    return gen

def build_discriminator(input_shape=(28, 28, 1)):
    """
    Build a discriminator model for GAN.

    Two strided conv blocks with LeakyReLU and dropout, followed by a
    single sigmoid unit producing a real/fake probability.

    Args:
        input_shape: Shape of input images

    Returns:
        Discriminator model
    """
    disc = keras.Sequential()

    # First downsampling block (defines the input shape).
    disc.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                           input_shape=input_shape))
    disc.add(layers.LeakyReLU())
    disc.add(layers.Dropout(0.3))

    # Second downsampling block.
    disc.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    disc.add(layers.LeakyReLU())
    disc.add(layers.Dropout(0.3))

    # Binary real/fake classifier head.
    disc.add(layers.Flatten())
    disc.add(layers.Dense(1, activation='sigmoid'))

    return disc

def build_dcgan(generator, discriminator):
    """
    Build a DCGAN model combining generator and discriminator.

    The discriminator is frozen inside this combined model so that
    training the stacked model updates only the generator's weights.

    Args:
        generator: Generator model
        discriminator: Discriminator model

    Returns:
        Combined GAN model
    """
    # Freeze the discriminator within the stacked model; it is still
    # trained directly via its own compiled instance.
    discriminator.trainable = False

    # Wire latent input -> generator -> discriminator.
    latent_input = keras.Input(shape=(100,))
    fake_image = generator(latent_input)
    validity = discriminator(fake_image)

    combined = Model(latent_input, validity)
    combined.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
        loss='binary_crossentropy'
    )

    return combined

def train_gan(generator, discriminator, gan, dataset, epochs=50, batch_size=128, latent_dim=100):
    """
    Train a GAN model with alternating discriminator/generator steps.

    Args:
        generator: Generator model
        discriminator: Discriminator model (compiled independently)
        gan: Combined GAN model (discriminator frozen inside)
        dataset: Training dataset yielding batches of real images
        epochs: Number of training epochs
        batch_size: Batch size (unused; kept for backward compatibility —
            the actual batch size comes from the dataset's batches)
        latent_dim: Dimension of latent space

    Returns:
        Training history dict with 'd_loss' and 'g_loss' lists (one
        average value per epoch).
    """
    real_label = 1.0
    fake_label = 0.0

    history = {'d_loss': [], 'g_loss': []}

    for epoch in range(epochs):
        epoch_d_loss = []
        epoch_g_loss = []

        for batch in dataset:
            batch_size_actual = batch.shape[0]

            # --- Discriminator step: real batch + freshly generated fakes ---
            noise = tf.random.normal([batch_size_actual, latent_dim])
            generated_images = generator(noise, training=False)

            real_images = batch
            combined_images = tf.concat([real_images, generated_images], axis=0)

            # Real images first, fakes second — matching the concat order above.
            labels = tf.concat([
                tf.ones((batch_size_actual, 1)) * real_label,
                tf.ones((batch_size_actual, 1)) * fake_label
            ], axis=0)

            # Label smoothing: small random noise makes the discriminator
            # less overconfident.
            labels += 0.05 * tf.random.uniform(labels.shape)

            d_loss = discriminator.train_on_batch(combined_images, labels)
            # When the discriminator was compiled with metrics (e.g.
            # accuracy), train_on_batch returns [loss, *metrics]; keep only
            # the scalar loss so the epoch average is a true loss average.
            if isinstance(d_loss, (list, tuple)):
                d_loss = d_loss[0]
            epoch_d_loss.append(d_loss)

            # --- Generator step: try to make the (frozen) discriminator
            # label fakes as real ---
            noise = tf.random.normal([batch_size_actual, latent_dim])
            misleading_labels = tf.ones((batch_size_actual, 1)) * real_label

            g_loss = gan.train_on_batch(noise, misleading_labels)
            if isinstance(g_loss, (list, tuple)):
                g_loss = g_loss[0]
            epoch_g_loss.append(g_loss)

        avg_d_loss = np.mean(epoch_d_loss)
        avg_g_loss = np.mean(epoch_g_loss)

        history['d_loss'].append(avg_d_loss)
        history['g_loss'].append(avg_g_loss)

        if (epoch + 1) % 10 == 0:
            print(f"Epoch {epoch + 1}/{epochs} - D Loss: {avg_d_loss:.4f}, G Loss: {avg_g_loss:.4f}")

            # Periodically save a grid of sample images for visual inspection.
            generate_and_save_images(generator, epoch + 1, latent_dim)

    return history

def generate_and_save_images(generator, epoch, latent_dim, num_images=16):
    """
    Generate and save sample images from generator.

    Saves a 4x4 grid of grayscale samples to
    'generated_images_epoch_<epoch>.png'.

    Args:
        generator: Generator model
        epoch: Current epoch number
        latent_dim: Dimension of latent space
        num_images: Number of images to generate
    """
    latent_vectors = tf.random.normal([num_images, latent_dim])
    samples = generator(latent_vectors, training=False)

    plt.figure(figsize=(4, 4))
    for idx in range(samples.shape[0]):
        plt.subplot(4, 4, idx + 1)
        # Map tanh output from [-1, 1] back to [0, 255] for display.
        plt.imshow(samples[idx, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')

    plt.tight_layout()
    plt.savefig(f'generated_images_epoch_{epoch}.png')
    plt.close()

def example_usage():
    """
    Example usage of GAN functions.

    Builds and compiles the models, prints their summaries, and runs a
    short demo training loop on random data.

    Returns:
        Tuple of (generator, discriminator, gan, history).
    """
    latent_dim = 100

    # Build and compile the two networks.
    generator = build_generator(latent_dim=latent_dim)
    discriminator = build_discriminator(input_shape=(28, 28, 1))
    discriminator.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
        loss='binary_crossentropy',
        metrics=['accuracy']
    )

    # Stack them into the combined GAN (discriminator frozen inside).
    gan = build_dcgan(generator, discriminator)

    print("Generator Model:")
    generator.summary()

    print("\nDiscriminator Model:")
    discriminator.summary()

    # Random tensors stand in for a real image dataset in this demo.
    dataset = tf.random.normal([1000, 28, 28, 1])
    dataset = tf.data.Dataset.from_tensor_slices(dataset).batch(128)

    # Short training run for demonstration purposes only.
    print("\nTraining GAN...")
    history = train_gan(
        generator, discriminator, gan,
        dataset, epochs=5, batch_size=128, latent_dim=latent_dim
    )

    return generator, discriminator, gan, history

if __name__ == '__main__':
    # Demo entry point: builds the DCGAN models and runs a short training loop.
    print("Generative Adversarial Networks with TensorFlow")
    print("Author: RSK World - https://rskworld.in")
    generator, discriminator, gan, history = example_usage()
233 lines•7 KB
python
src/custom_layers.py
Raw Download
Find: Go to:
"""
Custom Layers and Models with TensorFlow
Author: RSK World
Website: https://rskworld.in
Email: help@rskworld.in
Phone: +91 93305 39277

This module demonstrates how to create custom layers and models in TensorFlow/Keras.
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Model
import numpy as np

class DenseLayer(layers.Layer):
    """
    Custom dense layer with custom initialization.
    Author: RSK World - https://rskworld.in
    """

    def __init__(self, units, activation=None, **kwargs):
        super(DenseLayer, self).__init__(**kwargs)
        self.units = units
        # Resolve the activation name/callable once up front.
        self.activation = keras.activations.get(activation)

    def build(self, input_shape):
        # Glorot-initialised weight matrix and zero-initialised bias.
        self.kernel = self.add_weight(
            name='kernel',
            shape=(input_shape[-1], self.units),
            initializer='glorot_uniform',
            trainable=True,
        )
        self.bias = self.add_weight(
            name='bias',
            shape=(self.units,),
            initializer='zeros',
            trainable=True,
        )
        super(DenseLayer, self).build(input_shape)

    def call(self, inputs):
        # Affine transform followed by the optional activation.
        z = tf.matmul(inputs, self.kernel) + self.bias
        return z if self.activation is None else self.activation(z)

    def get_config(self):
        # Serialise constructor args so the layer can be reloaded.
        base = super(DenseLayer, self).get_config()
        base.update({
            'units': self.units,
            'activation': keras.activations.serialize(self.activation),
        })
        return base

class AttentionLayer(layers.Layer):
    """
    Custom attention layer for sequence models.
    Author: RSK World - https://rskworld.in
    """

    def __init__(self, units, **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # Two projection matrices for the additive attention score.
        self.W1 = self.add_weight(
            name='W1',
            shape=(input_shape[-1], self.units),
            initializer='glorot_uniform',
            trainable=True,
        )
        self.W2 = self.add_weight(
            name='W2',
            shape=(self.units, 1),
            initializer='glorot_uniform',
            trainable=True,
        )
        super(AttentionLayer, self).build(input_shape)

    def call(self, inputs):
        # Score each timestep: tanh(inputs @ W1) @ W2, softmax over time.
        hidden = tf.tanh(tf.matmul(inputs, self.W1))
        scores = tf.matmul(hidden, self.W2)
        weights = tf.nn.softmax(scores, axis=1)

        # Weighted sum over the time axis yields a single context vector.
        return tf.reduce_sum(weights * inputs, axis=1)

    def get_config(self):
        base = super(AttentionLayer, self).get_config()
        base.update({'units': self.units})
        return base

class ResidualBlock(layers.Layer):
    """
    Custom residual block layer.

    Two dense+batch-norm stages with a (possibly projected) shortcut
    connection, followed by a ReLU on the sum.

    Author: RSK World - https://rskworld.in
    """

    def __init__(self, units, **kwargs):
        super(ResidualBlock, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # Main path: dense -> BN -> dense -> BN.
        self.dense1 = layers.Dense(self.units, activation='relu')
        self.bn1 = layers.BatchNormalization()
        self.dense2 = layers.Dense(self.units)
        self.bn2 = layers.BatchNormalization()

        # Shortcut: project with a Dense layer only when the input width
        # differs from the block width; otherwise pass through unchanged.
        if input_shape[-1] != self.units:
            self.shortcut = layers.Dense(self.units)
        else:
            self.shortcut = lambda x: x

        super(ResidualBlock, self).build(input_shape)

    def call(self, inputs, training=False):
        # Main path (training flag propagated to batch norm).
        x = self.dense1(inputs)
        x = self.bn1(x, training=training)
        x = self.dense2(x)
        x = self.bn2(x, training=training)

        # Shortcut connection
        shortcut = self.shortcut(inputs)

        # Add and activate. Use tf.nn.relu rather than instantiating a new
        # layers.Activation object on every forward pass — creating layers
        # inside call() leaves them untracked and re-created each call.
        return tf.nn.relu(x + shortcut)

    def get_config(self):
        config = super(ResidualBlock, self).get_config()
        config.update({'units': self.units})
        return config

class CustomCNNModel(Model):
    """
    Custom CNN model using functional API.
    Author: RSK World - https://rskworld.in
    """

    def __init__(self, num_classes=10, **kwargs):
        super(CustomCNNModel, self).__init__(**kwargs)

        # Three conv -> batch-norm -> max-pool stages with growing filters.
        self.conv1 = layers.Conv2D(32, 3, activation='relu')
        self.bn1 = layers.BatchNormalization()
        self.pool1 = layers.MaxPooling2D(2)

        self.conv2 = layers.Conv2D(64, 3, activation='relu')
        self.bn2 = layers.BatchNormalization()
        self.pool2 = layers.MaxPooling2D(2)

        self.conv3 = layers.Conv2D(128, 3, activation='relu')
        self.bn3 = layers.BatchNormalization()
        self.pool3 = layers.MaxPooling2D(2)

        # Classifier head: flatten -> dense -> dropout -> softmax.
        self.flatten = layers.Flatten()
        self.dense1 = layers.Dense(256, activation='relu')
        self.dropout = layers.Dropout(0.5)
        self.dense2 = layers.Dense(num_classes, activation='softmax')

    def call(self, inputs, training=False):
        x = inputs
        # Run the three convolutional stages in order.
        for conv, bn, pool in ((self.conv1, self.bn1, self.pool1),
                               (self.conv2, self.bn2, self.pool2),
                               (self.conv3, self.bn3, self.pool3)):
            x = conv(x)
            x = bn(x, training=training)
            x = pool(x)

        # Classifier head (dropout only active while training).
        x = self.dense1(self.flatten(x))
        x = self.dropout(x, training=training)
        return self.dense2(x)

class CustomRNNModel(Model):
    """
    Custom RNN model with attention mechanism.
    Author: RSK World - https://rskworld.in
    """

    def __init__(self, vocab_size, embedding_dim=128, lstm_units=256, num_classes=10, **kwargs):
        super(CustomRNNModel, self).__init__(**kwargs)

        # Embedding followed by two stacked sequence-returning LSTMs.
        self.embedding = layers.Embedding(vocab_size, embedding_dim)
        self.lstm1 = layers.LSTM(lstm_units, return_sequences=True)
        self.lstm2 = layers.LSTM(lstm_units, return_sequences=True)
        # Attention pools the LSTM output sequence into one context vector.
        self.attention = AttentionLayer(units=128)
        self.dense1 = layers.Dense(128, activation='relu')
        self.dropout = layers.Dropout(0.5)
        self.dense2 = layers.Dense(num_classes, activation='softmax')

    def call(self, inputs, training=False):
        # Embed tokens and run them through the recurrent stack.
        seq = self.lstm2(self.lstm1(self.embedding(inputs)))
        # Attention-pool, then classify.
        context = self.attention(seq)
        hidden = self.dense1(context)
        hidden = self.dropout(hidden, training=training)
        return self.dense2(hidden)

def create_model_with_custom_layers(input_shape, num_classes):
    """
    Create a model using custom layers.

    Stacks the custom DenseLayer and two ResidualBlocks, then compiles
    with Adam and sparse categorical cross-entropy.

    Args:
        input_shape: Shape of input data
        num_classes: Number of output classes

    Returns:
        Compiled Keras model
    """
    inputs = keras.Input(shape=input_shape)

    # Custom dense layer with dropout regularisation.
    hidden = DenseLayer(128, activation='relu')(inputs)
    hidden = layers.Dropout(0.2)(hidden)

    # Two residual blocks narrowing the representation.
    hidden = ResidualBlock(64)(hidden)
    hidden = ResidualBlock(32)(hidden)

    # Softmax classification head.
    outputs = layers.Dense(num_classes, activation='softmax')(hidden)

    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )
    return model

def example_usage():
    """
    Example usage of custom layers and models.

    Builds the custom-layer model, prints its summary, and fits it
    briefly on random data.

    Returns:
        The trained Keras model.
    """
    # Random data stands in for a real dataset in this demo.
    X_train = np.random.randn(1000, 784).astype('float32')
    y_train = np.random.randint(0, 10, 1000)

    model = create_model_with_custom_layers(input_shape=(784,), num_classes=10)
    model.summary()

    # Short training run for demonstration purposes only.
    model.fit(
        X_train, y_train,
        batch_size=32,
        epochs=5,
        verbose=1,
    )

    return model

if __name__ == '__main__':
    # Demo entry point: builds and briefly trains the custom-layer model.
    print("Custom Layers and Models with TensorFlow")
    print("Author: RSK World - https://rskworld.in")
    model = example_usage()
272 lines•8.3 KB
python

About RSK World

Founded by Molla Samser, with Designer & Tester Rima Khatun, RSK World is your one-stop destination for free programming resources, source code, and development tools.

Founder: Molla Samser
Designer & Tester: Rima Khatun

Development

  • Game Development
  • Web Development
  • Mobile Development
  • AI Development
  • Development Tools

Legal

  • Terms & Conditions
  • Privacy Policy
  • Disclaimer

Contact Info

Nutanhat, Mongolkote
Purba Burdwan, West Bengal
India, 713147

+91 93305 39277

hello@rskworld.in
support@rskworld.in

© 2026 RSK World. All rights reserved.

Content used for educational purposes only. View Disclaimer