
RadLab Network Science

Coming Soon · In Planning · Complex Systems
Network Science Research

Complex Networks & Social Dynamics

Understanding interconnected systems through computational modeling

Research Overview

The RadLab at Northeastern University focuses on understanding complex networks in social, biological, and technological systems. Our work applies graph theory, statistical physics, and machine learning to uncover hidden patterns in large-scale networked data. Current projects include epidemic spreading models, social influence dynamics, and resilience in infrastructure networks.

Interactive Network Explorer

Network Topology & Dynamics Simulator

[Interactive simulator: reports average degree, clustering coefficient, network diameter, and modularity for the generated network, alongside a degree distribution histogram.]
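
The live explorer only runs on the page itself, but every quantity it reports is a standard networkx call. A minimal sketch, assuming a Barabási-Albert test graph stands in for the simulator's generated topology:

import networkx as nx
from collections import Counter

# Stand-in for the simulator's topology (assumption: Barabási-Albert model)
G = nx.barabasi_albert_graph(n=500, m=3, seed=42)

# The four properties shown in the panel
print("Average degree:    ", 2 * G.number_of_edges() / G.number_of_nodes())
print("Clustering coeff.: ", nx.average_clustering(G))
print("Network diameter:  ", nx.diameter(G))  # BA graphs are connected
communities = nx.algorithms.community.greedy_modularity_communities(G)
print("Modularity:        ", nx.algorithms.community.modularity(G, communities))

# Degree distribution (the histogram in the panel)
degree_counts = Counter(d for _, d in G.degree())
for k in sorted(degree_counts):
    print(f"degree {k}: {degree_counts[k]} nodes")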

Network Analysis Framework

network_analysis.py
import networkx as nx
import numpy as np
from sklearn.cluster import SpectralClustering

class NetworkAnalyzer:
    def __init__(self, graph):
        self.G = graph
        self.n = graph.number_of_nodes()
        self.m = graph.number_of_edges()
        
    def structural_properties(self):
        """Calculate fundamental network properties"""
        properties = {
            'nodes': self.n,
            'edges': self.m,
            'density': nx.density(self.G),
            'avg_degree': 2 * self.m / self.n,
            'avg_clustering': nx.average_clustering(self.G),
            'transitivity': nx.transitivity(self.G),
            'assortativity': nx.degree_assortativity_coefficient(self.G)
        }
        
        # Connected components
        if not nx.is_directed(self.G):
            components = nx.connected_components(self.G)
            giant_component = max(components, key=len)
            properties['giant_component_size'] = len(giant_component) / self.n
            
            # Diameter of giant component
            G_giant = self.G.subgraph(giant_component)
            properties['diameter'] = nx.diameter(G_giant)
            properties['avg_path_length'] = nx.average_shortest_path_length(G_giant)
        
        return properties
    
    def centrality_analysis(self):
        """Comprehensive centrality measures"""
        centralities = {
            'degree': nx.degree_centrality(self.G),
            'betweenness': nx.betweenness_centrality(self.G),
            'closeness': nx.closeness_centrality(self.G),
            'eigenvector': nx.eigenvector_centrality(self.G, max_iter=1000),
            'pagerank': nx.pagerank(self.G)
        }
        
        # Identify influential nodes
        influential = {}
        for measure, values in centralities.items():
            top_nodes = sorted(values.items(), key=lambda x: x[1], reverse=True)[:10]
            influential[measure] = top_nodes
        
        return centralities, influential
    
    def community_detection(self, method='louvain'):
        """Detect communities using various algorithms"""
        if method == 'louvain':
            # Requires the python-louvain package (provides the `community` module)
            import community as community_louvain
            partition = community_louvain.best_partition(self.G)
            modularity = community_louvain.modularity(partition, self.G)
            
        elif method == 'spectral':
            # Convert to adjacency matrix
            A = nx.to_numpy_array(self.G)
            
            # Spectral clustering
            n_communities = self._estimate_communities()
            clustering = SpectralClustering(
                n_clusters=n_communities,
                affinity='precomputed',
                random_state=42
            )
            labels = clustering.fit_predict(A)
            
            partition = dict(zip(self.G.nodes(), labels))
            groups = {}
            for node, label in partition.items():
                groups.setdefault(label, set()).add(node)
            modularity = nx.algorithms.community.modularity(self.G, groups.values())
            
        elif method == 'label_propagation':
            # Materialize the communities so they can be reused for the modularity score
            communities = list(nx.algorithms.community.label_propagation_communities(self.G))
            partition = {}
            for i, comm in enumerate(communities):
                for node in comm:
                    partition[node] = i
            modularity = nx.algorithms.community.modularity(self.G, communities)
        
        else:
            raise ValueError(f"Unknown community detection method: {method}")
        
        return partition, modularity
    
    def epidemic_simulation(self, model='SIR', beta=0.3, gamma=0.1, seeds=None):
        """Simulate discrete-time epidemic spreading on the network"""
        if model != 'SIR':
            raise NotImplementedError("Only the SIR model is implemented")
        if seeds is None:
            seeds = [np.random.choice(list(self.G.nodes()))]
        
        # Initialize states: S=0, I=1, R=2
        states = {node: 0 for node in self.G.nodes()}
        for seed in seeds:
            states[seed] = 1
        
        time_series = {'S': [], 'I': [], 'R': []}
        
        while any(state == 1 for state in states.values()):
            new_states = states.copy()
            
            for node in self.G.nodes():
                if states[node] == 1:  # Infected
                    # Recovery
                    if np.random.random() < gamma:
                        new_states[node] = 2
                    
                    # Transmission
                    for neighbor in self.G.neighbors(node):
                        if states[neighbor] == 0 and np.random.random() < beta:
                            new_states[neighbor] = 1
            
            states = new_states
            
            # Record statistics
            counts = {'S': 0, 'I': 0, 'R': 0}
            for state in states.values():
                if state == 0: counts['S'] += 1
                elif state == 1: counts['I'] += 1
                else: counts['R'] += 1
            
            for key in time_series:
                time_series[key].append(counts[key] / self.n)
        
        return time_series
    
    def robustness_analysis(self, attack_type='random', fraction=0.5):
        """Analyze network robustness under attacks"""
        G_copy = self.G.copy()
        initial_size = nx.number_of_nodes(G_copy)
        
        # Track largest component size
        robustness_curve = []
        
        nodes_to_remove = int(fraction * initial_size)
        
        if attack_type == 'random':
            nodes = list(G_copy.nodes())
            np.random.shuffle(nodes)
            remove_order = nodes[:nodes_to_remove]
            
        elif attack_type == 'targeted':
            # Remove high-degree nodes first
            degrees = dict(G_copy.degree())
            remove_order = sorted(degrees, key=degrees.get, reverse=True)[:nodes_to_remove]
        
        else:
            raise ValueError(f"Unknown attack type: {attack_type}")
        
        for node in remove_order:
            G_copy.remove_node(node)
            
            # Size of the largest connected component (0 once the graph is empty)
            if G_copy.number_of_nodes() == 0:
                largest_cc = 0
            else:
                largest_cc = len(max(nx.connected_components(G_copy), key=len))
            
            robustness_curve.append(largest_cc / initial_size)
        
        return robustness_curve
    
    def _estimate_communities(self):
        """Estimate the number of communities via the normalized Laplacian's eigenvalue gap"""
        laplacian = nx.normalized_laplacian_matrix(self.G).toarray()
        eigenvalues = np.linalg.eigvalsh(laplacian)
        
        # The largest gap among the first 20 eigenvalue gaps suggests the community count
        gaps = np.diff(eigenvalues)
        n_communities = np.argmax(gaps[:20]) + 1
        
        return int(max(2, min(n_communities, int(np.sqrt(self.n)))))
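
A minimal usage sketch for the class above; the Watts-Strogatz test graph and all parameter values are illustrative:

import networkx as nx

G = nx.connected_watts_strogatz_graph(n=1000, k=6, p=0.1, seed=1)
analyzer = NetworkAnalyzer(G)

props = analyzer.structural_properties()
print(f"Average degree: {props['avg_degree']:.2f}, diameter: {props['diameter']}")

centralities, influential = analyzer.centrality_analysis()
print("Top PageRank nodes:", influential['pagerank'][:3])

partition, modularity = analyzer.community_detection(method='label_propagation')
print(f"{len(set(partition.values()))} communities, modularity {modularity:.3f}")

sir = analyzer.epidemic_simulation(beta=0.3, gamma=0.1)
print(f"Final recovered fraction: {sir['R'][-1]:.2f}")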

Research Applications

🦠

Epidemic Modeling

COVID-19 spread prediction and intervention strategies (see the sketch after this list)

  • Contact tracing optimization
  • Vaccine distribution planning
  • Quarantine effectiveness
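
One hedged way to probe intervention strategies with the code above: approximate hub vaccination by deleting high-degree nodes before running the SIR simulation. The graph and the beta/gamma values are illustrative, not calibrated to any disease:

import networkx as nx

G = nx.barabasi_albert_graph(2000, 3, seed=7)

def outbreak_size(graph, n_vaccinated=0):
    """Remove the n_vaccinated highest-degree nodes, then run SIR once."""
    g = graph.copy()
    hubs = sorted(g.degree, key=lambda x: x[1], reverse=True)[:n_vaccinated]
    g.remove_nodes_from(node for node, _ in hubs)
    # Single stochastic run; average over many repetitions in practice
    sir = NetworkAnalyzer(g).epidemic_simulation(beta=0.2, gamma=0.1)
    return sir['R'][-1]  # final recovered fraction (attack rate)

print("No vaccination:   ", outbreak_size(G))
print("Top 100 hubs out: ", outbreak_size(G, n_vaccinated=100))
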
🌐

Social Networks

Information diffusion and influence maximization (see the sketch after this list)

  • Viral marketing strategies
  • Fake news detection
  • Community formation
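
As a hedged proxy for influence maximization, seeds can be chosen by PageRank and spread measured with the SIR simulator above (standing in for an independent-cascade model; all parameters illustrative):

import numpy as np
import networkx as nx

G = nx.barabasi_albert_graph(1000, 2, seed=3)
analyzer = NetworkAnalyzer(G)

k = 5
pagerank = nx.pagerank(G)
pr_seeds = sorted(pagerank, key=pagerank.get, reverse=True)[:k]
rng = np.random.default_rng(0)
rand_seeds = [int(v) for v in rng.choice(list(G.nodes()), size=k, replace=False)]

# Single stochastic runs; average over many repetitions in practice
for name, seeds in [('PageRank seeds', pr_seeds), ('random seeds', rand_seeds)]:
    sir = analyzer.epidemic_simulation(beta=0.1, gamma=0.2, seeds=seeds)
    print(f"{name}: reached {sir['R'][-1]:.1%} of the network")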

⚡

Infrastructure

Critical infrastructure resilience and cascading failures (see the sketch after this list)

  • Power grid stability
  • Transportation networks
  • Supply chain robustness
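
The robustness_analysis method above directly supports this comparison; a brief sketch on a scale-free test graph (graph choice illustrative):

import networkx as nx

G = nx.barabasi_albert_graph(1000, 2, seed=11)
analyzer = NetworkAnalyzer(G)

random_curve = analyzer.robustness_analysis(attack_type='random', fraction=0.5)
targeted_curve = analyzer.robustness_analysis(attack_type='targeted', fraction=0.5)

# Scale-free networks tolerate random failures but fragment under hub removal
midpoint = len(random_curve) // 2  # point where 25% of nodes are removed
print("Giant component after 25% removed:")
print("  random:  ", random_curve[midpoint])
print("  targeted:", targeted_curve[midpoint])
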
🧬

Biological Networks

Protein interactions and metabolic pathways (see the sketch after this list)

  • Drug target identification
  • Disease module detection
  • Evolutionary dynamics
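
Disease module detection often reduces to community detection on an interaction graph. A hedged sketch using the community_detection method above on a synthetic stand-in for a protein interaction network (the planted-partition model is illustrative):

import networkx as nx

# Four dense modules standing in for disease-related protein clusters
G = nx.planted_partition_graph(l=4, k=50, p_in=0.2, p_out=0.01, seed=5)

analyzer = NetworkAnalyzer(G)
partition, modularity = analyzer.community_detection(method='label_propagation')

modules = {}
for node, label in partition.items():
    modules.setdefault(label, []).append(node)

print(f"Detected {len(modules)} candidate modules, modularity = {modularity:.3f}")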

Network Datasets

Dataset              Nodes     Edges    Type        Description
Twitter Follow       41.7M     1.47B    Directed    Social follow relationships
Brain Connectivity   65K       3.9M     Weighted    Neural connections
US Power Grid        4,941     6,594    Undirected  Western US power grid
Protein-Protein      19,577    360K     Undirected  Human interactome

Tech Stack

NetworkX
igraph
Graph-tool
SNAP
Gephi
D3.js