315 – Network


Problem Statement

Given a connected undirected graph, find its critical nodes (articulation points). A critical node is a node whose removal leaves the graph disconnected.

A straightforward solution – removing each node and running a DFS connectivity check – will get an AC, but the following is an implementation that uses Tarjan’s DFS algorithm for finding articulation points:

#include <algorithm>
#include <iostream>
#include <fstream>
#include <cstring>
#include <limits>
#include <sstream>
#include <string>
#include <stack>
#include <queue>
#include <map>
#include <set>
using namespace std;

// Global result containers shared with main():
//   apNodes - articulation points found so far
//   apVis   - nodes visited by any DFS run (lets main run one DFS per component)
//   apEdges - bridges, stored as (smaller endpoint, larger endpoint) pairs
set<int> apNodes, apVis;
set<pair<int, int> > apEdges;
// Iterative Tarjan DFS rooted at s. Fills apNodes with the articulation
// points and apEdges with the bridges of the component containing s.
// Each node passes over the stack top twice: first visit ("grey") stamps
// discovery/low values and schedules neighbors; second visit ("black")
// propagates the low-link to the parent and reports points/bridges.
void articulationPointAndBridge(map<int, map<int, int> > &graph, int s) {
    map<int, int> parent, num_dfs, num_low;
    int children_s = 0;      // number of DFS-tree children of the root s
    int num_dfs_size = 0;    // DFS discovery clock
    stack<int> stk;

    parent[s] = s;           // the root is its own parent
    stk.push(s);

    while (!stk.empty()) {
        const int u = stk.top();
        if (apVis.find(u) == apVis.end()) {
            // Grey: first time u surfaces — stamp discovery time / low-link.
            num_dfs[u] = num_low[u] = num_dfs_size++;
            apVis.insert(u);
            if (u != s && parent[u] == s)
                ++children_s;
            for (map<int, int>::reverse_iterator v = graph[u].rbegin();
                 v != graph[u].rend(); ++v) {
                if (apVis.find(v->first) == apVis.end()) {
                    // Tree edge: remember the parent, descend later.
                    parent[v->first] = u;
                    stk.push(v->first);
                } else if (parent[u] != v->first) {
                    // Back edge (not the edge to the DFS parent).
                    num_low[u] = min(num_low[u], num_dfs[v->first]);
                }
            }
        } else {
            // Black: u's subtree is finished — report and propagate.
            if (parent[u] != u) {
                const int p = parent[u];
                if (num_dfs[p] <= num_low[u])
                    apNodes.insert(p);   // p separates u's subtree (root fixed below)
                if (num_dfs[p] < num_low[u])   // no back edge climbs above p
                    apEdges.insert(u < p ? make_pair(u, p) : make_pair(p, u));
                num_low[p] = min(num_low[p], num_low[u]);
            }
            stk.pop();
        }
    }
    // Root rule: s is an articulation point iff it has >1 DFS-tree children.
    apNodes.erase(s);
    if (children_s > 1)
        apNodes.insert(s);
}

// UVa 315: read each test case (node count n, then adjacency lines
// "u v1 v2 ..." terminated by a line whose first number is 0) and print
// the number of articulation points.
int main() {
    int n;
    string line;
    while (cin >> n && n) {
        // input
        getline(cin, line);  // consume the remainder of the "n" line
        bool inputDone = false;
        map<int, map<int, int> > graph;
        // Also stop on EOF/stream failure so malformed input cannot loop forever.
        while (!inputDone && getline(cin, line)) {
            stringstream ss(line);
            int u, v;
            // A failed read (blank line) or the sentinel 0 ends this case.
            if (!(ss >> u) || !u) {
                inputDone = true;
                continue;
            }
            while (ss >> v)
                graph[u][v] = 1, graph[v][u] = 1;
        }
        // solve
        apVis.clear(), apNodes.clear(), apEdges.clear();
        // Guard: a case may list no edges at all; graph.begin() would then
        // be end() and dereferencing it is undefined behavior.
        if (graph.empty()) {
            cout << 0 << endl;
            continue;
        }
        articulationPointAndBridge(graph, graph.begin()->first);
        cout << apNodes.size() << endl;
    }
}

796 – Critical Links


Problem Statement

Given an undirected graph, find its critical edges (bridges). A critical edge is one whose removal disconnects two nodes that were previously connected.

The brute-force solution – removing each edge and checking for connectivity – will work. But this is an implementation using Tarjan’s DFS bridge-finding algorithm.


// Shared DFS state (cleared by main() before each test case):
//   apNodes - articulation points collected so far
//   apVis   - globally visited nodes, lets main() run one DFS per component
//   apEdges - bridges, stored as (min(u,p), max(u,p)) pairs
set<int> apNodes, apVis;
set<pair<int, int> > apEdges;
// Iterative Tarjan DFS rooted at s. Populates apNodes with the articulation
// points and apEdges with the bridges of the connected component of s.
// Each node is handled twice at the stack top: once when first discovered
// ("grey") and once after its children finish ("black").
void articulationPointAndBridge(map<int, map<int, int> > &graph, int s) {
    map<int, int> parent, num_dfs, num_low;
    int children_s = 0, num_dfs_size = 0;  // root-child count, DFS clock
    stack<int> stk;
    parent[s] = s, stk.push(s);            // the root is its own parent
    // dfs
    while (stk.size()) {
        int u = stk.top();
        // grey: first visit — stamp discovery time / low-link
        if (apVis.find(u) == apVis.end()) {
            num_dfs[u] = num_low[u] = num_dfs_size++;
            apVis.insert(u);
            children_s += u != s && parent[u] == s;  // DFS-tree child of root?
            for (map<int, int>::reverse_iterator v = graph[u].rbegin();
                 v != graph[u].rend(); ++v)
                // grey to white: tree edge — visit v later with parent u
                if (apVis.find(v->first) == apVis.end())
                    parent[v->first] = u, stk.push(v->first);
                // grey to grey: back edge (skip the edge to the DFS parent)
                else if (parent[u] != v->first)
                    num_low[u] = min(num_low[u], num_dfs[v->first]);
        }
        // black: u's subtree is finished — report and propagate low-link
        else {
            if (parent[u] != u) {
                // POINTS: parent cuts off u's subtree (root handled below)
                if (num_dfs[parent[u]] <= num_low[u])
                    apNodes.insert(parent[u]);
                // BRIDGES: no back edge from u's subtree climbs above parent
                if (num_dfs[parent[u]] < num_low[u])
                    if (u < parent[u])
                        apEdges.insert(make_pair(u, parent[u]));
                    else
                        apEdges.insert(make_pair(parent[u], u));
                num_low[parent[u]] = min(num_low[parent[u]], num_low[u]);
            }
            stk.pop();
        }
    }
    // Root rule: s is an articulation point iff it has >1 DFS-tree children.
    apNodes.erase(s);
    if (children_s > 1)
        apNodes.insert(s);
}

// UVa 796: each test case is n, then n lines of the form "a (m) b1 ... bm".
// Prints the bridge count followed by the bridges in sorted order.
int main() {
    string str;
    int n, a, b, m;
    while (cin >> n) {
        // input
        map<int, map<int, int> > graph;
        for (int i = 0; i < n; ++i) {
            cin >> a;
            cin >> str;  // neighbor-count token like "(m)"
            // Strip the surrounding parentheses before parsing m; guard
            // against a short/garbled token (substr with a negative length
            // would be undefined behavior) and a failed parse.
            m = 0;
            if (str.length() >= 2) {
                stringstream ss(str.substr(1, str.length() - 2));
                ss >> m;
            }
            for (int j = 0; j < m; ++j)
                cin >> b, graph[a][b] = graph[b][a] = 1;
        }
        // solve: the graph may be disconnected, so restart the DFS from
        // every node not yet covered by a previous run.
        apVis.clear(), apEdges.clear(), apNodes.clear();
        for (map<int, map<int, int> >::iterator i = graph.begin();
             i != graph.end(); ++i)
            if (apVis.find(i->first) == apVis.end())
                articulationPointAndBridge(graph, i->first);
        // print: count, then one "u - v" line per bridge
        cout << apEdges.size() << " critical links";
        for (set<pair<int, int> >::iterator i = apEdges.begin();
             i != apEdges.end(); ++i)
            cout << endl << i->first << " - " << i->second;
        cout << endl << endl;
    }
}

Heuristic Search & AI – 3


After finding a heuristic function and an evaluation function, another way for finding the target-state s_{target} is iterative improvement.

These algorithms are able to solve problems where the state space S is continuous. Also when we don’t know the characteristics of the target-state s_{target}, but we have some evaluation function f (potential field) to minimize. These methods scale well to high dimensions where no other methods work. They also don’t require high storage.

  • Steepest Descent (Hill Climbing)
  • We start from the start-state s_{start} and pick the best neighboring state s_1; then, from s_1, we pick the best neighboring state s_2, and so on — until we reach a state t that is better than all of its neighbors. Then we halt.

    hill_climbing(start) 	
    	u = start
    	while true
    		v = minimum valued successor of u
    		if (f(v) >= f(u))
    			return u
    		u = v
    

    However, it may halt at a local minimum.

  • Random Steepest Descent (Random Hill Climbing)
  • Runs Steepest Descent multiple times from random start states.

    random_hill_climbing()
    	min = random node
    	S = a list of random start nodes
    	for each node u in S
    		v = hill_climbing(u)
    		if f(v) < f(min)
    			min = v
    	return min
    

    This algorithm lowers the chances of halting at local minimum.

  • Simulated Annealing
  • From the current state s, it picks the next state u randomly. If u gives a lower evaluation (good), we proceed to u. If not (bad), we may be at a local minimum, so we try to escape s with some probability: the worse u is compared to s, the more likely we are to stick with our old state s.

    simulated_annealing(start, schedule) 	
    	u = start
    	for t = 1, 2, ...
    		T = schedule(t)
    		if T = 0
    			return u
    		v = random successor of u
    		delta = f(u) - f(v)
    		if (delta > 0)
    			u = v
    		else
    			u = v with probability e^(delta/T)
    

    The parameter T controls the speed of convergence. schedule should be decreasing to make sure the algorithm is more likely to halt the more it proceeds.

    (For bad moves, delta < 0, so the acceptance probability e^{\delta/T} decays like e^{-x}.)

    This means: escape local minima by allowing some bad moves, but gradually decrease their frequency.

    Heuristic Search & AI – 2


    Now, after generating a heuristic function h and an evaluation function f, we utilize it to find the target-state s_{target}. In this post, we discuss search algorithms.

    These are good for when the state space S is well-defined (discrete) and we know the characteristics of target-state s_{target}. Usually, we can always discrete-ize a continuous state space using some tolerance threshold.

  • Generate And Test (Brute-Force)

  • Generate an arbitrary state s and see if it is the target s_{target}.

    brute_force(isGoal)
    	while true
    		u = generate a state
    		if isGoal(u)
    			return u
    

    This algorithm is simple. But it’s more suitable for small problems, as the search space grows exponentially.

  • Best-First Search (A*)

  • This algorithm is similar to Dijkstra’s, but instead of using the distance from the start-state s_{start} as the sorting criteria, we use the evaluation function f.

    best_first_search(start, isGoal)
    	Q = PriorityQueue(start, f(start))
    	while !Q.isEmpty()
    		u = Q.pop()
    		if isGoal(u)
    			return u
    		for each child v in succ(u)
    			Q.push(v, f(v))
    	return null
    

    This algorithm takes too much storage space. However, we can always introduce a visited set of states to avoid repeated states.

  • Beam Search

  • This algorithm tries to limit the storage space of A*.

    beam-search(start, isGoal, k) 	
    	Q = PriorityQueue(start, f(start))
    	while !Q.isEmpty()
    		u = Q.pop()
    		if isGoal(u)
    			return u
    		for each child v in succ(u)
    			Q.push(v, f(v))
    			if Q.size() > k
    				Q.removeLast() // earliest/worst state
    	return null
    

    If k = \infty , the algorithm resembles A*.
    If k = 1, the algorithm resembles hill-climbing which we’ll discuss in the next post.

    Heuristic Search & AI – 1


    This series of posts is intended to give an overview of heuristic search terminology and the very abstract algorithms. In this post, we discuss the heuristic function.

    In a search problem: we have a search-space S, a start-state s_{start}, and a target-state s_{target}. We’re trying to find the path P from s_{start} to s_{target}. The path P consists of a sequence of states from our search-space S. P can have constraints.

    Suppose we’re at an arbitrary state s, a heuristic function tells how good the current-state s is (how close s is to s_{target}).
    evaluation
    f(s) = g(s) + h(s)

    • f(s) – Evaluation function. Usually, we try to go towards states that minimize this function. s_{target} should have the minimum f.
    • g(s) – Cost of the path P that goes from s_{start} to s. Minimizing this factor, reduces the length of the path P.
    • h(s) – Estimated distance from s to s_{target}. This is the heuristic function.

    An admissible heuristic function
    A heuristic function h is said to be admissible if it never overestimates the distance to s_{target}. To understand this, consider the actual distance to the target-state h^*(s) (the actual distance is often hard to determine). This is what a search algorithm produces for different choices of h(s):

    • h(s) = h^*(s)
      Only nodes in the correct path are expanded. Optimal solution is found.
    • h(s) < h^*(s)
      Additional nodes are expanded. Optimal solution is found.
    • h(s) > h^*(s)
      Optimal solution can be overlooked.

    This last case is what we’re trying to avoid when we impose an admissible function.

    Generating heuristic functions
    One way of generating admissible functions is by relaxing the problem (e.g. by removing constraints). Then using the actual distance to the goal in the relaxed problem as a heuristic function. These are always admissible.
    Also, the distances computed in a sub-problem of the original problem often serve as useful estimates of the original distances.

    Now, given a set of heuristic functions, how can we tell which is better?
    If h1(s), h2(s) are two admissible functions and h1(s) > h2(s) for all s, pick h1(s). h1(s) > h2(s) means that h1(s) is a tighter estimate (visits fewer states) and yet never overlooks the optimal solution (admissible).

    Learning heuristics is also possible. We can have an agent solve many instances of the problem and store the actual cost h^*(s) at each state s. Then, using the stored h^*(s) values, we learn how to compute h(s) from the state features.

    Greedy evaluation functions
    If we throw away g(s) out of f(s), we get a greedy function:
    f(s) = h(s)
    A greedy function tries to visit states closer to s_{target} regardless of their distances from s_{start}.

    Next: search algorithms