package aima.core.probability;

import aima.core.agent.Action;
import aima.core.agent.Agent;
import aima.core.agent.Percept;
import aima.core.probability.bayes.BayesInference;
import aima.core.probability.domain.FiniteDomain;
import aima.core.probability.proposition.AssignmentProposition;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Artificial Intelligence A Modern Approach (3rd Edition): Figure 16.9, page 632.<br>
 * <br>
 * <pre>
 * function INFORMATION-GATHERING-AGENT(percept) returns an action
 *   persistent: D, a decision network
 *
 *   integrate percept into D
 *   j ← the value that maximizes VPI(Ej) / Cost(Ej)
 *   if VPI(Ej) > Cost(Ej)
 *     return REQUEST(Ej)
 *   else return the best action from D
 * </pre>
 * <p>
 * Figure 16.9 Design of a simple information-gathering agent. The agent works
 * by repeatedly selecting the observation with the highest information value,
 * until the cost of the next observation is greater than its expected benefit.
 *
 * @author samagra
 */
public abstract class InformationGatheringAgent implements Agent {

    // To carry out conditional probability calculations
    private BayesInference inferenceMethod;
    // persistent: D, a decision network
    private DecisionNetwork decisionNetwork;
    // The evidence collected so far
    private List<AssignmentProposition> observedEvidence;
    // The random variables that can potentially be observed
    private List<RandomVariable> randomVars;

    /**
     * Constructor for the agent.
     *
     * @param decisionNetwork The decision network which represents the problem
     *                        for which the information is to be collected
     * @param inferenceMethod The inference procedure used for conditional
     *                        probability calculations
     * @param initialEvidence The information available to the agent beforehand
     */
    public InformationGatheringAgent(DecisionNetwork decisionNetwork,
                                     BayesInference inferenceMethod,
                                     List<AssignmentProposition> initialEvidence) {
        this.decisionNetwork = decisionNetwork;
        this.inferenceMethod = inferenceMethod;
        // Defensive copy so that later observations do not mutate the caller's list
        this.observedEvidence = new ArrayList<>(initialEvidence);
        this.randomVars = this.decisionNetwork.getNetwork().getVariablesInTopologicalOrder();
    }

    public InformationGatheringAgent(DecisionNetwork decisionNetwork,
                                     BayesInference inferenceMethod) {
        this(decisionNetwork, inferenceMethod, new ArrayList<>());
    }

    /**
     * function INFORMATION-GATHERING-AGENT(percept) returns an action
     *
     * @param percept The current percept of a sequence perceived by the Agent.
     * @return action to be executed by the agent
     */
    @Override
    public Action execute(Percept percept) {
        // integrate percept into D
        observedEvidence = integratePercept(observedEvidence, percept);

        // j ← the value that maximizes VPI(Ej) / Cost(Ej)
        List<Double> vpiPerUnitCosts = this.vpiPerUnitCost(this.randomVars);
        int j = vpiPerUnitCosts.indexOf(Collections.max(vpiPerUnitCosts));
        RandomVariable randomVar = this.randomVars.get(j);

        // if VPI(Ej) > Cost(Ej)
        if (getVpi(randomVar) > getCost(randomVar)) {
            // return REQUEST(Ej)
            return this.request(randomVar);
        }
        // else return the best action from D
        return (Action) decisionNetwork.getBestAction();
    }

    /**
     * We assume that the result of the action Request(Ej) is that the next
     * percept provides the value of Ej.
     *
     * @param randomVar The random variable for which the information is needed.
     * @return The action which leads the agent to the value of Ej.
     */
    protected abstract Action request(RandomVariable randomVar);

    /**
     * Calculates the VPI (value of perfect information) per unit cost
     * for all the random variables.
     *
     * @param variablesInTopologicalOrder The variables for which information is required.
     * @return A list of VPI-per-unit-cost values, in the same order as the input variables.
     */
    private List<Double> vpiPerUnitCost(List<RandomVariable> variablesInTopologicalOrder) {
        List<Double> vpiPerUnitCost = new ArrayList<>();
        for (RandomVariable var : variablesInTopologicalOrder) {
            vpiPerUnitCost.add(getVpi(var) / getCost(var));
        }
        return vpiPerUnitCost;
    }

    /**
     * Calculates the cost of obtaining information for a particular variable.
     *
     * @param var The random variable to be observed
     * @return The cost of observing the value of var
     */
    abstract double getCost(RandomVariable var);

    /**
     * Calculates VPI (value of perfect information) for a particular random
     * variable, following the AIMA3e definition (Section 16.6):
     * <pre>
     * VPI(Ej) = ( Σk P(Ej = ejk | e) EU(α(ejk) | e, Ej = ejk) ) − EU(α | e)
     * </pre>
     *
     * @param var The random variable for which the VPI is to be calculated
     * @return The value of perfect information for var
     */
    double getVpi(RandomVariable var) {
        double vpi = 0;
        // Posterior distribution P(var | e) given the evidence observed so far
        CategoricalDistribution distribution = inferenceMethod.ask(new RandomVariable[]{var},
                observedEvidence.toArray(new AssignmentProposition[0]), decisionNetwork.getNetwork());
        for (Object value : ((FiniteDomain) var.getDomain()).getPossibleValues()) {
            double posteriorProb = distribution.getValue(value);
            List<AssignmentProposition> modifiedEvidence = new ArrayList<>(observedEvidence);
            modifiedEvidence.add(new AssignmentProposition(var, value));
            double expectedUtilityForParticularValue = decisionNetwork.getExpectedUtility(var,
                    modifiedEvidence);
            vpi += posteriorProb * expectedUtilityForParticularValue;
        }
        // Subtract the expected utility of acting on the current evidence alone
        vpi -= decisionNetwork.getExpectedUtility(var, observedEvidence);
        return vpi;
    }

    /**
     * Extracts information from the percept and adds it to the observed evidence.
     *
     * @param observedEvidence The evidence collected so far
     * @param percept          The current percept
     * @return The updated list of evidence
     */
    abstract List<AssignmentProposition> integratePercept(List<AssignmentProposition> observedEvidence, Percept percept);
}
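
/*
 * A minimal sketch of a concrete subclass, for illustration only. The percept
 * wrapper EvidencePercept, the fixed unit observation cost, and the use of
 * DynamicAction to stand in for REQUEST(Ej) are assumptions made for this
 * example; the pseudocode above does not prescribe them.
 */
class UnitCostGatheringAgent extends InformationGatheringAgent {

    /** Hypothetical percept carrying one observed variable/value pair. */
    static class EvidencePercept implements Percept {
        final AssignmentProposition assignment;

        EvidencePercept(AssignmentProposition assignment) {
            this.assignment = assignment;
        }
    }

    private boolean alive = true;

    UnitCostGatheringAgent(DecisionNetwork decisionNetwork, BayesInference inferenceMethod) {
        super(decisionNetwork, inferenceMethod);
    }

    @Override
    protected Action request(RandomVariable randomVar) {
        // Represent REQUEST(Ej) as a generic named action; the environment is
        // assumed to answer it with an EvidencePercept for randomVar.
        return new aima.core.agent.impl.DynamicAction("Request(" + randomVar.getName() + ")");
    }

    @Override
    double getCost(RandomVariable var) {
        // Assumption: every observation has the same fixed cost, so the agent
        // effectively ranks candidate observations by raw VPI.
        return 1.0;
    }

    @Override
    List<AssignmentProposition> integratePercept(List<AssignmentProposition> observedEvidence, Percept percept) {
        // Copy-and-append keeps the previous evidence list untouched.
        List<AssignmentProposition> updated = new ArrayList<>(observedEvidence);
        if (percept instanceof EvidencePercept) {
            updated.add(((EvidencePercept) percept).assignment);
        }
        return updated;
    }

    // Lifecycle methods required by the aima-core Agent contract.
    @Override
    public boolean isAlive() {
        return alive;
    }

    @Override
    public void setAlive(boolean alive) {
        this.alive = alive;
    }
}

// Typical use (hypothetical): drive agent.execute(percept) in a loop, answering
// each Request action with a matching EvidencePercept, until the agent returns
// the decision network's best action instead of another request.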