Commit a6b38bc

Adds final comments
1 parent 16daf50 commit a6b38bc

2 files changed: +145 -24 lines changed

aima-core/src/main/java/aima/core/probability/DecisionNetwork.java

Lines changed: 46 additions & 6 deletions
@@ -4,41 +4,81 @@
 import aima.core.probability.bayes.BayesianNetwork;
 import aima.core.probability.domain.FiniteDomain;
 import aima.core.probability.proposition.AssignmentProposition;
-import org.junit.Test;
 
 import java.util.List;
 
+/**
+ * Artificial Intelligence A Modern Approach (3rd Edition): page 626.<br>
+ * <br>
+ * Decision networks combine Bayesian networks
+ * with additional node types for actions and utilities.<br>
+ * <p>
+ * In its most general form, a decision network represents information about the agent’s current
+ * state, its possible actions, the state that will result from the agent’s action, and the utility of
+ * that state.
+ *
+ * @author samagra
+ */
 public abstract class DecisionNetwork {
 
+    // The underlying Bayesian network
     private BayesianNetwork network;
+    // The single decision node
     private RandomVariable action;
+    // To calculate various conditional probabilities
     private BayesInference inferenceProcedure;
 
+    /**
+     * Constructor for the decision network.
+     *
+     * @param network            The underlying Bayesian network.
+     * @param action             The decision node.
+     * @param inferenceProcedure The inference procedure to be used for probability calculations.
+     */
     public DecisionNetwork(BayesianNetwork network,
                            RandomVariable action, BayesInference inferenceProcedure) {
         this.network = network;
         this.action = action;
         this.inferenceProcedure = inferenceProcedure;
     }
 
-    public abstract double getUtilityForAction(RandomVariable action, Object value );
+    // Returns the utility for a particular state
+    public abstract double getUtilityForAction(RandomVariable action, Object value);
 
+    /**
+     * Calculates the expected utility of an action given the available evidence.
+     *
+     * @param action   Action for which the utility is to be calculated.
+     * @param evidence The available information.
+     * @return The expected utility of the action.
+     */
     public double getExpectedUtility(RandomVariable action,
-                                     List<AssignmentProposition> evidence){
+                                     List<AssignmentProposition> evidence) {
         double utility = 0;
         CategoricalDistribution distribution = inferenceProcedure.ask((new RandomVariable[]{action}),
-                ((AssignmentProposition[])evidence.toArray()),this.getNetwork());
+                ((AssignmentProposition[]) evidence.toArray()), this.getNetwork());
         for (Object value :
                 ((FiniteDomain) action.getDomain()).getPossibleValues()) {
-            utility += distribution.getValue(value)*this.getUtilityForAction(action,value);
+            utility += distribution.getValue(value) * this.getUtilityForAction(action, value);
         }
         return utility;
     }
 
-    public Object getBestAction(){
+    /**
+     * Currently the decision network supports only a single decision node and hence returns
+     * the same action.
+     *
+     * @return The best action according to the network.
+     */
+    public Object getBestAction() {
         return action;
     }
 
+    /**
+     * Returns the underlying Bayesian network.
+     *
+     * @return The underlying Bayesian network.
+     */
     public BayesianNetwork getNetwork() {
         return network;
     }
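
The new getExpectedUtility method weights the utility of each possible value of the decision variable by its posterior probability, EU(a | e) = Σ_v P(A = v | e) · U(v), with the posterior supplied by the configured BayesInference procedure. Below is a minimal sketch of how the abstract class might be filled in; the class name TableDrivenDecisionNetwork, its package placement and the lookup-table utilities are illustrative assumptions and not part of this commit.

package aima.core.probability; // hypothetical file, placed in the same package as DecisionNetwork

import java.util.HashMap;
import java.util.Map;

import aima.core.probability.bayes.BayesInference;
import aima.core.probability.bayes.BayesianNetwork;

/**
 * Illustrative only: a decision network whose utilities come from a fixed
 * lookup table keyed by the values of the decision variable.
 */
public class TableDrivenDecisionNetwork extends DecisionNetwork {

    // Maps each value of the decision variable to the utility of the resulting state.
    private final Map<Object, Double> utilityTable = new HashMap<>();

    public TableDrivenDecisionNetwork(BayesianNetwork network, RandomVariable action,
                                      BayesInference inferenceProcedure,
                                      Map<Object, Double> utilities) {
        super(network, action, inferenceProcedure);
        this.utilityTable.putAll(utilities);
    }

    // Utility of the state in which the given variable takes the given value;
    // values missing from the table default to a utility of zero.
    @Override
    public double getUtilityForAction(RandomVariable action, Object value) {
        return utilityTable.getOrDefault(value, 0.0);
    }
}

Callers are then expected to construct such a network with the decision variable and an inference procedure from aima-core and query getExpectedUtility(actionVariable, evidence); getBestAction() simply returns the single decision node.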

aima-core/src/main/java/aima/core/probability/InformationGatheringAgent.java

Lines changed: 99 additions & 18 deletions
@@ -4,77 +4,158 @@
 import aima.core.agent.Agent;
 import aima.core.agent.Percept;
 import aima.core.probability.bayes.BayesInference;
-import aima.core.probability.bayes.BayesianNetwork;
 import aima.core.probability.domain.FiniteDomain;
 import aima.core.probability.proposition.AssignmentProposition;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+/**
+ * Artificial Intelligence A Modern Approach (3rd Edition): Figure 16.9, page 632.<br>
+ * <br>
+ * <pre>
+ *
+ * function INFORMATION-GATHERING-AGENT(percept) returns an action
+ *   persistent: D, a decision network
+ *
+ *   integrate percept into D
+ *   j ← the value that maximizes VPI(Ej) / Cost(Ej)
+ *   if VPI(Ej) > Cost(Ej)
+ *     return REQUEST(Ej)
+ *   else return the best action from D
+ *
+ * </pre>
+ * <p>
+ * Figure ?? Design of a simple information-gathering agent.
+ * The agent works by repeatedly selecting the observation with
+ * the highest information value, until the cost of the next
+ * observation is greater than its expected benefit.
+ *
+ * @author samagra
+ */
 public abstract class InformationGatheringAgent implements Agent {
+
+    // To carry out conditional probability calculations
     private BayesInference inferenceMethod;
+    // persistent: D, a decision network
     private DecisionNetwork decisionNetwork;
+    // To store the information collected till now
     private List<AssignmentProposition> observedEvidence;
+    // To store the scope of information that can be collected
     private List<RandomVariable> randomVars;
-
+
+    /**
+     * Constructor for the agent.
+     *
+     * @param decisionNetwork The decision network which represents the problem
+     *                        for which the information is to be collected.
+     * @param inferenceMethod To carry out various conditional probability calculations.
+     * @param initialEvidence The information which is available beforehand to the agent.
+     */
     public InformationGatheringAgent(DecisionNetwork decisionNetwork,
                                      BayesInference inferenceMethod,
-                                     List<AssignmentProposition> initialEvidence){
+                                     List<AssignmentProposition> initialEvidence) {
         this.decisionNetwork = decisionNetwork;
         this.inferenceMethod = inferenceMethod;
         this.observedEvidence = initialEvidence;
         this.randomVars = this.decisionNetwork.getNetwork().getVariablesInTopologicalOrder();
     }
 
     public InformationGatheringAgent(DecisionNetwork decisionNetwork,
-                                     BayesInference inferenceMethod){
-        this(decisionNetwork,inferenceMethod,new ArrayList<>());
+                                     BayesInference inferenceMethod) {
+        this(decisionNetwork, inferenceMethod, new ArrayList<>());
     }
-
+
+    /**
+     * function INFORMATION-GATHERING-AGENT(percept) returns an action
+     *
+     * @param percept The current percept of a sequence perceived by the Agent.
+     * @return action to be executed by the agent
+     */
     @Override
     public Action execute(Percept percept) {
-        observedEvidence = integratePercept(observedEvidence,percept);
+        // integrate percept into D
+        observedEvidence = integratePercept(observedEvidence, percept);
+
+        // j ← the value that maximizes VPI(Ej) / Cost(Ej)
         List<Double> vpiPerUnitCosts = this.vpiPerUnitCost(this.randomVars);
         int j = vpiPerUnitCosts.indexOf(Collections.max(vpiPerUnitCosts));
         RandomVariable randomVar = this.randomVars.get(j);
-        if (getVpi(randomVar)>getCost(randomVar)){
+
+        // if VPI(Ej) > Cost(Ej)
+        if (getVpi(randomVar) > getCost(randomVar)) {
+            // return REQUEST(Ej)
             return this.request(randomVar);
         }
+        // else return the best action from D
         return ((Action) decisionNetwork.getBestAction());
     }
 
+    /**
+     * We assume that the result of
+     * the action Request(Ej) is that the next percept provides the value of Ej.
+     *
+     * @param randomVar The random variable for which the information is needed.
+     * @return The action which leads the agent to the value of Ej.
+     */
     protected abstract Action request(RandomVariable randomVar);
 
-    private List<Double> vpiPerUnitCost(List<RandomVariable> variablesInTopologicalOrder){
-        List<Double> vpiPerUnitCost = new ArrayList<>();
+    /**
+     * Calculates the VPI (value of perfect information) per unit cost
+     * for all the random variables.
+     *
+     * @param variablesInTopologicalOrder The variables for which information is required.
+     * @return A list of VPI-per-unit-cost values.
+     */
+    private List<Double> vpiPerUnitCost(List<RandomVariable> variablesInTopologicalOrder) {
+        List<Double> vpiPerUnitCost = new ArrayList<>();
         for (RandomVariable var :
                 variablesInTopologicalOrder) {
             vpiPerUnitCost.add(getVpi(var) / getCost(var));
         }
         return vpiPerUnitCost;
     }
 
-    protected abstract double getCost(RandomVariable var);
+    /**
+     * Calculates the cost of obtaining information for
+     * a particular variable.
+     *
+     * @param var The variable to be observed.
+     * @return The cost of observing the variable.
+     */
+    abstract double getCost(RandomVariable var);
 
-    protected double getVpi(RandomVariable var){
-        double vpi =0;
+    /**
+     * Calculates VPI for a particular random variable.
+     *
+     * @param var The variable whose value of perfect information is required.
+     * @return The value of perfect information.
+     */
+    double getVpi(RandomVariable var) {
+        double vpi = 0;
         CategoricalDistribution distribution = inferenceMethod.ask((new RandomVariable[]{var}),
-                ((AssignmentProposition[])observedEvidence.toArray()),decisionNetwork.getNetwork());
+                ((AssignmentProposition[]) observedEvidence.toArray()), decisionNetwork.getNetwork());
         for (Object value :
                 ((FiniteDomain) var.getDomain()).getPossibleValues()) {
             double posterierProb = distribution.getValue(value);
             List<AssignmentProposition> modifiedEvidence = new ArrayList<>(observedEvidence);
-            modifiedEvidence.add(new AssignmentProposition(var,value));
+            modifiedEvidence.add(new AssignmentProposition(var, value));
             double expectedUtilityForParticularValue = decisionNetwork.getExpectedUtility(var,
                     modifiedEvidence);
-            vpi+= posterierProb*expectedUtilityForParticularValue;
+            vpi += posterierProb * expectedUtilityForParticularValue;
         }
-        vpi-=decisionNetwork.getExpectedUtility(var,observedEvidence);
+        vpi -= decisionNetwork.getExpectedUtility(var, observedEvidence);
         return vpi;
     }
 
+    /**
+     * Extracts the information from the percepts and adds it to our observed evidence.
+     *
+     * @param observedEvidence The evidence gathered so far.
+     * @param percept          The latest percept.
+     * @return The updated list of observed evidence.
+     */
     abstract List<AssignmentProposition> integratePercept(List<AssignmentProposition> observedEvidence, Percept percept);
 
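
InformationGatheringAgent leaves three hooks abstract: request (REQUEST(Ej)), getCost (Cost(Ej)) and integratePercept. The sketch below is illustrative only and not part of this commit: the class name, the flat observation cost, the use of aima-core's DynamicAction to label the request action, and the isAlive/setAlive bookkeeping assumed to be required by the Agent interface are all assumptions, and a real subclass would parse its percepts into AssignmentPropositions in a domain-specific way.

package aima.core.probability; // hypothetical file, same package so the package-private hooks can be overridden

import java.util.ArrayList;
import java.util.List;

import aima.core.agent.Action;
import aima.core.agent.Percept;
import aima.core.agent.impl.DynamicAction;
import aima.core.probability.bayes.BayesInference;
import aima.core.probability.proposition.AssignmentProposition;

/**
 * Illustrative only: an information-gathering agent that charges the same
 * cost for every observation.
 */
public class FixedCostGatheringAgent extends InformationGatheringAgent {

    private static final double OBSERVATION_COST = 1.0; // flat cost per observation (assumed)

    private boolean alive = true; // liveness bookkeeping assumed to be needed by the Agent interface

    public FixedCostGatheringAgent(DecisionNetwork decisionNetwork,
                                   BayesInference inferenceMethod,
                                   List<AssignmentProposition> initialEvidence) {
        super(decisionNetwork, inferenceMethod, initialEvidence);
    }

    // REQUEST(Ej): an action whose execution should make the next percept reveal randomVar.
    @Override
    protected Action request(RandomVariable randomVar) {
        return new DynamicAction("Observe-" + randomVar.getName());
    }

    // Cost(Ej): a real agent would price each sensor or test individually.
    @Override
    double getCost(RandomVariable var) {
        return OBSERVATION_COST;
    }

    // Turns the latest percept into evidence; the actual parsing is domain specific,
    // so it is only indicated here.
    @Override
    List<AssignmentProposition> integratePercept(List<AssignmentProposition> observedEvidence,
                                                 Percept percept) {
        List<AssignmentProposition> updated = new ArrayList<>(observedEvidence);
        // e.g. updated.add(new AssignmentProposition(observedVariable, valueReadFrom(percept)));
        return updated;
    }

    public boolean isAlive() {
        return alive;
    }

    public void setAlive(boolean alive) {
        this.alive = alive;
    }
}

With the hooks filled in, execute() keeps issuing request actions while the most informative remaining observation satisfies VPI(Ej) > Cost(Ej), and otherwise returns the decision network's best action; getVpi weights the expected utility under each hypothetical observation Ej = v by its posterior probability and subtracts the expected utility under the current evidence.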
