Commit d9526e4
Merge pull request ACEsuit#142 from ACEsuit/develop: MACE 0.3.0 release
2 parents ff054d5 + d42e185

34 files changed: +2037 / -1075 lines

.gitignore

Lines changed: 1 addition & 1 deletion
@@ -16,5 +16,5 @@ build/
 # IDE
 .idea/
 .vscode/
-logs/MACE_run-5.log
 *.txt
+*.log

README.md

Lines changed: 5 additions & 5 deletions
@@ -90,10 +90,10 @@ pip install ./mace

 ### Training

-To train a MACE model, you can use the `run_train.py` script:
+To train a MACE model, you can use the `mace_run_train` script, which should be in the usual place that pip places binaries (or you can explicitly run `python3 <path_to_cloned_dir>/mace/cli/run_train.py`)

 ```sh
-python ./mace/scripts/run_train.py \
+mace_run_train \
     --name="MACE_model" \
     --train_file="train.xyz" \
     --valid_fraction=0.05 \
@@ -132,18 +132,18 @@ To use Apple Silicon GPU acceleration make sure to install the latest PyTorch ve

 ### Evaluation

-To evaluate your MACE model on an XYZ file, run the `eval_configs.py`:
+To evaluate your MACE model on an XYZ file, run the `mace_eval_configs`:

 ```sh
-python3 ./mace/scripts/eval_configs.py \
+mace_eval_configs \
     --configs="your_configs.xyz" \
     --model="your_model.model" \
     --output="./your_output.xyz"
 ```

 ## Tutorial

-You can run our [Colab tutorial](https://colab.research.google.com/drive/1D6EtMUjQPey_GkuxUAbPgld6_9ibIa-V?authuser=1#scrollTo=Z10787RE1N8T) to quickly get started with MACE.
+You can run our [Colab tutorial](https://colab.research.google.com/drive/1D6EtMUjQPey_GkuxUAbPgld6_9ibIa-V?authuser=1#scrollTo=Z10787RE1N8T) to quickly get started with MACE. We also have a more detailed user and developer tutorial at https://github.com/ilyes319/mace-tutorials

 ## Weights and Biases for experiment tracking

mace/calculators/__init__.py

Lines changed: 4 additions & 3 deletions
@@ -1,9 +1,10 @@
+from .foundations_models import mace_anicc, mace_mp
 from .lammps_mace import LAMMPS_MACE
-from .mace import DipoleMACECalculator, EnergyDipoleMACECalculator, MACECalculator
+from .mace import MACECalculator

 __all__ = [
     "MACECalculator",
-    "DipoleMACECalculator",
-    "EnergyDipoleMACECalculator",
     "LAMMPS_MACE",
+    "mace_mp",
+    "mace_anicc",
 ]
mace/calculators/foundations_models.py

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+import os
+
+from .mace import MACECalculator
+
+path = os.path.dirname(__file__)
+
+
+def mace_mp(
+    device="cuda",
+    model_path=None,
+) -> MACECalculator:
+    """
+    Constructs a MACECalculator with a pretrained model based on the Materials Project (89 elements).
+    The model is released under the MIT license.
+    Note:
+        If you are using this function, please cite the relevant paper for the Materials Project,
+        any paper associated with the MACE model, and also the following:
+        - "MACE-Universal by Yuan Chiang, 2023, Hugging Face, Revision e5ebd9b, DOI: 10.57967/hf/1202, URL: https://huggingface.co/cyrusyc/mace-universal"
+        - "Matbench Discovery by Janosh Riebesell, Rhys EA Goodall, Anubhav Jain, Philipp Benner, Kristin A Persson, Alpha A Lee, 2023, arXiv:2308.14920"
+    """
+    if model_path is None:
+        model_path = os.path.join(
+            path, "foundations_models/2023-08-14-mace-universal.model"
+        )
+        print(
+            "Using Materials Project model for MACECalculator, see https://huggingface.co/cyrusyc/mace-universal"
+        )
+    return MACECalculator(model_path, device=device, default_dtype="float32")
+
+
+def mace_anicc(
+    device="cuda",
+    model_path=None,
+) -> MACECalculator:
+    """
+    Constructs a MACECalculator with a pretrained model based on the ANI (H, C, N, O).
+    The model is released under the MIT license.
+    Note:
+        If you are using this function, please cite the relevant paper associated with the MACE model, ANI dataset, and also the following:
+        - "Evaluation of the MACE Force Field Architecture by Dávid Péter Kovács, Ilyes Batatia, Eszter Sára Arany, and Gábor Csányi, The Journal of Chemical Physics, 2023, URL: https://doi.org/10.1063/5.0155322
+    """
+    if model_path is None:
+        model_path = os.path.join(path, "foundations_models/ani500k_large_CC.model")
+        print(
+            "Using ANI couple cluster model for MACECalculator, see https://doi.org/10.1063/5.0155322"
+        )
+    return MACECalculator(model_path, device=device, default_dtype="float64")
Binary files changed (34 MB); contents not shown.

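The new `mace_mp` and `mace_anicc` constructors return a `MACECalculator`, i.e. an ASE-style calculator, so the most common use is to attach one to an `ase.Atoms` object. A minimal usage sketch follows; it assumes ASE is installed and that the bundled model files referenced above are present, and the copper structure, lattice constant, and `device="cpu"` choice are illustrative assumptions rather than anything taken from the diff.

```python
# Usage sketch only; names outside mace.calculators (ase.build.bulk, "Cu", a=3.6)
# are illustrative assumptions, not part of this commit.
from ase.build import bulk

from mace.calculators import mace_mp  # new export added by this commit

atoms = bulk("Cu", "fcc", a=3.6)      # any structure with supported elements
atoms.calc = mace_mp(device="cpu")    # loads the bundled Materials Project model (float32)
print(atoms.get_potential_energy())   # energy through the standard ASE interface
print(atoms.get_forces().shape)       # (n_atoms, 3)

# mace_anicc(device="cpu") can be used the same way for H/C/N/O systems (float64 default).
```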
mace/calculators/lammps_mace.py

Lines changed: 46 additions & 49 deletions
@@ -3,7 +3,6 @@
 import torch
 from e3nn.util.jit import compile_mode

-from mace.modules.utils import get_outputs
 from mace.tools.scatter import scatter_sum


@@ -15,20 +14,19 @@ def __init__(self, model):
         self.register_buffer("atomic_numbers", model.atomic_numbers)
         self.register_buffer("r_max", model.r_max)
         self.register_buffer("num_interactions", model.num_interactions)
+        for param in self.model.parameters():
+            param.requires_grad = False

     def forward(
         self,
         data: Dict[str, torch.Tensor],
-        mask_ghost: torch.Tensor,
-        compute_force: bool = True,
+        local_or_ghost: torch.Tensor,
         compute_virials: bool = False,
-        compute_stress: bool = False,
     ) -> Dict[str, Optional[torch.Tensor]]:
         num_graphs = data["ptr"].numel() - 1
         compute_displacement = False
-        if compute_virials or compute_stress:
+        if compute_virials:
             compute_displacement = True
-
         out = self.model(
             data,
             training=False,
@@ -39,59 +37,58 @@
         )
         node_energy = out["node_energy"]
         if node_energy is None:
-            return {"energy": None, "forces": None, "virials": None, "stress": None}
+            return {
+                "total_energy_local": None,
+                "node_energy": None,
+                "forces": None,
+                "virials": None,
+            }
+        positions = data["positions"]
         displacement = out["displacement"]
+        forces: Optional[torch.Tensor] = torch.zeros_like(positions)
         virials: Optional[torch.Tensor] = torch.zeros_like(data["cell"])
-        stress: Optional[torch.Tensor] = torch.zeros_like(data["cell"])
-        if mask_ghost is not None and displacement is not None:
-            # displacement.requires_grad_(True) # For some reason torchscript needs that.
-            node_energy_ghost = node_energy * mask_ghost
-            total_energy_ghost = scatter_sum(
-                src=node_energy_ghost, index=data["batch"], dim=-1, dim_size=num_graphs
-            )
-            grad_outputs: List[Optional[torch.Tensor]] = [
-                torch.ones_like(total_energy_ghost)
-            ]
-            virials = torch.autograd.grad(
-                outputs=[total_energy_ghost],
-                inputs=[displacement],
+        # accumulate energies of local atoms
+        node_energy_local = node_energy * local_or_ghost
+        total_energy_local = scatter_sum(
+            src=node_energy_local, index=data["batch"], dim=-1, dim_size=num_graphs
+        )
+        # compute partial forces and (possibly) partial virials
+        grad_outputs: List[Optional[torch.Tensor]] = [
+            torch.ones_like(total_energy_local)
+        ]
+        if compute_virials and displacement is not None:
+            forces, virials = torch.autograd.grad(
+                outputs=[total_energy_local],
+                inputs=[positions, displacement],
                 grad_outputs=grad_outputs,
-                retain_graph=True,
-                create_graph=True,
+                retain_graph=False,
+                create_graph=False,
                 allow_unused=True,
-            )[0]
-
+            )
+            if forces is not None:
+                forces = -1 * forces
+            else:
+                forces = torch.zeros_like(positions)
             if virials is not None:
                 virials = -1 * virials
-                cell = data["cell"].view(-1, 3, 3)
-                volume = torch.einsum(
-                    "zi,zi->z",
-                    cell[:, 0, :],
-                    torch.cross(cell[:, 1, :], cell[:, 2, :], dim=1),
-                ).unsqueeze(-1)
-                stress = virials / volume.view(-1, 1, 1)
             else:
                 virials = torch.zeros_like(displacement)
-
-        total_energy = scatter_sum(
-            src=node_energy, index=data["batch"], dim=-1, dim_size=num_graphs
-        )
-
-        forces, _, _ = get_outputs(
-            energy=total_energy,
-            positions=data["positions"],
-            displacement=displacement,
-            cell=data["cell"],
-            training=False,
-            compute_force=compute_force,
-            compute_virials=False,
-            compute_stress=False,
-        )
-
+        else:
+            forces = torch.autograd.grad(
+                outputs=[total_energy_local],
+                inputs=[positions],
+                grad_outputs=grad_outputs,
+                retain_graph=False,
+                create_graph=False,
+                allow_unused=True,
+            )[0]
+            if forces is not None:
+                forces = -1 * forces
+            else:
+                forces = torch.zeros_like(positions)
         return {
-            "energy": total_energy,
+            "total_energy_local": total_energy_local,
             "node_energy": node_energy,
             "forces": forces,
             "virials": virials,
-            "stress": stress,
         }

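For context on the rewritten `forward`, the core pattern is: multiply per-atom energies by the local/ghost indicator, sum the local part per graph, and take minus the gradient of that sum with respect to all positions to obtain partial forces. The following is a self-contained toy sketch of that pattern, not the MACE or LAMMPS API; the quadratic per-atom energy and the single-graph `.sum()` (in place of `scatter_sum`) are stand-in assumptions.

```python
# Toy illustration of the partial-force computation used above (not MACE code).
import torch

# five atoms; the last two are "ghost" copies owned by another domain
positions = torch.randn(5, 3, requires_grad=True)
local_or_ghost = torch.tensor([1.0, 1.0, 1.0, 0.0, 0.0])

# stand-in for the model's per-atom energies (differentiable w.r.t. positions)
node_energy = (positions**2).sum(dim=-1)

# keep only local atoms' energies; with a single graph this plays the role of scatter_sum
total_energy_local = (node_energy * local_or_ghost).sum()

# partial forces: gradient of the local energy w.r.t. *all* positions (ghosts included),
# with the sign flipped; ghost contributions are left for the caller to accumulate
(grad,) = torch.autograd.grad(
    outputs=[total_energy_local],
    inputs=[positions],
    grad_outputs=[torch.ones_like(total_energy_local)],
    retain_graph=False,
    create_graph=False,
    allow_unused=True,
)
forces = -1 * grad if grad is not None else torch.zeros_like(positions)
print(forces.shape)  # torch.Size([5, 3])
```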