@@ -9,7 +9,7 @@
 import pathlib
 import hashlib
 import torch
-import yaml
+import yaml # type: ignore[import-untyped]
 import openfermion
 import platformdirs
 from ..networks.mlp import WaveFunctionElectronUpDown as MlpWaveFunction
@@ -151,14 +151,14 @@ def __init__(self, args: ModelConfig) -> None:
             torch.save((self.hamiltonian.site, self.hamiltonian.kind, self.hamiltonian.coef), cache_file)
             logging.info("OpenFermion Hamiltonian successfully cached")
 
-        self.n_qubit: int = n_orbit * 2
-        self.n_electron: int = n_electron
-        self.n_spin: int = n_spin
+        self.n_qubits: int = n_orbit * 2
+        self.n_electrons: int = n_electron
+        self.n_spins: int = n_spin
         logging.info(
             "Identified %d qubits, %d electrons and %d spin",
-            self.n_qubit,
-            self.n_electron,
-            self.n_spin,
+            self.n_qubits,
+            self.n_electrons,
+            self.n_spins,
         )
 
         self.ref_energy: float
@@ -201,7 +201,7 @@ def show_config(self, config: torch.Tensor) -> str:
         string = "".join(f"{i:08b}"[::-1] for i in config.cpu().numpy())
         return (
             "["
-            + "".join(self._show_config_site(string[index : index + 2]) for index in range(0, self.n_qubit, 2))
+            + "".join(self._show_config_site(string[index : index + 2]) for index in range(0, self.n_qubits, 2))
             + "]"
         )
 
@@ -238,11 +238,11 @@ def create(self, model: Model) -> NetworkProto:
         logging.info("Hidden layer widths: %a", self.hidden)
 
         network = MlpWaveFunction(
-            double_sites=model.n_qubit,
+            double_sites=model.n_qubits,
             physical_dim=2,
             is_complex=True,
-            spin_up=(model.n_electron + model.n_spin) // 2,
-            spin_down=(model.n_electron - model.n_spin) // 2,
+            spin_up=(model.n_electrons + model.n_spins) // 2,
+            spin_down=(model.n_electrons - model.n_spins) // 2,
             hidden_size=self.hidden,
             ordering=+1,
         )
@@ -298,11 +298,11 @@ def create(self, model: Model) -> NetworkProto:
         )
 
         network = TransformersWaveFunction(
-            double_sites=model.n_qubit,
+            double_sites=model.n_qubits,
             physical_dim=2,
             is_complex=True,
-            spin_up=(model.n_electron + model.n_spin) // 2,
-            spin_down=(model.n_electron - model.n_spin) // 2,
+            spin_up=(model.n_electrons + model.n_spins) // 2,
+            spin_down=(model.n_electrons - model.n_spins) // 2,
             embedding_dim=self.embedding_dim,
             heads_num=self.heads_num,
             feed_forward_dim=self.feed_forward_dim,
@@ -336,10 +336,10 @@ def create(self, model: Model) -> NetworkProto:
         logging.info("Hidden layer widths: %a", self.hidden)
 
         network = MlpWaveFunctionElectron(
-            sites=model.n_qubit,
+            sites=model.n_qubits,
             physical_dim=2,
             is_complex=True,
-            electrons=model.n_electron,
+            electrons=model.n_electrons,
             hidden_size=self.hidden,
             ordering=+1,
         )
@@ -394,10 +394,10 @@ def create(self, model: Model) -> NetworkProto:
         )
 
         network = TransformersWaveFunctionElectron(
-            sites=model.n_qubit,
+            sites=model.n_qubits,
             physical_dim=2,
             is_complex=True,
-            electrons=model.n_electron,
+            electrons=model.n_electrons,
             embedding_dim=self.embedding_dim,
             heads_num=self.heads_num,
             feed_forward_dim=self.feed_forward_dim,
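For context on the renamed attributes: the network factories above split the total electron count into spin sectors via (n_electrons ± n_spins) // 2, which assumes n_spins is the up/down occupation difference. The following is a minimal standalone sketch of that arithmetic, not part of this diff; SpinCounts and split_spins are hypothetical names introduced here for illustration only.

# Sketch only (hypothetical helpers, not from this repository).
from dataclasses import dataclass


@dataclass
class SpinCounts:
    spin_up: int
    spin_down: int


def split_spins(n_electrons: int, n_spins: int) -> SpinCounts:
    # Assumes n_spins = N_up - N_down, matching the
    # (n_electrons +/- n_spins) // 2 expressions in the diff above.
    if (n_electrons + n_spins) % 2 != 0:
        raise ValueError("n_electrons and n_spins must have the same parity")
    return SpinCounts(
        spin_up=(n_electrons + n_spins) // 2,
        spin_down=(n_electrons - n_spins) // 2,
    )


# Example: 10 electrons with an up/down difference of 2 -> 6 up, 4 down.
print(split_spins(10, 2))  # SpinCounts(spin_up=6, spin_down=4)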