{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c4153281",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparations:           24.7628 s\n",
      "Overlap/Kinetic matrix: 3.6636 s\n",
      "Nuclei matrix:          3.9694 s\n",
      "Electron-electron integrals:\n",
      "    |Relevant densities: 7819\n",
      "    |Relevant density combinations: 4504594\n",
      "  >Relevance computations:        5.0628 s\n",
      "  >ERI computations:              23.0283 s\n",
      "  >Second relevance computations: 0.2005 s\n",
      "    |Total integrals:         4294967296\n",
      "    |Unique integrals:        541089856\n",
      "    |With relevant densities: 30572290\n",
      "    |With relevant values:    3946243\n",
      "  >Process ERIs:                  4.6335 s\n",
      "Electron tensor:        35.1118 s\n",
      "Ionic energy:           1.0483 s\n",
      "Starting SCF cycle: \n",
      "SCF step | convergence measure:\n",
      "       1 | 5.72175e-2  | -4339.216665820144\n",
      "       2 | 1.75679e-2  | -4342.320338762729\n",
      "       3 | 7.31540e-3  | -4343.106737375712\n",
      "       4 | 3.79263e-3  | -4343.323867076762\n",
      "       5 | 1.87254e-3  | -4343.373832298697\n",
      "       6 | 8.65667e-4  | -4343.383901294922\n",
      "       7 | 3.69165e-4  | -4343.38563088206\n",
      "       8 | 1.43351e-4  | -4343.385883556486\n",
      "       9 | 5.24773e-5  | -4343.38591693275\n",
      "      10 | 1.88662e-5  | -4343.385921231831\n",
      "      11 | 7.04467e-6  | -4343.385921828215\n",
      "      12 | 2.96675e-6  | -4343.38592192928\n",
      "      13 | 1.45335e-6  | -4343.385921950733\n",
      "      14 | 7.59142e-7  | -4343.3859219558035\n",
      "SCF times:\n",
      "    |Core Hamiltonian: 0.0 s\n",
      "    |Square root of inverse: 0.0014 s\n",
      "    |SAD guess: 0.0 s\n",
      "    |G tensor: 5.9581 s\n",
      "    |Fock matrix construction: 0.0 s\n",
      "    |Commutator SPF-FPS: 0.0211 s\n",
      "    |Save Fock matrix: 0.0001 s\n",
      "    |Error matrix calculation: 0.0 s\n",
      "    |Energy calculation: 0.001 s\n",
      "    |Transform Fock matrix: 0.0107 s\n",
      "    |Eigenvalue calculation: 0.0993 s\n",
      "    |Transform eigenorbitals: 0.005 s\n",
      "    |Density matrix: 0.005 s\n",
      "    |DIIS coefficients: 0.008 s\n",
      "    |DIIS equations: 0.001 s\n",
      "    |Mix new Fock matrix: 0.0223 s\n",
      "    |Convergence criterium: 0.002 s\n",
      "SCF cycle:              6.4073 s\n",
      "_______________________________________\n",
      "Total time:             74.9634 s\n",
      "Total energy: -1528.5474502578872 Ha\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "\"\"\"\n",
    "======\n",
    "NIMBLE - a large-scale Hartree-Fock program\n",
    "===========================================\n",
    "Nearsighted Integral-Optimized Matrix-Based Large-Scale Electronic Structure Prediction\n",
    "=           =                  =      =     =           =\n",
    "=======================================================================================\n",
    "version 3.10\n",
    "\n",
    "This Hartree-Fock program is focused on fast calculations for large systems.\n",
    "\n",
    "The key priciples:\n",
    "    - Nearsightedness: Based on this principle by Walter Kohn, irrelevant and long-range interactions can be ignored.\n",
    "    - Integral-Optimized: The integral calculation algorithm can ignore integrals with only small contributions to the result.\n",
    "    - Matrix-Based: A matrix formalism can be derived which enables an electron repulsion integral scheme which is highly parallelizable.\n",
    "    - Large-Scale Systems: Sparse formalisms and memory constraints allow fast calculations for systems with multiple of hundereds of atoms.\n",
    "\n",
    "The key features:\n",
    "    - electronic structure calculations for large-scale systems with >1,000,000 atoms\n",
    "    - very fast calculations for systems with a few hundred to a few thousand atoms\n",
    "    - subsystem/clustering approach for linear scalability\n",
    "    - visualization of electonic densities\n",
    "    - UV/Vis spectra for systems containing hundreds or thousands of atoms\n",
    "\n",
    "To run this program:\n",
    "1. Read the program configurations below the variable selections and change variables according to your needs. The default values will work fine in most cases.\n",
    "2. Read the section ---MAIN--- at the very end of the program and follow the instructions there.\n",
    "\n",
    "Author/Contact information:\n",
    "Luc Wieners\n",
    "Institute of Physics, University of Kassel, Heinrich-Plett-Straße 40, 34132 Kassel, Germany\n",
    "lucwieners@physik.uni-kassel.de\n",
    "\"\"\"\n",
    "\n",
    "\"\"\"\n",
    "--------------------\n",
    "MAIN CONFIGURATIONS \n",
    "--------------------\n",
    "\"\"\"\n",
    "\n",
    "# SCF CONFIGURATIONS\n",
    "max_scf_steps=100\n",
    "scf_tolerance_density=1.0e-6\n",
    "max_DIIS_linear_equations=max_scf_steps\n",
    "pulay_mixing_rate=0.7\n",
    "DIIS_penalty=1.05\n",
    "added_electrons=0\n",
    "level_shift_enabled=False\n",
    "level_shift_value=0.3\n",
    "\n",
    "# PRECISION CONFIGURATIONS\n",
    "density_threshold=1.0e-4\n",
    "coulomb_threshold=10.0\n",
    "coulomb_threshold_low=8.0\n",
    "\n",
    "# BASIS FUNCTION CONFIGURATIONS\n",
    "sto_precision=3\n",
    "lobe_precision=6\n",
    "\n",
    "# VERBOSITY\n",
    "verbosity=1\n",
    "\n",
    "# THREADING\n",
    "num_threads=4\n",
    "max_electron_integrals=100000000\n",
    "\n",
    "# TIME-DEPENDENT HARTREE-FOCK\n",
    "time_steps=2000\n",
    "delta_t=0.25\n",
    "pulse_standard_deviation=0.2\n",
    "pulse_shift_factor=10\n",
    "e_field_max=2.0e-5\n",
    "update=1\n",
    "\n",
    "# DIVIDE-AND-CONQUER HARTREE-FOCK\n",
    "partition_length=12.5\n",
    "partition_cut_off=8.0\n",
    "section_cut_off=10.0\n",
    "\n",
    "# DENSITY PLOTTING CONFIGURATIONS\n",
    "pixel_size=0.5\n",
    "basis_function_space=5.0\n",
    "additional_space=7.0\n",
    "hide_core_electrons=True\n",
    "\n",
    "# ALPHA-FOLD PREDICTIONS\n",
    "amino_acids_per_cluster=5\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-------------------------------\n",
    "ADDITIONAL CONFIGURATIONS \n",
    "(changes usually not necessary)\n",
    "-------------------------------\n",
    "\"\"\"\n",
    "\n",
    "# PRECISION CONFIGURATIONS\n",
    "density_threshold_2=density_threshold\n",
    "densities_threshold=density_threshold\n",
    "densities_threshold_2=density_threshold\n",
    "\n",
    "# THREADING\n",
    "num_threads_G=num_threads\n",
    "num_threads_integrals=num_threads\n",
    "e_tensor_datatype='float64'\n",
    "\n",
    "# BASIS SET CONFIGURATIONS\n",
    "overlap_sto_precision=6\n",
    "overlap_part_precision=7\n",
    "\n",
    "# ADVANCED VERBOSITY\n",
    "print_ERI_relevance=False\n",
    "display_runtimes=False\n",
    "display_eigenenergies=False\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "================\n",
    "Paramater Manual\n",
    "================\n",
    "\n",
    "\n",
    "--------------------\n",
    "MAIN CONFIGURATIONS:\n",
    "--------------------\n",
    "Change these parameters according to the calculation. If in doubt, use the default values.\n",
    "\n",
    "\n",
    "\n",
    "SCF CONFIGURATIONS:\n",
    "\n",
    "max_scf_steps: the maximum number of scf steps after which the calculation is stopped regardless of the convergence value. \n",
    "Default of max_scf_steps is 100, much higher values are not recommended as convergence should usually occur at latest after this amount of steps if no other error is present.\n",
    "\n",
    "scf_tolerance_density: The convergence threshold value which terminates the calculation if the threshold is passed.\n",
    "For the convergence value the RMSD of the density matrix is used i.e. sqrt(mean((P_{i-1}-P_{i})^2)). \n",
    "The default value is 1.0e-6. For more precise calculations (especially for real-time Hartree-Fock!) a stricter convergence threshold is recommended as for example 1.0e-8 or 1.0e-9.\n",
    "For less precise calculations a higher value (for example 1.0e-4) can be used.\n",
    "\n",
    "max_DIIS_linear_equations: The number of DIIS (direct inversion in the iterative subspace also called Pulay mixing) equations which is used to construct a new density matrix via Pulay mixing. \n",
    "The default is max_scf_steps which leads to stable results even for large systems (>1,000 atoms) which are otherwise extremely difficult to converge.\n",
    "\n",
    "pulay_mixing_rate: The mixing rate of the density matrices during Pulay mixing. The mixing is done as P_new = pulay_mixing_rate*P_Pulay + (1-pulay_mixing_rate)*P_old, \n",
    "where P_old is the old density matrix and P_Pulay the newly constructed density matrix via Pulay mixing. The default value is 0.7. A higher mixing rate accelerates the scf procedure\n",
    "but can lead to instabilities as wells. The mixing rate can be increased if the system contains a lot of water which often stabilizes the calculation. \n",
    "\n",
    "DIIS_penalty: A paramter which pushes the DIIS towards a final solution by favouring the lowest-energy state. This parameter stabilizes calculations where a switching between two or more\n",
    "states occurs (also sometimes called charge sloshing). Very helpful for systems with small HOMO-LUMO (highes/lowest occupied molecular orbital) gaps. The default and recommended value is: 1.05.\n",
    "Can be disabled by setting the parameter to 1.0 which corresponds to normal mixing. Values >=1.0 are valid, higher values than 1.5 are discouraged. \n",
    "The use of this parameter is highly recommended since there are no downsides.\n",
    "\n",
    "added_electrons: Adds electrons to the system. A negative value removes electrons from the system. \n",
    "Is used to add charges to systems which is often done in real-time time-dependet Hartree-Fock calculations. A good measure of added_electrons is the difference between the number of electrons\n",
    "in the system and the calculated position of the HOMO-LUMO gap.\n",
    "\n",
    "level_shift_enabled: Artificially increases the HOMO-LUMO gap by adding an energy shift to the Fock matrix. This can help to converge calculation with small HOMO-LUMO gaps.\n",
    "The default is 'False' since using only the parameter DIIS_penalty works well in most cases.\n",
    "\n",
    "level_shift_value: The value by which the virtual orbital energies are shifted if level_shift_enabled=True. This parameter is only used if level_shift_enabled=True.\n",
    "Default and recommended value: 0.3. Values are given in Hartree. Note that 1 Ha = 27.211 eV (electron volts).\n",
    "\n",
    "\n",
    "\n",
    "THRESHOLDS:\n",
    "\n",
    "density_threshold: sets the value below which electronic density contributions will be ignored.\n",
    "Recommended values: standard precision: 1.0e-4\n",
    "                    higher precision: 1.0e-6\n",
    "                    minimum: 1.0e-10 (lower values will likely not influence calculation results)\n",
    "                    maximum: 3.0e-4 (high accuracy loss for higher values)\n",
    "\n",
    "coulomb_threshold and coulomb_threshold_low: sets the values above which the Coulomb interaction will be ignored or dampened, respectively. Values are in Angstrom!\n",
    "Recommended values: 10 Angstrom and 8 Angstrom\n",
    "\n",
    "\n",
    "\n",
    "BASIS FUNCTION CONFIGURATIONS:\n",
    "\n",
    "sto_precision: determines how from how many Gaussian functions a STO-nG basis function is built.\n",
    "Currently supported are 2,3,6 (corresponding to STO-2G, STO-3G, STO-6G). Default and recommendend: 3.\n",
    "Note that this value only influences s-type orbitals since orbitals of higher angular momenta are treated with the Gaussian lobe function expansion algorithm.\n",
    "\n",
    "lobe_precision: Determines the amount of Gaussian functions in a Gaussian lobe function expansion of a p-type orbital.\n",
    "Currently supported are 4,6 (approximately corresponds to STO-2G, STO-3G). Default and recommended: 6.\n",
    "Note that double the amount of Gaussians are needed here since one p-type orbital contains two lobes which are treated independently.\n",
    "\n",
    "\n",
    "\n",
    "VERBOSITY: \n",
    "\n",
    "verbosity: 1 for run-time and integral-count outputs, 0 for no outputs at all (default: 1)\n",
    "\n",
    "\n",
    "\n",
    "THREADING:\n",
    "\n",
    "num_threads: the number of threads which will be started for the parallelized computation of the electron repulsion integrals and the G-matrix\n",
    "             should correspond to the number of CPU cores available\n",
    "\n",
    "max_electron_integrals: the maximum number of electron repulsion integrals that are used in a calculation\n",
    "\n",
    "\n",
    "\n",
    "OPTIONAL: TIME-DEPENDENT HARTREE-FOCK\n",
    "Note: all parameters below are ONLY used if a real-time time-dependent Hartree-Fock (RT-TDHF) calculation is done and will be ignored otherwise.\n",
    "\n",
    "time_steps: The amount of time steps over which a real-time time-dependent Hartree-Fock calculation is propagated. The default value is: 2000.\n",
    "Multiplied with delta_t (see below) the total propagation time can be calculated. This time should be around 500 atomic units of time (=~12.1 femtoseconds).\n",
    "If absorption peaks towards infra-red are computed a higher time might be needed for a better resolution of low-frequency oscillations. A value of 1000 atomic units of time should be enough.\n",
    "For ultra-violet calculations lower values than 500 might be used.\n",
    "\n",
    "delta_t: The time of one time step. Default and recommended values is: 0.25. Given in atomic units of time. See also 'time_steps'.\n",
    "A higher time step can lead to numerical instabilities (starting at around 0.3) and should therefore be avoided!\n",
    "Lower time steps are possible but have no advantage and should therefore not be used.\n",
    "\n",
    "pulse_standard_deviation: The standard deviation of the Gaussian pulse with which the system is excited in the calculation. Default and recommended value: 0.2. Given in atomic units of time.\n",
    "A higher value can lead to different results since the RT-TDHF approach uses only a very small excitation.\n",
    "\n",
    "pulse_shift_factor: Shifts the pulse forward on the time axis. Default value: 10. Given as a multiple of pulse_standard_deviation.\n",
    "A much lower value leads to parts of the pulse not being on the positive side of the time axis and a much higher value leads to a large time span before the pulse in which no dynamics occur.\n",
    "\n",
    "e_field_max: The electric field strength of the pulse. Default and recommended: 1.0e-5 (given in atomic units). This is the maximum value of the electric field.\n",
    "This value should be chosen dependent on the size and dipole moment of the system which is studied. \n",
    "The values of the dipole moment during the time evolution should be between 1.0e-3 and 1.0e-6. They scale proportionally to e_field_max and e_field_max should therefore be chosen accordingly.\n",
    "Too high values lead the system from linear to non-linear dynamics which is undesired and too low values make the time evolution susceptible to numerical instabilities.\n",
    "\n",
    "update: How often an update during the time evolution is displayed in form of a console output. Default: 1. Given in the amount of time steps after which in update is shown.\n",
    "The update contains information about the coorindate (x,y or z) of the evolution, the time step and the corresponding percentage of completion, \n",
    "the current dipole moment and the numerical stability (trace deviation of the density matrix). Especially for testing update=1 is recommended.\n",
    "\n",
    "\n",
    "\n",
    "OPTIONAL: DIVIDE-AND-CONQUER HARTREE-FOCK\n",
    "Note: all parameters below are ONLY used if a divide-and-conquer calculation is done and will be ignored otherwise.\n",
    "\n",
    "Additional information for the parameters:\n",
    "All values are in the variable section should be given in Angstrom!\n",
    "The divide-and-conquer scheme for dividing into clusters using a 3D grid:\n",
    "xxxxxxxxxx|---------------------|-----------------------|-----------0----------|-----------------------|---------------------|xxxxxxxxxxx\n",
    "(ignored) |<--section_cut_off-->|<--partition_cut_off-->|<--partition_length-->|<--partition_cut_off-->|<--section_cut_off-->| (ignored)\n",
    "\n",
    "partition_length: atoms in this partition/box are determined by the partition num (default: 12.5 Angstrom, variable always given in Angstrom)\n",
    "\n",
    "partition_cut_off: atoms that are neighbors to atoms in the partition/box (default: 10 Angstrom, variable always given in Angstrom)\n",
    "\n",
    "section_cut_off: atoms that could be added to avoid bond breaking (default: 10 Angstrom, variable always given in Angstrom)\n",
    "\n",
    "\n",
    "\n",
    "OPTIONAL: DENSITY PLOTTING CONFIGURATIONS\n",
    "Note: all parameters below are ONLY used if a density plotting calculation is done and will be ignored otherwise.\n",
    "\n",
    "pixel_size: The size of one pixel / length of the 3D grid used for the visualization of the electronic density. Default: 0.5 Bohr atomic units. \n",
    "For larger systems a value of 1.0 or even 1.5 should be considered due to the high memory requirements of density grids.\n",
    "For small sytems values up to 0.1 might work. Memory requirements scale cubical with the inverse of pixel_size since we use a 3D grid.\n",
    "\n",
    "basis_function_space: Space around basis functions on their local grids. Basis functions grids get precomputed for better performance.\n",
    "Default value: 5.0 Bohr atomic units.\n",
    "Important: Has to be a multiple of pixel_size!\n",
    "\n",
    "additional_space: Additional space around the computed structure. Should be larger than basis_function_space and can be used to generate empty outer regions on the grid.\n",
    "Default value: 7.0 Bohr atomic units.\n",
    "\n",
    "hide_core_electrons: Core electron basis functions will not be plotted during the density grid calculation. Default and recommended: True.\n",
    "Core electrons have their electronic density concentrated to a small region which is problematic for visualizing electronic densities since only the core electron densities would be visible\n",
    "since valence electron densities occupy more space but have lower values.\n",
    "\n",
    "\n",
    "\n",
    "OPTIONAL: ALPHA-FOLD PREDICTIONS\n",
    "Note: the parameter below are ONLY used if an atomic energy calculation for comparison with AlphaFold is done and will be ignored otherwise.\n",
    "\n",
    "amino_acids_per_cluster: The clustering for atomic energy calculations for comparison with AlphaFold is done by \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "--------------------------\n",
    "ADDITIONAL CONFIGURATIONS:\n",
    "--------------------------\n",
    "Configurations which usually do not need to changed. Defaults are recommended.\n",
    "\n",
    "\n",
    "\n",
    "# PRECISION CONFIGURATIONS\n",
    "\n",
    "density_threshold_2: A lower cut-off for the density relevance value. Can be used to apply a smooth cut-off function for density cut-offs which is usually not done \n",
    "since the cut-off values are already small.\n",
    "Default: density_threshold\n",
    "\n",
    "densities_threshold: A different threshold value for the combination of two densities. A threshold is used both for the relevance of single densities and for the product of two densities.\n",
    "Usually the same threshold value is used but the value for the product can be chosen differently if wanted.\n",
    "Default: density_threshold\n",
    "\n",
    "densities_threshold_2: A lower cut-off for the relevance value of the product of two densities (i.e. densities_threshold, see above). See also the explanation of density_threshold_2.\n",
    "Default: density_threshold\n",
    "\n",
    "\n",
    "\n",
    "# THREADING\n",
    "\n",
    "num_threads_G: Number of threads for the G matrix computation. In most cases identical to num_threads. \n",
    "Note that on some compute nodes a higher number of threads can slow down calculations for large systems since the memory usage during G matrix computations can get very high.\n",
    "Default: num_threads.\n",
    "\n",
    "num_threads_integrals: Number of threads for the integral computation. In most cases identical to num_threads. \n",
    "Note that on some compute nodes a higher number of threads can slow down calculations for large systems since the memory usage during the integral computation can get very high.\n",
    "Default: num_threads.\n",
    "\n",
    "e_tensor_datatype: Datatype of the electron repulsion tensor in which the electron repulsion intgrals (ERIs) are stored. \n",
    "This is usually done in double precision but can be changed to single precision if the memory for double precision is not high enough. This can however influence the accuracy of the calculation.\n",
    "Default: 'float64'.\n",
    "\n",
    "\n",
    "\n",
    "# BASIS SET CONFIGURATIONS\n",
    "\n",
    "overlap_sto_precision: The precision of s-type basis functions used to estimate the relevance of electronic densities can be chosen.\n",
    "Lower values increase the computational speed of this part slightly but the relevance computation part is not performance-critical.\n",
    "Default: 6. Currently 2,3,6 is supported.\n",
    "\n",
    "overlap_part_precision: The precision of individual lobes of p-type basis functions used to estimate the relevance of electronic densities can be chosen.\n",
    "Default: 7. Currently only 7 is supported.\n",
    "\n",
    "\n",
    "\n",
    "ADVANCED VERBOSITY:\n",
    "\n",
    "display_runtimes: more detailed outputs for scf runtimes. Useful for runtime analysis.\n",
    "Default: False\n",
    "\n",
    "display_eigenenergies: displays eigenenergies around the Fermi edge. Useful to locate the HOMO-LUMO gap.\n",
    "default: False\n",
    "\n",
    "print_ERI_relevance: prints detailed information about the distribution of absolute values of ERIs which cooresponds to their relevance.\n",
    "Can be used to see how many ERIs get ignored in the relevance calculations. \n",
    "Default: False.\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "-------\n",
    "OUTPUT:\n",
    "-------\n",
    "The console output of Nimble consists of run-time logs and density/ERI counts.\n",
    "Below is an examplary output for beta-carotene (96 atoms) on an intel i5 processor using default paramenters:\n",
    "\n",
    "Preparations:           23.6352 s\n",
    "Overlap/Kinetic matrix: 3.6987 s\n",
    "Nuclei matrix:          7.5813 s\n",
    "Electron-electron integrals:\n",
    "    |Relevant densities: 7819\n",
    "    |Relevant density combinations: 4504594\n",
    "  >Relevance computations:        4.5151 s\n",
    "  >ERI computations:              22.3655 s\n",
    "  >Second relevance computations: 0.1991 s\n",
    "    |Total integrals:         4294967296\n",
    "    |Unique integrals:        541089856\n",
    "    |With relevant densities: 30572290\n",
    "    |With relevant values:    3946243\n",
    "  >Process ERIs:                  4.7365 s\n",
    "Electron tensor:        34.8972 s\n",
    "Ionic energy:           1.1126 s\n",
    "Starting SCF cycle: \n",
    "SCF step | convergence measure:\n",
    "       1 | 5.72175e-2  | -4339.2166657470425\n",
    "       2 | 1.75679e-2  | -4342.3203387113235\n",
    "       3 | 7.31540e-3  | -4343.106737365173\n",
    "       4 | 3.79263e-3  | -4343.323867045995\n",
    "       5 | 1.87254e-3  | -4343.37383228906\n",
    "       6 | 8.65668e-4  | -4343.383901291234\n",
    "       7 | 3.69166e-4  | -4343.3856308814475\n",
    "       8 | 1.43351e-4  | -4343.385883556393\n",
    "       9 | 5.24774e-5  | -4343.385916932791\n",
    "      10 | 1.88662e-5  | -4343.385921231835\n",
    "      11 | 7.04501e-6  | -4343.385921828226\n",
    "      12 | 2.96648e-6  | -4343.385921929285\n",
    "      13 | 1.45412e-6  | -4343.385921950744\n",
    "      14 | 7.58954e-7  | -4343.385921955811\n",
    "SCF times:\n",
    "    |Core Hamiltonian: 0.0 s\n",
    "    |Square root of inverse: 0.0226 s\n",
    "    |SAD guess: 0.0007 s\n",
    "    |G tensor: 5.6212 s\n",
    "    |Fock matrix construction: 0.0 s\n",
    "    |Commutator SPF-FPS: 0.0483 s\n",
    "    |Save Fock matrix: 0.0 s\n",
    "    |Error matrix calculation: 0.0 s\n",
    "    |Energy calculation: 0.001 s\n",
    "    |Transform Fock matrix: 0.004 s\n",
    "    |Eigenvalue calculation: 0.0831 s\n",
    "    |Transform eigenorbitals: 0.005 s\n",
    "    |Density matrix: 0.0351 s\n",
    "    |DIIS coefficients: 0.0104 s\n",
    "    |DIIS equations: 0.0023 s\n",
    "    |Mix new Fock matrix: 0.005 s\n",
    "    |Convergence criterium: 0.0162 s\n",
    "SCF cycle:              6.1007 s\n",
    "_______________________________________\n",
    "Total time:             77.0256 s\n",
    "Total energy: -1528.5474502578945 Ha\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "------------------\n",
    "PROGRAM STRUCTURE:\n",
    "------------------\n",
    "Main sections for the different applications are seperated with multi-line comments containing two lines of '======='.\n",
    "Subsections for functions with usage in similar program parts are declared with comments containing lines of '-------'.\n",
    "Functions contain a comment with further explanation.\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "------------\n",
    "DISCLAIMERS:\n",
    "------------\n",
    "- We cannot guarantee that NIMBLE runs smoothly on every device as it is still in a testing phase. Consider contacting us\n",
    "via the given contact information at the start of the program in case of difficulties.\n",
    "- There are currently several functions with unused/dead variables. These are mostly attributed to force calculations which were suppported\n",
    "in previous versions but not in version 3.10. Code for force calculations is still left in some places as forces will be implemented in the future.\n",
    "It was removed in all cases where it would reduce the performance of the code.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "=========================\n",
    "=========================\n",
    "START OF THE MAIN PROGRAM\n",
    "=========================\n",
    "=========================\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "--------\n",
    "Imports:\n",
    "--------\n",
    "\n",
    "numpy for various mathematical calculations\n",
    "np.set_printoptions(suppress=True) enables more convenient print outputs of arrays.\n",
    "\n",
    "numba.njit for just-in-time compilation to massively speed up calculations\n",
    "\n",
    "matplotlib for plotting\n",
    "\n",
    "time timekeeping of various parts of the program\n",
    "\n",
    "torch for matrix operations (better parallelization results for high CPU core counts than numpy)\n",
    "torch.set_grad_enabled(False) ensures that torch does not compute gradients which are used in machine learning to update models which is unnecessary here.\n",
    "\n",
    "json for data loading (optional, depending on coordinate file type)\n",
    "\n",
    "sys for large-scale calculations on clusters to load in job numbers from slurm files (optional, only if large-scale calculations (>1,000,000 atoms) are done) \n",
    "\n",
    "Datatype defines the datatype for most operations. Double precision i.e. 64-bit floating point numbers should be used.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "import numpy as np\n",
    "np.set_printoptions(suppress=True)\n",
    "from numba import njit,prange\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import torch\n",
    "torch.set_grad_enabled(False)\n",
    "import json\n",
    "import sys \n",
    "datatype='float64'\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "--------------------------------\n",
    "constants and derived quantities\n",
    "--------------------------------\n",
    "\"\"\"\n",
    "\n",
    "angstrom_to_bohr=1.88973\n",
    "nanometer_to_bohr=18.8973\n",
    "k_b=3.166811563e-6\n",
    "speed_of_light=137.03599\n",
    "\n",
    "eV_to_Hertz=241799050402417.0\n",
    "nm_to_Hertz=2.99792458E+17\n",
    "hertz_to_atomic_units=2.4188843265864e-16\n",
    "\n",
    "single='float32'\n",
    "\n",
    "coulomb_threshold*=angstrom_to_bohr\n",
    "coulomb_threshold_low*=angstrom_to_bohr\n",
    "\n",
    "partition_length*=angstrom_to_bohr\n",
    "partition_cut_off*=angstrom_to_bohr\n",
    "section_cut_off*=angstrom_to_bohr\n",
    "\n",
    "density_threshold_difference,densities_threshold_difference,coulomb_threshold_difference=density_threshold_2-density_threshold,densities_threshold_2-densities_threshold,coulomb_threshold-coulomb_threshold_low\n",
    "coulomb_threshold_times_2=coulomb_threshold*2\n",
    "\n",
    "delta_t=0.5*delta_t\n",
    "\n",
    "basis_set_configs=np.array([[[1,0,0,0,0],[1,0,0,0,0],[sto_precision,0            ,0             ,0            ,0             ]],\n",
    "                            [[1,2,3,0,0],[1,1,1,0,0],[sto_precision,sto_precision,lobe_precision,0            ,0             ]],\n",
    "                            [[1,2,3,4,5],[1,1,1,1,1],[sto_precision,sto_precision,lobe_precision,sto_precision,lobe_precision]]],dtype='int32')\n",
    "basis_set_configs_len=np.array([1,3,5],dtype='int32')\n",
    "basis_function_type_length_list=np.array(basis_set_configs[2][2],dtype='int32') \n",
    "implemented_orbital_types_num=5\n",
    "\n",
    "parts_sto_precision_1s=overlap_sto_precision\n",
    "parts_sto_precision_2s=overlap_sto_precision\n",
    "parts_sto_precision_2p=overlap_part_precision\n",
    "parts_sto_precision_3s=overlap_sto_precision\n",
    "parts_sto_precision_3p=overlap_part_precision\n",
    "\n",
    "num_basis_function_types=len(basis_function_type_length_list)\n",
    "num_basis_function_types_sqaure=num_basis_function_types*num_basis_function_types\n",
    "max_gaussian_functions=int(np.max(basis_function_type_length_list))\n",
    "max_gaussian_functions_square=max_gaussian_functions*max_gaussian_functions\n",
    "\n",
    "nuclei_hyp1f1_prefactor=np.sqrt(np.pi)/2.0\n",
    "V_ee_prefactor_sqrt=np.sqrt(2.0*np.pi*np.pi*np.sqrt(np.pi))*np.sqrt(np.sqrt(np.pi)/2.0)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-------------------\n",
    "Coefficient section\n",
    "-------------------\n",
    "\n",
    "\n",
    "Coefficients and exponents for Gaussian-type basis functions\n",
    "Values are set after the array definition. Values are taken from: https://www.basissetexchange.org/.\n",
    "Literature: \n",
    "A New Basis Set Exchange: An Open, Up-to-date Resource for the Molecular Sciences Community. \n",
    "Benjamin P. Pritchard, Doaa Altarawy, Brett Didier, Tara D. Gibson, Theresa L. Windus. J. Chem. Inf. Model. 2019, 59(11), 4814-4820, doi:10.1021/acs.jcim.9b00725. \n",
    "\n",
    "Explanation of the array format:\n",
    "Coefficient array for Slater-type orbitals:\n",
    "Axis 1: Element (1 for H, 2 for He, ...), currently implemented: H,C,N,O;\n",
    "Axis 2: Number of primitve Gaussians (e.g. STO-3, STO-6), currently implemented: STO-3G, STO-6G;\n",
    "Axis 3: coeffs: Orbital type: 0: 1s, 1: 2s, 2: 2p, 3: 3s, 4: 3p;\n",
    "        exponents/norms: Orbital type: 0: 1s, 1: 2s/2p, 2: 3s/3p;\n",
    "Axis 4: Coefficients for selected element, precision and orbital\n",
    "\"\"\"\n",
    "\n",
    "# Parameter tables, indexed as [element Z, primitives per contraction (n of STO-nG), orbital slot, primitive index].\n",
    "# sto_norms holds the per-primitive normalization factors; they are folded into\n",
    "# sto_coefs at the end of this section.\n",
    "sto_coefs=np.zeros((19,7,3,6),dtype=datatype)\n",
    "sto_exps=np.zeros((19,7,3,6),dtype=datatype)\n",
    "sto_norms=np.zeros((19,7,3,6),dtype=datatype)\n",
    "\n",
    "\n",
    "# STO-3G parameters (three primitives per contracted function): H, C, N, O, P, S.\n",
    "sto_coefs[1,3,0,:3]=np.array([0.1543289673E+00,0.5353281423E+00,0.4446345422E+00],dtype=datatype)\n",
    "sto_exps[1,3,0,:3]=np.array([0.3425250914E+01,0.6239137298E+00,0.1688554040E+00],dtype=datatype)\n",
    "sto_norms[1,3,0,:3]=np.array([1.7944418,0.50032645,0.18773545],dtype=datatype)\n",
    "sto_coefs[6,3,:2,:3]=np.array([[0.1543289673E+00,0.5353281423E+00,0.4446345422E+00],\n",
    "                              [-0.9996722919E-01,0.3995128261E+00,0.7001154689E+00]],dtype=datatype) \n",
    "sto_exps[6,3,:2,:3]=np.array([[0.7161683735E+02,0.1304509632E+02,0.3530512160E+01],\n",
    "                             [0.2941249355E+01,0.6834830964E+00,0.2222899159E+00]],dtype=datatype)\n",
    "sto_norms[6,3,:2,:3]=np.array([[17.54573,4.8921027,1.8356436],\n",
    "                              [1.6006964,0.5357423,0.23072779]],dtype=datatype) \n",
    "sto_coefs[7,3,:2,:3]=np.array([[0.1543289673E+00,0.5353281423E+00,0.4446345422E+00],\n",
    "                              [-0.9996722919E-01,0.3995128261E+00,0.7001154689E+00]],dtype=datatype) \n",
    "sto_exps[7,3,:2,:3]=np.array([[0.9910616896E+02,0.1805231239E+02,0.4885660238E+01],\n",
    "                             [0.3780455879E+01,0.8784966449E+00,0.2857143744E+00]],dtype=datatype)\n",
    "sto_norms[7,3,:2,:3]=np.array([[22.386469,6.2417984,2.3420842],\n",
    "                              [1.9322718,0.6467183,0.27852178]],dtype=datatype) \n",
    "sto_coefs[8,3,:2,:3]=np.array([[0.1543289673E+00,0.5353281423E+00,0.4446345422E+00],\n",
    "                              [-0.9996722919E-01,0.3995128261E+00,0.7001154689E+00]],dtype=datatype) \n",
    "sto_exps[8,3,:2,:3]=np.array([[0.1307093214E+03,0.2380886605E+02,0.6443608313E+01],\n",
    "                             [0.5033151319E+01,0.1169596125E+01,0.3803889600E+00]],dtype=datatype)\n",
    "sto_norms[8,3,:2,:3]=np.array([[27.551168,7.68182,2.882418],\n",
    "                              [2.3949149,0.80156183,0.3452081]],dtype=datatype) \n",
    "\n",
    "sto_coefs[15,3,:2,:3]=np.array([[0.1543289673E+00,0.5353281423E+00,0.4446345422E+00],\n",
    "                                [-0.9996722919E-01,0.3995128261E+00,0.7001154689E+00]],dtype=datatype) \n",
    "sto_coefs[15,3,2,:3]=np.array([-0.2196203690E+00,0.2255954336E+00,0.9003984260E+00],dtype=datatype)\n",
    "sto_exps[15,3,:2,:3]=np.array([[0.4683656378E+03,0.8531338559E+02,0.2308913156E+02],\n",
    "                               [0.2803263958E+02,0.6514182577E+01,0.2118614352E+01]],dtype=datatype)\n",
    "sto_exps[15,3,2,:3]=np.array([0.1743103231E+01,0.4863213771E+00,0.1903428909E+00],dtype=datatype)\n",
    "sto_norms[15,3,:2,:3]=np.array([[71.75445349,20.00658552,7.5069892],\n",
    "                                [8.6827658,2.90606308,1.25155236]],dtype=datatype)\n",
    "sto_norms[15,3,2,:3]=np.array([1.081191,0.4150521,0.2053821],dtype=datatype)\n",
    "sto_coefs[16,3,:2,:3]=np.array([[0.1543289673E+00,0.5353281423E+00,0.4446345422E+00],\n",
    "                                [-0.9996722919E-01,0.3995128261E+00,0.7001154689E+00]],dtype=datatype) \n",
    "sto_coefs[16,3,2,:3]=np.array([-0.2196203690E+00,0.2255954336E+00,0.9003984260E+00],dtype=datatype)\n",
    "sto_exps[16,3,:2,:3]=np.array([[0.5331257359E+03,0.9710951830E+02,0.2628162542E+02],\n",
    "                               [0.3332975173E+02,0.7745117521E+01,0.2518952599E+01]],dtype=datatype)\n",
    "sto_exps[16,3,2,:3]=np.array([0.2029194274E+01,0.5661400518E+00,0.2215833792E+00],dtype=datatype)\n",
    "sto_norms[16,3,:2,:3]=np.array([[79.07374871,22.04735231,8.27273777],\n",
    "                                [9.88630834,3.30888064,1.42503355]],dtype=datatype)\n",
    "sto_norms[16,3,2,:3]=np.array([1.2117215,0.46516069,0.23017756],dtype=datatype)\n",
    "\n",
    "\n",
    "# STO-6G parameters (six primitives per contracted function).\n",
    "sto_coefs[1,6,0,:]=np.array([0.9163596281E-02,0.4936149294E-01,0.1685383049E+00,0.3705627997E+00,0.4164915298E+00,0.1303340841E+00],dtype=datatype)\n",
    "sto_exps[1,6,0,:]=np.array([0.3552322122E+02,0.6513143725E+01,0.1822142904E+01,0.6259552659E+00,0.2430767471E+00,0.1001124280E+00],dtype=datatype)\n",
    "sto_norms[1,6,0,:]=np.array([10.370372,2.9057155,1.1177558,0.50155383,0.24672756,0.12684579],dtype=datatype)\n",
    "sto_coefs[6,6,:2,:]=np.array([[0.9163596281E-02,0.4936149294E-01,0.1685383049E+00,0.3705627997E+00,0.4164915298E+00,0.1303340841E+00],\n",
    "                              [-0.1325278809E-01,-0.4699171014E-01,-0.3378537151E-01,0.2502417861E+00,0.5951172526E+00,0.2407061763E+00]],dtype=datatype) \n",
    "sto_exps[6,6,:2,:]=np.array([[0.7427370491E+03,0.1361800249E+03,0.3809826352E+02,0.1308778177E+02,0.5082368648E+01,0.2093200076E+01],\n",
    "                             [0.3049723950E+02,0.6036199601E+01,0.1876046337E+01,0.7217826470E+00,0.3134706954E+00,0.1436865550E+00]],dtype=datatype)\n",
    "sto_norms[6,6,:2,:]=np.array([[101.399635,28.411564,10.929215,4.9041033,2.412458,1.2402754],\n",
    "                              [9.2492285,2.7446237,1.1424646,0.5581037,0.298578,0.1663306]],dtype=datatype) \n",
    "sto_coefs[7,6,:2,:]=np.array([[0.9163596281E-02,0.4936149294E-01,0.1685383049E+00,0.3705627997E+00,0.4164915298E+00,0.1303340841E+00],\n",
    "                              [-0.1325278809E-01,-0.4699171014E-01,-0.3378537151E-01,0.2502417861E+00,0.5951172526E+00,0.2407061763E+00]],dtype=datatype) \n",
    "sto_exps[7,6,:2,:]=np.array([[0.1027828458E+04,0.1884512226E+03,0.5272186097E+02,0.1811138217E+02,0.7033179691E+01,0.2896651794E+01],\n",
    "                             [0.3919880787E+02,0.7758467071E+01,0.2411325783E+01,0.9277239437E+00,0.4029111410E+00,0.1846836552E+00]],dtype=datatype)\n",
    "sto_norms[7,6,:2,:]=np.array([[129.37506,36.25011,13.944506,6.2571096,3.0780375,1.5824584],\n",
    "                              [11.165154,3.3131573,1.3791199,0.67371184,0.36042675,0.20078509]],dtype=datatype) \n",
    "sto_coefs[8,6,:2,:]=np.array([[0.9163596281E-02,0.4936149294E-01,0.1685383049E+00,0.3705627997E+00,0.4164915298E+00,0.1303340841E+00],\n",
    "                              [-0.1325278809E-01,-0.4699171014E-01,-0.3378537151E-01,0.2502417861E+00,0.5951172526E+00,0.2407061763E+00]],dtype=datatype) \n",
    "sto_exps[8,6,:2,:]=np.array([[0.1355584234E+04,0.2485448855E+03,0.6953390229E+02,0.2388677211E+02,0.9275932609E+01,0.3820341298E+01],\n",
    "                             [0.5218776196E+02,0.1032932006E+02,0.3210344977E+01,0.1235135428E+01,0.5364201581E+00,0.2458806060E+00]],dtype=datatype)\n",
    "sto_norms[8,6,:2,:]=np.array([[159.22269,44.613235,17.16159,7.700664,3.7881596,1.9475415],\n",
    "                              [13.838423,4.106425,1.709322,0.83501834,0.4467236,0.24885897]],dtype=datatype) \n",
    "\n",
    "sto_coefs[15,6,:2,:]=np.array([[0.9163596281E-02,0.4936149294E-01,0.1685383049E+00,0.3705627997E+00,0.4164915298E+00,0.1303340841E+00],\n",
    "                               [-0.1325278809E-01,-0.4699171014E-01,-0.3378537151E-01,0.2502417861E+00,0.5951172526E+00,0.2407061763E+00]],dtype=datatype) \n",
    "sto_coefs[15,6,2,:]=np.array([-0.7943126362E-02,-0.7100264172E-01,-0.1785026925E+00,0.1510635058E+00,0.7354914767E+00,0.2760593123E+00],dtype=datatype)\n",
    "sto_exps[15,6,:2,:]=np.array([[0.4857412371E+04,0.8906012410E+03,0.2491581331E+03,0.8559254335E+02,0.3323808927E+02,0.1368928069E+02],\n",
    "                              [0.2906649590E+03,0.5753018103E+02,0.1788033738E+02,0.6879210280E+01,0.2987645712E+01,0.1369456623E+01]],dtype=datatype)\n",
    "sto_exps[15,6,2,:]=np.array([0.1111939652E+02,0.2977874272E+01,0.1116734493E+01,0.4998708868E+00,0.2473606277E+00,0.1274811462E+00],dtype=datatype)\n",
    "sto_norms[15,6,:2,:]=np.array([[414.68070016,116.19101999,44.69576689,20.0556638,9.86590955,5.07219059],\n",
    "                               [50.17121301,14.88784702,6.19714809,3.02735974,1.61959681,0.90223839]],dtype=datatype)\n",
    "sto_norms[15,6,2,:]=np.array([4.33981289,1.61562238,0.77423454,0.42369513,0.24998159,0.15205285],dtype=datatype)\n",
    "sto_coefs[16,6,:2,:]=np.array([[0.9163596281E-02,0.4936149294E-01,0.1685383049E+00,0.3705627997E+00,0.4164915298E+00,0.1303340841E+00],\n",
    "                               [-0.1325278809E-01,-0.4699171014E-01,-0.3378537151E-01,0.2502417861E+00,0.5951172526E+00,0.2407061763E+00]],dtype=datatype) \n",
    "sto_coefs[16,6,2,:]=np.array([-0.7943126362E-02,-0.7100264172E-01,-0.1785026925E+00,0.1510635058E+00,0.7354914767E+00,0.2760593123E+00],dtype=datatype)\n",
    "sto_exps[16,6,:2,:]=np.array([[0.5529038289E+04,0.1013743118E+04,0.2836087927E+03,0.9742727471E+02,0.3783386178E+02,0.1558207360E+02],\n",
    "                              [0.3455896791E+03,0.6840121655E+02,0.2125904712E+02,0.8179121699E+01,0.3552198128E+01,0.1628232301E+01]],dtype=datatype)\n",
    "sto_exps[16,6,2,:]=np.array([0.1294439442E+02,0.3466625105E+01,0.1300021248E+01,0.5819134077E+00,0.2879592903E+00,0.1484042983E+00],dtype=datatype)\n",
    "sto_norms[16,6,:2,:]=np.array([[456.98010207,128.04305616,49.25494751,22.10143681,10.87227921,5.58957813],\n",
    "                               [57.12558561,16.95149327,7.05615217,3.44699057,1.84409367,1.02730019]],dtype=datatype)\n",
    "sto_norms[16,6,2,:]=np.array([4.8637517,1.81067394,0.86770666,0.47484718,0.28016147,0.17040995],dtype=datatype)\n",
    "\n",
    "# Fold the normalization factors into the contraction coefficients once, so that\n",
    "# downstream integral code can use sto_coefs directly without a separate norm factor.\n",
    "sto_coefs=sto_coefs*sto_norms\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "Coefficients, exponents and offsets for the Gaussian lobe function basis functions\n",
    "Similar structure as before.\n",
    "Implemented are precisions 2,4,6 (i.e. 1,2,3 Gaussian functions per lobe).\n",
    "Implemented for elements C,N,O,P,S (note that H does not have p-orbitals in minimal basis calculations).\n",
    "\"\"\"\n",
    "lobe_coefs=np.zeros((19,5,2,8),dtype=datatype)\n",
    "lobe_exps=np.zeros((19,5,2,8),dtype=datatype)\n",
    "lobe_offsets=np.zeros((19,5,2,8),dtype=datatype)\n",
    "lobe_norms=np.zeros((19,5,2,8),dtype=datatype)\n",
    "\n",
    "# Each p-orbital is built from a pair of lobes displaced by +/- offset along one\n",
    "# axis; the alternating signs in lobe_norms give the antisymmetric p-type shape.\n",
    "# Axis 1 of these tables is indexed by lobe_precision/2 (primitives per lobe).\n",
    "lobe_coefs[6,3,0,:6]=np.array([0.64520464,0.88439881,0.33605098,0.64520464,0.88439881,0.33605098],dtype=datatype)\n",
    "lobe_exps[6,3,0,:6]=np.array([0.2670606,0.81133893,3.29524071,0.2670606,0.81133893,3.29524071],dtype=datatype)\n",
    "lobe_offsets[6,3,0,:6]=np.array([0.2,0.2,0.2,-0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[6,3,0,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.000657885155526\n",
    "lobe_coefs[7,3,0,:6]=np.array([0.64054726,0.9684976,0.39050546,0.64054726,0.9684976,0.39050546],dtype=datatype)\n",
    "lobe_exps[7,3,0,:6]=np.array([0.3292863,0.9914091,4.04400941,0.3292863,0.9914091,4.04400941],dtype=datatype)\n",
    "lobe_offsets[7,3,0,:6]=np.array([0.2,0.2,0.2,-0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[7,3,0,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.0004310541198355\n",
    "lobe_coefs[8,3,0,:6]=np.array([0.64437286,1.06711325,0.45753199,0.64437286,1.06711325,0.45753199],dtype=datatype)\n",
    "lobe_exps[8,3,0,:6]=np.array([0.42241359,1.26459378,5.20158956,0.42241359,1.26459378,5.20158956],dtype=datatype)\n",
    "lobe_offsets[8,3,0,:6]=np.array([0.2,0.2,0.2,-0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[8,3,0,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.000318589703924\n",
    "\n",
    "lobe_coefs[15,3,0,:6]=np.array([3.20780205,2.22347651,1.35755385,3.20780205,2.22347651,1.35755385],dtype=datatype)\n",
    "lobe_exps[15,3,0,:6]=np.array([7.67937533,2.51881708,32.39705451,7.67937533,2.51881708,32.39705451],dtype=datatype)\n",
    "lobe_offsets[15,3,0,:6]=np.array([0.1,0.1,0.1,-0.1,-0.1,-0.1],dtype=datatype)\n",
    "lobe_norms[15,3,0,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.000702490931977\n",
    "lobe_coefs[15,3,1,:6]=np.array([0.73776749,0.08185985,0.64707098,0.73776749,0.08185985,0.64707098],dtype=datatype)\n",
    "lobe_exps[15,3,1,:6]=np.array([0.2525002,0.11081551,0.59072838,0.2525002,0.11081551,0.59072838],dtype=datatype)\n",
    "lobe_offsets[15,3,1,:6]=np.array([0.2,0.2,0.2,-0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[15,3,1,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.0000599599379631\n",
    "lobe_coefs[16,3,0,:6]=np.array([3.42244003,2.2028318,1.53763642,3.42244003,2.2028318,1.53763642],dtype=datatype)\n",
    "lobe_exps[16,3,0,:6]=np.array([8.79554437,2.90676936,37.24211708,8.79554437,2.90676936,37.24211708],dtype=datatype)\n",
    "lobe_offsets[16,3,0,:6]=np.array([0.1,0.1,0.1,-0.1,-0.1,-0.1],dtype=datatype)\n",
    "lobe_norms[16,3,0,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.0006429068471574\n",
    "lobe_coefs[16,3,1,:6]=np.array([0.75966712,0.09877814,0.66719658,0.75966712,0.09877814,0.66719658],dtype=datatype)\n",
    "lobe_exps[16,3,1,:6]=np.array([0.29789993,0.13715709,0.69077193,0.29789993,0.13715709,0.69077193],dtype=datatype)\n",
    "lobe_offsets[16,3,1,:6]=np.array([0.2,0.2,0.2,-0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[16,3,1,:6]=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0],dtype=datatype)*1.0000721729039534\n",
    "\n",
    "# Lower-precision variant: two primitives per lobe (four Gaussians per p-orbital).\n",
    "lobe_coefs[6,2,0,:4]=np.array([0.96413977,0.82547949,0.96413977,0.82547949],dtype=datatype)\n",
    "lobe_exps[6,2,0,:4]=np.array([0.33941932,1.41201315,0.33941932,1.41201315],dtype=datatype)\n",
    "lobe_offsets[6,2,0,:4]=np.array([0.2,0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[6,2,0,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.0028886026340327\n",
    "lobe_coefs[7,2,0,:4]=np.array([0.99478787,0.91921183,0.99478787,0.91921183],dtype=datatype)\n",
    "lobe_exps[7,2,0,:4]=np.array([0.42224616,1.75218641,0.42224616,1.75218641],dtype=datatype)\n",
    "lobe_offsets[7,2,0,:4]=np.array([0.2,0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[7,2,0,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.002071766276696\n",
    "lobe_coefs[8,2,0,:4]=np.array([1.04707358,1.0237873,1.04707358,1.0237873],dtype=datatype)\n",
    "lobe_exps[8,2,0,:4]=np.array([0.5507524,2.29001396,0.5507524,2.29001396],dtype=datatype)\n",
    "lobe_offsets[8,2,0,:4]=np.array([0.2,0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[8,2,0,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.001730293227881\n",
    "\n",
    "lobe_coefs[15,2,0,:4]=np.array([3.40067464,3.06307294,3.40067464,3.06307294],dtype=datatype)\n",
    "lobe_exps[15,2,0,:4]=np.array([3.23569931,13.74354402,3.23569931,13.74354402],dtype=datatype)\n",
    "lobe_offsets[15,2,0,:4]=np.array([0.1,0.1,-0.1,-0.1],dtype=datatype)\n",
    "lobe_norms[15,2,0,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.0028505105813497\n",
    "lobe_coefs[15,2,1,:4]=np.array([0.6686712,0.79181905,0.6686712,0.79181905],dtype=datatype)\n",
    "lobe_exps[15,2,1,:4]=np.array([0.21070418,0.54247792,0.21070418,0.54247792],dtype=datatype)\n",
    "lobe_offsets[15,2,1,:4]=np.array([0.2,0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[15,2,1,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.0002723419325867\n",
    "lobe_coefs[16,2,0,:4]=np.array([3.32374337,3.47888193,3.32374337,3.47888193],dtype=datatype)\n",
    "lobe_exps[16,2,0,:4]=np.array([16.01219981,3.76474575,16.01219981,3.76474575],dtype=datatype)\n",
    "lobe_offsets[16,2,0,:4]=np.array([0.1,0.1,-0.1,-0.1],dtype=datatype)\n",
    "lobe_norms[16,2,0,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.0023555392051022\n",
    "lobe_coefs[16,2,1,:4]=np.array([0.68440713,0.83493367,0.68440713,0.83493367],dtype=datatype)\n",
    "lobe_exps[16,2,1,:4]=np.array([0.24322328,0.62787788,0.24322328,0.62787788],dtype=datatype)\n",
    "lobe_offsets[16,2,1,:4]=np.array([0.2,0.2,-0.2,-0.2],dtype=datatype)\n",
    "lobe_norms[16,2,1,:4]=np.array([-1.0,-1.0,1.0,1.0],dtype=datatype)*1.0002133800470114\n",
    "\n",
    "# Fold the sign/normalization factors into the lobe coefficients.\n",
    "lobe_coefs=lobe_coefs*lobe_norms\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "Coefficients, exponents and offsets for orbital parts. Used for estimating the relevance of wave function interactions.\n",
    "The shifted direction has the index 1 \n",
    "\"\"\"\n",
    "orbital_parts_coeffs=np.zeros((18,2,7),dtype=datatype)\n",
    "orbital_parts_exps=np.zeros((18,2,7,2),dtype=datatype)\n",
    "orbital_parts_offsets=np.zeros((18,2,7),dtype=datatype)\n",
    "\n",
    "# Indexing: [element Z, direction index (1 = shifted direction), part]. The\n",
    "# trailing scalar factor on each coeffs row looks like an overall normalization.\n",
    "# orbital_parts_exps has two exponents per part (last axis) - presumably a pair\n",
    "# of fit parameters per part; TODO confirm against the code that consumes them.\n",
    "orbital_parts_coeffs[6,0]=np.array([0.21020597,-0.10733293,0.12119919,0.07106207,-0.05400071,0.05048129,0.18935931],dtype=datatype)*1.0072001798871546\n",
    "orbital_parts_exps[6,0,:,0]=np.array([0.5764236,0.87086635,4.74752682,0.22863708,0.32374581,1.46137983,1.516823],dtype=datatype)\n",
    "orbital_parts_exps[6,0,:,1]=np.array([0.96949833,18.27707337,14.12262395,0.34975632,2.30160144,56.80367948,3.43478416],dtype=datatype)\n",
    "orbital_parts_offsets[6,0]=np.array([0.96183788,0.02043568,0.36447395,1.53245644,-0.14208569,0.16136831,0.63376153],dtype=datatype)\n",
    "\n",
    "orbital_parts_coeffs[7,0]=np.array([0.12594915,0.2063496,3.0087257,-0.0877023,-2.99593826,0.03994704,0.40324338],dtype=datatype)*1.0041560691795643\n",
    "orbital_parts_exps[7,0,:,0]=np.array([0.55053191,4.45342761,0.56341201,0.96059725,0.58047751,0.24416567,1.23328148],dtype=datatype)\n",
    "orbital_parts_exps[7,0,:,1]=np.array([1.77288519,13.06354044,4.0972497,41.14980098,4.26751697,0.62358979,3.60332721],dtype=datatype)\n",
    "orbital_parts_offsets[7,0]=np.array([1.47613285,0.32624442,0.8411654,-0.02996572,0.84364999,2.04000418,0.72411866],dtype=datatype)\n",
    "\n",
    "orbital_parts_coeffs[8,0]=np.array([0.05957721,0.25223747,0.28753908,0.29850678,-0.08915678,0.21772122,-0.12416925],dtype=datatype)*1.0112732552315165\n",
    "orbital_parts_exps[8,0,:,0]=np.array([0.3262607,0.75808978,1.85098,14.39296814,0.53665137,4.5566118,1.58209237],dtype=datatype)\n",
    "orbital_parts_exps[8,0,:,1]=np.array([0.47470149,1.2008111,3.70769177,209.21137127,4.4360045,13.24945624,38.02792748],dtype=datatype)\n",
    "orbital_parts_offsets[8,0]=np.array([1.26686064,0.82636302,0.57807172,0.17249301,-0.08838873,0.36008857,-0.02014936],dtype=datatype)\n",
    "\n",
    "orbital_parts_coeffs[15,0]=np.array([1.22961756,8.39115404,-2.57565278,0.81374749,0.24589321,-8.55022728,2.69403254],dtype=datatype)*1.0115313380573823\n",
    "orbital_parts_exps[15,0,:,0]=np.array([11.56045365,8.33496245,2.5728766,4.88488294,1.87827802,8.21259311,2.62899862],dtype=datatype)\n",
    "orbital_parts_exps[15,0,:,1]=np.array([23.1630487,80.60350661,12.16498862,8.63369543,3.12080396,78.57470156,12.14378531],dtype=datatype)\n",
    "orbital_parts_offsets[15,0]=np.array([0.20994224,0.05841471,0.12521072,0.40181425,0.57244225,0.05325301,0.14185866],dtype=datatype)\n",
    "orbital_parts_coeffs[15,1]=np.array([0.09597981,0.05056336,-0.06168637,0.08630214,0.07481595,-0.03724176,0.12060358],dtype=datatype)*1.0057201407894794\n",
    "orbital_parts_exps[15,1,:,0]=np.array([0.63966982,0.32623491,0.2678816,0.19522489,0.47314754,0.19298332,0.43613297],dtype=datatype)\n",
    "orbital_parts_exps[15,1,:,1]=np.array([3.07613523,16.30538177,4.42319665,0.22930567,7.94012517,0.77526177,0.91420862],dtype=datatype)\n",
    "orbital_parts_offsets[15,1]=np.array([1.03604633,0.2836936,0.20146867,1.28010504,0.59456785,-0.36530171,1.44961309],dtype=datatype)\n",
    "\n",
    "orbital_parts_coeffs[16,0]=np.array([-10.13669092,-0.66666356,-0.24129124,9.51745308,1.03294444,0.19423795,1.85596052],dtype=datatype)*1.0152183206062153\n",
    "orbital_parts_exps[16,0,:,0]=np.array([8.71597986,14.00945829,3.00237973,8.83381982,4.57601809,2.01192887,12.22370701],dtype=datatype)\n",
    "orbital_parts_exps[16,0,:,1]=np.array([70.28919659,22.01410401,15.36844365,74.26872175,6.95223015,3.11401983,14.06196848],dtype=datatype)\n",
    "orbital_parts_offsets[16,0]=np.array([0.05482271,-0.05780663,-0.04124501,0.0603514,0.30744847,0.52949946,0.12796264],dtype=datatype)\n",
    "orbital_parts_coeffs[16,1]=np.array([0.15694047,0.10590538,0.05370748,0.0740629,0.05082452,1.11074957,-1.07687433],dtype=datatype)*1.0017166478392552\n",
    "orbital_parts_exps[16,1,:,0]=np.array([0.72395277,0.48092914,0.21892612,0.24555874,0.36743536,0.40806574,0.40162075],dtype=datatype)\n",
    "orbital_parts_exps[16,1,:,1]=np.array([2.90350593,1.37808715,0.53086674,1.53910575,20.90254615,6.50731129,6.44015996],dtype=datatype)\n",
    "orbital_parts_offsets[16,1]=np.array([0.92661667,1.64834863,2.22262194,1.10123271,0.25207419,0.39454945,0.37554786],dtype=datatype)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "Array with atomic masses for all elements.\n",
    "\"\"\"\n",
    "# Indexed by atomic number Z; index 0 is a dummy entry so Z can be used directly.\n",
    "atomic_masses=np.array([0.0,1.0080,4.00260,7.0,9.012183,10.81,12.011,14.007,15.999,18.99840316,20.180,22.9897693,24.305,26.981538,28.085,30.97376200,32.07,35.45,39.9],dtype=datatype)\n",
    "\n",
    "\n",
    "# Pairwise cutoff distances between heavy elements (indexed by Z, entries only\n",
    "# for C/N/O); units presumably Bohr - TODO confirm at the use site.\n",
    "element_cuts=np.zeros((9,9),dtype=datatype)\n",
    "element_cuts[6,7]=2.67\n",
    "element_cuts[6,8]=2.48\n",
    "element_cuts[7,8]=4.0\n",
    "# Symmetrize the off-diagonal entries first; the diagonal is filled afterwards\n",
    "# so the transpose-add does not double it.\n",
    "element_cuts+=element_cuts.T\n",
    "element_cuts[6,6]=2.76\n",
    "element_cuts[7,7]=4.0\n",
    "element_cuts[8,8]=4.0\n",
    "# Element-to-hydrogen bond distances, indexed by the heavy atom's Z (C,N,O,S only).\n",
    "element_h_bond_distances=np.zeros(19,dtype=datatype)\n",
    "element_h_bond_distances[6]=2.0598\n",
    "element_h_bond_distances[7]=1.9087\n",
    "element_h_bond_distances[8]=1.8142\n",
    "element_h_bond_distances[16]=2.5247\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "Single atom density arrays which are used for the single atom density guess.\n",
    "These arrays are obtained via running a Hartree-Fock calculation for a single atom.\n",
    "Currently, only STO-3G calculations are supported.\n",
    "\"\"\"\n",
    "# Indexed [element Z, basis function, basis function]; only the leading n x n\n",
    "# sub-block is filled, where n is the element's basis-function count (1, 5 or 9).\n",
    "single_atom_densities=np.zeros((19,9,9),dtype=datatype)\n",
    "single_atom_densities[1,0,0]=1.0\n",
    "single_atom_densities[6,:5,:5]=np.array([[ 2.1257596 ,-0.51704437,-0.        ,-0.        , 0.        ],\n",
    "                                         [-0.51704437, 2.1257598 , 0.        , 0.        ,-0.        ],\n",
    "                                         [-0.        , 0.        , 0.66666141, 0.66673975, 0.66659095],\n",
    "                                         [-0.        , 0.        , 0.66673975, 0.66681809, 0.66666928],\n",
    "                                         [ 0.        ,-0.        , 0.66659095, 0.66666928, 0.6665205 ]])\n",
    "single_atom_densities[7,:5,:5]=np.array([[ 2.11111116,-0.48432224,-0.        , 0.        ,-0.        ],\n",
    "                                         [-0.48432224, 2.11111129, 0.        ,-0.        , 0.        ],\n",
    "                                         [-0.        , 0.        , 1.        , 0.36364542, 0.35919786],\n",
    "                                         [ 0.        ,-0.        , 0.36364542, 0.99999899, 0.36142164],\n",
    "                                         [-0.        , 0.        , 0.35919786, 0.36142164, 1.00000101]])\n",
    "single_atom_densities[8,:5,:5]=np.array([[ 2.11288737,-0.48838309,-0.        , 0.        ,-0.        ],\n",
    "                                         [-0.48838309, 2.11288734, 0.        ,-0.        , 0.        ],\n",
    "                                         [-0.        , 0.        , 1.33333698, 0.33333328, 0.33295953],\n",
    "                                         [ 0.        ,-0.        , 0.33333328, 1.33332968, 0.33370704],\n",
    "                                         [-0.        , 0.        , 0.33295953, 0.33370704, 1.33333333]])\n",
    "single_atom_densities[15]=np.array([[ 2.24915541,-0.77208348,-0.        ,-0.        , 0.        , 0.16024315, 0.        , 0.        ,-0.        ],\n",
    "                                    [-0.77208348, 2.39627112, 0.        , 0.        ,-0.        ,-0.58386231,-0.        ,-0.        , 0.        ],\n",
    "                                    [-0.        , 0.        , 2.00017364,-0.00096016, 0.05029112,-0.        ,-0.15059793, 0.00110663,-0.14668533],\n",
    "                                    [-0.        , 0.        ,-0.00096016, 1.99794802, 0.11250314,-0.        , 0.00110662,-0.14713737,-0.32812355],\n",
    "                                    [ 0.        ,-0.        , 0.05029112, 0.11250314, 1.99739123, 0.        ,-0.14668495,-0.32812338,-0.14627048],\n",
    "                                    [ 0.16024315,-0.58386231,-0.        ,-0.        , 0.        , 2.1426492 , 0.        , 0.        ,-0.        ],\n",
    "                                    [ 0.        ,-0.        ,-0.15059793, 0.00110662,-0.14668495, 0.        , 1.0729779 , 0.0017765 , 0.42783984],\n",
    "                                    [ 0.        ,-0.        , 0.00110663,-0.14713737,-0.32812338, 0.        , 0.0017765 , 1.07183876, 0.95699552],\n",
    "                                    [-0.        , 0.        ,-0.14668533,-0.32812355,-0.14627048,-0.        , 0.42783984, 0.95699552, 1.07154692]])\n",
    "single_atom_densities[16]=np.array([[ 2.26601244,-0.79971199,-0.        , 0.        , 0.        , 0.16211699, 0.        ,-0.        ,-0.        ],\n",
    "                                    [-0.79971199, 2.40784747, 0.        ,-0.        ,-0.        ,-0.57394584,-0.        , 0.        , 0.        ],\n",
    "                                    [-0.        , 0.        , 2.04283706, 0.07500791, 0.07539494,-0.        ,-0.27033786,-0.22892389,-0.23010518],\n",
    "                                    [ 0.        ,-0.        , 0.07500791, 2.04360807,-0.07500844, 0.        ,-0.22892397,-0.27269107, 0.22892558],\n",
    "                                    [ 0.        ,-0.        , 0.07539494,-0.07500844, 2.042836  , 0.        ,-0.23010518, 0.2289255 ,-0.27033462],\n",
    "                                    [ 0.16211699,-0.57394584,-0.        , 0.        , 0.        , 2.13721461, 0.        ,-0.        ,-0.        ],\n",
    "                                    [ 0.        ,-0.        ,-0.27033786,-0.22892397,-0.23010518, 0.        , 1.41595597, 0.69867516, 0.70228044],\n",
    "                                    [-0.        , 0.        ,-0.22892389,-0.27269107, 0.2289255 ,-0.        , 0.69867516, 1.42313819,-0.69868007],\n",
    "                                    [-0.        , 0.        ,-0.23010518, 0.22892558,-0.27033462,-0.        , 0.70228044,-0.69868007, 1.41594609]])\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-----------------------\n",
    "preprocessing functions\n",
    "-----------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_num_basis_functions(element_list,num_atoms):\n",
    "    \"\"\"\n",
    "    Calculates the total number of basis functions for an array of elements.\n",
    "    For example: In the STO-3G minimal basis the number of basis functions is 1, 5 or 9 for Z<=2, 2<Z<=10 and 10<Z<=18, respectively. The orbital types are 1s, 2s, 2px, 2py, 2pz, 3s, 3px, 3py, 3pz.\n",
    "    Inputs are a list of elements (atomic numbers) and the number of atoms; the output is the total number of basis functions.\n",
    "    NOTE(review): elements with Z>18 silently contribute zero basis functions.\n",
    "    \"\"\"\n",
    "\n",
    "    # Derive the number of atom-type rows from the config table instead of\n",
    "    # hard-coding 3, so extending basis_set_configs does not break this count.\n",
    "    num_atom_types=basis_set_configs_len.shape[0]\n",
    "    basis_function_lengths_per_atom_type=np.zeros(num_atom_types,dtype='int32')\n",
    "\n",
    "    for atom_type in range(num_atom_types):\n",
    "        for i in range(basis_set_configs_len[atom_type]):\n",
    "            \n",
    "            current_type=basis_set_configs[atom_type,0,i]\n",
    "            current_class=basis_set_configs[atom_type,1,i]\n",
    "            # s-type orbitals (types 1,2,4 = 1s,2s,3s) add one basis function;\n",
    "            # p-type orbitals (types 3,5 = 2p,3p) add three (px,py,pz).\n",
    "            if ((current_type==1 or current_type==2 or current_type==4) and current_class==1):\n",
    "                basis_function_lengths_per_atom_type[atom_type]+=1\n",
    "            elif ((current_type==3 or current_type==5) and current_class==1):\n",
    "                basis_function_lengths_per_atom_type[atom_type]+=3\n",
    "\n",
    "    # Sum the per-atom counts: atom type 0 = H/He, 1 = first row, 2 = second row.\n",
    "    num_basis_functions=0\n",
    "    for i in range(num_atoms):\n",
    "\n",
    "        if (element_list[i]<=2): num_basis_functions+=basis_function_lengths_per_atom_type[0]\n",
    "        elif (element_list[i]<=10): num_basis_functions+=basis_function_lengths_per_atom_type[1]\n",
    "        elif (element_list[i]<=18): num_basis_functions+=basis_function_lengths_per_atom_type[2]\n",
    "\n",
    "    return num_basis_functions\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_num_gaussian_functions(element_list,num_atoms,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Counts the primitive Gaussian functions and builds the index lists that map\n",
    "    atoms -> basis functions -> primitive Gaussians.\n",
    "    Basis function type list for minimal basis STO-n: 1: 1s, 2: 2s, 3: 2p, 4: 3s, 5: 3p.\n",
    "    Inputs are a list of elements, number of atoms, number of basis functions.\n",
    "    Returns (num_gaussian_functions, basis_functions_index_list,\n",
    "    gaussian_functions_index_list, atom_of_basisfunction, type_of_basis_function).\n",
    "    NOTE(review): elements with Z>18 leave atom_type unassigned; callers must pass Z<=18 only.\n",
    "    \"\"\"\n",
    "\n",
    "    # (The previously built gaussian_functions_atom_index_list was never returned\n",
    "    # or used, so that dead bookkeeping has been removed.)\n",
    "    num_gaussian_functions=0\n",
    "    basis_functions_index_list=np.zeros(num_atoms+1,dtype='int32')\n",
    "    gaussian_functions_index_list=np.zeros(num_basis_functions+1,dtype='int32')\n",
    "    atom_of_basisfunction=np.zeros(num_basis_functions,dtype='int32')\n",
    "    type_of_basis_function=np.zeros(num_basis_functions,dtype='int32')\n",
    "    basis_functions_count=0\n",
    "\n",
    "    for i in range(num_atoms):\n",
    "\n",
    "        basis_functions_index_list[i]=basis_functions_count\n",
    "        if (element_list[i]<=2): atom_type=0\n",
    "        elif (element_list[i]<=10): atom_type=1\n",
    "        elif (element_list[i]<=18): atom_type=2\n",
    "        \n",
    "        for j in range(basis_set_configs_len[atom_type]):\n",
    "            orbital_type=basis_set_configs[atom_type,0,j]\n",
    "            orbital_class=basis_set_configs[atom_type,1,j]\n",
    "            orbital_num_gaussians=basis_set_configs[atom_type,2,j]\n",
    "            \n",
    "            # s-type orbital: one basis function spanning orbital_num_gaussians primitives.\n",
    "            if ((orbital_type==1 or orbital_type==2 or orbital_type==4) and orbital_class==1):\n",
    "                gaussian_functions_index_list[basis_functions_count]=num_gaussian_functions\n",
    "                atom_of_basisfunction[basis_functions_count]=i\n",
    "                type_of_basis_function[basis_functions_count]=j+1\n",
    "                num_gaussian_functions+=orbital_num_gaussians\n",
    "                basis_functions_count+=1\n",
    "\n",
    "            # p-type orbital: three basis functions (px,py,pz), each with its own primitive block.\n",
    "            elif ((orbital_type==3 or orbital_type==5) and orbital_class==1):\n",
    "                gaussian_functions_index_list[basis_functions_count:basis_functions_count+3]\\\n",
    "                                =[num_gaussian_functions,num_gaussian_functions+orbital_num_gaussians,num_gaussian_functions+2*orbital_num_gaussians]\n",
    "                atom_of_basisfunction[basis_functions_count:basis_functions_count+3]=[i,i,i]\n",
    "                type_of_basis_function[basis_functions_count:basis_functions_count+3]=[j+1,j+1,j+1]\n",
    "                num_gaussian_functions+=3*orbital_num_gaussians\n",
    "                basis_functions_count+=3\n",
    "    \n",
    "    # Sentinel entries so index_list[k+1]-index_list[k] always yields a length.\n",
    "    gaussian_functions_index_list[-1]=num_gaussian_functions\n",
    "    basis_functions_index_list[-1]=num_basis_functions\n",
    "\n",
    "    return num_gaussian_functions,basis_functions_index_list,gaussian_functions_index_list,atom_of_basisfunction,type_of_basis_function\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def sto_ng_functions_for_s_orbitals(gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count,\n",
    "                                    current_atom,current_element,coordinates,sto_precision,n_number):\n",
    "    \"\"\"\n",
    "    Fills in the STO-nG primitive Gaussians for one s-type orbital.\n",
    "    All sto_precision primitives are centred on the parent atom; coefficients and\n",
    "    exponents are looked up in the tabulated sto_coefs/sto_exps arrays by\n",
    "    element, contraction length and principal quantum number.\n",
    "    Returns the updated arrays together with the advanced primitive counter.\n",
    "    \"\"\"\n",
    "\n",
    "    start=gaussian_function_count\n",
    "    stop=start+sto_precision\n",
    "\n",
    "    # Every primitive of an s-orbital sits on the nucleus of the current atom.\n",
    "    gaussian_functions_coordinates[start:stop]=np.zeros((sto_precision,3),dtype=datatype)+coordinates[current_atom]\n",
    "    # Slot n_number-1 holds the parameters for this principal quantum number.\n",
    "    gaussian_functions_coefficients[start:stop]=sto_coefs[current_element,sto_precision,n_number-1,:sto_precision]\n",
    "    gaussian_functions_exponents[start:stop]=sto_exps[current_element,sto_precision,n_number-1,:sto_precision]\n",
    "\n",
    "    return gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,stop\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def sto_ng_functions_for_p_orbitals(gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count,\n",
    "                                    current_atom,current_element,coordinates,lobe_precision,n_number):\n",
    "    \"\"\"\n",
    "    Writes the lobe representation of one p-type orbital into the Gaussian-function arrays.\n",
    "    For each Cartesian axis, lobe_precision primitives are placed at the atom position shifted\n",
    "    along that axis by the tabulated lobe offsets; coefficients and exponents come from the\n",
    "    lobe_coefs/lobe_exps tables. Returns the updated arrays and the advanced counter.\n",
    "    \"\"\"\n",
    "    \n",
    "    n_number-=2\n",
    "    half_lobe_precision=int(lobe_precision/2)\n",
    "    # The offsets are identical for all three axes, so look them up once.\n",
    "    offsets=lobe_offsets[current_element,half_lobe_precision,n_number,:lobe_precision]\n",
    "    \n",
    "    for axis in range(3):\n",
    "\n",
    "        start=gaussian_function_count\n",
    "        stop=start+lobe_precision\n",
    "\n",
    "        # Start from the nucleus position and displace the active axis by the lobe offsets.\n",
    "        shifted=np.zeros((3,lobe_precision),dtype=datatype)\n",
    "        for dim in range(3):\n",
    "            shifted[dim]+=coordinates[current_atom,dim]\n",
    "        shifted[axis]-=offsets\n",
    "\n",
    "        gaussian_functions_coordinates[start:stop,0]=shifted[0]\n",
    "        gaussian_functions_coordinates[start:stop,1]=shifted[1]\n",
    "        gaussian_functions_coordinates[start:stop,2]=shifted[2]\n",
    "\n",
    "        # NOTE(review): coefficients/exponents are read at index n_number-2 while the offsets\n",
    "        # use n_number (after the initial shift) - mirrors the original table layout, confirm.\n",
    "        gaussian_functions_coefficients[start:stop]=lobe_coefs[current_element,half_lobe_precision,n_number-2,:lobe_precision]\n",
    "        gaussian_functions_exponents[start:stop]=lobe_exps[current_element,half_lobe_precision,n_number-2,:lobe_precision]\n",
    "\n",
    "        gaussian_function_count=stop\n",
    "    \n",
    "    return gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_gaussian_function_inputs(element_list,coordinates,num_atoms,num_gaussian_functions):\n",
    "    \"\"\"\n",
    "    Calculates the coordinates, coefficients and exponents for all Gaussian functions.\n",
    "    Walks over every atom and every orbital of its basis-set configuration and delegates the\n",
    "    array filling to sto_ng_functions_for_s_orbitals / sto_ng_functions_for_p_orbitals.\n",
    "    Inputs are elements, coordinates, number of atoms and total number of Gaussian functions;\n",
    "    returned are the filled coordinate, coefficient and exponent arrays.\n",
    "    \"\"\"\n",
    "    \n",
    "    gaussian_functions_coordinates=np.zeros((num_gaussian_functions,3),dtype=datatype)\n",
    "    gaussian_functions_coefficients=np.zeros((num_gaussian_functions),dtype=datatype)\n",
    "    gaussian_functions_exponents=np.zeros((num_gaussian_functions),dtype=datatype)\n",
    "    gaussian_function_count=0\n",
    "    \n",
    "    for i in range(num_atoms):\n",
    "        \n",
    "        # Basis-set table row for this element: 0 = H-He, 1 = Li-Ne, 2 = Na-Ar.\n",
    "        # NOTE(review): elements beyond Ar leave atom_type unset - confirm inputs are limited to Z<=18.\n",
    "        current_element=element_list[i]\n",
    "        if (current_element<=2): atom_type=0\n",
    "        elif (current_element<=10): atom_type=1\n",
    "        elif (current_element<=18): atom_type=2\n",
    "\n",
    "        num_atom_orbitals=basis_set_configs_len[atom_type]\n",
    "\n",
    "        for j in range(num_atom_orbitals):\n",
    "            \n",
    "            # Orbital descriptor from the basis-set tables; only orbital_class==1 entries are processed.\n",
    "            orbital_type=basis_set_configs[atom_type,0,j]\n",
    "            orbital_class=basis_set_configs[atom_type,1,j]\n",
    "            orbital_num_gaussians=basis_set_configs[atom_type,2,j]\n",
    "\n",
    "            # s orbitals: types 1/2/4 correspond to principal quantum numbers n=1/2/3.\n",
    "            if ((orbital_type==1 or orbital_type==2 or orbital_type==4) and orbital_class==1):\n",
    "\n",
    "                if (orbital_type==1): n_value=1\n",
    "                elif (orbital_type==2): n_value=2\n",
    "                elif (orbital_type==4): n_value=3\n",
    "\n",
    "                gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count\\\n",
    "                                =sto_ng_functions_for_s_orbitals(gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count,\n",
    "                                                                 i,current_element,coordinates,orbital_num_gaussians,n_value)\n",
    "\n",
    "            # p orbitals: types 3/5 correspond to principal quantum numbers n=2/3.\n",
    "            elif ((orbital_type==3 or orbital_type==5) and orbital_class==1):\n",
    "\n",
    "                if (orbital_type==3): n_value=2\n",
    "                elif (orbital_type==5): n_value=3\n",
    "                \n",
    "                gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count\\\n",
    "                                =sto_ng_functions_for_p_orbitals(gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_function_count,\n",
    "                                                                 i,current_element,coordinates,orbital_num_gaussians,n_value)\n",
    "            \n",
    "    return gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_num_orbital_parts(element_list,num_atoms,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Counts the total number of orbital parts and builds an index list that maps each basis\n",
    "    function to the index of its first orbital part. An s basis function owns one part; each\n",
    "    of the three p basis functions owns two parts (hence the stride of 2 in the index list\n",
    "    and the increment of 6 per p shell). The final entry of the index list holds the total\n",
    "    count, so consecutive entries delimit the parts of one basis function.\n",
    "    This information is needed for part-based approximations as used in the calculation of V_ee.\n",
    "    Inputs are the element list, number of atoms and number of basis functions; returned are\n",
    "    the total number of orbital parts and the orbital-part index list.\n",
    "    \"\"\"\n",
    "\n",
    "    basis_function_idx=0\n",
    "    part_total=0\n",
    "    orbital_parts_index_list=np.zeros(num_basis_functions+1,dtype='int32')\n",
    "    \n",
    "    for atom in range(num_atoms):\n",
    "\n",
    "        # Basis-set table row for this element (H-He / Li-Ne / Na-Ar).\n",
    "        element=element_list[atom]\n",
    "        if (element<=2): atom_type=0\n",
    "        elif (element<=10): atom_type=1\n",
    "        elif (element<=18): atom_type=2\n",
    "        \n",
    "        for orb in range(basis_set_configs_len[atom_type]):\n",
    "\n",
    "            orb_type=basis_set_configs[atom_type,0,orb]\n",
    "            orb_class=basis_set_configs[atom_type,1,orb]\n",
    "            \n",
    "            if (orb_class==1):\n",
    "                if (orb_type==1 or orb_type==2 or orb_type==4):\n",
    "                    # s orbital: a single part.\n",
    "                    orbital_parts_index_list[basis_function_idx]=part_total\n",
    "                    basis_function_idx+=1\n",
    "                    part_total+=1\n",
    "                elif (orb_type==3 or orb_type==5):\n",
    "                    # p orbital: three basis functions with two parts each.\n",
    "                    for comp in range(3):\n",
    "                        orbital_parts_index_list[basis_function_idx+comp]=part_total+2*comp\n",
    "                    basis_function_idx+=3\n",
    "                    part_total+=6\n",
    "    \n",
    "    orbital_parts_index_list[-1]=part_total\n",
    "\n",
    "    return part_total,orbital_parts_index_list\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_gaussians_for_orbital_parts(element_list,num_atoms,num_orbital_parts):\n",
    "    \"\"\"\n",
    "    Determines the total number of Gaussian functions over all orbital parts and builds an\n",
    "    index list holding, for every orbital part, the starting index of its Gaussian functions.\n",
    "    s parts use overlap_sto_precision Gaussians each; p parts use overlap_part_precision each\n",
    "    (six parts per p shell). The last entry holds the total, so neighbouring entries delimit\n",
    "    the Gaussians of a single part.\n",
    "    \"\"\"\n",
    "\n",
    "    gaussian_total=0\n",
    "    orbital_parts_gaussian_index_list=np.zeros(num_orbital_parts+1,dtype='int32')\n",
    "    part_idx=0\n",
    "\n",
    "    for atom in range(num_atoms):\n",
    "\n",
    "        # Basis-set table row for this element (H-He / Li-Ne / Na-Ar).\n",
    "        element=element_list[atom]\n",
    "        if (element<=2): atom_type=0\n",
    "        elif (element<=10): atom_type=1\n",
    "        elif (element<=18): atom_type=2\n",
    "\n",
    "        for orb in range(basis_set_configs_len[atom_type]):\n",
    "\n",
    "            orb_type=basis_set_configs[atom_type,0,orb]\n",
    "            orb_class=basis_set_configs[atom_type,1,orb]\n",
    "            \n",
    "            if (orb_class==1):\n",
    "                if (orb_type==1 or orb_type==2 or orb_type==4):\n",
    "                    # Single s part with overlap_sto_precision Gaussians.\n",
    "                    orbital_parts_gaussian_index_list[part_idx]=gaussian_total\n",
    "                    gaussian_total+=overlap_sto_precision\n",
    "                    part_idx+=1\n",
    "                elif (orb_type==3 or orb_type==5):\n",
    "                    # Six p parts, each with overlap_part_precision Gaussians.\n",
    "                    for k in range(6):\n",
    "                        orbital_parts_gaussian_index_list[part_idx+k]=gaussian_total+k*overlap_part_precision\n",
    "                    gaussian_total+=6*overlap_part_precision\n",
    "                    part_idx+=6\n",
    "        \n",
    "    orbital_parts_gaussian_index_list[-1]=gaussian_total\n",
    "\n",
    "    return gaussian_total,orbital_parts_gaussian_index_list\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def sto_ng_functions_for_s_orbital_parts(orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count,\n",
    "                                         current_atom,current_element,coordinates,parts_sto_precision,n_number):\n",
    "    \"\"\"\n",
    "    Writes the Gaussian data of one s-orbital part into the orbital-part arrays.\n",
    "    Fills parts_sto_precision slots starting at gaussian_function_count with the position of\n",
    "    the current atom, the tabulated contraction coefficients (sto_coefs) and the tabulated\n",
    "    exponents (sto_exps); the same exponent is stored for all three spatial dimensions.\n",
    "    Returns the updated arrays and the advanced Gaussian-function counter.\n",
    "    \"\"\"\n",
    "    \n",
    "    start=gaussian_function_count\n",
    "    stop=start+parts_sto_precision\n",
    "    \n",
    "    for dim in range(3):\n",
    "        orbital_parts_coordinates[start:stop,dim]=coordinates[current_atom,dim]\n",
    "        # An s part is isotropic: identical exponents in x, y and z.\n",
    "        orbital_parts_exponents[start:stop,dim]=sto_exps[current_element,parts_sto_precision,n_number-1,:parts_sto_precision]\n",
    "    orbital_parts_coefficients[start:stop]=sto_coefs[current_element,parts_sto_precision,n_number-1,:parts_sto_precision]\n",
    "    \n",
    "    return orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,stop\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def sto_ng_functions_for_p_orbital_parts(orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count,\n",
    "                                         current_atom,current_element,coordinates,parts_sto_precision,n_number):\n",
    "    \"\"\"\n",
    "    Writes the Gaussian data of the six parts of one p-orbital (two signed lobes per\n",
    "    Cartesian axis) into the orbital-part arrays. Each lobe sits at the nucleus displaced\n",
    "    along its axis by the tabulated offset, carries a signed coefficient and uses the\n",
    "    axis-specific exponent column of the orbital_parts_exps table.\n",
    "    Returns the updated arrays and the advanced Gaussian-function counter.\n",
    "    \"\"\"\n",
    "    \n",
    "    n_number-=2\n",
    "    \n",
    "    for lobe in range(6):\n",
    "\n",
    "        axis=lobe//2            # lobes 0/1 -> x, 2/3 -> y, 4/5 -> z\n",
    "        sign=(-1)**(lobe+1)     # even lobe index: negative side, odd: positive side\n",
    "\n",
    "        start=gaussian_function_count\n",
    "        stop=start+parts_sto_precision\n",
    "\n",
    "        for dim in range(3):\n",
    "            orbital_parts_coordinates[start:stop,dim]=coordinates[current_atom,dim]\n",
    "        orbital_parts_coordinates[start:stop,axis]+=sign*orbital_parts_offsets[current_element,n_number]\n",
    "\n",
    "        orbital_parts_coefficients[start:stop]=sign*orbital_parts_coeffs[current_element,n_number]\n",
    "\n",
    "        # Exponent column 1 holds the exponent along the lobe axis, column 0 the two others.\n",
    "        for dim in range(3):\n",
    "            column=1 if dim==axis else 0\n",
    "            orbital_parts_exponents[start:stop,dim]=orbital_parts_exps[current_element,n_number,:,column]\n",
    "\n",
    "        gaussian_function_count=stop\n",
    "    \n",
    "    return orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_orbital_parts_preprocessing(element_list,coordinates,num_parts_gaussian_functions,num_atoms):\n",
    "    \"\"\"\n",
    "    Generates arrays of the coordinates, coefficients and exponents for all orbital parts.\n",
    "    The entries describe the Gaussian functions which characterize the orbital parts; the\n",
    "    actual filling is delegated to sto_ng_functions_for_s_orbital_parts / ..._p_orbital_parts.\n",
    "    \"\"\"\n",
    "\n",
    "    orbital_parts_coordinates=np.zeros((num_parts_gaussian_functions,3),dtype=datatype)\n",
    "    orbital_parts_coefficients=np.zeros((num_parts_gaussian_functions),dtype=datatype)\n",
    "    orbital_parts_exponents=np.zeros((num_parts_gaussian_functions,3),dtype=datatype)\n",
    "    gaussian_function_count=0\n",
    "\n",
    "    for i in range(num_atoms):\n",
    "        \n",
    "        # Basis-set table row for this element: 0 = H-He, 1 = Li-Ne, 2 = Na-Ar.\n",
    "        current_element=element_list[i]\n",
    "        if (current_element<=2): atom_type=0\n",
    "        elif (current_element<=10): atom_type=1\n",
    "        elif (current_element<=18): atom_type=2\n",
    "\n",
    "        num_atom_orbitals=basis_set_configs_len[atom_type]\n",
    "\n",
    "        for j in range(num_atom_orbitals):\n",
    "            \n",
    "            orbital_type=basis_set_configs[atom_type,0,j]\n",
    "            orbital_class=basis_set_configs[atom_type,1,j]\n",
    "\n",
    "            # s orbitals (types 1/2/4 -> n=1/2/3); note that the fixed precision\n",
    "            # parts_sto_precision_1s is used here rather than a per-orbital Gaussian count.\n",
    "            if ((orbital_type==1 or orbital_type==2 or orbital_type==4) and orbital_class==1):\n",
    "\n",
    "                if (orbital_type==1): n_value=1\n",
    "                elif (orbital_type==2): n_value=2\n",
    "                elif (orbital_type==4): n_value=3\n",
    "\n",
    "                orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count\\\n",
    "                                =sto_ng_functions_for_s_orbital_parts(orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count,\n",
    "                                                                      i,current_element,coordinates,parts_sto_precision_1s,n_value)\n",
    "\n",
    "            # p orbitals (types 3/5 -> n=2/3) with the fixed precision parts_sto_precision_2p.\n",
    "            elif ((orbital_type==3 or orbital_type==5) and orbital_class==1):\n",
    "\n",
    "                if (orbital_type==3): n_value=2\n",
    "                elif (orbital_type==5): n_value=3\n",
    "                \n",
    "                orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count\\\n",
    "                                =sto_ng_functions_for_p_orbital_parts(orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents,gaussian_function_count,\n",
    "                                                                      i,current_element,coordinates,parts_sto_precision_2p,n_value) \n",
    "    \n",
    "    return orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def asymmetric_overlap_integral_reduced(gaussian_functions_coefficients_i,gaussian_functions_coefficients_j,\n",
    "                                        gaussian_functions_exponents_i_x,gaussian_functions_exponents_j_x,\n",
    "                                        gaussian_functions_exponents_i_y,gaussian_functions_exponents_j_y,\n",
    "                                        gaussian_functions_exponents_i_z,gaussian_functions_exponents_j_z,\n",
    "                                        gaussian_functions_coordinates_i_x,gaussian_functions_coordinates_i_y,gaussian_functions_coordinates_i_z,\n",
    "                                        gaussian_functions_coordinates_j_x,gaussian_functions_coordinates_j_y,gaussian_functions_coordinates_j_z):\n",
    "    \"\"\"\n",
    "    Overlap integral of two Gaussian functions that may carry a different exponent for each\n",
    "    spatial dimension (used for orbital parts, where one dimension has a distinct exponent).\n",
    "    Per dimension d: <i|j>_d = sqrt(pi/(a_d+b_d)) * exp(-a_d*b_d/(a_d+b_d)*(r_i_d-r_j_d)^2);\n",
    "    the full integral is the product over the three dimensions, scaled by both coefficients.\n",
    "    \"\"\"\n",
    "    \n",
    "    weight=gaussian_functions_coefficients_i*gaussian_functions_coefficients_j\n",
    "\n",
    "    # Per-dimension exponent sums and reduced exponents a*b/(a+b).\n",
    "    sum_x=gaussian_functions_exponents_i_x+gaussian_functions_exponents_j_x\n",
    "    reduced_x=gaussian_functions_exponents_i_x*gaussian_functions_exponents_j_x/sum_x\n",
    "    sum_y=gaussian_functions_exponents_i_y+gaussian_functions_exponents_j_y\n",
    "    reduced_y=gaussian_functions_exponents_i_y*gaussian_functions_exponents_j_y/sum_y\n",
    "    sum_z=gaussian_functions_exponents_i_z+gaussian_functions_exponents_j_z\n",
    "    reduced_z=gaussian_functions_exponents_i_z*gaussian_functions_exponents_j_z/sum_z\n",
    "\n",
    "    dx=gaussian_functions_coordinates_i_x-gaussian_functions_coordinates_j_x\n",
    "    dy=gaussian_functions_coordinates_i_y-gaussian_functions_coordinates_j_y\n",
    "    dz=gaussian_functions_coordinates_i_z-gaussian_functions_coordinates_j_z\n",
    "\n",
    "    gauss=np.exp(-reduced_x*dx*dx-reduced_y*dy*dy-reduced_z*dz*dz)\n",
    "\n",
    "    return weight*np.sqrt(np.pi/sum_x)*np.sqrt(np.pi/sum_y)*np.sqrt(np.pi/sum_z)*gauss\n",
    "\n",
    "\n",
    "@njit(parallel=False,fastmath=True)\n",
    "def calculate_relevant_densities(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                 orbital_parts_index_list,orbital_parts_gaussian_index_list,atom_of_basisfunction,gaussian_functions_index_list,coordinates,num_gaussian_functions,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Evaluates which densities (basis-function pairs i<=j) are relevant for the density screening.\n",
    "    The relevance of a pair is the sum of the absolute overlaps of all orbital-part combinations\n",
    "    of the two basis functions; each part overlap is itself the sum of asymmetric overlap\n",
    "    integrals over all Gaussian combinations of the two parts. Pairs whose atoms are further\n",
    "    apart than a fixed distance threshold are skipped outright. For every pair that exceeds\n",
    "    density_threshold the (i,j) indices and all contributing Gaussian index pairs are recorded.\n",
    "    Works faster with parallel=False.\n",
    "    Fix: the relevance of each candidate pair is now accumulated in a local variable, so values\n",
    "    from pairs that failed the threshold no longer leak into the reused matrix slot of the next\n",
    "    candidate and artificially inflate its relevance.\n",
    "    \"\"\"\n",
    "    \n",
    "    relevance_matrix=np.zeros((int(num_basis_functions*(num_basis_functions+1)/2)),dtype=datatype)\n",
    "    relevance_matrix_derivative=0\n",
    "    ij_list_no_duplicates=np.zeros((int((num_basis_functions*(num_basis_functions+1))/2.0),2),dtype='int32')\n",
    "    # Worst-case sizing: full Gaussian pair product over all surviving densities (not triangular).\n",
    "    gaussians_for_densities=np.zeros((int((num_gaussian_functions*(num_gaussian_functions+1))/1.0),2),dtype='int32')\n",
    "    gaussians_for_densities_index_list=np.zeros(int((num_basis_functions*(num_basis_functions+1))/2.0),dtype='int32')\n",
    "    count_no_duplicates=0\n",
    "    gaussians_count_no_duplicates=0\n",
    "\n",
    "    dist_threshold=10.0\n",
    "    dist_threshold_squared=dist_threshold**2\n",
    "\n",
    "    for i in range(num_basis_functions):\n",
    "        for j in range(i,num_basis_functions):\n",
    "\n",
    "            x_dist=coordinates[atom_of_basisfunction[i],0]-coordinates[atom_of_basisfunction[j],0]\n",
    "            y_dist=coordinates[atom_of_basisfunction[i],1]-coordinates[atom_of_basisfunction[j],1]\n",
    "            z_dist=coordinates[atom_of_basisfunction[i],2]-coordinates[atom_of_basisfunction[j],2]\n",
    "\n",
    "            # Cheap per-axis pre-checks before the exact squared-distance test.\n",
    "            if (np.abs(x_dist)>dist_threshold):\n",
    "                continue\n",
    "            if (np.abs(y_dist)>dist_threshold):\n",
    "                continue\n",
    "            if (np.abs(z_dist)>dist_threshold):\n",
    "                continue\n",
    "            if (x_dist*x_dist+y_dist*y_dist+z_dist*z_dist>dist_threshold_squared):\n",
    "                continue\n",
    "\n",
    "            # Accumulate locally; the slot in relevance_matrix is only written on success.\n",
    "            relevance_value=0.0\n",
    "\n",
    "            for pi in range(orbital_parts_index_list[i],orbital_parts_index_list[i+1]):\n",
    "                for pj in range(orbital_parts_index_list[j],orbital_parts_index_list[j+1]):\n",
    "                    orbital_parts_overlap=0\n",
    "\n",
    "                    # prange degenerates to range because parallel=False.\n",
    "                    for gi in prange(orbital_parts_gaussian_index_list[pi],orbital_parts_gaussian_index_list[pi+1]):\n",
    "                        for gj in prange(orbital_parts_gaussian_index_list[pj],orbital_parts_gaussian_index_list[pj+1]):\n",
    "                            integral_value=asymmetric_overlap_integral_reduced(gaussian_functions_coefficients[gi],gaussian_functions_coefficients[gj],\n",
    "                                                                               gaussian_functions_exponents[gi,0],gaussian_functions_exponents[gj,0],\n",
    "                                                                               gaussian_functions_exponents[gi,1],gaussian_functions_exponents[gj,1],\n",
    "                                                                               gaussian_functions_exponents[gi,2],gaussian_functions_exponents[gj,2],\n",
    "                                                                               gaussian_functions_coordinates[gi,0],gaussian_functions_coordinates[gi,1],gaussian_functions_coordinates[gi,2],\n",
    "                                                                               gaussian_functions_coordinates[gj,0],gaussian_functions_coordinates[gj,1],gaussian_functions_coordinates[gj,2])\n",
    "                            orbital_parts_overlap+=integral_value\n",
    "\n",
    "                    # Add the absolute value of the part-pair overlap.\n",
    "                    if (orbital_parts_overlap>=0):\n",
    "                        relevance_value+=orbital_parts_overlap\n",
    "                    else:\n",
    "                        relevance_value-=orbital_parts_overlap\n",
    "\n",
    "            if (relevance_value>density_threshold):\n",
    "                relevance_matrix[count_no_duplicates]=relevance_value\n",
    "                ij_list_no_duplicates[count_no_duplicates,0],ij_list_no_duplicates[count_no_duplicates,1]=i,j\n",
    "                count_no_duplicates+=1\n",
    "\n",
    "                # Record every Gaussian pair contributing to this relevant density.\n",
    "                for gi in range(gaussian_functions_index_list[i],gaussian_functions_index_list[i+1]):\n",
    "                    for gj in range(gaussian_functions_index_list[j],gaussian_functions_index_list[j+1]):\n",
    "                        gaussians_for_densities[gaussians_count_no_duplicates,0],gaussians_for_densities[gaussians_count_no_duplicates,1]=gi,gj\n",
    "                        gaussians_count_no_duplicates+=1\n",
    "                \n",
    "                gaussians_for_densities_index_list[count_no_duplicates]=gaussians_count_no_duplicates\n",
    "\n",
    "    # Trim the oversized buffers to the entries actually used.\n",
    "    relevance_matrix=relevance_matrix[:count_no_duplicates]\n",
    "    ij_list_no_duplicates=ij_list_no_duplicates[:count_no_duplicates]\n",
    "    gaussians_for_densities=gaussians_for_densities[:gaussians_count_no_duplicates]\n",
    "    gaussians_for_densities_index_list=gaussians_for_densities_index_list[:count_no_duplicates+1]\n",
    "\n",
    "    return ij_list_no_duplicates,gaussians_for_densities,gaussians_for_densities_index_list,gaussians_count_no_duplicates,count_no_duplicates,relevance_matrix,relevance_matrix_derivative\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_center_of_mass(coordinates,elements,num_atoms):\n",
    "    \"\"\"\n",
    "    Computes the mass-weighted average of the nuclear positions.\n",
    "    The weight of each nucleus is looked up per element in the atomic_masses table\n",
    "    (see variables section). Inputs are nuclei coordinates, elements and number of atoms;\n",
    "    returned is the center of mass as a 3-vector.\n",
    "    \"\"\"\n",
    "\n",
    "    weighted_sum=np.zeros(3,dtype=datatype)\n",
    "    total_mass=0.0\n",
    "    for atom in range(num_atoms):\n",
    "        mass=atomic_masses[elements[atom]]\n",
    "        weighted_sum+=mass*coordinates[atom]\n",
    "        total_mass+=mass\n",
    "    return weighted_sum/total_mass\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "--------------------------\n",
    "hyp1f1 function evaluation\n",
    "--------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def erf(x):\n",
    "    \"\"\"\n",
    "    Calculates a high-accuracy rational approximation for the error function erf(x) for x>=0.\n",
    "    Has a maximum error around 1.0e-14. Only use for calculations which have to be very accurate.\n",
    "    Input: x (x>=0)\n",
    "    Returns: erf(x)\n",
    "    \"\"\"\n",
    "\n",
    "    x2=x*x\n",
    "    # exp(-x^2) damps the rational approximation; it is used internally but not returned.\n",
    "    exp_val=np.exp(-x2)\n",
    "    erf_val=1-exp_val*(0.56418958354775629/(x+2.06955023132914151)*\\\n",
    "            (x2+2.71078540045147805*x+ 5.80755613130301624)/(x2+3.47954057099518960*x+12.06166887286239555)*\\\n",
    "            (x2+3.47469513777439592*x+12.07402036406381411)/(x2+3.72068443960225092*x+ 8.44319781003968454)*\\\n",
    "            (x2+4.00561509202259545*x+ 9.30596659485887898)/(x2+3.90225704029924078*x+ 6.36161630953880464)*\\\n",
    "            (x2+5.16722705817812584*x+ 9.12661617673673262)/(x2+4.03296893109262491*x+ 5.13578530585681539)*\\\n",
    "            (x2+5.95908795446633271*x+ 9.19435612886969243)/(x2+4.11240942957450885*x+ 4.48640329523408675))\n",
    "\n",
    "    return erf_val\n",
    "\n",
    "@njit\n",
    "def erf2(x):\n",
    "    \"\"\"\n",
    "    Rational (Abramowitz-Stegun style) approximation of the error function erf(x) for x>=0.\n",
    "    Less accurate than erf() - the maximum error is around 1.0e-7, which is still good\n",
    "    enough for most calculations.\n",
    "    Input: x (x>=0)\n",
    "    Returns: erf(x), exp(-x^2)\n",
    "    \"\"\"\n",
    "\n",
    "    u=1.0/(1.0+0.3275911*x)\n",
    "    xx,uu=x*x,u*u\n",
    "    gauss=np.exp(-xx)\n",
    "    # Fifth-order polynomial in u=1/(1+p*x), damped by exp(-x^2).\n",
    "    poly=0.254829592*u-0.284496736*uu+1.421413741*u*uu-1.453152027*uu*uu+1.061405429*uu*uu*u\n",
    "    \n",
    "    return 1-poly*gauss,gauss\n",
    "\n",
    "@njit\n",
    "def erf2_val(x):\n",
    "    \"\"\"\n",
    "    Calculates an approximation for the error function erf(x) for x>=0.\n",
    "    Same approximation as erf2() (maximum error around 1.0e-7), but unlike erf2()\n",
    "    only the erf value is returned, not exp(-x^2).\n",
    "    Input: x (x>=0)\n",
    "    Returns: erf(x)\n",
    "    \"\"\"\n",
    "\n",
    "    t=1.0/(1.0+0.3275911*x)\n",
    "    x2,t2=x*x,t*t\n",
    "    erf_val=1-(0.254829592*t-0.284496736*t2+1.421413741*t*t2-1.453152027*t2*t2+1.061405429*t2*t2*t)*np.exp(-x2)\n",
    "    \n",
    "    return erf_val\n",
    "\n",
    "# sqrt(pi) prefactor for the full hyp1f1 expression - not used inside erf_hyp1f1_with_arrays\n",
    "# itself; presumably applied by the caller - verify before removing.\n",
    "prefactor_hyp1f1=np.sqrt(np.pi) \n",
    "@njit\n",
    "def erf_hyp1f1_with_arrays(x):\n",
    "    \"\"\"\n",
    "    Approximates the reduced confluent hypergeometric value erf(sqrt(x))/sqrt(x).\n",
    "    The full relation is hyp1f1(x)=sqrt(pi)*erf(sqrt(x))/(2*sqrt(x)); the factor sqrt(pi)/2\n",
    "    is NOT applied here (see prefactor_hyp1f1 above).\n",
    "    For x>16, erf(sqrt(x)) is 1 to within double precision, so 1/sqrt(x) is returned directly.\n",
    "    Input is x, x>=0 and\n",
    "    returned is erf(sqrt(x))/sqrt(x)\n",
    "    \"\"\"\n",
    "    \n",
    "    if (x>16):\n",
    "        return 1.0/np.sqrt(x)\n",
    "    else:\n",
    "        # Small shift avoids division by zero at x=0.\n",
    "        sx=np.sqrt(x+1.0e-7)\n",
    "        erf_val=erf2_val(sx)\n",
    "        return erf_val/sx\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-----------------------------------------------------\n",
    "Overlap, kinetic and nuclei matrices and ionic energy\n",
    "-----------------------------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "@njit(fastmath=True)\n",
    "def overlap_and_kinetic_integral(gaussian_functions_coefficients_i,gaussian_functions_coefficients_j,gaussian_functions_exponents_i,gaussian_functions_exponents_j,\n",
    "                                 gaussian_functions_coordinates_i_x,gaussian_functions_coordinates_i_y,gaussian_functions_coordinates_i_z,\n",
    "                                 gaussian_functions_coordinates_j_x,gaussian_functions_coordinates_j_y,gaussian_functions_coordinates_j_z):\n",
    "    \"\"\"\n",
    "    Calculates the overlap integral and the kinetic integral of two Gaussian functions i and j as well as their derivatives with respect to a change in the coordinates of the Gaussian functions i and j.\n",
    "    In addition to this, prefactors for the nuclei integrals are calculated.\n",
    "    Additionally, the center coordinates/weighted coordinates of the new Gaussian functions, which are created by the combination of the Gaussian functions i and j, are calculated.\n",
    "    Inputs are coefficients (i/j), exponents (i/j), coordinates (i/j) and \n",
    "    returned will be the overlap integral <i|j>, its derivatives in x/y/z-direction,\n",
    "    the kinetic integral <i|T^|j>, its derivatives in x/y/z-direction,\n",
    "    the nuclei prefactor ~<i|V_en_atom|j>,\n",
    "    the x/y/z-coordinate of the center coordinates (Gaussian center) and |c_i*c_j| as a weight.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Gaussian product theorem quantities: p=a_i+a_j and mu=a_i*a_j/(a_i+a_j).\n",
    "    prefactor=gaussian_functions_coefficients_i*gaussian_functions_coefficients_j\n",
    "    exp_sum=gaussian_functions_exponents_i+gaussian_functions_exponents_j\n",
    "    exp_product=gaussian_functions_exponents_i*gaussian_functions_exponents_j\n",
    "    product_sum_quotient=exp_product/exp_sum\n",
    "\n",
    "    distance_x=gaussian_functions_coordinates_i_x-gaussian_functions_coordinates_j_x\n",
    "    distance_y=gaussian_functions_coordinates_i_y-gaussian_functions_coordinates_j_y\n",
    "    distance_z=gaussian_functions_coordinates_i_z-gaussian_functions_coordinates_j_z\n",
    "    coordinate_distance=distance_x*distance_x+distance_y*distance_y+distance_z*distance_z\n",
    "\n",
    "    # Overlap of two s-type Gaussians: S=c_i*c_j*(pi/p)^(3/2)*exp(-mu*R^2), R^2=coordinate_distance.\n",
    "    pi_divided_by_sum=np.pi/exp_sum\n",
    "    result_s=prefactor*pi_divided_by_sum*np.sqrt(pi_divided_by_sum)*np.exp(-product_sum_quotient*coordinate_distance)\n",
    "\n",
    "    # Kinetic integral T=S*mu*(3-2*mu*R^2); note that a_j*(1-a_j/p)=a_i*a_j/p=mu.\n",
    "    result_t=3.0*result_s*gaussian_functions_exponents_j*(1.0-gaussian_functions_exponents_j/exp_sum)-\\\n",
    "             2.0*result_s*product_sum_quotient*product_sum_quotient*coordinate_distance\n",
    "\n",
    "    # dS/dx_i=-2*mu*(x_i-x_j)*S; j-derivatives are the negatives (translation invariance).\n",
    "    derivative_const_part=-2.0*product_sum_quotient*result_s\n",
    "    derivative_s_i_x=derivative_const_part*distance_x\n",
    "    derivative_s_i_y=derivative_const_part*distance_y\n",
    "    derivative_s_i_z=derivative_const_part*distance_z\n",
    "    derivative_s_j_x,derivative_s_j_y,derivative_s_j_z=-derivative_s_i_x,-derivative_s_i_y,-derivative_s_i_z\n",
    "\n",
    "\n",
    "    # Product rule for T=S*(t1+t2): reuse dS/dx_i and add the derivative of the R^2 term.\n",
    "    derivative_const_part_t_1=3.0*gaussian_functions_exponents_j*(1.0-gaussian_functions_exponents_j/exp_sum)\n",
    "    derivative_const_part_t_2=-2.0*product_sum_quotient*product_sum_quotient*coordinate_distance\n",
    "    derivative_const_part_t_3=-2.0*result_s*product_sum_quotient*product_sum_quotient\n",
    "    derivative_t_i_x=(derivative_const_part_t_1+derivative_const_part_t_2)*derivative_s_i_x+derivative_const_part_t_3*2.0*distance_x\n",
    "    derivative_t_i_y=(derivative_const_part_t_1+derivative_const_part_t_2)*derivative_s_i_y+derivative_const_part_t_3*2.0*distance_y\n",
    "    derivative_t_i_z=(derivative_const_part_t_1+derivative_const_part_t_2)*derivative_s_i_z+derivative_const_part_t_3*2.0*distance_z\n",
    "    derivative_t_j_x,derivative_t_j_y,derivative_t_j_z=-derivative_t_i_x,-derivative_t_i_y,-derivative_t_i_z\n",
    "\n",
    "    # Prefactor for the nuclei attraction integrals (sign included); the hyp1f1 factor is applied later.\n",
    "    result_n=-prefactor*(2.0*np.pi/exp_sum)*np.exp(-product_sum_quotient*coordinate_distance)\n",
    "\n",
    "    # Exponent-weighted center of the product Gaussian.\n",
    "    weighted_coords_ij_x=(gaussian_functions_exponents_i*gaussian_functions_coordinates_i_x+gaussian_functions_exponents_j*gaussian_functions_coordinates_j_x)/exp_sum\n",
    "    weighted_coords_ij_y=(gaussian_functions_exponents_i*gaussian_functions_coordinates_i_y+gaussian_functions_exponents_j*gaussian_functions_coordinates_j_y)/exp_sum\n",
    "    weighted_coords_ij_z=(gaussian_functions_exponents_i*gaussian_functions_coordinates_i_z+gaussian_functions_exponents_j*gaussian_functions_coordinates_j_z)/exp_sum\n",
    "\n",
    "    return result_s,derivative_s_i_x,derivative_s_i_y,derivative_s_i_z,derivative_s_j_x,derivative_s_j_y,derivative_s_j_z,\\\n",
    "           result_t,derivative_t_i_x,derivative_t_i_y,derivative_t_i_z,derivative_t_j_x,derivative_t_j_y,derivative_t_j_z,\\\n",
    "           result_n,weighted_coords_ij_x,weighted_coords_ij_y,weighted_coords_ij_z,np.abs(prefactor)\n",
    "\n",
    "\n",
    "\n",
    "@njit(fastmath=True,parallel=True)\n",
    "def calculate_overlap_and_kinetic_matrix(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                         gaussian_functions_index_list,gaussians_for_densities,gaussians_for_densities_index_list,ij_list_no_duplicates,\n",
    "                                         num_gaussians_for_densities,relevant_densities_no_duplicates,num_basis_functions,num_gaussian_functions):\n",
    "    \"\"\"\n",
    "    Calculates the overlap matrix with all basis functions i and j which can be decomposed into Gaussian functions gi and gj.\n",
    "    Inputs are coordinates, coefficients, exponents of the Gaussian functions,\n",
    "    basis function <-> Gaussian function list, basis function <-> atom list, number of atoms/basis functions and Gaussian functions.\n",
    "    This function will return the overlap matrix S with S_ij = <phi_i|phi_j> and its derivatives w.r.t. phi_i/phi_j,\n",
    "    the kinetic matrix T with T_ij = <phi_i|-1/2*d^2/dr^2|phi_j> and its derivatives w.r.t. phi_i/phi_j,\n",
    "    a nuclei prefactor matrix which is used for the computation of the nuclei potential matrix V_en,\n",
    "    the center coordinates of all combinations of two Gaussian functions (also used later) and\n",
    "    the |c_gi*c_gj|-weighted average center of every relevant density.\n",
    "    \"\"\"\n",
    "\n",
    "    overlap_matrix=np.zeros((relevant_densities_no_duplicates),dtype=datatype)\n",
    "    kinetic_matrix=np.zeros((relevant_densities_no_duplicates),dtype=datatype)\n",
    "    overlap_matrix_derivative=np.zeros((relevant_densities_no_duplicates,6),dtype=datatype)\n",
    "    kinetic_matrix_derivative=np.zeros((relevant_densities_no_duplicates,6),dtype=datatype)\n",
    "    nuclei_prefactor=np.zeros((num_gaussians_for_densities),dtype=datatype)\n",
    "    weighted_coords=np.zeros((num_gaussians_for_densities,3),dtype=datatype)\n",
    "    orbitals_center_coords=np.zeros((relevant_densities_no_duplicates,3),dtype=datatype)\n",
    "\n",
    "    # prange instead of range: the decorator requests parallel=True, and every iteration d\n",
    "    # only writes to its own index d (resp. its own gd range), so the loop is race-free.\n",
    "    # NOTE: ij_list_no_duplicates is kept in the signature for interface compatibility\n",
    "    # but is not needed inside this function.\n",
    "    for d in prange(relevant_densities_no_duplicates):\n",
    "        prefactor_sum=0.0\n",
    "        for gd in range(gaussians_for_densities_index_list[d],gaussians_for_densities_index_list[d+1]):\n",
    "            gi,gj=gaussians_for_densities[gd,0],gaussians_for_densities[gd,1]\n",
    "\n",
    "            integral_value_s,derivative_s_i_x,derivative_s_i_y,derivative_s_i_z,\\\n",
    "                            derivative_s_j_x,derivative_s_j_y,derivative_s_j_z,\\\n",
    "            integral_value_t,derivative_t_i_x,derivative_t_i_y,derivative_t_i_z,\\\n",
    "                            derivative_t_j_x,derivative_t_j_y,derivative_t_j_z,\\\n",
    "            integral_value_n,weighted_coords_ij_x,weighted_coords_ij_y,weighted_coords_ij_z,prefactor \\\n",
    "                            =overlap_and_kinetic_integral(gaussian_functions_coefficients[gi],gaussian_functions_coefficients[gj],gaussian_functions_exponents[gi],gaussian_functions_exponents[gj],\n",
    "                                                          gaussian_functions_coordinates[gi,0],gaussian_functions_coordinates[gi,1],gaussian_functions_coordinates[gi,2],\n",
    "                                                          gaussian_functions_coordinates[gj,0],gaussian_functions_coordinates[gj,1],gaussian_functions_coordinates[gj,2])\n",
    "            \n",
    "            overlap_matrix[d]+=integral_value_s\n",
    "            kinetic_matrix[d]+=integral_value_t\n",
    "            nuclei_prefactor[gd]=integral_value_n\n",
    "            prefactor_sum+=prefactor\n",
    "\n",
    "            overlap_matrix_derivative[d,0]+=derivative_s_i_x\n",
    "            overlap_matrix_derivative[d,1]+=derivative_s_i_y\n",
    "            overlap_matrix_derivative[d,2]+=derivative_s_i_z\n",
    "            overlap_matrix_derivative[d,3]+=derivative_s_j_x\n",
    "            overlap_matrix_derivative[d,4]+=derivative_s_j_y\n",
    "            overlap_matrix_derivative[d,5]+=derivative_s_j_z\n",
    "\n",
    "            kinetic_matrix_derivative[d,0]+=derivative_t_i_x\n",
    "            kinetic_matrix_derivative[d,1]+=derivative_t_i_y\n",
    "            kinetic_matrix_derivative[d,2]+=derivative_t_i_z\n",
    "            kinetic_matrix_derivative[d,3]+=derivative_t_j_x\n",
    "            kinetic_matrix_derivative[d,4]+=derivative_t_j_y\n",
    "            kinetic_matrix_derivative[d,5]+=derivative_t_j_z\n",
    "\n",
    "            # Accumulate the |c_gi*c_gj|-weighted Gaussian centers; normalized below.\n",
    "            orbitals_center_coords[d,0]+=prefactor*weighted_coords_ij_x\n",
    "            orbitals_center_coords[d,1]+=prefactor*weighted_coords_ij_y\n",
    "            orbitals_center_coords[d,2]+=prefactor*weighted_coords_ij_z\n",
    "\n",
    "            weighted_coords[gd,0]=weighted_coords_ij_x\n",
    "            weighted_coords[gd,1]=weighted_coords_ij_y\n",
    "            weighted_coords[gd,2]=weighted_coords_ij_z\n",
    "        \n",
    "        orbitals_center_coords[d,0]/=prefactor_sum\n",
    "        orbitals_center_coords[d,1]/=prefactor_sum\n",
    "        orbitals_center_coords[d,2]/=prefactor_sum\n",
    "\n",
    "    return overlap_matrix,overlap_matrix_derivative,kinetic_matrix,kinetic_matrix_derivative,nuclei_prefactor,weighted_coords,orbitals_center_coords\n",
    "\n",
    "\n",
    "\n",
    "@njit(fastmath=True)\n",
    "def nuclei_integral(prefactor_n,exp_sum,weighted_coords_x,weighted_coords_y,weighted_coords_z,atom_coords_x,atom_coords_y,atom_coords_z,element):\n",
    "    \"\"\"\n",
    "    Calculates a nuclei integral n_ija of the form <phi_i|Z_a/(r-r_a)|phi_j>.\n",
    "    Inputs are a precomputed prefactor, the Gaussian exponent sum,\n",
    "    the center coordinates/weighted coordinates of the combination of the two Gaussians,\n",
    "    the coordinates of the atom a and the element (nuclear charge Z_a) of the atom a.\n",
    "    Returned is the nuclei integral n_ija only; derivatives are not computed here.\n",
    "    \"\"\"\n",
    "\n",
    "    # Squared distance between the Gaussian product center and the nucleus.\n",
    "    distance_x=weighted_coords_x-atom_coords_x\n",
    "    distance_y=weighted_coords_y-atom_coords_y\n",
    "    distance_z=weighted_coords_z-atom_coords_z\n",
    "    atom_distance=distance_x*distance_x+distance_y*distance_y+distance_z*distance_z\n",
    "    # Boys-function-like factor via the erf-based hyp1f1 approximation.\n",
    "    hyp1f1_value=nuclei_hyp1f1_prefactor*erf_hyp1f1_with_arrays(exp_sum*atom_distance)\n",
    "\n",
    "    result_n=prefactor_n*element*hyp1f1_value\n",
    "\n",
    "    return result_n \n",
    "\n",
    "\n",
    "@njit(fastmath=True,parallel=True)\n",
    "def calculate_nuclei_matrix_and_derivatives(gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                            ij_list_no_duplicates,gaussians_for_densities,gaussians_for_densities_index_list,num_gaussians_for_densities,relevant_densities_no_duplicates,\n",
    "                                            gaussian_functions_index_list,atom_of_basisfunction,coordinates,elements,\n",
    "                                            relevance_matrix,relevance_matrix_derivative,\n",
    "                                            nuclei_prefactor,weighted_coords,orbitals_center_coords,num_atoms,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Calculates the nuclei potential matrix V_en or N with V_en_ij=N_ij=Sum_a[n_ija] where n_ija are calculated in the function nuclei_integral().\n",
    "    Instead of a double loop over all combinations of two basis functions, only the relevant\n",
    "    densities (tabulated in ij_list_no_duplicates) are visited.\n",
    "    Every contribution is damped by two smoothstep relevance factors: one based on the density\n",
    "    relevance (relevance_matrix) and one based on the distance between the midpoint of the two\n",
    "    atoms carrying the basis functions and the nucleus (Coulomb cut-off).\n",
    "    Returned are the nuclei potential matrix V_en and two all-zero dummy arrays that keep the\n",
    "    return signature stable; the derivatives are not computed in this version.\n",
    "    \"\"\"\n",
    "\n",
    "    nuclei_matrix=np.zeros((relevant_densities_no_duplicates),dtype=datatype)\n",
    "    nuclei_matrix_nuclei_derivative=np.zeros((1,1,6),dtype=datatype)\n",
    "    nuclei_matrix_wavefunction_derivative=np.zeros((1,6),dtype=datatype)\n",
    "\n",
    "    for d in prange(relevant_densities_no_duplicates):\n",
    "        i,j=ij_list_no_duplicates[d,0],ij_list_no_duplicates[d,1]\n",
    "\n",
    "        # Density relevance factor: depends only on d, so it is hoisted out of the atom\n",
    "        # and Gaussian-pair loops (it used to be recomputed for every (gd,atom) pair).\n",
    "        relevance_12_value=relevance_matrix[d]\n",
    "        if (relevance_12_value>density_threshold_2):\n",
    "            relevance_12=1.0\n",
    "        else:\n",
    "            fraction_12=(relevance_12_value-density_threshold)/density_threshold_difference\n",
    "            fraction_square_12=fraction_12*fraction_12\n",
    "            relevance_12=-2.0*fraction_square_12*fraction_12+3.0*fraction_square_12\n",
    "\n",
    "        # Midpoint of the two atoms carrying basis functions i and j (also d-invariant;\n",
    "        # previously misleadingly named distance_12_*).\n",
    "        atom_1,atom_2=atom_of_basisfunction[i],atom_of_basisfunction[j]\n",
    "        coordinates_a1,coordinates_a2=coordinates[atom_1],coordinates[atom_2]\n",
    "        midpoint_12_x=0.5*(coordinates_a1[0]+coordinates_a2[0])\n",
    "        midpoint_12_y=0.5*(coordinates_a1[1]+coordinates_a2[1])\n",
    "        midpoint_12_z=0.5*(coordinates_a1[2]+coordinates_a2[2])\n",
    "\n",
    "        for atom in range(num_atoms):\n",
    "\n",
    "            # Coulomb cut-off based on the midpoint-to-nucleus distance; depends only on\n",
    "            # (d,atom), so it is evaluated outside the Gaussian-pair loop.\n",
    "            distance_12_n_x=midpoint_12_x-coordinates[atom,0]\n",
    "            distance_12_n_y=midpoint_12_y-coordinates[atom,1]\n",
    "            distance_12_n_z=midpoint_12_z-coordinates[atom,2]\n",
    "            distance_12_n_2=distance_12_n_x*distance_12_n_x+distance_12_n_y*distance_12_n_y+distance_12_n_z*distance_12_n_z\n",
    "            distance_12_n=np.sqrt(distance_12_n_2)\n",
    "            if (distance_12_n>coulomb_threshold):\n",
    "                continue\n",
    "            elif (distance_12_n<coulomb_threshold_low):\n",
    "                relevance_12_n=1.0\n",
    "            else:\n",
    "                # Smoothstep interpolation inside the cut-off transition region.\n",
    "                fraction_12_n=1.0-(distance_12_n-coulomb_threshold_low)/coulomb_threshold_difference\n",
    "                fraction_square_12_n=fraction_12_n*fraction_12_n\n",
    "                relevance_12_n=-2.0*fraction_square_12_n*fraction_12_n+3.0*fraction_square_12_n\n",
    "\n",
    "            relevance_prefactor=relevance_12*relevance_12_n\n",
    "\n",
    "            for gd in range(gaussians_for_densities_index_list[d],gaussians_for_densities_index_list[d+1]):\n",
    "                gi,gj=gaussians_for_densities[gd,0],gaussians_for_densities[gd,1]\n",
    "                exp_sum=gaussian_functions_exponents[gi]+gaussian_functions_exponents[gj]\n",
    "\n",
    "                integral_value_n=nuclei_integral(nuclei_prefactor[gd],exp_sum,\n",
    "                                                 weighted_coords[gd,0],weighted_coords[gd,1],weighted_coords[gd,2],\n",
    "                                                 coordinates[atom,0],coordinates[atom,1],coordinates[atom,2],elements[atom])\n",
    "\n",
    "                nuclei_matrix[d]+=integral_value_n*relevance_prefactor\n",
    "\n",
    "    return nuclei_matrix,nuclei_matrix_nuclei_derivative,nuclei_matrix_wavefunction_derivative\n",
    "\n",
    "\n",
    "@njit(fastmath=True,parallel=True)\n",
    "def calculate_ionic_energy(coordinates,elements,num_atoms):\n",
    "    \"\"\"\n",
    "    Calculates the ionic energy, i.e. the repulsion of the atom cores: Sum_{i<j} Z_i*Z_j/r_ij.\n",
    "    Pairs beyond the Coulomb cut-off contribute nothing and are skipped; pairs inside the\n",
    "    transition region are damped by a smoothstep relevance factor so the energy stays smooth.\n",
    "    \"\"\"\n",
    "    ionic_energy=0.0\n",
    "    for i in prange(num_atoms):\n",
    "        for j in range(i+1,num_atoms):\n",
    "\n",
    "            distance_x=coordinates[i,0]-coordinates[j,0]\n",
    "            distance_y=coordinates[i,1]-coordinates[j,1]\n",
    "            distance_z=coordinates[i,2]-coordinates[j,2]\n",
    "            r=np.sqrt(distance_x*distance_x+distance_y*distance_y+distance_z*distance_z)\n",
    "            \n",
    "            if (r>coulomb_threshold):\n",
    "                # Relevance factor would be exactly zero; skip instead of computing Z_i*Z_j/r.\n",
    "                continue\n",
    "            elif (r<coulomb_threshold_low):\n",
    "                relevance_n_n=1.0\n",
    "            else:\n",
    "                # Smoothstep interpolation between the low and high cut-off radii.\n",
    "                fraction=1.0-(r-coulomb_threshold_low)/coulomb_threshold_difference\n",
    "                fraction_square=fraction*fraction\n",
    "                relevance_n_n=-2.0*fraction_square*fraction+3.0*fraction_square\n",
    "            \n",
    "            ionic_energy+=relevance_n_n*elements[i]*elements[j]*1.0/r\n",
    "    return ionic_energy\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-----------------------------------\n",
    "Electron repulsion tensor functions\n",
    "-----------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "Additional information for this section:\n",
    "\n",
    "phi_1-phi_2 interaction -> cut-off via density threshold\n",
    "phi_3-phi_4 interaction -> cut-off via density threshold\n",
    "(phi_1|phi_2)-(phi_3|phi_4) interaction -> cut-off via coulomb threshold\n",
    "\n",
    "The naive implementation for the electron tensor (V_ee) and its derivative (d_V_ee) as a 4-dimensional object is shown below.\n",
    "We see the 8-fold symmetry of the electron repulsion integrals.\n",
    "V_ee[i,j,k,l]=v_ijkl\n",
    "V_ee[i,j,l,k]=v_ijkl\n",
    "V_ee[j,i,k,l]=v_ijkl\n",
    "V_ee[j,i,l,k]=v_ijkl\n",
    "V_ee[k,l,i,j]=v_ijkl\n",
    "V_ee[k,l,j,i]=v_ijkl\n",
    "V_ee[l,k,i,j]=v_ijkl\n",
    "V_ee[l,k,j,i]=v_ijkl\n",
    "d_V_ee[i,j,l,k,atom_of_basisfunction[i],0],d_V_ee[i,j,l,k,atom_of_basisfunction[i],1],d_V_ee[i,j,l,k,atom_of_basisfunction[i],2]=v_ijkl_i_dx,v_ijkl_i_dy,v_ijkl_i_dz\n",
    "d_V_ee[i,j,k,l,atom_of_basisfunction[i],0],d_V_ee[i,j,k,l,atom_of_basisfunction[i],1],d_V_ee[i,j,k,l,atom_of_basisfunction[i],2]=v_ijkl_i_dx,v_ijkl_i_dy,v_ijkl_i_dz\n",
    "d_V_ee[j,i,k,l,atom_of_basisfunction[j],0],d_V_ee[j,i,k,l,atom_of_basisfunction[j],1],d_V_ee[j,i,k,l,atom_of_basisfunction[j],2]=v_ijkl_j_dx,v_ijkl_j_dy,v_ijkl_j_dz\n",
    "d_V_ee[j,i,l,k,atom_of_basisfunction[j],0],d_V_ee[j,i,l,k,atom_of_basisfunction[j],1],d_V_ee[j,i,l,k,atom_of_basisfunction[j],2]=v_ijkl_j_dx,v_ijkl_j_dy,v_ijkl_j_dz\n",
    "d_V_ee[k,l,i,j,atom_of_basisfunction[k],0],d_V_ee[k,l,i,j,atom_of_basisfunction[k],1],d_V_ee[k,l,i,j,atom_of_basisfunction[k],2]=v_ijkl_k_dx,v_ijkl_k_dy,v_ijkl_k_dz\n",
    "d_V_ee[k,l,j,i,atom_of_basisfunction[k],0],d_V_ee[k,l,j,i,atom_of_basisfunction[k],1],d_V_ee[k,l,j,i,atom_of_basisfunction[k],2]=v_ijkl_k_dx,v_ijkl_k_dy,v_ijkl_k_dz\n",
    "d_V_ee[l,k,i,j,atom_of_basisfunction[l],0],d_V_ee[l,k,i,j,atom_of_basisfunction[l],1],d_V_ee[l,k,i,j,atom_of_basisfunction[l],2]=v_ijkl_l_dx,v_ijkl_l_dy,v_ijkl_l_dz\n",
    "d_V_ee[l,k,j,i,atom_of_basisfunction[l],0],d_V_ee[l,k,j,i,atom_of_basisfunction[l],1],d_V_ee[l,k,j,i,atom_of_basisfunction[l],2]=v_ijkl_l_dx,v_ijkl_l_dy,v_ijkl_l_dz\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit(parallel=True,fastmath=True)\n",
    "def overlap_contributions(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                          gaussians_for_densities,gaussians_for_densities_index_list,num_gaussians_for_densities,relevant_densities_no_duplicates,num_gaussian_functions):\n",
    "    \"\"\"\n",
    "    Precalculations for the electron repulsion integral (ERI) evaluation.\n",
    "    For every relevant Gaussian pair (i,j) the overlap-type prefactor S, its Cartesian\n",
    "    derivatives w.r.t. the coordinates of Gaussian i, the inverse exponent sum and the\n",
    "    Gaussian product center are precomputed so the ERI kernels can simply look them up.\n",
    "    Returns S_terms, S_terms_derivatives, 1/exp_sums and center_coords.\n",
    "    \"\"\"\n",
    "\n",
    "    S_terms=np.zeros((num_gaussians_for_densities),dtype=datatype)\n",
    "    S_terms_derivatives=np.zeros((num_gaussians_for_densities,3),dtype=datatype)\n",
    "    exp_sums=np.zeros((num_gaussians_for_densities),dtype=datatype)\n",
    "    exp_products=np.zeros((num_gaussians_for_densities),dtype=datatype)\n",
    "    center_coords=np.zeros((num_gaussians_for_densities,3),dtype=datatype)\n",
    "    for d in prange(relevant_densities_no_duplicates):\n",
    "        for gd in range(gaussians_for_densities_index_list[d],gaussians_for_densities_index_list[d+1]):\n",
    "            i,j=gaussians_for_densities[gd,0],gaussians_for_densities[gd,1]\n",
    "            exp_sums[gd]=gaussian_functions_exponents[i]+gaussian_functions_exponents[j]\n",
    "            exp_products[gd]=gaussian_functions_exponents[i]*gaussian_functions_exponents[j]\n",
    "            quotient=exp_products[gd]/exp_sums[gd]\n",
    "            coords_i,coords_j=gaussian_functions_coordinates[i],gaussian_functions_coordinates[j]\n",
    "            distance_x=coords_i[0]-coords_j[0]\n",
    "            distance_y=coords_i[1]-coords_j[1]\n",
    "            distance_z=coords_i[2]-coords_j[2]\n",
    "            # Squared distance between the two Gaussian centers.\n",
    "            coordinate_difference=distance_x*distance_x+distance_y*distance_y+distance_z*distance_z\n",
    "            # Exponent-weighted center of the Gaussian product (Gaussian product theorem).\n",
    "            center_coords[gd]=(gaussian_functions_exponents[i]*gaussian_functions_coordinates[i]+\n",
    "                               gaussian_functions_exponents[j]*gaussian_functions_coordinates[j])/exp_sums[gd]\n",
    "            # Common factor computed once; previously the whole product was recomputed\n",
    "            # four times (once for S and once per derivative component).\n",
    "            S_terms[gd]=np.exp(-quotient*coordinate_difference)*gaussian_functions_coefficients[i]*gaussian_functions_coefficients[j]/exp_sums[gd]*V_ee_prefactor_sqrt/np.sqrt(exp_sums[gd])\n",
    "            # dS/dx_i=-2*(a_i*a_j/(a_i+a_j))*(x_i-x_j)*S, analogously for y and z.\n",
    "            derivative_prefactor=-2.0*quotient*S_terms[gd]\n",
    "            S_terms_derivatives[gd,0]=derivative_prefactor*distance_x\n",
    "            S_terms_derivatives[gd,1]=derivative_prefactor*distance_y\n",
    "            S_terms_derivatives[gd,2]=derivative_prefactor*distance_z\n",
    "            \n",
    "    return S_terms,S_terms_derivatives,1.0/exp_sums,center_coords\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def electron_integrals_list_generation(gaussian_functions_exponents,S_terms,S_terms_derivatives,exp_sums,center_coords,gaussians_for_densities,gaussians_for_densities_index_list,\n",
    "                                       ij_list_no_duplicates,gaussian_functions_index_list,type_of_basis_function,relevant_densities_no_duplicates):\n",
    "    \"\"\"\n",
    "    Preprocessing for the ERI evaluation. Here various values which only depend on one density are precomputed and stored in arrays.\n",
    "    These will be accessed during the actual evaluation of the ERIs which speeds up the calculation since they do not need to be calculated again.\n",
    "    Densities are regrouped by the basis-function-type combination (ti,tj) of their two basis\n",
    "    functions, with combinations sorted by descending Gaussian-pair count, so that downstream\n",
    "    ERI kernels can batch densities of equal size.\n",
    "    Relies on module-level globals: max_gaussian_functions_square, num_basis_function_types,\n",
    "    num_basis_function_types_sqaure, basis_function_type_length_list and e_tensor_datatype.\n",
    "    \"\"\"\n",
    "\n",
    "    # Per-density tables: row = Gaussian-pair slot (padded to max_gaussian_functions_square),\n",
    "    # column = density index in the re-ordered sequence.\n",
    "    gaussian_functions_exponents_i_list,gaussian_functions_exponents_j_list\\\n",
    "        =np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype),\\\n",
    "         np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype)\n",
    "    overlap_list=np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype)\n",
    "    # NOTE(review): the six overlap_derivative_*_list arrays below are returned but never\n",
    "    # written in this function (they stay all-zero), and the S_terms_derivatives argument is\n",
    "    # unused here -- presumably filled/consumed elsewhere; confirm before relying on them.\n",
    "    overlap_derivative_i_x_list,overlap_derivative_i_y_list,overlap_derivative_i_z_list,overlap_derivative_j_x_list,overlap_derivative_j_y_list,overlap_derivative_j_z_list\\\n",
    "        =np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype),\\\n",
    "         np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype),\\\n",
    "         np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype),\\\n",
    "         np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype),\\\n",
    "         np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype),\\\n",
    "         np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype)\n",
    "    exp_sum_list=np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates),dtype=e_tensor_datatype)\n",
    "    center_coords_list=np.zeros((max_gaussian_functions_square,relevant_densities_no_duplicates,3),dtype=e_tensor_datatype)\n",
    "    \n",
    "    # Holds the (i,j) basis-function pairs in the new (type-grouped) density order.\n",
    "    ij_list_no_duplicates_new=np.zeros((relevant_densities_no_duplicates,2),dtype='int32')\n",
    "    \n",
    "    combinations_limits=np.zeros(num_basis_function_types_sqaure,dtype='int32')\n",
    "    gaussians_limits=np.zeros(max_gaussian_functions_square,dtype='int32')\n",
    "    type_combination_lengths=np.zeros(num_basis_function_types_sqaure,dtype='int32')\n",
    "    type_combinations_i=np.zeros(num_basis_function_types_sqaure,dtype='int32')\n",
    "    type_combinations_j=np.zeros(num_basis_function_types_sqaure,dtype='int32')\n",
    "\n",
    "    # Enumerate all (ti,tj) type combinations with their Gaussian-pair counts, then sort\n",
    "    # them by descending count (basis function types are 1-based, hence ti+1/tj+1).\n",
    "    for ti in range(0,num_basis_function_types):\n",
    "        for tj in range(0,num_basis_function_types):\n",
    "            index=ti*num_basis_function_types+tj\n",
    "            type_combinations_i[index],type_combinations_j[index]=ti+1,tj+1\n",
    "            type_combination_lengths[index]=basis_function_type_length_list[ti]*basis_function_type_length_list[tj]\n",
    "    type_combination_lengths_indexing=np.flip(type_combination_lengths.argsort())\n",
    "    type_combination_lengths=np.flip(np.sort(type_combination_lengths))\n",
    "    type_combinations_i=type_combinations_i[type_combination_lengths_indexing]\n",
    "    type_combinations_j=type_combinations_j[type_combination_lengths_indexing]\n",
    "\n",
    "    # Gather densities grouped by type combination; count_densities is the write cursor\n",
    "    # into the re-ordered per-density tables.\n",
    "    count_densities=-1\n",
    "    for t in range(len(type_combinations_i)):\n",
    "        ti,tj=type_combinations_i[t],type_combinations_j[t]\n",
    "        for density in range(relevant_densities_no_duplicates):\n",
    "            i,j=ij_list_no_duplicates[density]\n",
    "            if (type_of_basis_function[i]==ti and type_of_basis_function[j]==tj):\n",
    "                count_densities+=1\n",
    "                ij_list_no_duplicates_new[count_densities,0],ij_list_no_duplicates_new[count_densities,1]=i,j\n",
    "                count_gaussians=-1\n",
    "                for gd in range(gaussians_for_densities_index_list[density],gaussians_for_densities_index_list[density+1]):\n",
    "                    gi,gj=gaussians_for_densities[gd,0],gaussians_for_densities[gd,1]\n",
    "                    count_gaussians+=1\n",
    "                    gaussian_functions_exponents_i_list[count_gaussians,count_densities],gaussian_functions_exponents_j_list[count_gaussians,count_densities]\\\n",
    "                        =gaussian_functions_exponents[gi],gaussian_functions_exponents[gj]\n",
    "                    overlap_list[count_gaussians,count_densities]=S_terms[gd]\n",
    "                    exp_sum_list[count_gaussians,count_densities]=exp_sums[gd]\n",
    "                    center_coords_list[count_gaussians,count_densities,0],center_coords_list[count_gaussians,count_densities,1],center_coords_list[count_gaussians,count_densities,2]\\\n",
    "                        =center_coords[gd,0],center_coords[gd,1],center_coords[gd,2]\n",
    "        # Exclusive end index of the densities belonging to type combinations 0..t.\n",
    "        combinations_limits[t]=count_densities+1\n",
    "\n",
    "    # For every Gaussian-pair slot gi, store the density limit up to which that slot is\n",
    "    # populated: combinations with type_combination_lengths<=gi have no slot gi.\n",
    "    for gi in range(max_gaussian_functions_square):\n",
    "        gaussians_limits[gi]=combinations_limits[-1]\n",
    "        for t in range(1,len(type_combination_lengths)):\n",
    "            if (type_combination_lengths[t]<=gi):\n",
    "                gaussians_limits[gi]=combinations_limits[t-1]\n",
    "                break\n",
    "    \n",
    "    return gaussian_functions_exponents_i_list,gaussian_functions_exponents_j_list,overlap_list,\\\n",
    "           overlap_derivative_i_x_list,overlap_derivative_i_y_list,overlap_derivative_i_z_list,overlap_derivative_j_x_list,overlap_derivative_j_y_list,overlap_derivative_j_z_list,\\\n",
    "           exp_sum_list,center_coords_list,ij_list_no_duplicates_new,gaussians_limits\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit(fastmath=True,parallel=True)\n",
    "def electron_integrals_distance_relevance(relevance_matrix,ij_list_no_duplicates,atom_of_basisfunction,coordinates,orbitals_center_coords,relevant_densities_no_duplicates):\n",
    "    \"\"\"\n",
    "    Calculates estimates for the relevance of density/density interactions. This is done by iterating over all density/density pairs.\n",
    "    Only relevant densities (tabulated in the ij_list_no_duplicates) are used for this.\n",
    "    For each density/density combination an estimate of the distance of the density pairs is calculated. We approximate that a density is located at the mean position of the atomic coordinates\n",
    "    associated with each of the two basis functions which together form an electronic density.\n",
    "    Then the difference of the two density locations is calculated and the Coulomb cut-off is used to calculate if a density combination is relevant.\n",
    "    \"\"\"\n",
    "    \n",
    "    if (verbosity==1): print('    |Relevant densities: '+str(relevant_densities_no_duplicates))\n",
    "    \n",
    "    relevance_d1d2=np.empty((num_threads_integrals,int(max_electron_integrals/num_threads_integrals),2),dtype='int32') \n",
    "    relevance_mask=np.full((1,1),False,dtype='bool')\n",
    "    \n",
    "    count=np.zeros(num_threads_integrals,dtype='int32')\n",
    "\n",
    "    for t in prange(num_threads_integrals):\n",
    "        for d1 in range(t,relevant_densities_no_duplicates,num_threads_integrals):\n",
    "            i,j=ij_list_no_duplicates[d1,0],ij_list_no_duplicates[d1,1]\n",
    "            for d2 in range(d1,relevant_densities_no_duplicates):\n",
    "                k,l=ij_list_no_duplicates[d2,0],ij_list_no_duplicates[d2,1]\n",
    "\n",
    "                relevance_1234_value_upper_bound=relevance_matrix[d1]*relevance_matrix[d2]\n",
    "                if (relevance_1234_value_upper_bound<densities_threshold):\n",
    "                    continue\n",
    "                \n",
    "                atom_1,atom_2,atom_3,atom_4=atom_of_basisfunction[i],atom_of_basisfunction[j],atom_of_basisfunction[k],atom_of_basisfunction[l]\n",
    "                coordinates_a1,coordinates_a2,coordinates_a3,coordinates_a4=coordinates[atom_1],coordinates[atom_2],coordinates[atom_3],coordinates[atom_4]\n",
    "\n",
    "                distance_12_34_x=coordinates_a1[0]+coordinates_a2[0]-coordinates_a3[0]-coordinates_a4[0]\n",
    "                if (np.abs(distance_12_34_x)>coulomb_threshold_times_2): continue\n",
    "                distance_12_34_y=coordinates_a1[1]+coordinates_a2[1]-coordinates_a3[1]-coordinates_a4[1]\n",
    "                if (np.abs(distance_12_34_y)>coulomb_threshold_times_2): continue\n",
    "                distance_12_34_z=coordinates_a1[2]+coordinates_a2[2]-coordinates_a3[2]-coordinates_a4[2]\n",
    "                if (np.abs(distance_12_34_z)>coulomb_threshold_times_2): continue\n",
    "                distance_12_34_x*=0.5\n",
    "                distance_12_34_y*=0.5\n",
    "                distance_12_34_z*=0.5      \n",
    "                distance_12_34=np.sqrt(distance_12_34_x*distance_12_34_x+distance_12_34_y*distance_12_34_y+distance_12_34_z*distance_12_34_z)\n",
    "                if (distance_12_34>coulomb_threshold):\n",
    "                    continue\n",
    "                elif (distance_12_34<coulomb_threshold_low):\n",
    "                    relevance_12_34=1.0\n",
    "                else:\n",
    "                    fraction_12_34=1.0-(distance_12_34-coulomb_threshold_low)/coulomb_threshold_difference\n",
    "                    fraction_square_12_34=fraction_12_34*fraction_12_34\n",
    "                    relevance_12_34=-2.0*fraction_square_12_34*fraction_12_34+3.0*fraction_square_12_34\n",
    "\n",
    "                relevance_1234_value=relevance_matrix[d1]*relevance_matrix[d2]/(distance_12_34+1.0)\n",
    "                if (relevance_1234_value>densities_threshold_2):\n",
    "                    relevance_1234=1.0\n",
    "                elif (relevance_1234_value<densities_threshold):\n",
    "                    continue\n",
    "                else:\n",
    "                    fraction_1234=(relevance_1234_value-densities_threshold)/densities_threshold_difference\n",
    "                    fraction_square_1234=fraction_1234*fraction_1234\n",
    "                    relevance_1234=-2.0*fraction_square_1234*fraction_1234+3.0*fraction_square_1234\n",
    "\n",
    "                relevance_12_value=relevance_matrix[d1]\n",
    "                if (relevance_12_value>density_threshold_2):\n",
    "                    relevance_12=1.0\n",
    "                else:\n",
    "                    fraction_12=(relevance_12_value-density_threshold)/density_threshold_difference\n",
    "                    fraction_square_12=fraction_12*fraction_12\n",
    "                    relevance_12=-2.0*fraction_square_12*fraction_12+3.0*fraction_square_12\n",
    "\n",
    "                relevance_34_value=relevance_matrix[d2]\n",
    "                if (relevance_34_value>density_threshold_2):\n",
    "                    relevance_34=1.0\n",
    "                else:\n",
    "                    fraction_34=(relevance_34_value-density_threshold)/density_threshold_difference\n",
    "                    fraction_square_34=fraction_34*fraction_34\n",
    "                    relevance_34=-2.0*fraction_square_34*fraction_34+3.0*fraction_square_34\n",
    "\n",
    "                relevance_prefactor=relevance_12*relevance_34*relevance_12_34*relevance_1234\n",
    "\n",
    "                if (relevance_prefactor>0):\n",
    "\n",
    "                    relevance_d1d2[t,count[t],0],relevance_d1d2[t,count[t],1]=d1,d2\n",
    "                    count[t]+=1\n",
    "    \n",
    "    relevance_d1d2_unfolded=np.empty((max_electron_integrals,2),dtype='int32')\n",
    "    current_start,current_end=0,0\n",
    "    for t in range(num_threads_integrals):\n",
    "        current_end+=count[t]\n",
    "        relevance_d1d2_unfolded[current_start:current_end]=relevance_d1d2[t,:count[t]] \n",
    "        current_start+=count[t]\n",
    "    relevance_d1d2_unfolded=relevance_d1d2_unfolded[:np.sum(count)]\n",
    "    \n",
    "    if (verbosity==1): print('    |Relevant density combinations: '+str(np.sum(count)))\n",
    "    \n",
    "    return relevance_mask,relevance_d1d2_unfolded,np.sum(count)\n",
    "\n",
    "\n",
    "\n",
    "@njit(parallel=True,fastmath=True)\n",
    "def electron_arrays_calculation_loops(gaussian_functions_exponents_i_list,gaussian_functions_exponents_j_list,\n",
    "                                      overlap_list,exp_sum_list,\n",
    "                                      overlap_derivative_i_x_list,overlap_derivative_i_y_list,overlap_derivative_i_z_list,\n",
    "                                      overlap_derivative_j_x_list,overlap_derivative_j_y_list,overlap_derivative_j_z_list,\n",
    "                                      center_coords_list,relevance_mask,relevance_d1d2,num_relevant,\n",
    "                                      ij_list_no_duplicates,gaussians_limits,atom_of_basisfunction,orbital_parts_index_list,coordinates,\n",
    "                                      relevant_densities_no_duplicates,num_orbital_parts,num_basis_functions):\n",
    "    \"\"\"\n",
    "    The main routine for electron repulsion integral (ERI) evaluation.\n",
    "\n",
    "    For every primitive-pair slot (d1,d2) the corresponding contribution is\n",
    "    accumulated, in parallel over the screened density pairs listed in\n",
    "    relevance_d1d2, into a flat result array of length num_relevant.\n",
    "    Relies on the module-level globals max_gaussian_functions_square and\n",
    "    e_tensor_datatype and on the helper erf_hyp1f1_with_arrays (evaluates the\n",
    "    radial integral factor from the reduced squared distance -- presumably a\n",
    "    Boys-function-like lookup; confirm against its definition).\n",
    "    Several parameters (e.g. relevance_mask, the derivative lists, coordinates)\n",
    "    are accepted for interface compatibility but are not used here.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    result : 1D array (num_relevant,)\n",
    "        Accumulated ERI values, one per entry of relevance_d1d2.\n",
    "    derivative : 2D array (1,12)\n",
    "        Zero-initialized placeholder; not filled in this routine.\n",
    "    \"\"\"\n",
    "\n",
    "\n",
    "    result=np.zeros((num_relevant),dtype=e_tensor_datatype)\n",
    "    derivative=np.zeros((1,12),dtype=e_tensor_datatype) \n",
    "\n",
    "    \n",
    "    # Outer loops run over all primitive-pair slots; how many densities are\n",
    "    # valid for a given slot is read from gaussians_limits below.\n",
    "    for d1 in range(max_gaussian_functions_square):\n",
    "        for d2 in range(max_gaussian_functions_square):\n",
    "            \n",
    "            matrix_cut_d1=gaussians_limits[d1]\n",
    "            matrix_cut_d2=gaussians_limits[d2]\n",
    "            # Per-slot views, truncated to the valid densities of each slot.\n",
    "            overlap_list_d1,overlap_list_d2=overlap_list[d1,:matrix_cut_d1],overlap_list[d2,:matrix_cut_d2]\n",
    "            exp_sum_list_d1,exp_sum_list_d2=exp_sum_list[d1,:matrix_cut_d1],exp_sum_list[d2,:matrix_cut_d2]\n",
    "            center_coords_list_d1,center_coords_list_d2=center_coords_list[d1,:matrix_cut_d1],center_coords_list[d2,:matrix_cut_d2]\n",
    "            \n",
    "\n",
    "            # Parallel loop over the screened density pairs. Note: i and j here\n",
    "            # are DENSITY indices (entries of relevance_d1d2), not\n",
    "            # basis-function indices.\n",
    "            for d1d2 in prange(num_relevant):\n",
    "                i,j=relevance_d1d2[d1d2,0],relevance_d1d2[d1d2,1]\n",
    "                # Only densities whose primitive count reaches slot d1/d2\n",
    "                # contribute -- presumably the lists are packed so that the\n",
    "                # first matrix_cut entries are the valid ones; confirm.\n",
    "                if (i<matrix_cut_d1):\n",
    "                    if(j<matrix_cut_d2):  \n",
    "        \n",
    "                        # Combined inverse exponent of the two charge\n",
    "                        # distributions.\n",
    "                        exp_sum=1.0/(exp_sum_list_d1[i]+exp_sum_list_d2[j])\n",
    "                        \n",
    "                        # Squared distance between the two distribution centers,\n",
    "                        # scaled by the combined inverse exponent.\n",
    "                        center_coords_d1,center_coords_d2=center_coords_list_d1[i],center_coords_list_d2[j]\n",
    "                        coulomb_distance_x=center_coords_d1[0]-center_coords_d2[0]\n",
    "                        coulomb_distance_y=center_coords_d1[1]-center_coords_d2[1]\n",
    "                        coulomb_distance_z=center_coords_d1[2]-center_coords_d2[2]\n",
    "                        coulomb_distance=(coulomb_distance_x*coulomb_distance_x+coulomb_distance_y*coulomb_distance_y+coulomb_distance_z*coulomb_distance_z)*exp_sum\n",
    "                    \n",
    "                        result[d1d2]+=np.sqrt(exp_sum)*overlap_list_d1[i]*overlap_list_d2[j]*erf_hyp1f1_with_arrays(coulomb_distance)\n",
    "                        \n",
    "    \n",
    "    return result,derivative\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit(fastmath=True,parallel=True)\n",
    "def electron_integrals_with_arrays(result,derivative,relevance_d1d2,\n",
    "                                   relevance_matrix,relevance_matrix_derivative,relevance_mask,num_relevant,\n",
    "                                   ij_list_no_duplicates,gaussians_limits,atom_of_basisfunction,orbital_parts_index_list,coordinates,orbitals_center_coords,\n",
    "                                   relevant_densities_no_duplicates,num_orbital_parts,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Screens electron repulsion integrals (ERIs) for relevance based on a Coulomb cut-off and the product of the relevance values of their densities.\n",
    "\n",
    "    Each precomputed raw integral result[d1d2] is scaled by a smooth prefactor\n",
    "    built from cubic smoothstep functions (3*f^2-2*f^3) of the inter-density\n",
    "    distance and of the density relevance values, so that screened integrals\n",
    "    fade out continuously instead of being cut off abruptly.\n",
    "    Relies on the module-level screening thresholds (densities_threshold,\n",
    "    densities_threshold_2, density_threshold, coulomb_threshold, their *_low\n",
    "    and *_difference variants) and on verbosity.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    electron_tensor : 1D float64 array (num_relevant,)\n",
    "        Screened ERI values; zero where the entry was skipped.\n",
    "    electron_tensor_derivative : 2D float64 array (1,3)\n",
    "        Zero-initialized placeholder, not filled in this routine.\n",
    "    ijkl_list : 2D int16 array (num_relevant,4)\n",
    "        Basis-function indices (i,j,k,l) of each kept integral.\n",
    "    num_relevant : int\n",
    "        Passed through unchanged.\n",
    "    \"\"\"\n",
    "\n",
    "    electron_tensor=np.zeros(num_relevant,dtype='float64')\n",
    "    electron_tensor_derivative=np.zeros((1,3),dtype='float64')\n",
    "    # NOTE(review): int16 limits basis-function indices to 32767 -- confirm\n",
    "    # num_basis_functions stays below this bound.\n",
    "    ijkl_list=np.zeros((num_relevant,4),dtype='int16')\n",
    "\n",
    "    for d1d2 in prange(num_relevant):\n",
    "        d1,d2=relevance_d1d2[d1d2]\n",
    "        i,j=ij_list_no_duplicates[d1,0],ij_list_no_duplicates[d1,1]\n",
    "        k,l=ij_list_no_duplicates[d2,0],ij_list_no_duplicates[d2,1]\n",
    "\n",
    "        # Cheap upper bound first: if even the raw product of the two density\n",
    "        # relevances is below the threshold, the integral cannot survive.\n",
    "        relevance_1234_value_upper_bound=relevance_matrix[d1]*relevance_matrix[d2]\n",
    "        if (relevance_1234_value_upper_bound<densities_threshold):\n",
    "            continue\n",
    "\n",
    "        atom_1,atom_2,atom_3,atom_4=atom_of_basisfunction[i],atom_of_basisfunction[j],atom_of_basisfunction[k],atom_of_basisfunction[l]\n",
    "        coordinates_a1,coordinates_a2,coordinates_a3,coordinates_a4=coordinates[atom_1],coordinates[atom_2],coordinates[atom_3],coordinates[atom_4]\n",
    "\n",
    "        # Distance between the midpoints of atom pairs (1,2) and (3,4), with a\n",
    "        # cheap per-axis early exit before the full norm is evaluated.\n",
    "        distance_12_34_x=0.5*(coordinates_a1[0]+coordinates_a2[0]-coordinates_a3[0]-coordinates_a4[0])\n",
    "        if (np.abs(distance_12_34_x)>coulomb_threshold): continue\n",
    "        distance_12_34_y=0.5*(coordinates_a1[1]+coordinates_a2[1]-coordinates_a3[1]-coordinates_a4[1])\n",
    "        if (np.abs(distance_12_34_y)>coulomb_threshold): continue\n",
    "        distance_12_34_z=0.5*(coordinates_a1[2]+coordinates_a2[2]-coordinates_a3[2]-coordinates_a4[2])\n",
    "        if (np.abs(distance_12_34_z)>coulomb_threshold): continue\n",
    "        distance_12_34=np.sqrt(distance_12_34_x*distance_12_34_x+distance_12_34_y*distance_12_34_y+distance_12_34_z*distance_12_34_z)\n",
    "\n",
    "        # Smoothstep in the distance: 1 below coulomb_threshold_low, 0 above\n",
    "        # coulomb_threshold, cubic interpolation in between.\n",
    "        if (distance_12_34>coulomb_threshold):\n",
    "            continue\n",
    "        elif (distance_12_34<coulomb_threshold_low):\n",
    "            relevance_12_34=1.0\n",
    "        else:\n",
    "            fraction_12_34=1.0-(distance_12_34-coulomb_threshold_low)/coulomb_threshold_difference\n",
    "            fraction_square_12_34=fraction_12_34*fraction_12_34\n",
    "            relevance_12_34=-2.0*fraction_square_12_34*fraction_12_34+3.0*fraction_square_12_34\n",
    "\n",
    "        # Smoothstep in the distance-damped relevance product of both densities.\n",
    "        relevance_1234_value=relevance_matrix[d1]*relevance_matrix[d2]/(distance_12_34+1.0)\n",
    "        if (relevance_1234_value>densities_threshold_2):\n",
    "            relevance_1234=1.0\n",
    "        elif (relevance_1234_value<densities_threshold):\n",
    "            continue\n",
    "        else:\n",
    "            fraction_1234=(relevance_1234_value-densities_threshold)/densities_threshold_difference\n",
    "            fraction_square_1234=fraction_1234*fraction_1234\n",
    "            relevance_1234=-2.0*fraction_square_1234*fraction_1234+3.0*fraction_square_1234\n",
    "\n",
    "        # Smoothstep in the individual relevance of density (i,j).\n",
    "        relevance_12_value=relevance_matrix[d1]\n",
    "        if (relevance_12_value>density_threshold_2):\n",
    "            relevance_12=1.0\n",
    "        else:\n",
    "            fraction_12=(relevance_12_value-density_threshold)/density_threshold_difference\n",
    "            fraction_square_12=fraction_12*fraction_12\n",
    "            relevance_12=-2.0*fraction_square_12*fraction_12+3.0*fraction_square_12\n",
    "\n",
    "        # Smoothstep in the individual relevance of density (k,l).\n",
    "        relevance_34_value=relevance_matrix[d2]\n",
    "        if (relevance_34_value>density_threshold_2):\n",
    "            relevance_34=1.0\n",
    "        else:\n",
    "            fraction_34=(relevance_34_value-density_threshold)/density_threshold_difference\n",
    "            fraction_square_34=fraction_34*fraction_34\n",
    "            relevance_34=-2.0*fraction_square_34*fraction_34+3.0*fraction_square_34\n",
    "\n",
    "        relevance_prefactor=relevance_12*relevance_34*relevance_12_34*relevance_1234\n",
    "\n",
    "        # Scale the precomputed raw integral; i,j,k,l are unchanged since the\n",
    "        # top of the loop, so no recomputation is needed here (the original\n",
    "        # redundant re-reads of ij_list_no_duplicates were removed).\n",
    "        electron_tensor[d1d2]=result[d1d2]*relevance_prefactor\n",
    "        ijkl_list[d1d2,0],ijkl_list[d1d2,1],ijkl_list[d1d2,2],ijkl_list[d1d2,3]=i,j,k,l\n",
    "\n",
    "    # Screening statistics: how many integrals survive each screening stage.\n",
    "    unique_integrals=int((num_basis_functions*(num_basis_functions+1.0)/2.0)*(num_basis_functions*(num_basis_functions+1.0)/2.0+1.0)/2.0)\n",
    "    if (verbosity==1):\n",
    "        print('    |Total integrals:         '+str(num_basis_functions**4))\n",
    "        print('    |Unique integrals:        '+str(unique_integrals))\n",
    "        print('    |With relevant densities: '+str(int((relevant_densities_no_duplicates+1)*relevant_densities_no_duplicates/2)))\n",
    "        print('    |With relevant values:    '+str(num_relevant))\n",
    "\n",
    "    return electron_tensor,electron_tensor_derivative,ijkl_list,num_relevant\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "----------------------------------------------------------------\n",
    "Functions of the scf-procedure (energy and density calculations)\n",
    "----------------------------------------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "@njit(parallel=True,fastmath=True)\n",
    "def calculate_G(P,V_ee,ijkl_list,num_basis_functions,relevant_V_ee_elements):\n",
    "    \"\"\"\n",
    "    Calculates the two-center part of the Fock matrix.\n",
    "    The naive implementation is shown below for a better understanding.\n",
    "    \n",
    "    for i in range(num_basis_functions):\n",
    "        for j in range(num_basis_functions):\n",
    "            for k in range(num_basis_functions):\n",
    "                for l in range(num_basis_functions):\n",
    "                    density=P[k,l]\n",
    "                    J=V_ee[i,j,k,l]\n",
    "                    K=V_ee[i,l,k,j]\n",
    "                    G[i,j]+=density*(J-0.5*K)\n",
    "\n",
    "    Here V_ee holds only the unique screened integrals as a flat list (their\n",
    "    indices are in ijkl_list); each entry is unfolded below to all of its\n",
    "    symmetry-equivalent index combinations -- presumably (i,j) and (k,l) are\n",
    "    stored in a canonical order; confirm against the ERI packing.\n",
    "    Each thread accumulates into its own slice G[part] to avoid write races;\n",
    "    the slices are summed at the end. Uses the module-level globals\n",
    "    num_threads_G and datatype.\n",
    "    \"\"\"\n",
    "\n",
    "    num_parts=num_threads_G\n",
    "    part_length=int(np.ceil(relevant_V_ee_elements/num_parts))\n",
    "    G=np.zeros((num_parts,num_basis_functions,num_basis_functions),dtype=datatype)\n",
    "    # NOTE(review): for a single basis function this returns an all-zero G\n",
    "    # without processing any integrals -- confirm this special case is intended.\n",
    "    if (num_basis_functions==1): return np.sum(G,axis=0)\n",
    "    \n",
    "    for part in prange(num_parts):\n",
    "        # Each thread processes a contiguous chunk of the flat integral list.\n",
    "        max_index=min((part+1)*part_length,relevant_V_ee_elements)\n",
    "        ijkl_list_part=ijkl_list[part*part_length:max_index]\n",
    "        V_ee_part=V_ee[part*part_length:max_index]\n",
    "        for ee in range(max_index-part*part_length):\n",
    "            ijkl_list_ee=ijkl_list_part[ee]\n",
    "            i,j,k,l=ijkl_list_ee[0],ijkl_list_ee[1],ijkl_list_ee[2],ijkl_list_ee[3]\n",
    "            V_ee_ee=V_ee_part[ee]\n",
    "            \n",
    "            P_ij_V_ee=P[i,j]*V_ee_ee\n",
    "            P_kl_V_ee=P[k,l]*V_ee_ee\n",
    "\n",
    "            # Coulomb (J, += terms) and exchange (K, -=0.5 terms) contributions\n",
    "            # for the stored index order; the factor 2 accounts for the\n",
    "            # implicit (l,k) partner when k!=l.\n",
    "            if (k!=l):\n",
    "                G[part,i,j]+=2.0*P_kl_V_ee\n",
    "                G[part,i,k]-=0.5*P[l,j]*V_ee_ee\n",
    "            else:\n",
    "                G[part,i,j]+=P_kl_V_ee\n",
    "            G[part,i,l]-=0.5*P[k,j]*V_ee_ee\n",
    "\n",
    "            # Same contributions with the first index pair swapped to (j,i).\n",
    "            if (i!=j):\n",
    "                if (k!=l):\n",
    "                    G[part,j,i]+=2.0*P_kl_V_ee\n",
    "                    G[part,j,k]-=0.5*P[l,i]*V_ee_ee\n",
    "                else:\n",
    "                    G[part,j,i]+=P_kl_V_ee\n",
    "                G[part,j,l]-=0.5*P[k,i]*V_ee_ee\n",
    "                        \n",
    "            # b is False exactly when (i,j)==(k,l); it guards against double\n",
    "            # counting the bra-ket swapped contributions below.\n",
    "            b=not(i==k and j==l)\n",
    "            if (b):\n",
    "                if (i!=j):\n",
    "                    G[part,k,l]+=2.0*P_ij_V_ee\n",
    "                    G[part,k,i]-=0.5*P[j,l]*V_ee_ee\n",
    "                else:\n",
    "                    G[part,k,l]+=P_ij_V_ee\n",
    "                G[part,k,j]-=0.5*P[i,l]*V_ee_ee\n",
    "                \n",
    "            # Bra-ket swapped AND second pair transposed: (l,k) side.\n",
    "            if (b and k!=l):\n",
    "                if (i!=j):\n",
    "                    G[part,l,k]+=2.0*P_ij_V_ee\n",
    "                    G[part,l,i]-=0.5*P[j,k]*V_ee_ee\n",
    "                else:\n",
    "                    G[part,l,k]+=P_ij_V_ee\n",
    "                G[part,l,j]-=0.5*P[i,k]*V_ee_ee\n",
    "                \n",
    "    # Reduce the per-thread partial results into the final G matrix.\n",
    "    G=np.sum(G,axis=0)\n",
    "    return G\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit(parallel=True,fastmath=True)\n",
    "def calculate_P(eigenorbitals,occupied_orbitals,occupancies,radical,T_e,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Calculates the density matrix P from the eigenorbitals.\n",
    "\n",
    "    Closed-shell systems double-occupy the lowest occupied_orbitals orbitals;\n",
    "    for radicals the orbital at index floor(occupied_orbitals) contributes\n",
    "    with weight 1 instead of 2. The occupancies and T_e arguments are accepted\n",
    "    for interface compatibility but are not used here.\n",
    "    \"\"\"\n",
    "\n",
    "    P=np.zeros((num_basis_functions,num_basis_functions),dtype=datatype)\n",
    "    if (radical):\n",
    "        # Doubly occupied orbitals plus one singly occupied orbital (weight 1).\n",
    "        for row in prange(num_basis_functions):\n",
    "            for col in prange(num_basis_functions):\n",
    "                for orb in prange(int(np.floor(occupied_orbitals))):\n",
    "                    P[row,col]+=2*eigenorbitals[row,orb]*eigenorbitals[col,orb]\n",
    "                somo=int(np.floor(occupied_orbitals))\n",
    "                P[row,col]+=eigenorbitals[row,somo]*eigenorbitals[col,somo]\n",
    "    else:\n",
    "        # Closed shell: every occupied orbital carries two electrons.\n",
    "        for row in prange(num_basis_functions):\n",
    "            for col in prange(num_basis_functions):\n",
    "                for orb in prange(occupied_orbitals):\n",
    "                    P[row,col]+=2*eigenorbitals[row,orb]*eigenorbitals[col,orb]\n",
    "    return P\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-------------------------------------------\n",
    "scf-function and Hartree-Fock main function\n",
    "-------------------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "@njit\n",
    "def sparse_to_dense(sparse_indices,sparse_values,dense_len):\n",
    "    \"\"\"\n",
    "    Transforms a sparse matrix to dense matrix.\n",
    "\n",
    "    Each (row,col) index pair is written symmetrically, so the resulting\n",
    "    dense_len x dense_len matrix is symmetric by construction.\n",
    "    \"\"\"\n",
    "\n",
    "    dense=np.zeros((dense_len,dense_len),dtype=datatype)\n",
    "    for entry in range(len(sparse_values)):\n",
    "        index_pair=sparse_indices[entry]\n",
    "        row,col=index_pair[0],index_pair[1]\n",
    "        value=sparse_values[entry]\n",
    "        dense[row,col]=value\n",
    "        dense[col,row]=value\n",
    "    return dense\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def matrix_new_indexing_order(sparse_indices,sparse_indices_new,sparse_values,dense_len):\n",
    "    \"\"\"\n",
    "    Changes the indexing order of a matrix. This is used if matrices are transformed to the second densities-list.\n",
    "\n",
    "    The values are first scattered symmetrically into a dense buffer using the\n",
    "    old index pairs and then gathered back in the order given by the new index\n",
    "    pairs.\n",
    "    \"\"\"\n",
    "\n",
    "    sparse_len=len(sparse_values)\n",
    "    # Scatter: build the symmetric dense matrix from the old ordering.\n",
    "    dense=np.zeros((dense_len,dense_len),dtype=datatype)\n",
    "    for entry in range(sparse_len):\n",
    "        old_pair=sparse_indices[entry]\n",
    "        row,col=old_pair[0],old_pair[1]\n",
    "        dense[row,col]=sparse_values[entry]\n",
    "        dense[col,row]=sparse_values[entry]\n",
    "    # Gather: read the values back in the new ordering.\n",
    "    reordered=np.zeros(sparse_len,dtype=datatype)\n",
    "    for entry in range(sparse_len):\n",
    "        new_pair=sparse_indices_new[entry]\n",
    "        row,col=new_pair[0],new_pair[1]\n",
    "        reordered[entry]=dense[row,col]\n",
    "    return reordered\n",
    "\n",
    "\n",
    "\n",
    "def calculate_occupations(eigenenergies,num_basis_functions,num_electrons,radical,pseudo_finite_temp=0,relevant_orbitals=-1):\n",
    "    \"\"\"\n",
    "    Calculates the electronic occupations of all orbitals.\n",
    "\n",
    "    With pseudo_finite_temp==0 the orbitals are filled Aufbau-style (2\n",
    "    electrons per orbital, plus one singly occupied orbital for radicals).\n",
    "    Otherwise a Fermi-Dirac-like smearing around the chemical potential is\n",
    "    applied -- to all orbitals if relevant_orbitals==-1, or only to a window\n",
    "    of relevant_orbitals orbitals around the frontier orbital -- and the\n",
    "    result is rescaled so the occupations sum to num_electrons.\n",
    "    Uses the module-level globals datatype and k_b.\n",
    "    \"\"\"\n",
    "\n",
    "    occupations=np.zeros(num_basis_functions,dtype=datatype)\n",
    "\n",
    "    if (pseudo_finite_temp==0):\n",
    "        # Zero-temperature Aufbau filling.\n",
    "        if (radical):\n",
    "            occupations[:int(np.floor(num_electrons/2.0))]=2.0\n",
    "            occupations[int(np.floor(num_electrons/2.0))]=1.0\n",
    "        else:\n",
    "            occupations[:int(num_electrons/2.0)]=2.0\n",
    "        return occupations\n",
    "\n",
    "    # Pseudo finite temperature: locate the chemical potential and the window\n",
    "    # of orbitals to smear.\n",
    "    if (radical):\n",
    "        somo_index=int(np.floor(num_electrons/2.0))\n",
    "        chemical_potential=eigenenergies[somo_index]\n",
    "        lower_border=somo_index-relevant_orbitals\n",
    "        upper_border=somo_index+relevant_orbitals+1\n",
    "    else:\n",
    "        lumo_index=int(num_electrons/2.0)\n",
    "        chemical_potential=(eigenenergies[lumo_index-1]+eigenenergies[lumo_index])/2.0\n",
    "        lower_border=lumo_index-relevant_orbitals\n",
    "        upper_border=lumo_index+relevant_orbitals\n",
    "\n",
    "    # The exponent is clipped at 100 to avoid overflow in np.exp; the tiny\n",
    "    # additive constant guards the division for temperatures approaching zero.\n",
    "    if (relevant_orbitals==-1):\n",
    "        occupations=2.0/(np.exp(np.minimum((eigenenergies-chemical_potential)/(k_b*pseudo_finite_temp+1.0e-100),100))+1.0)\n",
    "    else:\n",
    "        # Start from the Aufbau filling and smear only the window.\n",
    "        if (radical):\n",
    "            occupations[:int(np.floor(num_electrons/2.0))]=2.0\n",
    "            occupations[int(np.floor(num_electrons/2.0))]=1.0\n",
    "        else:\n",
    "            occupations[:int(num_electrons/2.0)]=2.0\n",
    "        occupations[lower_border:upper_border]=2.0/(np.exp(np.minimum((eigenenergies[lower_border:upper_border]-chemical_potential)/(k_b*pseudo_finite_temp+1.0e-100),100))+1.0)\n",
    "    return occupations/np.sum(occupations)*num_electrons\n",
    "\n",
    "\n",
    "\n",
    "def scf(S,T,V_en,V_ee,nuclei_matrix_hamiltonian_derivative,overlap_matrix_wavefunction_derivative,kinetic_matrix_wavefunction_derivative,nuclei_matrix_wavefunction_derivative,\n",
    "        electron_matrix_wavefunction_derivative,atom_of_basisfunction,basis_functions_index_list,ijkl_list,coordinates,elements,occupied_orbitals,radical,\n",
    "        num_atoms,num_basis_functions,relevant_V_ee_elements,linear_mixing=False):\n",
    "    \"\"\"\n",
    "    Performs the self-consistent field iterations for the Hartree-Fock calculation. \n",
    "    Takes all inputs which were computed previously, i.e. overlap matrix, kinetic matrix, nuclei attraction matrix and electron repulsion tensor. \n",
    "    \n",
    "    With linear mixing enables: identical to the other mode but uses linear mixing instead of DIIS.\n",
    "    This function should normally used with linear_mixing=False since linear mixing is much slower than DIIS! However, DIIS is more susceptible to errors and instabilities.\n",
    "    Therefore, linear_mixing=True can be used as a comparison if problems in the DIIS of scf() occur since linear mixing (with a sufficiently small mixing factor) is numerically very stable. \n",
    "\n",
    "    Info:\n",
    "    Test:\n",
    "       level-shift,    DIIS penalty, max DIIS equations: good convergence\n",
    "       level-shift,    DIIS penalty, avg DIIS equations: no convergence\n",
    "       level-shift, no DIIS penalty, max DIIS equations: medium convergence\n",
    "       level-shift, no DIIS penalty, avg DIIS equations: no convergence\n",
    "    no level-shift,    DIIS penalty, max DIIS equations: good convergence\n",
    "    no level-shift,    DIIS penalty, avg DIIS equations: no convergence\n",
    "    no level-shift, no DIIS penalty, max DIIS equations: no convergence\n",
    "    no level-shift, no DIIS penalty, avg DIIS equations: no convergence\n",
    "    Pulay mixing factor 0.5 was used\n",
    "\n",
    "       level-shift,    DIIS penalty, max DIIS equations: good convergence (converged: 25)\n",
    "    no level-shift,    DIIS penalty, max DIIS equations: good convergence (converged: 16)\n",
    "    Pulay mixing factor 1.0 was used \n",
    "\n",
    "    recommended:\n",
    "    Pulay mixing: enabled, factor 0.7\n",
    "    DIIS penalty: enabled, weight 1.05\n",
    "    level-shift: disabled, if enabled: recommended to set to 0.3 Hartree\n",
    "    max DIIS equations: num_scf_steps, otherwise difficult convergence for large-scale systems\n",
    "    num scf steps: ~10-100 to observe maximal convergence (depending on the system)\n",
    "    \"\"\"\n",
    "\n",
    "    energy=0\n",
    "    scf_tolerance=scf_tolerance_density\n",
    "\n",
    "    H_core_time=0.0\n",
    "    Inverse_time=0.0\n",
    "    SAD_time=0.0\n",
    "    G_time=0.0\n",
    "    F_time=0.0\n",
    "    SPF_time=0.0\n",
    "    store_F_time=0.0\n",
    "    error_matrix_time=0.0\n",
    "    energy_time=0.0\n",
    "    F_transform_time=0.0\n",
    "    eigenvalue_time=0.0\n",
    "    eigenorbitals_time=0.0\n",
    "    P_time=0.0\n",
    "    DIIS_coef_time=0.0\n",
    "    DIIS_time=0.0\n",
    "    new_F_time=0.0\n",
    "    conv_crit_time=0.0\n",
    "    \n",
    "    if (not linear_mixing):\n",
    "        S=torch.from_numpy(S)\n",
    "        T=torch.from_numpy(T)\n",
    "        V_en=torch.from_numpy(V_en)\n",
    "\n",
    "        t1=time.time()\n",
    "        H_core=torch.add(T,V_en)\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Caclculate H_core: ',t2-t1)\n",
    "        H_core_time+=t2-t1\n",
    "        \n",
    "        commutator_matrices=torch.zeros((max_scf_steps,num_basis_functions,num_basis_functions),dtype=torch.float32) \n",
    "        stored_fock_matrices=torch.zeros((max_scf_steps,num_basis_functions,num_basis_functions),dtype=torch.float32) \n",
    "        error_matrix=np.zeros((max_scf_steps,max_scf_steps),dtype=datatype)\n",
    "\n",
    "        saved_energies=np.zeros(max_scf_steps,dtype=datatype)\n",
    "        min_j=0\n",
    "        min_energy=1.0\n",
    "        \n",
    "        current_DIIS_linear_equations=max_DIIS_linear_equations\n",
    "\n",
    "        num_electrons=np.sum(elements)+added_electrons\n",
    "        electronic_occupations=np.zeros(num_basis_functions,dtype=datatype)\n",
    "        count_electrons=0\n",
    "        for i in range(num_basis_functions):\n",
    "            if (count_electrons<num_electrons-1):\n",
    "                count_electrons+=2\n",
    "                electronic_occupations[i]=2\n",
    "            elif (count_electrons==num_electrons-1):\n",
    "                count_electrons+=1\n",
    "                electronic_occupations[i]=1\n",
    "                break\n",
    "            else:\n",
    "                break\n",
    "        electronic_occupations=np.diag(electronic_occupations)\n",
    "        \n",
    "        t1=time.time()\n",
    "        evals,evecs=torch.linalg.eigh(S)\n",
    "        evpow=evals**(-1/2) \n",
    "        S_inverse_sqrt=torch.matmul(evecs,torch.matmul(torch.diag(evpow),torch.inverse(evecs)))\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Inverse square matrix: ',t2-t1)\n",
    "        Inverse_time+=t2-t1\n",
    "\n",
    "        t1=time.time()\n",
    "        P=np.zeros((num_basis_functions,num_basis_functions),dtype=datatype)\n",
    "        for atom in range(num_atoms):\n",
    "            element=elements[atom]\n",
    "            if (element<=2):\n",
    "                P[basis_functions_index_list[atom]:basis_functions_index_list[atom+1],basis_functions_index_list[atom]:basis_functions_index_list[atom+1]]=single_atom_densities[element,0,0]\n",
    "            elif (element>2 and element<=10):\n",
    "                P[basis_functions_index_list[atom]:basis_functions_index_list[atom+1],basis_functions_index_list[atom]:basis_functions_index_list[atom+1]]=single_atom_densities[element,:5,:5]\n",
    "            elif (element>10 and element<=18):\n",
    "                P[basis_functions_index_list[atom]:basis_functions_index_list[atom+1],basis_functions_index_list[atom]:basis_functions_index_list[atom+1]]=single_atom_densities[element,:9,:9]\n",
    "        P_0=np.copy(P)\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('SAD guess: ',t2-t1)\n",
    "        SAD_time+=t2-t1\n",
    "        \n",
    "        t1=time.time()\n",
    "        G=calculate_G(P,V_ee,ijkl_list,num_basis_functions,relevant_V_ee_elements)\n",
    "        G_0=np.copy(G)\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Caclculate G: ',t2-t1)\n",
    "        G_time+=t2-t1\n",
    "        t1=time.time()\n",
    "        G=torch.from_numpy(G)\n",
    "        F=torch.add(H_core,G)\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Caclculate F: ',t2-t1)\n",
    "        F_time+=t2-t1\n",
    "\n",
    "        t1=time.time()\n",
    "\n",
    "        P=torch.from_numpy(P)\n",
    "        SPF=torch.matmul(S.to(torch.float32),(torch.matmul(P.to(torch.float32),F.to(torch.float32))))\n",
    "        commutator_matrices[0]=SPF-SPF.t()\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Caclculate SPF-FPS: ',t2-t1)\n",
    "        SPF_time+=t2-t1\n",
    "        t1=time.time()\n",
    "        stored_fock_matrices[0]=F\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Store F: ',t2-t1)\n",
    "        store_F_time+=t2-t1\n",
    "        t1=time.time()\n",
    "        error_matrix[0,0]=torch.sum(torch.mul(commutator_matrices[0],commutator_matrices[0]))\n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Caclculate error matrix: ',t2-t1)\n",
    "        error_matrix_time+=t2-t1\n",
    "\n",
    "        t1=time.time()\n",
    "        E_0=torch.add(H_core,0.5*G)\n",
    "        saved_energies[0]=torch.sum(torch.mul(P,E_0)).numpy() \n",
    "        t2=time.time()\n",
    "        if (display_runtimes): print('Calculate energy: ',t2-t1)\n",
    "        energy_time+=t2-t1\n",
    "\n",
    "        for i in range(1,max_scf_steps):\n",
    "            old_P=P\n",
    "            old_F=F\n",
    "\n",
    "            t1=time.time()\n",
    "            F_transformed=torch.matmul(S_inverse_sqrt,torch.matmul(F,S_inverse_sqrt))\n",
    "            if (level_shift_enabled):\n",
    "                F_transformed=F_transformed.numpy()\n",
    "                for j in range(int(occupied_orbitals),num_basis_functions):\n",
    "                    F_transformed[j,j]+=level_shift_value\n",
    "                F_transformed=torch.from_numpy(F_transformed)\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Transform F: ',t2-t1)\n",
    "            F_transform_time+=t2-t1\n",
    "            t1=time.time()\n",
    "            eigenenergies,eigenvectors=torch.linalg.eigh(F_transformed)\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Eigenvalue calculation: ',t2-t1)\n",
    "            eigenvalue_time+=t2-t1\n",
    "            t1=time.time()\n",
    "            eigenorbitals=torch.matmul(S_inverse_sqrt,eigenvectors)\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Calculate eigenorbitals: ',t2-t1)\n",
    "            eigenorbitals_time+=t2-t1\n",
    "\n",
    "            occupancies=np.zeros(num_basis_functions,dtype=datatype)\n",
    "            t1=time.time()\n",
    "            if (display_runtimes): print('Eigenvalues around Fermi level')\n",
    "            if (display_runtimes or display_eigenenergies): \n",
    "                print(np.round(np.reshape(eigenenergies[int(occupied_orbitals)-20:int(occupied_orbitals)+20].numpy(),(4,10))*27.2114079527,3))\n",
    "                \n",
    "            occupation_numbers=calculate_occupations(eigenenergies.numpy(),num_basis_functions,num_electrons,radical,pseudo_finite_temp=0,relevant_orbitals=-1)\n",
    "            occupation_numbers=torch.from_numpy(occupation_numbers)\n",
    "            P=torch.matmul(eigenorbitals[:,:int(np.ceil(occupied_orbitals))],(eigenorbitals[:,:int(np.ceil(occupied_orbitals))].T)*occupation_numbers[:int(np.ceil(occupied_orbitals)),None])\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Calculate P: ',t2-t1)\n",
    "            P_time+=t2-t1\n",
    "            t1=time.time()\n",
    "            G=calculate_G(P.numpy(),V_ee,ijkl_list,num_basis_functions,relevant_V_ee_elements)\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Caclculate G: ',t2-t1)\n",
    "            G_time+=t2-t1\n",
    "            t1=time.time()\n",
    "            G=torch.from_numpy(G)\n",
    "            F=torch.add(H_core,G)\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Caclculate F: ',t2-t1)\n",
    "            F_time+=t2-t1\n",
    "\n",
    "            t1=time.time()\n",
    "            SPF=torch.matmul(S,(torch.matmul(P,F)))\n",
    "            commutator_matrices[i]=SPF-SPF.t()\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Caclculate SPF-FPS: ',t2-t1)\n",
    "            SPF_time+=t2-t1\n",
    "            t1=time.time()\n",
    "            stored_fock_matrices[i]=F\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Store F: ',t2-t1)\n",
    "            store_F_time+=t2-t1\n",
    "\n",
    "            t1=time.time()\n",
    "            for j in range(i+1):\n",
    "                error_ij=torch.sum(torch.mul(commutator_matrices[i],commutator_matrices[j]))\n",
    "                error_matrix[i,j]=error_ij\n",
    "                error_matrix[j,i]=error_ij\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Calculate DIIS coeffs: ',t2-t1)\n",
    "            DIIS_coef_time+=t2-t1\n",
    "            \n",
    "            t1=time.time()\n",
    "            lhs=np.zeros((i+2,i+2),dtype=datatype)+1\n",
    "            lhs[i+1,i+1]=0\n",
    "            lhs[:i+1,:i+1]=error_matrix[:i+1,:i+1]\n",
    "            min_energy=1.0\n",
    "            for j in range(max(0,i+1-current_DIIS_linear_equations),i+1):\n",
    "                if (saved_energies[j]<min_energy):\n",
    "                    min_energy=saved_energies[j]\n",
    "                    min_j=j\n",
    "            for j in range(max(0,i+1-current_DIIS_linear_equations),i+1):\n",
    "                if (j!=min_j):\n",
    "                    lhs[j,j]*=DIIS_penalty\n",
    "                    \n",
    "            rhs=np.zeros(i+2,dtype=datatype)\n",
    "            rhs[i+1]=1\n",
    "            if (error_matrix[i,i]<1.0e-20):\n",
    "                energy=torch.sum(torch.mul(P,torch.add(H_core,0.5*G))).numpy()\n",
    "                saved_energies[i]=energy\n",
    "                return energy,P.numpy(),P_0,G.numpy(),G_0,H_core.numpy(),F.numpy(),eigenorbitals.numpy(),eigenenergies.numpy(),occupancies\n",
    "            coef_start=max(0,i+1-current_DIIS_linear_equations)\n",
    "            DIIS_coeffs=np.linalg.solve(lhs[coef_start:,coef_start:],rhs[coef_start:])\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Calculate DIIS: ',t2-t1)\n",
    "            DIIS_time+=t2-t1\n",
    "            \n",
    "            t1=time.time()\n",
    "            DIIS_coeffs=torch.from_numpy(DIIS_coeffs)\n",
    "            F_new=torch.sum(DIIS_coeffs[:i+1-coef_start].to(torch.float32)[:,None,None]*stored_fock_matrices[coef_start:i+1],0)\n",
    "            F=pulay_mixing_rate*F_new+(1.0-pulay_mixing_rate)*old_F\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Construct new F: ',t2-t1)\n",
    "            new_F_time+=t2-t1\n",
    "\n",
    "            t1=time.time()\n",
    "            energy=torch.sum(torch.mul(P,torch.add(H_core,0.5*G))).numpy()\n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Calculate energy: ',t2-t1)\n",
    "            energy_time+=t2-t1\n",
    "            t1=time.time()\n",
    "            delta_P=old_P-P\n",
    "            scf_evaluation=(torch.sqrt(torch.mean(delta_P*delta_P))).numpy()             \n",
    "            t2=time.time()\n",
    "            if (display_runtimes): print('Calculate convergence crit: ',t2-t1)\n",
    "            conv_crit_time+=t2-t1\n",
    "            if (verbosity==1):\n",
    "                if (i<10): print(' ',end='')\n",
    "                scf_evaluation_log=int(np.floor(np.log10(scf_evaluation)))\n",
    "                if (scf_evaluation_log<0):\n",
    "                    if (scf_evaluation_log>=-9):\n",
    "                        print('      '+str(i)+' | '+str('{0:.5f}'.format(scf_evaluation/(10**scf_evaluation_log)))+'e'+str(scf_evaluation_log)+'  | '+str(energy))\n",
    "                    else: \n",
    "                        print('      '+str(i)+' | '+str('{0:.5f}'.format(scf_evaluation/(10**scf_evaluation_log)))+'e'+str(scf_evaluation_log)+' | '+str(energy))\n",
    "                else:\n",
    "                    print('      '+str(i)+' | '+str('{0:.5f}'.format(scf_evaluation))+' | '+str(energy))\n",
    "            if (scf_evaluation<scf_tolerance):\n",
    "                if (verbosity==1): \n",
    "                    print('SCF times:')\n",
    "                    print('    |Core Hamiltonian: '+str(np.round(H_core_time,4))+' s')\n",
    "                    print('    |Square root of inverse: '+str(np.round(Inverse_time,4))+' s')\n",
    "                    print('    |SAD guess: '+str(np.round(SAD_time,4))+' s')\n",
    "                    print('    |G tensor: '+str(np.round(G_time,4))+' s')\n",
    "                    print('    |Fock matrix construction: '+str(np.round(F_time,4))+' s')\n",
    "                    print('    |Commutator SPF-FPS: '+str(np.round(SPF_time,4))+' s')\n",
    "                    print('    |Save Fock matrix: '+str(np.round(store_F_time,4))+' s')\n",
    "                    print('    |Error matrix calculation: '+str(np.round(error_matrix_time,4))+' s')\n",
    "                    print('    |Energy calculation: '+str(np.round(energy_time,4))+' s')\n",
    "                    print('    |Transform Fock matrix: '+str(np.round(F_transform_time,4))+' s')\n",
    "                    print('    |Eigenvalue calculation: '+str(np.round(eigenvalue_time,4))+' s')\n",
    "                    print('    |Transform eigenorbitals: '+str(np.round(eigenorbitals_time,4))+' s')\n",
    "                    print('    |Density matrix: '+str(np.round(P_time,4))+' s')\n",
    "                    print('    |DIIS coefficients: '+str(np.round(DIIS_coef_time,4))+' s')\n",
    "                    print('    |DIIS equations: '+str(np.round(DIIS_time,4))+' s')\n",
    "                    print('    |Mix new Fock matrix: '+str(np.round(new_F_time,4))+' s')\n",
    "                    print('    |Convergence criterium: '+str(np.round(conv_crit_time,4))+' s')\n",
    "                return energy,P.numpy(),P_0,G.numpy(),G_0,H_core.numpy(),F.numpy(),eigenorbitals.numpy(),eigenenergies.numpy(),occupancies\n",
    "        \n",
    "        if (verbosity==1): \n",
    "            print('SCF cycle did not meet specified tolerance.')\n",
    "            if (verbosity==1): \n",
    "                print('SCF times:')\n",
    "                print('    |Core Hamiltonian: '+str(np.round(H_core_time,4))+' s')\n",
    "                print('    |Square root of inverse: '+str(np.round(Inverse_time,4))+' s')\n",
    "                print('    |SAD guess: '+str(np.round(SAD_time,4))+' s')\n",
    "                print('    |G tensor: '+str(np.round(G_time,4))+' s')\n",
    "                print('    |Fock matrix construction: '+str(np.round(F_time,4))+' s')\n",
    "                print('    |Commutator SPF-FPS: '+str(np.round(SPF_time,4))+' s')\n",
    "                print('    |Save Fock matrix: '+str(np.round(store_F_time,4))+' s')\n",
    "                print('    |Error matrix calculation: '+str(np.round(error_matrix_time,4))+' s')\n",
    "                print('    |Energy calculation: '+str(np.round(energy_time,4))+' s')\n",
    "                print('    |Transform Fock matrix: '+str(np.round(F_transform_time,4))+' s')\n",
    "                print('    |Eigenvalue calculation: '+str(np.round(eigenvalue_time,4))+' s')\n",
    "                print('    |Transform eigenorbitals: '+str(np.round(eigenorbitals_time,4))+' s')\n",
    "                print('    |Density matrix: '+str(np.round(P_time,4))+' s')\n",
    "                print('    |DIIS coefficients: '+str(np.round(DIIS_coef_time,4))+' s')\n",
    "                print('    |DIIS equations: '+str(np.round(DIIS_time,4))+' s')\n",
    "                print('    |Mix new Fock matrix: '+str(np.round(new_F_time,4))+' s')\n",
    "                print('    |Convergence criterium: '+str(np.round(conv_crit_time,4))+' s')\n",
    "        \n",
    "        return energy,P.numpy(),P_0,G.numpy(),G_0,H_core.numpy(),F.numpy(),eigenorbitals.numpy(),eigenenergies.numpy(),occupancies\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def run_HF(coordinates,elements,enable_plotting=False,rthf=False,partitioning=False,early_stopping=False):\n",
    "    \"\"\"\n",
    "    Main routine to run a Hartree-Fock calculation.\n",
    "    Inputs: Coordinates and periodic numbers of an atomic configuration. \n",
    "            coordinates: Array of coordinates (num_atoms x 3), \n",
    "            elements: Array of the element of each atom (integer) \n",
    "            enable_plotting: returns additional parts of the HF calculation to enable plotting electronic quantities like the density or molecular orbitals\n",
    "            rthf: returns the intermediates (matrices, integrals, orbitals, ...) required for further processing, see the corresponding return statement at the bottom\n",
    "            partitioning: returns the intermediates required for a partitioning analysis\n",
    "            early_stopping: stops directly after the basis-set preprocessing and returns only the basis/gaussian bookkeeping arrays\n",
    "    Returns: energy of the given configuration (default); with one of the flags set, the corresponding extended tuple instead.\n",
    "    NOTE(review): relies on module-level settings such as verbosity, added_electrons, datatype and print_ERI_relevance -- defined elsewhere in this file.\n",
    "    \"\"\"\n",
    "    # Hard-coded switch for an alternative return signature (see bottom); kept False here.\n",
    "    alpha_fold=False\n",
    "\n",
    "\n",
    "    start_preparations=time.time()\n",
    "\n",
    "    # The center of mass is only needed for the rthf return tuple.\n",
    "    if (rthf):\n",
    "        center_of_mass=calculate_center_of_mass(coordinates,elements,len(elements))\n",
    "\n",
    "    num_atoms=len(coordinates)\n",
    "    num_basis_functions=calculate_num_basis_functions(elements,num_atoms)\n",
    "\n",
    "    # Even electron count: integer number of doubly occupied orbitals.\n",
    "    # Odd electron count: occupied_orbitals stays fractional and the system is flagged as a radical.\n",
    "    radical=False\n",
    "    if ((np.sum(elements)+added_electrons)%2==0):\n",
    "        occupied_orbitals=int(np.floor(0.5*(np.sum(elements)+added_electrons)+0.1))\n",
    "    else:\n",
    "        occupied_orbitals=0.5*(np.sum(elements)+added_electrons)\n",
    "        radical=True\n",
    "    \n",
    "    # Basis-set bookkeeping: map basis functions to gaussian primitives and atoms.\n",
    "    num_gaussian_functions,basis_functions_index_list,gaussian_functions_index_list,atom_of_basisfunction,type_of_basis_function\\\n",
    "                    =calculate_num_gaussian_functions(elements,num_atoms,num_basis_functions)\n",
    "    gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents\\\n",
    "                    =calculate_gaussian_function_inputs(elements,coordinates,num_atoms,num_gaussian_functions)\n",
    "\n",
    "    \n",
    "    num_orbital_parts,orbital_parts_index_list=calculate_num_orbital_parts(elements,num_atoms,num_basis_functions) \n",
    "    num_parts_gaussian_functions,orbital_parts_gaussian_index_list\\\n",
    "                    =calculate_gaussians_for_orbital_parts(elements,num_atoms,num_orbital_parts)\n",
    "    orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents=calculate_orbital_parts_preprocessing(elements,coordinates,num_parts_gaussian_functions,num_atoms)\n",
    "    \n",
    "    # Pre-select the (i,j) density pairs that can contribute non-negligibly.\n",
    "    ij_list_no_duplicates,gaussians_for_densities,gaussians_for_densities_index_list,num_gaussians_for_densities,relevant_densities_no_duplicates,relevance_matrix,relevance_matrix_derivative\\\n",
    "                    =calculate_relevant_densities(orbital_parts_coefficients,orbital_parts_exponents,orbital_parts_coordinates,\n",
    "                                                  orbital_parts_index_list,orbital_parts_gaussian_index_list,atom_of_basisfunction,gaussian_functions_index_list,\n",
    "                                                  coordinates,num_gaussian_functions,num_basis_functions)\n",
    "    \n",
    "    if (early_stopping):\n",
    "        return gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\\\n",
    "                        ij_list_no_duplicates,atom_of_basisfunction,gaussian_functions_index_list,type_of_basis_function,num_basis_functions\n",
    "\n",
    "    S_terms,S_terms_derivatives,exp_sums,center_coords\\\n",
    "                    =overlap_contributions(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                           gaussians_for_densities,gaussians_for_densities_index_list,num_gaussians_for_densities,relevant_densities_no_duplicates,num_gaussian_functions)    \n",
    "    \n",
    "\n",
    "    # Derivative arrays are not needed for a plain energy run; they are freed and replaced\n",
    "    # by minimal dummy arrays so downstream calls still receive valid arguments.\n",
    "    del relevance_matrix_derivative,S_terms_derivatives\n",
    "    relevance_matrix_derivative=np.zeros((1,3),dtype=datatype)\n",
    "    S_terms_derivatives=np.zeros((1,3),dtype=datatype)\n",
    "\n",
    "    stop_preparations=time.time()\n",
    "    if (verbosity==1):\n",
    "        print('Preparations:           '+str(np.round(stop_preparations-start_preparations,4))+' s')\n",
    "    \n",
    "\n",
    "\n",
    "    start_overlap_kinetic=time.time()\n",
    "    S,overlap_matrix_wavefunction_derivative,T,kinetic_matrix_wavefunction_derivative,\\\n",
    "                nuclei_prefactor,weighted_coords,orbitals_center_coords=calculate_overlap_and_kinetic_matrix(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                                                                      gaussian_functions_index_list,gaussians_for_densities,gaussians_for_densities_index_list,\n",
    "                                                                                      ij_list_no_duplicates,num_gaussians_for_densities,relevant_densities_no_duplicates,num_basis_functions,num_gaussian_functions)\n",
    "\n",
    "    # Same pattern as above: free the unused derivative matrices and keep small placeholders.\n",
    "    del overlap_matrix_wavefunction_derivative,kinetic_matrix_wavefunction_derivative\n",
    "    overlap_matrix_wavefunction_derivative=np.zeros((1,3),dtype=datatype)\n",
    "    kinetic_matrix_wavefunction_derivative=np.zeros((1,3),dtype=datatype)\n",
    "\n",
    "    stop_overlap_kinetic=time.time()\n",
    "    if (verbosity==1):\n",
    "        print('Overlap/Kinetic matrix: '+str(np.round(stop_overlap_kinetic-start_overlap_kinetic,4))+' s')\n",
    "\n",
    "    start_nuclei=time.time()\n",
    "    V_en,nuclei_matrix_hamiltonian_derivative,nuclei_matrix_wavefunction_derivative\\\n",
    "                    =calculate_nuclei_matrix_and_derivatives(gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                                             ij_list_no_duplicates,gaussians_for_densities,gaussians_for_densities_index_list,num_gaussians_for_densities,relevant_densities_no_duplicates,\n",
    "                                                             gaussian_functions_index_list,atom_of_basisfunction,coordinates,elements,\n",
    "                                                             relevance_matrix,relevance_matrix_derivative,\n",
    "                                                             nuclei_prefactor,weighted_coords,orbitals_center_coords,num_atoms,num_basis_functions)\n",
    "\n",
    "\n",
    "    stop_nuclei=time.time()\n",
    "    if (verbosity==1):\n",
    "        print('Nuclei matrix:          '+str(np.round(stop_nuclei-start_nuclei,4))+' s') \n",
    "        print('Electron-electron integrals:')\n",
    "    start_electrons=time.time()\n",
    "\n",
    "    # Flatten the primitive pair data into plain arrays for the ERI loops below.\n",
    "    gaussian_functions_exponents_i_list,gaussian_functions_exponents_j_list,overlap_list,\\\n",
    "                    overlap_derivative_i_x_list,overlap_derivative_i_y_list,overlap_derivative_i_z_list,overlap_derivative_j_x_list,overlap_derivative_j_y_list,overlap_derivative_j_z_list,\\\n",
    "                    exp_sum_list,center_coords_list,ij_list_no_duplicates_new,gaussians_limits=\\\n",
    "                    electron_integrals_list_generation(gaussian_functions_exponents,S_terms,S_terms_derivatives,exp_sums,center_coords,gaussians_for_densities,gaussians_for_densities_index_list,\n",
    "                                                       ij_list_no_duplicates,gaussian_functions_index_list,type_of_basis_function,relevant_densities_no_duplicates)\n",
    "    \n",
    "\n",
    "    # The list generation reorders the density pairs; bring the relevance matrix to the same ordering.\n",
    "    relevance_matrix=matrix_new_indexing_order(ij_list_no_duplicates,ij_list_no_duplicates_new,relevance_matrix,num_basis_functions)\n",
    "\n",
    "    t_relevance_start=time.time()\n",
    "    relevance_mask,relevance_d1d2,num_relevant=electron_integrals_distance_relevance(relevance_matrix,ij_list_no_duplicates_new,atom_of_basisfunction,coordinates,\n",
    "                                                                                     orbitals_center_coords,relevant_densities_no_duplicates)\n",
    "    t_relevance_stop=time.time()\n",
    "    if (verbosity==1): print('  >Relevance computations:        '+str(np.round(t_relevance_stop-t_relevance_start,4))+' s')\n",
    "    \n",
    "\n",
    "    t_eri_start=time.time()\n",
    "    result,derivative=electron_arrays_calculation_loops(gaussian_functions_exponents_i_list,gaussian_functions_exponents_j_list,\n",
    "                                                        overlap_list,exp_sum_list,\n",
    "                                                        overlap_derivative_i_x_list,overlap_derivative_i_y_list,overlap_derivative_i_z_list,\n",
    "                                                        overlap_derivative_j_x_list,overlap_derivative_j_y_list,overlap_derivative_j_z_list,\n",
    "                                                        center_coords_list,relevance_mask,relevance_d1d2,num_relevant,\n",
    "                                                        ij_list_no_duplicates,gaussians_limits,atom_of_basisfunction,orbital_parts_index_list,coordinates,\n",
    "                                                        relevant_densities_no_duplicates,num_orbital_parts,num_basis_functions)\n",
    "\n",
    "    # Optional diagnostics: histogram of ERI magnitudes by order of magnitude.\n",
    "    if (print_ERI_relevance):\n",
    "        result_abs=np.abs(result)\n",
    "        print('Total values: '+str((result_abs>-0.1).sum()))\n",
    "        print('Nonzero values: '+str((result_abs>0.0).sum()))\n",
    "        print('Values > 1.0e-10: '+str((result_abs>1.0e-10).sum()))\n",
    "        print('Values > 1.0e-9: '+str((result_abs>1.0e-9).sum()))\n",
    "        print('Values > 1.0e-8: '+str((result_abs>1.0e-8).sum()))\n",
    "        print('Values > 1.0e-7: '+str((result_abs>1.0e-7).sum()))\n",
    "        print('Values > 1.0e-6: '+str((result_abs>1.0e-6).sum()))\n",
    "        print('Values > 1.0e-5: '+str((result_abs>1.0e-5).sum()))\n",
    "        print('Values > 1.0e-4: '+str((result_abs>1.0e-4).sum()))\n",
    "        print('Values > 1.0e-3: '+str((result_abs>1.0e-3).sum()))\n",
    "        print('Values > 1.0e-2: '+str((result_abs>1.0e-2).sum()))\n",
    "        print('Values > 1.0e-1: '+str((result_abs>1.0e-1).sum()))\n",
    "        print('Values > 1.0e0 : '+str((result_abs>1.0e0 ).sum()))\n",
    "\n",
    "    t_eri_stop=time.time()\n",
    "    if (verbosity==1): print('  >ERI computations:              '+str(np.round(t_eri_stop-t_eri_start,4))+' s')\n",
    "\n",
    "    # Second screening pass: ERIs with magnitude <= 1.0e-5 are zeroed out and the\n",
    "    # remaining arrays are compacted to the surviving entries only.\n",
    "    t_mask_start=time.time()\n",
    "    result=torch.from_numpy(result)\n",
    "    relevance_d1d2=torch.from_numpy(relevance_d1d2)\n",
    "    mask_V_ee=torch.where(torch.abs(result)>1.0e-5,1.0,0.0)\n",
    "    result=result*mask_V_ee\n",
    "    nonzeros=result.nonzero().reshape(-1)\n",
    "    result=(result[nonzeros]).numpy()\n",
    "    relevance_d1d2=(relevance_d1d2[nonzeros]).numpy()\n",
    "    num_relevant=len(result)\n",
    "    t_mask_stop=time.time()\n",
    "\n",
    "    if (verbosity==1): print('  >Second relevance computations: '+str(np.round(t_mask_stop-t_mask_start,4))+' s')\n",
    "\n",
    "\n",
    "    t_loop_start=time.time()\n",
    "    V_ee,electron_matrix_wavefunction_derivative,ijkl_list,relevant_V_ee_elements\\\n",
    "                    =electron_integrals_with_arrays(result,derivative,relevance_d1d2,\n",
    "                                                    relevance_matrix,relevance_matrix_derivative,relevance_mask,num_relevant,\n",
    "                                                    ij_list_no_duplicates_new,gaussians_limits,atom_of_basisfunction,orbital_parts_index_list,coordinates,orbitals_center_coords,\n",
    "                                                    relevant_densities_no_duplicates,num_orbital_parts,num_basis_functions)\n",
    "    t_loop_stop=time.time()\n",
    "    if (verbosity==1): print('  >Process ERIs:                  '+str(np.round(t_loop_stop-t_loop_start,4))+' s')\n",
    "\n",
    "    # NOTE(review): relevance_d1d2 is deleted, re-created as a dummy, then deleted again --\n",
    "    # the second del leaves the name undefined. Harmless since it is not used below, but the\n",
    "    # middle assignment is redundant; likely a leftover from the placeholder pattern above.\n",
    "    del relevance_d1d2\n",
    "    relevance_d1d2=np.zeros((1,3),dtype=datatype)\n",
    "    del relevance_d1d2\n",
    "\n",
    "    del result,S_terms,S_terms_derivatives,exp_sums,center_coords\n",
    "    \n",
    "    stop_electrons=time.time()\n",
    "    if (verbosity==1):\n",
    "        print('Electron tensor:        '+str(np.round(stop_electrons-start_electrons,4))+' s')\n",
    "\n",
    "    start_ions=time.time()\n",
    "    E_nn=calculate_ionic_energy(coordinates,elements,num_atoms)\n",
    "    stop_ions=time.time()\n",
    "    if (verbosity==1):\n",
    "        print('Ionic energy:           '+str(np.round(stop_ions-start_ions,4))+' s')\n",
    "        print('Starting SCF cycle: ')\n",
    "        print('SCF step | convergence measure:')\n",
    "    \n",
    "    \n",
    "    start_scf=time.time()\n",
    "\n",
    "    # The one-electron matrices were built in sparse (pair-list) form; expand to dense for the SCF.\n",
    "    S=sparse_to_dense(ij_list_no_duplicates,S,num_basis_functions)\n",
    "    T=sparse_to_dense(ij_list_no_duplicates,T,num_basis_functions)\n",
    "\n",
    "    V_en=sparse_to_dense(ij_list_no_duplicates,V_en,num_basis_functions)\n",
    "\n",
    "    # Self-consistent field iteration; returns the converged electronic energy and all SCF intermediates.\n",
    "    electronic_energy,P,P_0,G,G_0,H_core,F,eigenorbitals,eigenenergies,occupancies=scf(S,T,V_en,V_ee,nuclei_matrix_hamiltonian_derivative,overlap_matrix_wavefunction_derivative,kinetic_matrix_wavefunction_derivative,\n",
    "                                                        nuclei_matrix_wavefunction_derivative,electron_matrix_wavefunction_derivative,atom_of_basisfunction,basis_functions_index_list,ijkl_list,\n",
    "                                                        coordinates,elements,occupied_orbitals,radical,num_atoms,num_basis_functions,relevant_V_ee_elements)\n",
    " \n",
    "    stop_scf=time.time()\n",
    "    if (verbosity==1):\n",
    "        print('SCF cycle:              '+str(np.round(stop_scf-start_scf,4))+' s')\n",
    "    \n",
    "    # Total energy = electronic energy + nucleus-nucleus repulsion.\n",
    "    energy=electronic_energy+E_nn\n",
    "    if (verbosity==1):\n",
    "        print('_______________________________________')\n",
    "        print('Total time:             '+str(np.round(stop_scf-start_preparations,4))+' s')\n",
    "        print('Total energy: '+str(energy)+' Ha')\n",
    "        print()\n",
    "        print()\n",
    "\n",
    "\n",
    "    # Flag-dependent return signatures; the default (all flags False, no plotting) is just the energy.\n",
    "    if (alpha_fold):\n",
    "        return energy,P,P_0,G,G_0,H_core,eigenorbitals,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\\\n",
    "               ij_list_no_duplicates,atom_of_basisfunction,gaussian_functions_index_list,type_of_basis_function,num_basis_functions\n",
    "\n",
    "    if (rthf):\n",
    "        return gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,gaussians_for_densities,gaussians_for_densities_index_list,\\\n",
    "               F,S,P,eigenenergies,eigenorbitals,occupancies,H_core,ij_list_no_duplicates,relevant_densities_no_duplicates,V_ee,ijkl_list,relevant_V_ee_elements,num_basis_functions,center_of_mass,\\\n",
    "               gaussian_functions_index_list,atom_of_basisfunction,type_of_basis_function\n",
    "    if (partitioning):\n",
    "            return energy,P,F,eigenorbitals,eigenenergies,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\\\n",
    "                   ij_list_no_duplicates,atom_of_basisfunction,gaussian_functions_index_list,type_of_basis_function,num_basis_functions\n",
    "    if (not enable_plotting):\n",
    "        return energy\n",
    "    else:\n",
    "        return energy,E_nn,P,F,eigenorbitals,eigenenergies,gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\\\n",
    "               ij_list_no_duplicates,atom_of_basisfunction,gaussian_functions_index_list,type_of_basis_function,num_basis_functions\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "--------------------------------\n",
    "Functions for input file loading\n",
    "--------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def load_coordinates(coordinate_file_path):\n",
    "    \"\"\"\n",
    "    Loads in coordinates, elements and connections which are stored in a .json file\n",
    "    with the PubChem 'PC_Compounds' layout.\n",
    "    Input: coordinate_file_path: path of the json file WITHOUT the '.json' extension.\n",
    "    Returns: structure_coordinates ((num_atoms x 3) float32 array in Bohr; file values are\n",
    "                 in Angstrom and scaled by the module-level constant angstrom_to_bohr),\n",
    "             structure_elements ((num_atoms,) int32 array of periodic numbers),\n",
    "             structure_connections ((num_bonds x 2) int array of bonded atom id pairs).\n",
    "    \"\"\"\n",
    "\n",
    "    with open(coordinate_file_path+'.json') as f:\n",
    "        d=json.load(f)\n",
    "    # Parse as float first (robust against numbers serialized as floats), then cast to integers.\n",
    "    structure_elements=np.array(d['PC_Compounds'][0]['atoms']['element'],dtype='float32').astype('int32')\n",
    "    structure_connections=np.stack([np.array(d['PC_Compounds'][0]['bonds']['aid1'],dtype='float32'),\n",
    "                                    np.array(d['PC_Compounds'][0]['bonds']['aid2'],dtype='float32')]).T\n",
    "    structure_connections=structure_connections.astype('int')\n",
    "    structure_coordinates=np.stack([np.array(d['PC_Compounds'][0]['coords'][0]['conformers'][0]['x'],dtype='float32'),\n",
    "                                    np.array(d['PC_Compounds'][0]['coords'][0]['conformers'][0]['y'],dtype='float32'),\n",
    "                                    np.array(d['PC_Compounds'][0]['coords'][0]['conformers'][0]['z'],dtype='float32')]).T*angstrom_to_bohr\n",
    "    \n",
    "    return structure_coordinates,structure_elements,structure_connections\n",
    "\n",
    "\n",
    "def load_coordinates_pdb(file_path,discard_hetatoms=False,file_type=2):\n",
    "    \"\"\"\n",
    "    Loads in coordinates and elements which are stored in the .pdb file format (saved as a .txt file).\n",
    "    Inputs: file_path: path of the text file WITHOUT the '.txt' extension,\n",
    "            discard_hetatoms: stop reading as soon as a HETATM record is encountered,\n",
    "            file_type: column layout; for older PDB formats file_type=1 might be used (default: 2).\n",
    "    Returns: structure_coordinates ((num_atoms x 3) array in Bohr) and structure_elements\n",
    "             ((num_atoms,) int32 array of periodic numbers).\n",
    "    Several edge cases are included to ensure correct outputs but due to irregularities in PDB files, especially for very large systems, errors can occur in this function.\n",
    "    If input coordinates seem off, it is advised to check this function.\n",
    "    The same holds if a Hartree-Fock run terminates due to singularities (often encountered in the form of invalid inputs for a logarithm). Then the error can be identical\n",
    "    coordinates for two atoms. This can also be the case in PDB entries where there exist two different models for one or more amino acids.\n",
    "    \"\"\"\n",
    "\n",
    "    structure_elements=[]\n",
    "    structure_coordinates=[]\n",
    "    # Only elements up to Argon are recognized; other element symbols are silently skipped.\n",
    "    element_strings=['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar']\n",
    "\n",
    "    with open(file_path+'.txt','r') as file:\n",
    "        reading_coords=False\n",
    "        stop_reading=False\n",
    "\n",
    "        count=0\n",
    "        for line in file:\n",
    "            count+=1\n",
    "            words=line.split()\n",
    "            if (len(words)==0):\n",
    "                # Skip empty lines: indexing words[0] below would otherwise raise an IndexError.\n",
    "                continue\n",
    "\n",
    "            if (words[0]=='ATOM' or words[0]=='HETATM'):\n",
    "                reading_coords=True\n",
    "            if (reading_coords and words[0]=='#'):\n",
    "                break\n",
    "            \n",
    "            if (reading_coords):\n",
    "                # Records without atom coordinates are skipped.\n",
    "                if (words[0]=='TER' or words[0]=='END' or words[0]=='IGN' or words[0]=='CONECT' or words[0]=='ANISOU'):\n",
    "                    continue\n",
    "                # Stop at HETATMs (if requested), at the end of the first model, or at the start of a second model.\n",
    "                if ((words[0]=='HETATM' and discard_hetatoms)\n",
    "                    or (words[0]=='ENDMDL')\n",
    "                    or (words[0]=='MODEL' and words[1]=='2')):\n",
    "                    stop_reading=True\n",
    "                    break\n",
    "\n",
    "                if (file_type==1):\n",
    "                    structure_coordinates.append([words[10],words[11],words[12]])\n",
    "                    for i in range(len(element_strings)):\n",
    "                        if (words[2]==element_strings[i]):\n",
    "                            structure_elements.append(i+1)\n",
    "                            break\n",
    "                elif (file_type==2):\n",
    "                    # Water residues and long residue names shift the coordinate columns.\n",
    "                    if (words[3]=='WAT' or words[2]=='WAT'):\n",
    "                        if (words[0]=='HETATM'):\n",
    "                            structure_coordinates.append([words[5],words[6],words[7]])\n",
    "                        else:\n",
    "                            structure_coordinates.append([words[4],words[5],words[6]])\n",
    "                    elif ((words[2]=='H1' or words[2]=='H2' or words[2]=='OH2') and count>10000):\n",
    "                        # NOTE(review): the count>10000 heuristic assumes solvent water only appears\n",
    "                        # late in large files -- confirm for new input sources.\n",
    "                        if (len(words[3])>5):\n",
    "                            structure_coordinates.append([words[4],words[5],words[6]])\n",
    "                        else:\n",
    "                            structure_coordinates.append([words[5],words[6],words[7]])\n",
    "                        if (words[2]=='OH2'):\n",
    "                            structure_elements.append(8)\n",
    "                        else:\n",
    "                            structure_elements.append(1)\n",
    "                        continue\n",
    "                    else:\n",
    "                        if (len(words[2])<5):\n",
    "                            structure_coordinates.append([words[6],words[7],words[8]])\n",
    "                        else:\n",
    "                            structure_coordinates.append([words[5],words[6],words[7]])\n",
    "                    # NOTE(review): a last column longer than two characters is treated as hydrogen -- confirm.\n",
    "                    if (len(words[-1])>2):\n",
    "                        structure_elements.append(1)\n",
    "                    else:\n",
    "                        for i in range(len(element_strings)):\n",
    "                            if (words[-1]==element_strings[i]):\n",
    "                                structure_elements.append(i+1)\n",
    "                                break\n",
    "            \n",
    "            if (stop_reading): break\n",
    "\n",
    "    # Convert Angstrom to Bohr; datatype and angstrom_to_bohr are module-level settings.\n",
    "    structure_coordinates=np.array(structure_coordinates,dtype=datatype)*angstrom_to_bohr\n",
    "    structure_elements=np.array(structure_elements,dtype='int32')\n",
    "\n",
    "    return structure_coordinates,structure_elements\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "--------\n",
    "Plotting\n",
    "--------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_connections(coordinates,elements,cut=4.0,cut_H=2.6,cut_H_H=0.0):\n",
    "    \"\"\"\n",
    "    Determines which pairs of atoms count as bonded, using distance cutoffs that depend on\n",
    "    whether hydrogens are involved (cut: heavy-heavy, cut_H: heavy-hydrogen, cut_H_H: hydrogen-hydrogen).\n",
    "    This is used for plotting atomic bonds in the functions plot_structure() and single_plot().\n",
    "    Returns a (num_bonds x 2) int32 array of atom index pairs with i<j.\n",
    "    \"\"\"\n",
    "\n",
    "    num_atoms=len(elements)\n",
    "    # Over-allocate for the maximum number of pairs; the buffer is trimmed before returning.\n",
    "    bonds=np.zeros((int(num_atoms*(num_atoms+1)/2.0),2),dtype='int32')\n",
    "    num_bonds=0\n",
    "    for i in range(num_atoms):\n",
    "        for j in range(i+1,num_atoms):\n",
    "            dx=coordinates[i,0]-coordinates[j,0]\n",
    "            dy=coordinates[i,1]-coordinates[j,1]\n",
    "            dz=coordinates[i,2]-coordinates[j,2]\n",
    "            distance=np.sqrt(dx*dx+dy*dy+dz*dz)\n",
    "            # Select the cutoff matching this pair type.\n",
    "            if (elements[i]>1 and elements[j]>1):\n",
    "                threshold=cut\n",
    "            elif (elements[i]==1 and elements[j]==1):\n",
    "                threshold=cut_H_H\n",
    "            else:\n",
    "                threshold=cut_H\n",
    "            if (distance<threshold):\n",
    "                bonds[num_bonds,0]=i\n",
    "                bonds[num_bonds,1]=j\n",
    "                num_bonds+=1\n",
    "    return bonds[:num_bonds]\n",
    "\n",
    "\n",
    "\n",
    "def plot_structure(coordinates,elements,structure):\n",
    "    \"\"\"\n",
    "    Plots the electronic structure of a system. The electronic structure is hereby given as as real-space grid in three dimensions.\n",
    "    The coordinates and elements of the atoms of the system are also given and are plotted similar to the function single_plot() below.\n",
    "    The parametrization of the scatter plot - especially the point size - might need to be changed depending on the size of the system that is plotted.\n",
    "    \"\"\"\n",
    "\n",
    "    dir_1 = 0 \n",
    "    dir_2 = 1 \n",
    "    dir_3 = 2 \n",
    "\n",
    "    density_mean_value=0.3\n",
    "    density_variation=0.15\n",
    "\n",
    "    edge_cut=3.0\n",
    "    edge_cut_pixels=int(edge_cut/pixel_size)\n",
    "    structure=structure[edge_cut_pixels:-edge_cut_pixels,edge_cut_pixels:-edge_cut_pixels,edge_cut_pixels:-edge_cut_pixels]\n",
    "    \n",
    "    x_min=np.min(coordinates[:,dir_1])-additional_space+edge_cut\n",
    "    y_min=np.min(coordinates[:,dir_2])-additional_space+edge_cut\n",
    "    z_min=np.min(coordinates[:,dir_3])-additional_space+edge_cut\n",
    "    points_x=structure.shape[dir_1]\n",
    "    points_y=structure.shape[dir_2]\n",
    "    points_z=structure.shape[dir_3]\n",
    "    pixel_length=pixel_size\n",
    "    interpolation_num=1\n",
    "    offset_pixels=0\n",
    "\n",
    "    str_xyz = ['x [$a_0$]','y [$a_0$]','z [$a_0$]']\n",
    "    coord_str = [str_xyz[dir_1],str_xyz[dir_2],str_xyz[dir_3]]\n",
    "\n",
    "    x = np.indices(structure.shape)[dir_1]\n",
    "    y = np.indices(structure.shape)[dir_2]\n",
    "    z = np.indices(structure.shape)[dir_3]\n",
    "\n",
    "    fig = plt.figure(figsize=(20, 20))\n",
    "    ax3D = fig.add_subplot(projection='3d',computed_zorder=False)\n",
    "    points_size = 0.1*50000.0*pixel_size*pixel_size/(points_x*points_y*points_z)**(2.0/3.0)\n",
    "    ax3D.scatter(x,y,z,s=points_size,cmap='hot_r',c=np.where(np.abs(np.abs(structure)-density_mean_value)<density_variation,structure,None))\n",
    "    \n",
    "    atom_scaling=0.015\n",
    "    for atom in range(len(elements)):\n",
    "        ax = ((coordinates[atom,dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num\n",
    "        ay = ((coordinates[atom,dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num\n",
    "        az = ((coordinates[atom,dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num\n",
    "        if (elements[atom]==1):\n",
    "            ax3D.scatter(ax,ay,az,s=200*atom_scaling,color='black',zorder=10) \n",
    "            ax3D.scatter(ax,ay,az,s=150*atom_scaling,color='white',zorder=10)    \n",
    "        if (elements[atom]==6):\n",
    "            ax3D.scatter(ax,ay,az,s=350*atom_scaling,color='black',zorder=10) \n",
    "            ax3D.scatter(ax,ay,az,s=270*atom_scaling,color='grey',zorder=10)   \n",
    "        if (elements[atom]==7):\n",
    "            ax3D.scatter(ax,ay,az,s=350*atom_scaling,color='black',zorder=10) \n",
    "            ax3D.scatter(ax,ay,az,s=270*atom_scaling,color='blue',zorder=10)      \n",
    "        if (elements[atom]==8):\n",
    "            ax3D.scatter(ax,ay,az,s=350*atom_scaling,color='black',zorder=10) \n",
    "            ax3D.scatter(ax,ay,az,s=270*atom_scaling,color='red',zorder=10)     \n",
    "        if (elements[atom]==16):\n",
    "            ax3D.scatter(ax,ay,az,s=600*atom_scaling,color='black',zorder=10) \n",
    "            ax3D.scatter(ax,ay,az,s=500*atom_scaling,color='yellow',zorder=10)    \n",
    "\n",
    "    connections_plot=calculate_connections(coordinates,elements)\n",
    "    \n",
    "    for line in range(len(connections_plot)):\n",
    "        plt.plot([((coordinates[connections_plot[line][0]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates[connections_plot[line][1]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "                 [((coordinates[connections_plot[line][0]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates[connections_plot[line][1]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              zs=[((coordinates[connections_plot[line][0]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates[connections_plot[line][1]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              color='black',linewidth=1.0) \n",
    "        plt.plot([((coordinates[connections_plot[line][0]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates[connections_plot[line][1]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "                 [((coordinates[connections_plot[line][0]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates[connections_plot[line][1]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              zs=[((coordinates[connections_plot[line][0]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates[connections_plot[line][1]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              color='grey',linewidth=0.5) \n",
    "\n",
    "    tick_spacing=10\n",
    "    x_ticks=[]\n",
    "    x_ticks_pos=[]\n",
    "    for xt in range(int(np.floor(np.min(coordinates[:,dir_1]))),int(np.ceil(np.max(coordinates[:,dir_1]))),tick_spacing):\n",
    "        x_ticks.append(xt)\n",
    "        x_ticks_pos.append(((additional_space-edge_cut)/pixel_length+(xt-int(np.floor(np.min(coordinates[:,dir_1]))))/pixel_length)*interpolation_num)\n",
    "    y_ticks=[]\n",
    "    y_ticks_pos=[]\n",
    "    for yt in range(int(np.floor(np.min(coordinates[:,dir_2]))),int(np.ceil(np.max(coordinates[:,dir_2]))),tick_spacing):\n",
    "        y_ticks.append(yt)\n",
    "        y_ticks_pos.append(((additional_space-edge_cut)/pixel_length+(yt-int(np.floor(np.min(coordinates[:,dir_2]))))/pixel_length)*interpolation_num)\n",
    "    z_ticks=[]\n",
    "    z_ticks_pos=[]\n",
    "    for zt in range(int(np.floor(np.min(coordinates[:,dir_3]))),int(np.ceil(np.max(coordinates[:,dir_3]))),tick_spacing):\n",
    "        z_ticks.append(zt)\n",
    "        z_ticks_pos.append(((additional_space-edge_cut)/pixel_length+(zt-int(np.floor(np.min(coordinates[:,dir_3]))))/pixel_length)*interpolation_num)\n",
    "\n",
    "    ax3D.set_aspect('equal')\n",
    "    fig.subplots_adjust(top=1.1)\n",
    "    ax3D.set_xlabel(coord_str[0],fontsize=16)\n",
    "    ax3D.set_ylabel(coord_str[1],fontsize=16)\n",
    "    ax3D.set_zlabel(coord_str[2],fontsize=16)\n",
    "    ax3D.set_xticks(x_ticks_pos,x_ticks,fontsize=10)\n",
    "    ax3D.set_yticks(y_ticks_pos,y_ticks,fontsize=10)\n",
    "    ax3D.set_zticks(z_ticks_pos,z_ticks,fontsize=10)\n",
    "    ax3D.view_init(0,0)\n",
    "    plt.show()\n",
    "    plt.close()\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def single_plot(coordinates,elements):\n",
    "    \"\"\"\n",
    "    Plots a structure of atoms - the coordinates and elements are given.\n",
    "    Bonds are calculated and displayed as well as the atoms which are coloured with respect to their species.\n",
    "    \"\"\"\n",
    "\n",
    "    coordinates_step=coordinates\n",
    "\n",
    "    dir_1 = 0\n",
    "    dir_2 = 1\n",
    "    dir_3 = 2\n",
    "\n",
    "    edge_cut=0.0\n",
    "    \n",
    "    x_min=0\n",
    "    y_min=0\n",
    "    z_min=0\n",
    "    pixel_length=1\n",
    "    interpolation_num=1\n",
    "    offset_pixels=0\n",
    "\n",
    "    str_xyz = ['x [$a_0$]','y [$a_0$]','z [$a_0$]']\n",
    "    coord_str = [str_xyz[dir_1],str_xyz[dir_2],str_xyz[dir_3]]\n",
    "\n",
    "    fig = plt.figure(figsize=(15, 15))\n",
    "    ax3D = fig.add_subplot(projection='3d',computed_zorder=False) \n",
    "\n",
    "    atom_scaling=0.03\n",
    "    for atom in range(len(elements)):\n",
    "        ax = ((coordinates_step[atom,dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num\n",
    "        ay = ((coordinates_step[atom,dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num\n",
    "        az = ((coordinates_step[atom,dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num\n",
    "        if (elements[atom]==1):\n",
    "            p3d = ax3D.scatter(ax,ay,az,s=200*atom_scaling,color='black',zorder=10) \n",
    "            p3d = ax3D.scatter(ax,ay,az,s=150*atom_scaling,color='white',zorder=10)    \n",
    "        if (elements[atom]==6):\n",
    "            p3d = ax3D.scatter(ax,ay,az,s=350*atom_scaling,color='black',zorder=10) \n",
    "            p3d = ax3D.scatter(ax,ay,az,s=270*atom_scaling,color='green',zorder=10)   \n",
    "        if (elements[atom]==7):\n",
    "            p3d = ax3D.scatter(ax,ay,az,s=350*atom_scaling,color='black',zorder=10) \n",
    "            p3d = ax3D.scatter(ax,ay,az,s=270*atom_scaling,color='blue',zorder=10)      \n",
    "        if (elements[atom]==8):\n",
    "            p3d = ax3D.scatter(ax,ay,az,s=350*atom_scaling,color='black',zorder=10) \n",
    "            p3d = ax3D.scatter(ax,ay,az,s=270*atom_scaling,color='red',zorder=10)     \n",
    "        if (elements[atom]==16):\n",
    "            p3d = ax3D.scatter(ax,ay,az,s=600*atom_scaling,color='black',zorder=10) \n",
    "            p3d = ax3D.scatter(ax,ay,az,s=500*atom_scaling,color='yellow',zorder=10)    \n",
    "\n",
    "    connections_plot=calculate_connections(coordinates_step,elements)\n",
    "    \n",
    "    for line in range(len(connections_plot)):\n",
    "        plt.plot([((coordinates_step[connections_plot[line][0]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates_step[connections_plot[line][1]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "                 [((coordinates_step[connections_plot[line][0]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates_step[connections_plot[line][1]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              zs=[((coordinates_step[connections_plot[line][0]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates_step[connections_plot[line][1]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              color='black',linewidth=1.0) \n",
    "        plt.plot([((coordinates_step[connections_plot[line][0]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates_step[connections_plot[line][1]][dir_1]-x_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "                 [((coordinates_step[connections_plot[line][0]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates_step[connections_plot[line][1]][dir_2]-y_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              zs=[((coordinates_step[connections_plot[line][0]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num,\n",
    "                  ((coordinates_step[connections_plot[line][1]][dir_3]-z_min)/pixel_length-offset_pixels)*interpolation_num],\n",
    "              color='grey',linewidth=0.5) \n",
    "\n",
    "    tick_spacing=2\n",
    "    x_ticks=[]\n",
    "    x_ticks_pos=[]\n",
    "    for xt in range(int(np.floor(np.min(coordinates_step[:,dir_1]))),int(np.ceil(np.max(coordinates_step[:,dir_1]))),tick_spacing):\n",
    "        x_ticks.append(xt)\n",
    "        x_ticks_pos.append(((additional_space-edge_cut)/pixel_length+(xt-int(np.floor(np.min(coordinates_step[:,dir_1]))))/pixel_length)*interpolation_num)\n",
    "    y_ticks=[]\n",
    "    y_ticks_pos=[]\n",
    "    for yt in range(int(np.floor(np.min(coordinates_step[:,dir_2]))),int(np.ceil(np.max(coordinates_step[:,dir_2]))),tick_spacing):\n",
    "        y_ticks.append(yt)\n",
    "        y_ticks_pos.append(((additional_space-edge_cut)/pixel_length+(yt-int(np.floor(np.min(coordinates_step[:,dir_2]))))/pixel_length)*interpolation_num)\n",
    "    z_ticks=[]\n",
    "    z_ticks_pos=[]\n",
    "    for zt in range(int(np.floor(np.min(coordinates_step[:,dir_3]))),int(np.ceil(np.max(coordinates_step[:,dir_3]))),tick_spacing):\n",
    "        z_ticks.append(zt)\n",
    "        z_ticks_pos.append(((additional_space-edge_cut)/pixel_length+(zt-int(np.floor(np.min(coordinates_step[:,dir_3]))))/pixel_length)*interpolation_num)\n",
    "\n",
    "    ax3D.set_aspect('equal')\n",
    "    fig.subplots_adjust(top=1.1)\n",
    "    ax3D.set_xlabel(coord_str[0],fontsize=16)\n",
    "    ax3D.set_ylabel(coord_str[1],fontsize=16)\n",
    "    ax3D.set_zlabel(coord_str[2],fontsize=16)\n",
    "    ax3D.set_xticks(x_ticks_pos,x_ticks,fontsize=10)\n",
    "    ax3D.set_yticks(y_ticks_pos,y_ticks,fontsize=10)\n",
    "    ax3D.set_zticks(z_ticks_pos,z_ticks,fontsize=10)\n",
    "    ax3D.set_xlim(np.min(coordinates[:,0])-4, np.max(coordinates[:,0])+4)\n",
    "    ax3D.set_ylim(np.min(coordinates[:,1])-4, np.max(coordinates[:,1])+4)\n",
    "    ax3D.set_zlim(np.min(coordinates[:,2])-4, np.max(coordinates[:,2])+4)\n",
    "    ax3D.view_init(0,0) \n",
    "\n",
    "    plt.show()\n",
    "    plt.close()\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "===============================\n",
    "===============================\n",
    "DIVIDE-AND-CONQUER HARTREE-FOCK\n",
    "===============================\n",
    "===============================\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "-----------------------------------------------------\n",
    "Functions for k-means divide-and-conquer Hartree-Fock\n",
    "-----------------------------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "@njit\n",
    "def naive_neighbor_list(coordinates,cut_off,max_neighbors):\n",
    "    \"\"\"\n",
    "    A naive implementation of a neighbourhood algorithm.\n",
    "    \"\"\"\n",
    "    num_atoms=len(coordinates)\n",
    "    n_list=np.zeros((num_atoms,max_neighbors),dtype=datatype)\n",
    "    n_list_lengths=np.zeros(num_atoms,dtype='int32')\n",
    "    for i in range(num_atoms):\n",
    "        for j in range(num_atoms):\n",
    "            dist_x=coordinates[i,0]-coordinates[j,0]\n",
    "            if (dist_x>cut_off): continue\n",
    "            dist_y=coordinates[i,1]-coordinates[j,1]\n",
    "            if (dist_y>cut_off): continue\n",
    "            dist_z=coordinates[i,2]-coordinates[j,2]\n",
    "            if (dist_z>cut_off): continue\n",
    "            dist=np.sqrt(dist_x*dist_x+dist_y*dist_y+dist_z*dist_z)\n",
    "            if (dist>cut_off): continue\n",
    "            n_list[i,n_list_lengths[i]]=j\n",
    "            n_list_lengths[i]+=1\n",
    "    if (verbosity==1):\n",
    "        print('Mean neigbors: ',np.mean(n_list_lengths))\n",
    "        print('Maximum neigbors: ',np.max(n_list_lengths))\n",
    "    n_list=n_list[:,:np.max(n_list_lengths)]\n",
    "    return n_list,n_list_lengths\n",
    "\n",
    "\n",
    "@njit\n",
    "def k_means_clustering(coordinates,n_list,n_list_lengths,num_clusters,max_optimization_steps=100):\n",
    "    \"\"\"\n",
    "    Calculates clusters for a divide-and-conquer Hartree-Fock calculation based on the k-means algorithm.\n",
    "    \"\"\"\n",
    "\n",
    "    if (len(coordinates)<num_clusters):\n",
    "        print('Error: More clusters than points.')\n",
    "        return None\n",
    "    \n",
    "    num_points=len(coordinates)\n",
    "    means_origins=np.zeros((num_clusters,3),dtype=datatype)\n",
    "    means_origins_new=np.zeros((num_clusters,3),dtype=datatype)\n",
    "    index_list=np.zeros(num_clusters,dtype='int32')-1\n",
    "    assignment_list=np.zeros(num_points,dtype='int32')-1\n",
    "    assignment_list_old=np.zeros(num_points,dtype='int32')-2\n",
    "\n",
    "    for i in range(num_clusters):\n",
    "        valid_index=False\n",
    "        while (not valid_index):\n",
    "            index=np.random.randint(num_points)\n",
    "            if (index not in index_list):\n",
    "                valid_index=True\n",
    "                index_list[i]=index\n",
    "                means_origins[i]=coordinates[index]\n",
    "    means_origins=coordinates[::int(len(coordinates)/num_clusters)]\n",
    "    \n",
    "    for i in range(max_optimization_steps):\n",
    "        cluster_counts=np.zeros(num_clusters,dtype='int32')\n",
    "\n",
    "        for p in range(num_points):\n",
    "            min_distance=1.0e100\n",
    "            current_cluster=-1\n",
    "            for c in range(num_clusters):\n",
    "                distance_x=coordinates[p,0]-means_origins[c,0]\n",
    "                distance_y=coordinates[p,1]-means_origins[c,1]\n",
    "                distance_z=coordinates[p,2]-means_origins[c,2]\n",
    "                distance=np.sqrt(distance_x*distance_x+distance_y*distance_y+distance_z*distance_z)\n",
    "                if (distance<min_distance):\n",
    "                    current_cluster=c\n",
    "                    min_distance=distance\n",
    "            assignment_list[p]=current_cluster\n",
    "            cluster_counts[current_cluster]+=1\n",
    "        \n",
    "        for p in range(num_points):\n",
    "            current_cluster=assignment_list[p]\n",
    "            current_coordinates=coordinates[p]\n",
    "            means_origins_new[current_cluster,0]+=current_coordinates[0]\n",
    "            means_origins_new[current_cluster,1]+=current_coordinates[1]\n",
    "            means_origins_new[current_cluster,2]+=current_coordinates[2]\n",
    "        for c in range(num_clusters):\n",
    "            means_origins_new[c]/=cluster_counts[c]\n",
    "\n",
    "        means_origins=means_origins_new\n",
    "        if (np.array_equal(assignment_list_old,assignment_list)):\n",
    "            break\n",
    "        assignment_list_old=np.copy(assignment_list)\n",
    "\n",
    "    indices_for_clusters=np.zeros((num_clusters,np.max(cluster_counts)),dtype='int32')\n",
    "    indices_for_overlapping_clusters=np.zeros((num_clusters,10*np.max(cluster_counts)),dtype='int32')-1\n",
    "    temp_cluster_index_list=np.zeros(num_clusters,dtype='int32')\n",
    "    for i in range(num_points):\n",
    "        current_cluster=assignment_list[i]\n",
    "        indices_for_clusters[current_cluster,temp_cluster_index_list[current_cluster]]=i\n",
    "        indices_for_overlapping_clusters[current_cluster,temp_cluster_index_list[current_cluster]]=i\n",
    "        temp_cluster_index_list[current_cluster]+=1\n",
    "    \n",
    "    \n",
    "    for i in range(num_points):\n",
    "        current_cluster=assignment_list[i]\n",
    "        for j in range(n_list_lengths[i]):\n",
    "            current_n_atom=n_list[i,j]\n",
    "            if (current_n_atom not in indices_for_overlapping_clusters[current_cluster]):\n",
    "                indices_for_overlapping_clusters[current_cluster,temp_cluster_index_list[current_cluster]]=current_n_atom\n",
    "                temp_cluster_index_list[current_cluster]+=1\n",
    "\n",
    "    if (verbosity==1):\n",
    "        print('Cluster sizes')\n",
    "        print(np.sort(temp_cluster_index_list))   \n",
    "        print('Mean cluster size: ',np.mean(temp_cluster_index_list))         \n",
    "\n",
    "    return means_origins,assignment_list,indices_for_overlapping_clusters,indices_for_clusters\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "--------------------------------------------------\n",
    "Functions for grid divide-and-conquer Hartree-Fock\n",
    "--------------------------------------------------\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "def calculate_parts_center_coordinates(coordinates,partition_length,partition_cut_off):\n",
    "    \"\"\"\n",
    "    Calculates the central coordinates of the subsystems of a divide-and-conquer Hartree-Fock calculation.\n",
    "    \"\"\"\n",
    "\n",
    "    account_for_surface=0.0\n",
    "    x_min,x_max=np.min(coordinates[:,0])+partition_cut_off*account_for_surface,np.max(coordinates[:,0])-partition_cut_off*account_for_surface\n",
    "    y_min,y_max=np.min(coordinates[:,1])+partition_cut_off*account_for_surface,np.max(coordinates[:,1])-partition_cut_off*account_for_surface\n",
    "    z_min,z_max=np.min(coordinates[:,2])+partition_cut_off*account_for_surface,np.max(coordinates[:,2])-partition_cut_off*account_for_surface\n",
    "    x_length,y_length,z_length=x_max-x_min,y_max-y_min,z_max-z_min\n",
    "    x_boxes,y_boxes,z_boxes=int(np.ceil(x_length/partition_length)),int(np.ceil(y_length/partition_length)),int(np.ceil(z_length/partition_length))\n",
    "    num_boxes=x_boxes*y_boxes*z_boxes\n",
    "    print('Partitioning dimensions (x/y/z/total):',x_boxes,y_boxes,z_boxes,num_boxes)\n",
    "    x_space=0.5*(np.linspace(x_min,x_min+(x_boxes-1)*partition_length,x_boxes)+np.linspace(x_max-(x_boxes-1)*partition_length,x_max,x_boxes))\n",
    "    y_space=0.5*(np.linspace(y_min,y_min+(y_boxes-1)*partition_length,y_boxes)+np.linspace(y_max-(y_boxes-1)*partition_length,y_max,y_boxes))\n",
    "    z_space=0.5*(np.linspace(z_min,z_min+(z_boxes-1)*partition_length,z_boxes)+np.linspace(z_max-(z_boxes-1)*partition_length,z_max,z_boxes))\n",
    "    xyz=np.meshgrid(x_space,y_space,z_space,indexing='ij')\n",
    "    xyz=np.reshape(xyz,(3,num_boxes)).T\n",
    "\n",
    "    return xyz\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def select_relevant_atoms(coordinates,elements,center_coordinate,cut_off):\n",
    "    \"\"\"\n",
    "    Selects atoms which are within a cubical (!) cut-off cut_off of a central coordinate center_coordinate.\n",
    "    \"\"\"\n",
    "\n",
    "    num_atoms_total=len(coordinates)\n",
    "    relevant_atoms=np.zeros(20000,dtype='int32')\n",
    "    relevant_atom_count=0\n",
    "    center_coordinate_x,center_coordinate_y,center_coordinate_z=center_coordinate[0],center_coordinate[1],center_coordinate[2]\n",
    "\n",
    "    for i in range(num_atoms_total):\n",
    "        current_coordinates=coordinates[i]\n",
    "        dist_x=current_coordinates[0]-center_coordinate_x\n",
    "        if (np.abs(dist_x)>cut_off): continue\n",
    "        dist_y=current_coordinates[1]-center_coordinate_y\n",
    "        if (np.abs(dist_y)>cut_off): continue\n",
    "        dist_z=current_coordinates[2]-center_coordinate_z\n",
    "        if (np.abs(dist_z)>cut_off): continue\n",
    "        relevant_atoms[relevant_atom_count]=i\n",
    "        relevant_atom_count+=1\n",
    "    \n",
    "    relevant_atoms=relevant_atoms[:relevant_atom_count]\n",
    "    relevant_coordinates=coordinates[relevant_atoms]\n",
    "    relevant_elements=elements[relevant_atoms]\n",
    "\n",
    "    return relevant_atoms,relevant_coordinates,relevant_elements\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def partition_naive_neighbor_list(partition_atoms,partition_coordinates,padded_partition_coordinates,padded_partition_elements,cut_off,\n",
    "                                  parts_center_coordinates,partition_length,partition_num,max_neighbors=10000):\n",
    "    \"\"\"\n",
    "    Calculates the neighbouring atoms of a given selection of atoms. The neighbouring atoms are taken from another selection of atoms which\n",
    "    contains more atoms than the first selection of atoms.\n",
    "    \"\"\"\n",
    "\n",
    "    num_atoms_partition=len(partition_coordinates)\n",
    "    num_atoms_section=len(padded_partition_coordinates)\n",
    "\n",
    "    n_list=np.zeros((num_atoms_partition*max_neighbors),dtype='int32')\n",
    "    n_list_length=0\n",
    "    for i in range(num_atoms_partition):\n",
    "        for j in range(num_atoms_section):\n",
    "            coordinates_i=partition_coordinates[i]\n",
    "            coordinates_j=padded_partition_coordinates[j]\n",
    "            dist_x=coordinates_i[0]-coordinates_j[0]\n",
    "            if (np.abs(dist_x)>cut_off): continue\n",
    "            dist_y=coordinates_i[1]-coordinates_j[1]\n",
    "            if (np.abs(dist_y)>cut_off): continue\n",
    "            dist_z=coordinates_i[2]-coordinates_j[2]\n",
    "            if (np.abs(dist_z)>cut_off): continue\n",
    "            dist=np.sqrt(dist_x*dist_x+dist_y*dist_y+dist_z*dist_z)\n",
    "            if (dist>cut_off): continue\n",
    "            n_list[n_list_length]=j\n",
    "            n_list_length+=1\n",
    "    \n",
    "    partition_center_coordinates=np.copy(parts_center_coordinates[partition_num])\n",
    "    parts_center_coordinates[partition_num]=np.array([1.0e10,1.0e10,1.0e10])\n",
    "    mask=np.where(np.sqrt((parts_center_coordinates[:,0]-partition_center_coordinates[0])**2\n",
    "                         +(parts_center_coordinates[:,1]-partition_center_coordinates[1])**2\n",
    "                         +(parts_center_coordinates[:,2]-partition_center_coordinates[2])**2)<2*partition_length)[0]\n",
    "    relevant_center_coordinates=parts_center_coordinates[mask]\n",
    "    num_relevant_center_coordinates=len(relevant_center_coordinates)\n",
    "    add_to_unique=np.zeros(num_atoms_section,dtype='int32')\n",
    "    added_atoms=0\n",
    "    for i in range(num_atoms_section):\n",
    "        coordinates_i=padded_partition_coordinates[i]\n",
    "        current_partition_center_difference=np.sqrt((coordinates_i[0]-partition_center_coordinates[0])**2\n",
    "                                                   +(coordinates_i[1]-partition_center_coordinates[1])**2\n",
    "                                                   +(coordinates_i[2]-partition_center_coordinates[2])**2)\n",
    "        \n",
    "        add_section_atom=True\n",
    "        for j in range(num_relevant_center_coordinates):\n",
    "            coordinates_j=relevant_center_coordinates[j]\n",
    "            dist_x=coordinates_i[0]-coordinates_j[0]\n",
    "            dist_y=coordinates_i[1]-coordinates_j[1]\n",
    "            dist_z=coordinates_i[2]-coordinates_j[2]\n",
    "            other_partition_center_difference=np.sqrt(dist_x*dist_x+dist_y*dist_y+dist_z*dist_z)\n",
    "            if (other_partition_center_difference<current_partition_center_difference):\n",
    "                add_section_atom=False\n",
    "\n",
    "        if (add_section_atom):\n",
    "            n_list[n_list_length]=i\n",
    "            n_list_length+=1\n",
    "            add_to_unique[added_atoms]=i\n",
    "            added_atoms+=1\n",
    "    parts_center_coordinates[partition_num]=partition_center_coordinates\n",
    "    add_to_unique=add_to_unique[:added_atoms]\n",
    "    len_partition_atoms=len(partition_atoms)\n",
    "    partition_unique=np.zeros(added_atoms+len_partition_atoms,dtype=datatype)\n",
    "    partition_unique[:len_partition_atoms]=partition_atoms\n",
    "    partition_unique[len_partition_atoms:]=add_to_unique\n",
    "    partition_unique=np.unique(partition_unique)\n",
    "    \n",
    "    n_list=n_list[:n_list_length]\n",
    "    padded_partition_atoms=np.unique(n_list)\n",
    "    padded_partition_coordinates=padded_partition_coordinates[padded_partition_atoms]\n",
    "    padded_partition_elements=padded_partition_elements[padded_partition_atoms]\n",
    "    return padded_partition_atoms,padded_partition_coordinates,padded_partition_elements,partition_unique\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_bonds(coordinates,elements,cut=4.0,cut_H=2.6):\n",
    "    \"\"\"\n",
    "    Calculates the bonds of an atomic structure.\n",
    "    \"\"\"\n",
    "\n",
    "    num_atoms=len(coordinates)\n",
    "    max_bonds=12\n",
    "    bonds=np.zeros((num_atoms,max_bonds),dtype='int32')-1\n",
    "    bond_types=np.zeros((num_atoms,max_bonds),dtype='int32')\n",
    "    bond_distances=np.zeros((num_atoms,max_bonds),dtype=datatype)\n",
    "    element_distances=np.zeros((num_atoms,max_bonds,19,19),dtype=datatype)\n",
    "    bond_nums=np.zeros(num_atoms,dtype='int32')\n",
    "    for i in range(num_atoms):\n",
    "        for j in range(i,num_atoms):\n",
    "            if (i!=j):\n",
    "                coordinate_distance_x=coordinates[i,0]-coordinates[j,0]\n",
    "                if (np.abs(coordinate_distance_x)>cut): continue\n",
    "                coordinate_distance_y=coordinates[i,1]-coordinates[j,1]\n",
    "                if (np.abs(coordinate_distance_y)>cut): continue\n",
    "                coordinate_distance_z=coordinates[i,2]-coordinates[j,2]\n",
    "                if (np.abs(coordinate_distance_z)>cut): continue\n",
    "                coordinate_distance=np.sqrt(coordinate_distance_x*coordinate_distance_x+coordinate_distance_y*coordinate_distance_y+coordinate_distance_z*coordinate_distance_z)\n",
    "                if (coordinate_distance>cut): continue\n",
    "                element_i,element_j=elements[i],elements[j]\n",
    "                if (element_i>1 and element_j>1): \n",
    "                    bonds[i,bond_nums[i]]=j\n",
    "                    bond_types[i,bond_nums[i]]=1\n",
    "                    bond_distances[i,bond_nums[i]]=coordinate_distance\n",
    "                    element_distances[i,bond_nums[i],element_i,element_j]=coordinate_distance\n",
    "                    bond_nums[i]+=1\n",
    "                    bonds[j,bond_nums[j]]=i\n",
    "                    bond_types[j,bond_nums[j]]=1\n",
    "                    bond_distances[j,bond_nums[j]]=coordinate_distance\n",
    "                    element_distances[j,bond_nums[j],element_j,element_i]=coordinate_distance\n",
    "                    bond_nums[j]+=1\n",
    "                elif ((element_i==1 and element_j>1) or (element_i>1 and element_j==1)):\n",
    "                    if (coordinate_distance<cut_H):\n",
    "                        bonds[i,bond_nums[i]]=j\n",
    "                        bond_types[i,bond_nums[i]]=2\n",
    "                        bond_distances[i,bond_nums[i]]=coordinate_distance\n",
    "                        element_distances[i,bond_nums[i],element_i,element_j]=coordinate_distance\n",
    "                        bond_nums[i]+=1\n",
    "                        bonds[j,bond_nums[j]]=i\n",
    "                        bond_types[j,bond_nums[j]]=2\n",
    "                        bond_distances[j,bond_nums[j]]=coordinate_distance\n",
    "                        element_distances[j,bond_nums[j],element_j,element_i]=coordinate_distance\n",
    "                        bond_nums[j]+=1\n",
    "\n",
    "    return bonds,bond_types,bond_distances,bond_nums,element_distances\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def avoid_bond_cutting(selection,total_coordinates,total_elements,bonds,bond_nums):\n",
    "    \"\"\"\n",
    "    Function that avoids the cutting of atomic bonds during the selection of subsystems for a large-scale divide-and-conquer Hartree-Fock calculation.\n",
    "    This is done by adding atoms to the system to avoid cutting double bonds and terminating the cutting of single bonds by adding Hydrogen atoms instead\n",
    "    which are placed at a distance corresponding to the usual bond length of the atomic species to hydrogen.\n",
    "    Bonds with hydrogen atoms are never cut.\n",
    "    The algorithm iterates over the subsystem multiple times and adds the necessary atoms in each iteration. The process is stopped if no new atoms got added.\n",
    "\n",
    "    Parameters:\n",
    "        selection:         indices of the atoms initially chosen for the subsystem (into the total system)\n",
    "        total_coordinates: coordinates of all atoms of the total system\n",
    "        total_elements:    atomic numbers of all atoms of the total system\n",
    "        bonds:             per-atom bond partner lists (see calculate_bonds)\n",
    "        bond_nums:         number of bond partners of each atom\n",
    "\n",
    "    Returns:\n",
    "        selection_new:   atom indices of the extended subsystem; -1 entries mark hydrogen caps\n",
    "                         that have no counterpart atom in the total system\n",
    "        coordinates_new: coordinates of the extended subsystem (caps included)\n",
    "        elements_new:    atomic numbers of the extended subsystem (caps are element 1)\n",
    "    \"\"\"\n",
    "\n",
    "    max_iterations=10\n",
    "    num_atoms_selection=len(selection)\n",
    "    current_atoms=num_atoms_selection\n",
    "    # Preallocate generously: numba-compiled code cannot grow arrays dynamically.\n",
    "    new_length=max(2*num_atoms_selection,num_atoms_selection+200)\n",
    "    coordinates_new=np.zeros((new_length,3),dtype=datatype)\n",
    "    coordinates_new[:num_atoms_selection]=total_coordinates[selection]\n",
    "    elements_new=np.zeros(new_length,dtype='int32')\n",
    "    elements_new[:num_atoms_selection]=total_elements[selection]\n",
    "    selection_new=np.zeros(new_length,dtype='int32')\n",
    "    selection_new[:num_atoms_selection]=selection\n",
    "    # h_replacements[k]: total-system atom index that the hydrogen cap in slot k stands in for (-1 = no cap).\n",
    "    h_replacements=np.zeros(new_length,dtype='int32')-1\n",
    "\n",
    "    # NOTE(review): this flag is never set to True anywhere, so the break below is dead code.\n",
    "    no_bonds_cut=False\n",
    "\n",
    "    start=0\n",
    "    for iteration in range(max_iterations):\n",
    "        last_iteration=False\n",
    "        if (iteration==max_iterations-1):\n",
    "            last_iteration=True\n",
    "        if (no_bonds_cut): \n",
    "            break\n",
    "\n",
    "        no_new_bonds=True\n",
    "        # Only the atoms added during the previous iteration are inspected in this one.\n",
    "        start_new=current_atoms\n",
    "        end=current_atoms\n",
    "        for i in range(start,end):\n",
    "            atom_i=selection_new[i]\n",
    "            # -1 entries are hydrogen caps; they carry no bonds of their own.\n",
    "            if (atom_i==-1): \n",
    "                continue\n",
    "            element_i=total_elements[atom_i]\n",
    "            coordinates_i=total_coordinates[atom_i]\n",
    "\n",
    "            for j in range(bond_nums[atom_i]):\n",
    "                atom_j=bonds[atom_i,j]\n",
    "                # NOTE(review): selection_new is zero-padded past current_atoms, so an\n",
    "                # unselected atom with index 0 would always look selected here - TODO confirm.\n",
    "                if (atom_j not in selection_new):\n",
    "                    # A hydrogen cap for atom_j already exists; it gets upgraded to the real atom below.\n",
    "                    if (atom_j in h_replacements):\n",
    "                        change_h_modelling=True\n",
    "                    else: change_h_modelling=False\n",
    "\n",
    "                    keep_bond=True\n",
    "                    element_j=total_elements[atom_j]\n",
    "                    # C-C/C-N/C-O bonds longer than the element-specific cut are the ones\n",
    "                    # that may be terminated with a hydrogen cap instead of being kept.\n",
    "                    if (element_i==6 and (element_j==6 or element_j==7 or element_j==8)): \n",
    "                        coordinates_j=total_coordinates[atom_j]\n",
    "                        dist_x=coordinates_i[0]-coordinates_j[0]\n",
    "                        dist_y=coordinates_i[1]-coordinates_j[1]\n",
    "                        dist_z=coordinates_i[2]-coordinates_j[2]\n",
    "                        dist=np.sqrt(dist_x*dist_x+dist_y*dist_y+dist_z*dist_z)\n",
    "                        if (dist>element_cuts[element_i,element_j]):\n",
    "                            keep_bond=False\n",
    "                    \n",
    "                    # In the final iteration every remaining heavy-atom bond is capped so\n",
    "                    # that the growth of the subsystem terminates.\n",
    "                    if (last_iteration and not (element_i==1 or element_j==1)):\n",
    "                        keep_bond=False\n",
    "\n",
    "                    if (not change_h_modelling):\n",
    "                        if (keep_bond):\n",
    "                            # Pull the real atom into the subsystem.\n",
    "                            selection_new[current_atoms]=atom_j\n",
    "                            coordinates_new[current_atoms]=total_coordinates[atom_j]\n",
    "                            elements_new[current_atoms]=total_elements[atom_j]\n",
    "                            current_atoms+=1\n",
    "                            no_new_bonds=False\n",
    "                        else:\n",
    "                            # Terminate the bond with a hydrogen cap placed along the bond\n",
    "                            # direction at the element-specific X-H bond length.\n",
    "                            coordinates_j=total_coordinates[atom_j]\n",
    "                            dist_x=coordinates_i[0]-coordinates_j[0]\n",
    "                            dist_y=coordinates_i[1]-coordinates_j[1]\n",
    "                            dist_z=coordinates_i[2]-coordinates_j[2]\n",
    "                            dist=np.sqrt(dist_x*dist_x+dist_y*dist_y+dist_z*dist_z)\n",
    "                            new_dist=element_h_bond_distances[element_i]/dist\n",
    "                            dist_x*=new_dist\n",
    "                            dist_y*=new_dist\n",
    "                            dist_z*=new_dist\n",
    "\n",
    "                            h_replacements[current_atoms]=atom_j\n",
    "                            selection_new[current_atoms]=-1\n",
    "                            coordinates_new[current_atoms,0]=coordinates_i[0]-dist_x\n",
    "                            coordinates_new[current_atoms,1]=coordinates_i[1]-dist_y\n",
    "                            coordinates_new[current_atoms,2]=coordinates_i[2]-dist_z\n",
    "                            elements_new[current_atoms]=1\n",
    "                            current_atoms+=1\n",
    "                    else:\n",
    "                        # Replace the previously placed hydrogen cap by the real atom:\n",
    "                        # mark the cap slot with -2 so it is filtered out at the end.\n",
    "                        replace_h=np.where(h_replacements==atom_j)[0][0]\n",
    "                        selection_new[replace_h]=-2\n",
    "                        h_replacements[replace_h]=-1\n",
    "                        selection_new[current_atoms]=atom_j\n",
    "                        coordinates_new[current_atoms]=total_coordinates[atom_j]\n",
    "                        elements_new[current_atoms]=total_elements[atom_j]\n",
    "                        current_atoms+=1\n",
    "                        no_new_bonds=False\n",
    "\n",
    "        if (no_new_bonds): \n",
    "            break\n",
    "        start=start_new\n",
    "    \n",
    "    # Trim the preallocated buffers and drop slots marked -2 (discarded caps).\n",
    "    selection_new=selection_new[:current_atoms]\n",
    "    coordinates_new=coordinates_new[:current_atoms]\n",
    "    elements_new=elements_new[:current_atoms]\n",
    "    mask=np.where(selection_new>=-1)[0]\n",
    "    selection_new=selection_new[mask]\n",
    "    coordinates_new=coordinates_new[mask]\n",
    "    elements_new=elements_new[mask]\n",
    "\n",
    "    return selection_new,coordinates_new,elements_new\n",
    "\n",
    "\n",
    "\n",
    "def initialize_divide_conquer_HF(total_coordinates,total_elements,partition_num):\n",
    "    \"\"\"\n",
    "    Sets up one subsystem of a divide-and-conquer Hartree-Fock run by chaining the\n",
    "    preprocessing helpers defined above: select the atoms around the requested\n",
    "    partition, pad the partition with its neighborhood and repair any bonds that\n",
    "    the selection would otherwise cut.\n",
    "    \"\"\"\n",
    "\n",
    "    # Centers of all partitions; the requested one defines this subsystem.\n",
    "    all_centers=calculate_parts_center_coordinates(total_coordinates,partition_length,partition_cut_off)\n",
    "    center_coordinate=all_centers[partition_num]\n",
    "\n",
    "    # Generous spherical section around the partition center from which atoms may be pulled in.\n",
    "    _,section_coordinates,section_elements=select_relevant_atoms(total_coordinates,total_elements,center_coordinate,0.5*partition_length+partition_cut_off+section_cut_off)\n",
    "\n",
    "    # Atoms belonging to the partition itself.\n",
    "    partition_atoms,partition_coordinates,_=select_relevant_atoms(section_coordinates,section_elements,center_coordinate,0.5*partition_length)\n",
    "\n",
    "    padded_partition_atoms,_,_,partition_atoms_unique=partition_naive_neighbor_list(\n",
    "        partition_atoms,partition_coordinates,section_coordinates,section_elements,\n",
    "        partition_cut_off,all_centers,partition_length,partition_num)\n",
    "\n",
    "    # Repair bonds cut by the selection (add atoms or hydrogen caps).\n",
    "    bonds,_,_,bond_nums,_=calculate_bonds(section_coordinates,section_elements,cut=4.0,cut_H=2.6)\n",
    "    atoms_final,coordinates_final,elements_final=avoid_bond_cutting(padded_partition_atoms,section_coordinates,section_elements,bonds,bond_nums)\n",
    "\n",
    "    print('Number of atoms in this subsystem:',len(atoms_final))\n",
    "    print('Unique atoms in this subsystem:',len(partition_atoms_unique))\n",
    "\n",
    "    return coordinates_final,elements_final,atoms_final,partition_atoms_unique,center_coordinate\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_intersection(box_center,point_inside,point_outside,box_length=partition_length):\n",
    "    \"\"\"\n",
    "    Calculate the intersection point of the line connecting two points with the surface of a cubical box.\n",
    "\n",
    "    Parameters:\n",
    "        box_center:    center of the axis-aligned cubical box\n",
    "        point_inside:  point assumed to lie inside the box\n",
    "        point_outside: point lying outside the box\n",
    "        box_length:    edge length of the box\n",
    "\n",
    "    Returns:\n",
    "        The intersection of the ray from point_inside towards point_outside with a box\n",
    "        face, or None if no face is hit in the forward direction.\n",
    "    \"\"\"\n",
    "\n",
    "    length_one_half=0.5*box_length\n",
    "    direction=point_outside-point_inside\n",
    "    direction=direction/np.linalg.norm(direction)\n",
    "\n",
    "    min_bounds=box_center-length_one_half\n",
    "    max_bounds=box_center+length_one_half\n",
    "\n",
    "    for i in range(3):\n",
    "        border_array=np.array([min_bounds[i],max_bounds[i]])\n",
    "        for j in range(2):\n",
    "            current_border=border_array[j]\n",
    "            if (direction[i]!=0):\n",
    "                # Ray parameter t at which point_inside+t*direction reaches this face plane.\n",
    "                t=(current_border-point_inside[i])/direction[i]\n",
    "                if (t>0):\n",
    "                    possible_intersection=point_inside+t*direction\n",
    "                    correct_intersection=True\n",
    "                    # Bug fix: the original reused the loop variable j for this check,\n",
    "                    # shadowing the face loop above; a separate index k avoids the clash.\n",
    "                    for k in range(3):\n",
    "                        if (k!=i):\n",
    "                            # The hit must lie within the face, not just on its plane.\n",
    "                            if (not (min_bounds[k]<=possible_intersection[k]<=max_bounds[k])):\n",
    "                                correct_intersection=False\n",
    "                    if (correct_intersection):\n",
    "                        return possible_intersection\n",
    "\n",
    "    return None\n",
    "\n",
    "\n",
    "\n",
    "def run_HF_for_partition(coordinates,elements,partition_indices,partition_unique_indices,partition_num,center_coordinate,saving_path='SavedPartitioningResults'):\n",
    "    \"\"\"\n",
    "    The main function for a single Hartree-Fock run in a divide-and-conquer Hartree-Fock calculation.\n",
    "\n",
    "    Runs the Hartree-Fock calculation for one subsystem, weights the resulting\n",
    "    density-matrix entries so contributions are not double-counted between\n",
    "    overlapping subsystems, and saves the eigenenergies and weighted densities.\n",
    "\n",
    "    Parameters:\n",
    "        coordinates:              coordinates of the subsystem atoms\n",
    "        elements:                 atomic numbers of the subsystem atoms\n",
    "        partition_indices:        indices of the subsystem atoms in the total system\n",
    "        partition_unique_indices: indices of the atoms uniquely owned by this partition\n",
    "        partition_num:            number of this subsystem (used for file names)\n",
    "        center_coordinate:        center of this partition's box\n",
    "        saving_path:              directory tree the results are written to\n",
    "    \"\"\"\n",
    "\n",
    "    print('----------------')\n",
    "    print(' SUBSYSTEM '+str(partition_num))\n",
    "    print('----------------')\n",
    "\n",
    "    # Element composition of the subsystem.\n",
    "    print(len(np.where(elements==1)[0]),'H')\n",
    "    print(len(np.where(elements==6)[0]),'C')\n",
    "    print(len(np.where(elements==7)[0]),'N')\n",
    "    print(len(np.where(elements==8)[0]),'O')\n",
    "    print(len(np.where(elements==16)[0]),'S')\n",
    "    print()\n",
    "\n",
    "    box_center=center_coordinate\n",
    "    \n",
    "    partition_coordinates=coordinates\n",
    "    partition_elements=elements\n",
    "    \n",
    "    pulay_mixing_rate=0.7  \n",
    "\n",
    "    # Subsystems dominated by H and O (mostly water) converge more easily,\n",
    "    # so a more aggressive mixing rate is used for them.\n",
    "    if (len(np.where(elements==1)[0])+len(np.where(elements==8)[0])>=0.9*len(elements)):\n",
    "        pulay_mixing_rate=0.8\n",
    "    \n",
    "    if (len(np.where(elements==1)[0])+len(np.where(elements==8)[0])==len(elements)):\n",
    "        pulay_mixing_rate=1.0\n",
    "\n",
    "    _,P,_,_,eigenenergies,_,_,_,ij_list_no_duplicates,atom_of_basisfunction,_,_,_=\\\n",
    "                    run_HF(partition_coordinates,partition_elements,partitioning=True,pulay_mixing_rate=pulay_mixing_rate)\n",
    "    \n",
    "    # Keep only density-matrix entries whose basis functions sit on atoms owned by this\n",
    "    # partition; entries crossing the box boundary are smoothly down-weighted.\n",
    "    relevant_densities=len(ij_list_no_duplicates)\n",
    "    relevant_densities_no_overlap=np.zeros((relevant_densities,2),dtype='int32')\n",
    "    relevant_densities_no_overlap_values=np.zeros(relevant_densities,dtype=datatype)\n",
    "    no_overlap_count=0\n",
    "    for d in range(relevant_densities):\n",
    "        i,j=ij_list_no_duplicates[d]\n",
    "        atom_i,atom_j=atom_of_basisfunction[i],atom_of_basisfunction[j]\n",
    "        system_atom_i,system_atom_j=partition_indices[atom_i],partition_indices[atom_j]\n",
    "        i_included,j_included=False,False\n",
    "        if (system_atom_i in partition_unique_indices): i_included=True\n",
    "        if (system_atom_j in partition_unique_indices): j_included=True\n",
    "        \n",
    "        if (i_included and j_included):\n",
    "            # Both atoms owned by this partition: keep the full density value.\n",
    "            relevant_densities_no_overlap[no_overlap_count,0],relevant_densities_no_overlap[no_overlap_count,1]=i,j\n",
    "            relevant_densities_no_overlap_values[no_overlap_count]=P[i,j]\n",
    "            no_overlap_count+=1\n",
    "        elif ((i_included and not j_included) or (not i_included and j_included)):\n",
    "            # Exactly one atom inside: weight the density by how much of the line\n",
    "            # connecting the two atoms lies inside the partition box.\n",
    "            relevant_densities_no_overlap[no_overlap_count,0],relevant_densities_no_overlap[no_overlap_count,1]=i,j\n",
    "            if (i_included):\n",
    "                point_inside=partition_coordinates[atom_i]\n",
    "                point_outside=partition_coordinates[atom_j]\n",
    "            else:\n",
    "                point_inside=partition_coordinates[atom_j]\n",
    "                point_outside=partition_coordinates[atom_i]\n",
    "            intersection_point=calculate_intersection(box_center,point_inside,point_outside)\n",
    "            if (intersection_point is not None):\n",
    "                total_distance=np.linalg.norm(point_outside-point_inside)\n",
    "                distance_in_partition=np.linalg.norm(intersection_point-point_inside)\n",
    "                distance_fraction=distance_in_partition/total_distance\n",
    "                # Smoothstep weighting 3f^2-2f^3: approaches 1 when the bond is mostly inside.\n",
    "                relevance_fraction=-2*distance_fraction**3+3*distance_fraction**2\n",
    "            else:\n",
    "                # No forward intersection found; discard this contribution.\n",
    "                relevance_fraction=0.0\n",
    "            \n",
    "            relevant_densities_no_overlap_values[no_overlap_count]=relevance_fraction*P[i,j]\n",
    "            no_overlap_count+=1\n",
    "        \n",
    "    relevant_densities_no_overlap=relevant_densities_no_overlap[:no_overlap_count]\n",
    "    relevant_densities_no_overlap_values=relevant_densities_no_overlap_values[:no_overlap_count]\n",
    "    \n",
    "    # Stored as float32 to halve the disk footprint.\n",
    "    np.save(saving_path+'/Energies/'+str(partition_num)+'_eigenenergies.npy',eigenenergies.astype('float32'))\n",
    "    np.save(saving_path+'/Densities/'+str(partition_num)+'_relevant_densities.npy',relevant_densities_no_overlap_values.astype('float32'))\n",
    "\n",
    "    print()\n",
    "    print()\n",
    "\n",
    "\n",
    "\n",
    "def array_number_to_partition_numbers(num_jobs,num_nodes,current_node,job_num_start=0,job_num_stop=-1):\n",
    "    \"\"\"\n",
    "    Calculates which subsystems are processed by a given node.\n",
    "    For example for 10 nodes and 100 subsystems the distributin would look like\n",
    "    Node 1:  0,10,20,...,90\n",
    "    Node 2:  1,11,21,...,91\n",
    "    ...\n",
    "    Node 10: 9,19,29,...,99\n",
    "    This splitting guarantees relatively similar runtimes for each node which might not be the case if the first node computes the subsystems 0-9\n",
    "    because subsystems at the edges are often faster to compute (as they contain more water).\n",
    "    \"\"\"\n",
    "\n",
    "    if (job_num_stop==-1):\n",
    "        job_num_stop=num_jobs\n",
    "    num_jobs=job_num_stop-job_num_start\n",
    "    job_array=np.arange(num_jobs)\n",
    "    jobs_per_node=int(np.ceil(num_jobs/num_nodes))\n",
    "    job_arrays_for_nodes=np.zeros(jobs_per_node*num_nodes,dtype='int32')-1\n",
    "    job_arrays_for_nodes[:num_jobs]=job_array\n",
    "    job_arrays_for_nodes=job_arrays_for_nodes.reshape(jobs_per_node,num_nodes).T\n",
    "    \n",
    "    jobs_to_current_node=job_arrays_for_nodes[current_node]\n",
    "    mask=np.where(jobs_to_current_node>=0)\n",
    "    jobs_to_current_node=jobs_to_current_node[mask]\n",
    "    \n",
    "    return jobs_to_current_node \n",
    "\n",
    "\n",
    "\n",
    "def initialize_and_run_HF(total_coordinates,total_elements,saving_path,num_jobs,num_nodes,job_array_number):\n",
    "    \"\"\"\n",
    "    Entry point for a large-scale divide-and-conquer Hartree-Fock calculation:\n",
    "    determines the subsystems assigned to this node and runs one Hartree-Fock\n",
    "    calculation per assigned subsystem.\n",
    "    \"\"\"\n",
    "\n",
    "    partition_numbers_array=array_number_to_partition_numbers(num_jobs,num_nodes,current_node=job_array_number)\n",
    "    print()\n",
    "    print('LOG NUMBER '+str(job_array_number))\n",
    "    print('This node runs a Hartree-Fock calculation for the following partition numbers: ')\n",
    "    print(partition_numbers_array)\n",
    "    print()\n",
    "    print()\n",
    "    \n",
    "    for partition_num in partition_numbers_array:\n",
    "        # Preprocess the subsystem, then run its Hartree-Fock calculation.\n",
    "        subsystem=initialize_divide_conquer_HF(total_coordinates,total_elements,partition_num)\n",
    "        coordinates_partition,elements_partition,atoms_partition,partition_atoms_unique,center_coordinate=subsystem\n",
    "        run_HF_for_partition(coordinates_partition,elements_partition,atoms_partition,partition_atoms_unique,partition_num,center_coordinate,saving_path=saving_path)\n",
    "        \n",
    "    return None\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "========================\n",
    "========================\n",
    "DENSITY GRID COMPUTATION\n",
    "========================\n",
    "========================\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def calculate_grid(coordinates):\n",
    "    \"\"\"\n",
    "    Calculates the grid for the calculation of the electronic density on a 3D real-space grid.\n",
    "    The grid covers the atom coordinates plus additional_space on every side at a spacing\n",
    "    of pixel_size; np.meshgrid() expands the axes into full 3D coordinate arrays.\n",
    "\n",
    "    Returns:\n",
    "        x,y,z:             3D coordinate arrays of the grid points\n",
    "        x_min,y_min,z_min: lower grid bound per axis\n",
    "    \"\"\"\n",
    "\n",
    "    lower_bounds=[]\n",
    "    upper_bounds=[]\n",
    "    pixel_counts=[]\n",
    "    for axis in range(3):\n",
    "        low=np.min(coordinates[:,axis])-additional_space\n",
    "        high=np.max(coordinates[:,axis])+additional_space\n",
    "        # Number of grid points fitting between the bounds at pixel_size spacing.\n",
    "        count=int(np.floor((high-low)/pixel_size+1))\n",
    "        lower_bounds.append(low)\n",
    "        pixel_counts.append(count)\n",
    "        # Snap the upper bound to an exact multiple of the spacing.\n",
    "        upper_bounds.append(low+(count-1)*pixel_size)\n",
    "    axes=[np.linspace(lower_bounds[a],upper_bounds[a],pixel_counts[a],dtype=single) for a in range(3)]\n",
    "    x,y,z=np.meshgrid(axes[0],axes[1],axes[2],indexing='ij')\n",
    "\n",
    "    return x,y,z,lower_bounds[0],lower_bounds[1],lower_bounds[2]\n",
    "\n",
    "\n",
    "@njit\n",
    "def wave_function_tables(x,y,z,x_min,y_min,z_min,gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,coordinates,\n",
    "                         gaussian_functions_index_list,atom_of_basisfunction,num_basis_functions):\n",
    "    \"\"\"\n",
    "    Precomputes the wave functions needed for calculating the density of a Hartree-Fock calculation on a real-space grid.\n",
    "    These wave functions are precomputed since they are used many times during the calculation of the individual electronic densities.\n",
    "\n",
    "    Each basis function is tabulated on a small cubic subgrid of edge length\n",
    "    2*basis_function_space centered on its atom.\n",
    "\n",
    "    Returns:\n",
    "        wave_function_grids:    per-basis-function values on the local subgrid\n",
    "        wave_function_points:   global grid indices of the lower corner of each subgrid\n",
    "        wave_function_points_r: global grid indices of the upper corner (lower corner + subgrid length)\n",
    "    \"\"\"\n",
    "\n",
    "    wave_function_grid_length=int((2.0*basis_function_space)/pixel_size+1)\n",
    "    wave_function_grids=np.zeros((num_basis_functions,wave_function_grid_length,wave_function_grid_length,wave_function_grid_length),dtype=single)\n",
    "    wave_function_points=np.zeros((num_basis_functions,3),dtype='int32')\n",
    "\n",
    "    for i in range(num_basis_functions):\n",
    "        # Lower corner of the local subgrid of basis function i, in global grid indices.\n",
    "        center=coordinates[atom_of_basisfunction[i]]\n",
    "        x_min_i=center[0]-basis_function_space\n",
    "        y_min_i=center[1]-basis_function_space\n",
    "        z_min_i=center[2]-basis_function_space\n",
    "        x_point=int((x_min_i-x_min)/pixel_size)\n",
    "        y_point=int((y_min_i-y_min)/pixel_size)\n",
    "        z_point=int((z_min_i-z_min)/pixel_size)\n",
    "        # NOTE(review): the slices below assume the subgrid lies fully inside the global\n",
    "        # grid; atoms closer than basis_function_space to the grid border would yield\n",
    "        # truncated views - TODO confirm additional_space guarantees this.\n",
    "        x_wave,y_wave,z_wave=x[x_point:x_point+wave_function_grid_length,y_point:y_point+wave_function_grid_length,z_point:z_point+wave_function_grid_length],\\\n",
    "                             y[x_point:x_point+wave_function_grid_length,y_point:y_point+wave_function_grid_length,z_point:z_point+wave_function_grid_length],\\\n",
    "                             z[x_point:x_point+wave_function_grid_length,y_point:y_point+wave_function_grid_length,z_point:z_point+wave_function_grid_length]\n",
    "        wave_function_points[i,0],wave_function_points[i,1],wave_function_points[i,2]=x_point,y_point,z_point\n",
    "        # Sum up the primitive Gaussians belonging to basis function i on the subgrid.\n",
    "        for gi in range(gaussian_functions_index_list[i],gaussian_functions_index_list[i+1]):\n",
    "            x_wave_new=x_wave-gaussian_functions_coordinates[gi,0]\n",
    "            y_wave_new=y_wave-gaussian_functions_coordinates[gi,1]\n",
    "            z_wave_new=z_wave-gaussian_functions_coordinates[gi,2]\n",
    "            wave_function_grids[i]+=gaussian_functions_coefficients[gi]*np.exp(-gaussian_functions_exponents[gi]*(x_wave_new*x_wave_new+y_wave_new*y_wave_new+z_wave_new*z_wave_new))\n",
    "    # Upper subgrid corners; used later to clip overlapping density contributions.\n",
    "    wave_function_points_r=wave_function_points+wave_function_grid_length\n",
    "\n",
    "    return wave_function_grids,wave_function_points,wave_function_points_r\n",
    "\n",
    "\n",
    "@njit\n",
    "def remove_density_overlap(ij_list_no_duplicates,atom_of_basisfunction,atoms_partition,partition_atoms_unique):\n",
    "    \"\"\"\n",
    "    Removes density contributions that not included in the current partition. \n",
    "    This is done by checking if both wave functions of a denisty are located on atoms in the current partition.\n",
    "    \"\"\"\n",
    "\n",
    "    relevant_densities=len(ij_list_no_duplicates)\n",
    "    relevant_densities_no_overlap=np.zeros((relevant_densities,2),dtype='int32')\n",
    "    no_overlap_count=0\n",
    "    for d in range(relevant_densities):\n",
    "        i,j=ij_list_no_duplicates[d]\n",
    "        atom_i,atom_j=atom_of_basisfunction[i],atom_of_basisfunction[j]\n",
    "        system_atom_i,system_atom_j=atoms_partition[atom_i],atoms_partition[atom_j]\n",
    "        i_included,j_included=False,False\n",
    "        if (system_atom_i in partition_atoms_unique): i_included=True\n",
    "        if (system_atom_j in partition_atoms_unique): j_included=True\n",
    "        \n",
    "        if (i_included and j_included):\n",
    "            relevant_densities_no_overlap[no_overlap_count,0],relevant_densities_no_overlap[no_overlap_count,1]=i,j\n",
    "            no_overlap_count+=1\n",
    "        elif ((i_included and not j_included) or (not i_included and j_included)):\n",
    "            relevant_densities_no_overlap[no_overlap_count,0],relevant_densities_no_overlap[no_overlap_count,1]=i,j\n",
    "            no_overlap_count+=1\n",
    "\n",
    "    relevant_densities_no_overlap=relevant_densities_no_overlap[:no_overlap_count]\n",
    "\n",
    "    return relevant_densities_no_overlap\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def initialize_divide_conquer_HF_for_plot(total_coordinates,total_elements,partition_num,xyz):\n",
    "    \"\"\"\n",
    "    Sets up a subsystem for the density-grid computation. Mirrors\n",
    "    initialize_divide_conquer_HF() but takes the partition centers (xyz) directly and\n",
    "    additionally returns the ids of the subsystem atoms within the total system.\n",
    "    \"\"\"\n",
    "\n",
    "    center_coordinate=xyz[partition_num]\n",
    "\n",
    "    # Generous spherical section around the partition center.\n",
    "    section_atoms,section_coordinates,section_elements=select_relevant_atoms(total_coordinates,total_elements,center_coordinate,0.5*partition_length+partition_cut_off+section_cut_off)\n",
    "\n",
    "    # Atoms belonging to the partition itself.\n",
    "    partition_atoms,partition_coordinates,_=select_relevant_atoms(section_coordinates,section_elements,center_coordinate,0.5*partition_length)\n",
    "\n",
    "    padded_partition_atoms,_,_,partition_atoms_unique=partition_naive_neighbor_list(partition_atoms,partition_coordinates,section_coordinates,section_elements,partition_cut_off,xyz,partition_length,partition_num)\n",
    "\n",
    "    # Repair bonds cut by the selection (add atoms or hydrogen caps).\n",
    "    bonds,_,_,bond_nums,_=calculate_bonds(section_coordinates,section_elements,cut=4.0,cut_H=2.6)\n",
    "    atoms_final,coordinates_final,elements_final=avoid_bond_cutting(padded_partition_atoms,section_coordinates,section_elements,bonds,bond_nums)\n",
    "\n",
    "    # Ids of the final subsystem atoms in the total system.\n",
    "    atom_ids=section_atoms[atoms_final]\n",
    "\n",
    "    return coordinates_final,elements_final,atoms_final,partition_atoms_unique,atom_ids\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_density_grid_for_partitions(x,y,z,x_min,y_min,z_min,total_coordinates,total_elements,xyz,num_subsystems,\n",
    "                                          hide_H_atoms=False,saving_path='Output_Data/Vault/Densities'):\n",
    "    \"\"\"\n",
    "    The main function that is called to compute a real-space grid of the electronic density after a large-scale Hartree-Fock calculation.\n",
    "\n",
    "    For every subsystem the preprocessing is redone to recover the basis functions, the\n",
    "    density values saved by run_HF_for_partition() are loaded from saving_path, and each\n",
    "    density pair's product of tabulated wave functions is accumulated into the grid.\n",
    "\n",
    "    Parameters:\n",
    "        x,y,z:             3D coordinate arrays of the global grid (see calculate_grid)\n",
    "        x_min,y_min,z_min: lower grid bounds per axis\n",
    "        total_coordinates: coordinates of all atoms of the total system\n",
    "        total_elements:    atomic numbers of all atoms of the total system\n",
    "        xyz:               center coordinates of all partitions\n",
    "        num_subsystems:    number of partitions to accumulate\n",
    "        hide_H_atoms:      if True, skip density pairs involving hydrogen atoms\n",
    "        saving_path:       directory containing the saved density values\n",
    "\n",
    "    Returns:\n",
    "        complete_density_grid: electronic density accumulated on the global grid\n",
    "    \"\"\"\n",
    "    \n",
    "    # Serial here; the loop below is already shaped for a round-robin thread split.\n",
    "    num_plot_threads=1\n",
    "\n",
    "    x_len,y_len,z_len=x.shape[0],x.shape[1],x.shape[2]\n",
    "    complete_density_grid=np.zeros((x_len,y_len,z_len),dtype=single)\n",
    "\n",
    "    relevant_partitions=np.arange(num_subsystems)\n",
    "    num_plots=len(relevant_partitions)\n",
    "\n",
    "    # NOTE(review): magic cutoff - atoms with ids above this are excluded from the plot;\n",
    "    # presumably specific to one studied system - TODO make this a parameter.\n",
    "    id_cut=968136-1\n",
    "\n",
    "    print('Plotting subsystems:')\n",
    "    print(relevant_partitions)\n",
    "    print()\n",
    "    print()\n",
    "    print('Current system')\n",
    "\n",
    "    for t in range(num_plot_threads):\n",
    "        \n",
    "        for partition_num in relevant_partitions[t::num_plot_threads]:\n",
    "            # Progress indicator with two decimal places.\n",
    "            print(str(partition_num)+'/'+str(num_plots) +'-'+str(int(partition_num/num_plots*10000.0)/100)+'%')\n",
    "\n",
    "            partition_coordinates,partition_elements,atoms_partition,partition_atoms_unique,atom_ids=initialize_divide_conquer_HF_for_plot(total_coordinates,total_elements,partition_num,xyz)\n",
    "            \n",
    "            # Skip subsystems made up entirely of atoms beyond the id cutoff.\n",
    "            if (np.min(atom_ids)>id_cut): continue\n",
    "\n",
    "\n",
    "            # Rebuild the basis-set bookkeeping for this subsystem.\n",
    "            num_atoms=len(partition_coordinates)\n",
    "            num_basis_functions=calculate_num_basis_functions(partition_elements,num_atoms)\n",
    "            \n",
    "            num_gaussian_functions,_,gaussian_functions_index_list,atom_of_basisfunction,type_of_basis_function\\\n",
    "                            =calculate_num_gaussian_functions(partition_elements,num_atoms,num_basis_functions)\n",
    "            gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents\\\n",
    "                            =calculate_gaussian_function_inputs(partition_elements,partition_coordinates,num_atoms,num_gaussian_functions)\n",
    "\n",
    "            \n",
    "            num_orbital_parts,orbital_parts_index_list=calculate_num_orbital_parts(partition_elements,num_atoms,num_basis_functions) \n",
    "            num_parts_gaussian_functions,orbital_parts_gaussian_index_list\\\n",
    "                            =calculate_gaussians_for_orbital_parts(partition_elements,num_atoms,num_orbital_parts)\n",
    "            orbital_parts_coordinates,orbital_parts_coefficients,orbital_parts_exponents=calculate_orbital_parts_preprocessing(partition_elements,partition_coordinates,num_parts_gaussian_functions,num_atoms)\n",
    "            \n",
    "            # Recompute the relevant density pairs exactly as during the Hartree-Fock run.\n",
    "            ij_list_no_duplicates,_,_,_,_,_,_\\\n",
    "                            =calculate_relevant_densities(orbital_parts_coefficients,orbital_parts_exponents,orbital_parts_coordinates,\n",
    "                                                        orbital_parts_index_list,orbital_parts_gaussian_index_list,atom_of_basisfunction,gaussian_functions_index_list,\n",
    "                                                        partition_coordinates,num_gaussian_functions,num_basis_functions)\n",
    "\n",
    "\n",
    "\n",
    "            relevant_densities_no_overlap=remove_density_overlap(ij_list_no_duplicates,atom_of_basisfunction,atoms_partition,partition_atoms_unique)\n",
    "\n",
    "            # NOTE(review): the loaded values are matched to the recomputed pairs by\n",
    "            # position d; this assumes the recomputed ordering is identical to the one\n",
    "            # saved by run_HF_for_partition() - TODO confirm.\n",
    "            relevant_densities=relevant_densities_no_overlap\n",
    "            relevant_densities_values=np.load(saving_path+'/'+str(partition_num)+'_relevant_densities.npy')\n",
    "        \n",
    "            num_basis_functions=len(atom_of_basisfunction)   \n",
    "\n",
    "            # Tabulate every basis function on its local subgrid once.\n",
    "            wave_function_grids,wave_function_points,wave_function_points_r=wave_function_tables(x,y,z,x_min,y_min,z_min,gaussian_functions_coordinates,gaussian_functions_coefficients,gaussian_functions_exponents,\n",
    "                                                                                                partition_coordinates,gaussian_functions_index_list,atom_of_basisfunction,num_basis_functions)\n",
    "\n",
    "            for d in range(len(relevant_densities)):\n",
    "                i,j=int(relevant_densities[d,0]),int(relevant_densities[d,1])\n",
    "\n",
    "                # Visibility filters: id cutoff, core electrons (global flag\n",
    "                # hide_core_electrons) and, optionally, hydrogen atoms.\n",
    "                update_density=True\n",
    "                if (atom_ids[atom_of_basisfunction[i]]>id_cut or atom_ids[atom_of_basisfunction[j]]>id_cut):\n",
    "                    update_density=False\n",
    "                if (hide_core_electrons):\n",
    "                    if ((type_of_basis_function[i]==1 and partition_elements[atom_of_basisfunction[i]]!=1) or (type_of_basis_function[j]==1 and partition_elements[atom_of_basisfunction[j]]!=1)):\n",
    "                        update_density=False\n",
    "                    if ((type_of_basis_function[i]<=3 and partition_elements[atom_of_basisfunction[i]]>10) or (type_of_basis_function[j]<=3 and partition_elements[atom_of_basisfunction[j]]>10)):\n",
    "                        update_density=False\n",
    "                if (hide_H_atoms):\n",
    "                    if (partition_elements[atom_of_basisfunction[i]]==1 or partition_elements[atom_of_basisfunction[j]]==1):\n",
    "                        update_density=False\n",
    "                \n",
    "                if (update_density):\n",
    "                    # Overlap region of the two basis-function subgrids in global indices.\n",
    "                    x_point_l=max(wave_function_points[i,0],wave_function_points[j,0])\n",
    "                    y_point_l=max(wave_function_points[i,1],wave_function_points[j,1])\n",
    "                    z_point_l=max(wave_function_points[i,2],wave_function_points[j,2])\n",
    "                    x_point_r=min(wave_function_points_r[i,0],wave_function_points_r[j,0])\n",
    "                    y_point_r=min(wave_function_points_r[i,1],wave_function_points_r[j,1])\n",
    "                    z_point_r=min(wave_function_points_r[i,2],wave_function_points_r[j,2])\n",
    "                    # Translate the overlap region into each subgrid's local indices.\n",
    "                    x_point_l_i,y_point_l_i,z_point_l_i=x_point_l-wave_function_points[i,0],y_point_l-wave_function_points[i,1],z_point_l-wave_function_points[i,2]\n",
    "                    x_point_l_j,y_point_l_j,z_point_l_j=x_point_l-wave_function_points[j,0],y_point_l-wave_function_points[j,1],z_point_l-wave_function_points[j,2]\n",
    "                    x_point_r_i,y_point_r_i,z_point_r_i=x_point_r-wave_function_points[i,0],y_point_r-wave_function_points[i,1],z_point_r-wave_function_points[i,2]\n",
    "                    # Empty overlaps yield zero-size slices instead of negative extents.\n",
    "                    x_space=max(0,x_point_r_i-x_point_l_i)\n",
    "                    y_space=max(0,y_point_r_i-y_point_l_i)\n",
    "                    z_space=max(0,z_point_r_i-z_point_l_i)\n",
    "                    # Pointwise product phi_i*phi_j on the overlap region.\n",
    "                    current_density=np.multiply(wave_function_grids[i,x_point_l_i:x_point_l_i+x_space,y_point_l_i:y_point_l_i+y_space,z_point_l_i:z_point_l_i+z_space],\n",
    "                                                wave_function_grids[j,x_point_l_j:x_point_l_j+x_space,y_point_l_j:y_point_l_j+y_space,z_point_l_j:z_point_l_j+z_space])\n",
    "                if (update_density):\n",
    "                    # Off-diagonal density matrix entries occur twice (ij and ji).\n",
    "                    if (i!=j):\n",
    "                        double_prefactor=2.0\n",
    "                    else:\n",
    "                        double_prefactor=1.0\n",
    "                    complete_density_grid[x_point_l:x_point_r,y_point_l:y_point_r,z_point_l:z_point_r]+=relevant_densities_values[d]*double_prefactor*current_density\n",
    "\n",
    " \n",
    "\n",
    "    return complete_density_grid\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "=====================================\n",
    "=====================================\n",
    "REAL-TIME TIME-DEPENDENT HARTREE-FOCK\n",
    "=====================================\n",
    "=====================================\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def electric_field_integral(gaussian_functions_coefficients_i,gaussian_functions_coefficients_j,gaussian_functions_exponents_i,gaussian_functions_exponents_j,\n",
    "                            gaussian_functions_coordinates_i_x,gaussian_functions_coordinates_i_y,gaussian_functions_coordinates_i_z,\n",
    "                            gaussian_functions_coordinates_j_x,gaussian_functions_coordinates_j_y,gaussian_functions_coordinates_j_z,\n",
    "                            e_field_strength,e_field_origin_x,e_field_origin_y,e_field_origin_z,direction):\n",
    "    \"\"\"\n",
    "    Calculates a single entry for the electric field matrix E.\n",
    "    The computed integrals are <phi_i|d_alpha|phi_j> where d_alpha is one component of the dipole moment\n",
    "    operator d=transpose(x,y,z), taken relative to the field origin (e_field_origin_x/y/z).\n",
    "    direction selects the component: 0 -> x, 1 -> y, 2 -> z.\n",
    "    Any other value raises a ValueError (previously 'correction' was simply left undefined in that case,\n",
    "    producing an unrelated NameError / compilation failure).\n",
    "    \"\"\"\n",
    "    \n",
    "    # Gaussian product theorem quantities for the two primitives.\n",
    "    prefactor=gaussian_functions_coefficients_i*gaussian_functions_coefficients_j\n",
    "    exp_sum=gaussian_functions_exponents_i+gaussian_functions_exponents_j\n",
    "    exp_product=gaussian_functions_exponents_i*gaussian_functions_exponents_j\n",
    "    product_sum_quotient=exp_product/exp_sum\n",
    "\n",
    "    distance_x=gaussian_functions_coordinates_i_x-gaussian_functions_coordinates_j_x\n",
    "    distance_y=gaussian_functions_coordinates_i_y-gaussian_functions_coordinates_j_y\n",
    "    distance_z=gaussian_functions_coordinates_i_z-gaussian_functions_coordinates_j_z\n",
    "    coordinate_distance=distance_x*distance_x+distance_y*distance_y+distance_z*distance_z\n",
    "\n",
    "    # Overlap integral of the two Gaussians: (pi/(a+b))^(3/2) * exp(-a*b/(a+b)*|R_i-R_j|^2).\n",
    "    pi_divided_by_sum=np.pi/exp_sum\n",
    "    result_s=prefactor*pi_divided_by_sum*np.sqrt(pi_divided_by_sum)*np.exp(-product_sum_quotient*coordinate_distance)\n",
    "\n",
    "    # Dipole factor: exp_sum times the product-center coordinate, shifted by the field origin.\n",
    "    if (direction==0):\n",
    "        correction=(gaussian_functions_exponents_i*gaussian_functions_coordinates_i_x+gaussian_functions_exponents_j*gaussian_functions_coordinates_j_x-exp_sum*e_field_origin_x)\n",
    "    elif (direction==1):\n",
    "        correction=(gaussian_functions_exponents_i*gaussian_functions_coordinates_i_y+gaussian_functions_exponents_j*gaussian_functions_coordinates_j_y-exp_sum*e_field_origin_y)\n",
    "    elif (direction==2):\n",
    "        correction=(gaussian_functions_exponents_i*gaussian_functions_coordinates_i_z+gaussian_functions_exponents_j*gaussian_functions_coordinates_j_z-exp_sum*e_field_origin_z)\n",
    "    else:\n",
    "        raise ValueError('direction must be 0 (x), 1 (y) or 2 (z)')\n",
    "    result_e=e_field_strength*result_s*correction/exp_sum\n",
    "    \n",
    "    return result_e\n",
    "\n",
    "\n",
    "@njit\n",
    "def calculate_electric_field_matrix(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                    gaussians_for_densities,gaussians_for_densities_index_list,relevant_densities_no_duplicates,\n",
    "                                    e_field_strength,e_field_origin_x,e_field_origin_y,e_field_origin_z,direction):\n",
    "    \"\"\"\n",
    "    Builds the (sparse) electric field matrix E required for real-time time-dependent Hartree-Fock.\n",
    "    One entry per relevant density is accumulated by summing electric_field_integral() over all\n",
    "    Gaussian pairs that belong to that density (pair ranges are taken from the index list).\n",
    "    \"\"\"\n",
    "\n",
    "    e_matrix=np.zeros((relevant_densities_no_duplicates),dtype=datatype)\n",
    "\n",
    "    for density in range(relevant_densities_no_duplicates):\n",
    "        pair_start=gaussians_for_densities_index_list[density]\n",
    "        pair_stop=gaussians_for_densities_index_list[density+1]\n",
    "        for pair in range(pair_start,pair_stop):\n",
    "            g_i=gaussians_for_densities[pair,0]\n",
    "            g_j=gaussians_for_densities[pair,1]\n",
    "            e_matrix[density]+=electric_field_integral(gaussian_functions_coefficients[g_i],gaussian_functions_coefficients[g_j],gaussian_functions_exponents[g_i],gaussian_functions_exponents[g_j],\n",
    "                                                       gaussian_functions_coordinates[g_i,0],gaussian_functions_coordinates[g_i,1],gaussian_functions_coordinates[g_i,2],\n",
    "                                                       gaussian_functions_coordinates[g_j,0],gaussian_functions_coordinates[g_j,1],gaussian_functions_coordinates[g_j,2],\n",
    "                                                       e_field_strength,e_field_origin_x,e_field_origin_y,e_field_origin_z,direction)\n",
    "    \n",
    "    return e_matrix\n",
    "\n",
    "\n",
    "\n",
    "@njit(parallel=True,fastmath=True)\n",
    "def calculate_complex_G(P,V_ee,ijkl_list,num_basis_functions,relevant_V_ee_elements):\n",
    "    \"\"\"\n",
    "    Calculates the two-center part of the Fock matrix. This algorithm is modified for a complex matrix G:\n",
    "    the usual algorithm cannot be used for a calculation with a complex density matrix input since it\n",
    "    relies on the density matrix being real symmetric; therefore P[k,l]/P[l,k] and P[i,j]/P[j,i]\n",
    "    are accumulated separately instead of using a factor of 2.\n",
    "\n",
    "    Each entry of V_ee is one unique two-electron integral (ij|kl); the branches below add its Coulomb\n",
    "    (+P*V) and exchange (-0.5*P*V) contributions for every distinct index permutation, with the i==j,\n",
    "    k==l and bra==ket guards preventing double counting of coinciding permutations.\n",
    "    The integral list is processed in num_threads_G parallel chunks; each chunk writes into its own\n",
    "    slice of a (num_parts,n,n) scratch array which is summed at the end, so no two threads ever write\n",
    "    to the same memory location.\n",
    "    \"\"\"\n",
    "\n",
    "    # num_threads_G is a module-level setting defined elsewhere in this file.\n",
    "    num_parts=num_threads_G\n",
    "    part_length=int(np.ceil(relevant_V_ee_elements/num_parts))\n",
    "    G=np.zeros((num_parts,num_basis_functions,num_basis_functions),dtype='complex128')\n",
    "    # Trivial one-basis-function system: nothing to accumulate, return the zero matrix.\n",
    "    if (num_basis_functions==1): return np.sum(G,axis=0)\n",
    "    \n",
    "    for part in prange(num_parts):\n",
    "        max_index=min((part+1)*part_length,relevant_V_ee_elements)\n",
    "        ijkl_list_part=ijkl_list[part*part_length:max_index]\n",
    "        V_ee_part=V_ee[part*part_length:max_index]\n",
    "        for ee in range(max_index-part*part_length):\n",
    "            ijkl_list_ee=ijkl_list_part[ee]\n",
    "            i,j,k,l=ijkl_list_ee[0],ijkl_list_ee[1],ijkl_list_ee[2],ijkl_list_ee[3]\n",
    "            V_ee_ee=V_ee_part[ee]\n",
    "            # Pre-computed density*integral products reused by several permutations below.\n",
    "            P_kl_V=P[k,l]*V_ee_ee\n",
    "            P_lk_V=P[l,k]*V_ee_ee\n",
    "            P_ij_V=P[i,j]*V_ee_ee\n",
    "            P_ji_V=P[j,i]*V_ee_ee\n",
    "\n",
    "            # Contributions to row i; the k<->l swapped term is skipped when k==l.\n",
    "            if (k!=l):\n",
    "                G[part,i,j]+=P_kl_V\n",
    "                G[part,i,j]+=P_lk_V\n",
    "                G[part,i,k]-=0.5*P[l,j]*V_ee_ee\n",
    "            else:\n",
    "                G[part,i,j]+=P_kl_V\n",
    "            G[part,i,l]-=0.5*P[k,j]*V_ee_ee\n",
    "\n",
    "            # Same contributions for the swapped bra indices (j,i) - only when they differ.\n",
    "            if (i!=j):\n",
    "                if (k!=l):\n",
    "                    G[part,j,i]+=P_kl_V\n",
    "                    G[part,j,i]+=P_lk_V\n",
    "                    G[part,j,k]-=0.5*P[l,i]*V_ee_ee\n",
    "                else:\n",
    "                    G[part,j,i]+=P_kl_V\n",
    "                G[part,j,l]-=0.5*P[k,i]*V_ee_ee\n",
    "                        \n",
    "            # b is False when the bra pair equals the ket pair; the (kl|ij)-side permutations\n",
    "            # below would then duplicate the contributions already added above.\n",
    "            b=not(i==k and j==l)\n",
    "            if (b):\n",
    "                if (i!=j):\n",
    "                    G[part,k,l]+=P_ij_V\n",
    "                    G[part,k,l]+=P_ji_V\n",
    "                    G[part,k,i]-=0.5*P[j,l]*V_ee_ee\n",
    "                else:\n",
    "                    G[part,k,l]+=P_ij_V\n",
    "                G[part,k,j]-=0.5*P[i,l]*V_ee_ee\n",
    "                \n",
    "            if (b and k!=l):\n",
    "                if (i!=j):\n",
    "                    G[part,l,k]+=P_ij_V\n",
    "                    G[part,l,k]+=P_ji_V\n",
    "                    G[part,l,i]-=0.5*P[j,k]*V_ee_ee\n",
    "                else:\n",
    "                    G[part,l,k]+=P_ij_V\n",
    "                G[part,l,j]-=0.5*P[i,k]*V_ee_ee\n",
    "            \n",
    "    # Reduce the per-thread partial results.\n",
    "    G=np.sum(G,axis=0)\n",
    "    return G\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def time_propagation(F_n_1_2_in,F_n_3_2_in,S,P,eigenorbitals,H_core,E,V_ee,ijkl_list,relevant_V_ee_elements,num_basis_functions,e_field_strengths,e_field_strengths_half,elements,\n",
    "                     time_domain,time_steps,delta_t,update=100):\n",
    "    \"\"\"\n",
    "    This function performs the propagation in time for a real-time time-dependent Hartree-Fock calculation.\n",
    "    For each of the three spatial directions the density matrix P is propagated for time_steps steps of\n",
    "    size delta_t: the Fock matrix is linearly extrapolated from the two previous half steps, a half-step\n",
    "    unitary propagator produces an intermediate density, a new Fock matrix is built from it (including the\n",
    "    external field term) and the full step is then taken with that midpoint Fock matrix. This looks like a\n",
    "    second-order extrapolated-midpoint scheme - confirm against the underlying reference.\n",
    "    The runtimes of the individual parts of the code are tracked with time.time() and printed to the\n",
    "    console after completion of the method.\n",
    "    NOTE(review): the parameters eigenorbitals, elements, time_domain and e_field_strengths are not\n",
    "    referenced in this body (only e_field_strengths_half is used) - kept for interface compatibility.\n",
    "    Returns dipole_moments and consistency, both (3,time_steps+1) complex arrays.\n",
    "    \"\"\"\n",
    "\n",
    "    # Accumulators for the per-section wall-clock timings.\n",
    "    total_time=0.0\n",
    "    time_s_matrix=0.0\n",
    "    time_prep=0.0\n",
    "    time_F_transform=0.0\n",
    "    time_matrix_exp=0.0\n",
    "    time_P_transform=0.0\n",
    "    time_time_evolution=0.0\n",
    "    time_P_transform_back=0.0\n",
    "    time_G=0.0\n",
    "    time_other=0.0\n",
    "\n",
    "    time_start=time.time()\n",
    "    \n",
    "    time_1=time.time()\n",
    "    dipole_moments=np.zeros((3,time_steps+1),dtype='complex128')\n",
    "    consistency=np.zeros((3,time_steps+1),dtype='complex128')\n",
    "    H_core=torch.from_numpy(H_core).to(torch.complex128)\n",
    "\n",
    "    # Loewdin symmetric orthogonalization: S^(-1/2) and S^(+1/2) from the eigendecomposition of S.\n",
    "    S=torch.from_numpy(S.astype('float64')).to(torch.float64)\n",
    "    evals,evecs=torch.linalg.eigh(S)\n",
    "    evpow_1=evals**(-1/2) \n",
    "    S_inverse_sqrt=torch.matmul(evecs,torch.matmul(torch.diag(evpow_1),torch.inverse(evecs))).to(torch.complex128)\n",
    "    evpow_2=evals**(1/2) \n",
    "    S_sqrt=torch.matmul(evecs,torch.matmul(torch.diag(evpow_2),torch.inverse(evecs))).to(torch.complex128)\n",
    "    time_2=time.time()\n",
    "    time_s_matrix+=time_2-time_1\n",
    "    \n",
    "    # One independent propagation per field polarization direction (x, y, z).\n",
    "    for xyz in range(3):\n",
    "        \n",
    "        time_1=time.time()\n",
    "        E_xyz=E[xyz]\n",
    "        E_xyz=torch.from_numpy(E_xyz).to(torch.complex128)\n",
    "\n",
    "        # Fresh copies so each direction starts from the same ground state.\n",
    "        P_0=torch.from_numpy(P).to(torch.complex128).detach().clone()\n",
    "        F_n_1_2=torch.from_numpy(F_n_1_2_in).to(torch.complex128).detach().clone()\n",
    "        F_n_3_2=torch.from_numpy(F_n_3_2_in).to(torch.complex128).detach().clone()\n",
    "\n",
    "        # Elementwise contraction sum_ij P_ij*E_ij (equals tr(P@E) for symmetric E): the dipole moment.\n",
    "        dipole_moments[xyz,0]=torch.sum(P_0*E_xyz).numpy()\n",
    "        SPS=S_sqrt@(P_0@S_sqrt)\n",
    "        # Numerical-stability measure: elementwise square sum of the orthogonalized density matrix.\n",
    "        consistency[xyz,0]=torch.sum(SPS*SPS).numpy()\n",
    "        time_2=time.time()\n",
    "        time_prep+=time_2-time_1\n",
    "\n",
    "        for i in range(1,time_steps+1):\n",
    "\n",
    "            time_1=time.time()\n",
    "            relative_e_field_strength_1_2=e_field_strengths_half[i]\n",
    "            # Linear extrapolation of the Fock matrix from the two previous half steps.\n",
    "            F_1_4=1.75*F_n_1_2-0.75*F_n_3_2\n",
    "            time_2=time.time()\n",
    "            time_other+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Half-step generator -i*dt/2*F in the orthonormalized basis.\n",
    "            exp_matrix=(-0.5*delta_t*1j)*(S_inverse_sqrt@(F_1_4@S_inverse_sqrt))\n",
    "            time_2=time.time()\n",
    "            time_F_transform+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            U_1_2=torch.linalg.matrix_exp(exp_matrix)\n",
    "            time_2=time.time()\n",
    "            time_matrix_exp+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Transform the density into the orthonormal basis.\n",
    "            P_0_transformed=S_sqrt@(P_0@S_sqrt)\n",
    "            time_2=time.time()\n",
    "            time_P_transform+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Unitary half-step evolution: P -> U P U^dagger.\n",
    "            P_1_2=U_1_2@(P_0_transformed@(torch.conj(torch.t(U_1_2))))\n",
    "            time_2=time.time()\n",
    "            time_time_evolution+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Back-transform into the original (non-orthogonal) basis.\n",
    "            P_1_2=S_inverse_sqrt@(P_1_2@S_inverse_sqrt)\n",
    "            time_2=time.time()\n",
    "            time_P_transform_back+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Rebuild the two-electron part from the midpoint density (numba-parallel helper).\n",
    "            G=calculate_complex_G(P_1_2.numpy(),V_ee,ijkl_list,num_basis_functions,relevant_V_ee_elements)\n",
    "            time_2=time.time()\n",
    "            time_G+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Midpoint Fock matrix including the external field term.\n",
    "            F_1_2=H_core+torch.from_numpy(G).to(torch.complex128)+E_xyz*relative_e_field_strength_1_2\n",
    "            time_2=time.time()\n",
    "            time_other+=time_2-time_1\n",
    "\n",
    "\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Full-step generator -i*dt*F_mid in the orthonormalized basis.\n",
    "            exp_matrix=(-delta_t*1j)*(S_inverse_sqrt@(F_1_2@S_inverse_sqrt))\n",
    "            time_2=time.time()\n",
    "            time_F_transform+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            U_1=torch.linalg.matrix_exp(exp_matrix)\n",
    "            time_2=time.time()\n",
    "            time_matrix_exp+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            P_1=U_1@(P_0_transformed@(torch.conj(torch.t(U_1))))\n",
    "            time_2=time.time()\n",
    "            time_time_evolution+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            # Stability measure taken in the orthonormal basis (before back-transforming).\n",
    "            consistency[xyz,i]=torch.sum(P_1*P_1)\n",
    "            time_2=time.time()\n",
    "            time_other+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            P_1=S_inverse_sqrt@(P_1@S_inverse_sqrt)\n",
    "            time_2=time.time()\n",
    "            time_P_transform_back+=time_2-time_1\n",
    "\n",
    "            time_1=time.time()\n",
    "            dipole_moments[xyz,i]=(torch.sum(P_1*E_xyz)).numpy()\n",
    "            # Shift the history: the current step becomes the previous step of the next iteration.\n",
    "            P_0=P_1.detach().clone()\n",
    "            F_n_3_2=F_n_1_2.detach().clone()\n",
    "            F_n_1_2=F_1_2.detach().clone()\n",
    "            time_2=time.time()\n",
    "            time_other+=time_2-time_1\n",
    "            \n",
    "            \n",
    "            # NOTE(review): placeholder path, and the full array is rewritten on EVERY time step;\n",
    "            # use a real output path and consider saving only every 'update' steps.\n",
    "            np.save('file/to/output/dipole_moments',dipole_moments)\n",
    "\n",
    "\n",
    "            if (i%update==0): print('Coordinate '+str(xyz+1)+' - Step '+str(i)+'/'+str(time_steps)+': '+str(np.round(i/time_steps*100.0,4))+'% - dipole moment: '+str(np.real(dipole_moments[xyz,i]-dipole_moments[xyz,0]))+' - numerical stability: '+str(np.real(consistency[xyz,i]-consistency[xyz,0])))\n",
    "\n",
    "    \n",
    "\n",
    "    \n",
    "    # Report dipole moments relative to their initial (field-free) values.\n",
    "    dipole_moments[0]-=dipole_moments[0,0]\n",
    "    dipole_moments[1]-=dipole_moments[1,0]\n",
    "    dipole_moments[2]-=dipole_moments[2,0]\n",
    "\n",
    "    time_stop=time.time()\n",
    "    total_time=time_stop-time_start\n",
    "\n",
    "    print('Time-propagtion times:')\n",
    "    print('    |S matrix exponentiation: '+str(np.round(time_s_matrix,4))+' s')\n",
    "    print('    |Preparation times: '+str(np.round(time_prep,4))+' s')\n",
    "    print('    |Fock matrix transforms: '+str(np.round(time_F_transform,4))+' s')\n",
    "    print('    |Matrix exponentiation: '+str(np.round(time_matrix_exp,4))+' s')\n",
    "    print('    |Density matrix transforms: '+str(np.round(time_P_transform,4))+' s')\n",
    "    print('    |Time evolution: '+str(np.round(time_time_evolution,4))+' s')\n",
    "    print('    |Density matrix back-transforms: '+str(np.round(time_P_transform_back,4))+' s')\n",
    "    print('    |G matrix computations: '+str(np.round(time_G,4))+' s')\n",
    "    print('    |Additions, traces and copies: '+str(np.round(time_other,4))+' s')\n",
    "    print('____________________________________________')\n",
    "    print('Total time: '+str(np.round(total_time,4))+' s')\n",
    "\n",
    "    return dipole_moments,consistency\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def run_rthf(coordinates,elements,time_steps=10000,delta_t=0.1,pulse_standard_deviation=0.2,pulse_shift_factor=15,e_field_max=2.0e-5,update=100):\n",
    "    \"\"\"\n",
    "    Main driver for a real-time time-dependent Hartree-Fock (RT-TDHF) calculation.\n",
    "    A single ground-state Hartree-Fock run first provides the required matrices (density matrix, overlap\n",
    "    matrix, electron repulsion tensor, ...). The electric field matrices for the three spatial directions\n",
    "    are assembled next, and time_propagation() then carries out the propagation in time.\n",
    "    The outputs are collected in one array (rthf_outputs) and returned: rows 0-2 hold the dipole moments,\n",
    "    row 3 the time grid, row 4 the field pulse and rows 5-7 the numerical consistency measures.\n",
    "    \"\"\"\n",
    "\n",
    "    # Gaussian field pulse sampled on the full time steps and on the half steps.\n",
    "    pulse_center=pulse_shift_factor*pulse_standard_deviation\n",
    "    time_grid=np.linspace(0,time_steps*delta_t,time_steps+1)\n",
    "    two_variance=2*pulse_standard_deviation**2\n",
    "    field_full_steps=np.exp(-((time_grid-pulse_center)**2)/two_variance)*e_field_max\n",
    "    field_half_steps=np.exp(-((time_grid-0.5*delta_t-pulse_center)**2)/two_variance)*e_field_max\n",
    "\n",
    "    # Ground-state Hartree-Fock run providing all matrices needed for the propagation.\n",
    "    gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,gaussians_for_densities,gaussians_for_densities_index_list,\\\n",
    "    F,S,P,eigenorbitals,H_core,ij_list_no_duplicates,relevant_densities_no_duplicates,V_ee,ijkl_list,relevant_V_ee_elements,num_basis_functions,center_of_mass,_,_,_\\\n",
    "                    =run_HF(coordinates,elements,rthf=True)\n",
    "\n",
    "    # The propagation works with complex matrices throughout.\n",
    "    F=F.astype('complex128')\n",
    "    P=P.astype('complex128')\n",
    "    H_core=H_core.astype('complex128')\n",
    "    V_ee=V_ee.astype('complex128')\n",
    "\n",
    "    # Dipole (field) matrices for x, y and z, referenced to the center of mass.\n",
    "    origin_x,origin_y,origin_z=center_of_mass[0],center_of_mass[1],center_of_mass[2]\n",
    "    E=np.zeros((3,num_basis_functions,num_basis_functions),dtype='complex128')\n",
    "    for axis in range(3):\n",
    "        E_sparse=calculate_electric_field_matrix(gaussian_functions_coefficients,gaussian_functions_exponents,gaussian_functions_coordinates,\n",
    "                                                 gaussians_for_densities,gaussians_for_densities_index_list,relevant_densities_no_duplicates,\n",
    "                                                 1.0,origin_x,origin_y,origin_z,axis)\n",
    "        # Sign flip of the position integrals - presumably the electron charge convention; confirm.\n",
    "        E[axis]=sparse_to_dense(ij_list_no_duplicates,E_sparse*(-1),num_basis_functions)\n",
    "\n",
    "    # The converged Fock matrix serves as both Fock-history entries on startup.\n",
    "    dipole_moments,consistency=time_propagation(F,F,S,P,eigenorbitals,H_core,E,V_ee,ijkl_list,relevant_V_ee_elements,num_basis_functions,field_full_steps,field_half_steps,elements,\n",
    "                                    time_grid,time_steps,delta_t,update=update)\n",
    "\n",
    "    # Pack all results into a single array.\n",
    "    rthf_outputs=np.zeros((8,time_steps+1),dtype='complex128')\n",
    "    rthf_outputs[:3]=dipole_moments\n",
    "    rthf_outputs[3]=time_grid\n",
    "    rthf_outputs[4]=field_full_steps\n",
    "    rthf_outputs[5:8]=consistency\n",
    "    \n",
    "    return rthf_outputs\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "=======================================\n",
    "=======================================\n",
    "ALPHA-FOLD CONFIDENCE SCORE PREDICTIONS\n",
    "=======================================\n",
    "=======================================\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "def load_CA_coordinates_pdb(file_path,discard_hetatoms=False,file_type=2):\n",
    "    \"\"\"\n",
    "    Loads in only the C-alpha atoms of a pdb file.\n",
    "    Used for visualizing protein chains.\n",
    "\n",
    "    Parameters:\n",
    "        file_path: path WITHOUT extension; '.txt' is appended when opening the file.\n",
    "        discard_hetatoms: if True, reading stops at the first HETATM / water entry.\n",
    "        file_type: selects the column layout heuristic (1 or 2). The word indices used below are\n",
    "            empirical for the files this was written for - verify them for new file sources.\n",
    "\n",
    "    Returns:\n",
    "        structure_coordinates: (n,3) float32 coordinates in Bohr (converted from Angstrom).\n",
    "        structure_elements: (n,) int32 atomic numbers (H=1 ... Ar=18); element tags longer than\n",
    "            2 characters default to H.\n",
    "        structure_proteins: list of the words[6] entries (presumably residue/chain ids - confirm).\n",
    "        structure_numbers: (n,) int32 running atom count at each stored atom.\n",
    "    \"\"\"\n",
    "\n",
    "    structure_elements=[]\n",
    "    structure_coordinates=[]\n",
    "    structure_proteins=[]\n",
    "    structure_numbers=[]\n",
    "    element_strings=['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar']\n",
    "\n",
    "    with open(file_path+'.txt','r') as file:\n",
    "        reading_coords=False\n",
    "        stop_reading=False\n",
    "\n",
    "        count=0\n",
    "        for line in file:\n",
    "            words=line.split()\n",
    "            # Guard against blank lines: every branch below accesses words[0].\n",
    "            if (len(words)==0): continue\n",
    "\n",
    "            if (words[0]=='ATOM' or words[0]=='HETATM'):\n",
    "                reading_coords=True\n",
    "            if (reading_coords and words[0]=='#'):\n",
    "                break\n",
    "            \n",
    "            if (reading_coords):\n",
    "                # Skip records that carry no atom coordinates.\n",
    "                if (words[0]=='TER' or words[0]=='END' or words[0]=='IGN' or words[0]=='CONECT' or words[0]=='ANISOU'):\n",
    "                    continue\n",
    "                # Stop at hetero atoms / water (if requested) or at the start of a second model.\n",
    "                if ((words[0]=='HETATM' and discard_hetatoms)\n",
    "                    or (words[0]=='ENDMDL')\n",
    "                    or (words[0]=='MODEL' and words[1]=='2')):\n",
    "                    stop_reading=True\n",
    "                    break\n",
    "                if ((words[3]=='WAT' or words[2]=='WAT') and discard_hetatoms):\n",
    "                    stop_reading=True\n",
    "                    break\n",
    "\n",
    "                if (file_type==1):\n",
    "                    count+=1\n",
    "                    if (words[3]!='CA'): continue\n",
    "                    structure_proteins.append(words[6])\n",
    "                    structure_numbers.append(count)\n",
    "                    structure_coordinates.append([words[10],words[11],words[12]])\n",
    "                    for i in range(len(element_strings)):\n",
    "                        if (words[2]==element_strings[i]):\n",
    "                            structure_elements.append(i+1)\n",
    "                            break\n",
    "                elif (file_type==2):\n",
    "                    count+=1\n",
    "                    if (words[2]!='CA'): continue\n",
    "                    structure_proteins.append(words[6])\n",
    "                    structure_numbers.append(count)\n",
    "                    # Column positions differ for water / hydrogen records - empirical special cases.\n",
    "                    # NOTE(review): with the CA filter directly above, the WAT and H1/H2/OH2 branches\n",
    "                    # appear unreachable (words[2] is 'CA' here) - confirm whether they are still needed.\n",
    "                    if (words[3]=='WAT' or words[2]=='WAT'):\n",
    "                        if (words[0]=='HETATM'):\n",
    "                            structure_coordinates.append([words[5],words[6],words[7]])\n",
    "                        else:\n",
    "                            structure_coordinates.append([words[4],words[5],words[6]])\n",
    "                    elif ((words[2]=='H1' or words[2]=='H2' or words[2]=='OH2') and count>10000):\n",
    "                        if (len(words[3])>5):\n",
    "                            structure_coordinates.append([words[4],words[5],words[6]])\n",
    "                        else:\n",
    "                            structure_coordinates.append([words[5],words[6],words[7]])\n",
    "                        if (words[2]=='OH2'):\n",
    "                            structure_elements.append(8)\n",
    "                        else:\n",
    "                            structure_elements.append(1)\n",
    "                        continue\n",
    "                    else:\n",
    "                        if (len(words[2])<5):\n",
    "                            structure_coordinates.append([words[6],words[7],words[8]])\n",
    "                        else:\n",
    "                            structure_coordinates.append([words[5],words[6],words[7]])\n",
    "                    # Long trailing tags cannot be matched against the element symbols - default to H.\n",
    "                    if (len(words[-1])>2):\n",
    "                        structure_elements.append(1)\n",
    "                    else:\n",
    "                        for i in range(len(element_strings)):\n",
    "                            if (words[-1]==element_strings[i]):\n",
    "                                structure_elements.append(i+1)\n",
    "                                break\n",
    "            \n",
    "            if (stop_reading): break\n",
    "\n",
    "    # Convert to arrays; the file stores Angstrom, the program works in Bohr.\n",
    "    structure_coordinates=np.array(structure_coordinates,dtype='float32')*angstrom_to_bohr\n",
    "    structure_elements=np.array(structure_elements,dtype='int32')\n",
    "    structure_numbers=np.array(structure_numbers,dtype='int32')\n",
    "\n",
    "    return structure_coordinates,structure_elements,structure_proteins,structure_numbers\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@njit\n",
    "def naive_neighbor_list_for_unique_section(total_atoms_coordinates,unique_atoms_ids,cut_off,max_neighbors=1000):\n",
    "    \"\"\"\n",
    "    A naive O(n_unique*n_total) neighbourhood algorithm.\n",
    "    Returns the sorted, duplicate-free ids of all atoms within the Euclidean cut_off distance of at\n",
    "    least one atom in unique_atoms_ids (each unique atom also finds itself, at distance zero).\n",
    "    The cheap per-axis |distance|>cut_off tests reject most pairs before the full distance is computed.\n",
    "    The scratch list can hold at most len(unique_atoms_ids)*max_neighbors hits in total.\n",
    "    \"\"\"\n",
    "\n",
    "    num_total_atoms=len(total_atoms_coordinates)\n",
    "    num_unique_atoms=len(unique_atoms_ids)\n",
    "    total_atoms_ids=np.arange(num_total_atoms)\n",
    "\n",
    "    n_list=np.zeros(num_unique_atoms*max_neighbors,dtype='int32')\n",
    "    n_list_length=0\n",
    "    for i in unique_atoms_ids:\n",
    "        # Hoisted out of the inner loop: coordinates_i only depends on i.\n",
    "        coordinates_i=total_atoms_coordinates[i]\n",
    "        for j in total_atoms_ids:\n",
    "            coordinates_j=total_atoms_coordinates[j]\n",
    "            dist_x=coordinates_i[0]-coordinates_j[0]\n",
    "            if (np.abs(dist_x)>cut_off): continue\n",
    "            dist_y=coordinates_i[1]-coordinates_j[1]\n",
    "            if (np.abs(dist_y)>cut_off): continue\n",
    "            dist_z=coordinates_i[2]-coordinates_j[2]\n",
    "            if (np.abs(dist_z)>cut_off): continue\n",
    "            dist=np.sqrt(dist_x*dist_x+dist_y*dist_y+dist_z*dist_z)\n",
    "            if (dist>cut_off): continue\n",
    "            n_list[n_list_length]=j\n",
    "            n_list_length+=1\n",
    "    \n",
    "    # np.unique also sorts, so the returned ids are ascending and duplicate-free.\n",
    "    n_list=np.unique(n_list[:n_list_length])\n",
    "    return n_list\n",
    "\n",
    "\n",
    "\n",
    "def run_AlphaFold_prediction(coordinates_without_water,elements_without_water,coordinates,elements,structure_numbers,bonds,bond_nums,amino_acids_per_cluster,saving_path):\n",
    "    \"\"\"\n",
    "    The main function for running a calculation of atomic energies for a protein or protein complex.\n",
    "    These energies can be used to evaluate protein structure predictions since they correlate with AlphaFold's pLDDT (predicted local distance difference test) score.\n",
    "\n",
    "    The structure is split into clusters of roughly amino_acids_per_cluster amino acids. For every\n",
    "    cluster, neighboring atoms within 10 Angstrom are added as an embedding, cut bonds are repaired,\n",
    "    a Hartree-Fock calculation is run and per-atom energies are extracted for the cluster's own\n",
    "    (unique) atoms only. Both energy arrays are written to saving_path after every cluster, so\n",
    "    partial results survive an interruption. Always returns None; results are persisted via np.save.\n",
    "    \"\"\"\n",
    "\n",
    "    max_unique_atoms_per_cluster=1000\n",
    "    num_atoms_without_water=len(coordinates_without_water)\n",
    "\n",
    "    # Cluster borders: atom indices of ~evenly spaced amino acids (note the -1 shift into\n",
    "    # structure_numbers); the first and last border are forced to span the whole water-free structure.\n",
    "    num_amino_acids=len(structure_numbers)\n",
    "    number_of_clusters=int(np.ceil(num_amino_acids/amino_acids_per_cluster))\n",
    "    cluster_borders=structure_numbers[np.linspace(0,num_amino_acids-1,number_of_clusters+1).astype('int32')-1]\n",
    "    cluster_borders[0]=0\n",
    "    cluster_borders[-1]=num_atoms_without_water\n",
    "\n",
    "    # Collect the atom ids belonging to each cluster; max_atoms trims the id matrix to the largest cluster.\n",
    "    cluster_unique_atoms=np.zeros((number_of_clusters,max_unique_atoms_per_cluster),dtype='int32')\n",
    "    num_unique_coords=np.zeros(number_of_clusters,dtype='int32')\n",
    "    max_atoms=0\n",
    "    for i in range(number_of_clusters):\n",
    "        atoms_of_this_cluster=cluster_borders[i+1]-cluster_borders[i]\n",
    "        if (atoms_of_this_cluster>max_atoms): max_atoms=atoms_of_this_cluster\n",
    "        cluster_unique_atoms[i,:atoms_of_this_cluster]=np.arange(cluster_borders[i],cluster_borders[i+1])\n",
    "        num_unique_coords[i]=atoms_of_this_cluster\n",
    "    cluster_unique_atoms=cluster_unique_atoms[:,:max_atoms]\n",
    "\n",
    "    num_atoms_total=len(elements_without_water)\n",
    "\n",
    "    # Global per-atom energy arrays, filled cluster by cluster.\n",
    "    energies_0_total=np.zeros(num_atoms_total)\n",
    "    energies_converged_total=np.zeros(num_atoms_total)\n",
    "\n",
    "\n",
    "    print('Number of clusters:',number_of_clusters)\n",
    "\n",
    "    for current_cluster_num in range(number_of_clusters):\n",
    "        print('============')\n",
    "        print('Cluster',current_cluster_num)\n",
    "        print('============')\n",
    "\n",
    "        # Unique atoms of this cluster plus all atoms within 10 Angstrom (the embedding).\n",
    "        unique_atoms_ids=cluster_unique_atoms[current_cluster_num,:num_unique_coords[current_cluster_num]]\n",
    "        print('Unique atoms in this cluster:',len(unique_atoms_ids))\n",
    "        n_list=naive_neighbor_list_for_unique_section(coordinates,unique_atoms_ids,10*angstrom_to_bohr)\n",
    "        print('Atoms after adding neighbors:',len(n_list))\n",
    "        \n",
    "        atoms_final,coordinates_final,elements_final=avoid_bond_cutting(n_list,coordinates,elements,bonds,bond_nums)\n",
    "        print('Atoms after avoiding bond cutting:',len(atoms_final))\n",
    "        print()\n",
    "        \n",
    "        final_coordinates_cluster,final_elements_cluster=coordinates_final,elements_final\n",
    "\n",
    "        # Hartree-Fock on the embedded cluster; P_0/G_0 presumably belong to the initial guess and\n",
    "        # P/G to the converged solution - confirm against run_HF's return signature.\n",
    "        _,P,P_0,G,G_0,H_core,_,_,_,_,_,_,_,type_of_basis_function,num_basis_functions\\\n",
    "            =run_HF(final_coordinates_cluster,final_elements_cluster,enable_plotting=True)\n",
    "        print()\n",
    "            \n",
    "        # One-electron plus half the two-electron part: the energy-weighted matrices.\n",
    "        E_0=H_core+0.5*G_0\n",
    "        E=H_core+0.5*G\n",
    "        \n",
    "        num_atoms=len(final_coordinates_cluster)\n",
    "        _,basis_functions_index_list,_,_,type_of_basis_function=calculate_num_gaussian_functions(final_elements_cluster,num_atoms,num_basis_functions)\n",
    "        \n",
    "        energies_0=np.zeros(num_atoms)\n",
    "        energies_converged=np.zeros(num_atoms)\n",
    "        \n",
    "        \n",
    "        # Per-atom energy partitioning: row-wise sums of E*P are attributed to the atom owning the basis\n",
    "        # function and divided by its nuclear charge. Certain basis functions are skipped for heavy atoms\n",
    "        # and for element 16 - presumably an empirical choice; confirm against the method description.\n",
    "        h_atom_count=0\n",
    "        heavy_atom_count=0\n",
    "        for atm in range(num_atoms):\n",
    "            for i in range(basis_functions_index_list[atm],basis_functions_index_list[atm+1]):\n",
    "                if (type_of_basis_function[i]==1 and final_elements_cluster[atm]>1): continue\n",
    "                if (type_of_basis_function[i]<=3 and final_elements_cluster[atm]==16): continue\n",
    "                energies_0[atm]+=np.sum(E_0[i]*P_0[i])/final_elements_cluster[atm]\n",
    "                energies_converged[atm]+=np.sum(E[i]*P[i])/final_elements_cluster[atm]\n",
    "            if (final_elements_cluster[atm]>1): \n",
    "                heavy_atom_count+=1\n",
    "            else: \n",
    "                h_atom_count+=1\n",
    "        # (h_atom_count/heavy_atom_count are currently only tallied, not reported.)\n",
    "        \n",
    "        \n",
    "        # Only atoms unique to this cluster keep their energies; neighbor atoms merely embed the cluster.\n",
    "        for atm in range(num_atoms):\n",
    "            atom_id=atoms_final[atm]\n",
    "            if (atom_id in unique_atoms_ids):\n",
    "                energies_0_total[atom_id]=energies_0[atm]\n",
    "                energies_converged_total[atom_id]=energies_converged[atm]\n",
    "                \n",
    "        np.save(saving_path+'/energies_0',energies_0_total)\n",
    "        np.save(saving_path+'/energies_converged',energies_converged_total)\n",
    "\n",
    "    return None\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "====================\n",
    "####### MAIN #######\n",
    "====================\n",
    "\n",
    "Select a calculation mode (explanation below) and follow the corresponding instructions. \n",
    "You only need to modify the section of the selected calculation mode.\n",
    "\n",
    "Two general annotations:\n",
    "    1. Replace any strings containing path/to/... with the actual directories!\n",
    "    2. Load atomic coordinates and elements\n",
    "        - via 'coordinates,elements,_=load_coordinates(path/to/coordinate/file)' for a .json file\n",
    "        - via 'coordinates,elements=load_coordinates_pdb(path/to/coordinate/file)' for a .pdb (protein data base) file\n",
    "        - via 'coordinates=np.load('path/to/coordinates.npy')' and 'elements=np.load('path/to/elements.npy')' for coordinates/elements stored in a numpy file \n",
    "          (recommended for very large systems with >1,000,000 atoms)\n",
    "\n",
    "\n",
    "Recommended test: download the coordinate file for Beta-Carotene obtained at \n",
    "https://pubchem.ncbi.nlm.nih.gov/compound/Beta-Carotene#section=3D-Conformer\n",
    "and run the program in mode='normal' with the file path changed to the directory where the file was saved.\n",
    "The output should then be identical to the output displayed in the parameter section, subsection outputs (apart from the last 2-3 digits, which can vary).\n",
    "This only holds if the default settings are used.\n",
    "The execution time will be ~1-3 minutes if the program was not loaded before, otherwise ~10-60 seconds depending on the computer.\n",
    "Note: The first execution of NIMBLE can take around 1-2 minutes longer than usual due to imports and compilation of the jit-functions.\n",
    "\n",
    "\n",
    "\n",
    "------------------------------------\n",
    "Explanations for the different modes\n",
    "------------------------------------\n",
    "\n",
    "\n",
    "---normal---\n",
    "A single Hartree-Fock calculation.\n",
    "Load the coordinates of the atoms and elements as described above.\n",
    "run_HF() is called afterwards and the calculation starts.\n",
    "\n",
    "\n",
    "\n",
    "---large_scale---\n",
    "For a divide-and-conquer Hartree-Fock calculation. For >1,000,000 atoms it is recommended to use numpy arrays in single precision (.astype('float32')) to store and load coordinates and elements \n",
    "since PDB files of these structures can have the size of multiple GB and are difficult to upload and read out.\n",
    "\n",
    "Specify the number of subsystems and number of compute nodes for the calculation (it is recommended to run this mode only on computing clusters, if not choose num_nodes=1). \n",
    "The number of subsystems has to be computed before the actual calculation by calling\n",
    "'center_coordinates=calculate_parts_center_coordinates(coordinates,partition_length,partition_cut_off)'. \n",
    "This can also be done on a normal computer. Select the given output (total) as the number of your subsystems.\n",
    "\n",
    "Make sure that the cut-offs for divide-and-conquer calculations are selected accordingly in the variable declaration section at the beginning of the program.\n",
    "\n",
    "The job array number is the number of the computation thread. We recommend using the job array option from SLURM and passing the job number as 'job_array_number=int(sys.argv[1])'.\n",
    "If only one device is used, set 'job_array_number=0'.\n",
    "\n",
    "Specify a saving path with 'saving_path=...'.\n",
    "\n",
    "initialize_and_run_HF() is called and the calculation starts.\n",
    "\n",
    "\n",
    "\n",
    "---plotting---\n",
    "For calculating the electronic density on a 3D real-space grid. This mode therefore visualizes the results of a large-scale calculation.\n",
    "\n",
    "Make sure that the cut-offs for divide-and-conquer calculations AND the parameters for density plotting calculations are selected accordingly \n",
    "in the variable declaration section at the beginning of the program.\n",
    "\n",
    "Specify the path where the numpy array for the 3D grid will be saved with 'path/to/density_grid.npy'\n",
    "\n",
    "calculate_density_grid_for_partitions() is called and the calculation starts.\n",
    "\n",
    "\n",
    "\n",
    "---time_dependent---\n",
    "For running a real-time time-dependent Hartree-Fock calculation used for the calculation of absorption spectra.\n",
    "\n",
    "Load the coordinates of the atoms and elements as described above.\n",
    "\n",
    "Make sure that the configurations for real-time time-dependent Hartree-Fock calculations are selected accordingly \n",
    "in the variable declaration section at the beginning of the program.\n",
    "\n",
    "Specify a saving path with 'saving_path=...'.\n",
    "\n",
    "run_rthf() is called and the calculation starts.\n",
    "\n",
    "\n",
    "\n",
    "---alpha_fold---\n",
    "For running a calculation of atomic energies. These correlate strongly with AlphaFold's pLDDT (predicted local distance difference test) score,\n",
    "which means atomic energies can be used to evaluate predicted protein structures from AlphaFold.\n",
    "\n",
    "Load the coordinates of the atoms and elements by specifying the file path. Here we assume a .pdb file is used.\n",
    "\n",
    "Make sure that the configuration for Alpha-Fold prediction calculations is selected accordingly \n",
    "in the variable declaration section at the beginning of the program ('amino_acids_per_cluster=...').\n",
    "\n",
    "Specify a saving path with 'saving_path=...'.\n",
    "\n",
    "run_AlphaFold_prediction() is called and the calculation starts.\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "mode = 'normal'  # choose: normal, large_scale, plotting, time_dependent, alpha_fold\n",
    "\n",
    "if mode == 'normal':\n",
    "\n",
    "    \"\"\"\n",
    "    Examples:\n",
    "    Beta-carotene, Insulin (more time-consuming), DNA (more time-consuming)\n",
    "    coordinates,elements,_=load_coordinates('path/to/coordinate/file') \n",
    "    coordinates,elements=load_coordinates_pdb('Insulin_Test')\n",
    "    coordinates,elements=load_coordinates_pdb('DNA_Test',file_type=1)\n",
    "    \"\"\"\n",
    "    # Single Hartree-Fock calculation on the bundled beta-carotene test file.\n",
    "    coordinates, elements, _ = load_coordinates('Beta_Carotene_Test')\n",
    "    (energy, E_nn, P, F, eigenorbitals, eigenenergies,\n",
    "     gaussian_functions_coefficients, gaussian_functions_exponents,\n",
    "     gaussian_functions_coordinates, ij_list_no_duplicates,\n",
    "     atom_of_basisfunction, gaussian_functions_index_list,\n",
    "     type_of_basis_function, num_basis_functions) = run_HF(coordinates, elements, enable_plotting=True)\n",
    "\n",
    "\n",
    "elif mode == 'large_scale':\n",
    "\n",
    "    # Divide-and-conquer run; each call processes the subsystems assigned to\n",
    "    # one job of a SLURM job array.\n",
    "    coordinates = np.load('path/to/coordinates.npy')\n",
    "    elements = np.load('path/to/elements.npy')\n",
    "\n",
    "    num_jobs = 10000\n",
    "    num_nodes = 10\n",
    "    job_array_number = int(sys.argv[1])  # use 0 when running on a single device\n",
    "    saving_path = 'path/to/density/files'\n",
    "\n",
    "    initialize_and_run_HF(coordinates, elements, saving_path, num_jobs, num_nodes, job_array_number)\n",
    "\n",
    "\n",
    "elif mode == 'plotting':\n",
    "\n",
    "    # Evaluate the electronic density of a finished large-scale run on a 3D grid.\n",
    "    total_coordinates = np.load('path/to/coordinates.npy')\n",
    "    total_elements = np.load('path/to/elements.npy')\n",
    "\n",
    "    num_subsystems = 10000\n",
    "\n",
    "    x, y, z, x_min, y_min, z_min = calculate_grid(total_coordinates)\n",
    "    xyz = calculate_parts_center_coordinates(total_coordinates, partition_length, partition_cut_off)\n",
    "\n",
    "    density_grid = calculate_density_grid_for_partitions(\n",
    "        x, y, z, x_min, y_min, z_min, total_coordinates, total_elements, xyz, num_subsystems,\n",
    "        hide_H_atoms=False, saving_path='path/to/density/files')\n",
    "    np.save('path/to/density_grid.npy', density_grid.astype('float32'))\n",
    "\n",
    "\n",
    "elif mode == 'time_dependent':\n",
    "\n",
    "    # Real-time TDHF run; all collected outputs are stored in one numpy file.\n",
    "    coordinates, elements = load_coordinates_pdb('path/to/coordinate/file')\n",
    "\n",
    "    saving_path = 'path/to/RTHF/data'\n",
    "\n",
    "    rthf_outputs = run_rthf(coordinates, elements, time_steps=time_steps, delta_t=delta_t,\n",
    "                            pulse_standard_deviation=pulse_standard_deviation,\n",
    "                            pulse_shift_factor=pulse_shift_factor,\n",
    "                            e_field_max=e_field_max, update=update)\n",
    "\n",
    "    np.save(saving_path, rthf_outputs)\n",
    "\n",
    "\n",
    "elif mode == 'alpha_fold':\n",
    "\n",
    "    # Atomic-energy evaluation of a predicted protein structure (.pdb input).\n",
    "    alpha_fold_file_path = 'path/to/coordinate/file'\n",
    "    structure_coordinates, structure_elements, structure_proteins, structure_numbers = \\\n",
    "        load_CA_coordinates_pdb(alpha_fold_file_path, discard_hetatoms=False, file_type=2)\n",
    "    coordinates_without_water, elements_without_water = \\\n",
    "        load_coordinates_pdb(alpha_fold_file_path, discard_hetatoms=True, file_type=2)\n",
    "    coordinates, elements = load_coordinates_pdb(alpha_fold_file_path, discard_hetatoms=False, file_type=2)\n",
    "\n",
    "    bonds, bond_types, bond_distances, bond_nums, element_distances = \\\n",
    "        calculate_bonds(coordinates, elements, cut=4.0, cut_H=2.6)\n",
    "\n",
    "    saving_path = 'path/to/AlphaFold/data'\n",
    "\n",
    "    run_AlphaFold_prediction(coordinates_without_water, elements_without_water, coordinates, elements,\n",
    "                             structure_numbers, bonds, bond_nums, amino_acids_per_cluster, saving_path)\n",
    "\n",
    "\n",
    "else:\n",
    "    print('Calculation mode not recognized - change it in section MAIN. Choose between normal, large_scale, plotting, time_dependent, alpha_fold.')\n",
    "\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
