diff --git a/PG-PuReMD/src/charges.c b/PG-PuReMD/src/charges.c
index 8f53b65d4b49a95fc68f4acf7698802e650137df..72eeb2848071d7d9f7801626b1de39d9b5c607a6 100644
--- a/PG-PuReMD/src/charges.c
+++ b/PG-PuReMD/src/charges.c
@@ -263,7 +263,7 @@ void Init_MatVec( reax_system *system, simulation_data *data,
     int i; //, fillin;
     reax_atom *atom;
 
-//    if( (data->step - data->prev_steps) % control->refactor == 0 ||
+//    if( (data->step - data->prev_steps) % control->cm_solver_pre_comp_refactor == 0 ||
 //            workspace->L == NULL )
 //    {
 ////        Print_Linear_System( system, control, workspace, data->step );
@@ -421,14 +421,14 @@ void QEq( reax_system *system, control_params *control, simulation_data *data,
 
     //MATRIX CHANGES
     s_matvecs = dual_CG( system, workspace, &workspace->H, workspace->b,
-            control->q_err, workspace->x, mpi_data, out_control->log, data );
+            control->cm_solver_q_err, workspace->x, mpi_data, out_control->log, data );
     t_matvecs = 0;
     //fprintf (stderr, "Host: First CG complated with iterations: %d \n", s_matvecs);
 
     //s_matvecs = CG(system, workspace, workspace->H, workspace->b_s, //newQEq sCG
-    // control->q_err, workspace->s, mpi_data, out_control->log );
+    // control->cm_solver_q_err, workspace->s, mpi_data, out_control->log );
     //s_matvecs = PCG( system, workspace, workspace->H, workspace->b_s,
-    //   control->q_err, workspace->L, workspace->U, workspace->s,
+    //   control->cm_solver_q_err, workspace->L, workspace->U, workspace->s,
     //   mpi_data, out_control->log );
 
 #if defined(DEBUG)
@@ -436,9 +436,9 @@ void QEq( reax_system *system, control_params *control, simulation_data *data,
 #endif
 
     //t_matvecs = CG(system, workspace, workspace->H, workspace->b_t, //newQEq sCG
-    // control->q_err, workspace->t, mpi_data, out_control->log );
+    // control->cm_solver_q_err, workspace->t, mpi_data, out_control->log );
     //t_matvecs = PCG( system, workspace, workspace->H, workspace->b_t,
-    //   control->q_err, workspace->L, workspace->U, workspace->t,
+    //   control->cm_solver_q_err, workspace->L, workspace->U, workspace->t,
     //   mpi_data, out_control->log );
 
 #if defined(DEBUG)
diff --git a/PG-PuReMD/src/control.c b/PG-PuReMD/src/control.c
index 268b6a1750874a63f3fbbc3a7e339582f5b231f4..5cb8ce6f1099fde86343d81d8c7b147d41bc4864 100644
--- a/PG-PuReMD/src/control.c
+++ b/PG-PuReMD/src/control.c
@@ -47,7 +47,7 @@ char Read_Control_File( char *control_file, control_params* control,
     }
 
     /* assign default values */
-    strcpy( control->sim_name, "simulate" );
+    strcpy( control->sim_name, "default.sim" );
     control->ensemble        = NVE;
     control->nsteps          = 0;
     control->dt              = 0.25;
@@ -77,10 +77,21 @@ char Read_Control_File( char *control_file, control_params* control,
 
     control->tabulate = 0;
 
+    control->charge_method = QEQ_CM;
     control->charge_freq = 1;
-    control->q_err = 1e-6;
-    control->refactor = 100;
-    control->droptol = 1e-2;;
+    control->cm_q_net = 0.0;
+    control->cm_solver_type = GMRES_S;
+    control->cm_solver_max_iters = 100;
+    control->cm_solver_restart = 50;
+    control->cm_solver_q_err = 1e-6;
+    control->cm_domain_sparsify_enabled = FALSE;
+    control->cm_domain_sparsity = 1.0;
+    control->cm_solver_pre_comp_type = DIAG_PC;
+    control->cm_solver_pre_comp_sweeps = 3;
+    control->cm_solver_pre_comp_refactor = 100;
+    control->cm_solver_pre_comp_droptol = 0.01;
+    control->cm_solver_pre_app_type = TRI_SOLVE_PA;
+    control->cm_solver_pre_app_jacobi_iters = 50;
 
     control->T_init = 0.;
     control->T_final = 300.;
@@ -247,25 +258,79 @@ char Read_Control_File( char *control_file, control_params* control,
             ival = atoi( tmp[1] );
             control->tabulate = ival;
         }
+        else if ( strcmp(tmp[0], "charge_method") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->charge_method = ival;
+        }
         else if ( strcmp(tmp[0], "charge_freq") == 0 )
         {
             ival = atoi( tmp[1] );
             control->charge_freq = ival;
         }
-        else if ( strcmp(tmp[0], "q_err") == 0 )
+        else if ( strcmp(tmp[0], "cm_q_net") == 0 )
+        {
+            val = atof( tmp[1] );
+            control->cm_q_net = val;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_type") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_type = ival;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_max_iters") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_max_iters = ival;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_restart") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_restart = ival;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_q_err") == 0 )
+        {
+            val = atof( tmp[1] );
+            control->cm_solver_q_err = val;
+        }
+        else if ( strcmp(tmp[0], "cm_domain_sparsity") == 0 )
         {
             val = atof( tmp[1] );
-            control->q_err = val;
+            control->cm_domain_sparsity = val;
+            if ( val < 1.0 )
+            {
+                control->cm_domain_sparsify_enabled = TRUE;
+            }
         }
-        else if ( strcmp(tmp[0], "ilu_refactor") == 0 )
+        else if ( strcmp(tmp[0], "cm_solver_pre_comp_type") == 0 )
         {
             ival = atoi( tmp[1] );
-            control->refactor = ival;
+            control->cm_solver_pre_comp_type = ival;
         }
-        else if ( strcmp(tmp[0], "ilu_droptol") == 0 )
+        else if ( strcmp(tmp[0], "cm_solver_pre_comp_refactor") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_pre_comp_refactor = ival;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_pre_comp_droptol") == 0 )
         {
             val = atof( tmp[1] );
-            control->droptol = val;
+            control->cm_solver_pre_comp_droptol = val;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_pre_comp_sweeps") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_pre_comp_sweeps = ival;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_pre_app_type") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_pre_app_type = ival;
+        }
+        else if ( strcmp(tmp[0], "cm_solver_pre_app_jacobi_iters") == 0 )
+        {
+            ival = atoi( tmp[1] );
+            control->cm_solver_pre_app_jacobi_iters = ival;
         }
         else if ( strcmp(tmp[0], "temp_init") == 0 )
         {
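Note: each option added above follows the existing two-token dispatch in Read_Control_File, where tmp[0] holds the keyword and tmp[1] its value, converted with atoi() or atof(). A minimal standalone sketch of that pattern, assuming an illustrative cm_opts struct in place of control_params (this is not the PuReMD tokenizer):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* illustrative stand-in for the relevant control_params fields */
    typedef struct
    {
        unsigned int cm_solver_max_iters;
        double cm_solver_q_err;
    } cm_opts;

    /* same keyword/value dispatch shape as Read_Control_File */
    static void parse_option( cm_opts *opts, const char *key, const char *value )
    {
        if ( strcmp( key, "cm_solver_max_iters" ) == 0 )
        {
            opts->cm_solver_max_iters = (unsigned int) atoi( value );
        }
        else if ( strcmp( key, "cm_solver_q_err" ) == 0 )
        {
            opts->cm_solver_q_err = atof( value );
        }
    }

    int main( void )
    {
        /* defaults mirror those assigned in Read_Control_File */
        cm_opts opts = { 100, 1.0e-6 };

        parse_option( &opts, "cm_solver_max_iters", "1000" );
        printf( "max_iters = %u, q_err = %g\n",
                opts.cm_solver_max_iters, opts.cm_solver_q_err );

        return 0;
    }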
diff --git a/PG-PuReMD/src/cuda/cuda_charges.cu b/PG-PuReMD/src/cuda/cuda_charges.cu
index 9ea976537c2527ae52616edab5e5b367615f81e1..8452548d07beffee1a2958980f21fc1a6516fcf4 100644
--- a/PG-PuReMD/src/cuda/cuda_charges.cu
+++ b/PG-PuReMD/src/cuda/cuda_charges.cu
@@ -269,8 +269,8 @@ void Cuda_QEq( reax_system *system, control_params *control, simulation_data
 #endif
 
     //MATRIX CHANGES
-    s_matvecs = Cuda_dual_CG( system, workspace, &dev_workspace->H,
-            dev_workspace->b, control->q_err, dev_workspace->x, mpi_data,
+    s_matvecs = Cuda_dual_CG( system, control, workspace, &dev_workspace->H,
+            dev_workspace->b, control->cm_solver_q_err, dev_workspace->x, mpi_data,
             out_control->log, data );
     t_matvecs = 0;
     //fprintf (stderr, "Device: First CG complated with iterations: %d \n", s_matvecs);
diff --git a/PG-PuReMD/src/cuda/cuda_forces.cu b/PG-PuReMD/src/cuda/cuda_forces.cu
index 4960038cb48d0c2c2a127f05ad1146d63f7ebf8a..28982bffa1d0ab72cbc80032d790798fd95ba1fb 100644
--- a/PG-PuReMD/src/cuda/cuda_forces.cu
+++ b/PG-PuReMD/src/cuda/cuda_forces.cu
@@ -212,7 +212,7 @@ CUDA_GLOBAL void k_estimate_storages( reax_atom *my_atoms,
     int local;
     int my_bonds, my_hbonds, my_cm_entries;
     real cutoff;
-    real r_ij, r2; 
+    real r_ij; 
     real C12, C34, C56;
     real BO, BO_s, BO_pi, BO_pi2;
     single_body_parameters *sbp_i, *sbp_j;
@@ -334,8 +334,6 @@ CUDA_GLOBAL void k_estimate_storages( reax_atom *my_atoms,
                 /* uncorrected bond orders */
                 if ( nbr_pj->d <= control->bond_cut )
                 {
-                    r2 = SQR( r_ij );
-
                     if ( sbp_i->r_s > 0.0 && sbp_j->r_s > 0.0 )
                     {
                         C12 = twbp->p_bo1 * POW( r_ij / twbp->r_s, twbp->p_bo2 );
@@ -1074,6 +1072,7 @@ CUDA_GLOBAL void k_update_hbonds( reax_atom *my_atoms, reax_list hbonds, int n )
 }
 
 
+#if defined(DEBUG)
 CUDA_GLOBAL void k_print_forces( reax_atom *my_atoms, rvec *f, int n )
 {
     int i; 
@@ -1151,6 +1150,7 @@ static void Print_HBonds( reax_system *system, int step )
     cudaThreadSynchronize( );
     cudaCheckError( );
 }
+#endif
 
 
 CUDA_GLOBAL void k_init_bond_orders( reax_atom *my_atoms, reax_list far_nbrs, 
@@ -1345,7 +1345,8 @@ int Cuda_Compute_Bonded_Forces( reax_system *system, control_params *control,
         simulation_data *data, storage *workspace, 
         reax_list **lists, output_controls *out_control )
 {
-    int hbs, hnbrs_blocks, update_energy, ret;
+    int update_energy, ret;
+//    int hbs, hnbrs_blocks;
     int *thbody;
     static int compute_bonded_part1 = FALSE;
     real *spad = (real *) scratch;
@@ -1643,8 +1644,8 @@ int Cuda_Compute_Bonded_Forces( reax_system *system, control_params *control,
             cuda_memset( spad, 0,
                     2 * sizeof(real) * system->n + sizeof(rvec) * system->n * 2, "scratch" );
 
-            hbs = (system->n * HB_KER_THREADS_PER_ATOM / HB_BLOCK_SIZE) + 
-                (((system->n * HB_KER_THREADS_PER_ATOM) % HB_BLOCK_SIZE) == 0 ? 0 : 1);
+//            hbs = (system->n * HB_KER_THREADS_PER_ATOM / HB_BLOCK_SIZE) + 
+//                (((system->n * HB_KER_THREADS_PER_ATOM) % HB_BLOCK_SIZE) == 0 ? 0 : 1);
 
             Cuda_Hydrogen_Bonds <<< BLOCKS, BLOCK_SIZE >>>
 //            Cuda_Hydrogen_Bonds_MT <<< hbs, HB_BLOCK_SIZE, 
@@ -1694,8 +1695,8 @@ int Cuda_Compute_Bonded_Forces( reax_system *system, control_params *control,
             cudaCheckError( );
 
             /* post process step2 */
-            hnbrs_blocks = (system->N * HB_POST_PROC_KER_THREADS_PER_ATOM / HB_POST_PROC_BLOCK_SIZE) +
-                (((system->N * HB_POST_PROC_KER_THREADS_PER_ATOM) % HB_POST_PROC_BLOCK_SIZE) == 0 ? 0 : 1);
+//            hnbrs_blocks = (system->N * HB_POST_PROC_KER_THREADS_PER_ATOM / HB_POST_PROC_BLOCK_SIZE) +
+//                (((system->N * HB_POST_PROC_KER_THREADS_PER_ATOM) % HB_POST_PROC_BLOCK_SIZE) == 0 ? 0 : 1);
 
             Cuda_Hydrogen_Bonds_HNbrs <<< system->N, 32, 32 * sizeof(rvec) >>>
                 ( system->d_my_atoms, *dev_workspace, *(*dev_lists + HBONDS) );
@@ -1898,7 +1899,7 @@ int Cuda_Compute_Forces( reax_system *system, control_params *control,
 #if defined(DEBUG_FOCUS)
         fprintf( stderr, "p%d @ step%d: total forces computed\n",
                 system->my_rank, data->step );
-        //Print_Total_Force( system, data, workspace );
+//        Print_Total_Force( system, data, workspace );
         MPI_Barrier( MPI_COMM_WORLD );
 
 #endif
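The k_print_forces/Print_HBonds helpers above are now compiled only when DEBUG is defined, matching the existing #if defined(DEBUG) blocks. A minimal sketch of this compile-time guard pattern (plain C, built with or without -DDEBUG; names here are illustrative):

    #include <stdio.h>

    #if defined(DEBUG)
    /* debug-only helper: compiled away entirely in release builds */
    static void print_forces( const double *f, int n )
    {
        int i;

        for ( i = 0; i < n; ++i )
        {
            printf( "f[%d] = %f\n", i, f[i] );
        }
    }
    #endif

    int main( void )
    {
        double f[2] = { 0.5, -0.5 };

    #if defined(DEBUG)
        print_forces( f, 2 );
    #else
        (void) f; /* silence unused-variable warnings in release builds */
    #endif

        return 0;
    }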
diff --git a/PG-PuReMD/src/cuda/cuda_lin_alg.cu b/PG-PuReMD/src/cuda/cuda_lin_alg.cu
index d47a08562b9993f8cd2d760069eeb802a010bd8f..bb35d181168f0bfd25950fd5bc1f8cce0a538f7b 100644
--- a/PG-PuReMD/src/cuda/cuda_lin_alg.cu
+++ b/PG-PuReMD/src/cuda/cuda_lin_alg.cu
@@ -598,35 +598,35 @@ void Cuda_Dual_Matvec( sparse_matrix *H, rvec2 *a, rvec2 *b, int n, int size )
 
 void Cuda_Matvec( sparse_matrix *H, real *a, real *b, int n, int size )
 {
-    int blocks;
+//    int blocks;
 
-    blocks = (n / DEF_BLOCK_SIZE) + 
-        (( n % DEF_BLOCK_SIZE) == 0 ? 0 : 1);
+//    blocks = (n / DEF_BLOCK_SIZE) + 
+//        (( n % DEF_BLOCK_SIZE) == 0 ? 0 : 1);
 
     cuda_memset( b, 0, sizeof(real) * size, "dual_matvec:result" );
 
-    //one thread per row implementation
-    //k_matvec <<< blocks, DEF_BLOCK_SIZE >>>
-    //        (*H, a, b, n);
-    //cudaThreadSynchronize ();
-    //cudaCheckError ();
+    /* one thread per row implementation */
+//    k_matvec <<< blocks, DEF_BLOCK_SIZE >>>
+//        ( *H, a, b, n );
+//    cudaThreadSynchronize( );
+//    cudaCheckError( );
 
 #if defined(__SM_35__)
     k_matvec_csr <<< MATVEC_BLOCKS, MATVEC_BLOCK_SIZE >>>
 #else
     k_matvec_csr <<< MATVEC_BLOCKS, MATVEC_BLOCK_SIZE,
-                 sizeof(real) * MATVEC_BLOCK_SIZE>>>
+                 sizeof(real) * MATVEC_BLOCK_SIZE >>>
 #endif
-                     (*H, a, b, n);
+         ( *H, a, b, n );
 
     cudaThreadSynchronize( );
     cudaCheckError( );
 }
 
 
-int Cuda_dual_CG( reax_system *system, storage *workspace, sparse_matrix *H,
-        rvec2 *b, real tol, rvec2 *x, mpi_datatypes* mpi_data, FILE *fout,
-        simulation_data *data )
+int Cuda_dual_CG( reax_system *system, control_params *control, storage *workspace,
+        sparse_matrix *H, rvec2 *b, real tol, rvec2 *x, mpi_datatypes* mpi_data,
+        FILE *fout, simulation_data *data )
 {
     int i, n, matvecs, scale;
 //    int j, N;
@@ -768,7 +768,7 @@ int Cuda_dual_CG( reax_system *system, storage *workspace, sparse_matrix *H,
     }
 #endif
 
-    for ( i = 1; i < 1000; ++i )
+    for ( i = 1; i < control->cm_solver_max_iters; ++i )
     {
         //MVAPICH2
 //#ifdef __CUDA_DEBUG__
@@ -921,8 +921,8 @@ int Cuda_dual_CG( reax_system *system, storage *workspace, sparse_matrix *H,
         //compare_array (workspace->b_t, dev_workspace->b_t, system->n, "b_t");
         //compare_array (workspace->t, dev_workspace->t, system->n, "t");
 
-        matvecs = Cuda_CG( system, workspace, H, dev_workspace->b_t, tol, dev_workspace->t,
-                mpi_data, fout );
+        matvecs = Cuda_CG( system, control, workspace, H, dev_workspace->b_t, tol, dev_workspace->t,
+                mpi_data );
 
         //fprintf (stderr, " Cuda_CG1: iterations --> %d \n", matvecs );
         //for( j = 0; j < n; ++j )
@@ -942,8 +942,8 @@ int Cuda_dual_CG( reax_system *system, storage *workspace, sparse_matrix *H,
 
         //fprintf (stderr, "Getting started with Cuda_CG2 \n");
 
-        matvecs = Cuda_CG( system, workspace, H, dev_workspace->b_s, tol, dev_workspace->s,
-                mpi_data, fout );
+        matvecs = Cuda_CG( system, control, workspace, H, dev_workspace->b_s, tol, dev_workspace->s,
+                mpi_data );
 
         //fprintf (stderr, " Cuda_CG2: iterations --> %d \n", matvecs );
         //for( j = 0; j < system->n; ++j )
@@ -952,7 +952,7 @@ int Cuda_dual_CG( reax_system *system, storage *workspace, sparse_matrix *H,
         Cuda_RvecCopy_To( dev_workspace->x, dev_workspace->s, 0, system->n );
     }
 
-    if ( i >= 1000 )
+    if ( i >= control->cm_solver_max_iters )
     {
         fprintf( stderr, "[WARNING] p%d: dual CG convergence failed! (%d steps)\n",
                 system->my_rank, i );
@@ -972,8 +972,8 @@ int Cuda_dual_CG( reax_system *system, storage *workspace, sparse_matrix *H,
 }
 
 
-int Cuda_CG( reax_system *system, storage *workspace, sparse_matrix *H, real
-        *b, real tol, real *x, mpi_datatypes* mpi_data, FILE *fout )
+int Cuda_CG( reax_system *system, control_params *control, storage *workspace,
+        sparse_matrix *H, real *b, real tol, real *x, mpi_datatypes* mpi_data )
 {
     int  i, scale;
 //    int j;
@@ -1039,7 +1039,7 @@ int Cuda_CG( reax_system *system, storage *workspace, sparse_matrix *H, real
     }
 #endif
 
-    for ( i = 1; i < 1000 && SQRT(sig_new) / b_norm > tol; ++i )
+    for ( i = 1; i < control->cm_solver_max_iters && SQRT(sig_new) / b_norm > tol; ++i )
     {
         //MVAPICH2
         copy_host_device( spad, dev_workspace->d, sizeof(real) * system->total_cap,
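Both hard-coded 1000-iteration caps in Cuda_dual_CG and Cuda_CG now come from control->cm_solver_max_iters; the stopping rule itself is unchanged, iterating while the relative residual SQRT(sig_new) / b_norm exceeds tol. A host-only sketch of that loop bound, with a stand-in residual update instead of the CUDA solver (compile with -lm):

    #include <math.h>
    #include <stdio.h>

    /* same loop bound and stopping rule as the updated Cuda_CG;
     * the residual update is a stand-in for the real CG recurrence */
    static unsigned int cg_sketch( double sig_new, double b_norm, double tol,
            unsigned int max_iters )
    {
        unsigned int i;

        for ( i = 1; i < max_iters && sqrt( sig_new ) / b_norm > tol; ++i )
        {
            sig_new *= 0.25; /* pretend each step quarters r^T r */
        }

        return i;
    }

    int main( void )
    {
        /* tol mirrors the cm_solver_q_err default (1e-6),
         * max_iters the cm_solver_max_iters default (100) */
        printf( "stopped after %u iterations\n",
                cg_sketch( 1.0, 1.0, 1.0e-6, 100 ) );

        return 0;
    }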
diff --git a/PG-PuReMD/src/cuda/cuda_lin_alg.h b/PG-PuReMD/src/cuda/cuda_lin_alg.h
index aa31c126642b8eb560256390c89698df99b34a73..768c0a36a1582a585f5d45161844a23640a68212 100644
--- a/PG-PuReMD/src/cuda/cuda_lin_alg.h
+++ b/PG-PuReMD/src/cuda/cuda_lin_alg.h
@@ -51,11 +51,11 @@ void Cuda_Dual_Matvec( sparse_matrix *, rvec2 *, rvec2 *, int , int );
 
 void Cuda_Matvec( sparse_matrix *, real *, real *, int , int );
 
-int Cuda_dual_CG( reax_system*, storage*, sparse_matrix*,
+int Cuda_dual_CG( reax_system*, control_params*, storage*, sparse_matrix*,
         rvec2*, real, rvec2*, mpi_datatypes*, FILE* , simulation_data * );
 
-int Cuda_CG( reax_system*, storage*, sparse_matrix*,
-        real*, real, real*, mpi_datatypes*, FILE* );
+int Cuda_CG( reax_system*, control_params*, storage*, sparse_matrix*,
+        real*, real, real*, mpi_datatypes* );
 
 #ifdef __cplusplus
 }
diff --git a/PG-PuReMD/src/reax_types.h b/PG-PuReMD/src/reax_types.h
index a6d16f8ef08ef7c40d2770d2f466b2089a468d1a..68b4091e3edc2590a52e529bd076c396b8fde507 100644
--- a/PG-PuReMD/src/reax_types.h
+++ b/PG-PuReMD/src/reax_types.h
@@ -277,24 +277,6 @@
 
 
 /******************* ENUMERATIONS *************************/
-/* geometry file format */
-enum geo_formats
-{
-    CUSTOM = 0,
-    PDB = 1,
-    ASCII_RESTART = 2,
-    BINARY_RESTART = 3,
-    GF_N = 4,
-};
-
-/* restart file format */
-enum restart_formats
-{
-    WRITE_ASCII = 0,
-    WRITE_BINARY = 1,
-    RF_N = 2,
-};
-
 /* ensemble type */
 enum ensembles
 {
@@ -369,6 +351,57 @@ enum errors
     RUNTIME_ERROR = -19,
 };
 
+/* restart file format */
+enum restart_formats
+{
+    WRITE_ASCII = 0,
+    WRITE_BINARY = 1,
+    RF_N = 2,
+};
+
+/* geometry file format */
+enum geo_formats
+{
+    CUSTOM = 0,
+    PDB = 1,
+    ASCII_RESTART = 2,
+    BINARY_RESTART = 3,
+    GF_N = 4,
+};
+
+enum charge_method
+{
+    QEQ_CM = 0,
+    EE_CM = 1,
+    ACKS2_CM = 2,
+};
+
+enum solver
+{
+    GMRES_S = 0,
+    GMRES_H_S = 1,
+    CG_S = 2,
+    SDM_S = 3,
+};
+
+enum pre_comp
+{
+    NONE_PC = 0,
+    DIAG_PC = 1,
+    ICHOLT_PC = 2,
+    ILU_PAR_PC = 3,
+    ILUT_PAR_PC = 4,
+    ILU_SUPERLU_MT_PC = 5,
+};
+
+enum pre_app
+{
+    TRI_SOLVE_PA = 0,
+    TRI_SOLVE_LEVEL_SCHED_PA = 1,
+    TRI_SOLVE_GC_PA = 2,
+    JACOBI_ITER_PA = 3,
+};
+
 /* ??? */
 enum exchanges
 {
@@ -1274,8 +1307,9 @@ typedef struct
     /* format of geometry input file */
     int geo_format;
     /* format of restart file */
-    int  restart;
+    int restart;
 
+    /**/
     int restrict_bonds;
     /* flag to control if center of mass velocity is removed */
     int remove_CoM_vel;
@@ -1313,18 +1347,40 @@ typedef struct
     /* flag to control if force computations are tablulated */
     int tabulate;
 
+    /* method used to compute atomic charges (QEq, EE, or ACKS2) */
+    unsigned int charge_method;
     /* frequency (in terms of simulation time steps) at which to
      * re-compute atomic charge distribution */
     int charge_freq;
+    /* iterative linear solver used with the charge method */
+    unsigned int cm_solver_type;
+    /* net system charge */
+    real cm_q_net;
+    /* maximum number of iterations for the charge method linear solver */
+    unsigned int cm_solver_max_iters;
+    /* number of inner iterations before restarting (GMRES) */
+    unsigned int cm_solver_restart;
     /* error tolerance of solution produced by charge distribution
      * sparse iterative linear solver */
-    real q_err;
+    real cm_solver_q_err;
+    /* scaling factor for the cutoff distance used to sparsify the charge matrix */
+    real cm_domain_sparsity;
+    /* TRUE if the charge matrix is sparsified via a scaled cutoff distance, FALSE otherwise */
+    unsigned int cm_domain_sparsify_enabled;
+    /* method used to compute the preconditioner for the iterative linear solver */
+    unsigned int cm_solver_pre_comp_type;
     /* frequency (in terms of simulation time steps) at which to recompute
      * incomplete factorizations */
-    int refactor;
+    unsigned int cm_solver_pre_comp_refactor;
     /* drop tolerance of incomplete factorization schemes (ILUT, ICHOLT, etc.)
      * used for preconditioning the iterative linear solver used in charge distribution */
-    real droptol;
+    real cm_solver_pre_comp_droptol;
+    /* number of sweeps used when computing the preconditioner (ILU_PAR) */
+    unsigned int cm_solver_pre_comp_sweeps;
+    /* method used to apply the preconditioner */
+    unsigned int cm_solver_pre_app_type;
+    /* number of Jacobi iterations used when applying the preconditioner, if applicable */
+    unsigned int cm_solver_pre_app_jacobi_iters;
 
     /* initial temperature of simulation, in Kelvin */
     real T_init;
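The integer codes accepted in the control file map directly onto the enumerations added above (e.g., cm_solver_type 2 selects CG_S). A small sketch of that mapping, with the solver enum copied from reax_types.h and a hypothetical solver_name() helper:

    #include <stdio.h>

    /* copied from reax_types.h; values must match the control file codes */
    enum solver
    {
        GMRES_S = 0,
        GMRES_H_S = 1,
        CG_S = 2,
        SDM_S = 3,
    };

    static const char * solver_name( int s )
    {
        switch ( s )
        {
            case GMRES_S:   return "GMRES";
            case GMRES_H_S: return "GMRES_H";
            case CG_S:      return "CG";
            case SDM_S:     return "SDM";
            default:        return "unknown";
        }
    }

    int main( void )
    {
        printf( "cm_solver_type 2 -> %s\n", solver_name( 2 ) );
        return 0;
    }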
diff --git a/PG-PuReMD/src/traj.c b/PG-PuReMD/src/traj.c
index af27e1cb04c9061f3bb52cb4a290cc999c93deac..14560661f57d23918774095a4df4c2d3a41fae4b 100644
--- a/PG-PuReMD/src/traj.c
+++ b/PG-PuReMD/src/traj.c
@@ -273,7 +273,7 @@ int Write_Header( reax_system *system, control_params *control,
                  control->thb_cut );
         strncat( out_control->buffer, out_control->line, HEADER_LINE_LEN + 1 );
 
-        sprintf( out_control->line, SCI_LINE, "QEq_tolerance:", control->q_err );
+        sprintf( out_control->line, SCI_LINE, "QEq_tolerance:", control->cm_solver_q_err );
         strncat( out_control->buffer, out_control->line, HEADER_LINE_LEN + 1 );
 
         /* temperature controls */
diff --git a/environ/parallel_control b/environ/parallel_control
index 3b608f1b0ddccdaf224dd254b5266ee33a148c82..cfe9fa5a59d51d4988d71569a19ba5fa52e2958b 100644
--- a/environ/parallel_control
+++ b/environ/parallel_control
@@ -18,8 +18,20 @@ bond_graph_cutoff        0.3                    ! bond strength cutoff for bond
 thb_cutoff               0.001                  ! cutoff value for three body interactions (Angstroms)
 hbond_cutoff             7.50                   ! cutoff distance for hydrogen bond interactions (Angstroms)
 
-charge_freq              1                      ! frequency (sim step) at which atomic charges are computed
-q_err                    1e-6                   ! norm of the relative residual in QEq solve
+charge_method                     0             ! charge method: 0 = QEq, 1 = EEM, 2 = ACKS2
+charge_freq                       1             ! frequency (sim step) at which atomic charges are computed
+cm_q_net                          0.0           ! net system charge
+cm_solver_type                    0             ! iterative linear solver for charge method: 0 = GMRES, 1 = GMRES_H, 2 = CG, 3 = SDM
+cm_solver_max_iters               1000          ! max solver iterations
+cm_solver_restart                 100           ! inner iterations of GMRES before restarting
+cm_solver_q_err                   1e-6          ! relative residual norm threshold used in solver
+cm_domain_sparsity                1.0           ! scaling factor for the cutoff distance, used to sparsify the charge matrix (between 0.0 and 1.0)
+cm_solver_pre_comp_type           1             ! method used to compute preconditioner, if applicable
+cm_solver_pre_comp_refactor       1000          ! number of steps before recomputing preconditioner
+cm_solver_pre_comp_droptol        0.0           ! threshold tolerance for dropping values in preconditioner computation, if applicable
+cm_solver_pre_comp_sweeps         3             ! number of sweeps used to compute preconditioner (ILU_PAR)
+cm_solver_pre_app_type            1             ! method used to apply preconditioner
+cm_solver_pre_app_jacobi_iters    50            ! number of Jacobi iterations used for applying the preconditioner, if applicable
 
 temp_init                0.01                   ! desired initial temperature of the simulated system
 temp_final               300.0                  ! desired final temperature of the simulated system
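The defaults above leave cm_domain_sparsity at 1.0 (no sparsification); per Read_Control_File, any value below 1.0 also sets cm_domain_sparsify_enabled to TRUE. A sketch of a range check matching the documented (0.0, 1.0] interval (illustrative only; the diff itself adds no such validation):

    #include <stdio.h>

    /* cm_domain_sparsity scales the charge matrix cutoff distance
     * and must lie in (0.0, 1.0] per the control file comment */
    static int valid_sparsity( double s )
    {
        return s > 0.0 && s <= 1.0;
    }

    int main( void )
    {
        printf( "1.0 valid: %d\n", valid_sparsity( 1.0 ) ); /* dense matrix */
        printf( "0.9 valid: %d\n", valid_sparsity( 0.9 ) ); /* sparsified */
        printf( "1.5 valid: %d\n", valid_sparsity( 1.5 ) ); /* rejected */

        return 0;
    }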