mirror of https://github.com/TREX-CoE/trexio.git (synced 2024-12-22 04:14:40 +01:00)

Working on io_dset_sparse.c. Not finished

commit 8b3d237c1f (parent 25780f9c16)
@@ -4,10 +4,9 @@
 #include <stdlib.h>
 #include <stdint.h>
 
-#define SIZE 100
-#define N_CHUNKS 5
+#define N_CHUNKS 4
 
-static int test_write_dset_sparse (const char* file_name, const back_end_t backend, const int64_t offset) {
+static int test_write_dset_sparse (const char* file_name, const back_end_t backend, const int64_t offset, const int64_t mo_num) {
 
   /* Try to write an array of sparse data into the TREXIO file */
 
@@ -24,26 +23,25 @@ static int test_write_dset_sparse (const char* file_name, const back_end_t backe
   // parameters to be written
   int32_t* index;
   double* value;
+  int64_t size = mo_num/2;
 
-  index = calloc(4L*SIZE, sizeof(int32_t));
-  value = calloc(SIZE, sizeof(double));
+  index = calloc(4L*size, sizeof(int32_t));
+  value = calloc(size, sizeof(double));
 
-  for(int i=0; i<SIZE; i++){
-    index[4*i] = 4*i;
-    index[4*i+1] = 4*i+1;
-    index[4*i+2] = 4*i+2;
-    index[4*i+3] = 4*i+3;
+  for(int i=0; i<size; i++){
+    index[4*i] = i;
+    index[4*i+1] = i+1;
+    index[4*i+2] = i+2;
+    index[4*i+3] = i+3;
     value[i] = 3.14 + (double) i;
   }
 
   // write mo_num which will be used to determine the optimal size of int indices
-  if (trexio_has_mo_num(file) == TREXIO_HAS_NOT) {
-    rc = trexio_write_mo_num(file, 1000);
-    assert(rc == TREXIO_SUCCESS);
-  }
+  rc = trexio_write_mo_num(file, mo_num);
+  assert(rc == TREXIO_SUCCESS);
 
   // write dataset chunks of sparse data in the file (including FAKE statements)
-  uint64_t chunk_size = (uint64_t) SIZE/N_CHUNKS;
+  uint64_t chunk_size = (uint64_t) size/N_CHUNKS+1;
   uint64_t offset_f = 0UL;
   uint64_t offset_d = 0UL;
   if (offset != 0L) offset_f += offset;
@@ -51,6 +49,7 @@ static int test_write_dset_sparse (const char* file_name, const back_end_t backe
   // write n_chunks times using write_sparse
   for(int i=0; i<N_CHUNKS; ++i){
     rc = trexio_write_mo_2e_int_eri(file, offset_f, chunk_size, &index[4*offset_d], &value[offset_d]);
+    printf("%5d: %s\n", __LINE__, trexio_string_of_error(rc));
     assert(rc == TREXIO_SUCCESS);
     offset_d += chunk_size;
     offset_f += chunk_size;
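Note on the chunking above: with chunk_size = size/N_CHUNKS+1, the N_CHUNKS writes cover slightly more than size elements, so the final chunk reads past the end of the index/value buffers allocated earlier. A minimal sketch of a clamped variant of the same write pattern follows; the helper name and its signature are assumptions of this note, not part of the commit, and it reuses only calls that appear in the diff (trexio_write_mo_2e_int_eri) plus the test's existing headers (trexio.h, stdint.h, assert.h).

/* Sketch only: write `size` sparse ERI entries in N_CHUNKS chunks, clamping
 * the last chunk so the reads stay inside `index` (4*size int32_t entries)
 * and `value` (size double entries), as allocated in the test above. */
static void write_eri_in_chunks(trexio_t* file, int32_t* index, double* value,
                                int64_t size, int64_t file_offset)
{
  int64_t chunk = size / N_CHUNKS + 1;
  int64_t done  = 0;                            /* elements written so far */
  while (done < size) {
    int64_t n = chunk;
    if (done + n > size) n = size - done;       /* clamp the final chunk */
    trexio_exit_code rc = trexio_write_mo_2e_int_eri(file,
                              file_offset + done,   /* offset in the file     */
                              n,                    /* elements in this chunk */
                              &index[4*done],       /* 4 indices per element  */
                              &value[done]);
    assert(rc == TREXIO_SUCCESS);
    done += n;
  }
}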
@@ -126,19 +125,25 @@ static int test_read_dset_sparse (const char* file_name, const back_end_t backen
   assert (file != NULL);
   assert (rc == TREXIO_SUCCESS);
 
+  int32_t mo_num = 0;
+  rc = trexio_read_mo_num(file, &mo_num);
+  assert(rc == TREXIO_SUCCESS);
+  printf("%5d: mo_num = %d\n", __LINE__, mo_num);
+  const int64_t size = mo_num/2;
+
   // define arrays to read into
   int32_t* index_read;
   double* value_read;
-  uint64_t size_r = 40L;
+  uint64_t size_r = mo_num;
 
   index_read = (int32_t*) calloc(4L*size_r,sizeof(int32_t));
   value_read = (double*) calloc(size_r,sizeof(double));
 
   // specify the read parameters, here:
   // 1 chunk of 10 elements using offset of 40 (i.e. lines No. 40--59) into elements of the array starting from 5
-  int64_t chunk_read = 10L;
-  int64_t offset_file_read = 40L;
-  int offset_data_read = 5;
+  int64_t chunk_read = size/3;
+  int64_t offset_file_read = size/3;
+  int offset_data_read = 2L;
   int64_t read_size_check;
   read_size_check = chunk_read;
 
@@ -146,24 +151,35 @@ static int test_read_dset_sparse (const char* file_name, const back_end_t backen
 
   // read one chunk using the aforementioned parameters
   rc = trexio_read_mo_2e_int_eri(file, offset_file_read, &chunk_read, &index_read[4*offset_data_read], &value_read[offset_data_read]);
+  printf("%5d: %s\n", __LINE__, trexio_string_of_error(rc));
+  for (int i=0 ; i<chunk_read ; ++i) {
+    printf("%d %d\n", i, index_read[4*offset_data_read+i]);
+  }
   assert(rc == TREXIO_SUCCESS);
   assert(chunk_read == read_size_check);
   assert(index_read[0] == 0);
-  assert(index_read[4*offset_data_read] == 4 * (int32_t) (offset_file_read-offset));
+  assert(index_read[4*offset_data_read] == (int32_t) (offset_file_read-offset));
 
   // now attempt to read so that one encounters end of file during reading (i.e. offset_file_read + chunk_read > size_max)
-  offset_file_read = 97L;
-  offset_data_read = 1;
-  int64_t eof_read_size_check = SIZE - offset_file_read; // if offset_file_read=97 => only 3 integrals will be read out of total of 100
+  int64_t size_max;
+  rc = trexio_read_mo_2e_int_eri_size(file, &size_max);
+  assert(rc == TREXIO_SUCCESS);
+  offset_file_read = size_max-chunk_read+1L;
+  offset_data_read = 1L;
+  int64_t eof_read_size_check = size_max - offset_file_read; // only the remaining (size_max - offset_file_read) integrals can be read
 
   if (offset != 0L) offset_file_read += offset;
 
   // read one chunk that will reach EOF and return TREXIO_END code
   rc = trexio_read_mo_2e_int_eri(file, offset_file_read, &chunk_read, &index_read[4*offset_data_read], &value_read[offset_data_read]);
   assert(rc == TREXIO_END);
+  for (int i=0 ; i<chunk_read ; ++i) {
+    printf("%d %d x\n", i, index_read[4*offset_data_read+i]);
+  }
   assert(chunk_read == eof_read_size_check);
   assert(index_read[4*size_r-1] == 0);
-  assert(index_read[4*offset_data_read] == 4 * (int32_t) (offset_file_read-offset));
+  printf("%d %d\n", index_read[4*offset_data_read] , (int32_t) (offset_file_read-offset));
+  assert(index_read[4*offset_data_read] == (int32_t) (offset_file_read-offset));
 
   // close current session
   rc = trexio_close(file);
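The EOF checks above rely on chunk_read being passed by address and overwritten with the number of elements actually read once TREXIO_END is reached. A minimal sketch of streaming the whole dataset with that behaviour follows; the helper name, BUFFER_SIZE, and the stack buffers are assumptions of this note, and only calls appearing in the diff are used.

/* Sketch only: read the whole mo_2e_int_eri dataset in fixed-size buffers,
 * stopping when TREXIO_END is returned.  `chunk` is an in/out argument:
 * capacity on entry, number of elements actually read on return, which is
 * how the asserts above detect the short final read. */
#define BUFFER_SIZE 64L

static void read_all_eri(trexio_t* file)
{
  int32_t index_buf[4*BUFFER_SIZE];
  double  value_buf[BUFFER_SIZE];
  int64_t offset = 0;
  trexio_exit_code rc = TREXIO_SUCCESS;

  while (rc == TREXIO_SUCCESS) {
    int64_t chunk = BUFFER_SIZE;                 /* in: capacity, out: count */
    rc = trexio_read_mo_2e_int_eri(file, offset, &chunk, index_buf, value_buf);
    assert(rc == TREXIO_SUCCESS || rc == TREXIO_END);
    /* process `chunk` elements here: index_buf[4*i .. 4*i+3], value_buf[i] */
    offset += chunk;
  }
}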
@@ -198,6 +214,7 @@ static int test_read_dset_sparse_size (const char* file_name, const back_end_t b
   // read one chunk using the aforementioned parameters
   rc = trexio_read_mo_2e_int_eri_size(file, &size_written);
   assert(rc == TREXIO_SUCCESS);
+  printf("%5d: %ld %ld\n", __LINE__, size_written, size_check);
   assert(size_written == size_check);
 
   // close current session
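For completeness, trexio_read_mo_2e_int_eri_size reports how many sparse elements are currently stored, which is what size_check is compared against in this hunk. A short sketch of using it to allocate an exact-size buffer and read everything in one call follows (hypothetical helper, assuming an open trexio_t* handle and the test's headers):

/* Sketch only: query the stored element count and read everything at once. */
static void read_eri_exact(trexio_t* file)
{
  int64_t n = 0;
  trexio_exit_code rc = trexio_read_mo_2e_int_eri_size(file, &n);
  assert(rc == TREXIO_SUCCESS);

  int32_t* idx = calloc(4*(size_t)n, sizeof(int32_t));
  double*  val = calloc((size_t)n, sizeof(double));
  assert(idx != NULL && val != NULL);

  int64_t chunk = n;                      /* request the whole dataset */
  rc = trexio_read_mo_2e_int_eri(file, 0, &chunk, idx, val);
  assert(rc == TREXIO_SUCCESS || rc == TREXIO_END);
  assert(chunk == n);                     /* everything was read */

  free(idx);
  free(val);
}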
@@ -217,19 +234,27 @@ int main(){
   rc = system(RM_COMMAND);
   assert (rc == 0);
 
-  // check the first write attempt (SIZE elements written in N_CHUNKS chunks)
-  test_write_dset_sparse (TREXIO_FILE, TEST_BACKEND, 0);
-  test_has_dset_sparse (TREXIO_FILE, TEST_BACKEND);
-  test_read_dset_sparse (TREXIO_FILE, TEST_BACKEND, 0);
-  test_read_dset_sparse_size(TREXIO_FILE, TEST_BACKEND, SIZE);
+  int32_t mo_num = 42;
+  for (int i=0 ; i<10 ; ++i) {
 
-  // check the second write attempt (SIZE elements written in N_CHUNKS chunks)
-  test_write_dset_sparse (TREXIO_FILE, TEST_BACKEND, SIZE);
-  test_read_dset_sparse (TREXIO_FILE, TEST_BACKEND, SIZE);
-  test_read_dset_sparse_size(TREXIO_FILE, TEST_BACKEND, SIZE*2);
+    printf("%5d: mo_num = %d\n", __LINE__, mo_num);
+    const int64_t size = mo_num/2;
+    // check the first write attempt (SIZE elements written in N_CHUNKS chunks)
+    test_write_dset_sparse (TREXIO_FILE, TEST_BACKEND, 0, mo_num);
+    test_has_dset_sparse (TREXIO_FILE, TEST_BACKEND);
+    test_read_dset_sparse (TREXIO_FILE, TEST_BACKEND, 0);
+    test_read_dset_sparse_size(TREXIO_FILE, TEST_BACKEND, size);
 
   rc = system(RM_COMMAND);
   assert (rc == 0);
+    // check the second write attempt (SIZE elements written in N_CHUNKS chunks)
+    test_write_dset_sparse (TREXIO_FILE, TEST_BACKEND, size, mo_num);
+    test_read_dset_sparse (TREXIO_FILE, TEST_BACKEND, size);
+    test_read_dset_sparse_size(TREXIO_FILE, TEST_BACKEND, size*2);
+    rc = system(RM_COMMAND);
+    assert (rc == 0);
+    mo_num *= 2;
+  }
 
   return 0;
 }