1
0
mirror of https://github.com/TREX-CoE/trexio.git synced 2024-11-03 20:54:07 +01:00

add read_size function to HDF5 back end

This commit is contained in:
q-posev 2021-12-09 14:10:51 +01:00
parent c17297ca1d
commit e774cb6852
2 changed files with 51 additions and 37 deletions

View File

@@ -419,7 +419,6 @@ trexio_hdf5_write_$group_dset$ (trexio_t* const file,
H5P_DEFAULT);
assert(dset_id >= 0);
printf(" HERE HERE HERE !\n");
status = H5Dwrite(dset_id, H5T_NATIVE_INT32, H5S_ALL, H5S_ALL, H5P_DEFAULT, index_sparse);
/*const herr_t status = H5LTmake_dataset(f->$group$_group,
@@ -439,7 +438,7 @@ trexio_hdf5_write_$group_dset$ (trexio_t* const file,
hid_t fspace = H5Dget_space(dset_id);
hsize_t offset[1] = {offset_file*4}; //[4] = {offset_file, offset_file, offset_file, offset_file};
hsize_t offset[1] = {(hsize_t) offset_file*4}; //[4] = {offset_file, offset_file, offset_file, offset_file};
// allocate space for the dimensions to be read
hsize_t ddims[1] = {0};
@@ -448,8 +447,6 @@ trexio_hdf5_write_$group_dset$ (trexio_t* const file,
int rrank = H5Sget_simple_extent_dims(fspace, ddims, NULL);
ddims[0] += chunk_dims[0];
printf("SIZE = %ld\n", ddims[0]);
// extend the dset size
herr_t status = H5Dset_extent(dset_id, ddims);
@@ -465,11 +462,12 @@ trexio_hdf5_write_$group_dset$ (trexio_t* const file,
dspace, fspace, H5P_DEFAULT,
index_sparse);
assert(status >= 0);
// TODO: CLOSE ALL OPENED
H5Dclose(dset_id);
H5Sclose(dspace);
H5Sclose(fspace);
//if (status < 0) return TREXIO_FAILURE;
if (status < 0) return TREXIO_FAILURE;
}
@@ -485,8 +483,8 @@ trexio_hdf5_read_$group_dset$ (trexio_t* const file,
const int64_t offset_file,
const int64_t size,
const int64_t size_max,
int32_t* const index_sparse,
double* const value_sparse)
int32_t* const index_read,
double* const value_read)
{
if (file == NULL) return TREXIO_INVALID_ARG_1;
@@ -497,35 +495,24 @@ trexio_hdf5_read_$group_dset$ (trexio_t* const file,
hid_t dset_id = H5Dopen(f->$group$_group, $GROUP_DSET$_NAME, H5P_DEFAULT);
if (dset_id <= 0) return TREXIO_INVALID_ID;
const uint32_t rank = 4;
// allocate space for the dimensions to be read
hsize_t* ddims = CALLOC( (int) rank, hsize_t);
if (ddims == NULL) return TREXIO_FAILURE;
// get the dataspace of the dataset
hid_t dspace_id = H5Dget_space(dset_id);
// get the rank and dimensions of the dataset
int rrank = H5Sget_simple_extent_dims(dspace_id, ddims, NULL);
hid_t fspace_id = H5Dget_space(dset_id);
// check that dimensions are consistent
if (rrank != (int) rank) {
FREE(ddims);
H5Sclose(dspace_id);
H5Dclose(dset_id);
return TREXIO_INVALID_ARG_3;
}
// possible overflow HERE ?
hsize_t offset[1] = {(hsize_t) offset_file*4};
hsize_t count[1] = {(hsize_t) size*4};
free(ddims);
H5Sclose(dspace_id);
herr_t status = H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
hid_t memspace_id = H5Screate_simple(1, count, NULL);
status = H5Dread(dset_id, H5T_NATIVE_INT32, memspace_id, fspace_id, H5P_DEFAULT, index_read);
H5Sclose(fspace_id);
H5Sclose(memspace_id);
H5Dclose(dset_id);
/* High-level H5LT API. No need to deal with dataspaces and datatypes */
/*herr_t status = H5LTread_dataset(f->$group$_group,
$GROUP_DSET$_NAME,
H5T_$GROUP_DSET_H5_DTYPE$,
$group_dset$);
if (status < 0) return TREXIO_FAILURE;*/
assert (status >= 0);
return TREXIO_SUCCESS;
}
@@ -539,7 +526,27 @@ trexio_hdf5_read_$group_dset$_size (trexio_t* const file, int64_t* const size_ma
if (file == NULL) return TREXIO_INVALID_ARG_1;
// TODO
const trexio_hdf5_t* f = (const trexio_hdf5_t*) file;
hid_t dset_id = H5Dopen(f->$group$_group, $GROUP_DSET$_NAME, H5P_DEFAULT);
if (dset_id <= 0) return TREXIO_INVALID_ID;
hid_t fspace_id = H5Dget_space(dset_id);
// allocate space for the dimensions to be read
hsize_t ddims[1] = {0};
// get the rank and dimensions of the dataset
int rrank = H5Sget_simple_extent_dims(fspace_id, ddims, NULL);
H5Dclose(dset_id);
H5Sclose(fspace_id);
int mod_4 = (int) (ddims[0] % 4);
if (mod_4 != 0) return TREXIO_FAILURE;
*size_max = ((int64_t) ddims[0]) / 4L;
return TREXIO_SUCCESS;
}

View File

@@ -115,14 +115,14 @@ static int test_read_dset_sparse (const char* file_name, const back_end_t backen
// define arrays to read into
int32_t* index_read;
double* value_read;
uint64_t size_r = 20L;
uint64_t size_r = 40L;
index_read = (int32_t*) calloc(4L*size_r,sizeof(int32_t));
value_read = (double*) calloc(size_r,sizeof(double));
// specify the read parameters, here:
// 1 chunk of 10 elements using offset of 40 (i.e. lines No. 40--59) into elements of the array starting from 5
int64_t chunk_read = 10L;
int64_t chunk_read = 30L;
int64_t offset_file_read = 40L;
int offset_data_read = 5;
@@ -133,6 +133,7 @@ static int test_read_dset_sparse (const char* file_name, const back_end_t backen
assert(index_read[4*offset_data_read] == offset_file_read*4);
// now attempt to read so that one encounters end of file during reading (i.e. offset_file_read + chunk_read > size_max)
/*
offset_file_read = 97L;
offset_data_read = 1;
@@ -141,6 +142,11 @@ static int test_read_dset_sparse (const char* file_name, const back_end_t backen
assert(rc == TREXIO_END);
assert(index_read[4*size_r-1] == 0);
assert(index_read[4*offset_data_read] == 4 * (int32_t) offset_file_read);
*/
for (int i=0; i<size_r; i++){
printf("%d %d \n", index_read[4*i], index_read[4*i+1]);
}
// close current session
rc = trexio_close(file);
@@ -175,6 +181,7 @@ static int test_read_dset_sparse_size (const char* file_name, const back_end_t b
// read one chunk using the aforementioned parameters
rc = trexio_read_mo_2e_int_eri_size(file, &size_written);
assert(rc == TREXIO_SUCCESS);
printf("%ld \n", size_written);
assert(size_written == size_check);
// close current session
@@ -197,8 +204,8 @@ int main(){
// check the first write attempt (SIZE elements written in N_CHUNKS chunks)
test_write_dset_sparse (TREXIO_FILE, TEST_BACKEND);
test_has_dset_sparse (TREXIO_FILE, TEST_BACKEND);
//test_read_dset_sparse (TREXIO_FILE, TEST_BACKEND);
//test_read_dset_sparse_size(TREXIO_FILE, TEST_BACKEND, (int64_t) SIZE);
test_read_dset_sparse (TREXIO_FILE, TEST_BACKEND);
test_read_dset_sparse_size(TREXIO_FILE, TEST_BACKEND, (int64_t) SIZE);
// check the second write attempt (SIZE elements written in N_CHUNKS chunks)
//test_write_dset_sparse (TREXIO_FILE, TEST_BACKEND);