Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ Currently, these API calls are available:

- Export Records
- Export Metadata
- Import Metadata
- Delete Records
- Import Records
- Export File
Expand Down
95 changes: 81 additions & 14 deletions redcap/project.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
__license__ = "MIT"
__copyright__ = "2014, Vanderbilt University"

# pylint: disable=too-many-lines
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=too-many-public-methods
Expand Down Expand Up @@ -585,15 +586,88 @@ def import_records(
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
payload = self.__basepl("record")
payload = self._initialize_import_payload(to_import, format, "record")

payload["overwriteBehavior"] = overwrite
payload["returnFormat"] = return_format
payload["returnContent"] = return_content
payload["dateFormat"] = date_format
payload["forceAutoNumber"] = force_auto_number
response = self._call_api(payload, "imp_record")[0]
if "error" in response:
raise RedcapError(str(response))
return response

def import_metadata(
    self, to_import, format="json", return_format="json", date_format="YMD"
):
    """
    Import metadata (the Data Dictionary) into the REDCap project.

    Parameters
    ----------
    to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
        :note:
            When passing a csv or xml string, set the ``format``
            parameter to match.
    format : ('json'), 'xml', 'csv'
        Format of the incoming data; defaults to json-encoded.
    return_format : ('json'), 'csv', 'xml'
        Format of the API response; json responses are decoded.
    date_format : ('YMD'), 'DMY', 'MDY'
        How date strings in the data are formatted. 'YMD' means
        'YYYY-MM-DD' (the default), 'MDY' means 'MM/DD/YYYY', and
        'DMY' means 'DD/MM/YYYY'. No other formattings are allowed.

    Returns
    -------
    response : dict, str
        Response from the REDCap API, json-decoded when
        ``return_format`` == ``'json'``. On success this is the
        number of imported fields.

    Raises
    ------
    RedcapError
        If the API response contains an error.
    """
    # Build the base payload (handles DataFrame/csv/xml/json inputs).
    payload = self._initialize_import_payload(to_import, format, "metadata")
    payload.update({"returnFormat": return_format, "dateFormat": date_format})

    result = self._call_api(payload, "imp_metadata")[0]
    # The error may arrive embedded in a non-dict response, hence str().
    if "error" in str(result):
        raise RedcapError(str(result))
    return result

def _initialize_import_payload(self, to_import, format, data_type):
"""
Standardize the data to be imported and add it to the payload

Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
data_type: 'record', 'metadata'
The kind of data that are imported

Returns
-------
payload : (dict, str)
The initialized payload dictionary and updated format
"""

payload = self.__basepl(data_type)
# pylint: disable=comparison-with-callable
if hasattr(to_import, "to_csv"):
# We'll assume it's a df
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {"index_label": [self.def_field, "redcap_event_name"]}
else:
csv_kwargs = {"index_label": self.def_field}
if data_type == "record":
if self.is_longitudinal():
csv_kwargs = {"index_label": [self.def_field, "redcap_event_name"]}
else:
csv_kwargs = {"index_label": self.def_field}
elif data_type == "metadata":
csv_kwargs = {"index": False}
to_import.to_csv(buf, **csv_kwargs)
payload["data"] = buf.getvalue()
buf.close()
Expand All @@ -604,16 +678,9 @@ def import_records(
# don't do anything to csv/xml
payload["data"] = to_import
# pylint: enable=comparison-with-callable
payload["overwriteBehavior"] = overwrite

payload["format"] = format
payload["returnFormat"] = return_format
payload["returnContent"] = return_content
payload["dateFormat"] = date_format
payload["forceAutoNumber"] = force_auto_number
response = self._call_api(payload, "imp_record")[0]
if "error" in response:
raise RedcapError(str(response))
return response
return payload

def export_file(self, record, field, event=None, return_format="json"):
"""
Expand Down
5 changes: 5 additions & 0 deletions redcap/request.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,11 @@ def validate(self):
"record",
"Importing record but content is not record",
),
"imp_metadata": (
["type", "data", "format"],
"metadata",
"Importing metadata but content is not metadata",
),
"metadata": (
["format"],
"metadata",
Expand Down
23 changes: 23 additions & 0 deletions test/test_project.py
Original file line number Diff line number Diff line change
Expand Up @@ -405,6 +405,29 @@ def test_import_exception(self):
exc = assert_context.exception
self.assertIn("error", exc.args[0])

@responses.activate
def test_import_metadata(self):
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm happy with this test 👍🏻

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have been experimenting with additional tests and found some weird behavior, which I suspect is due to the test setup, as it works with other REDCap servers. Do you have an idea why the following test might fail?

    @responses.activate
    def test_import_reduced_metadata(self):
        "Test import of a reduced set of metadata"
        self.add_normalproject_response()
        data = self.reg_proj.export_metadata()
        # reducing the metadata
        data = data[0:1]
        d = self.reg_proj.import_metadata(data)
        # this should return the reduced set of metadata, but instead returns the full set of metadata when running on the test server
        self.assertEqual(len(d), len(data))

"Test metadata import"
self.add_normalproject_response()
data = self.reg_proj.export_metadata()
response = self.reg_proj.import_metadata(data)
for field_dict in response:
for key in ["field_name", "field_label", "form_name", "arm_num", "name"]:
self.assertIn(key, field_dict)
self.assertNotIn("error", response)

@unittest.skip("Fails on test server for unknown reason")
@responses.activate
def test_import_reduced_metadata(self):
    "Test import of a reduced set of metadata"
    self.add_normalproject_response()
    # Export the full dictionary, then import only its first field.
    full_metadata = self.reg_proj.export_metadata()
    subset = full_metadata[:1]
    returned = self.reg_proj.import_metadata(subset)
    # The server should echo back only the reduced set.
    self.assertEqual(len(returned), len(subset))

@staticmethod
def is_good_csv(csv_string):
"Helper to test csv strings"
Expand Down