Commit d1a902db authored by Lozac'h Loic

ADD: THISMEDownload.py

parent 96f8dc59
#!/usr/bin/python3
##################################################################################
# Script developed by:
# Loic Lozach
# THEIA Development Engineer at INRAE
#
# Download Soil Moisture maps from the THISME web application.
# On the web client, use the search filters to find the images you want.
# In the address bar of your browser, copy the query starting after "?" in the url.
# Paste it as the argument of the -urlq option. WARNING: ADD QUOTES!
#
##################################################################################
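# Example invocation (the query string below is purely illustrative; copy your
# own from the THISME address bar and keep the quotes):
#   ./THISMEDownload.py -urlq "collection=S2MP&startDate=2021-01-01" -downdir /tmp/thisme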
import argparse
import datetime
import json
import os
import xml.dom.minidom as mdom
import xml.etree.ElementTree as ET
from subprocess import Popen, PIPE

import otbApplication
def write_xml_results(file, tree):
    # Pretty-print an ElementTree element and write it to an open file handle.
    xml_string = ET.tostring(tree).decode()
    parsed_xml = mdom.parseString(xml_string)
    pretty_xml_as_string = parsed_xml.toprettyxml()
    file.write(pretty_xml_as_string)
def process_command(cmd):
    # Run an external command, report failures and return its exit code.
    print("Starting : " + " ".join(cmd))
    p = Popen(cmd, stdout=PIPE)
    # p.wait()
    output = p.communicate()[0]
    if p.returncode != 0:
        print("process failed %d : %s" % (p.returncode, output))
    print("#################################################")
    return p.returncode
def download_json(urlquery):
    # Fetch one page of query results with curl and save it as a timestamped
    # JSON file in the download directory (uses the module-level 'args').
    querynow = datetime.datetime.today()
    exformat = "%Y%m%d-%H%M%S"
    qoutfile = os.path.join(args.downdir, "THISMEquery_" + querynow.strftime(exformat) + ".json")
    cmd = ["curl", "-X", "GET", urlquery, "--output", qoutfile]
    process_command(cmd)
    return qoutfile
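
# For reference, the parsing in the main block below assumes each result page
# has roughly this shape (inferred from this script, not from API documentation):
#   {
#     "features": [
#       {"properties": {"services": {"download": {"url": "..."}}}},
#       ...
#     ],
#     "properties": {"links": [..., ..., {"rel": "next", "href": "..."}]}
#   }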
if __name__ == "__main__":

    # Make parser object
    parser = argparse.ArgumentParser(description=
        """
        Download Soil Moisture maps from the THISME web application.
        On the web client, use the search filters to find the images you want.
        In the address bar of your browser, copy the query starting after ? in the url.
        Paste it as the argument of the -urlq option. WARNING: ADD QUOTES!
        """)
    parser.add_argument('-urlq', action='store', required=True,
                        help='Images query contained in the url of THISME (ADD QUOTES!), starts after "https://thisme.cines.teledetection.fr/#!/search?" ')
    parser.add_argument('-downdir', action='store', required=False,
                        help='[Optional] Directory where soil moisture maps will be downloaded, default current dir')
    args = parser.parse_args()
    # Default to the current directory and create the download dir if needed.
    if args.downdir is None:
        args.downdir = os.path.abspath(".")
    if not os.path.isdir(args.downdir):
        os.mkdir(args.downdir)

    # Locate the collection parameter in the query and remove it from the list.
    querysplit = args.urlq.split("&")
    indcoll = None
    indlimit = None
    collection = None
    for q in querysplit:
        if q.lower().find("collection") == 0:
            collection = q.split("=")[1]
            indcoll = querysplit.index(q)
    if indcoll is None:
        print("Error: Can't find collection")
        exit()
    querysplit.pop(indcoll)

    # Drop any user-supplied 'limit' parameter; the script sets its own page size.
    for q in querysplit:
        if q.lower().find("limit") == 0:
            indlimit = querysplit.index(q)
    if indlimit is not None:
        querysplit.pop(indlimit)

    # Build the API query from the remaining parameters (500 features per page).
    urlbase = ('https://api.thisme.cines.teledetection.fr/collections/' + collection
               + '/features?_pretty=1&limit=500&' + "&".join(querysplit))
    qoutfile = download_json(urlbase)
    # Download every feature of the current page, then follow the "next" link
    # until the last page has been processed.
    jsonnext = True
    while jsonnext:
        with open(qoutfile) as f:
            data = json.load(f)

        datafeatures = data["features"]
        for feat in datafeatures:
            durl = feat["properties"]["services"]["download"]["url"]
            outtif = os.path.join(args.downdir, durl.split("/")[-1])
            cmd = ["wget", "-O", outtif, durl]
            process_command(cmd)

        datalinks = data["properties"]["links"]
        if len(datalinks) == 3:
            if datalinks[2]["rel"] != "next":
                print("Error: invalid json file")
                exit()
            urlnext = datalinks[2]["href"]
            qoutfile = download_json(urlnext)
        else:
            jsonnext = False