import datetime
import os
import traceback
from os import listdir
from os.path import isfile, join

import requests

# Usage:
# python3 downloadViirs.py;  aws s3 sync FIRMS s3://alertwildfirebackup/FIRMS/
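#
# Downloads daily Suomi NPP VIIRS active-fire detections (VNP14IMGTDL NRT) for the
# contiguous USA and Hawaii from the NASA FIRMS NRT archive, then prunes each file
# down to latitude, longitude, and fire radiative power (frp) columns.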


def download_file(url, local_filename, headers):
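    """Stream url to local_filename on disk and return the path written."""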
    # Make sure the destination directory exists before streaming into it.
    directory = os.path.split(local_filename)[0]
    if directory:
        os.makedirs(directory, exist_ok=True)
    # NOTE the stream=True parameter below
    with requests.get(url, headers=headers, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                # For a chunk-encoded response, set chunk_size=None and
                # uncomment the truthiness check below to skip keep-alive chunks.
                #if chunk:
                f.write(chunk)
    return local_filename

def processFiles():
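    """Prune every downloaded .txt file that does not already have a processed copy."""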
    inputDirectory = "./FIRMS/suomi-npp-viirs-c2/USA_contiguous_and_Hawaii/"
    outputDirectory = "./FIRMS/suomi-npp-viirs-c2/USA_contiguous_and_Hawaii_processed/"
    if not os.path.exists(outputDirectory):
        os.makedirs(outputDirectory)
    onlyfiles = [f for f in listdir(inputDirectory) if f.endswith('.txt') and isfile(join(inputDirectory, f))]
    for fname in onlyfiles:
        if not os.path.isfile(join(outputDirectory, fname)):
            print('need to make', fname)
            result = processFile(join(inputDirectory, fname), join(outputDirectory, fname))
            with open(os.path.join(outputDirectory,fname), "w") as text_file:
                text_file.write(result)

def processFile(inputName, outputName):
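    """Reduce one FIRMS CSV to latitude, longitude, and frp columns and return the text."""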
    print('processing', inputName, outputName)
    with open(inputName, "r") as myfile:
        data = myfile.readlines()
        # Find the column numbers from the CSV header.
        cats = data[0].strip().split(',')
        longIndex = cats.index('longitude')
        latIndex = cats.index('latitude')
        confIndex = cats.index('confidence')
        frpIndex = cats.index('frp')
        # Prune columns: split every row and drop short/malformed ones. The header
        # row passes the length filter, so the output keeps its own
        # "latitude,longitude,frp" header line.
        rows = [line.split(',') for line in data]
        completeRows = [row for row in rows if len(row) >= len(cats)]
        # To drop low-confidence detections, filter on the confidence column:
        # completeRows = [row for row in completeRows if 'low' not in row[confIndex]]
        prunedColumns = [[row[latIndex], row[longIndex], row[frpIndex]] for row in completeRows]
        return '\n'.join(','.join(row) for row in prunedColumns)

if __name__ == "__main__":
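    # Walk every day-of-year from 2020 through the current year, downloading any
    # daily files not already on disk, then prune the whole set.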
    # url = "https://nrt3.modaps.eosdis.nasa.gov/api/v2/content/archives/FIRMS/suomi-npp-viirs-c2/USA_contiguous_and_Hawaii"
    now = datetime.datetime.now()
    currYear = now.year
    newYearPasses = 0
    # Bearer token for the nrt3.modaps.eosdis.nasa.gov archive API.
    headers = {'Authorization': "Bearer ZG9vZGVyc29uOlpEQXdaRE55Y3pCdVFHZHRZV2xzTG1OdmJRPT06MTYxMTM1NTkyMzpmNjEzNjgxYThiMjRlNGYyNjE5YTAxNGFhZDVmZjczNGZiNmIzNTVm"}
    for year in range(2020, currYear + 1):
        for day in range(1, 367):
            print(year, day)
            fname = "SUOMI_VIIRS_C2_USA_contiguous_and_Hawaii_VNP14IMGTDL_NRT_" + str(year) + str(day).zfill(3) + ".txt"
            url = "https://nrt3.modaps.eosdis.nasa.gov/api/v2/content/archives/FIRMS/suomi-npp-viirs-c2/USA_contiguous_and_Hawaii/" + fname
            localFilename = "./FIRMS/suomi-npp-viirs-c2/USA_contiguous_and_Hawaii/" + fname
            if not os.path.isfile(localFilename):
                try:
                    print('downloading', url)
                    download_file(url, localFilename, headers)
                    print('downloaded', url)
                except Exception:
                    print(traceback.format_exc())
                    # Days in the current year that haven't occurred yet will 404;
                    # after more than ten such failures, stop trying the current year.
                    if year == currYear:
                        newYearPasses = newYearPasses + 1
                    if newYearPasses > 10:
                        break
    processFiles()

