# hima8_pv/main.py

import os
import Download_AOD as Daod
import datetime as dt
import glob
# import AOD_NetCDF_to_GeoTIFF as trans
from read_par_data import trans2csv
from logzero import logger
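# The script is interactive: it prompts for a choice (1/2/3) and downloads the chosen
# Himawari product from the JAXA P-Tree FTP server into a per-date folder under ./data.
# `today` is the hard-coded target date for this download run.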
today = dt.date(2022, 8, 1)
print(today)
_yearNum = today.year
_monNum = today.month
_dayNum = today.day
_yearStr = ""
_monStr = ""
_dayStr = ""
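# Zero-padded hour folder names ("00" ... "23"), used to walk the per-hour
# sub-directories when downloading the PAR minute data.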
_hourStr = [f"0{x}" if x < 10 else str(x) for x in range(24)]
if __name__ == "__main__":
    # FTP host address.
    # YesdayNum (yesterday's day number) is needed when downloading the daily data.
    ftp = Daod.myFTP(host='ftp.ptree.jaxa.jp', YesdayNum=_dayNum - 1)
    # Log in with a user name and password; register your own account and change these as needed.
    ftp.Login('jh_zhao_asagi.waseda.jp', 'SP+wari8')
    # Files are downloaded from the remote path ftp_filePath to the local path dst_filePath.
    dst_filePath_root = './data'
    dst_filePath = dst_filePath_root + "/" + dt.datetime.strftime(today, '%Y-%m-%d')
    if not os.path.exists(dst_filePath):
        os.makedirs(dst_filePath)
    '''
    For hourly and daily data the base path is /pub/himawari/L3/ARP/031 (2020-2022).
    For 10-minute data the base path is /pub/himawari/L2/ARP/030 (2020-2022).
    Example path for daily data:     /pub/himawari/L3/ARP/031/202008/daily/
    Example path for hourly data:    /pub/himawari/L3/ARP/031/202008/19/
    Example path for 10-minute data: /pub/himawari/L2/ARP/030/202210/10/
    '''
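    # For illustration: with today = 2022-08-01 and assuming Daod.getDateStr returns
    # zero-padded strings ("2022", "08", "01"), the hourly directory built below is
    # /pub/himawari/L3/ARP/031/202208/01/.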
    logger.info("Select the data to download:")
    _choice = int(input("1. All AOD hourly data for the day  2. AOD daily-mean data for yesterday  3. All PAR minute data for the day\n"))
    # Download_Path stores the raw downloaded data.
    Download_Path = ""
    # Analysis_Path stores the processed data, i.e. the data converted to TIFF.
    Analysis_Path = ""
    # Choice 1: AOD hourly data.
    if _choice == 1:
        _yearStr, _monStr, _dayStr = Daod.getDateStr(_yearNum, _monNum, _dayNum)
        ftp_filePath = "/pub/himawari/L3/ARP/031" + "/" + _yearStr + _monStr + "/" + _dayStr + "/"
        Download_Path = dst_filePath + "/AOD_Hourly_Download"
        if not os.path.exists(Download_Path):
            os.makedirs(Download_Path)
        Daod.deleteFile(Download_Path, suf='.temp')  # Delete temporary files left by an incomplete previous download.
        Analysis_Path = dst_filePath + "/AOD_Hourly_Analysis"
        if not os.path.exists(Analysis_Path):
            os.makedirs(Analysis_Path)
        ftp.DownLoadFileTree(Download_Path, ftp_filePath, _choice)
    # Choice 2: yesterday's AOD daily data.
    elif _choice == 2:
        _yearNum, _monNum, _dayNum = Daod.getYesterday(_yearNum, _monNum, _dayNum)
        _yearStr, _monStr, _dayStr = Daod.getDateStr(_yearNum, _monNum, _dayNum)
        ftp_filePath = "/pub/himawari/L3/ARP/031" + "/" + _yearStr + _monStr + "/" + "daily" + "/"
        Download_Path = dst_filePath + "/AOD_Daily_Download"
        if not os.path.exists(Download_Path):
            os.makedirs(Download_Path)
        Daod.deleteFile(Download_Path, suf='.temp')  # Delete temporary files left by an incomplete previous download.
        Analysis_Path = dst_filePath + "/AOD_Daily_Analysis"
        if not os.path.exists(Analysis_Path):
            os.makedirs(Analysis_Path)
        ftp.DownLoadFileTree(Download_Path, ftp_filePath, _choice)
    # Choice 3: PAR minute data, day by day.
    elif _choice == 3:
        # Loop from `today` up to and including this end date.
        while today <= dt.date(2022, 7, 23):
            _yearNum = today.year
            _monNum = today.month
            _dayNum = today.day
            _yearStr, _monStr, _dayStr = Daod.getDateStr(_yearNum, _monNum, _dayNum)
            ftp_filePath = "/pub/himawari/L2/PAR/020" + "/" + _yearStr + _monStr + "/" + _dayStr + "/"
            dst_filePath = dst_filePath_root + "/" + dt.datetime.strftime(today, '%Y-%m-%d')
            Download_Path = dst_filePath + "/PAR_Minutes_Download"
            if not os.path.exists(Download_Path):
                os.makedirs(Download_Path)
            Daod.deleteFile(Download_Path, suf='.temp')  # Delete temporary files left by an incomplete previous download.
            # Each hour of the day has its own sub-directory on the server.
            for hour in _hourStr:
                logger.info(f"{ftp_filePath}{hour}/")
                ftp.DownLoadFileTree(Download_Path, f"{ftp_filePath}{hour}/", _choice)
            # Analysis_Path = dst_filePath + "/PAR_Minutes_Analysis"
            # if not os.path.exists(Analysis_Path):
            #     os.makedirs(Analysis_Path)
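            # Once all hours for the day are downloaded, merge the NetCDF files into a
            # single CSV with trans2csv and remove the .nc files if the conversion succeeds.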
            data_list = glob.glob(os.path.join(Download_Path, "*.nc"))
            logger.info(data_list)
            try:
                date_df = trans2csv(data_list)
                date_df.to_csv(f'{Download_Path}/{_yearStr}-{_monStr}-{_dayStr}PAR.csv', encoding='utf-8-sig',
                               index=False)
                Daod.deleteFile(Download_Path, '.nc')
            except Exception as e:
                logger.error(e)
                logger.error(Download_Path)
            today = today + dt.timedelta(days=1)
    else:
        logger.error("Invalid choice!")
    # Downloads finished.
    ftp.close()
    logger.info("Download finished!")
    # Data processing starts below.
    # Read all of the NetCDF files.
    # data_list = glob.glob(Download_Path + "\\*.nc")
    # # Convert each file in a for loop.
    # for i in range(len(data_list)):
    #     data = data_list[i]
    #     trans.NC_to_tiffs(data, Analysis_Path)
    #     print(data + " ----- converted to TIFF")
    print("---- Conversion finished ----")