"""Download DWD 10-minute air-temperature "now" data and inspect it.

Scrapes the DWD open-data directory listing, downloads every
``10minutenwerte`` zip archive plus the station description file into
``download_folder``, then prints the contents of each downloaded zip.
``read_dwd_file`` is a helper that prints a text file line by line.
"""

import csv
import os
import zipfile

import requests
from bs4 import BeautifulSoup

url = 'https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/10_minutes/air_temperature/now/'
download_folder = 'dwd-data/'


def download(url, dest_file):
    """Fetch *url* and write the response body to *dest_file* (binary).

    BUG FIX: the original read the global ``file_url`` instead of its own
    parameter, so the function only worked by accident of global state.
    """
    response = requests.get(url)
    # Context manager guarantees the handle is closed even on error
    # (the original leaked it via open(...).write(...)).
    with open(dest_file, 'wb') as f:
        f.write(response.content)


def read_dwd_file(file_path):
    """Print every line of the text file at *file_path*.

    BUG FIX: the original primed ``readline()`` before the loop and read
    again at the top of each iteration, silently skipping the first line.
    """
    with open(file_path) as f:
        for line in f:
            print(line)


response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
print(soup.title)

# Create the target folder if it does not exist yet.
if not os.path.isdir(download_folder):
    os.mkdir(download_folder)

dwd_links = soup.findAll('a')
# Hoisted out of the loop (it is loop-invariant). The -3 presumably
# discounts the non-data navigation links in the listing — TODO confirm.
dwd_len = len(dwd_links) - 3
station_file = ''
i = 1

for file_text in dwd_links:
    name = file_text.text
    if '10minutenwerte' in str(name):
        dest_file = download_folder + name
        # Skip files we already have from a previous run.
        if not os.path.isfile(dest_file):
            # url already ends with '/', so no extra separator is needed
            # (the original produced a double slash).
            download(url + name, dest_file)
    elif 'zehn_now_tu_Beschreibung_Stationen' in str(file_text):
        dest_file = download_folder + name
        download(url + name, dest_file)
        station_file = dest_file
    print("Download ", i, " von ", dwd_len, end=' ')
    i += 1

for filename in os.listdir(download_folder):
    file_path = os.path.join(download_folder, filename)
    # BUG FIX: the folder also contains the station description .txt;
    # opening it as a zip would raise BadZipFile. Skip non-zip files.
    if not zipfile.is_zipfile(file_path):
        continue
    # Context managers close both the archive and the inner member file
    # (the original leaked both handles).
    with zipfile.ZipFile(file_path) as archive:
        with archive.open(archive.namelist()[0]) as member:
            contents = member.read()
    print(contents)