Hallo
Ein tolles Tool. Genau das, was ich gesucht habe. Vielen Dank für die Mühen!
Da Faulheit bekanntlich Innovation gebiert, habe ich mich mal hingesetzt und ein Ariva-Export-Tool in Python geschrieben, welches basierend auf WKN/ISIN einzeln oder aus nem PP-Export-CSV den notwendigen Link sowie ein Import-CSV für historische Daten erstellt. Toll wäre es, wenn es in PP einen Batchimport gäbe, aber der Import von Hand ist zumindest einfacher, wenn man nicht von Hand die Links und CSVs bei Ariva generieren muss. Hier der Code zur allgemeinen Verfügung (Edit: natürlich kurz nach dem Einstellen noch einen Fehler in einer Regex gefunden und korrigiert):
#! /usr/bin/python
# Obtain historical stock data from ariva to use with Portfolio Performance (or something else)
# V0.1 - 2021/01/10 Maik Goette : First running version
import os,sys,argparse,csv,time,re
from urllib import request, parse, error as urlerror
from datetime import date
from pathlib import Path
## Command-line interface: one required identifier plus optional tuning flags.
parser = argparse.ArgumentParser(
    description='This script builds links and downloads historical data for given stock identifiers',
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
    'id', metavar='ID', type=str,
    help='Asset identifier (ISIN|WKN) or PP stock data export file. Symbol is NOT supported, yet')
parser.add_argument(
    '--out', nargs='?', type=Path, default=Path.cwd(),
    help='Path for data files (csv). Default: ' + str(Path.cwd()))
parser.add_argument(
    '--past', type=str, default='1.1.2000',
    help='Starting time for historical data CSV. Default: 1.1.2000')
parser.add_argument(
    '--delimiter', type=str, default=';',
    help='CSV-Delimiter. Default: ";"')
parser.add_argument(
    '--currency', dest='cur', type=str, default='EUR',
    help='Currency EUR or USD. Default: EUR')
parser.add_argument(
    '--exchange', dest='stex', type=str, default='Xetra',
    help='''Stock exchange to obtain data from (select one, Default: Xetra):
Xetra
Tradegate
LS
Gettex
Nasdaq
Nasdaq-OTC
''')
parser.add_argument(
    '--allexchanges', dest='all', action='store_true',
    help='If this flag is given, all supported stock exchanges are searched. Results from exchange with longest history are used. USE WITH CARE!')
# Parsed once at import time; the rest of the script reads this module global.
args = parser.parse_args()
## Translation table: lower-cased exchange name -> ariva "boerse_id" value.
_exchange_names = ('xetra', 'tradegate', 'ls', 'gettex', 'nasdaq', 'nasdaq-otc')
_exchange_ids = ('6', '131', '16', '207', '40', '83')
stodic = dict(zip(_exchange_names, _exchange_ids))
def check_asset(stock, exch, curr):
    '''
    Probe ariva for historical data on the given asset.

    stock -- ISIN or WKN of the asset
    exch  -- name of the preferred stock exchange (a key of stodic)
    curr  -- currency code, e.g. 'EUR'

    Reads the module globals ``args`` (--allexchanges, --past, --currency)
    and ``stodic``. When --allexchanges is set, every known exchange is
    probed and the one with the longest history CSV wins.

    Returns a tuple (link, history_csv, exchange_name, stock_infos),
    or -1 on website errors or when no exchange delivered any data.
    '''
    exchused = exch.upper()
    stockhist = '0'   # dummy value; only its length is compared below
    finallink = None  # stays None until some exchange actually yields data
    stockinfos = []
    stocklist = [exch]
    if args.all:
        # Probe all known exchanges in addition to the preferred one.
        for key in stodic.keys():
            if key.lower() != exch.lower():
                stocklist.append(key)
    for item in stocklist:
        # Fix: the original URL contained 'month=&#164;cy=' -- the '&curren'
        # part of '&currency' had been rendered into the '&#164;' sign by the
        # forum software. Restored to the real query parameter.
        linko = "https://www.ariva.de/{0}/historische_kurse?go=1&boerse_id={1}&month=&currency={2}&clean_split=1&clean_bezug=1".format(stock, stodic[item.lower()], curr)
        try:  # Have to cope with 403 forbidden sites
            resu = request.urlopen(linko)
        except urlerror.HTTPError as e:
            print("ERROR: Historical data not available for {0} due to website error: {1}\nDetails: {2}".format(stock, e.code, e.__dict__))
            return -1
        except urlerror.URLError as e:
            # Fix: URLError has no .code attribute (only .reason), so the old
            # handler itself crashed with an AttributeError.
            print("ERROR: Historical data not available for {0} due to website error: {1}\nDetails: {2}".format(stock, e.reason, e.__dict__))
            return -1
        if resu.code != 200:
            print("ERROR: Website access yields Code {0}. Manually check the link: {1}".format(resu.code, linko))
            return -1
        # Read website content to parse.
        rdata = resu.read().decode('utf-8')
        # str() of an empty findall result is '[]' (length 2), hence > 2
        # means the "no quote data" marker was actually found on the page.
        stockdata = str(re.findall(r'Keine aktuellen Kursdaten', rdata))
        if len(stockdata) > 2:
            print("WARNING: No stockdata found on {0} for {1}.".format(item.upper(), stock))
            continue
        # ariva's internal security id, needed for the CSV download below.
        stockid = str(re.findall(r'name="secu"\ value.+?(?=\ \/>)', rdata)[0].split('=')[2].strip('\"'))
        # Scrape WKN/ISIN/Symbol out of the page markup. This is somewhat
        # insane and likely VERY error prone.
        # NOTE(review): the ' ' entry below may originally have been the
        # entity '&nbsp;' before forum rendering -- verify against the site.
        subst_list = ['/', 'span', 'itemprop="productID"','<','>',' ','Typ']
        stockinfos = ' '.join(re.sub('|'.join(subst_list), '', str(re.findall(r'(?s)>WKN.*?>Typ',rdata)[0].replace('\n','').replace('\t',''))).split('div')).split()
        if len(stockinfos) < 3:  # Fix if no stock symbol exists
            stockinfos.append('Symbol:')
        ahist = obtain_ariva_history(stockid, args.past, today(), stodic[item.lower()], args.cur).read().decode("utf-8")
        if len(ahist) > len(stockhist):
            # Keep whichever exchange delivers the longest history.
            # NOTE(review): stockinfos keeps the value of the LAST parsed
            # exchange, which with --allexchanges may differ from the winner.
            stockhist = ahist
            exchused = item.upper()
            finallink = linko
    if finallink is None:
        # Fix: no probed exchange returned usable data; the original code
        # crashed here with an UnboundLocalError on 'finallink'.
        return -1
    return finallink, stockhist, exchused, stockinfos
def obtain_ariva_history(stockid, minti, maxti, exch, curr):
    '''
    Download the historic quote CSV for one security from ariva.

    stockid -- ariva's internal security id ("secu")
    minti   -- start date (d.m.Y string)
    maxti   -- end date (d.m.Y string)
    exch    -- ariva "boerse_id" of the exchange
    curr    -- currency code

    Returns the raw HTTP response object; the caller reads and decodes it.
    '''
    query = parse.urlencode({
        "clean_split": "1",    # splits taken out
        "secu": stockid,
        "boerse_id": exch,
        "clean_bezug": "1",    # options taken out
        "clean_payout": "",    # dividents NOT taken out
        "trenner": ";",        # delimiter for export
        "currency": curr,
        "min_time": minti,
        "max_time": maxti,
    })
    return request.urlopen('https://www.ariva.de/quote/historic/historic.csv' + '?' + query)
def today():
    '''Return today's date as a d.m.Y string (the format ariva expects).'''
    # Fix: local variable renamed from 'format' to avoid shadowing the builtin.
    date_format = '%d.%m.%Y'
    return date.today().strftime(date_format)
def write_csv(path, data, tag):
    '''
    Write one asset's history CSV to <path>/<tag>_history.csv.

    path -- target directory as a pathlib.Path
    data -- decoded CSV content as a single string
    tag  -- asset identifier used as the file name prefix
    '''
    fixedpath = path.joinpath(tag + '_history.csv')
    # Fix: context manager guarantees the handle is closed even on write
    # errors; explicit utf-8 because the data was decoded from utf-8 and the
    # platform default encoding may not be able to represent it.
    with open(fixedpath, "w", encoding="utf-8") as csvfile:
        csvfile.write(data)
def main():
    '''
    Drive the export: normalise the input to a list of asset dicts, probe
    ariva for each asset, write a history CSV per asset plus a linklist.csv.
    '''
    # The positional argument may be a PP export file or a single ISIN/WKN.
    # Either way we normalise to a list of {'ISIN': ..., 'WKN': ...} dicts.
    reader = []
    if Path(args.id).is_file():
        # Fix: typo "apporach" corrected in the user-facing message.
        print("\nINFO: Using filebased approach with file {}".format(args.id))
        with open(Path(args.id), 'r', encoding='utf8', newline='') as csvfile:
            for line in csv.DictReader(csvfile, delimiter=args.delimiter):
                reader.append(line)
    elif len(args.id) == 12:  # This is an ISIN
        reader.append({'ISIN': args.id, 'WKN': ''})
    elif len(args.id) == 6:   # This is a WKN
        reader.append({'WKN': args.id, 'ISIN': ''})
    else:
        print("ERROR: Something went wrong here. Found neither file nor a valid ISIN/WKN-length string was given")
        sys.exit(1)
    # Perform the magic!
    linklist = []
    print("\nExchange : WKN : ISIN : Symbol : Ariva-Link\n")
    for item in reader:
        if 'ISIN' in item and len(item['ISIN']) == 12:
            picker = item['ISIN']
        elif 'WKN' in item and len(item['WKN']) == 6:
            # Fix: the original tested len(item['ISIN']) here, so an asset
            # given only by WKN never matched this branch.
            picker = item['WKN']
        else:  # If this is hit its likely a problem with the import file
            print("ERROR: Something went wrong here. Check your Input-CSV-file for entry: {}".format(item))
            continue
        # check_asset returns -1 on failure and may also raise while
        # scraping; either way we skip the asset and carry on.
        try:
            result = check_asset(picker, args.stex, args.cur)
        except Exception:
            continue
        if result == -1:
            continue
        linko, stockhist, stockexch, stockids = result
        # Each CSV always ends with 5 LF plus 1 for the header, therefore
        # fewer than 7 line feeds means the history is effectively empty.
        if len(re.findall(r'\n', stockhist)) < 7 or not stockexch:
            print("WARNING: No history data found for asset {}.".format(picker))
            continue
        write_csv(args.out, stockhist, picker)
        # stockids entries look like 'WKN:xxx', 'ISIN:xxx', 'Symbol:xxx'.
        wkn, isin, symbol = (s.split(':')[1] for s in stockids[:3])
        linklist.append(','.join((stockexch, wkn, isin, symbol, linko)))
        print("{0} : {1} : {2} : {3} : {4}".format(stockexch, wkn, isin, symbol, linko))
    # Fix: file handle now closed via context manager.
    with open(args.out.joinpath('linklist.csv'), 'w', encoding='utf-8') as linkfile:
        linkfile.writelines(line + '\n' for line in linklist)
# Run the exporter only when executed as a script, not when imported.
if __name__ == "__main__":
    main()