# agentBuying.py - Paper-buying agent
# AIFCA Python code Version 0.9.7 Documentation at http://aipython.org
# Download the zip file and read aipython.pdf for documentation

# Artificial Intelligence: Foundations of Computational Agents http://artint.info
# Copyright 2017-2023 David L. Poole and Alan K. Mackworth
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en

import random
from agents import Agent, Environment, Simulate
from utilities import pick_from_dist

class TP_env(Environment):
    prices = [234, 234, 234, 234, 255, 255, 275, 275, 211, 211, 211,
        234, 234, 234, 234, 199, 199, 275, 275, 234, 234, 234, 234, 255,
        255, 260, 260, 265, 265, 265, 265, 270, 270, 255, 255, 260, 260,
        265, 265, 150, 150, 265, 265, 270, 270, 255, 255, 260, 260, 265,
        265, 265, 265, 270, 270, 211, 211, 255, 255, 260, 260, 265, 265,
        260, 265, 270, 270, 205, 255, 255, 260, 260, 265, 265, 265, 265,
        270, 270]
    max_price_addon = 20  # maximum random value added to get the price

    def __init__(self):
        """paper-buying environment"""
        self.time = 0
        self.stock = 20
        self.stock_history = []   # memory of the stock history
        self.price_history = []   # memory of the price history

    def initial_percept(self):
        """return initial percept"""
        self.stock_history.append(self.stock)
        price = self.prices[0] + random.randrange(self.max_price_addon)
        self.price_history.append(price)
        return {'price': price, 'instock': self.stock}

    def do(self, action):
        """does action (buy) and returns percept consisting of price and instock"""
        used = pick_from_dist({6:0.1, 5:0.1, 4:0.1, 3:0.3, 2:0.2, 1:0.2})
        # used = pick_from_dist({7:0.1, 6:0.2, 5:0.2, 4:0.3, 3:0.1, 2:0.1}) # uses more paper
        bought = action['buy']
        self.stock = self.stock + bought - used
        self.stock_history.append(self.stock)
        self.time += 1
        price = (self.prices[self.time % len(self.prices)]  # repeating pattern
                 + random.randrange(self.max_price_addon)   # plus randomness
                 + self.time // 2)                          # plus inflation
        self.price_history.append(price)
        return {'price': price, 'instock': self.stock}

class TP_agent(Agent):
    def __init__(self):
        self.spent = 0
        percept = env.initial_percept()   # uses the global environment defined below
        self.ave = self.last_price = percept['price']
        self.instock = percept['instock']
        self.buy_history = []

    def select_action(self, percept):
        """return the next action to carry out"""
        self.last_price = percept['price']
        self.ave = self.ave + (self.last_price - self.ave) * 0.05  # running average of the price
        self.instock = percept['instock']
        if self.last_price < 0.9 * self.ave and self.instock < 60:
            tobuy = 48    # price is low relative to the average: stock up
        elif self.instock < 12:
            tobuy = 12    # stock is low: buy enough to keep going
        else:
            tobuy = 0
        self.spent += tobuy * self.last_price
        self.buy_history.append(tobuy)
        return {'buy': tobuy}

env = TP_env()
ag = TP_agent()
sim = Simulate(ag, env)
# sim.go(90)
# ag.spent/env.time    ## average spent per time period

import matplotlib.pyplot as plt

class Plot_history(object):
    """Set up the plot for history of price and number in stock"""
    def __init__(self, ag, env):
        self.ag = ag
        self.env = env
        plt.ion()
        plt.xlabel("Time")
        plt.ylabel("Value")

    def plot_env_hist(self):
        """plot history of price and instock"""
        num = len(env.stock_history)
        plt.plot(range(num), env.price_history, label="Price")
        plt.plot(range(num), env.stock_history, label="In stock")
        plt.legend()
        # plt.draw()

    def plot_agent_hist(self):
        """plot history of buying"""
        num = len(ag.buy_history)
        plt.bar(range(1, num+1), ag.buy_history, label="Bought")
        plt.legend()
        # plt.draw()

# pl = Plot_history(ag, env)
# sim.go(90)
# pl.plot_env_hist()
# pl.plot_agent_hist()
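
# The helper pick_from_dist (imported from utilities above) is assumed to take a
# {value: probability} dictionary and return one of the keys, sampled with its
# associated probability. If the utilities module is not at hand, the commented-out
# sketch below is one compatible stand-in (an assumption, not the aipython
# implementation), kept commented so it does not shadow the imported version:
#
# def pick_from_dist(item_prob_dist):
#     """return a key of item_prob_dist, chosen with its associated probability"""
#     ranreal = random.random()                     # uniform sample in [0, 1)
#     for (item, prob) in item_prob_dist.items():
#         if ranreal < prob:
#             return item
#         ranreal -= prob                           # move on to the next key's interval
#     raise RuntimeError(str(item_prob_dist) + " is not a probability distribution")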