import subprocess
import os
import kvmIPs
import kvmUtil
import globals
import time


class kvmNas:
    """ a craeted gluster fs nas on a one or more hostnodes
    """
    
    # class-level defaults; overwritten per instance in __init__ or at runtime
    vip=""              # the virtual ip of the setup - actually just a unique string for this NAS of this type.
    lvmVolume=""        # the LVM volume name this nas sits on 
    lvmPath=""          # Path to the LVM volume device
    lvmMountPath=""     # where the LVM volume is mounted in the file system
    glusterVolume=""    # the Gluster FS volume name.
    name=""             # unique name of this nas. Shared across all hostnodes that support this nas via gluster
                        # VM's will ask for how to mount this NAS 
    lvmAvailableFreeSpace=0 # amount of GiB of space free in the local LVM
    lastError="(noError)"   # last error encountered
    debug=False         # are we doing extra logging?
    firstNodeAdded=""   # IP address of the first node added. Needed during gluster setup to create trust pool
    ipHelper=""         # IP helper object
    count=1             # Number of replica Bricks on this NAS
 

    def __init__(self,name,vip,ipHelper,debug):
        # define the lvm values
        self.lvmVolume="nas_"+name+"_"+vip
        self.lvmPath="/dev/VolGroup/"+self.lvmVolume
        self.lvmMountPath=os.path.join('/','data',self.lvmVolume)
        self.vip=vip
        self.debug=debug
        self.name=name
        self.glusterVolume="nas_"+name+"_"+ipHelper.ipWithoutDots(vip,"_")
        self.glusterVolumeMount=os.path.join(self.lvmMountPath,'brick')
        self.ipHelper = ipHelper
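
        # For name="shared", vip="10.0.0.50" the derived names are (illustrative,
        # assuming ipHelper.ipWithoutDots replaces the dots with its separator):
        #   lvmVolume     = nas_shared_10.0.0.50
        #   lvmMountPath  = /data/nas_shared_10.0.0.50
        #   glusterVolume = nas_shared_10_0_0_50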

    def getName(self):
        return self.name

    def getError(self):
        return self.lastError

    def setError(self,lastError):
        self.lastError=lastError

    def printd(self,msg):
        """ debug logging
        """
        if self.debug:
            print msg

    def initGlusterDaemon(self,server):
        """ set the gluster daemon to start on this node and
            restart on reboot and set the iptables to allow usage
        """
        # service already exists - set it to start on runlevels 3, 4 and 5
        retval=subprocess.call(['ssh',server["accessIP"],
            'chkconfig --level 345 glusterd on'])
        if retval != 0:
            self.setError("Failed to chkconfig glusterd on on "+server["accessIP"]+" Retval:"+str(retval))
            return False

        # start the service
        retval=subprocess.call(['ssh',server["accessIP"],
            'service glusterd start'])
        if retval != 0:
            self.setError("Failed to start glusterd on "+server["accessIP"]+" Retval:"+str(retval))
            return False

        # determine if iptables/firewall has been flushed
        # if it has - then we will skip adding rules.


        with open('./.is.tmp','w') as f:
            subprocess.call(['ssh',server["accessIP"],
                             'iptables','-S'],stdout=f)

        d=""
        with open('./.is.tmp','r') as f:
            d=f.read()

        if d != "-P INPUT ACCEPT\n-P FORWARD ACCEPT\n-P OUTPUT ACCEPT\n":
            self.printd(". Update firewall rules for gluster")
     
            # setup iptables to allow gluster traffic at this host: drop the
            # blanket REJECT, append the gluster ACCEPT rules, then re-append
            # the REJECT so it stays last
            retval=subprocess.call(['ssh',server["accessIP"],
                'iptables -D INPUT -j REJECT --reject-with icmp-host-prohibited'])
            if retval != 0:
                self.setError("Failed to delete iptables REJECT rule for gluster on "+server["accessIP"]+" Retval:"+str(retval))
                return False

            glusterRules=[
                # gluster management/daemon ports
                '-A INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24047 -j ACCEPT',
                # portmapper, tcp and udp
                '-A INPUT -m state --state NEW -m tcp -p tcp --dport 111 -j ACCEPT',
                '-A INPUT -m state --state NEW -m udp -p udp --dport 111 -j ACCEPT',
                # gluster NFS ports
                '-A INPUT -m state --state NEW -m tcp -p tcp --dport 38465:38469 -j ACCEPT',
                # brick ports
                '-A INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49360 -j ACCEPT',
                # restore the catch-all REJECT at the end of the chain
                '-A INPUT -j REJECT --reject-with icmp-host-prohibited']
            for rule in glusterRules:
                retval=subprocess.call(['ssh',server["accessIP"],'iptables '+rule])
                if retval != 0:
                    self.setError("Failed to set iptables rule '"+rule+"' for gluster Retval:"+str(retval))
                    return False

            retval=subprocess.call(['ssh',server["accessIP"],
                'service iptables save'])
            if retval != 0:
                self.setError("Failed to save iptables rules for gluster Retval:"+str(retval))
                return False
            retval=subprocess.call(['ssh',server["accessIP"],
                'service iptables restart'])
            if retval != 0:
                self.setError("Failed to restart iptables after adding gluster rules Retval:"+str(retval))
                return False
        else:
            self.printd(". skipping firewall rules as they have been flushed.")

        return True

    def lvmSpaceAvailable(self,server):
        """ how much space is available in this servers LVM?
        """
        freeSpace=0
        # determine amount of free space in the LVM; all NASes come from that.
        with open('./.lvSpace.tmp','w') as freeSpaceFile:
            retval=subprocess.call(['ssh',server["accessIP"],
                'pvs --units g --noheadings --nosuffix'],stdout=freeSpaceFile)
        if retval != 0:
            print "Fatal Error trying to determine amount of free space in LVM partition on hostnode "+server["accessIP"]+" Error:"+str(retval)
            # returning zero bytes available will trip an error in the caller
            return 0

        with open("./.lvSpace.tmp","r") as myfile:
            data=[ line.strip().split() for line in myfile ]
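            # a pvs line looks like (illustrative values; the six fields are
            # PV, VG, Fmt, Attr, PSize, PFree):
            #   /dev/sda2  VolGroup  lvm2  a--  931.02  412.55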
            # free space (PFree) is the last field, index 5, in g
            freeSpace=float(data[0][5])

        self.printd(".  "+str(freeSpace)+" g space available on "+server["accessIP"]+" lvm")

        if freeSpace==0:
            # No free space? this will likely be a problem.
            print "Warning - no free space on LVM for hostnode "+server["accessIP"]

        return freeSpace

    def amountOfSpaceRequestedinG(self,freeSpace,requestedSpace):
        """ translate a request for space from an encoded number to a literal number of GiB.
            requestedSpace is a number in the range -100..2^32:
              > 0 is simply the number of GiB requested
              < 0 is a percentage of the free space requested
            freeSpace is the number of GiB of free space currently available in the LVM
        """
        if requestedSpace >= 0:
            return requestedSpace
        else:
            # requested space is a % of available; negate it to get a positive size
            resolvedRequestedSpace = ( -requestedSpace * freeSpace ) / 100
            return resolvedRequestedSpace
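    # worked examples:
    #   amountOfSpaceRequestedinG(200.0, 50)  -> 50     (a flat 50 GiB)
    #   amountOfSpaceRequestedinG(200.0, -25) -> 50.0   (25% of the 200 GiB free)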

    def lvmSpaceExistsForAllNases(self,server):
        """ is there enough space in the LVM left over to create the
            requested nases?
        """
        self.lvmAvailableFreeSpace = self.lvmSpaceAvailable(server)
        freeSpace=self.lvmAvailableFreeSpace
        
        # walk the space being requested across all nases - and make sure we can say yes.
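        # each entry of server["nasRequests"] is assumed (from its use here) to
        # look like: {"name": "shared", "size": -50}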
        for nas in server["nasRequests"]:
            nasName=nas["name"]
            requestedSpace=self.amountOfSpaceRequestedinG(freeSpace,nas["size"])
            self.printd(". nasRequest: name:"+nasName+" Size:"+str(requestedSpace)+" Available:"+str(freeSpace))

            if requestedSpace > freeSpace:
                # we have a problem.
                self.setError("Not enough space left over to create requested nas "+nasName+" on host "+server["accessIP"]+" Requested "+str(requestedSpace)+"g, remaining available space after other nases craeted "+str(freeSpace))
                return 1
            else:
                freeSpace -= requestedSpace

        self.printd(". After all space assignments, there will be "+str(freeSpace)+"g left free in the LVM")

        return 0

    def setupLocalLVMMount(self,nasName,server,spaceRequested):
        """ Create the lvm volume, format it and mount
        """

        # assert some things...
        assert self.lvmVolume != ""
        assert self.lvmPath != ""
        assert self.lvmMountPath != ""
  
        # small stdin file holding "y" to answer lvremove's confirmation
        # prompt on the cleanup paths below
        y=open('./.y.tmp','w+')
        y.write('y\n')
        y.seek(0)
        
        self.printd(". define the lvm volume...")
        retval = subprocess.call(['ssh',server["accessIP"],
            'lvcreate -L '+str(spaceRequested)+'g -n '+self.lvmVolume+' VolGroup'])
        if retval != 0:
                self.setError("Failed to create lv volume for nas "+nasName+" nas on host "+server["accessIP"]+". Error:"+str(retval))
                return False

        self.printd(". format the file system as ext3")
        retval = subprocess.call(['ssh',server["accessIP"],'mkfs.ext3 '+self.lvmPath])
        if retval != 0:
            self.setError("Failed to make new filesystem on "+self.lvmPath+" Error:"+str(retval))
            # try to drop the volume
            subprocess.call(['ssh',server["accessIP"],'lvremove '+self.lvmPath],stdin=y)
            return False

        # create mount point and mount locally on hostnode
        print "Mounting "+self.name+" NAS Volume"
        self.printd(". create the mount folder for the lvm volume")
        if not kvmUtil.createRemoteFolder(server["accessIP"],self.lvmMountPath):
            # try to drop the volume
            subprocess.call(['ssh',server["accessIP"],'lvremove '+self.lvmMountPath],stdin=y);
            self.setError("Failed to create remote mount folder on hostnode "+server["accessIP"]+" at "+self.lvmMountPath)
            return False

        self.printd(". create the fstab line to mount the lvm volume")
        mountNASSource='/dev/mapper/VolGroup-'+self.lvmVolume
        retval = subprocess.call(['ssh',server["accessIP"],
                    'echo "'+mountNASSource+' '+self.lvmMountPath+' ext3 defaults 1 3 " >> /etc/fstab'])
        if retval != 0:
            # try to drop the volume
            subprocess.call(['ssh',server["accessIP"],'lvremove '+self.lvmPath],stdin=y)
            self.setError("Failed to append fstab entry for nas volume "+mountNASSource+" Error:"+str(retval))
            return False

        # now mount it
        self.printd(". mount the lvm volume locally")
        retval = subprocess.call(['ssh',server["accessIP"],
                'mount '+self.lvmMountPath])
        if retval != 0:
            # try to drop the volume; the fstab entry is undone below
            subprocess.call(['ssh',server["accessIP"],'lvremove '+self.lvmPath],stdin=y)
            subprocess.call(['ssh',server["accessIP"],
                'grep -v '+self.lvmVolume+' /etc/fstab > ~/fstab && mv -f ~/fstab /etc/fstab'])

            self.setError("Failed to mount nas volume "+self.lvmVolume)
            return False

        # create the BRICK folder
        self.printd(". create the gluster brick mount point")
        if not kvmUtil.createRemoteFolder(server["accessIP"],self.glusterVolumeMount):
            subprocess.call(['ssh',server["accessIP"],'umount '+self.lvmMountPath])
            subprocess.call(['ssh',server["accessIP"],'lvremove '+self.lvmMountPath],stdin=y);
            subprocess.call(['ssh',server["accessIP"],
                'grep -v '+self.lvmVolume+' /etc/fstab > ~/fstab && mv -f ~/fstab /etc/fstab'])

            self.setError("failed to create brick folder on hostnode "+server["accessIP"])
            return False

        # done.
        return True

    def setupGlusterShare(self,firstMember, server,index):
        """ create the gluster volume on the provided server
        """

        # gluster setup
        self.printd(". glusterVolume:"+self.glusterVolume)

        if firstMember:

            # create the gluster volume
            self.printd(". create gluster volume on "+server["accessIP"])
            retval=subprocess.call(['ssh',server["accessIP"],
                    'gluster volume create '+self.glusterVolume+' '+self.ipHelper.hostnameForServer(index)+':'+self.glusterVolumeMount],stdout=globals.fnull)
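            # the command expands to something like (illustrative hostname):
            #   gluster volume create nas_shared_10_0_0_50 hostnode1:/data/nas_shared_10.0.0.50/brick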
            if retval != 0:
                self.setError("Failed to create gluster volume for "+self.glusterVolume+" at "+self.glusterVolumeMount+". Error:"+str(retval))
                return False

            # start the volume
            self.printd(". start gluster volume on "+server["accessIP"])
            retval=subprocess.call(['ssh',server["accessIP"],
                    'gluster volume start '+self.glusterVolume],stdout=globals.fnull)
            if retval != 0:
                self.setError("Failed to start gluster volume "+self.glusterVolume+" on host "+server["accessIP"]+" Error:"+str(retval))
                return False

        else:
            # extend the volume
            self.count += 1

            # include this host in the trust pool - from the host that created the volume
            newTrustHost=self.ipHelper.hostnameForServer(index)
            self.printd(". extend the gluster trust pool on "+self.firstNodeAdded+" to include "+newTrustHost)
            # the bridge we just created on this host's network adapter may still
            # be coming online, so retry the probe with increasing back-off
            retval = subprocess.call(['ssh',self.firstNodeAdded,'gluster peer probe '+newTrustHost])
            for delay in (2,10,30):
                if retval == 0:
                    break
                self.printd(". pausing "+str(delay)+"s to retry probe")
                time.sleep(delay)
                self.printd(". retry probe...")
                retval = subprocess.call(['ssh',self.firstNodeAdded,'gluster peer probe '+newTrustHost])
            if retval != 0:
                # after four attempts it's probably not NIC startup that's the problem
                self.setError("Failed to add host "+newTrustHost+" as a gluster peer to "+self.firstNodeAdded+". Retval:"+str(retval))
                return False

            # extend the volume and increase the replica count
            self.printd(". extend the gluster volume")
            retval = subprocess.call(['ssh',server["accessIP"],
                    'gluster volume add-brick '+self.glusterVolume+' replica '+str(self.count)+' '+newTrustHost+':'+self.glusterVolumeMount],stdout=globals.fnull)

            if retval != 0:
                self.setError("Failed to add new brick to gluster volume "+self.glusterVolume+" at "+newTrustHost+":"+self.glusterVolumeMount+" Retval:"+str(retval))
                return False

            self.printd(". New Volume  info is ")
            if self.debug:
                subprocess.call(['ssh',server["accessIP"],
                    'gluster volume info '+self.glusterVolume])
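            # `gluster volume info` output resembles (illustrative):
            #   Volume Name: nas_shared_10_0_0_50
            #   Type: Replicate
            #   Number of Bricks: 1 x 2 = 2
            #   Brick1: hostnode1:/data/nas_shared_10.0.0.50/brick
            #   Brick2: hostnode2:/data/nas_shared_10.0.0.50/brick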

        return True
        

    def addHostNode(self,server,index,size):
        """ create a NAS of this size, this name on this server
            return boolean success.

            This method should be called once for a nas.
            
            Additional hostnodes that want to use this same glusterNas should
            call addAdditionalHostNode
        """
        if self.firstNodeAdded != "":
            # error - only the first node into a NAS uses this method.
            self.setError("Calling addHostNode twice on a kvmNas object. Call addAdditionalHostNode")
            return False


        # save who called us first
        self.firstNodeAdded=server["accessIP"]
       
        # see if it already exists (lvdisplay exits with 5 when the LV is absent)
        retval=subprocess.call(['ssh',server["accessIP"],
                'lvdisplay '+self.lvmPath],stdout=globals.fnull,stderr=globals.fnull)
        if retval==5:
            # does not exist - create it
            print "Creating "+self.name+" NAS"

            # enable the gluster daemon on this hostnode
            if not self.initGlusterDaemon(server):
                # error is already set by initGlusterDaemon
                return False

            # resolve how much space we are allocating.
            spaceRequested=self.amountOfSpaceRequestedinG(self.lvmAvailableFreeSpace,float(size))

            # there should be space allocated!
            assert spaceRequested>0

            if not self.setupLocalLVMMount(self.name,server,spaceRequested):
                # error is already set
                return False

            if not self.setupGlusterShare(True,server,index):
                # error is already set
                return False
        else:
            print "Skipping NAS create as it already exists"

        return True

    def addAdditionalHostNode(self,server,index,size):
        """ add this hostnode as a new peer to the existing nas.
        """

        if self.firstNodeAdded == "":
            # error - must call addHostNode prior to addAdditionalHostNode
            self.setError("Calling addAddtionalHostnode before AddHostNode on a kvmNAS.")
            return False

        # see if it already exists (lvdisplay exits with 5 when the LV is absent)
        retval=subprocess.call(['ssh',server["accessIP"],
                'lvdisplay '+self.lvmPath],stdout=globals.fnull,stderr=globals.fnull)
        if retval==5:
            # does not exist - create it
            print "Creating/Extending "+self.name+" NAS"

            # enable the gluster daemon on this hostnode
            if not self.initGlusterDaemon(server):
                # error is already set by initGlusterDaemon
                return False

            # resolve how much space we are allocating.
            spaceRequested=self.amountOfSpaceRequestedinG(self.lvmAvailableFreeSpace,float(size))

            # there should be space allocated!
            assert spaceRequested>0

            if not self.setupLocalLVMMount(self.name,server,spaceRequested):
                # error is already set
                return False

            if not self.setupGlusterShare(False,server,index):
                # error is already set
                return False
        else:
            print "Skipping NAS create on this additional host as it already exists"


        return True

    def removeHostNode(self,server,index):
        """ remove this server from the gluster volume.
            if the last server in the volume - remove volume.
            remove lvm partition for this brick on this hostnode
            remove iptables entries
        """
        
        self.printd(". removing hostnode from nas "+self.name)

        self.takeDownGlusterShare(server,index)

        self.takeDownLVMMount(server)

        self.takeDownIpTables(server)
        

    def takeDownGlusterShare(self,server,index):
        """ remove this host from this NAS - remove our brick
            if the last server in the volume - remove volume
            cleanup trust pool
        """

        nasHostNameLeaving=self.ipHelper.hostnameForServer(index)

        with open('./.gvi.tmp','w') as f:
            subprocess.call(['ssh',server["accessIP"],
                'gluster volume info '+self.glusterVolume],stdout=f)

        bricks=0
        hostHasBrick=False
        with open('./.gvi.tmp','r') as f:
            for line in f:
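                # each "BrickN: host:.../brick" line matches via the lowercase
                # 'brick' leaf of our mount path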
                if 'brick' in line:
                    bricks += 1
                if nasHostNameLeaving in line:
                    hostHasBrick=True

        if not hostHasBrick:
            self.printd(". asked to remove a gluster volume I dont have... "+self.name)
        else:

            # remove our brick.
            self.printd(". Remove our brick from the gluster volume")
            subprocess.call(['ssh',server["accessIP"],
                'gluster --mode=script volume remove-brick '+self.glusterVolume+' replica '+str(bricks-1)+' '+nasHostNameLeaving+':'+self.glusterVolumeMount+' force'],stdout=globals.fnull)                
        
            # stop the gluster volume (it may already be stopped)
            self.printd(". Stop the gluster volume")
            subprocess.call(['ssh',server["accessIP"],
                'gluster --mode=script volume stop '+self.glusterVolume+' force'],stderr=globals.fnull,stdout=globals.fnull)

            #  delete the volume.
            self.printd(". Delete the gluster volume")
            subprocess.call(['ssh',server["accessIP"],
                'gluster --mode=script volume delete '+self.glusterVolume],stdout=globals.fnull)
                

            # if that was the last volume on this host - then remove all other trust pool members
            # from this host.
            with open('./.gli.tmp','w') as f:
                subprocess.call(['ssh',server["accessIP"],
                    'gluster volume list'],stdout=f,stderr=globals.fnull)
            if os.path.getsize('./.gli.tmp') == 0:
                self.printd(". No more volumes, removing trust pool entries")
                with open('./.gps.tmp','w') as f:
                    subprocess.call(['ssh',server["accessIP"],
                        'gluster peer status'],stdout=f)
                # now parse and remove anyone but localhost.
                with file("./.gps.tmp") as l:
                    result=[ line.split() for line in l if line.startswith("Hostname")]
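                # peer status lines look like (illustrative):
                #   Hostname: hostnode2
                # so h[1] below is the peer hostname to detach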
                for h in result:
                    # remove this peer
                    subprocess.call(['ssh',server["accessIP"],
                        'gluster peer detach '+h[1]],stdout=globals.fnull)
    

    def takeDownLVMMount(self,server):
        """ Undo all the work for this NAS with regard to lvm
        """     

        # remove the line from the fstab that mounts the lvm volume
        self.printd(". clean up the fstab for the lvm volume")
        subprocess.call(['ssh',server["accessIP"],
                'grep -v '+self.lvmVolume+' /etc/fstab > ~/fstab && mv -f ~/fstab /etc/fstab'])

        # umount the lvm
        self.printd(". unmount the lvm volume")
        subprocess.call(['ssh',server["accessIP"],
                'umount '+self.lvmMountPath])

        self.printd(". remove lvm volume")
        # destroy the lvm volume
        subprocess.call(['ssh',server["accessIP"],
                'lvremove -f '+self.lvmPath],stdout=globals.fnull,stderr=globals.fnull)

        # remove the mount point folder
        self.printd(". remove lvm mount directory")
        subprocess.call(['ssh',server["accessIP"],
                'rmdir '+self.lvmMountPath],stderr=globals.fnull)

    def takeDownIpTables(self,server):
        """ remove work done to setup ip tables to allow gluster traffic
        """
        
        # cleanup iptables firewall settings - delete the rules added in initGlusterDaemon
        glusterRules=[
            '-D INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24047 -j ACCEPT',
            '-D INPUT -m state --state NEW -m tcp -p tcp --dport 111 -j ACCEPT',
            '-D INPUT -m state --state NEW -m udp -p udp --dport 111 -j ACCEPT',
            '-D INPUT -m state --state NEW -m tcp -p tcp --dport 38465:38469 -j ACCEPT',
            '-D INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49360 -j ACCEPT']
        for rule in glusterRules:
            subprocess.call(['ssh',server["accessIP"],'iptables '+rule],stdout=globals.fnull,stderr=globals.fnull)

        subprocess.call(['ssh',server["accessIP"],'service iptables save'],stdout=globals.fnull)
        subprocess.call(['ssh',server["accessIP"],'service iptables restart'],stdout=globals.fnull)


