# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function

import errno
import os
import signal
import subprocess
import sys
import threading
import traceback
from datetime import datetime

import six
import time

if six.PY2:
    from Queue import Queue, Empty  # Python 2
else:
    from queue import Queue, Empty  # Python 3

__all__ = ['ProcessHandlerMixin', 'ProcessHandler', 'LogOutput',
           'StoreOutput', 'StreamOutput']

# Set the MOZPROCESS_DEBUG environment variable to 1 to see some debugging output
MOZPROCESS_DEBUG = os.getenv("MOZPROCESS_DEBUG")

INTERVAL_PROCESS_ALIVE_CHECK = 0.02

# We don't use mozinfo because it is expensive to import, see bug 933558.
isWin = os.name == "nt"
isPosix = os.name == "posix"  # includes MacOS X

if isWin:
    from ctypes import sizeof, addressof, c_ulong, byref, WinError, c_longlong
    from . import winprocess
    from .qijo import JobObjectAssociateCompletionPortInformation, \
        JOBOBJECT_ASSOCIATE_COMPLETION_PORT, JobObjectExtendedLimitInformation, \
        JOBOBJECT_BASIC_LIMIT_INFORMATION, JOBOBJECT_EXTENDED_LIMIT_INFORMATION, IO_COUNTERS


class ProcessHandlerMixin(object):
    """
    A class for launching and manipulating local processes.

    :param cmd: command to run. May be a string or a list. If specified as a list, the first
      element will be interpreted as the command, and all additional elements will be interpreted
      as arguments to that command.
    :param args: list of arguments to pass to the command (defaults to None). Must not be set when
      `cmd` is specified as a list.
    :param cwd: working directory for command (defaults to None).
    :param env: environment to use for the process (defaults to os.environ).
    :param ignore_children: causes system to ignore child processes when True,
      defaults to False (which tracks child processes).
    :param kill_on_timeout: when True, the process will be killed when a timeout is reached.
      When False, the caller is responsible for killing the process.
      Failure to do so could cause a call to wait() to hang indefinitely. (Defaults to True.)
    :param processOutputLine: function or list of functions to be called for
        each line of output produced by the process (defaults to an empty
        list).
    :param processStderrLine: function or list of functions to be called
        for each line of error output - stderr - produced by the process
        (defaults to an empty list). If this is not specified, stderr lines
        will be sent to the *processOutputLine* callbacks.
    :param onTimeout: function or list of functions to be called when the process times out.
    :param onFinish: function or list of functions to be called when the process terminates
      normally without timing out.
    :param kwargs: additional keyword args to pass directly into Popen.

    NOTE: Child processes will be tracked by default.  If for any reason
    we are unable to track child processes and ignore_children is set to False,
    then we will fall back to only tracking the root process.  The fallback
    will be logged.
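
    Example (an illustrative sketch only; the command and the `print_line`
    callback below are hypothetical)::

        def print_line(line):
            # example callback, not part of mozprocess
            print("OUTPUT: %s" % line)

        p = ProcessHandlerMixin(['echo', 'hello'],
                                processOutputLine=[print_line])
        p.run(timeout=10)
        exit_code = p.wait()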
    """

    class Process(subprocess.Popen):
        """
        Represents our view of a subprocess.
        It adds a kill() method which allows it to be stopped explicitly.
        """

        MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY = 180
        MAX_PROCESS_KILL_DELAY = 30
        TIMEOUT_BEFORE_SIGKILL = 1.0

        def __init__(self,
                     args,
                     bufsize=0,
                     executable=None,
                     stdin=None,
                     stdout=None,
                     stderr=None,
                     preexec_fn=None,
                     close_fds=False,
                     shell=False,
                     cwd=None,
                     env=None,
                     universal_newlines=False,
                     startupinfo=None,
                     creationflags=0,
                     ignore_children=False):

            # Parameter for whether or not we should attempt to track child processes
            self._ignore_children = ignore_children

            if not self._ignore_children and not isWin:
                # Set the process group id for POSIX systems.
                # The child becomes the leader of a new process group whose id
                # equals its own pid, so the whole group can be signalled together.
                # NOTE: This overrides any caller-supplied preexec_fn, so it
                #       cannot be combined with managing child processes.
                #       TODO: Ideally, find a way around this
                def setpgidfn():
                    os.setpgid(0, 0)

                preexec_fn = setpgidfn

            try:
                subprocess.Popen.__init__(self, args, bufsize, executable,
                                          stdin, stdout, stderr,
                                          preexec_fn, close_fds,
                                          shell, cwd, env,
                                          universal_newlines, startupinfo, creationflags)
            except OSError:
                print(args, file=sys.stderr)
                raise

        def debug(self, msg):
            if not MOZPROCESS_DEBUG:
                return
            thread = threading.current_thread().name
            print("DBG::MOZPROC PID:{} ({}) | {}".format(self.pid, thread, msg))

        def __del__(self):
            if isWin:
                if six.PY2:
                    _maxint = sys.maxint
                else:
                    _maxint = sys.maxsize
                handle = getattr(self, '_handle', None)
                if handle:
                    self._internal_poll(_deadstate=_maxint)
                if handle or self._job or self._io_port:
                    self._cleanup()
            else:
                subprocess.Popen.__del__(self)

        def kill(self, sig=None):
            if isWin:
                try:
                    if not self._ignore_children and self._handle and self._job:
                        self.debug("calling TerminateJobObject")
                        winprocess.TerminateJobObject(self._job, winprocess.ERROR_CONTROL_C_EXIT)
                    elif self._handle:
                        self.debug("calling TerminateProcess")
                        winprocess.TerminateProcess(self._handle, winprocess.ERROR_CONTROL_C_EXIT)
                except WindowsError:
                    self._cleanup()

                    traceback.print_exc()
                    raise OSError("Could not terminate process")

            else:
                def send_sig(sig, retries=0):
                    pid = self.detached_pid or self.pid
                    if not self._ignore_children:
                        try:
                            os.killpg(pid, sig)
                        except BaseException as e:
                            # On Mac OSX if the process group contains zombie
                            # processes, killpg results in an EPERM.
                            # In this case, zombie processes need to be reaped
                            # before continuing
                            # Note: A negative pid refers to the entire process
                            # group
                            if retries < 1 and getattr(e, "errno", None) == errno.EPERM:
                                try:
                                    os.waitpid(-pid, 0)
                                finally:
                                    return send_sig(sig, retries + 1)

                            # ESRCH is a "no such process" failure, which is fine because the
                            # application might already have been terminated itself. Any other
                            # error would indicate a problem in killing the process.
                            if getattr(e, "errno", None) != errno.ESRCH:
                                print("Could not terminate process: %s" %
                                      self.pid, file=sys.stderr)
                                raise
                    else:
                        os.kill(pid, sig)

                if sig is None and isPosix:
                    # ask the process for termination and wait a bit
                    send_sig(signal.SIGTERM)
                    limit = time.time() + self.TIMEOUT_BEFORE_SIGKILL
                    while time.time() <= limit:
                        if self.poll() is not None:
                            # process terminated nicely
                            break
                        time.sleep(INTERVAL_PROCESS_ALIVE_CHECK)
                    else:
                        # process did not terminate - send SIGKILL to force
                        send_sig(signal.SIGKILL)
                else:
                    # a signal was explicitly requested, or we are not on POSIX
                    send_sig(sig or signal.SIGKILL)

            self.returncode = self.wait()
            self._cleanup()
            return self.returncode

        def poll(self):
            """ Popen.poll
                Check if child process has terminated. Set and return returncode attribute.
            """
            # If we have a handle, the process is alive
            if isWin and getattr(self, '_handle', None):
                return None

            return subprocess.Popen.poll(self)

        def wait(self, timeout=None):
            """ Popen.wait
                Called to wait for a running process to shut down and return
                its exit code
                Returns the main process's exit code
            """
            # This call will be different for each OS
            self.returncode = self._custom_wait(timeout=timeout)
            self._cleanup()
            return self.returncode

        """ Private Members of Process class """

        if isWin:
            # Redefine _execute_child so that we can track process groups
            def _execute_child(self, *args_tuple):
                if six.PY3:
                    (args, executable, preexec_fn, close_fds,
                     pass_fds, cwd, env,
                     startupinfo, creationflags, shell,
                     p2cread, p2cwrite,
                     c2pread, c2pwrite,
                     errread, errwrite,
                     restore_signals, start_new_session) = args_tuple
                # workaround for bug 950894
                elif sys.hexversion < 0x02070600:  # prior to 2.7.6
                    (args, executable, preexec_fn, close_fds,
                     cwd, env, universal_newlines, startupinfo,
                     creationflags, shell,
                     p2cread, p2cwrite,
                     c2pread, c2pwrite,
                     errread, errwrite) = args_tuple
                    to_close = set()
                else:  # 2.7.6 and later
                    (args, executable, preexec_fn, close_fds,
                     cwd, env, universal_newlines, startupinfo,
                     creationflags, shell, to_close,
                     p2cread, p2cwrite,
                     c2pread, c2pwrite,
                     errread, errwrite) = args_tuple
                if not isinstance(args, six.string_types):
                    args = subprocess.list2cmdline(args)

                # Always OR in the create-new-process-group flag
                creationflags |= winprocess.CREATE_NEW_PROCESS_GROUP

                if startupinfo is None:
                    startupinfo = winprocess.STARTUPINFO()

                if None not in (p2cread, c2pwrite, errwrite):
                    startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
                    startupinfo.hStdInput = int(p2cread)
                    startupinfo.hStdOutput = int(c2pwrite)
                    startupinfo.hStdError = int(errwrite)
                if shell:
                    startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
                    startupinfo.wShowWindow = winprocess.SW_HIDE
                    comspec = os.environ.get("COMSPEC", "cmd.exe")
                    args = comspec + " /c " + args

                # Determine if we can create a job or create nested jobs.
                can_create_job = winprocess.CanCreateJobObject()
                can_nest_jobs = self._can_nest_jobs()

                # Ensure we write a warning message if we are falling back
                if not (can_create_job or can_nest_jobs) and not self._ignore_children:
                    # We can't create job objects AND the user wanted us to
                    # Warn the user about this.
                    print("ProcessManager UNABLE to use job objects to manage "
                          "child processes", file=sys.stderr)

                # set process creation flags
                creationflags |= winprocess.CREATE_SUSPENDED
                creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
                if can_create_job:
                    creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
                if not (can_create_job or can_nest_jobs):
                    # Since we've warned, we just log info here to inform you
                    # of the consequence of setting ignore_children = True
                    print("ProcessManager NOT managing child processes")

                # create the process
                hp, ht, pid, tid = winprocess.CreateProcess(
                    executable, args,
                    None, None,  # No special security
                    1,  # Must inherit handles!
                    creationflags,
                    winprocess.EnvironmentBlock(env),
                    cwd, startupinfo)
                self._child_created = True
                self._handle = hp
                self._thread = ht
                self.pid = pid
                self.tid = tid

                if not self._ignore_children and (can_create_job or can_nest_jobs):
                    try:
                        # We create a new job for this process, so that we can kill
                        # the process and any sub-processes
                        # Create the IO Completion Port
                        self._io_port = winprocess.CreateIoCompletionPort()
                        self._job = winprocess.CreateJobObject()

                        # Now associate the io comp port and the job object
                        joacp = JOBOBJECT_ASSOCIATE_COMPLETION_PORT(winprocess.COMPKEY_JOBOBJECT,
                                                                    self._io_port)
                        winprocess.SetInformationJobObject(
                            self._job,
                            JobObjectAssociateCompletionPortInformation,
                            addressof(joacp),
                            sizeof(joacp)
                        )

                        # Allow subprocesses to break away from us - necessary when
                        # Firefox restarts, or flash with protected mode
                        limit_flags = winprocess.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
                        if not can_nest_jobs:
                            # This allows sandbox processes to create their own job,
                            # and is necessary to set for older versions of Windows
                            # without nested job support.
                            limit_flags |= winprocess.JOB_OBJECT_LIMIT_BREAKAWAY_OK

                        jbli = JOBOBJECT_BASIC_LIMIT_INFORMATION(
                            c_longlong(0),  # per process time limit (ignored)
                            c_longlong(0),  # per job user time limit (ignored)
                            limit_flags,
                            0,  # min working set (ignored)
                            0,  # max working set (ignored)
                            0,  # active process limit (ignored)
                            None,  # affinity (ignored)
                            0,  # Priority class (ignored)
                            0,  # Scheduling class (ignored)
                        )

                        iocntr = IO_COUNTERS()
                        jeli = JOBOBJECT_EXTENDED_LIMIT_INFORMATION(
                            jbli,  # basic limit info struct
                            iocntr,  # io_counters (ignored)
                            0,  # process mem limit (ignored)
                            0,  # job mem limit (ignored)
                            0,  # peak process limit (ignored)
                            0)  # peak job limit (ignored)

                        winprocess.SetInformationJobObject(self._job,
                                                           JobObjectExtendedLimitInformation,
                                                           addressof(jeli),
                                                           sizeof(jeli)
                                                           )

                        # Assign the job object to the process
                        winprocess.AssignProcessToJobObject(self._job, int(hp))

                        # It's overkill, but we use Queue to signal between threads
                        # because it handles errors more gracefully than event or condition.
                        self._process_events = Queue()

                        # Spin up our thread for managing the IO Completion Port
                        self._procmgrthread = threading.Thread(target=self._procmgr)
                    except Exception:
                        print("""Exception trying to use job objects;
falling back to not using job objects for managing child processes""", file=sys.stderr)
                        tb = traceback.format_exc()
                        print(tb, file=sys.stderr)
                        # Ensure no dangling handles left behind
                        self._cleanup_job_io_port()
                else:
                    self._job = None

                winprocess.ResumeThread(int(ht))
                if getattr(self, '_procmgrthread', None):
                    self._procmgrthread.start()
                ht.Close()

                for i in (p2cread, c2pwrite, errwrite):
                    if i is not None:
                        i.Close()

            # Per:
            # https://msdn.microsoft.com/en-us/library/windows/desktop/hh448388%28v=vs.85%29.aspx
            # Nesting jobs came in with windows versions starting with 6.2 according to the table
            # on this page:
            # https://msdn.microsoft.com/en-us/library/ms724834%28v=vs.85%29.aspx
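            # (Windows 8 and Windows Server 2012 report version 6.2, so nested
            # jobs are available there and on later releases.)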
            def _can_nest_jobs(self):
                winver = sys.getwindowsversion()
                return (winver.major > 6 or
                        winver.major == 6 and winver.minor >= 2)

            # Windows Process Manager - watches the IO Completion Port and
            # keeps track of child processes
            def _procmgr(self):
                if not (self._io_port) or not (self._job):
                    return

                try:
                    self._poll_iocompletion_port()
                except KeyboardInterrupt:
                    raise KeyboardInterrupt

            def _poll_iocompletion_port(self):
                # Watch the IO Completion port for status
                self._spawned_procs = {}
                countdowntokill = 0

                self.debug("start polling IO completion port")

                while True:
                    msgid = c_ulong(0)
                    compkey = c_ulong(0)
                    pid = c_ulong(0)
                    portstatus = winprocess.GetQueuedCompletionStatus(self._io_port,
                                                                      byref(msgid),
                                                                      byref(compkey),
                                                                      byref(pid),
                                                                      5000)

                    # If the countdowntokill has been activated, we need to check
                    # if we should start killing the children or not.
                    if countdowntokill != 0:
                        diff = datetime.now() - countdowntokill
                        # Arbitrarily wait 3 minutes for windows to get its act together
                        # Windows sometimes takes a small nap between notifying the
                        # IO Completion port and actually killing the children, and we
                        # don't want to mistake that situation for the situation of an unexpected
                        # parent abort (which is what we're looking for here).
                        if diff.seconds > self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY:
                            print("WARNING | IO Completion Port failed to signal "
                                  "process shutdown", file=sys.stderr)
                            print("Parent process %s exited with children alive:"
                                  % self.pid, file=sys.stderr)
                            print("PIDS: %s" % ', '.join([str(i) for i in self._spawned_procs]),
                                  file=sys.stderr)
                            print("Attempting to kill them, but no guarantee of success",
                                  file=sys.stderr)

                            self.kill()
                            self._process_events.put({self.pid: 'FINISHED'})
                            break

                    if not portstatus:
                        # Check to see what happened
                        errcode = winprocess.GetLastError()
                        if errcode == winprocess.ERROR_ABANDONED_WAIT_0:
                            # Then something has killed the port, break the loop
                            print("IO Completion Port unexpectedly closed", file=sys.stderr)
                            self._process_events.put({self.pid: 'FINISHED'})
                            break
                        elif errcode == winprocess.WAIT_TIMEOUT:
                            # Timeouts are expected, just keep on polling
                            continue
                        else:
                            print("Error Code %s trying to query IO Completion Port, "
                                  "exiting" % errcode, file=sys.stderr)
                            raise WinError(errcode)

                    if compkey.value == winprocess.COMPKEY_TERMINATE.value:
                        self.debug("compkeyterminate detected")
                        # Then we're done
                        break

                    # Check the status of the IO Port and do things based on it
                    if compkey.value == winprocess.COMPKEY_JOBOBJECT.value:
                        if msgid.value == winprocess.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
                            # No processes left, time to shut down
                            # Signal anyone waiting on us that it is safe to shut down
                            self.debug("job object msg active processes zero")
                            self._process_events.put({self.pid: 'FINISHED'})
                            break
                        elif msgid.value == winprocess.JOB_OBJECT_MSG_NEW_PROCESS:
                            # New Process started
                            # Add the child proc to our list in case our parent flakes out on us
                            # without killing everything.
                            if pid.value != self.pid:
                                self._spawned_procs[pid.value] = 1
                                self.debug("new process detected with pid value: %s" % pid.value)
                        elif msgid.value == winprocess.JOB_OBJECT_MSG_EXIT_PROCESS:
                            self.debug("process id %s exited normally" % pid.value)
                            # One process exited normally
                            if pid.value == self.pid and len(self._spawned_procs) > 0:
                                # Parent process dying, start countdown timer
                                countdowntokill = datetime.now()
                            elif pid.value in self._spawned_procs:
                                # Child process died; remove it from the list
                                del self._spawned_procs[pid.value]
                        elif msgid.value == winprocess.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
                            # One process exited abnormally
                            self.debug("process id %s exited abnormally" % pid.value)
                            if pid.value == self.pid and len(self._spawned_procs) > 0:
                                # Parent process dying, start countdown timer
                                countdowntokill = datetime.now()
                            elif pid.value in self._spawned_procs:
                                # Child process died; remove it from the list
                                del self._spawned_procs[pid.value]
                        else:
                            # We don't care about anything else
                            self.debug("We got a message %s" % msgid.value)
                            pass

            def _custom_wait(self, timeout=None):
                """ Custom implementation of wait.

                - timeout: number of seconds before timing out. If None,
                  will wait indefinitely.
                """
                # First, check to see if the process is still running
                if self._handle:
                    self.returncode = winprocess.GetExitCodeProcess(self._handle)
                else:
                    # Dude, the process is like totally dead!
                    return self.returncode

                threadalive = False
                if hasattr(self, "_procmgrthread"):
                    threadalive = self._procmgrthread.is_alive()
                if self._job and threadalive and threading.current_thread() != self._procmgrthread:
                    self.debug("waiting with IO completion port")
                    if timeout is None:
                        timeout = (self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY +
                                   self.MAX_PROCESS_KILL_DELAY)
                    # Then we are managing with IO Completion Ports
                    # wait on a signal so we know when we have seen the last
                    # process come through.
                    # We use queues to synchronize between the thread and this
                    # function because events just didn't have robust enough error
                    # handling on pre-2.7 versions
                    try:
                        # timeout is the max amount of time the procmgr thread will wait for
                        # child processes to shutdown before killing them with extreme prejudice.
                        item = self._process_events.get(timeout=timeout)
                        if item[self.pid] == 'FINISHED':
                            self.debug("received 'FINISHED' from _procmgrthread")
                            self._process_events.task_done()
                    except Exception:
                        traceback.print_exc()
                        raise OSError("IO Completion Port failed to signal process shutdown")
                    finally:
                        if self._handle:
                            self.returncode = winprocess.GetExitCodeProcess(self._handle)
                        self._cleanup()

                else:
                    # Not managing with job objects, so all we can reasonably do
                    # is call waitforsingleobject and hope for the best
                    self.debug("waiting without IO completion port")

                    if not self._ignore_children:
                        self.debug("NOT USING JOB OBJECTS!!!")
                    # First, make sure we have not already ended
                    if self.returncode != winprocess.STILL_ACTIVE:
                        self._cleanup()
                        return self.returncode

                    rc = None
                    if self._handle:
                        if timeout is None:
                            timeout = -1
                        else:
                            # timeout for WaitForSingleObject is in ms
                            timeout = timeout * 1000

                        rc = winprocess.WaitForSingleObject(self._handle, timeout)

                    if rc == winprocess.WAIT_TIMEOUT:
                        # The process isn't dead, so kill it
                        print("Timed out waiting for process to close, "
                              "attempting TerminateProcess")
                        self.kill()
                    elif rc == winprocess.WAIT_OBJECT_0:
                        # We caught WAIT_OBJECT_0, which indicates all is well
                        print("Single process terminated successfully")
                        self.returncode = winprocess.GetExitCodeProcess(self._handle)
                    else:
                        # An error occurred; we should probably throw
                        rc = winprocess.GetLastError()
                        if rc:
                            raise WinError(rc)

                    self._cleanup()

                return self.returncode

            def _cleanup_job_io_port(self):
                """ Do the job and IO port cleanup separately because there are
                    cases where we want to clean these without killing _handle
                    (i.e. if we fail to create the job object in the first place)
                """
                if getattr(self, '_job') and self._job != winprocess.INVALID_HANDLE_VALUE:
                    self._job.Close()
                    self._job = None
                else:
                    # If windows already freed our handle just set it to none
                    # (saw this intermittently while testing)
                    self._job = None

                if getattr(self, '_io_port', None) and \
                        self._io_port != winprocess.INVALID_HANDLE_VALUE:
                    self._io_port.Close()
                    self._io_port = None
                else:
                    self._io_port = None

                if getattr(self, '_procmgrthread', None):
                    self._procmgrthread = None

            def _cleanup(self):
                self._cleanup_job_io_port()
                if self._thread and self._thread != winprocess.INVALID_HANDLE_VALUE:
                    self._thread.Close()
                    self._thread = None
                else:
                    self._thread = None

                if self._handle and self._handle != winprocess.INVALID_HANDLE_VALUE:
                    self._handle.Close()
                    self._handle = None
                else:
                    self._handle = None

        elif isPosix:

            def _custom_wait(self, timeout=None):
                """ Haven't found any reason to differentiate between these platforms
                    so they all use the same wait callback.  If it is necessary to
                    craft different styles of wait, then a new _custom_wait method
                    could be easily implemented.
                """

                if not self._ignore_children:
                    try:
                        # os.waitpid return value:
                        # > [...] a tuple containing its pid and exit status
                        # > indication: a 16-bit number, whose low byte is the
                        # > signal number that killed the process, and whose
                        # > high byte is the exit status (if the signal number
                        # > is zero)
                        # - http://docs.python.org/2/library/os.html#os.wait
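                        # Worked example (hypothetical values): a status of
                        # 0x0100 (256) decodes to exit code 1 below, while a
                        # status of 9 decodes to -9 (killed by SIGKILL),
                        # matching the subprocess returncode convention.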
                        status = os.waitpid(self.pid, 0)[1]

                        # For consistency, format status the same as subprocess'
                        # returncode attribute
                        if status > 255:
                            return status >> 8
                        return -status
                    except OSError as e:
                        if getattr(e, "errno", None) != 10:
                            # Error 10 is "no child process", which can indicate
                            # a normal close; anything else is a real problem.
                            print("Encountered error waiting for pid to close: %s" % e,
                                  file=sys.stderr)
                            raise

                        return self.returncode

                else:
                    # For non-group wait, call base class
                    if six.PY2:
                        subprocess.Popen.wait(self)
                    else:
                        # timeout was introduced in Python 3.3
                        subprocess.Popen.wait(self, timeout=timeout)
                    return self.returncode

            def _cleanup(self):
                pass

        else:
            # An unrecognized platform, we will call the base class for everything
            print("Unrecognized platform, process groups may not "
                  "be managed properly", file=sys.stderr)

            def _custom_wait(self, timeout=None):
                if six.PY2:
                    self.returncode = subprocess.Popen.wait(self)
                else:
                    # timeout was introduced in Python 3.3
                    self.returncode = subprocess.Popen.wait(self, timeout=timeout)
                return self.returncode

            def _cleanup(self):
                pass

    def __init__(self,
                 cmd,
                 args=None,
                 cwd=None,
                 env=None,
                 ignore_children=False,
                 kill_on_timeout=True,
                 processOutputLine=(),
                 processStderrLine=(),
                 onTimeout=(),
                 onFinish=(),
                 **kwargs):
        self.cmd = cmd
        self.args = args
        self.cwd = cwd
        self.didTimeout = False
        self.didOutputTimeout = False
        self._ignore_children = ignore_children
        self.keywordargs = kwargs
        self.read_buffer = ''

        if env is None:
            env = os.environ.copy()
        self.env = env

        # handlers
        def to_callable_list(arg):
            if callable(arg):
                arg = [arg]
            return CallableList(arg)

        processOutputLine = to_callable_list(processOutputLine)
        processStderrLine = to_callable_list(processStderrLine)
        onTimeout = to_callable_list(onTimeout)
        onFinish = to_callable_list(onFinish)

        def on_timeout():
            self.didTimeout = True
            self.didOutputTimeout = self.reader.didOutputTimeout
            if kill_on_timeout:
                self.kill()

        onTimeout.insert(0, on_timeout)

        self._stderr = subprocess.STDOUT
        if processStderrLine:
            self._stderr = subprocess.PIPE
        self.reader = ProcessReader(stdout_callback=processOutputLine,
                                    stderr_callback=processStderrLine,
                                    finished_callback=onFinish,
                                    timeout_callback=onTimeout)

        # It is common for people to pass in the entire array with the cmd and
        # the args together since this is how Popen uses it.  Allow for that.
        if isinstance(self.cmd, list):
            if self.args is not None:
                raise TypeError("cmd and args must not both be lists")
            (self.cmd, self.args) = (self.cmd[0], self.cmd[1:])
        elif self.args is None:
            self.args = []

    def debug(self, msg):
        if not MOZPROCESS_DEBUG:
            return
        cmd = self.cmd.split(os.sep)[-1:]
        print("DBG::MOZPROC ProcessHandlerMixin {} | {}".format(cmd, msg))

    @property
    def timedOut(self):
        """True if the process has timed out for any reason."""
        return self.didTimeout

    @property
    def outputTimedOut(self):
        """True if the process has timed out for no output."""
        return self.didOutputTimeout

    @property
    def commandline(self):
        """the string value of the command line (command + args)"""
        return subprocess.list2cmdline([self.cmd] + self.args)

    def run(self, timeout=None, outputTimeout=None):
        """
        Starts the process.

        If timeout is not None, the process will be allowed to continue for
        that number of seconds before being killed. If the process is killed
        due to a timeout, the onTimeout handler will be called.

        If outputTimeout is not None, the process will be allowed to continue
        for that number of seconds without producing any output before
        being killed.
        """
        self.didTimeout = False
        self.didOutputTimeout = False

        # default arguments
        args = dict(stdout=subprocess.PIPE,
                    stderr=self._stderr,
                    cwd=self.cwd,
                    env=self.env,
                    ignore_children=self._ignore_children)

        # build process arguments
        args.update(self.keywordargs)

        # launch the process
        self.proc = self.Process([self.cmd] + self.args, **args)

        if isPosix:
            # Keep track of the initial process group in case the process detaches itself
            self.proc.pgid = self._getpgid(self.proc.pid)
            self.proc.detached_pid = None

        self.processOutput(timeout=timeout, outputTimeout=outputTimeout)

    def kill(self, sig=None):
        """
        Kills the managed process.

        If you created the process with 'ignore_children=False' (the
        default) then it will also kill all child processes spawned by
        it. If you specified 'ignore_children=True' when creating the
        process, only the root process will be killed.

        Note that this does not manage any state or save any output;
        it immediately kills the process.

        :param sig: Signal used to kill the process. If not given, SIGTERM
                    is sent first on POSIX systems, followed by SIGKILL if
                    the process has not exited after a short grace period.
                    (Has no effect on Windows.)
        """
        if not hasattr(self, "proc"):
            raise RuntimeError("Process hasn't been started yet")

        self.proc.kill(sig=sig)

        # When we kill the managed process we also have to wait for the
        # reader thread to finish. Otherwise consumers could not rely on the
        # process having completely shut down.
        rc = self.wait()
        if rc is None:
            self.debug("kill: wait failed -- process is still alive")
        return rc

    def poll(self):
        """Check if child process has terminated

        Returns the current returncode value:
        - None if the process hasn't terminated yet
        - A negative number if the process was killed by signal N (Unix only)
        - 0 if the process ended without failures

        """
        if not hasattr(self, "proc"):
            raise RuntimeError("Process hasn't been started yet")

        # Ensure that we first check for the reader status. Otherwise
        # we might mark the process as finished while output is still getting
        # processed.
        elif self.reader.is_alive():
            return None
        elif hasattr(self, "returncode"):
            return self.returncode
        else:
            return self.proc.poll()

    def processOutput(self, timeout=None, outputTimeout=None):
        """
        Handle process output until the process terminates or times out.

        If timeout is not None, the process will be allowed to continue for
        that number of seconds before being killed.

        If outputTimeout is not None, the process will be allowed to continue
        for that number of seconds without producing any output before
        being killed.
        """
        # this method is kept for backward compatibility
        if not hasattr(self, 'proc'):
            self.run(timeout=timeout, outputTimeout=outputTimeout)
            # self.run will call this again
            return
        if not self.reader.is_alive():
            self.reader.timeout = timeout
            self.reader.output_timeout = outputTimeout
            self.reader.start(self.proc)

    def wait(self, timeout=None):
        """
        Waits until all output has been read and the process is
        terminated.

        If timeout is not None, will return after timeout seconds.
        This timeout only causes the wait function to return and
        does not kill the process.

        Returns the process exit code value:
        - None if the process hasn't terminated yet
        - A negative number if the process was killed by signal N (Unix only)
        - 0 if the process ended without failures

        """
        # Thread.join() blocks the main thread until the reader thread is finished
        # wake up once a second in case a keyboard interrupt is sent
        if self.reader.thread and self.reader.thread is not threading.current_thread():
            count = 0
            while self.reader.is_alive():
                self.reader.join(timeout=1)
                count += 1
                if timeout is not None and count > timeout:
                    self.debug("wait timeout for reader thread")
                    return None

        self.returncode = self.proc.wait()
        return self.returncode

    @property
    def pid(self):
        if not hasattr(self, "proc"):
            raise RuntimeError("Process hasn't been started yet")

        return self.proc.pid

    @staticmethod
    def pid_exists(pid):
        if pid < 0:
            return False

        if isWin:
            try:
                process = winprocess.OpenProcess(
                    winprocess.PROCESS_QUERY_INFORMATION | winprocess.PROCESS_VM_READ, False, pid)
                return winprocess.GetExitCodeProcess(process) == winprocess.STILL_ACTIVE

            except WindowsError as e:
                # no such process
                if e.winerror == winprocess.ERROR_INVALID_PARAMETER:
                    return False

                # access denied
                if e.winerror == winprocess.ERROR_ACCESS_DENIED:
                    return True

                # re-raise for any other type of exception
                raise

        elif isPosix:
            try:
                os.kill(pid, 0)
            except OSError as e:
                return e.errno == errno.EPERM
            else:
                return True

    @classmethod
    def _getpgid(cls, pid):
        try:
            return os.getpgid(pid)
        except OSError as e:
            # Do not raise for "No such process"
            if e.errno != errno.ESRCH:
                raise

    def check_for_detached(self, new_pid):
        """Check if the current process has been detached and mark it appropriately.

        In case of application restarts the process can spawn itself into a new process group.
        From then on mozprocess can no longer track the process, so it has to be
        marked as detached. If the consumer of mozprocess knows the new process id,
        it can check for the detached state.

        new_pid is the new process id of the child process.
        """
        if not hasattr(self, "proc"):
            raise RuntimeError("Process hasn't been started yet")

        if isPosix:
            new_pgid = self._getpgid(new_pid)

            if new_pgid and new_pgid != self.proc.pgid:
                self.proc.detached_pid = new_pid
                print('Child process with id "%s" has been marked as detached because it is no '
                      'longer in the managed process group. Keeping reference to the process id '
                      '"%s" which is the new child process.' %
                      (self.pid, new_pid), file=sys.stdout)


class CallableList(list):

    def __call__(self, *args, **kwargs):
        for e in self:
            e(*args, **kwargs)

    def __add__(self, lst):
        return CallableList(list.__add__(self, lst))


class ProcessReader(object):

    def __init__(self, stdout_callback=None, stderr_callback=None,
                 finished_callback=None, timeout_callback=None,
                 timeout=None, output_timeout=None):
        self.stdout_callback = stdout_callback or (lambda line: True)
        self.stderr_callback = stderr_callback or (lambda line: True)
        self.finished_callback = finished_callback or (lambda: True)
        self.timeout_callback = timeout_callback or (lambda: True)
        self.timeout = timeout
        self.output_timeout = output_timeout
        self.thread = None
        self.didOutputTimeout = False

    def debug(self, msg):
        if not MOZPROCESS_DEBUG:
            return
        print("DBG::MOZPROC ProcessReader | {}".format(msg))

    def _create_stream_reader(self, name, stream, queue, callback):
        thread = threading.Thread(name=name,
                                  target=self._read_stream,
                                  args=(stream, queue, callback))
        thread.daemon = True
        thread.start()
        return thread

    def _read_stream(self, stream, queue, callback):
        while True:
            line = stream.readline()
            if not line:
                break
            queue.put((line, callback))
        stream.close()

    def start(self, proc):
        queue = Queue()
        stdout_reader = None
        if proc.stdout:
            stdout_reader = self._create_stream_reader('ProcessReaderStdout',
                                                       proc.stdout,
                                                       queue,
                                                       self.stdout_callback)
        stderr_reader = None
        if proc.stderr and proc.stderr != proc.stdout:
            stderr_reader = self._create_stream_reader('ProcessReaderStderr',
                                                       proc.stderr,
                                                       queue,
                                                       self.stderr_callback)
        self.thread = threading.Thread(name='ProcessReader',
                                       target=self._read,
                                       args=(stdout_reader,
                                             stderr_reader,
                                             queue))
        self.thread.daemon = True
        self.thread.start()
        self.debug("ProcessReader started")

    def _read(self, stdout_reader, stderr_reader, queue):
        start_time = time.time()
        timed_out = False
        timeout = self.timeout
        if timeout is not None:
            timeout += start_time
        output_timeout = self.output_timeout
        if output_timeout is not None:
            output_timeout += start_time

        while (stdout_reader and stdout_reader.is_alive()) \
                or (stderr_reader and stderr_reader.is_alive()):
            has_line = True
            try:
                line, callback = queue.get(True, INTERVAL_PROCESS_ALIVE_CHECK)
            except Empty:
                has_line = False
            now = time.time()
            if not has_line:
                if output_timeout is not None and now > output_timeout:
                    timed_out = True
                    self.didOutputTimeout = True
                    break
            else:
                if output_timeout is not None:
                    output_timeout = now + self.output_timeout
                callback(line.rstrip())
            if timeout is not None and now > timeout:
                timed_out = True
                break
        self.debug("_read loop exited")
        # process remaining lines to read
        while not queue.empty():
            line, callback = queue.get(False)
            callback(line.rstrip())
        if timed_out:
            self.timeout_callback()
        if stdout_reader:
            stdout_reader.join()
        if stderr_reader:
            stderr_reader.join()
        if not timed_out:
            self.finished_callback()
        self.debug("_read exited")

    def is_alive(self):
        if self.thread:
            return self.thread.is_alive()
        return False

    def join(self, timeout=None):
        if self.thread:
            self.thread.join(timeout=timeout)


# default output handlers
# these should be callables that take the output line


class StoreOutput(object):
    """accumulate stdout"""

    def __init__(self):
        self.output = []

    def __call__(self, line):
        self.output.append(line)


class StreamOutput(object):
    """pass output to a stream and flush"""

    def __init__(self, stream):
        self.stream = stream

    def __call__(self, line):
        try:
            self.stream.write(line + '\n'.encode('utf8'))
        except UnicodeDecodeError:
            # TODO: Workaround for bug #991866 to make sure we can display
            # output when normal UTF-8 display is failing
            self.stream.write(line.decode('iso8859-1') + '\n')
        self.stream.flush()


class LogOutput(StreamOutput):
    """pass output to a file"""

    def __init__(self, filename):
        self.file_obj = open(filename, 'a')
        StreamOutput.__init__(self, self.file_obj)

    def __del__(self):
        if self.file_obj is not None:
            self.file_obj.close()


# front end class with the default handlers


class ProcessHandler(ProcessHandlerMixin):
    """
    Convenience class for handling processes with default output handlers.

    By default, all output is sent to stdout. This can be disabled by setting
    the *stream* argument to None.

    If the processOutputLine keyword argument is specified, the function or the
    list of functions given there will be called for each line of output; in
    that case the output will not be written to stdout automatically, even if
    stream is True (the default).

    If storeOutput==True, the output produced by the process will be saved
    as self.output.

    If logfile is not None, the output produced by the process will be
    appended to the given file.
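
    Example (an illustrative sketch only; the command and timeout shown are
    hypothetical)::

        p = ProcessHandler(['ls', '-la'])
        p.run(timeout=30)
        exit_code = p.wait()
        # p.output holds the captured lines because storeOutput defaults to True
        for line in p.output:
            print(line)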
    """

    def __init__(self, cmd, logfile=None, stream=True, storeOutput=True,
                 **kwargs):
        kwargs.setdefault('processOutputLine', [])
        if callable(kwargs['processOutputLine']):
            kwargs['processOutputLine'] = [kwargs['processOutputLine']]

        if logfile:
            logoutput = LogOutput(logfile)
            kwargs['processOutputLine'].append(logoutput)

        if stream is True:
            # Print to standard output only if no outputline provided
            if not kwargs['processOutputLine']:
                kwargs['processOutputLine'].append(StreamOutput(sys.stdout))
        elif stream:
            streamoutput = StreamOutput(stream)
            kwargs['processOutputLine'].append(streamoutput)

        self.output = None
        if storeOutput:
            storeoutput = StoreOutput()
            self.output = storeoutput.output
            kwargs['processOutputLine'].append(storeoutput)

        ProcessHandlerMixin.__init__(self, cmd, **kwargs)